/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call to get
   the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
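
/* Usage sketch for the table above (illustrative, not an exhaustive list of
   the option's behavior): -mrecip=divd,rsqrtf would OR together the "divd"
   and "rsqrtf" masks, i.e. enable only the double-precision divide estimates
   (DF/V2DF) and the single-precision reciprocal square root estimates
   (SF/V4SF).  */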

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",		PPC_PLATFORM_POWER9 },
  { "power8",		PPC_PLATFORM_POWER8 },
  { "power7",		PPC_PLATFORM_POWER7 },
  { "power6x",		PPC_PLATFORM_POWER6X },
  { "power6",		PPC_PLATFORM_POWER6 },
  { "power5+",		PPC_PLATFORM_POWER5_PLUS },
  { "power5",		PPC_PLATFORM_POWER5 },
  { "ppc970",		PPC_PLATFORM_PPC970 },
  { "power4",		PPC_PLATFORM_POWER4 },
  { "ppca2",		PPC_PLATFORM_PPCA2 },
  { "ppc476",		PPC_PLATFORM_PPC476 },
  { "ppc464",		PPC_PLATFORM_PPC464 },
  { "ppc440",		PPC_PLATFORM_PPC440 },
  { "ppc405",		PPC_PLATFORM_PPC405 },
  { "ppc-cell-be",	PPC_PLATFORM_CELL_BE }
};

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
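
/* As an illustration (assuming a LIBC that fills in the TCB fields, see
   tcb_verification_symbol below): __builtin_cpu_supports ("vsx") compiles
   down to a test of the PPC_FEATURE_HAS_VSX bit in the cached AT_HWCAP
   word, while __builtin_cpu_supports ("arch_2_07") tests
   PPC_FEATURE2_ARCH_2_07 in AT_HWCAP2; the id field above (0 or 1) selects
   which of the two words is consulted.  */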

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
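
/* A hypothetical use of the table above: a function declared with
   __attribute__ ((target_clones ("cpu=power9", "default"))) gets a clone
   compiled with the ISA 3.00 option set, and the runtime resolver selects
   that clone when __builtin_cpu_supports ("arch_3_00") is true.  */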


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
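
/* Example of the range trick above: with the enum ordering,
   IS_STD_REG_TYPE (ALTIVEC_REG_TYPE) is true because
   GPR_REG_TYPE <= ALTIVEC_REG_TYPE <= FPR_REG_TYPE, while
   IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) is false since GPR_REG_TYPE
   precedes VSX_REG_TYPE.  */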


/* Register classes we care about for secondary reload or when determining
   whether an address is legitimate.  We only need to worry about GPR, FPR,
   and Altivec registers here, along with an ANY field that is the OR of the
   3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
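
/* For instance, a mode that is valid in GPRs with both reg+reg and
   reg+offset addressing would carry an addr_mask of
   (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET) == 0x0d
   in the RELOAD_REG_GPR slot (an illustrative value, not taken from any
   particular target configuration).  */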

/* Per-mode addressing information: the reload insns to use and the masks of
   valid addressing modes for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
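
/* Usage sketch for the helpers above: callers deciding whether to emit an
   update-form (PRE_INC/PRE_DEC) load or store are expected to test something
   like

     if (mode_supports_pre_incdec_p (mode))
       ... emit the update-form insn ...

   rather than reading the addr_mask bits directly.  */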

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
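      /* IN_INSN is a single SET.  Only a store (MEM destination) can form
	 a store-data bypass, so verify that OUT_INSN is either a single
	 SET as well or a PARALLEL containing only SETs, CLOBBERs and USEs
	 before deferring to the generic check.  */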
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
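      /* IN_INSN is not a single SET; it must be a PARALLEL whose elements
	 are all SETs, CLOBBERs or USEs.  Apply the same sanity check to
	 OUT_INSN for each element of IN_INSN that stores to memory.  */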
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	/* mulsi_const */
  COSTS_N_INSNS (6/2),	/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	/* divsi */
  COSTS_N_INSNS (70/2),	/* divdi */
  COSTS_N_INSNS (10/2),	/* fp */
  COSTS_N_INSNS (10/2),	/* dmul */
  COSTS_N_INSNS (74/2),	/* sdiv */
  COSTS_N_INSNS (74/2),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  6,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
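
/* To illustrate the macro scheme above with a hypothetical entry: a line in
   rs6000-builtin.def such as

     RS6000_BUILTIN_2 (FOO, "__builtin_foo", MASK, ATTR, CODE_FOR_foo)

   expands here to the initializer { "__builtin_foo", CODE_FOR_foo, MASK,
   ATTR }, so rs6000_builtin_info is indexed in the same order as the
   rs6000_builtins enumeration generated from the same .def file.  */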

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "mq", "lr", "ctr", "ap",
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
 "ca",
 /* AltiVec registers.  */
  "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  "8",  "9", "10", "11", "12", "13", "14", "15",
 "16", "17", "18", "19", "20", "21", "22", "23",
 "24", "25", "26", "27", "28", "29", "30", "31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
  "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
  "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq",   "lr",   "ctr",  "ap",
 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
 /* AltiVec registers.  */
  "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
  "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
 "vrsave", "vscr",
 /* Soft frame pointer.  */
 "sfp",
 /* HTM SPR registers.  */
 "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO)	(0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
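
/* Worked example: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000
   (the bit for %v0), and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) is
   0x20000000 (the bit for %v2).  */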
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1674
1675 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1676 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1677
1678 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1679 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1680 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1681 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1682 rs6000_builtin_support_vector_misalignment
1683 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1684 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1685 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1686 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1687 rs6000_builtin_vectorization_cost
1688 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1689 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1690 rs6000_preferred_simd_mode
1691 #undef TARGET_VECTORIZE_INIT_COST
1692 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1693 #undef TARGET_VECTORIZE_ADD_STMT_COST
1694 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1695 #undef TARGET_VECTORIZE_FINISH_COST
1696 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1697 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1698 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1699
1700 #undef TARGET_INIT_BUILTINS
1701 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1702 #undef TARGET_BUILTIN_DECL
1703 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1704
1705 #undef TARGET_FOLD_BUILTIN
1706 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1707 #undef TARGET_GIMPLE_FOLD_BUILTIN
1708 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1709
1710 #undef TARGET_EXPAND_BUILTIN
1711 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1712
1713 #undef TARGET_MANGLE_TYPE
1714 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1715
1716 #undef TARGET_INIT_LIBFUNCS
1717 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1718
1719 #if TARGET_MACHO
1720 #undef TARGET_BINDS_LOCAL_P
1721 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1722 #endif
1723
1724 #undef TARGET_MS_BITFIELD_LAYOUT_P
1725 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1726
1727 #undef TARGET_ASM_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1729
1730 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1731 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1732
1733 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1734 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1735
1736 #undef TARGET_REGISTER_MOVE_COST
1737 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1738 #undef TARGET_MEMORY_MOVE_COST
1739 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1740 #undef TARGET_CANNOT_COPY_INSN_P
1741 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1742 #undef TARGET_RTX_COSTS
1743 #define TARGET_RTX_COSTS rs6000_rtx_costs
1744 #undef TARGET_ADDRESS_COST
1745 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1746 #undef TARGET_INSN_COST
1747 #define TARGET_INSN_COST rs6000_insn_cost
1748
1749 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1750 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1751
1752 #undef TARGET_PROMOTE_FUNCTION_MODE
1753 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1754
1755 #undef TARGET_RETURN_IN_MEMORY
1756 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1757
1758 #undef TARGET_RETURN_IN_MSB
1759 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1760
1761 #undef TARGET_SETUP_INCOMING_VARARGS
1762 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1763
1764 /* Always strict argument naming on rs6000. */
1765 #undef TARGET_STRICT_ARGUMENT_NAMING
1766 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1767 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1768 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1769 #undef TARGET_SPLIT_COMPLEX_ARG
1770 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1771 #undef TARGET_MUST_PASS_IN_STACK
1772 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1773 #undef TARGET_PASS_BY_REFERENCE
1774 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1775 #undef TARGET_ARG_PARTIAL_BYTES
1776 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1777 #undef TARGET_FUNCTION_ARG_ADVANCE
1778 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1779 #undef TARGET_FUNCTION_ARG
1780 #define TARGET_FUNCTION_ARG rs6000_function_arg
1781 #undef TARGET_FUNCTION_ARG_PADDING
1782 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1783 #undef TARGET_FUNCTION_ARG_BOUNDARY
1784 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1785
1786 #undef TARGET_BUILD_BUILTIN_VA_LIST
1787 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1788
1789 #undef TARGET_EXPAND_BUILTIN_VA_START
1790 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1791
1792 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1793 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1794
1795 #undef TARGET_EH_RETURN_FILTER_MODE
1796 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1797
1798 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1799 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1800
1801 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1802 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1803
1804 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1805 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1806
1807 #undef TARGET_FLOATN_MODE
1808 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1809
1810 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1811 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1812
1813 #undef TARGET_MD_ASM_ADJUST
1814 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1815
1816 #undef TARGET_OPTION_OVERRIDE
1817 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1818
1819 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1820 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1821 rs6000_builtin_vectorized_function
1822
1823 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1824 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1825 rs6000_builtin_md_vectorized_function
1826
1827 #undef TARGET_STACK_PROTECT_GUARD
1828 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1829
1830 #if !TARGET_MACHO
1831 #undef TARGET_STACK_PROTECT_FAIL
1832 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1833 #endif
1834
1835 #ifdef HAVE_AS_TLS
1836 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1837 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1838 #endif
1839
1840 /* Use a 32-bit anchor range. This leads to sequences like:
1841
1842 addis tmp,anchor,high
1843 add dest,tmp,low
1844
1845 where tmp itself acts as an anchor, and can be shared between
1846 accesses to the same 64k page. */
1847 #undef TARGET_MIN_ANCHOR_OFFSET
1848 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1849 #undef TARGET_MAX_ANCHOR_OFFSET
1850 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
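/* A sketch of the high/low split behind the addis/add pair above
   (illustrative, not in the original source).  The +0x8000 rounding
   compensates for the low 16 bits being sign-extended:

     HOST_WIDE_INT high = (offset + 0x8000) & ~(HOST_WIDE_INT) 0xffff;
     HOST_WIDE_INT low = offset - high;   (always in [-0x8000, 0x7fff])

   so offset == high + low, with high going into the addis immediate
   (shifted right by 16) and low into the add or load displacement.  */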
1851 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1852 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1853 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1854 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1855
1856 #undef TARGET_BUILTIN_RECIPROCAL
1857 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1858
1859 #undef TARGET_SECONDARY_RELOAD
1860 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1861 #undef TARGET_SECONDARY_MEMORY_NEEDED
1862 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1863 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1864 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1865
1866 #undef TARGET_LEGITIMATE_ADDRESS_P
1867 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1868
1869 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1870 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1871
1872 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1873 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1874
1875 #undef TARGET_CAN_ELIMINATE
1876 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1877
1878 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1879 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1880
1881 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1882 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1883
1884 #undef TARGET_TRAMPOLINE_INIT
1885 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1886
1887 #undef TARGET_FUNCTION_VALUE
1888 #define TARGET_FUNCTION_VALUE rs6000_function_value
1889
1890 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1891 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1892
1893 #undef TARGET_OPTION_SAVE
1894 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1895
1896 #undef TARGET_OPTION_RESTORE
1897 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1898
1899 #undef TARGET_OPTION_PRINT
1900 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1901
1902 #undef TARGET_CAN_INLINE_P
1903 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1904
1905 #undef TARGET_SET_CURRENT_FUNCTION
1906 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1907
1908 #undef TARGET_LEGITIMATE_CONSTANT_P
1909 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1910
1911 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1912 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1913
1914 #undef TARGET_CAN_USE_DOLOOP_P
1915 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1916
1917 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1918 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1919
1920 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1921 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1922 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1923 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1924 #undef TARGET_UNWIND_WORD_MODE
1925 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1926
1927 #undef TARGET_OFFLOAD_OPTIONS
1928 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1929
1930 #undef TARGET_C_MODE_FOR_SUFFIX
1931 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1932
1933 #undef TARGET_INVALID_BINARY_OP
1934 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1935
1936 #undef TARGET_OPTAB_SUPPORTED_P
1937 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1938
1939 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1940 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1941
1942 #undef TARGET_COMPARE_VERSION_PRIORITY
1943 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1944
1945 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1946 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1947 rs6000_generate_version_dispatcher_body
1948
1949 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1950 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1951 rs6000_get_function_versions_dispatcher
1952
1953 #undef TARGET_OPTION_FUNCTION_VERSIONS
1954 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1955
1956 #undef TARGET_HARD_REGNO_NREGS
1957 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1958 #undef TARGET_HARD_REGNO_MODE_OK
1959 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1960
1961 #undef TARGET_MODES_TIEABLE_P
1962 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1963
1964 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1965 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1966 rs6000_hard_regno_call_part_clobbered
1967
1968 #undef TARGET_SLOW_UNALIGNED_ACCESS
1969 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1970
1971 #undef TARGET_CAN_CHANGE_MODE_CLASS
1972 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1973
1974 #undef TARGET_CONSTANT_ALIGNMENT
1975 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1976
1977 #undef TARGET_STARTING_FRAME_OFFSET
1978 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1979
1980 #if TARGET_ELF && RS6000_WEAK
1981 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1982 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1983 #endif
1984
1985 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1986 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1987
1988 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1989 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1990 \f
1991
1992 /* Processor table. */
1993 struct rs6000_ptt
1994 {
1995 const char *const name; /* Canonical processor name. */
1996 const enum processor_type processor; /* Processor type enum value. */
1997 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1998 };
1999
2000 static struct rs6000_ptt const processor_target_table[] =
2001 {
2002 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2003 #include "rs6000-cpus.def"
2004 #undef RS6000_CPU
2005 };
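/* A hypothetical expansion (not taken from the real rs6000-cpus.def): an
   entry such as

     RS6000_CPU ("power9", PROCESSOR_POWER9, POWER9_FLAGS)

   in the .def file becomes, via the macro above, the initializer

     { "power9", PROCESSOR_POWER9, POWER9_FLAGS },

   so the whole table is generated from the .def file.  POWER9_FLAGS here
   merely stands in for the real flag mask.  */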
2006
2007 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2008 name is invalid. */
2009
2010 static int
2011 rs6000_cpu_name_lookup (const char *name)
2012 {
2013 size_t i;
2014
2015 if (name != NULL)
2016 {
2017 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2018 if (! strcmp (name, processor_target_table[i].name))
2019 return (int)i;
2020 }
2021
2022 return -1;
2023 }
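/* Usage sketch (illustrative, not in the original source):

     int idx = rs6000_cpu_name_lookup ("power9");
     if (idx >= 0)
       cpu = processor_target_table[idx].processor;
     else
       error ("unknown processor");

   where "power9" is just an example name; an unknown name yields -1.  */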
2024
2025 \f
2026 /* Return number of consecutive hard regs needed starting at reg REGNO
2027 to hold something of mode MODE.
2028 This is ordinarily the length in words of a value of mode MODE
2029 but can be less for certain modes in special long registers.
2030
2031 POWER and PowerPC GPRs hold 32 bits worth;
2032 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2033
2034 static int
2035 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2036 {
2037 unsigned HOST_WIDE_INT reg_size;
2038
2039 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2040 128-bit floating point that can go in vector registers, which has VSX
2041 memory addressing. */
2042 if (FP_REGNO_P (regno))
2043 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2044 ? UNITS_PER_VSX_WORD
2045 : UNITS_PER_FP_WORD);
2046
2047 else if (ALTIVEC_REGNO_P (regno))
2048 reg_size = UNITS_PER_ALTIVEC_WORD;
2049
2050 else
2051 reg_size = UNITS_PER_WORD;
2052
2053 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2054 }
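/* Worked example (not in the original source): the return value is a
   ceiling division, i.e. ceil (GET_MODE_SIZE (mode) / reg_size).  On a
   64-bit target a 16-byte TImode value needs (16 + 8 - 1) / 8 == 2 GPRs,
   but only (16 + 16 - 1) / 16 == 1 AltiVec register.  */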
2055
2056 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2057 MODE. */
2058 static int
2059 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2060 {
2061 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2062
2063 if (COMPLEX_MODE_P (mode))
2064 mode = GET_MODE_INNER (mode);
2065
2066 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2067 register pairs, and we use PTImode where we need to deal with quad
2068 word memory operations. Don't allow quad words in the argument or frame
2069 pointer registers, just registers 0..31. */
2070 if (mode == PTImode)
2071 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2072 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2073 && ((regno & 1) == 0));
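/* For instance (illustrative, not in the original source), a PTImode value
   starting at the even register r10 spans the valid pair r10/r11, while one
   starting at r11 fails the (regno & 1) == 0 test above.  */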
2074
2075 /* VSX registers that overlap the FPR registers are larger than on non-VSX
2076 implementations. Don't allow an item to be split between an FP register
2077 and an Altivec register. Allow TImode in all VSX registers if the user
2078 asked for it. */
2079 if (TARGET_VSX && VSX_REGNO_P (regno)
2080 && (VECTOR_MEM_VSX_P (mode)
2081 || FLOAT128_VECTOR_P (mode)
2082 || reg_addr[mode].scalar_in_vmx_p
2083 || mode == TImode
2084 || (TARGET_VADDUQM && mode == V1TImode)))
2085 {
2086 if (FP_REGNO_P (regno))
2087 return FP_REGNO_P (last_regno);
2088
2089 if (ALTIVEC_REGNO_P (regno))
2090 {
2091 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2092 return 0;
2093
2094 return ALTIVEC_REGNO_P (last_regno);
2095 }
2096 }
2097
2098 /* The GPRs can hold any mode, but values bigger than one register
2099 cannot go past R31. */
2100 if (INT_REGNO_P (regno))
2101 return INT_REGNO_P (last_regno);
2102
2103 /* The float registers (except for VSX vector modes) can only hold floating
2104 modes and DImode. */
2105 if (FP_REGNO_P (regno))
2106 {
2107 if (FLOAT128_VECTOR_P (mode))
2108 return false;
2109
2110 if (SCALAR_FLOAT_MODE_P (mode)
2111 && (mode != TDmode || (regno % 2) == 0)
2112 && FP_REGNO_P (last_regno))
2113 return 1;
2114
2115 if (GET_MODE_CLASS (mode) == MODE_INT)
2116 {
2117 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2118 return 1;
2119
2120 if (TARGET_P8_VECTOR && (mode == SImode))
2121 return 1;
2122
2123 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2124 return 1;
2125 }
2126
2127 return 0;
2128 }
2129
2130 /* The CR registers can only hold CC modes. */
2131 if (CR_REGNO_P (regno))
2132 return GET_MODE_CLASS (mode) == MODE_CC;
2133
2134 if (CA_REGNO_P (regno))
2135 return mode == Pmode || mode == SImode;
2136
2137 /* AltiVec modes can go only in AltiVec registers. */
2138 if (ALTIVEC_REGNO_P (regno))
2139 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2140 || mode == V1TImode);
2141
2142 /* We cannot put non-VSX TImode or PTImode anywhere except in the general
2143 registers, and the value must fit within the register set. */
2144
2145 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2146 }
2147
2148 /* Implement TARGET_HARD_REGNO_NREGS. */
2149
2150 static unsigned int
2151 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2152 {
2153 return rs6000_hard_regno_nregs[mode][regno];
2154 }
2155
2156 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2157
2158 static bool
2159 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2160 {
2161 return rs6000_hard_regno_mode_ok_p[mode][regno];
2162 }
2163
2164 /* Implement TARGET_MODES_TIEABLE_P.
2165
2166 PTImode cannot tie with other modes because PTImode is restricted to even
2167 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2168 57744).
2169
2170 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2171 128-bit floating point on VSX systems ties with other vectors. */
2172
2173 static bool
2174 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2175 {
2176 if (mode1 == PTImode)
2177 return mode2 == PTImode;
2178 if (mode2 == PTImode)
2179 return false;
2180
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2182 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2183 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2184 return false;
2185
2186 if (SCALAR_FLOAT_MODE_P (mode1))
2187 return SCALAR_FLOAT_MODE_P (mode2);
2188 if (SCALAR_FLOAT_MODE_P (mode2))
2189 return false;
2190
2191 if (GET_MODE_CLASS (mode1) == MODE_CC)
2192 return GET_MODE_CLASS (mode2) == MODE_CC;
2193 if (GET_MODE_CLASS (mode2) == MODE_CC)
2194 return false;
2195
2196 return true;
2197 }
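/* Illustrative results (not in the original source): under these rules
   V2DFmode ties with V4SFmode (both AltiVec/VSX vector modes), DFmode ties
   with SFmode (both scalar float), but DFmode does not tie with V2DFmode,
   and PTImode ties only with itself.  */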
2198
2199 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2200
2201 static bool
2202 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2203 unsigned int regno, machine_mode mode)
2204 {
2205 if (TARGET_32BIT
2206 && TARGET_POWERPC64
2207 && GET_MODE_SIZE (mode) > 4
2208 && INT_REGNO_P (regno))
2209 return true;
2210
2211 if (TARGET_VSX
2212 && FP_REGNO_P (regno)
2213 && GET_MODE_SIZE (mode) > 8
2214 && !FLOAT128_2REG_P (mode))
2215 return true;
2216
2217 return false;
2218 }
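/* Concrete case (illustrative, not in the original source): with
   -m32 -mpowerpc64, a DImode value lives in a single 64-bit GPR, but the
   32-bit ABI only preserves the low 32 bits across calls, so the first
   test above reports the register as part-clobbered.  */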
2219
2220 /* Print interesting facts about registers. */
2221 static void
2222 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2223 {
2224 int r, m;
2225
2226 for (r = first_regno; r <= last_regno; ++r)
2227 {
2228 const char *comma = "";
2229 int len;
2230
2231 if (first_regno == last_regno)
2232 fprintf (stderr, "%s:\t", reg_name);
2233 else
2234 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2235
2236 len = 8;
2237 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2238 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2239 {
2240 if (len > 70)
2241 {
2242 fprintf (stderr, ",\n\t");
2243 len = 8;
2244 comma = "";
2245 }
2246
2247 if (rs6000_hard_regno_nregs[m][r] > 1)
2248 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2249 rs6000_hard_regno_nregs[m][r]);
2250 else
2251 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2252
2253 comma = ", ";
2254 }
2255
2256 if (call_used_regs[r])
2257 {
2258 if (len > 70)
2259 {
2260 fprintf (stderr, ",\n\t");
2261 len = 8;
2262 comma = "";
2263 }
2264
2265 len += fprintf (stderr, "%s%s", comma, "call-used");
2266 comma = ", ";
2267 }
2268
2269 if (fixed_regs[r])
2270 {
2271 if (len > 70)
2272 {
2273 fprintf (stderr, ",\n\t");
2274 len = 8;
2275 comma = "";
2276 }
2277
2278 len += fprintf (stderr, "%s%s", comma, "fixed");
2279 comma = ", ";
2280 }
2281
2282 if (len > 70)
2283 {
2284 fprintf (stderr, ",\n\t");
2285 comma = "";
2286 }
2287
2288 len += fprintf (stderr, "%sreg-class = %s", comma,
2289 reg_class_names[(int)rs6000_regno_regclass[r]]);
2290 comma = ", ";
2291
2292 if (len > 70)
2293 {
2294 fprintf (stderr, ",\n\t");
2295 comma = "";
2296 }
2297
2298 fprintf (stderr, "%sregno = %d\n", comma, r);
2299 }
2300 }
2301
2302 static const char *
2303 rs6000_debug_vector_unit (enum rs6000_vector v)
2304 {
2305 const char *ret;
2306
2307 switch (v)
2308 {
2309 case VECTOR_NONE: ret = "none"; break;
2310 case VECTOR_ALTIVEC: ret = "altivec"; break;
2311 case VECTOR_VSX: ret = "vsx"; break;
2312 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2313 default: ret = "unknown"; break;
2314 }
2315
2316 return ret;
2317 }
2318
2319 /* Inner function printing just the address mask for a particular reload
2320 register class. */
2321 DEBUG_FUNCTION char *
2322 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2323 {
2324 static char ret[8];
2325 char *p = ret;
2326
2327 if ((mask & RELOAD_REG_VALID) != 0)
2328 *p++ = 'v';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2333 *p++ = 'm';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_INDEXED) != 0)
2338 *p++ = 'i';
2339 else if (keep_spaces)
2340 *p++ = ' ';
2341
2342 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2343 *p++ = 'O';
2344 else if ((mask & RELOAD_REG_OFFSET) != 0)
2345 *p++ = 'o';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2355 *p++ = '+';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 if ((mask & RELOAD_REG_AND_M16) != 0)
2360 *p++ = '&';
2361 else if (keep_spaces)
2362 *p++ = ' ';
2363
2364 *p = '\0';
2365
2366 return ret;
2367 }
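/* Example output (illustrative, not in the original source): a mask of
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
   | RELOAD_REG_PRE_INCDEC prints as "vio+" without keep_spaces and as
   "v io+  " with it, following the v/m/i/O-or-o/+/+/& column order
   above.  */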
2368
2369 /* Print the address masks in a human readable fashion. */
2370 DEBUG_FUNCTION void
2371 rs6000_debug_print_mode (ssize_t m)
2372 {
2373 ssize_t rc;
2374 int spaces = 0;
2375
2376 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2377 for (rc = 0; rc < N_RELOAD_REG; rc++)
2378 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2379 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2380
2381 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2382 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2383 {
2384 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2385 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2386 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2387 spaces = 0;
2388 }
2389 else
2390 spaces += sizeof (" Reload=sl") - 1;
2391
2392 if (reg_addr[m].scalar_in_vmx_p)
2393 {
2394 fprintf (stderr, "%*s Upper=y", spaces, "");
2395 spaces = 0;
2396 }
2397 else
2398 spaces += sizeof (" Upper=y") - 1;
2399
2400 if (rs6000_vector_unit[m] != VECTOR_NONE
2401 || rs6000_vector_mem[m] != VECTOR_NONE)
2402 {
2403 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2404 spaces, "",
2405 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2406 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2407 }
2408
2409 fputs ("\n", stderr);
2410 }
2411
2412 #define DEBUG_FMT_ID "%-32s= "
2413 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2414 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2415 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
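/* An illustrative expansion (not in the original source):

     fprintf (stderr, DEBUG_FMT_S, "abi", "ELFv2");

   left-justifies the key in a 32-character field and prints

     abi                             = ELFv2
*/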
2416
2417 /* Print various interesting information with -mdebug=reg. */
2418 static void
2419 rs6000_debug_reg_global (void)
2420 {
2421 static const char *const tf[2] = { "false", "true" };
2422 const char *nl = (const char *)0;
2423 int m;
2424 size_t m1, m2, v;
2425 char costly_num[20];
2426 char nop_num[20];
2427 char flags_buffer[40];
2428 const char *costly_str;
2429 const char *nop_str;
2430 const char *trace_str;
2431 const char *abi_str;
2432 const char *cmodel_str;
2433 struct cl_target_option cl_opts;
2434
2435 /* Modes we want tieable information on. */
2436 static const machine_mode print_tieable_modes[] = {
2437 QImode,
2438 HImode,
2439 SImode,
2440 DImode,
2441 TImode,
2442 PTImode,
2443 SFmode,
2444 DFmode,
2445 TFmode,
2446 IFmode,
2447 KFmode,
2448 SDmode,
2449 DDmode,
2450 TDmode,
2451 V16QImode,
2452 V8HImode,
2453 V4SImode,
2454 V2DImode,
2455 V1TImode,
2456 V32QImode,
2457 V16HImode,
2458 V8SImode,
2459 V4DImode,
2460 V2TImode,
2461 V4SFmode,
2462 V2DFmode,
2463 V8SFmode,
2464 V4DFmode,
2465 CCmode,
2466 CCUNSmode,
2467 CCEQmode,
2468 };
2469
2470 /* Virtual regs we are interested in. */
2471 static const struct {
2472 int regno; /* register number. */
2473 const char *name; /* register name. */
2474 } virtual_regs[] = {
2475 { STACK_POINTER_REGNUM, "stack pointer:" },
2476 { TOC_REGNUM, "toc: " },
2477 { STATIC_CHAIN_REGNUM, "static chain: " },
2478 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2479 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2480 { ARG_POINTER_REGNUM, "arg pointer: " },
2481 { FRAME_POINTER_REGNUM, "frame pointer:" },
2482 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2483 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2484 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2485 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2486 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2487 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2488 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2489 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2490 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2491 };
2492
2493 fputs ("\nHard register information:\n", stderr);
2494 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2495 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2496 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2497 LAST_ALTIVEC_REGNO,
2498 "vs");
2499 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2500 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2501 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2502 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2503 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2504 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2505
2506 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2507 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2508 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2509
2510 fprintf (stderr,
2511 "\n"
2512 "d reg_class = %s\n"
2513 "f reg_class = %s\n"
2514 "v reg_class = %s\n"
2515 "wa reg_class = %s\n"
2516 "wb reg_class = %s\n"
2517 "wd reg_class = %s\n"
2518 "we reg_class = %s\n"
2519 "wf reg_class = %s\n"
2520 "wg reg_class = %s\n"
2521 "wh reg_class = %s\n"
2522 "wi reg_class = %s\n"
2523 "wj reg_class = %s\n"
2524 "wk reg_class = %s\n"
2525 "wl reg_class = %s\n"
2526 "wm reg_class = %s\n"
2527 "wo reg_class = %s\n"
2528 "wp reg_class = %s\n"
2529 "wq reg_class = %s\n"
2530 "wr reg_class = %s\n"
2531 "ws reg_class = %s\n"
2532 "wt reg_class = %s\n"
2533 "wu reg_class = %s\n"
2534 "wv reg_class = %s\n"
2535 "ww reg_class = %s\n"
2536 "wx reg_class = %s\n"
2537 "wy reg_class = %s\n"
2538 "wz reg_class = %s\n"
2539 "wA reg_class = %s\n"
2540 "wH reg_class = %s\n"
2541 "wI reg_class = %s\n"
2542 "wJ reg_class = %s\n"
2543 "wK reg_class = %s\n"
2544 "\n",
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2574 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2575 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2576 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2577
2578 nl = "\n";
2579 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2580 rs6000_debug_print_mode (m);
2581
2582 fputs ("\n", stderr);
2583
2584 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2585 {
2586 machine_mode mode1 = print_tieable_modes[m1];
2587 bool first_time = true;
2588
2589 nl = (const char *)0;
2590 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2591 {
2592 machine_mode mode2 = print_tieable_modes[m2];
2593 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2594 {
2595 if (first_time)
2596 {
2597 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2598 nl = "\n";
2599 first_time = false;
2600 }
2601
2602 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2603 }
2604 }
2605
2606 if (!first_time)
2607 fputs ("\n", stderr);
2608 }
2609
2610 if (nl)
2611 fputs (nl, stderr);
2612
2613 if (rs6000_recip_control)
2614 {
2615 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2616
2617 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2618 if (rs6000_recip_bits[m])
2619 {
2620 fprintf (stderr,
2621 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2622 GET_MODE_NAME (m),
2623 (RS6000_RECIP_AUTO_RE_P (m)
2624 ? "auto"
2625 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2626 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2627 ? "auto"
2628 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2629 }
2630
2631 fputs ("\n", stderr);
2632 }
2633
2634 if (rs6000_cpu_index >= 0)
2635 {
2636 const char *name = processor_target_table[rs6000_cpu_index].name;
2637 HOST_WIDE_INT flags
2638 = processor_target_table[rs6000_cpu_index].target_enable;
2639
2640 sprintf (flags_buffer, "-mcpu=%s flags", name);
2641 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2642 }
2643 else
2644 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2645
2646 if (rs6000_tune_index >= 0)
2647 {
2648 const char *name = processor_target_table[rs6000_tune_index].name;
2649 HOST_WIDE_INT flags
2650 = processor_target_table[rs6000_tune_index].target_enable;
2651
2652 sprintf (flags_buffer, "-mtune=%s flags", name);
2653 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2654 }
2655 else
2656 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2657
2658 cl_target_option_save (&cl_opts, &global_options);
2659 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2660 rs6000_isa_flags);
2661
2662 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2663 rs6000_isa_flags_explicit);
2664
2665 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2666 rs6000_builtin_mask);
2667
2668 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2669
2670 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2671 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2672
2673 switch (rs6000_sched_costly_dep)
2674 {
2675 case max_dep_latency:
2676 costly_str = "max_dep_latency";
2677 break;
2678
2679 case no_dep_costly:
2680 costly_str = "no_dep_costly";
2681 break;
2682
2683 case all_deps_costly:
2684 costly_str = "all_deps_costly";
2685 break;
2686
2687 case true_store_to_load_dep_costly:
2688 costly_str = "true_store_to_load_dep_costly";
2689 break;
2690
2691 case store_to_load_dep_costly:
2692 costly_str = "store_to_load_dep_costly";
2693 break;
2694
2695 default:
2696 costly_str = costly_num;
2697 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2698 break;
2699 }
2700
2701 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2702
2703 switch (rs6000_sched_insert_nops)
2704 {
2705 case sched_finish_regroup_exact:
2706 nop_str = "sched_finish_regroup_exact";
2707 break;
2708
2709 case sched_finish_pad_groups:
2710 nop_str = "sched_finish_pad_groups";
2711 break;
2712
2713 case sched_finish_none:
2714 nop_str = "sched_finish_none";
2715 break;
2716
2717 default:
2718 nop_str = nop_num;
2719 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2720 break;
2721 }
2722
2723 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2724
2725 switch (rs6000_sdata)
2726 {
2727 default:
2728 case SDATA_NONE:
2729 break;
2730
2731 case SDATA_DATA:
2732 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2733 break;
2734
2735 case SDATA_SYSV:
2736 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2737 break;
2738
2739 case SDATA_EABI:
2740 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2741 break;
2742
2743 }
2744
2745 switch (rs6000_traceback)
2746 {
2747 case traceback_default: trace_str = "default"; break;
2748 case traceback_none: trace_str = "none"; break;
2749 case traceback_part: trace_str = "part"; break;
2750 case traceback_full: trace_str = "full"; break;
2751 default: trace_str = "unknown"; break;
2752 }
2753
2754 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2755
2756 switch (rs6000_current_cmodel)
2757 {
2758 case CMODEL_SMALL: cmodel_str = "small"; break;
2759 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2760 case CMODEL_LARGE: cmodel_str = "large"; break;
2761 default: cmodel_str = "unknown"; break;
2762 }
2763
2764 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2765
2766 switch (rs6000_current_abi)
2767 {
2768 case ABI_NONE: abi_str = "none"; break;
2769 case ABI_AIX: abi_str = "aix"; break;
2770 case ABI_ELFv2: abi_str = "ELFv2"; break;
2771 case ABI_V4: abi_str = "V4"; break;
2772 case ABI_DARWIN: abi_str = "darwin"; break;
2773 default: abi_str = "unknown"; break;
2774 }
2775
2776 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2777
2778 if (rs6000_altivec_abi)
2779 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2780
2781 if (rs6000_darwin64_abi)
2782 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2783
2784 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2785 (TARGET_SOFT_FLOAT ? "true" : "false"));
2786
2787 if (TARGET_LINK_STACK)
2788 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2789
2790 if (TARGET_P8_FUSION)
2791 {
2792 char options[80];
2793
2794 strcpy (options, "power8");
2795 if (TARGET_P8_FUSION_SIGN)
2796 strcat (options, ", sign");
2797
2798 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2799 }
2800
2801 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2802 TARGET_SECURE_PLT ? "secure" : "bss");
2803 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2804 aix_struct_return ? "aix" : "sysv");
2805 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2806 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2807 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2808 tf[!!rs6000_align_branch_targets]);
2809 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2810 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2811 rs6000_long_double_type_size);
2812 if (rs6000_long_double_type_size > 64)
2813 {
2814 fprintf (stderr, DEBUG_FMT_S, "long double type",
2815 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2816 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2817 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2818 }
2819 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2820 (int)rs6000_sched_restricted_insns_priority);
2821 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2822 (int)END_BUILTINS);
2823 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2824 (int)RS6000_BUILTIN_COUNT);
2825
2826 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2827 (int)TARGET_FLOAT128_ENABLE_TYPE);
2828
2829 if (TARGET_VSX)
2830 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2831 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2832
2833 if (TARGET_DIRECT_MOVE_128)
2834 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2835 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2836 }
2837
2838 \f
2839 /* Update the addr mask bits in reg_addr to help the secondary reload and
2840 legitimate address support (TARGET_LEGITIMATE_ADDRESS_P) figure out the
2841 appropriate addressing to use. */
2842
2843 static void
2844 rs6000_setup_reg_addr_masks (void)
2845 {
2846 ssize_t rc, reg, m, nregs;
2847 addr_mask_type any_addr_mask, addr_mask;
2848
2849 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2850 {
2851 machine_mode m2 = (machine_mode) m;
2852 bool complex_p = false;
2853 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2854 size_t msize;
2855
2856 if (COMPLEX_MODE_P (m2))
2857 {
2858 complex_p = true;
2859 m2 = GET_MODE_INNER (m2);
2860 }
2861
2862 msize = GET_MODE_SIZE (m2);
2863
2864 /* SDmode is special in that we want to access it only via REG+REG
2865 addressing on power7 and above, since we want to use the LFIWZX and
2866 STFIWX instructions to load and store it. */
2867 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2868
2869 any_addr_mask = 0;
2870 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2871 {
2872 addr_mask = 0;
2873 reg = reload_reg_map[rc].reg;
2874
2875 /* Can mode values go in the GPR/FPR/Altivec registers? */
2876 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2877 {
2878 bool small_int_vsx_p = (small_int_p
2879 && (rc == RELOAD_REG_FPR
2880 || rc == RELOAD_REG_VMX));
2881
2882 nregs = rs6000_hard_regno_nregs[m][reg];
2883 addr_mask |= RELOAD_REG_VALID;
2884
2885 /* Indicate if the mode takes more than 1 physical register. If
2886 it takes a single register, indicate it can do REG+REG
2887 addressing. Small integers in VSX registers can only do
2888 REG+REG addressing. */
2889 if (small_int_vsx_p)
2890 addr_mask |= RELOAD_REG_INDEXED;
2891 else if (nregs > 1 || m == BLKmode || complex_p)
2892 addr_mask |= RELOAD_REG_MULTIPLE;
2893 else
2894 addr_mask |= RELOAD_REG_INDEXED;
2895
2896 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2897 addressing. If we allow scalars into Altivec registers,
2898 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2899
2900 For VSX systems, we don't allow update addressing for
2901 DFmode/SFmode if those registers can go in both the
2902 traditional floating point registers and Altivec registers.
2903 The load/store instructions for the Altivec registers do not
2904 have update forms. If we allowed update addressing, it seems
2905 to break IV-OPT code using floating point if the index type is
2906 int instead of long (PR target/81550 and target/84042). */
2907
2908 if (TARGET_UPDATE
2909 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2910 && msize <= 8
2911 && !VECTOR_MODE_P (m2)
2912 && !FLOAT128_VECTOR_P (m2)
2913 && !complex_p
2914 && (m != E_DFmode || !TARGET_VSX)
2915 && (m != E_SFmode || !TARGET_P8_VECTOR)
2916 && !small_int_vsx_p)
2917 {
2918 addr_mask |= RELOAD_REG_PRE_INCDEC;
2919
2920 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2921 we don't allow PRE_MODIFY for some multi-register
2922 operations. */
2923 switch (m)
2924 {
2925 default:
2926 addr_mask |= RELOAD_REG_PRE_MODIFY;
2927 break;
2928
2929 case E_DImode:
2930 if (TARGET_POWERPC64)
2931 addr_mask |= RELOAD_REG_PRE_MODIFY;
2932 break;
2933
2934 case E_DFmode:
2935 case E_DDmode:
2936 if (TARGET_HARD_FLOAT)
2937 addr_mask |= RELOAD_REG_PRE_MODIFY;
2938 break;
2939 }
2940 }
2941 }
2942
2943 /* GPR and FPR registers can do REG+OFFSET addressing, except
2944 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2945 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2946 if ((addr_mask != 0) && !indexed_only_p
2947 && msize <= 8
2948 && (rc == RELOAD_REG_GPR
2949 || ((msize == 8 || m2 == SFmode)
2950 && (rc == RELOAD_REG_FPR
2951 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2952 addr_mask |= RELOAD_REG_OFFSET;
2953
2954 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2955 instructions are enabled. The offset for 128-bit VSX registers is
2956 only 12 bits. While GPRs can handle the full offset range, VSX
2957 registers can only handle the restricted range. */
2958 else if ((addr_mask != 0) && !indexed_only_p
2959 && msize == 16 && TARGET_P9_VECTOR
2960 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2961 || (m2 == TImode && TARGET_VSX)))
2962 {
2963 addr_mask |= RELOAD_REG_OFFSET;
2964 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2965 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2966 }
2967
2968 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2969 addressing on 128-bit types. */
2970 if (rc == RELOAD_REG_VMX && msize == 16
2971 && (addr_mask & RELOAD_REG_VALID) != 0)
2972 addr_mask |= RELOAD_REG_AND_M16;
2973
2974 reg_addr[m].addr_mask[rc] = addr_mask;
2975 any_addr_mask |= addr_mask;
2976 }
2977
2978 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2979 }
2980 }
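/* Worked example (illustrative, not in the original source): for SImode in
   the GPRs on a target with TARGET_UPDATE, the loop above accumulates
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
   | RELOAD_REG_PRE_INCDEC | RELOAD_REG_PRE_MODIFY, i.e. every addressing
   form except the "& -16" masking, which is reserved for 16-byte types in
   VMX registers.  */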
2981
2982 \f
2983 /* Initialize the various global tables that are based on register size. */
2984 static void
2985 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2986 {
2987 ssize_t r, m, c;
2988 int align64;
2989 int align32;
2990
2991 /* Precalculate REGNO_REG_CLASS. */
2992 rs6000_regno_regclass[0] = GENERAL_REGS;
2993 for (r = 1; r < 32; ++r)
2994 rs6000_regno_regclass[r] = BASE_REGS;
2995
2996 for (r = 32; r < 64; ++r)
2997 rs6000_regno_regclass[r] = FLOAT_REGS;
2998
2999 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
3000 rs6000_regno_regclass[r] = NO_REGS;
3001
3002 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3003 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3004
3005 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3006 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3007 rs6000_regno_regclass[r] = CR_REGS;
3008
3009 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3010 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3011 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3012 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3013 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3014 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3015 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3016 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3017 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3018 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3019
3020 /* Precalculate register class to simpler reload register class. We don't
3021 need all of the register classes that are combinations of different
3022 classes, just the simple ones that have constraint letters. */
3023 for (c = 0; c < N_REG_CLASSES; c++)
3024 reg_class_to_reg_type[c] = NO_REG_TYPE;
3025
3026 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3029 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3031 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3032 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3033 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3034 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3035 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3036
3037 if (TARGET_VSX)
3038 {
3039 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3040 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3041 }
3042 else
3043 {
3044 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3045 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3046 }
3047
3048 /* Precalculate the valid memory formats as well as the vector information;
3049 this must be set up before the rs6000_hard_regno_nregs_internal calls
3050 below. */
3051 gcc_assert ((int)VECTOR_NONE == 0);
3052 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3053 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3054
3055 gcc_assert ((int)CODE_FOR_nothing == 0);
3056 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3057
3058 gcc_assert ((int)NO_REGS == 0);
3059 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3060
3061 /* The VSX hardware allows native alignment for vectors. TARGET_VSX_ALIGN_128
3062 controls whether the compiler can use native alignment or must still use 128-bit alignment. */
3063 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3064 {
3065 align64 = 64;
3066 align32 = 32;
3067 }
3068 else
3069 {
3070 align64 = 128;
3071 align32 = 128;
3072 }
3073
3074 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3075 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3076 if (TARGET_FLOAT128_TYPE)
3077 {
3078 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3079 rs6000_vector_align[KFmode] = 128;
3080
3081 if (FLOAT128_IEEE_P (TFmode))
3082 {
3083 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3084 rs6000_vector_align[TFmode] = 128;
3085 }
3086 }
3087
3088 /* V2DF mode, VSX only. */
3089 if (TARGET_VSX)
3090 {
3091 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3092 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3093 rs6000_vector_align[V2DFmode] = align64;
3094 }
3095
3096 /* V4SF mode, either VSX or Altivec. */
3097 if (TARGET_VSX)
3098 {
3099 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3100 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3101 rs6000_vector_align[V4SFmode] = align32;
3102 }
3103 else if (TARGET_ALTIVEC)
3104 {
3105 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3106 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3107 rs6000_vector_align[V4SFmode] = align32;
3108 }
3109
3110 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3111 and stores. */
3112 if (TARGET_ALTIVEC)
3113 {
3114 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3115 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3116 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3117 rs6000_vector_align[V4SImode] = align32;
3118 rs6000_vector_align[V8HImode] = align32;
3119 rs6000_vector_align[V16QImode] = align32;
3120
3121 if (TARGET_VSX)
3122 {
3123 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3124 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3125 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3126 }
3127 else
3128 {
3129 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3130 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3131 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3132 }
3133 }
3134
3135 /* V2DImode: the full arithmetic mode depends on the ISA 2.07 vector unit. Allow
3136 it under VSX to do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3137 if (TARGET_VSX)
3138 {
3139 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3140 rs6000_vector_unit[V2DImode]
3141 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3142 rs6000_vector_align[V2DImode] = align64;
3143
3144 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3145 rs6000_vector_unit[V1TImode]
3146 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3147 rs6000_vector_align[V1TImode] = 128;
3148 }
3149
3150 /* DFmode, see if we want to use the VSX unit. Memory is handled
3151 differently, so don't set rs6000_vector_mem. */
3152 if (TARGET_VSX)
3153 {
3154 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3155 rs6000_vector_align[DFmode] = 64;
3156 }
3157
3158 /* SFmode, see if we want to use the VSX unit. */
3159 if (TARGET_P8_VECTOR)
3160 {
3161 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3162 rs6000_vector_align[SFmode] = 32;
3163 }
3164
3165 /* Allow TImode in VSX registers and set the VSX memory macros. */
3166 if (TARGET_VSX)
3167 {
3168 rs6000_vector_mem[TImode] = VECTOR_VSX;
3169 rs6000_vector_align[TImode] = align64;
3170 }
3171
3172 /* Register class constraints for the constraints that depend on compile
3173 switches. When the VSX code was added, different constraints were added
3174 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3175 of the VSX registers are used. The register classes for scalar floating
3176 point types are set based on whether we allow that type into the upper
3177 (Altivec) registers. GCC has register classes to target the Altivec
3178 registers for load/store operations, to select using a VSX memory
3179 operation instead of the traditional floating point operation. The
3180 constraints are:
3181
3182 d - Register class to use with traditional DFmode instructions.
3183 f - Register class to use with traditional SFmode instructions.
3184 v - Altivec register.
3185 wa - Any VSX register.
3186 wc - Reserved to represent individual CR bits (used in LLVM).
3187 wd - Preferred register class for V2DFmode.
3188 wf - Preferred register class for V4SFmode.
3189 wg - Float register for power6x move insns.
3190 wh - FP register for direct move instructions.
3191 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3192 wj - FP or VSX register to hold 64-bit integers for direct moves.
3193 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3194 wl - Float register if we can do 32-bit signed int loads.
3195 wm - VSX register for ISA 2.07 direct move operations.
3196 wn - always NO_REGS.
3197 wr - GPR if 64-bit mode is permitted.
3198 ws - Register class to do ISA 2.06 DF operations.
3199 wt - VSX register for TImode in VSX registers.
3200 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3201 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3202 ww - Register class to do SF conversions in with VSX operations.
3203 wx - Float register if we can do 32-bit int stores.
3204 wy - Register class to do ISA 2.07 SF operations.
3205 wz - Float register if we can do 32-bit unsigned int loads.
3206 wH - Altivec register if SImode is allowed in VSX registers.
3207 wI - Float register if SImode is allowed in VSX registers.
3208 wJ - Float register if QImode/HImode are allowed in VSX registers.
3209 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3210
3211 if (TARGET_HARD_FLOAT)
3212 {
3213 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3214 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3215 }
3216
3217 if (TARGET_VSX)
3218 {
3219 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3220 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3222 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3223 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3224 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3225 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3226 }
3227
3228 /* Add conditional constraints based on various options, to allow us to
3229 collapse multiple insn patterns. */
3230 if (TARGET_ALTIVEC)
3231 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3232
3233 if (TARGET_MFPGPR) /* DFmode */
3234 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3235
3236 if (TARGET_LFIWAX)
3237 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3238
3239 if (TARGET_DIRECT_MOVE)
3240 {
3241 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3242 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3243 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3244 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3245 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3246 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3247 }
3248
3249 if (TARGET_POWERPC64)
3250 {
3251 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3252 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3253 }
3254
3255 if (TARGET_P8_VECTOR) /* SFmode */
3256 {
3257 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3258 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3259 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3260 }
3261 else if (TARGET_VSX)
3262 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3263
3264 if (TARGET_STFIWX)
3265 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3266
3267 if (TARGET_LFIWZX)
3268 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3269
3270 if (TARGET_FLOAT128_TYPE)
3271 {
3272 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3273 if (FLOAT128_IEEE_P (TFmode))
3274 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3275 }
3276
3277 if (TARGET_P9_VECTOR)
3278 {
3279 /* Support for new D-form instructions. */
3280 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3281
3282 /* Support for ISA 3.0 (power9) vectors. */
3283 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3284 }
3285
3286 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3287 if (TARGET_DIRECT_MOVE_128)
3288 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3289
3290 /* Support small integers in VSX registers. */
3291 if (TARGET_P8_VECTOR)
3292 {
3293 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3294 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3295 if (TARGET_P9_VECTOR)
3296 {
3297 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3298 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3299 }
3300 }
3301
3302 /* Set up the reload helper and direct move functions. */
3303 if (TARGET_VSX || TARGET_ALTIVEC)
3304 {
3305 if (TARGET_64BIT)
3306 {
3307 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3308 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3309 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3310 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3311 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3312 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3313 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3314 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3315 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3316 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3317 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3318 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3319 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3320 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3321 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3322 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3323 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3324 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3325 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3326 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3327
3328 if (FLOAT128_VECTOR_P (KFmode))
3329 {
3330 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3331 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3332 }
3333
3334 if (FLOAT128_VECTOR_P (TFmode))
3335 {
3336 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3337 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3338 }
3339
3340 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3341 available. */
3342 if (TARGET_NO_SDMODE_STACK)
3343 {
3344 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3345 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3346 }
3347
3348 if (TARGET_VSX)
3349 {
3350 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3351 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3352 }
3353
3354 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3355 {
3356 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3357 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3358 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3359 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3360 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3361 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3362 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3363 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3364 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3365
3366 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3367 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3368 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3369 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3370 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3371 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3372 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3373 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3374 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3375
3376 if (FLOAT128_VECTOR_P (KFmode))
3377 {
3378 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3379 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3380 }
3381
3382 if (FLOAT128_VECTOR_P (TFmode))
3383 {
3384 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3385 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3386 }
3387 }
3388 }
3389 else
3390 {
3391 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3392 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3393 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3394 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3395 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3396 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3397 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3398 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3399 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3400 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3401 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3402 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3403 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3404 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3405 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3406 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3407 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3408 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3409 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3410 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3411
3412 if (FLOAT128_VECTOR_P (KFmode))
3413 {
3414 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3415 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3416 }
3417
3418 if (FLOAT128_IEEE_P (TFmode))
3419 {
3420 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3421 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3422 }
3423
3424 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3425 available. */
3426 if (TARGET_NO_SDMODE_STACK)
3427 {
3428 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3429 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3430 }
3431
3432 if (TARGET_VSX)
3433 {
3434 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3435 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3436 }
3437
3438 if (TARGET_DIRECT_MOVE)
3439 {
3440 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3441 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3442 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3443 }
3444 }
3445
3446 reg_addr[DFmode].scalar_in_vmx_p = true;
3447 reg_addr[DImode].scalar_in_vmx_p = true;
3448
3449 if (TARGET_P8_VECTOR)
3450 {
3451 reg_addr[SFmode].scalar_in_vmx_p = true;
3452 reg_addr[SImode].scalar_in_vmx_p = true;
3453
3454 if (TARGET_P9_VECTOR)
3455 {
3456 reg_addr[HImode].scalar_in_vmx_p = true;
3457 reg_addr[QImode].scalar_in_vmx_p = true;
3458 }
3459 }
3460 }
3461
3462 /* Precalculate HARD_REGNO_NREGS. */
3463 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3464 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3465 rs6000_hard_regno_nregs[m][r]
3466 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
3467
3468 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3469 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3470 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3471 rs6000_hard_regno_mode_ok_p[m][r]
3472 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3473
3474 /* Precalculate CLASS_MAX_NREGS sizes. */
3475 for (c = 0; c < LIM_REG_CLASSES; ++c)
3476 {
3477 int reg_size;
3478
3479 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3480 reg_size = UNITS_PER_VSX_WORD;
3481
3482 else if (c == ALTIVEC_REGS)
3483 reg_size = UNITS_PER_ALTIVEC_WORD;
3484
3485 else if (c == FLOAT_REGS)
3486 reg_size = UNITS_PER_FP_WORD;
3487
3488 else
3489 reg_size = UNITS_PER_WORD;
3490
3491 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3492 {
3493 machine_mode m2 = (machine_mode)m;
3494 int reg_size2 = reg_size;
3495
3496 /* TDmode and IBM 128-bit floating point always take 2 registers, even
3497 in VSX. */
3498 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3499 reg_size2 = UNITS_PER_FP_WORD;
3500
3501 rs6000_class_max_nregs[m][c]
3502 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3503 }
3504 }
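/* A worked example of the computation above (illustrative): for
   c == FLOAT_REGS, reg_size is UNITS_PER_FP_WORD (8 bytes), so the
   16-byte V2DFmode needs (16 + 8 - 1) / 8 = 2 registers while the 8-byte
   DFmode needs (8 + 8 - 1) / 8 = 1. The expression is simply a ceiling
   division of the mode size by the register size.  */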
3505
3506 /* Calculate which modes to automatically generate code to use the
3507 reciprocal divide and square root instructions. In the future, possibly
3508 automatically generate the instructions even if the user did not specify
3509 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3510 not accurate enough. */
3511 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3512 if (TARGET_FRES)
3513 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3514 if (TARGET_FRE)
3515 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3516 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3517 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3518 if (VECTOR_UNIT_VSX_P (V2DFmode))
3519 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3520
3521 if (TARGET_FRSQRTES)
3522 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3523 if (TARGET_FRSQRTE)
3524 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3525 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3526 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3527 if (VECTOR_UNIT_VSX_P (V2DFmode))
3528 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3529
3530 if (rs6000_recip_control)
3531 {
3532 if (!flag_finite_math_only)
3533 warning (0, "%qs requires %qs or %qs", "-mrecip",
3534 "-ffinite-math-only", "-ffast-math");
3535 if (flag_trapping_math)
3536 warning (0, "%qs requires %qs or %qs", "-mrecip",
3537 "-fno-trapping-math", "-ffast-math");
3538 if (!flag_reciprocal_math)
3539 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3540 "-ffast-math");
3541 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3542 {
3543 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3544 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3545 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3548 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3549 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3552 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3553 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3554
3555 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3556 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3557 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3560 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3561 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3564 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3565 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566
3567 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3568 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3569 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3570
3571 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3572 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3573 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3574 }
3575 }
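/* Illustrative usage (assumed command line, based on the checks above):
   compiling with "-mrecip -ffast-math" satisfies all three flag tests, so
   on hardware with fres/frsqrte the AUTO_RE/AUTO_RSQRTE bits are set and
   a division such as "a / b" may be expanded as "a" times a reciprocal
   estimate of "b", refined by Newton-Raphson steps, instead of a full
   divide.  */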
3576
3577 /* Update the addr mask bits in reg_addr to help secondary reload and the
3578 legitimate address support figure out the appropriate addressing to
3579 use. */
3580 rs6000_setup_reg_addr_masks ();
3581
3582 if (global_init_p || TARGET_DEBUG_TARGET)
3583 {
3584 if (TARGET_DEBUG_REG)
3585 rs6000_debug_reg_global ();
3586
3587 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3588 fprintf (stderr,
3589 "SImode variable mult cost = %d\n"
3590 "SImode constant mult cost = %d\n"
3591 "SImode short constant mult cost = %d\n"
3592 "DImode multipliciation cost = %d\n"
3593 "SImode division cost = %d\n"
3594 "DImode division cost = %d\n"
3595 "Simple fp operation cost = %d\n"
3596 "DFmode multiplication cost = %d\n"
3597 "SFmode division cost = %d\n"
3598 "DFmode division cost = %d\n"
3599 "cache line size = %d\n"
3600 "l1 cache size = %d\n"
3601 "l2 cache size = %d\n"
3602 "simultaneous prefetches = %d\n"
3603 "\n",
3604 rs6000_cost->mulsi,
3605 rs6000_cost->mulsi_const,
3606 rs6000_cost->mulsi_const9,
3607 rs6000_cost->muldi,
3608 rs6000_cost->divsi,
3609 rs6000_cost->divdi,
3610 rs6000_cost->fp,
3611 rs6000_cost->dmul,
3612 rs6000_cost->sdiv,
3613 rs6000_cost->ddiv,
3614 rs6000_cost->cache_line_size,
3615 rs6000_cost->l1_cache_size,
3616 rs6000_cost->l2_cache_size,
3617 rs6000_cost->simultaneous_prefetches);
3618 }
3619 }
3620
3621 #if TARGET_MACHO
3622 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3623
3624 static void
3625 darwin_rs6000_override_options (void)
3626 {
3627 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3628 off. */
3629 rs6000_altivec_abi = 1;
3630 TARGET_ALTIVEC_VRSAVE = 1;
3631 rs6000_current_abi = ABI_DARWIN;
3632
3633 if (DEFAULT_ABI == ABI_DARWIN
3634 && TARGET_64BIT)
3635 darwin_one_byte_bool = 1;
3636
3637 if (TARGET_64BIT && ! TARGET_POWERPC64)
3638 {
3639 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3640 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3641 }
3642 if (flag_mkernel)
3643 {
3644 rs6000_default_long_calls = 1;
3645 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3646 }
3647
3648 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3649 Altivec. */
3650 if (!flag_mkernel && !flag_apple_kext
3651 && TARGET_64BIT
3652 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3653 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3654
3655 /* Unless the user (not the configurer) has explicitly overridden
3656 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3657 G4 unless targeting the kernel. */
3658 if (!flag_mkernel
3659 && !flag_apple_kext
3660 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3661 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3662 && ! global_options_set.x_rs6000_cpu_index)
3663 {
3664 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3665 }
3666 }
3667 #endif
3668
3669 /* If not otherwise specified by a target, make 'long double' equivalent to
3670 'double'. */
3671
3672 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3673 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3674 #endif
3675
3676 /* Return the builtin mask of the various options used that could affect which
3677 builtins were used. In the past we used target_flags, but we've run out of
3678 bits, and some options are no longer in target_flags. */
3679
3680 HOST_WIDE_INT
3681 rs6000_builtin_mask_calculate (void)
3682 {
3683 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3684 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3685 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3686 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3687 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3688 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3689 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3690 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3691 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3692 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3693 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3694 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3695 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3696 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3697 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3698 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3699 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3700 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3701 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3702 | ((TARGET_LONG_DOUBLE_128
3703 && TARGET_HARD_FLOAT
3704 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3705 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3706 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3707 }
3708
3709 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3710 to clobber the XER[CA] bit because clobbering that bit without telling
3711 the compiler worked just fine with versions of GCC before GCC 5, and
3712 breaking a lot of older code in ways that are hard to track down is
3713 not such a great idea. */
3714
3715 static rtx_insn *
3716 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3717 vec<const char *> &/*constraints*/,
3718 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3719 {
3720 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3721 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3722 return NULL;
3723 }
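/* A hypothetical example of the asm statements this protects (user code,
   not from this file):

     __asm__ ("addic %0,%1,-1\n\taddme %0,%0" : "=r" (x) : "r" (y));

   Both addic and addme modify XER[CA] without declaring it, so the
   implicit clobber added above keeps such pre-GCC-5 code working.  */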
3724
3725 /* Override command line options.
3726
3727 Combine build-specific configuration information with options
3728 specified on the command line to set various state variables which
3729 influence code generation, optimization, and expansion of built-in
3730 functions. Assure that command-line configuration preferences are
3731 compatible with each other and with the build configuration; issue
3732 warnings while adjusting configuration or error messages while
3733 rejecting configuration.
3734
3735 Upon entry to this function:
3736
3737 This function is called once at the beginning of
3738 compilation, and then again at the start and end of compiling
3739 each section of code that has a different configuration, as
3740 indicated, for example, by adding the
3741
3742 __attribute__((__target__("cpu=power9")))
3743
3744 qualifier to a function definition or, for example, by bracketing
3745 code between
3746
3747 #pragma GCC target("altivec")
3748
3749 and
3750
3751 #pragma GCC reset_options
3752
3753 directives. Parameter global_init_p is true for the initial
3754 invocation, which initializes global variables, and false for all
3755 subsequent invocations.
3756
3757
3758 Various global state information is assumed to be valid. This
3759 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3760 default CPU specified at build configure time, TARGET_DEFAULT,
3761 representing the default set of option flags for the default
3762 target, and global_options_set.x_rs6000_isa_flags, representing
3763 which options were requested on the command line.
3764
3765 Upon return from this function:
3766
3767 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3768 was set by name on the command line. Additionally, if certain
3769 attributes are automatically enabled or disabled by this function
3770 in order to assure compatibility between options and
3771 configuration, the flags associated with those attributes are
3772 also set. By setting these "explicit bits", we avoid the risk
3773 that other code might accidentally overwrite these particular
3774 attributes with "default values".
3775
3776 The various bits of rs6000_isa_flags are set to indicate the
3777 target options that have been selected for the most current
3778 compilation efforts. This has the effect of also turning on the
3779 associated TARGET_XXX values since these are macros which are
3780 generally defined to test the corresponding bit of the
3781 rs6000_isa_flags variable.
3782
3783 The variable rs6000_builtin_mask is set to represent the target
3784 options for the most current compilation efforts, consistent with
3785 the current contents of rs6000_isa_flags. This variable controls
3786 expansion of built-in functions.
3787
3788 Various other global variables and fields of global structures
3789 (over 50 in all) are initialized to reflect the desired options
3790 for the most current compilation efforts. */
3791
3792 static bool
3793 rs6000_option_override_internal (bool global_init_p)
3794 {
3795 bool ret = true;
3796
3797 HOST_WIDE_INT set_masks;
3798 HOST_WIDE_INT ignore_masks;
3799 int cpu_index = -1;
3800 int tune_index;
3801 struct cl_target_option *main_target_opt
3802 = ((global_init_p || target_option_default_node == NULL)
3803 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3804
3805 /* Print defaults. */
3806 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3807 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3808
3809 /* Remember the explicit arguments. */
3810 if (global_init_p)
3811 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3812
3813 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3814 library functions, so warn about it. The flag may be useful for
3815 performance studies from time to time though, so don't disable it
3816 entirely. */
3817 if (global_options_set.x_rs6000_alignment_flags
3818 && rs6000_alignment_flags == MASK_ALIGN_POWER
3819 && DEFAULT_ABI == ABI_DARWIN
3820 && TARGET_64BIT)
3821 warning (0, "%qs is not supported for 64-bit Darwin;"
3822 " it is incompatible with the installed C and C++ libraries",
3823 "-malign-power");
3824
3825 /* Numerous experiments show that IRA-based loop pressure
3826 calculation works better for RTL loop invariant motion on targets
3827 with enough (>= 32) registers. It is an expensive optimization.
3828 So it is enabled only when optimizing for peak performance. */
3829 if (optimize >= 3 && global_init_p
3830 && !global_options_set.x_flag_ira_loop_pressure)
3831 flag_ira_loop_pressure = 1;
3832
3833 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3834 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3835 options were already specified. */
3836 if (flag_sanitize & SANITIZE_USER_ADDRESS
3837 && !global_options_set.x_flag_asynchronous_unwind_tables)
3838 flag_asynchronous_unwind_tables = 1;
3839
3840 /* Set the pointer size. */
3841 if (TARGET_64BIT)
3842 {
3843 rs6000_pmode = DImode;
3844 rs6000_pointer_size = 64;
3845 }
3846 else
3847 {
3848 rs6000_pmode = SImode;
3849 rs6000_pointer_size = 32;
3850 }
3851
3852 /* Some OSs don't support saving the high part of 64-bit registers on context
3853 switch. Other OSs don't support saving Altivec registers. On those OSs,
3854 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3855 if the user wants either, the user must explicitly specify them and we
3856 won't interfere with the user's specification. */
3857
3858 set_masks = POWERPC_MASKS;
3859 #ifdef OS_MISSING_POWERPC64
3860 if (OS_MISSING_POWERPC64)
3861 set_masks &= ~OPTION_MASK_POWERPC64;
3862 #endif
3863 #ifdef OS_MISSING_ALTIVEC
3864 if (OS_MISSING_ALTIVEC)
3865 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3866 | OTHER_VSX_VECTOR_MASKS);
3867 #endif
3868
3869 /* Don't let the processor default override flags that were given explicitly. */
3870 set_masks &= ~rs6000_isa_flags_explicit;
3871
3872 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3873 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3874
3875 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3876 the cpu in a target attribute or pragma, but did not specify a tuning
3877 option, use the cpu for the tuning option rather than the option specified
3878 with -mtune on the command line. Process a '--with-cpu' configuration
3879 request as an implicit -mcpu. */
3880 if (rs6000_cpu_index >= 0)
3881 cpu_index = rs6000_cpu_index;
3882 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3883 cpu_index = main_target_opt->x_rs6000_cpu_index;
3884 else if (OPTION_TARGET_CPU_DEFAULT)
3885 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3886
3887 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3888 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3889 with those from the cpu, except for options that were explicitly set. If
3890 we don't have a cpu, do not override the target bits set in
3891 TARGET_DEFAULT. */
3892 if (cpu_index >= 0)
3893 {
3894 rs6000_cpu_index = cpu_index;
3895 rs6000_isa_flags &= ~set_masks;
3896 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3897 & set_masks);
3898 }
3899 else
3900 {
3901 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3902 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3903 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3904 to using rs6000_isa_flags, we need to do the initialization here.
3905
3906 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3907 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3908 HOST_WIDE_INT flags;
3909 if (TARGET_DEFAULT)
3910 flags = TARGET_DEFAULT;
3911 else
3912 {
3913 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3914 const char *default_cpu = (!TARGET_POWERPC64
3915 ? "powerpc"
3916 : (BYTES_BIG_ENDIAN
3917 ? "powerpc64"
3918 : "powerpc64le"));
3919 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3920 flags = processor_target_table[default_cpu_index].target_enable;
3921 }
3922 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3923 }
3924
3925 if (rs6000_tune_index >= 0)
3926 tune_index = rs6000_tune_index;
3927 else if (cpu_index >= 0)
3928 rs6000_tune_index = tune_index = cpu_index;
3929 else
3930 {
3931 size_t i;
3932 enum processor_type tune_proc
3933 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3934
3935 tune_index = -1;
3936 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3937 if (processor_target_table[i].processor == tune_proc)
3938 {
3939 tune_index = i;
3940 break;
3941 }
3942 }
3943
3944 if (cpu_index >= 0)
3945 rs6000_cpu = processor_target_table[cpu_index].processor;
3946 else
3947 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3948
3949 gcc_assert (tune_index >= 0);
3950 rs6000_tune = processor_target_table[tune_index].processor;
3951
3952 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3953 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3954 || rs6000_cpu == PROCESSOR_PPCE5500)
3955 {
3956 if (TARGET_ALTIVEC)
3957 error ("AltiVec not supported in this target");
3958 }
3959
3960 /* If we are optimizing big endian systems for space, use the load/store
3961 multiple instructions. */
3962 if (BYTES_BIG_ENDIAN && optimize_size)
3963 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3964
3965 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3966 because the hardware doesn't support the instructions used in little
3967 endian mode, and they cause an alignment trap. The 750 does not cause an
3968 alignment trap (except when the target is unaligned). */
3969
3970 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3971 {
3972 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3973 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3974 warning (0, "%qs is not supported on little endian systems",
3975 "-mmultiple");
3976 }
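/* The instructions in question are the load/store multiple insns; for
   example "stmw r25,-28(r1)" stores r25..r31 (7 words) with a single
   instruction, but its memory layout assumes big-endian word order.  */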
3977
3978 /* If little-endian, default to -mstrict-align on older processors.
3979 Testing for htm matches power8 and later. */
3980 if (!BYTES_BIG_ENDIAN
3981 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3982 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3983
3984 if (!rs6000_fold_gimple)
3985 fprintf (stderr,
3986 "gimple folding of rs6000 builtins has been disabled.\n");
3987
3988 /* Add some warnings for VSX. */
3989 if (TARGET_VSX)
3990 {
3991 const char *msg = NULL;
3992 if (!TARGET_HARD_FLOAT)
3993 {
3994 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3995 msg = N_("%<-mvsx%> requires hardware floating point");
3996 else
3997 {
3998 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3999 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4000 }
4001 }
4002 else if (TARGET_AVOID_XFORM > 0)
4003 msg = N_("%<-mvsx%> needs indexed addressing");
4004 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4005 & OPTION_MASK_ALTIVEC))
4006 {
4007 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4008 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
4009 else
4010 msg = N_("%<-mno-altivec%> disables vsx");
4011 }
4012
4013 if (msg)
4014 {
4015 warning (0, msg);
4016 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4017 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4018 }
4019 }
4020
4021 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4022 the -mcpu setting to enable options that conflict. */
4023 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4024 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4025 | OPTION_MASK_ALTIVEC
4026 | OPTION_MASK_VSX)) != 0)
4027 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4028 | OPTION_MASK_DIRECT_MOVE)
4029 & ~rs6000_isa_flags_explicit);
4030
4031 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4032 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4033
4034 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4035 off all of the options that depend on those flags. */
4036 ignore_masks = rs6000_disable_incompatible_switches ();
4037
4038 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4039 unless the user explicitly used the -mno-<option> to disable the code. */
4040 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4041 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4042 else if (TARGET_P9_MINMAX)
4043 {
4044 if (cpu_index >= 0)
4045 {
4046 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4047 {
4048 /* Legacy behavior: allow -mcpu=power9 with certain
4049 capabilities explicitly disabled. */
4050 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4051 }
4052 else
4053 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4054 "for <xxx> less than power9", "-mcpu");
4055 }
4056 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4057 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4058 & rs6000_isa_flags_explicit))
4059 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4060 were explicitly cleared. */
4061 error ("%qs incompatible with explicitly disabled options",
4062 "-mpower9-minmax");
4063 else
4064 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4065 }
4066 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4067 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4068 else if (TARGET_VSX)
4069 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4070 else if (TARGET_POPCNTD)
4071 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4072 else if (TARGET_DFP)
4073 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4074 else if (TARGET_CMPB)
4075 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4076 else if (TARGET_FPRND)
4077 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4078 else if (TARGET_POPCNTB)
4079 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4080 else if (TARGET_ALTIVEC)
4081 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4082
4083 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4084 {
4085 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4086 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4087 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4088 }
4089
4090 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4091 {
4092 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4093 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4094 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4095 }
4096
4097 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4098 {
4099 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4100 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4101 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4102 }
4103
4104 if (TARGET_P8_VECTOR && !TARGET_VSX)
4105 {
4106 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4107 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4108 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4109 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4110 {
4111 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4112 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4113 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4114 }
4115 else
4116 {
4117 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4118 not explicit. */
4119 rs6000_isa_flags |= OPTION_MASK_VSX;
4120 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4121 }
4122 }
4123
4124 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4125 {
4126 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4127 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4128 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4129 }
4130
4131 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4132 silently turn off quad memory mode. */
4133 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4134 {
4135 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4136 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
4137
4138 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4139 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
4140
4141 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4142 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4143 }
4144
4145 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4146 the words are reversed, but atomic operations can still be done by
4147 swapping the words. */
4148 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4149 {
4150 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4151 warning (0, N_("%<-mquad-memory%> is not available in little endian "
4152 "mode"));
4153
4154 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4155 }
4156
4157 /* Assume that if the user asked for normal quad memory instructions, they
4158 want the atomic versions as well, unless they explicitly told us not to
4159 use quad word atomic instructions. */
4160 if (TARGET_QUAD_MEMORY
4161 && !TARGET_QUAD_MEMORY_ATOMIC
4162 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4163 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4164
4165 /* If we can shrink-wrap the TOC register save separately, then use
4166 -msave-toc-indirect unless explicitly disabled. */
4167 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4168 && flag_shrink_wrap_separate
4169 && optimize_function_for_speed_p (cfun))
4170 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4171
4172 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4173 generating power8 instructions. Power9 does not optimize power8 fusion
4174 cases. */
4175 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4176 {
4177 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4178 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4179 else
4180 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4181 }
4182
4183 /* Setting additional fusion flags turns on base fusion. */
4184 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4185 {
4186 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4187 {
4188 if (TARGET_P8_FUSION_SIGN)
4189 error ("%qs requires %qs", "-mpower8-fusion-sign",
4190 "-mpower8-fusion");
4191
4192 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4193 }
4194 else
4195 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4196 }
4197
4198 /* Power8 does not fuse sign extended loads with the addis. If we are
4199 optimizing at high levels for speed, convert a sign extended load into a
4200 zero extending load, and an explicit sign extension. */
4201 if (TARGET_P8_FUSION
4202 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4203 && optimize_function_for_speed_p (cfun)
4204 && optimize >= 3)
4205 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
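/* Concretely (illustrative asm, not emitted here): instead of the
   sign-extending pair

     addis r9,r2,var@toc@ha ; lwa r10,var@toc@l(r9)

   the compiler can emit

     addis r9,r2,var@toc@ha ; lwz r10,var@toc@l(r9) ; extsw r10,r10

   so that the addis/lwz pair remains a power8 load-fusion candidate.  */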
4206
4207 /* ISA 3.0 vector instructions include ISA 2.07. */
4208 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4209 {
4210 /* We prefer not to mention undocumented options in
4211 error messages. However, if users have managed to select
4212 power9-vector without selecting power8-vector, they
4213 already know about undocumented flags. */
4214 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) &&
4215 (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4216 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4217 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4218 {
4219 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4220 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4221 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4222 }
4223 else
4224 {
4225 /* OPTION_MASK_P9_VECTOR is explicit and
4226 OPTION_MASK_P8_VECTOR is not explicit. */
4227 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4228 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4229 }
4230 }
4231
4232 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4233 support. If we only have ISA 2.06 support, and the user did not specify
4234 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4235 but we don't enable the full vectorization support. */
4236 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4237 TARGET_ALLOW_MOVMISALIGN = 1;
4238
4239 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4240 {
4241 if (TARGET_ALLOW_MOVMISALIGN > 0
4242 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4243 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4244
4245 TARGET_ALLOW_MOVMISALIGN = 0;
4246 }
4247
4248 /* Determine when unaligned vector accesses are permitted, and when
4249 they are preferred over masked Altivec loads. Note that if
4250 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4251 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4252 not true. */
4253 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4254 {
4255 if (!TARGET_VSX)
4256 {
4257 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4258 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4259
4260 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4261 }
4262
4263 else if (!TARGET_ALLOW_MOVMISALIGN)
4264 {
4265 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4266 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4267 "-mallow-movmisalign");
4268
4269 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4270 }
4271 }
4272
4273 /* Use long double size to select the appropriate long double. We use
4274 TYPE_PRECISION to differentiate the 3 different long double types. We map
4275 128 into the precision used for TFmode. */
4276 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4277 ? 64
4278 : FLOAT_PRECISION_TFmode);
4279
4280 /* Set long double size before the IEEE 128-bit tests. */
4281 if (!global_options_set.x_rs6000_long_double_type_size)
4282 {
4283 if (main_target_opt != NULL
4284 && (main_target_opt->x_rs6000_long_double_type_size
4285 != default_long_double_size))
4286 error ("target attribute or pragma changes long double size");
4287 else
4288 rs6000_long_double_type_size = default_long_double_size;
4289 }
4290 else if (rs6000_long_double_type_size == 128)
4291 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4292 else if (global_options_set.x_rs6000_ieeequad)
4293 {
4294 if (global_options.x_rs6000_ieeequad)
4295 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4296 else
4297 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4298 }
4299
4300 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4301 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4302 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4303 those systems will not pick up this default. Warn if the user changes the
4304 default unless -Wno-psabi. */
4305 if (!global_options_set.x_rs6000_ieeequad)
4306 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4307
4308 else
4309 {
4310 if (global_options.x_rs6000_ieeequad
4311 && (!TARGET_POPCNTD || !TARGET_VSX))
4312 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4313
4314 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4315 {
4316 static bool warned_change_long_double;
4317 if (!warned_change_long_double)
4318 {
4319 warned_change_long_double = true;
4320 if (TARGET_IEEEQUAD)
4321 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4322 else
4323 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4324 }
4325 }
4326 }
4327
4328 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4329 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4330 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4331 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4332 the keyword as well as the type. */
4333 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4334
4335 /* IEEE 128-bit floating point requires VSX support. */
4336 if (TARGET_FLOAT128_KEYWORD)
4337 {
4338 if (!TARGET_VSX)
4339 {
4340 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4341 error ("%qs requires VSX support", "%<-mfloat128%>");
4342
4343 TARGET_FLOAT128_TYPE = 0;
4344 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4345 | OPTION_MASK_FLOAT128_HW);
4346 }
4347 else if (!TARGET_FLOAT128_TYPE)
4348 {
4349 TARGET_FLOAT128_TYPE = 1;
4350 warning (0, "The %<-mfloat128%> option may not be fully supported");
4351 }
4352 }
4353
4354 /* Enable the __float128 keyword under Linux by default. */
4355 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4356 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4357 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4358
4359 /* If we are supporting the float128 type and have full ISA 3.0 support,
4360 enable -mfloat128-hardware by default. However, don't enable the
4361 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4362 because sometimes the compiler wants to put things in an integer
4363 container, and if we don't have __int128 support, it is impossible. */
4364 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4365 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4366 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4367 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4368
4369 if (TARGET_FLOAT128_HW
4370 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4371 {
4372 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4373 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4374
4375 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4376 }
4377
4378 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4379 {
4380 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4381 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4382
4383 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4384 }
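/* The net effect on a typical 64-bit Linux VSX target (illustrative):

     __float128 q = 1.0q;

   compiles by default, and with full ISA 3.0 support the arithmetic can
   use the quad-precision hardware instructions (e.g. xsaddqp) instead of
   the libgcc software emulation.  */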
4385
4386 /* Print the options after updating the defaults. */
4387 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4388 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4389
4390 /* E500mc does "better" if we inline more aggressively. Respect the
4391 user's opinion, though. */
4392 if (rs6000_block_move_inline_limit == 0
4393 && (rs6000_tune == PROCESSOR_PPCE500MC
4394 || rs6000_tune == PROCESSOR_PPCE500MC64
4395 || rs6000_tune == PROCESSOR_PPCE5500
4396 || rs6000_tune == PROCESSOR_PPCE6500))
4397 rs6000_block_move_inline_limit = 128;
4398
4399 /* store_one_arg depends on expand_block_move to handle at least the
4400 size of reg_parm_stack_space. */
4401 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4402 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4403
4404 if (global_init_p)
4405 {
4406 /* If the appropriate debug option is enabled, replace the target hooks
4407 with debug versions that call the real version and then print
4408 debugging information. */
4409 if (TARGET_DEBUG_COST)
4410 {
4411 targetm.rtx_costs = rs6000_debug_rtx_costs;
4412 targetm.address_cost = rs6000_debug_address_cost;
4413 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4414 }
4415
4416 if (TARGET_DEBUG_ADDR)
4417 {
4418 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4419 targetm.legitimize_address = rs6000_debug_legitimize_address;
4420 rs6000_secondary_reload_class_ptr
4421 = rs6000_debug_secondary_reload_class;
4422 targetm.secondary_memory_needed
4423 = rs6000_debug_secondary_memory_needed;
4424 targetm.can_change_mode_class
4425 = rs6000_debug_can_change_mode_class;
4426 rs6000_preferred_reload_class_ptr
4427 = rs6000_debug_preferred_reload_class;
4428 rs6000_legitimize_reload_address_ptr
4429 = rs6000_debug_legitimize_reload_address;
4430 rs6000_mode_dependent_address_ptr
4431 = rs6000_debug_mode_dependent_address;
4432 }
4433
4434 if (rs6000_veclibabi_name)
4435 {
4436 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4437 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4438 else
4439 {
4440 error ("unknown vectorization library ABI type (%qs) for "
4441 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4442 ret = false;
4443 }
4444 }
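/* For reference (from the option's documented behavior): with
   "-mveclibabi=mass", the vectorizer may replace loops over libm
   functions (sin, cos, pow, ...) with calls into IBM's MASS vector
   library via the handler installed above.  */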
4445 }
4446
4447 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4448 target attribute or pragma which automatically enables both options,
4449 unless the altivec ABI was set. This is set by default for 64-bit, but
4450 not for 32-bit. */
4451 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4452 {
4453 TARGET_FLOAT128_TYPE = 0;
4454 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4455 | OPTION_MASK_FLOAT128_KEYWORD)
4456 & ~rs6000_isa_flags_explicit);
4457 }
4458
4459 /* Enable Altivec ABI for AIX -maltivec. */
4460 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4461 {
4462 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4463 error ("target attribute or pragma changes AltiVec ABI");
4464 else
4465 rs6000_altivec_abi = 1;
4466 }
4467
4468 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4469 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4470 be explicitly overridden in either case. */
4471 if (TARGET_ELF)
4472 {
4473 if (!global_options_set.x_rs6000_altivec_abi
4474 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4475 {
4476 if (main_target_opt != NULL &&
4477 !main_target_opt->x_rs6000_altivec_abi)
4478 error ("target attribute or pragma changes AltiVec ABI");
4479 else
4480 rs6000_altivec_abi = 1;
4481 }
4482 }
4483
4484 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4485 So far, the only darwin64 targets are also MACH-O. */
4486 if (TARGET_MACHO
4487 && DEFAULT_ABI == ABI_DARWIN
4488 && TARGET_64BIT)
4489 {
4490 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4491 error ("target attribute or pragma changes darwin64 ABI");
4492 else
4493 {
4494 rs6000_darwin64_abi = 1;
4495 /* Default to natural alignment, for better performance. */
4496 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4497 }
4498 }
4499
4500 /* Place FP constants in the constant pool instead of TOC
4501 if section anchors enabled. */
4502 if (flag_section_anchors
4503 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4504 TARGET_NO_FP_IN_TOC = 1;
4505
4506 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4507 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4508
4509 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4510 SUBTARGET_OVERRIDE_OPTIONS;
4511 #endif
4512 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4513 SUBSUBTARGET_OVERRIDE_OPTIONS;
4514 #endif
4515 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4516 SUB3TARGET_OVERRIDE_OPTIONS;
4517 #endif
4518
4519 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4520 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4521
4522 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4523 && rs6000_tune != PROCESSOR_POWER5
4524 && rs6000_tune != PROCESSOR_POWER6
4525 && rs6000_tune != PROCESSOR_POWER7
4526 && rs6000_tune != PROCESSOR_POWER8
4527 && rs6000_tune != PROCESSOR_POWER9
4528 && rs6000_tune != PROCESSOR_PPCA2
4529 && rs6000_tune != PROCESSOR_CELL
4530 && rs6000_tune != PROCESSOR_PPC476);
4531 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4532 || rs6000_tune == PROCESSOR_POWER5
4533 || rs6000_tune == PROCESSOR_POWER7
4534 || rs6000_tune == PROCESSOR_POWER8);
4535 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4536 || rs6000_tune == PROCESSOR_POWER5
4537 || rs6000_tune == PROCESSOR_POWER6
4538 || rs6000_tune == PROCESSOR_POWER7
4539 || rs6000_tune == PROCESSOR_POWER8
4540 || rs6000_tune == PROCESSOR_POWER9
4541 || rs6000_tune == PROCESSOR_PPCE500MC
4542 || rs6000_tune == PROCESSOR_PPCE500MC64
4543 || rs6000_tune == PROCESSOR_PPCE5500
4544 || rs6000_tune == PROCESSOR_PPCE6500);
4545
4546 /* Allow debug switches to override the above settings. These are set to -1
4547 in rs6000.opt to indicate the user hasn't directly set the switch. */
4548 if (TARGET_ALWAYS_HINT >= 0)
4549 rs6000_always_hint = TARGET_ALWAYS_HINT;
4550
4551 if (TARGET_SCHED_GROUPS >= 0)
4552 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4553
4554 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4555 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4556
4557 rs6000_sched_restricted_insns_priority
4558 = (rs6000_sched_groups ? 1 : 0);
4559
4560 /* Handle -msched-costly-dep option. */
4561 rs6000_sched_costly_dep
4562 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4563
4564 if (rs6000_sched_costly_dep_str)
4565 {
4566 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4567 rs6000_sched_costly_dep = no_dep_costly;
4568 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4569 rs6000_sched_costly_dep = all_deps_costly;
4570 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4571 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4572 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4573 rs6000_sched_costly_dep = store_to_load_dep_costly;
4574 else
4575 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4576 atoi (rs6000_sched_costly_dep_str));
4577 }
4578
4579 /* Handle -minsert-sched-nops option. */
4580 rs6000_sched_insert_nops
4581 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4582
4583 if (rs6000_sched_insert_nops_str)
4584 {
4585 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4586 rs6000_sched_insert_nops = sched_finish_none;
4587 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4588 rs6000_sched_insert_nops = sched_finish_pad_groups;
4589 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4590 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4591 else
4592 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4593 atoi (rs6000_sched_insert_nops_str));
4594 }
4595
4596 /* Handle stack protector. */
4597 if (!global_options_set.x_rs6000_stack_protector_guard)
4598 #ifdef TARGET_THREAD_SSP_OFFSET
4599 rs6000_stack_protector_guard = SSP_TLS;
4600 #else
4601 rs6000_stack_protector_guard = SSP_GLOBAL;
4602 #endif
4603
4604 #ifdef TARGET_THREAD_SSP_OFFSET
4605 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4606 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4607 #endif
4608
4609 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4610 {
4611 char *endp;
4612 const char *str = rs6000_stack_protector_guard_offset_str;
4613
4614 errno = 0;
4615 long offset = strtol (str, &endp, 0);
4616 if (!*str || *endp || errno)
4617 error ("%qs is not a valid number in %qs", str,
4618 "-mstack-protector-guard-offset=");
4619
4620 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4621 || (TARGET_64BIT && (offset & 3)))
4622 error ("%qs is not a valid offset in %qs", str,
4623 "-mstack-protector-guard-offset=");
4624
4625 rs6000_stack_protector_guard_offset = offset;
4626 }
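/* An illustrative command line for the TLS-based guard (example values,
   not mandated by this code):

     -mstack-protector-guard=tls -mstack-protector-guard-reg=r13
       -mstack-protector-guard-offset=-0x7010

   which makes the canary load use -0x7010(r13) instead of a global
   symbol.  */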
4627
4628 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4629 {
4630 const char *str = rs6000_stack_protector_guard_reg_str;
4631 int reg = decode_reg_name (str);
4632
4633 if (!IN_RANGE (reg, 1, 31))
4634 error ("%qs is not a valid base register in %qs", str,
4635 "-mstack-protector-guard-reg=");
4636
4637 rs6000_stack_protector_guard_reg = reg;
4638 }
4639
4640 if (rs6000_stack_protector_guard == SSP_TLS
4641 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4642 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4643
4644 if (global_init_p)
4645 {
4646 #ifdef TARGET_REGNAMES
4647 /* If the user desires alternate register names, copy in the
4648 alternate names now. */
4649 if (TARGET_REGNAMES)
4650 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4651 #endif
4652
4653 /* Set aix_struct_return last, after the ABI is determined.
4654 If -maix-struct-return or -msvr4-struct-return was explicitly
4655 used, don't override with the ABI default. */
4656 if (!global_options_set.x_aix_struct_return)
4657 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4658
4659 #if 0
4660 /* IBM XL compiler defaults to unsigned bitfields. */
4661 if (TARGET_XL_COMPAT)
4662 flag_signed_bitfields = 0;
4663 #endif
4664
4665 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4666 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4667
4668 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4669
4670 /* We can only guarantee the availability of DI pseudo-ops when
4671 assembling for 64-bit targets. */
4672 if (!TARGET_64BIT)
4673 {
4674 targetm.asm_out.aligned_op.di = NULL;
4675 targetm.asm_out.unaligned_op.di = NULL;
4676 }
4677
4678
4679 /* Set branch target alignment, if not optimizing for size. */
4680 if (!optimize_size)
4681 {
4682 /* Cell wants to be aligned on an 8-byte boundary for dual issue. Titan
4683 wants 8-byte alignment to avoid misprediction by the branch predictor. */
4684 if (rs6000_tune == PROCESSOR_TITAN
4685 || rs6000_tune == PROCESSOR_CELL)
4686 {
4687 if (flag_align_functions && !str_align_functions)
4688 str_align_functions = "8";
4689 if (flag_align_jumps && !str_align_jumps)
4690 str_align_jumps = "8";
4691 if (flag_align_loops && !str_align_loops)
4692 str_align_loops = "8";
4693 }
4694 if (rs6000_align_branch_targets)
4695 {
4696 if (flag_align_functions && !str_align_functions)
4697 str_align_functions = "16";
4698 if (flag_align_jumps && !str_align_jumps)
4699 str_align_jumps = "16";
4700 if (flag_align_loops && !str_align_loops)
4701 {
4702 can_override_loop_align = 1;
4703 str_align_loops = "16";
4704 }
4705 }
4706
4707 if (flag_align_jumps && !str_align_jumps)
4708 str_align_jumps = "16";
4709 if (flag_align_loops && !str_align_loops)
4710 str_align_loops = "16";
4711 }
4712
4713 /* Arrange to save and restore machine status around nested functions. */
4714 init_machine_status = rs6000_init_machine_status;
4715
4716 /* We should always be splitting complex arguments, but we can't break
4717 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4718 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4719 targetm.calls.split_complex_arg = NULL;
4720
4721 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4722 if (DEFAULT_ABI == ABI_AIX)
4723 targetm.calls.custom_function_descriptors = 0;
4724 }
4725
4726 /* Initialize rs6000_cost with the appropriate target costs. */
4727 if (optimize_size)
4728 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4729 else
4730 switch (rs6000_tune)
4731 {
4732 case PROCESSOR_RS64A:
4733 rs6000_cost = &rs64a_cost;
4734 break;
4735
4736 case PROCESSOR_MPCCORE:
4737 rs6000_cost = &mpccore_cost;
4738 break;
4739
4740 case PROCESSOR_PPC403:
4741 rs6000_cost = &ppc403_cost;
4742 break;
4743
4744 case PROCESSOR_PPC405:
4745 rs6000_cost = &ppc405_cost;
4746 break;
4747
4748 case PROCESSOR_PPC440:
4749 rs6000_cost = &ppc440_cost;
4750 break;
4751
4752 case PROCESSOR_PPC476:
4753 rs6000_cost = &ppc476_cost;
4754 break;
4755
4756 case PROCESSOR_PPC601:
4757 rs6000_cost = &ppc601_cost;
4758 break;
4759
4760 case PROCESSOR_PPC603:
4761 rs6000_cost = &ppc603_cost;
4762 break;
4763
4764 case PROCESSOR_PPC604:
4765 rs6000_cost = &ppc604_cost;
4766 break;
4767
4768 case PROCESSOR_PPC604e:
4769 rs6000_cost = &ppc604e_cost;
4770 break;
4771
4772 case PROCESSOR_PPC620:
4773 rs6000_cost = &ppc620_cost;
4774 break;
4775
4776 case PROCESSOR_PPC630:
4777 rs6000_cost = &ppc630_cost;
4778 break;
4779
4780 case PROCESSOR_CELL:
4781 rs6000_cost = &ppccell_cost;
4782 break;
4783
4784 case PROCESSOR_PPC750:
4785 case PROCESSOR_PPC7400:
4786 rs6000_cost = &ppc750_cost;
4787 break;
4788
4789 case PROCESSOR_PPC7450:
4790 rs6000_cost = &ppc7450_cost;
4791 break;
4792
4793 case PROCESSOR_PPC8540:
4794 case PROCESSOR_PPC8548:
4795 rs6000_cost = &ppc8540_cost;
4796 break;
4797
4798 case PROCESSOR_PPCE300C2:
4799 case PROCESSOR_PPCE300C3:
4800 rs6000_cost = &ppce300c2c3_cost;
4801 break;
4802
4803 case PROCESSOR_PPCE500MC:
4804 rs6000_cost = &ppce500mc_cost;
4805 break;
4806
4807 case PROCESSOR_PPCE500MC64:
4808 rs6000_cost = &ppce500mc64_cost;
4809 break;
4810
4811 case PROCESSOR_PPCE5500:
4812 rs6000_cost = &ppce5500_cost;
4813 break;
4814
4815 case PROCESSOR_PPCE6500:
4816 rs6000_cost = &ppce6500_cost;
4817 break;
4818
4819 case PROCESSOR_TITAN:
4820 rs6000_cost = &titan_cost;
4821 break;
4822
4823 case PROCESSOR_POWER4:
4824 case PROCESSOR_POWER5:
4825 rs6000_cost = &power4_cost;
4826 break;
4827
4828 case PROCESSOR_POWER6:
4829 rs6000_cost = &power6_cost;
4830 break;
4831
4832 case PROCESSOR_POWER7:
4833 rs6000_cost = &power7_cost;
4834 break;
4835
4836 case PROCESSOR_POWER8:
4837 rs6000_cost = &power8_cost;
4838 break;
4839
4840 case PROCESSOR_POWER9:
4841 rs6000_cost = &power9_cost;
4842 break;
4843
4844 case PROCESSOR_PPCA2:
4845 rs6000_cost = &ppca2_cost;
4846 break;
4847
4848 default:
4849 gcc_unreachable ();
4850 }
4851
4852 if (global_init_p)
4853 {
4854 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4855 rs6000_cost->simultaneous_prefetches,
4856 global_options.x_param_values,
4857 global_options_set.x_param_values);
4858 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4859 global_options.x_param_values,
4860 global_options_set.x_param_values);
4861 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4862 rs6000_cost->cache_line_size,
4863 global_options.x_param_values,
4864 global_options_set.x_param_values);
4865 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4866 global_options.x_param_values,
4867 global_options_set.x_param_values);
4868
4869 /* Increase loop peeling limits based on performance analysis. */
4870 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4871 global_options.x_param_values,
4872 global_options_set.x_param_values);
4873 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4874 global_options.x_param_values,
4875 global_options_set.x_param_values);
4876
4877 /* Use the 'model' -fsched-pressure algorithm by default. */
4878 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4879 SCHED_PRESSURE_MODEL,
4880 global_options.x_param_values,
4881 global_options_set.x_param_values);
4882
4883 /* If using typedef char *va_list, signal that
4884 __builtin_va_start (&ap, 0) can be optimized to
4885 ap = __builtin_next_arg (0). */
4886 if (DEFAULT_ABI != ABI_V4)
4887 targetm.expand_builtin_va_start = NULL;
4888 }
4889
4890 /* If not explicitly specified via option, decide whether to generate indexed
4891 load/store instructions. A value of -1 indicates that the
4892 initial value of this variable has not been overwritten. During
4893 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4894 if (TARGET_AVOID_XFORM == -1)
4895 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4896 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4897 need indexed accesses and the type used is the scalar type of the element
4898 being loaded or stored. */
4899 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4900 && !TARGET_ALTIVEC);
4901
4902 /* Set the -mrecip options. */
4903 if (rs6000_recip_name)
4904 {
4905 char *p = ASTRDUP (rs6000_recip_name);
4906 char *q;
4907 unsigned int mask, i;
4908 bool invert;
4909
4910 while ((q = strtok (p, ",")) != NULL)
4911 {
4912 p = NULL;
4913 if (*q == '!')
4914 {
4915 invert = true;
4916 q++;
4917 }
4918 else
4919 invert = false;
4920
4921 if (!strcmp (q, "default"))
4922 mask = ((TARGET_RECIP_PRECISION)
4923 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4924 else
4925 {
4926 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4927 if (!strcmp (q, recip_options[i].string))
4928 {
4929 mask = recip_options[i].mask;
4930 break;
4931 }
4932
4933 if (i == ARRAY_SIZE (recip_options))
4934 {
4935 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4936 invert = false;
4937 mask = 0;
4938 ret = false;
4939 }
4940 }
4941
4942 if (invert)
4943 rs6000_recip_control &= ~mask;
4944 else
4945 rs6000_recip_control |= mask;
4946 }
4947 }
4948
4949 /* Set the builtin mask from the various options that could affect which
4950 builtins are available.  In the past we used target_flags, but we've run
4951 out of bits, and some options are no longer in target_flags. */
4952 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4953 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4954 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4955 rs6000_builtin_mask);
4956
4957 /* Initialize all of the registers. */
4958 rs6000_init_hard_regno_mode_ok (global_init_p);
4959
4960 /* Save the initial options in case the user uses function-specific options. */
4961 if (global_init_p)
4962 target_option_default_node = target_option_current_node
4963 = build_target_option_node (&global_options);
4964
4965 /* If not explicitly specified via option, decide whether to generate the
4966 extra blrs required to preserve the link stack on some cpus (e.g., 476). */
4967 if (TARGET_LINK_STACK == -1)
4968 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4969
4970 /* Deprecate use of -mno-speculate-indirect-jumps. */
4971 if (!rs6000_speculate_indirect_jumps)
4972 warning (0, "%qs is deprecated and not recommended in any circumstances",
4973 "-mno-speculate-indirect-jumps");
4974
4975 return ret;
4976 }
4977
4978 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4979 define the target cpu type. */
4980
4981 static void
4982 rs6000_option_override (void)
4983 {
4984 (void) rs6000_option_override_internal (true);
4985 }
4986
4987 \f
4988 /* Implement targetm.vectorize.builtin_mask_for_load. */
4989 static tree
4990 rs6000_builtin_mask_for_load (void)
4991 {
4992 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4993 if ((TARGET_ALTIVEC && !TARGET_VSX)
4994 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4995 return altivec_builtin_mask_for_load;
4996 else
4997 return 0;
4998 }
4999
5000 /* Implement LOOP_ALIGN. */
5001 align_flags
5002 rs6000_loop_align (rtx label)
5003 {
5004 basic_block bb;
5005 int ninsns;
5006
5007 /* Don't override loop alignment if -falign-loops was specified. */
5008 if (!can_override_loop_align)
5009 return align_loops;
5010
5011 bb = BLOCK_FOR_INSN (label);
5012 ninsns = num_loop_insns (bb->loop_father);
5013
5014 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5015 if (ninsns > 4 && ninsns <= 8
5016 && (rs6000_tune == PROCESSOR_POWER4
5017 || rs6000_tune == PROCESSOR_POWER5
5018 || rs6000_tune == PROCESSOR_POWER6
5019 || rs6000_tune == PROCESSOR_POWER7
5020 || rs6000_tune == PROCESSOR_POWER8))
5021 return align_flags (5);
5022 else
5023 return align_loops;
5024 }
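/* Illustrative note on the heuristic above: align_flags (5) requests
   2**5 == 32-byte alignment.  A loop of 5 to 8 four-byte instructions
   spans 20 to 32 bytes, so aligning its start to 32 bytes lets the
   whole body fit in one 32-byte icache sector on the listed tunings;
   larger loops span sectors regardless and keep the default.  */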
5025
5026 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5027 after some number of peeling iterations.  This routine does not determine
5028 how many iterations are required to reach the desired alignment. */
5029
5030 static bool
5031 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5032 {
5033 if (is_packed)
5034 return false;
5035
5036 if (TARGET_32BIT)
5037 {
5038 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5039 return true;
5040
5041 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5042 return true;
5043
5044 return false;
5045 }
5046 else
5047 {
5048 if (TARGET_MACHO)
5049 return false;
5050
5051 /* Assume that all other types are naturally aligned. CHECKME! */
5052 return true;
5053 }
5054 }
5055
5056 /* Return true if the vector misalignment factor is supported by the
5057 target. */
5058 static bool
5059 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5060 const_tree type,
5061 int misalignment,
5062 bool is_packed)
5063 {
5064 if (TARGET_VSX)
5065 {
5066 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5067 return true;
5068
5069 /* Return false if the movmisalign pattern is not supported for this mode. */
5070 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5071 return false;
5072
5073 if (misalignment == -1)
5074 {
5075 /* The misalignment factor is unknown at compile time, but we know
5076 the reference is at least word aligned. */
5077 if (rs6000_vector_alignment_reachable (type, is_packed))
5078 {
5079 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5080
5081 if (element_size == 64 || element_size == 32)
5082 return true;
5083 }
5084
5085 return false;
5086 }
5087
5088 /* VSX supports word-aligned vector accesses. */
5089 if (misalignment % 4 == 0)
5090 return true;
5091 }
5092 return false;
5093 }
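/* Illustrative examples of the checks above: a V4SImode access with
   misalignment 4, 8 or 12 satisfies misalignment % 4 == 0 and is
   supported via movmisalign, while a misalignment that is not a
   multiple of 4 is rejected.  When the misalignment is unknown (-1),
   a type with 32-bit or 64-bit elements is accepted only if
   rs6000_vector_alignment_reachable says peeling can realign the
   reference.  */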
5094
5095 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5096 static int
5097 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5098 tree vectype, int misalign)
5099 {
5100 unsigned elements;
5101 tree elem_type;
5102
5103 switch (type_of_cost)
5104 {
5105 case scalar_stmt:
5106 case scalar_load:
5107 case scalar_store:
5108 case vector_stmt:
5109 case vector_load:
5110 case vector_store:
5111 case vec_to_scalar:
5112 case scalar_to_vec:
5113 case cond_branch_not_taken:
5114 return 1;
5115
5116 case vec_perm:
5117 if (TARGET_VSX)
5118 return 3;
5119 else
5120 return 1;
5121
5122 case vec_promote_demote:
5123 if (TARGET_VSX)
5124 return 4;
5125 else
5126 return 1;
5127
5128 case cond_branch_taken:
5129 return 3;
5130
5131 case unaligned_load:
5132 case vector_gather_load:
5133 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5134 return 1;
5135
5136 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5137 {
5138 elements = TYPE_VECTOR_SUBPARTS (vectype);
5139 if (elements == 2)
5140 /* Double word aligned. */
5141 return 2;
5142
5143 if (elements == 4)
5144 {
5145 switch (misalign)
5146 {
5147 case 8:
5148 /* Double word aligned. */
5149 return 2;
5150
5151 case -1:
5152 /* Unknown misalignment. */
5153 case 4:
5154 case 12:
5155 /* Word aligned. */
5156 return 22;
5157
5158 default:
5159 gcc_unreachable ();
5160 }
5161 }
5162 }
5163
5164 if (TARGET_ALTIVEC)
5165 /* Misaligned loads are not supported. */
5166 gcc_unreachable ();
5167
5168 return 2;
5169
5170 case unaligned_store:
5171 case vector_scatter_store:
5172 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5173 return 1;
5174
5175 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5176 {
5177 elements = TYPE_VECTOR_SUBPARTS (vectype);
5178 if (elements == 2)
5179 /* Double word aligned. */
5180 return 2;
5181
5182 if (elements == 4)
5183 {
5184 switch (misalign)
5185 {
5186 case 8:
5187 /* Double word aligned. */
5188 return 2;
5189
5190 case -1:
5191 /* Unknown misalignment. */
5192 case 4:
5193 case 12:
5194 /* Word aligned. */
5195 return 23;
5196
5197 default:
5198 gcc_unreachable ();
5199 }
5200 }
5201 }
5202
5203 if (TARGET_ALTIVEC)
5204 /* Misaligned stores are not supported. */
5205 gcc_unreachable ();
5206
5207 return 2;
5208
5209 case vec_construct:
5210 /* This is a rough approximation assuming non-constant elements
5211 constructed into a vector via element insertion. FIXME:
5212 vec_construct is not granular enough for uniformly good
5213 decisions. If the initialization is a splat, this is
5214 cheaper than we estimate. Improve this someday. */
5215 elem_type = TREE_TYPE (vectype);
5216 /* 32-bit floats loaded into registers are stored as double
5217 precision, so we need 2 permutes, 2 converts, and 1 merge
5218 to construct a vector of single-precision floats from them. */
5219 if (SCALAR_FLOAT_TYPE_P (elem_type)
5220 && TYPE_PRECISION (elem_type) == 32)
5221 return 5;
5222 /* On POWER9, integer vector types are built up in GPRs and then
5223 use a direct move (2 cycles). For POWER8 this is even worse,
5224 as we need two direct moves and a merge, and the direct moves
5225 are five cycles. */
5226 else if (INTEGRAL_TYPE_P (elem_type))
5227 {
5228 if (TARGET_P9_VECTOR)
5229 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5230 else
5231 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5232 }
5233 else
5234 /* V2DFmode doesn't need a direct move. */
5235 return 2;
5236
5237 default:
5238 gcc_unreachable ();
5239 }
5240 }
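/* Example of the unaligned costs above: a V4SFmode unaligned_load with
   misalign == 4 on a VSX target without TARGET_EFFICIENT_UNALIGNED_VSX
   is charged 22, a deliberately punitive value that steers the
   vectorizer away from merely word-aligned accesses, while a
   double-word aligned access (misalign == 8) is charged only 2.  */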
5241
5242 /* Implement targetm.vectorize.preferred_simd_mode. */
5243
5244 static machine_mode
5245 rs6000_preferred_simd_mode (scalar_mode mode)
5246 {
5247 if (TARGET_VSX)
5248 switch (mode)
5249 {
5250 case E_DFmode:
5251 return V2DFmode;
5252 default:;
5253 }
5254 if (TARGET_ALTIVEC || TARGET_VSX)
5255 switch (mode)
5256 {
5257 case E_SFmode:
5258 return V4SFmode;
5259 case E_TImode:
5260 return V1TImode;
5261 case E_DImode:
5262 return V2DImode;
5263 case E_SImode:
5264 return V4SImode;
5265 case E_HImode:
5266 return V8HImode;
5267 case E_QImode:
5268 return V16QImode;
5269 default:;
5270 }
5271 return word_mode;
5272 }
5273
5274 typedef struct _rs6000_cost_data
5275 {
5276 struct loop *loop_info;
5277 unsigned cost[3];
5278 } rs6000_cost_data;
5279
5280 /* Test for likely overcommitment of vector hardware resources. If a
5281 loop iteration is relatively large, and too large a percentage of
5282 instructions in the loop are vectorized, the cost model may not
5283 adequately reflect delays from unavailable vector resources.
5284 Penalize the loop body cost for this case. */
5285
5286 static void
5287 rs6000_density_test (rs6000_cost_data *data)
5288 {
5289 const int DENSITY_PCT_THRESHOLD = 85;
5290 const int DENSITY_SIZE_THRESHOLD = 70;
5291 const int DENSITY_PENALTY = 10;
5292 struct loop *loop = data->loop_info;
5293 basic_block *bbs = get_loop_body (loop);
5294 int nbbs = loop->num_nodes;
5295 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5296 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5297 int i, density_pct;
5298
5299 for (i = 0; i < nbbs; i++)
5300 {
5301 basic_block bb = bbs[i];
5302 gimple_stmt_iterator gsi;
5303
5304 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5305 {
5306 gimple *stmt = gsi_stmt (gsi);
5307 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5308
5309 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5310 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5311 not_vec_cost++;
5312 }
5313 }
5314
5315 free (bbs);
5316 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5317
5318 if (density_pct > DENSITY_PCT_THRESHOLD
5319 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5320 {
5321 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5322 if (dump_enabled_p ())
5323 dump_printf_loc (MSG_NOTE, vect_location,
5324 "density %d%%, cost %d exceeds threshold, penalizing "
5325 "loop body cost by %d%%", density_pct,
5326 vec_cost + not_vec_cost, DENSITY_PENALTY);
5327 }
5328 }
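/* Worked example of the density penalty: vec_cost == 90 and
   not_vec_cost == 10 give density_pct == 90; since 90 > 85 and
   90 + 10 == 100 > 70, the body cost becomes
   90 * (100 + 10) / 100 == 99.  A minimal standalone sketch of the
   same arithmetic, with the thresholds hard-coded and kept out of
   the build on purpose: */
#if 0
static int
density_penalized_cost (int vec_cost, int not_vec_cost)
{
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 /* DENSITY_PCT_THRESHOLD */
      && vec_cost + not_vec_cost > 70 /* DENSITY_SIZE_THRESHOLD */)
    return vec_cost * (100 + 10 /* DENSITY_PENALTY */) / 100;
  return vec_cost;
}
#endif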
5329
5330 /* Implement targetm.vectorize.init_cost. */
5331
5332 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5333 instruction is needed by the vectorization. */
5334 static bool rs6000_vect_nonmem;
5335
5336 static void *
5337 rs6000_init_cost (struct loop *loop_info)
5338 {
5339 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5340 data->loop_info = loop_info;
5341 data->cost[vect_prologue] = 0;
5342 data->cost[vect_body] = 0;
5343 data->cost[vect_epilogue] = 0;
5344 rs6000_vect_nonmem = false;
5345 return data;
5346 }
5347
5348 /* Implement targetm.vectorize.add_stmt_cost. */
5349
5350 static unsigned
5351 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5352 struct _stmt_vec_info *stmt_info, int misalign,
5353 enum vect_cost_model_location where)
5354 {
5355 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5356 unsigned retval = 0;
5357
5358 if (flag_vect_cost_model)
5359 {
5360 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5361 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5362 misalign);
5363 /* Statements in an inner loop relative to the loop being
5364 vectorized are weighted more heavily. The value here is
5365 arbitrary and could potentially be improved with analysis. */
5366 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5367 count *= 50; /* FIXME. */
5368
5369 retval = (unsigned) (count * stmt_cost);
5370 cost_data->cost[where] += retval;
5371
5372 /* Check whether we're doing something other than just a copy loop.
5373 Not all such loops may be profitably vectorized; see
5374 rs6000_finish_cost. */
5375 if ((kind == vec_to_scalar || kind == vec_perm
5376 || kind == vec_promote_demote || kind == vec_construct
5377 || kind == scalar_to_vec)
5378 || (where == vect_body && kind == vector_stmt))
5379 rs6000_vect_nonmem = true;
5380 }
5381
5382 return retval;
5383 }
5384
5385 /* Implement targetm.vectorize.finish_cost. */
5386
5387 static void
5388 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5389 unsigned *body_cost, unsigned *epilogue_cost)
5390 {
5391 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5392
5393 if (cost_data->loop_info)
5394 rs6000_density_test (cost_data);
5395
5396 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5397 that require versioning for any reason. The vectorization is at
5398 best a wash inside the loop, and the versioning checks make
5399 profitability highly unlikely and potentially quite harmful. */
5400 if (cost_data->loop_info)
5401 {
5402 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5403 if (!rs6000_vect_nonmem
5404 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5405 && LOOP_REQUIRES_VERSIONING (vec_info))
5406 cost_data->cost[vect_body] += 10000;
5407 }
5408
5409 *prologue_cost = cost_data->cost[vect_prologue];
5410 *body_cost = cost_data->cost[vect_body];
5411 *epilogue_cost = cost_data->cost[vect_epilogue];
5412 }
5413
5414 /* Implement targetm.vectorize.destroy_cost_data. */
5415
5416 static void
5417 rs6000_destroy_cost_data (void *data)
5418 {
5419 free (data);
5420 }
5421
5422 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5423 library with vectorized intrinsics. */
5424
5425 static tree
5426 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5427 tree type_in)
5428 {
5429 char name[32];
5430 const char *suffix = NULL;
5431 tree fntype, new_fndecl, bdecl = NULL_TREE;
5432 int n_args = 1;
5433 const char *bname;
5434 machine_mode el_mode, in_mode;
5435 int n, in_n;
5436
5437 /* Libmass is suitable for unsafe math only, as it does not correctly
5438 support parts of IEEE (such as denormals) with the required precision.
5439 Only support it if we have VSX, to use the simd d2 or f4 functions.
5440 XXX: Add variable length support. */
5441 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5442 return NULL_TREE;
5443
5444 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5445 n = TYPE_VECTOR_SUBPARTS (type_out);
5446 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5447 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5448 if (el_mode != in_mode
5449 || n != in_n)
5450 return NULL_TREE;
5451
5452 switch (fn)
5453 {
5454 CASE_CFN_ATAN2:
5455 CASE_CFN_HYPOT:
5456 CASE_CFN_POW:
5457 n_args = 2;
5458 gcc_fallthrough ();
5459
5460 CASE_CFN_ACOS:
5461 CASE_CFN_ACOSH:
5462 CASE_CFN_ASIN:
5463 CASE_CFN_ASINH:
5464 CASE_CFN_ATAN:
5465 CASE_CFN_ATANH:
5466 CASE_CFN_CBRT:
5467 CASE_CFN_COS:
5468 CASE_CFN_COSH:
5469 CASE_CFN_ERF:
5470 CASE_CFN_ERFC:
5471 CASE_CFN_EXP2:
5472 CASE_CFN_EXP:
5473 CASE_CFN_EXPM1:
5474 CASE_CFN_LGAMMA:
5475 CASE_CFN_LOG10:
5476 CASE_CFN_LOG1P:
5477 CASE_CFN_LOG2:
5478 CASE_CFN_LOG:
5479 CASE_CFN_SIN:
5480 CASE_CFN_SINH:
5481 CASE_CFN_SQRT:
5482 CASE_CFN_TAN:
5483 CASE_CFN_TANH:
5484 if (el_mode == DFmode && n == 2)
5485 {
5486 bdecl = mathfn_built_in (double_type_node, fn);
5487 suffix = "d2"; /* pow -> powd2 */
5488 }
5489 else if (el_mode == SFmode && n == 4)
5490 {
5491 bdecl = mathfn_built_in (float_type_node, fn);
5492 suffix = "4"; /* powf -> powf4 */
5493 }
5494 else
5495 return NULL_TREE;
5496 if (!bdecl)
5497 return NULL_TREE;
5498 break;
5499
5500 default:
5501 return NULL_TREE;
5502 }
5503
5504 gcc_assert (suffix != NULL);
5505 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5506 if (!bname)
5507 return NULL_TREE;
5508
5509 strcpy (name, bname + sizeof ("__builtin_") - 1);
5510 strcat (name, suffix);
5511
5512 if (n_args == 1)
5513 fntype = build_function_type_list (type_out, type_in, NULL);
5514 else if (n_args == 2)
5515 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5516 else
5517 gcc_unreachable ();
5518
5519 /* Build a function declaration for the vectorized function. */
5520 new_fndecl = build_decl (BUILTINS_LOCATION,
5521 FUNCTION_DECL, get_identifier (name), fntype);
5522 TREE_PUBLIC (new_fndecl) = 1;
5523 DECL_EXTERNAL (new_fndecl) = 1;
5524 DECL_IS_NOVOPS (new_fndecl) = 1;
5525 TREE_READONLY (new_fndecl) = 1;
5526
5527 return new_fndecl;
5528 }
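/* Illustrative example of the name construction above: vectorizing
   cosf over V4SFmode resolves __builtin_cosf via mathfn_built_in,
   strips the "__builtin_" prefix and appends "4", declaring an
   external "cosf4" with type V4SF (V4SF) for the MASS library to
   provide.  */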
5529
5530 /* Returns a function decl for a vectorized version of the builtin function
5531 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5532 if it is not available. */
5533
5534 static tree
5535 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5536 tree type_in)
5537 {
5538 machine_mode in_mode, out_mode;
5539 int in_n, out_n;
5540
5541 if (TARGET_DEBUG_BUILTIN)
5542 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5543 combined_fn_name (combined_fn (fn)),
5544 GET_MODE_NAME (TYPE_MODE (type_out)),
5545 GET_MODE_NAME (TYPE_MODE (type_in)));
5546
5547 if (TREE_CODE (type_out) != VECTOR_TYPE
5548 || TREE_CODE (type_in) != VECTOR_TYPE)
5549 return NULL_TREE;
5550
5551 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5552 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5553 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5554 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5555
5556 switch (fn)
5557 {
5558 CASE_CFN_COPYSIGN:
5559 if (VECTOR_UNIT_VSX_P (V2DFmode)
5560 && out_mode == DFmode && out_n == 2
5561 && in_mode == DFmode && in_n == 2)
5562 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5563 if (VECTOR_UNIT_VSX_P (V4SFmode)
5564 && out_mode == SFmode && out_n == 4
5565 && in_mode == SFmode && in_n == 4)
5566 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5567 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5568 && out_mode == SFmode && out_n == 4
5569 && in_mode == SFmode && in_n == 4)
5570 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5571 break;
5572 CASE_CFN_CEIL:
5573 if (VECTOR_UNIT_VSX_P (V2DFmode)
5574 && out_mode == DFmode && out_n == 2
5575 && in_mode == DFmode && in_n == 2)
5576 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5577 if (VECTOR_UNIT_VSX_P (V4SFmode)
5578 && out_mode == SFmode && out_n == 4
5579 && in_mode == SFmode && in_n == 4)
5580 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5581 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5582 && out_mode == SFmode && out_n == 4
5583 && in_mode == SFmode && in_n == 4)
5584 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5585 break;
5586 CASE_CFN_FLOOR:
5587 if (VECTOR_UNIT_VSX_P (V2DFmode)
5588 && out_mode == DFmode && out_n == 2
5589 && in_mode == DFmode && in_n == 2)
5590 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5591 if (VECTOR_UNIT_VSX_P (V4SFmode)
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5595 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5596 && out_mode == SFmode && out_n == 4
5597 && in_mode == SFmode && in_n == 4)
5598 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5599 break;
5600 CASE_CFN_FMA:
5601 if (VECTOR_UNIT_VSX_P (V2DFmode)
5602 && out_mode == DFmode && out_n == 2
5603 && in_mode == DFmode && in_n == 2)
5604 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5605 if (VECTOR_UNIT_VSX_P (V4SFmode)
5606 && out_mode == SFmode && out_n == 4
5607 && in_mode == SFmode && in_n == 4)
5608 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5609 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5610 && out_mode == SFmode && out_n == 4
5611 && in_mode == SFmode && in_n == 4)
5612 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5613 break;
5614 CASE_CFN_TRUNC:
5615 if (VECTOR_UNIT_VSX_P (V2DFmode)
5616 && out_mode == DFmode && out_n == 2
5617 && in_mode == DFmode && in_n == 2)
5618 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5619 if (VECTOR_UNIT_VSX_P (V4SFmode)
5620 && out_mode == SFmode && out_n == 4
5621 && in_mode == SFmode && in_n == 4)
5622 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5623 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5624 && out_mode == SFmode && out_n == 4
5625 && in_mode == SFmode && in_n == 4)
5626 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5627 break;
5628 CASE_CFN_NEARBYINT:
5629 if (VECTOR_UNIT_VSX_P (V2DFmode)
5630 && flag_unsafe_math_optimizations
5631 && out_mode == DFmode && out_n == 2
5632 && in_mode == DFmode && in_n == 2)
5633 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5634 if (VECTOR_UNIT_VSX_P (V4SFmode)
5635 && flag_unsafe_math_optimizations
5636 && out_mode == SFmode && out_n == 4
5637 && in_mode == SFmode && in_n == 4)
5638 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5639 break;
5640 CASE_CFN_RINT:
5641 if (VECTOR_UNIT_VSX_P (V2DFmode)
5642 && !flag_trapping_math
5643 && out_mode == DFmode && out_n == 2
5644 && in_mode == DFmode && in_n == 2)
5645 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5646 if (VECTOR_UNIT_VSX_P (V4SFmode)
5647 && !flag_trapping_math
5648 && out_mode == SFmode && out_n == 4
5649 && in_mode == SFmode && in_n == 4)
5650 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5651 break;
5652 default:
5653 break;
5654 }
5655
5656 /* Generate calls to libmass if appropriate. */
5657 if (rs6000_veclib_handler)
5658 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5659
5660 return NULL_TREE;
5661 }
5662
5663 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5664
5665 static tree
5666 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5667 tree type_in)
5668 {
5669 machine_mode in_mode, out_mode;
5670 int in_n, out_n;
5671
5672 if (TARGET_DEBUG_BUILTIN)
5673 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5674 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5675 GET_MODE_NAME (TYPE_MODE (type_out)),
5676 GET_MODE_NAME (TYPE_MODE (type_in)));
5677
5678 if (TREE_CODE (type_out) != VECTOR_TYPE
5679 || TREE_CODE (type_in) != VECTOR_TYPE)
5680 return NULL_TREE;
5681
5682 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5683 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5684 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5685 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5686
5687 enum rs6000_builtins fn
5688 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5689 switch (fn)
5690 {
5691 case RS6000_BUILTIN_RSQRTF:
5692 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5693 && out_mode == SFmode && out_n == 4
5694 && in_mode == SFmode && in_n == 4)
5695 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5696 break;
5697 case RS6000_BUILTIN_RSQRT:
5698 if (VECTOR_UNIT_VSX_P (V2DFmode)
5699 && out_mode == DFmode && out_n == 2
5700 && in_mode == DFmode && in_n == 2)
5701 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5702 break;
5703 case RS6000_BUILTIN_RECIPF:
5704 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5705 && out_mode == SFmode && out_n == 4
5706 && in_mode == SFmode && in_n == 4)
5707 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5708 break;
5709 case RS6000_BUILTIN_RECIP:
5710 if (VECTOR_UNIT_VSX_P (V2DFmode)
5711 && out_mode == DFmode && out_n == 2
5712 && in_mode == DFmode && in_n == 2)
5713 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5714 break;
5715 default:
5716 break;
5717 }
5718 return NULL_TREE;
5719 }
5720 \f
5721 /* Default CPU string for rs6000*_file_start functions. */
5722 static const char *rs6000_default_cpu;
5723
5724 /* Do anything needed at the start of the asm file. */
5725
5726 static void
5727 rs6000_file_start (void)
5728 {
5729 char buffer[80];
5730 const char *start = buffer;
5731 FILE *file = asm_out_file;
5732
5733 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5734
5735 default_file_start ();
5736
5737 if (flag_verbose_asm)
5738 {
5739 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5740
5741 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5742 {
5743 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5744 start = "";
5745 }
5746
5747 if (global_options_set.x_rs6000_cpu_index)
5748 {
5749 fprintf (file, "%s -mcpu=%s", start,
5750 processor_target_table[rs6000_cpu_index].name);
5751 start = "";
5752 }
5753
5754 if (global_options_set.x_rs6000_tune_index)
5755 {
5756 fprintf (file, "%s -mtune=%s", start,
5757 processor_target_table[rs6000_tune_index].name);
5758 start = "";
5759 }
5760
5761 if (PPC405_ERRATUM77)
5762 {
5763 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5764 start = "";
5765 }
5766
5767 #ifdef USING_ELFOS_H
5768 switch (rs6000_sdata)
5769 {
5770 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5771 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5772 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5773 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5774 }
5775
5776 if (rs6000_sdata && g_switch_value)
5777 {
5778 fprintf (file, "%s -G %d", start,
5779 g_switch_value);
5780 start = "";
5781 }
5782 #endif
5783
5784 if (*start == '\0')
5785 putc ('\n', file);
5786 }
5787
5788 #ifdef USING_ELFOS_H
5789 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5790 && !global_options_set.x_rs6000_cpu_index)
5791 {
5792 fputs ("\t.machine ", asm_out_file);
5793 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5794 fputs ("power9\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5796 fputs ("power8\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5798 fputs ("power7\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5800 fputs ("power6\n", asm_out_file);
5801 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5802 fputs ("power5\n", asm_out_file);
5803 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5804 fputs ("power4\n", asm_out_file);
5805 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5806 fputs ("ppc64\n", asm_out_file);
5807 else
5808 fputs ("ppc\n", asm_out_file);
5809 }
5810 #endif
5811
5812 if (DEFAULT_ABI == ABI_ELFv2)
5813 fprintf (file, "\t.abiversion 2\n");
5814 }
5815
5816 \f
5817 /* Return nonzero if this function is known to have a null epilogue. */
5818
5819 int
5820 direct_return (void)
5821 {
5822 if (reload_completed)
5823 {
5824 rs6000_stack_t *info = rs6000_stack_info ();
5825
5826 if (info->first_gp_reg_save == 32
5827 && info->first_fp_reg_save == 64
5828 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5829 && ! info->lr_save_p
5830 && ! info->cr_save_p
5831 && info->vrsave_size == 0
5832 && ! info->push_p)
5833 return 1;
5834 }
5835
5836 return 0;
5837 }
5838
5839 /* Helper for num_insns_constant. Calculate number of instructions to
5840 load VALUE to a single gpr using combinations of addi, addis, ori,
5841 oris and sldi instructions. */
5842
5843 static int
5844 num_insns_constant_gpr (HOST_WIDE_INT value)
5845 {
5846 /* signed constant loadable with addi */
5847 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5848 return 1;
5849
5850 /* constant loadable with addis */
5851 else if ((value & 0xffff) == 0
5852 && (value >> 31 == -1 || value >> 31 == 0))
5853 return 1;
5854
5855 else if (TARGET_POWERPC64)
5856 {
5857 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5858 HOST_WIDE_INT high = value >> 31;
5859
5860 if (high == 0 || high == -1)
5861 return 2;
5862
5863 high >>= 1;
5864
5865 if (low == 0)
5866 return num_insns_constant_gpr (high) + 1;
5867 else if (high == 0)
5868 return num_insns_constant_gpr (low) + 1;
5869 else
5870 return (num_insns_constant_gpr (high)
5871 + num_insns_constant_gpr (low) + 1);
5872 }
5873
5874 else
5875 return 2;
5876 }
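/* Worked example (assuming TARGET_POWERPC64): for
   value == 0x1234567800000000, low == 0 and the shifted high part is
   0x12345678, so the result is num_insns_constant_gpr (0x12345678) + 1
   == 3, matching the sequence
	lis   rD,0x1234
	ori   rD,rD,0x5678
	sldi  rD,rD,32  */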
5877
5878 /* Helper for num_insns_constant. Allow constants formed by the
5879 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5880 and handle modes that require multiple gprs. */
5881
5882 static int
5883 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5884 {
5885 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5886 int total = 0;
5887 while (nregs-- > 0)
5888 {
5889 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5890 int insns = num_insns_constant_gpr (low);
5891 if (insns > 2
5892 /* We won't get more than 2 from num_insns_constant_gpr
5893 except when TARGET_POWERPC64 and mode is DImode or
5894 wider, so the register mode must be DImode. */
5895 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5896 insns = 2;
5897 total += insns;
5898 value >>= BITS_PER_WORD;
5899 }
5900 return total;
5901 }
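/* Illustrative example of the and-mask shortcut: a run-of-ones value
   such as 0x0000fffffffff000 needs more than two of the
   addi/addis/ori/oris/sldi steps above, but it is a valid DImode
   and-mask, so it is counted as 2 instructions, e.g. li rD,-1
   followed by a single rotate-and-mask (rldic-family) instruction.  */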
5902
5903 /* Return the number of instructions it takes to form a constant in as
5904 many gprs as are needed for MODE. */
5905
5906 int
5907 num_insns_constant (rtx op, machine_mode mode)
5908 {
5909 HOST_WIDE_INT val;
5910
5911 switch (GET_CODE (op))
5912 {
5913 case CONST_INT:
5914 val = INTVAL (op);
5915 break;
5916
5917 case CONST_WIDE_INT:
5918 {
5919 int insns = 0;
5920 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5921 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5922 DImode);
5923 return insns;
5924 }
5925
5926 case CONST_DOUBLE:
5927 {
5928 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5929
5930 if (mode == SFmode || mode == SDmode)
5931 {
5932 long l;
5933
5934 if (mode == SDmode)
5935 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5936 else
5937 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5938 /* See the first define_split in rs6000.md handling a
5939 const_double_operand. */
5940 val = l;
5941 mode = SImode;
5942 }
5943 else if (mode == DFmode || mode == DDmode)
5944 {
5945 long l[2];
5946
5947 if (mode == DDmode)
5948 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5949 else
5950 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5951
5952 /* See the second (32-bit) and third (64-bit) define_split
5953 in rs6000.md handling a const_double_operand. */
5954 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5955 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5956 mode = DImode;
5957 }
5958 else if (mode == TFmode || mode == TDmode
5959 || mode == KFmode || mode == IFmode)
5960 {
5961 long l[4];
5962 int insns;
5963
5964 if (mode == TDmode)
5965 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5966 else
5967 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5968
5969 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5970 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5971 insns = num_insns_constant_multi (val, DImode);
5972 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5973 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5974 insns += num_insns_constant_multi (val, DImode);
5975 return insns;
5976 }
5977 else
5978 gcc_unreachable ();
5979 }
5980 break;
5981
5982 default:
5983 gcc_unreachable ();
5984 }
5985
5986 return num_insns_constant_multi (val, mode);
5987 }
5988
5989 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5990 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5991 corresponding element of the vector, but for V4SFmode, the
5992 corresponding "float" is interpreted as an SImode integer. */
5993
5994 HOST_WIDE_INT
5995 const_vector_elt_as_int (rtx op, unsigned int elt)
5996 {
5997 rtx tmp;
5998
5999 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6000 gcc_assert (GET_MODE (op) != V2DImode
6001 && GET_MODE (op) != V2DFmode);
6002
6003 tmp = CONST_VECTOR_ELT (op, elt);
6004 if (GET_MODE (op) == V4SFmode)
6005 tmp = gen_lowpart (SImode, tmp);
6006 return INTVAL (tmp);
6007 }
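/* Example: for a V4SFmode CONST_VECTOR whose elements are all 1.0f,
   each element is reinterpreted through gen_lowpart as the SImode bit
   pattern 0x3f800000, which is what the vspltis* recognizers below
   compare against.  */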
6008
6009 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6010 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6011 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6012 all items are set to the same value and contain COPIES replicas of the
6013 vsplt's operand; if STEP > 1, one in every STEP elements is set to the
6014 vsplt's operand and the others are set to the value of the operand's msb. */
6015
6016 static bool
6017 vspltis_constant (rtx op, unsigned step, unsigned copies)
6018 {
6019 machine_mode mode = GET_MODE (op);
6020 machine_mode inner = GET_MODE_INNER (mode);
6021
6022 unsigned i;
6023 unsigned nunits;
6024 unsigned bitsize;
6025 unsigned mask;
6026
6027 HOST_WIDE_INT val;
6028 HOST_WIDE_INT splat_val;
6029 HOST_WIDE_INT msb_val;
6030
6031 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6032 return false;
6033
6034 nunits = GET_MODE_NUNITS (mode);
6035 bitsize = GET_MODE_BITSIZE (inner);
6036 mask = GET_MODE_MASK (inner);
6037
6038 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6039 splat_val = val;
6040 msb_val = val >= 0 ? 0 : -1;
6041
6042 /* Construct the value to be splatted, if possible. If not, return false. */
6043 for (i = 2; i <= copies; i *= 2)
6044 {
6045 HOST_WIDE_INT small_val;
6046 bitsize /= 2;
6047 small_val = splat_val >> bitsize;
6048 mask >>= bitsize;
6049 if (splat_val != ((HOST_WIDE_INT)
6050 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6051 | (small_val & mask)))
6052 return false;
6053 splat_val = small_val;
6054 }
6055
6056 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6057 if (EASY_VECTOR_15 (splat_val))
6058 ;
6059
6060 /* Also check if we can splat, and then add the result to itself. Do so if
6061 the value is positive, or if the splat instruction is using OP's mode;
6062 for splat_val < 0, the splat and the add should use the same mode. */
6063 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6064 && (splat_val >= 0 || (step == 1 && copies == 1)))
6065 ;
6066
6067 /* Also check if we are loading up the most significant bit, which can be
6068 done by loading up -1 and shifting the value left by -1. */
6069 else if (EASY_VECTOR_MSB (splat_val, inner))
6070 ;
6071
6072 else
6073 return false;
6074
6075 /* Check if VAL is present in every STEP-th element, and the
6076 other elements are filled with its most significant bit. */
6077 for (i = 1; i < nunits; ++i)
6078 {
6079 HOST_WIDE_INT desired_val;
6080 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6081 if ((i & (step - 1)) == 0)
6082 desired_val = val;
6083 else
6084 desired_val = msb_val;
6085
6086 if (desired_val != const_vector_elt_as_int (op, elt))
6087 return false;
6088 }
6089
6090 return true;
6091 }
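/* Worked example: a V4SImode vector of four copies of 0x00050005 is
   recognized with step == 1, copies == 2.  Folding the 32-bit element
   in half gives splat_val == 5, EASY_VECTOR_15 (5) holds, and every
   element matches, so the constant can be generated as vspltish 5
   viewed through V8HImode.  */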
6092
6093 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6094 instruction, filling in the bottom elements with 0 or -1.
6095
6096 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6097 for the number of zeroes to shift in, or negative for the number of 0xff
6098 bytes to shift in.
6099
6100 OP is a CONST_VECTOR. */
6101
6102 int
6103 vspltis_shifted (rtx op)
6104 {
6105 machine_mode mode = GET_MODE (op);
6106 machine_mode inner = GET_MODE_INNER (mode);
6107
6108 unsigned i, j;
6109 unsigned nunits;
6110 unsigned mask;
6111
6112 HOST_WIDE_INT val;
6113
6114 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6115 return 0;
6116
6117 /* We need to create pseudo registers to do the shift, so don't recognize
6118 shift vector constants after reload. */
6119 if (!can_create_pseudo_p ())
6120 return 0;
6121
6122 nunits = GET_MODE_NUNITS (mode);
6123 mask = GET_MODE_MASK (inner);
6124
6125 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6126
6127 /* Check if the value can really be the operand of a vspltis[bhw]. */
6128 if (EASY_VECTOR_15 (val))
6129 ;
6130
6131 /* Also check if we are loading up the most significant bit, which can be
6132 done by loading up -1 and shifting the value left by -1. */
6133 else if (EASY_VECTOR_MSB (val, inner))
6134 ;
6135
6136 else
6137 return 0;
6138
6139 /* Check if VAL is present in every STEP-th element until we find elements
6140 that are 0 or all 1 bits. */
6141 for (i = 1; i < nunits; ++i)
6142 {
6143 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6144 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6145
6146 /* If the value isn't the splat value, check for the remaining elements
6147 being 0/-1. */
6148 if (val != elt_val)
6149 {
6150 if (elt_val == 0)
6151 {
6152 for (j = i+1; j < nunits; ++j)
6153 {
6154 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6155 if (const_vector_elt_as_int (op, elt2) != 0)
6156 return 0;
6157 }
6158
6159 return (nunits - i) * GET_MODE_SIZE (inner);
6160 }
6161
6162 else if ((elt_val & mask) == mask)
6163 {
6164 for (j = i+1; j < nunits; ++j)
6165 {
6166 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6167 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6168 return 0;
6169 }
6170
6171 return -((nunits - i) * GET_MODE_SIZE (inner));
6172 }
6173
6174 else
6175 return 0;
6176 }
6177 }
6178
6179 /* If all elements are equal, we don't need to do VLSDOI. */
6180 return 0;
6181 }
6182
6183
6184 /* Return true if OP is of the given MODE and can be synthesized
6185 with a vspltisb, vspltish or vspltisw. */
6186
6187 bool
6188 easy_altivec_constant (rtx op, machine_mode mode)
6189 {
6190 unsigned step, copies;
6191
6192 if (mode == VOIDmode)
6193 mode = GET_MODE (op);
6194 else if (mode != GET_MODE (op))
6195 return false;
6196
6197 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6198 constants. */
6199 if (mode == V2DFmode)
6200 return zero_constant (op, mode);
6201
6202 else if (mode == V2DImode)
6203 {
6204 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6205 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6206 return false;
6207
6208 if (zero_constant (op, mode))
6209 return true;
6210
6211 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6212 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6213 return true;
6214
6215 return false;
6216 }
6217
6218 /* V1TImode is a special container for TImode. Ignore for now. */
6219 else if (mode == V1TImode)
6220 return false;
6221
6222 /* Start with a vspltisw. */
6223 step = GET_MODE_NUNITS (mode) / 4;
6224 copies = 1;
6225
6226 if (vspltis_constant (op, step, copies))
6227 return true;
6228
6229 /* Then try with a vspltish. */
6230 if (step == 1)
6231 copies <<= 1;
6232 else
6233 step >>= 1;
6234
6235 if (vspltis_constant (op, step, copies))
6236 return true;
6237
6238 /* And finally a vspltisb. */
6239 if (step == 1)
6240 copies <<= 1;
6241 else
6242 step >>= 1;
6243
6244 if (vspltis_constant (op, step, copies))
6245 return true;
6246
6247 if (vspltis_shifted (op) != 0)
6248 return true;
6249
6250 return false;
6251 }
6252
6253 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6254 result is OP. Abort if it is not possible. */
6255
6256 rtx
6257 gen_easy_altivec_constant (rtx op)
6258 {
6259 machine_mode mode = GET_MODE (op);
6260 int nunits = GET_MODE_NUNITS (mode);
6261 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6262 unsigned step = nunits / 4;
6263 unsigned copies = 1;
6264
6265 /* Start with a vspltisw. */
6266 if (vspltis_constant (op, step, copies))
6267 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6268
6269 /* Then try with a vspltish. */
6270 if (step == 1)
6271 copies <<= 1;
6272 else
6273 step >>= 1;
6274
6275 if (vspltis_constant (op, step, copies))
6276 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6277
6278 /* And finally a vspltisb. */
6279 if (step == 1)
6280 copies <<= 1;
6281 else
6282 step >>= 1;
6283
6284 if (vspltis_constant (op, step, copies))
6285 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6286
6287 gcc_unreachable ();
6288 }
6289
6290 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6291 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6292
6293 Return the number of instructions needed (1 or 2) through the address
6294 pointed to by NUM_INSNS_PTR.
6295
6296 Return the constant that is being split via CONSTANT_PTR. */
6297
6298 bool
6299 xxspltib_constant_p (rtx op,
6300 machine_mode mode,
6301 int *num_insns_ptr,
6302 int *constant_ptr)
6303 {
6304 size_t nunits = GET_MODE_NUNITS (mode);
6305 size_t i;
6306 HOST_WIDE_INT value;
6307 rtx element;
6308
6309 /* Set the returned values to out-of-range sentinel values. */
6310 *num_insns_ptr = -1;
6311 *constant_ptr = 256;
6312
6313 if (!TARGET_P9_VECTOR)
6314 return false;
6315
6316 if (mode == VOIDmode)
6317 mode = GET_MODE (op);
6318
6319 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6320 return false;
6321
6322 /* Handle (vec_duplicate <constant>). */
6323 if (GET_CODE (op) == VEC_DUPLICATE)
6324 {
6325 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6326 && mode != V2DImode)
6327 return false;
6328
6329 element = XEXP (op, 0);
6330 if (!CONST_INT_P (element))
6331 return false;
6332
6333 value = INTVAL (element);
6334 if (!IN_RANGE (value, -128, 127))
6335 return false;
6336 }
6337
6338 /* Handle (const_vector [...]). */
6339 else if (GET_CODE (op) == CONST_VECTOR)
6340 {
6341 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6342 && mode != V2DImode)
6343 return false;
6344
6345 element = CONST_VECTOR_ELT (op, 0);
6346 if (!CONST_INT_P (element))
6347 return false;
6348
6349 value = INTVAL (element);
6350 if (!IN_RANGE (value, -128, 127))
6351 return false;
6352
6353 for (i = 1; i < nunits; i++)
6354 {
6355 element = CONST_VECTOR_ELT (op, i);
6356 if (!CONST_INT_P (element))
6357 return false;
6358
6359 if (value != INTVAL (element))
6360 return false;
6361 }
6362 }
6363
6364 /* Handle integer constants being loaded into the upper part of the VSX
6365 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6366 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6367 else if (CONST_INT_P (op))
6368 {
6369 if (!SCALAR_INT_MODE_P (mode))
6370 return false;
6371
6372 value = INTVAL (op);
6373 if (!IN_RANGE (value, -128, 127))
6374 return false;
6375
6376 if (!IN_RANGE (value, -1, 0))
6377 {
6378 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6379 return false;
6380
6381 if (EASY_VECTOR_15 (value))
6382 return false;
6383 }
6384 }
6385
6386 else
6387 return false;
6388
6389 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6390 sign extend. Special case 0/-1 to allow getting any VSX register instead
6391 of an Altivec register. */
6392 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6393 && EASY_VECTOR_15 (value))
6394 return false;
6395
6396 /* Return # of instructions and the constant byte for XXSPLTIB. */
6397 if (mode == V16QImode)
6398 *num_insns_ptr = 1;
6399
6400 else if (IN_RANGE (value, -1, 0))
6401 *num_insns_ptr = 1;
6402
6403 else
6404 *num_insns_ptr = 2;
6405
6406 *constant_ptr = (int) value;
6407 return true;
6408 }
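/* Example (assuming TARGET_P9_VECTOR): a V8HImode splat of 18 cannot
   use vspltish directly, since EASY_VECTOR_15 only covers -16..15,
   but 18 fits in a signed byte, so this returns true with
   *num_insns_ptr == 2 and *constant_ptr == 18: xxspltib splats the
   byte and a sign-extending unpack such as vupkhsb widens it to
   halfwords.  */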
6409
6410 const char *
6411 output_vec_const_move (rtx *operands)
6412 {
6413 int shift;
6414 machine_mode mode;
6415 rtx dest, vec;
6416
6417 dest = operands[0];
6418 vec = operands[1];
6419 mode = GET_MODE (dest);
6420
6421 if (TARGET_VSX)
6422 {
6423 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6424 int xxspltib_value = 256;
6425 int num_insns = -1;
6426
6427 if (zero_constant (vec, mode))
6428 {
6429 if (TARGET_P9_VECTOR)
6430 return "xxspltib %x0,0";
6431
6432 else if (dest_vmx_p)
6433 return "vspltisw %0,0";
6434
6435 else
6436 return "xxlxor %x0,%x0,%x0";
6437 }
6438
6439 if (all_ones_constant (vec, mode))
6440 {
6441 if (TARGET_P9_VECTOR)
6442 return "xxspltib %x0,255";
6443
6444 else if (dest_vmx_p)
6445 return "vspltisw %0,-1";
6446
6447 else if (TARGET_P8_VECTOR)
6448 return "xxlorc %x0,%x0,%x0";
6449
6450 else
6451 gcc_unreachable ();
6452 }
6453
6454 if (TARGET_P9_VECTOR
6455 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6456 {
6457 if (num_insns == 1)
6458 {
6459 operands[2] = GEN_INT (xxspltib_value & 0xff);
6460 return "xxspltib %x0,%2";
6461 }
6462
6463 return "#";
6464 }
6465 }
6466
6467 if (TARGET_ALTIVEC)
6468 {
6469 rtx splat_vec;
6470
6471 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6472 if (zero_constant (vec, mode))
6473 return "vspltisw %0,0";
6474
6475 if (all_ones_constant (vec, mode))
6476 return "vspltisw %0,-1";
6477
6478 /* Do we need to construct a value using VSLDOI? */
6479 shift = vspltis_shifted (vec);
6480 if (shift != 0)
6481 return "#";
6482
6483 splat_vec = gen_easy_altivec_constant (vec);
6484 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6485 operands[1] = XEXP (splat_vec, 0);
6486 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6487 return "#";
6488
6489 switch (GET_MODE (splat_vec))
6490 {
6491 case E_V4SImode:
6492 return "vspltisw %0,%1";
6493
6494 case E_V8HImode:
6495 return "vspltish %0,%1";
6496
6497 case E_V16QImode:
6498 return "vspltisb %0,%1";
6499
6500 default:
6501 gcc_unreachable ();
6502 }
6503 }
6504
6505 gcc_unreachable ();
6506 }
6507
6508 /* Initialize vector TARGET to VALS. */
6509
6510 void
6511 rs6000_expand_vector_init (rtx target, rtx vals)
6512 {
6513 machine_mode mode = GET_MODE (target);
6514 machine_mode inner_mode = GET_MODE_INNER (mode);
6515 int n_elts = GET_MODE_NUNITS (mode);
6516 int n_var = 0, one_var = -1;
6517 bool all_same = true, all_const_zero = true;
6518 rtx x, mem;
6519 int i;
6520
6521 for (i = 0; i < n_elts; ++i)
6522 {
6523 x = XVECEXP (vals, 0, i);
6524 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6525 ++n_var, one_var = i;
6526 else if (x != CONST0_RTX (inner_mode))
6527 all_const_zero = false;
6528
6529 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6530 all_same = false;
6531 }
6532
6533 if (n_var == 0)
6534 {
6535 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6536 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6537 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6538 {
6539 /* Zero register. */
6540 emit_move_insn (target, CONST0_RTX (mode));
6541 return;
6542 }
6543 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6544 {
6545 /* Splat immediate. */
6546 emit_insn (gen_rtx_SET (target, const_vec));
6547 return;
6548 }
6549 else
6550 {
6551 /* Load from constant pool. */
6552 emit_move_insn (target, const_vec);
6553 return;
6554 }
6555 }
6556
6557 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6558 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6559 {
6560 rtx op[2];
6561 size_t i;
6562 size_t num_elements = all_same ? 1 : 2;
6563 for (i = 0; i < num_elements; i++)
6564 {
6565 op[i] = XVECEXP (vals, 0, i);
6566 /* Just in case there is a SUBREG with a smaller mode, do a
6567 conversion. */
6568 if (GET_MODE (op[i]) != inner_mode)
6569 {
6570 rtx tmp = gen_reg_rtx (inner_mode);
6571 convert_move (tmp, op[i], 0);
6572 op[i] = tmp;
6573 }
6574 /* Allow load with splat double word. */
6575 else if (MEM_P (op[i]))
6576 {
6577 if (!all_same)
6578 op[i] = force_reg (inner_mode, op[i]);
6579 }
6580 else if (!REG_P (op[i]))
6581 op[i] = force_reg (inner_mode, op[i]);
6582 }
6583
6584 if (all_same)
6585 {
6586 if (mode == V2DFmode)
6587 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6588 else
6589 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6590 }
6591 else
6592 {
6593 if (mode == V2DFmode)
6594 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6595 else
6596 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6597 }
6598 return;
6599 }
6600
6601 /* Special case initializing vector int if we are on a 64-bit system with
6602 direct move, or we have the ISA 3.0 instructions. */
6603 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6604 && TARGET_DIRECT_MOVE_64BIT)
6605 {
6606 if (all_same)
6607 {
6608 rtx element0 = XVECEXP (vals, 0, 0);
6609 if (MEM_P (element0))
6610 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6611 else
6612 element0 = force_reg (SImode, element0);
6613
6614 if (TARGET_P9_VECTOR)
6615 emit_insn (gen_vsx_splat_v4si (target, element0));
6616 else
6617 {
6618 rtx tmp = gen_reg_rtx (DImode);
6619 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6620 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6621 }
6622 return;
6623 }
6624 else
6625 {
6626 rtx elements[4];
6627 size_t i;
6628
6629 for (i = 0; i < 4; i++)
6630 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6631
6632 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6633 elements[2], elements[3]));
6634 return;
6635 }
6636 }
6637
6638 /* With single-precision floating point on VSX, note that internally single
6639 precision is actually represented as a double, so either make 2 V2DF
6640 vectors and convert these vectors to single precision, or do one
6641 conversion and splat the result to the other elements. */
6642 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6643 {
6644 if (all_same)
6645 {
6646 rtx element0 = XVECEXP (vals, 0, 0);
6647
6648 if (TARGET_P9_VECTOR)
6649 {
6650 if (MEM_P (element0))
6651 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6652
6653 emit_insn (gen_vsx_splat_v4sf (target, element0));
6654 }
6655
6656 else
6657 {
6658 rtx freg = gen_reg_rtx (V4SFmode);
6659 rtx sreg = force_reg (SFmode, element0);
6660 rtx cvt = (TARGET_XSCVDPSPN
6661 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6662 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6663
6664 emit_insn (cvt);
6665 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6666 const0_rtx));
6667 }
6668 }
6669 else
6670 {
6671 rtx dbl_even = gen_reg_rtx (V2DFmode);
6672 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6673 rtx flt_even = gen_reg_rtx (V4SFmode);
6674 rtx flt_odd = gen_reg_rtx (V4SFmode);
6675 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6676 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6677 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6678 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6679
6680 /* Use VMRGEW if we can instead of doing a permute. */
6681 if (TARGET_P8_VECTOR)
6682 {
6683 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6684 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6685 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6686 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6687 if (BYTES_BIG_ENDIAN)
6688 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6689 else
6690 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6691 }
6692 else
6693 {
6694 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6695 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6696 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6697 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6698 rs6000_expand_extract_even (target, flt_even, flt_odd);
6699 }
6700 }
6701 return;
6702 }
6703
6704 /* Special case initializing vectors of short/char that are splats, if we
6705 are on a 64-bit system with direct move. */
6706 if (all_same && TARGET_DIRECT_MOVE_64BIT
6707 && (mode == V16QImode || mode == V8HImode))
6708 {
6709 rtx op0 = XVECEXP (vals, 0, 0);
6710 rtx di_tmp = gen_reg_rtx (DImode);
6711
6712 if (!REG_P (op0))
6713 op0 = force_reg (GET_MODE_INNER (mode), op0);
6714
6715 if (mode == V16QImode)
6716 {
6717 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6718 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6719 return;
6720 }
6721
6722 if (mode == V8HImode)
6723 {
6724 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6725 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6726 return;
6727 }
6728 }
6729
6730 /* Store value to stack temp. Load vector element. Splat. However, splat
6731 of 64-bit items is not supported on Altivec. */
6732 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6733 {
6734 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6735 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6736 XVECEXP (vals, 0, 0));
6737 x = gen_rtx_UNSPEC (VOIDmode,
6738 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6739 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6740 gen_rtvec (2,
6741 gen_rtx_SET (target, mem),
6742 x)));
6743 x = gen_rtx_VEC_SELECT (inner_mode, target,
6744 gen_rtx_PARALLEL (VOIDmode,
6745 gen_rtvec (1, const0_rtx)));
6746 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6747 return;
6748 }
6749
6750 /* One field is non-constant. Load constant then overwrite
6751 varying field. */
6752 if (n_var == 1)
6753 {
6754 rtx copy = copy_rtx (vals);
6755
6756 /* Load constant part of vector, substitute neighboring value for
6757 varying element. */
6758 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6759 rs6000_expand_vector_init (target, copy);
6760
6761 /* Insert variable. */
6762 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6763 return;
6764 }
6765
6766 /* Construct the vector in memory one field at a time
6767 and load the whole vector. */
6768 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6769 for (i = 0; i < n_elts; i++)
6770 emit_move_insn (adjust_address_nv (mem, inner_mode,
6771 i * GET_MODE_SIZE (inner_mode)),
6772 XVECEXP (vals, 0, i));
6773 emit_move_insn (target, mem);
6774 }
6775
6776 /* Set field ELT of TARGET to VAL. */
6777
6778 void
6779 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6780 {
6781 machine_mode mode = GET_MODE (target);
6782 machine_mode inner_mode = GET_MODE_INNER (mode);
6783 rtx reg = gen_reg_rtx (mode);
6784 rtx mask, mem, x;
6785 int width = GET_MODE_SIZE (inner_mode);
6786 int i;
6787
6788 val = force_reg (GET_MODE (val), val);
6789
6790 if (VECTOR_MEM_VSX_P (mode))
6791 {
6792 rtx insn = NULL_RTX;
6793 rtx elt_rtx = GEN_INT (elt);
6794
6795 if (mode == V2DFmode)
6796 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6797
6798 else if (mode == V2DImode)
6799 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6800
6801 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6802 {
6803 if (mode == V4SImode)
6804 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6805 else if (mode == V8HImode)
6806 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6807 else if (mode == V16QImode)
6808 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6809 else if (mode == V4SFmode)
6810 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6811 }
6812
6813 if (insn)
6814 {
6815 emit_insn (insn);
6816 return;
6817 }
6818 }
6819
6820 /* Simplify setting single-element vectors like V1TImode. */
6821 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6822 {
6823 emit_move_insn (target, gen_lowpart (mode, val));
6824 return;
6825 }
6826
6827 /* Load single variable value. */
6828 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6829 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6830 x = gen_rtx_UNSPEC (VOIDmode,
6831 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6832 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6833 gen_rtvec (2,
6834 gen_rtx_SET (reg, mem),
6835 x)));
6836
6837 /* Linear sequence. */
6838 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6839 for (i = 0; i < 16; ++i)
6840 XVECEXP (mask, 0, i) = GEN_INT (i);
6841
6842 /* Set permute mask to insert element into target. */
6843 for (i = 0; i < width; ++i)
6844 XVECEXP (mask, 0, elt*width + i)
6845 = GEN_INT (i + 0x10);
6846 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6847
6848 if (BYTES_BIG_ENDIAN)
6849 x = gen_rtx_UNSPEC (mode,
6850 gen_rtvec (3, target, reg,
6851 force_reg (V16QImode, x)),
6852 UNSPEC_VPERM);
6853 else
6854 {
6855 if (TARGET_P9_VECTOR)
6856 x = gen_rtx_UNSPEC (mode,
6857 gen_rtvec (3, reg, target,
6858 force_reg (V16QImode, x)),
6859 UNSPEC_VPERMR);
6860 else
6861 {
6862 /* Invert selector. We prefer to generate VNAND on P8 so
6863 that future fusion opportunities can kick in, but must
6864 generate VNOR elsewhere. */
6865 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6866 rtx iorx = (TARGET_P8_VECTOR
6867 ? gen_rtx_IOR (V16QImode, notx, notx)
6868 : gen_rtx_AND (V16QImode, notx, notx));
6869 rtx tmp = gen_reg_rtx (V16QImode);
6870 emit_insn (gen_rtx_SET (tmp, iorx));
6871
6872 /* Permute with operands reversed and adjusted selector. */
6873 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6874 UNSPEC_VPERM);
6875 }
6876 }
6877
6878 emit_insn (gen_rtx_SET (target, x));
6879 }
6880
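/* As a worked example of the permute path above: inserting element 2 of a
   V4SImode vector (WIDTH = 4) on a big-endian system builds the selector

     { 0, 1, 2, 3, 4, 5, 6, 7, 0x10, 0x11, 0x12, 0x13, 12, 13, 14, 15 }

   so the VPERM takes bytes 8..11 (element 2) from REG, whose first element
   holds VAL after the load above, and every other byte from TARGET.  On
   little-endian systems the operands are swapped and the selector is
   inverted (or VPERMR is used on ISA 3.0) to get the same effect.  */
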
6881 /* Extract field ELT from VEC into TARGET. */
6882
6883 void
6884 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6885 {
6886 machine_mode mode = GET_MODE (vec);
6887 machine_mode inner_mode = GET_MODE_INNER (mode);
6888 rtx mem;
6889
6890 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6891 {
6892 switch (mode)
6893 {
6894 default:
6895 break;
6896 case E_V1TImode:
6897 emit_move_insn (target, gen_lowpart (TImode, vec));
6898 break;
6899 case E_V2DFmode:
6900 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6901 return;
6902 case E_V2DImode:
6903 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6904 return;
6905 case E_V4SFmode:
6906 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6907 return;
6908 case E_V16QImode:
6909 if (TARGET_DIRECT_MOVE_64BIT)
6910 {
6911 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6912 return;
6913 }
6914 else
6915 break;
6916 case E_V8HImode:
6917 if (TARGET_DIRECT_MOVE_64BIT)
6918 {
6919 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6920 return;
6921 }
6922 else
6923 break;
6924 case E_V4SImode:
6925 if (TARGET_DIRECT_MOVE_64BIT)
6926 {
6927 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6928 return;
6929 }
6930 break;
6931 }
6932 }
6933 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6934 && TARGET_DIRECT_MOVE_64BIT)
6935 {
6936 if (GET_MODE (elt) != DImode)
6937 {
6938 rtx tmp = gen_reg_rtx (DImode);
6939 convert_move (tmp, elt, 0);
6940 elt = tmp;
6941 }
6942 else if (!REG_P (elt))
6943 elt = force_reg (DImode, elt);
6944
6945 switch (mode)
6946 {
6947 case E_V2DFmode:
6948 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6949 return;
6950
6951 case E_V2DImode:
6952 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6953 return;
6954
6955 case E_V4SFmode:
6956 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6957 return;
6958
6959 case E_V4SImode:
6960 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6961 return;
6962
6963 case E_V8HImode:
6964 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6965 return;
6966
6967 case E_V16QImode:
6968 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6969 return;
6970
6971 default:
6972 gcc_unreachable ();
6973 }
6974 }
6975
6976 /* Allocate mode-sized buffer. */
6977 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6978
6979 emit_move_insn (mem, vec);
6980 if (CONST_INT_P (elt))
6981 {
6982 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6983
6984 /* Add offset to field within buffer matching vector element. */
6985 mem = adjust_address_nv (mem, inner_mode,
6986 modulo_elt * GET_MODE_SIZE (inner_mode));
6987 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6988 }
6989 else
6990 {
6991 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6992 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
6993 rtx new_addr = gen_reg_rtx (Pmode);
6994
6995 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
6996 if (ele_size > 1)
6997 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
6998 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
6999 new_addr = change_address (mem, inner_mode, new_addr);
7000 emit_move_insn (target, new_addr);
7001 }
7002 }
7003
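/* For example, extracting a constant element via the memory path above:
   a V8HImode extract of element 9 reduces to MODULO_ELT = 9 % 8 = 1, so
   the scalar is loaded from byte offset 1 * 2 = 2 of the stack temporary.
   For a variable element number, the AND with GET_MODE_NUNITS - 1 performs
   the same modulo reduction, and the MULT by the element size scales the
   result into a byte offset added onto the buffer address.  */
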
7004 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7005 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7006 temporary (BASE_TMP) to fixup the address. Return the new memory address
7007 that is valid for reads or writes to a given register (SCALAR_REG). */
7008
7009 rtx
7010 rs6000_adjust_vec_address (rtx scalar_reg,
7011 rtx mem,
7012 rtx element,
7013 rtx base_tmp,
7014 machine_mode scalar_mode)
7015 {
7016 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7017 rtx addr = XEXP (mem, 0);
7018 rtx element_offset;
7019 rtx new_addr;
7020 bool valid_addr_p;
7021
7022 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7023 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7024
7025 /* Calculate what we need to add to the address to get the element
7026 address. */
7027 if (CONST_INT_P (element))
7028 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7029 else
7030 {
7031 int byte_shift = exact_log2 (scalar_size);
7032 gcc_assert (byte_shift >= 0);
7033
7034 if (byte_shift == 0)
7035 element_offset = element;
7036
7037 else
7038 {
7039 if (TARGET_POWERPC64)
7040 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7041 else
7042 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7043
7044 element_offset = base_tmp;
7045 }
7046 }
7047
7048 /* Create the new address pointing to the element within the vector. If we
7049 are adding 0, we don't have to change the address. */
7050 if (element_offset == const0_rtx)
7051 new_addr = addr;
7052
7053 /* A simple indirect address can be converted into a reg + offset
7054 address. */
7055 else if (REG_P (addr) || SUBREG_P (addr))
7056 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7057
7058 /* Optimize D-FORM addresses with a constant offset and a constant element
7059 number, folding the element offset directly into the address. */
7060 else if (GET_CODE (addr) == PLUS)
7061 {
7062 rtx op0 = XEXP (addr, 0);
7063 rtx op1 = XEXP (addr, 1);
7064 rtx insn;
7065
7066 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7067 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7068 {
7069 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7070 rtx offset_rtx = GEN_INT (offset);
7071
7072 if (IN_RANGE (offset, -32768, 32767)
7073 && (scalar_size < 8 || (offset & 0x3) == 0))
7074 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7075 else
7076 {
7077 emit_move_insn (base_tmp, offset_rtx);
7078 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7079 }
7080 }
7081 else
7082 {
7083 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7084 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7085
7086 /* Note that ADDI requires the register being added to be a base
7087 register. If the register was R0, load it into the temporary
7088 and do the add. */
7089 if (op1_reg_p
7090 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7091 {
7092 insn = gen_add3_insn (base_tmp, op1, element_offset);
7093 gcc_assert (insn != NULL_RTX);
7094 emit_insn (insn);
7095 }
7096
7097 else if (ele_reg_p
7098 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7099 {
7100 insn = gen_add3_insn (base_tmp, element_offset, op1);
7101 gcc_assert (insn != NULL_RTX);
7102 emit_insn (insn);
7103 }
7104
7105 else
7106 {
7107 emit_move_insn (base_tmp, op1);
7108 emit_insn (gen_add2_insn (base_tmp, element_offset));
7109 }
7110
7111 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7112 }
7113 }
7114
7115 else
7116 {
7117 emit_move_insn (base_tmp, addr);
7118 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7119 }
7120
7121 /* If we have a PLUS, we need to see whether the particular register class
7122 allows for D-FORM or X-FORM addressing. */
7123 if (GET_CODE (new_addr) == PLUS)
7124 {
7125 rtx op1 = XEXP (new_addr, 1);
7126 addr_mask_type addr_mask;
7127 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7128
7129 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7130 if (INT_REGNO_P (scalar_regno))
7131 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7132
7133 else if (FP_REGNO_P (scalar_regno))
7134 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7135
7136 else if (ALTIVEC_REGNO_P (scalar_regno))
7137 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7138
7139 else
7140 gcc_unreachable ();
7141
7142 if (REG_P (op1) || SUBREG_P (op1))
7143 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7144 else
7145 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7146 }
7147
7148 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7149 valid_addr_p = true;
7150
7151 else
7152 valid_addr_p = false;
7153
7154 if (!valid_addr_p)
7155 {
7156 emit_move_insn (base_tmp, new_addr);
7157 new_addr = base_tmp;
7158 }
7159
7160 return change_address (mem, scalar_mode, new_addr);
7161 }
7162
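/* As a worked example of the constant-offset case above: if MEM's address
   is (plus r9 48) and we want element 3 of a V4SImode vector
   (SCALAR_SIZE = 4), ELEMENT_OFFSET is 12 and the combined offset
   48 + 12 = 60 fits the signed 16-bit displacement, so the result is
   simply (plus r9 60).  An out-of-range combined offset is instead loaded
   into BASE_TMP, giving (plus r9 base_tmp).  Note that 8-byte scalars
   additionally require the combined offset to be a multiple of 4.  */
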
7163 /* Split a variable vec_extract operation into the component instructions. */
7164
7165 void
7166 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7167 rtx tmp_altivec)
7168 {
7169 machine_mode mode = GET_MODE (src);
7170 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
7171 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7172 int byte_shift = exact_log2 (scalar_size);
7173
7174 gcc_assert (byte_shift >= 0);
7175
7176 /* If we are given a memory address, optimize to load just the element. We
7177 don't have to adjust the vector element number on little endian
7178 systems. */
7179 if (MEM_P (src))
7180 {
7181 int num_elements = GET_MODE_NUNITS (mode);
7182 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7183
7184 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7185 gcc_assert (REG_P (tmp_gpr));
7186 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7187 tmp_gpr, scalar_mode));
7188 return;
7189 }
7190
7191 else if (REG_P (src) || SUBREG_P (src))
7192 {
7193 int num_elements = GET_MODE_NUNITS (mode);
7194 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7195 int bit_shift = 7 - exact_log2 (num_elements);
7196 rtx element2;
7197 unsigned int dest_regno = reg_or_subregno (dest);
7198 unsigned int src_regno = reg_or_subregno (src);
7199 unsigned int element_regno = reg_or_subregno (element);
7200
7201 gcc_assert (REG_P (tmp_gpr));
7202
7203 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7204 a general purpose register. */
7205 if (TARGET_P9_VECTOR
7206 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7207 && INT_REGNO_P (dest_regno)
7208 && ALTIVEC_REGNO_P (src_regno)
7209 && INT_REGNO_P (element_regno))
7210 {
7211 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7212 rtx element_si = gen_rtx_REG (SImode, element_regno);
7213
7214 if (mode == V16QImode)
7215 emit_insn (BYTES_BIG_ENDIAN
7216 ? gen_vextublx (dest_si, element_si, src)
7217 : gen_vextubrx (dest_si, element_si, src));
7218
7219 else if (mode == V8HImode)
7220 {
7221 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7222 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7223 emit_insn (BYTES_BIG_ENDIAN
7224 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7225 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7226 }
7227
7228
7229 else
7230 {
7231 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7232 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7233 emit_insn (BYTES_BIG_ENDIAN
7234 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7235 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7236 }
7237
7238 return;
7239 }
7240
7241
7242 gcc_assert (REG_P (tmp_altivec));
7243
7244 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7245 an XOR, otherwise we need to subtract. The shift amount is chosen so
7246 that VSLO will shift the element into the upper position (adding 3
7247 converts a byte shift into a bit shift). */
7248 if (scalar_size == 8)
7249 {
7250 if (!BYTES_BIG_ENDIAN)
7251 {
7252 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7253 element2 = tmp_gpr;
7254 }
7255 else
7256 element2 = element;
7257
7258 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7259 bit. */
7260 emit_insn (gen_rtx_SET (tmp_gpr,
7261 gen_rtx_AND (DImode,
7262 gen_rtx_ASHIFT (DImode,
7263 element2,
7264 GEN_INT (6)),
7265 GEN_INT (64))));
7266 }
7267 else
7268 {
7269 if (!BYTES_BIG_ENDIAN)
7270 {
7271 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7272
7273 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7274 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7275 element2 = tmp_gpr;
7276 }
7277 else
7278 element2 = element;
7279
7280 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7281 }
7282
7283 /* Get the value into the lower byte of the Altivec register where VSLO
7284 expects it. */
7285 if (TARGET_P9_VECTOR)
7286 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7287 else if (can_create_pseudo_p ())
7288 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7289 else
7290 {
7291 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7292 emit_move_insn (tmp_di, tmp_gpr);
7293 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7294 }
7295
7296 /* Do the VSLO to get the value into the final location. */
7297 switch (mode)
7298 {
7299 case E_V2DFmode:
7300 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7301 return;
7302
7303 case E_V2DImode:
7304 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7305 return;
7306
7307 case E_V4SFmode:
7308 {
7309 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7310 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7311 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7312 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7313 tmp_altivec));
7314
7315 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7316 return;
7317 }
7318
7319 case E_V4SImode:
7320 case E_V8HImode:
7321 case E_V16QImode:
7322 {
7323 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7324 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7325 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7326 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7327 tmp_altivec));
7328 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7329 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7330 GEN_INT (64 - bits_in_element)));
7331 return;
7332 }
7333
7334 default:
7335 gcc_unreachable ();
7336 }
7337
7338 return;
7339 }
7340 else
7341 gcc_unreachable ();
7342 }
7343
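/* To illustrate the register path above on a little-endian V4SImode
   extract: the variable element number E is first converted to its
   big-endian equivalent 3 - (E & 3) and shifted left by
   BIT_SHIFT = 7 - log2 (4) = 5.  The result is the required byte shift
   times 8, the form in which VSLO expects its shift count, so VSLO moves
   the selected word into the most significant position; the final right
   shift by 64 - 32 bits then leaves it in the low word of the
   destination.  */
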
7344 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7345 selects whether the alignment is ABI-mandated, optional, or
7346 both ABI-mandated and optional alignment. */
7347
7348 unsigned int
7349 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7350 {
7351 if (how != align_opt)
7352 {
7353 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7354 align = 128;
7355 }
7356
7357 if (how != align_abi)
7358 {
7359 if (TREE_CODE (type) == ARRAY_TYPE
7360 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7361 {
7362 if (align < BITS_PER_WORD)
7363 align = BITS_PER_WORD;
7364 }
7365 }
7366
7367 return align;
7368 }
7369
7370 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7371 instructions simply ignore the low bits; VSX memory instructions
7372 are aligned to 4 or 8 bytes. */
7373
7374 static bool
7375 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7376 {
7377 return (STRICT_ALIGNMENT
7378 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7379 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7380 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7381 && (int) align < VECTOR_ALIGN (mode)))));
7382 }
7383
7384 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7385
7386 bool
7387 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7388 {
7389 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7390 {
7391 if (computed != 128)
7392 {
7393 static bool warned;
7394 if (!warned && warn_psabi)
7395 {
7396 warned = true;
7397 inform (input_location,
7398 "the layout of aggregates containing vectors with"
7399 " %d-byte alignment has changed in GCC 5",
7400 computed / BITS_PER_UNIT);
7401 }
7402 }
7403 /* In current GCC there is no special case. */
7404 return false;
7405 }
7406
7407 return false;
7408 }
7409
7410 /* AIX increases natural record alignment to doubleword if the first
7411 field is an FP double while the FP fields remain word aligned. */
7412
7413 unsigned int
7414 rs6000_special_round_type_align (tree type, unsigned int computed,
7415 unsigned int specified)
7416 {
7417 unsigned int align = MAX (computed, specified);
7418 tree field = TYPE_FIELDS (type);
7419
7420 /* Skip all non-field decls. */
7421 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7422 field = DECL_CHAIN (field);
7423
7424 if (field != NULL && field != type)
7425 {
7426 type = TREE_TYPE (field);
7427 while (TREE_CODE (type) == ARRAY_TYPE)
7428 type = TREE_TYPE (type);
7429
7430 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7431 align = MAX (align, 64);
7432 }
7433
7434 return align;
7435 }
7436
7437 /* Darwin increases record alignment to the natural alignment of
7438 the first field. */
7439
7440 unsigned int
7441 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7442 unsigned int specified)
7443 {
7444 unsigned int align = MAX (computed, specified);
7445
7446 if (TYPE_PACKED (type))
7447 return align;
7448
7449 /* Find the first field, looking down into aggregates. */
7450 do {
7451 tree field = TYPE_FIELDS (type);
7452 /* Skip all non-field decls. */
7453 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7454 field = DECL_CHAIN (field);
7455 if (! field)
7456 break;
7457 /* A packed field does not contribute any extra alignment. */
7458 if (DECL_PACKED (field))
7459 return align;
7460 type = TREE_TYPE (field);
7461 while (TREE_CODE (type) == ARRAY_TYPE)
7462 type = TREE_TYPE (type);
7463 } while (AGGREGATE_TYPE_P (type));
7464
7465 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7466 align = MAX (align, TYPE_ALIGN (type));
7467
7468 return align;
7469 }
7470
7471 /* Return 1 for an operand in small memory on V.4/eabi. */
7472
7473 int
7474 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7475 machine_mode mode ATTRIBUTE_UNUSED)
7476 {
7477 #if TARGET_ELF
7478 rtx sym_ref;
7479
7480 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7481 return 0;
7482
7483 if (DEFAULT_ABI != ABI_V4)
7484 return 0;
7485
7486 if (SYMBOL_REF_P (op))
7487 sym_ref = op;
7488
7489 else if (GET_CODE (op) != CONST
7490 || GET_CODE (XEXP (op, 0)) != PLUS
7491 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7492 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7493 return 0;
7494
7495 else
7496 {
7497 rtx sum = XEXP (op, 0);
7498 HOST_WIDE_INT summand;
7499
7500 /* We have to be careful here, because it is the referenced address
7501 that must be 32k from _SDA_BASE_, not just the symbol. */
7502 summand = INTVAL (XEXP (sum, 1));
7503 if (summand < 0 || summand > g_switch_value)
7504 return 0;
7505
7506 sym_ref = XEXP (sum, 0);
7507 }
7508
7509 return SYMBOL_REF_SMALL_P (sym_ref);
7510 #else
7511 return 0;
7512 #endif
7513 }
7514
7515 /* Return true if either operand is a general purpose register. */
7516
7517 bool
7518 gpr_or_gpr_p (rtx op0, rtx op1)
7519 {
7520 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7521 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7522 }
7523
7524 /* Return true if this is a move direct operation between GPR registers and
7525 floating point/VSX registers. */
7526
7527 bool
7528 direct_move_p (rtx op0, rtx op1)
7529 {
7530 int regno0, regno1;
7531
7532 if (!REG_P (op0) || !REG_P (op1))
7533 return false;
7534
7535 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7536 return false;
7537
7538 regno0 = REGNO (op0);
7539 regno1 = REGNO (op1);
7540 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7541 return false;
7542
7543 if (INT_REGNO_P (regno0))
7544 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7545
7546 else if (INT_REGNO_P (regno1))
7547 {
7548 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7549 return true;
7550
7551 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7552 return true;
7553 }
7554
7555 return false;
7556 }
7557
7558 /* Return true if the OFFSET is valid for the quad address instructions that
7559 use d-form (register + offset) addressing. */
7560
7561 static inline bool
7562 quad_address_offset_p (HOST_WIDE_INT offset)
7563 {
7564 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7565 }
7566
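/* For example, offsets 0, 16 and -32768 are valid DQ-form offsets, while
   8 (not a multiple of 16) and 32768 (outside the signed 16-bit range)
   are not; the largest valid offset is 32752.  */
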
7567 /* Return true if ADDR is an acceptable address for a quad memory
7568 operation of mode MODE (either LQ/STQ for general purpose registers,
7569 or LXV/STXV for vector registers under ISA 3.0). STRICT requests
7570 strict address checking, i.e. only hard registers are accepted as
7571 base registers. */
7572
7573 bool
7574 quad_address_p (rtx addr, machine_mode mode, bool strict)
7575 {
7576 rtx op0, op1;
7577
7578 if (GET_MODE_SIZE (mode) != 16)
7579 return false;
7580
7581 if (legitimate_indirect_address_p (addr, strict))
7582 return true;
7583
7584 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7585 return false;
7586
7587 if (GET_CODE (addr) != PLUS)
7588 return false;
7589
7590 op0 = XEXP (addr, 0);
7591 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7592 return false;
7593
7594 op1 = XEXP (addr, 1);
7595 if (!CONST_INT_P (op1))
7596 return false;
7597
7598 return quad_address_offset_p (INTVAL (op1));
7599 }
7600
7601 /* Return true if this is a load or store quad operation. This function does
7602 not handle the atomic quad memory instructions. */
7603
7604 bool
7605 quad_load_store_p (rtx op0, rtx op1)
7606 {
7607 bool ret;
7608
7609 if (!TARGET_QUAD_MEMORY)
7610 ret = false;
7611
7612 else if (REG_P (op0) && MEM_P (op1))
7613 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7614 && quad_memory_operand (op1, GET_MODE (op1))
7615 && !reg_overlap_mentioned_p (op0, op1));
7616
7617 else if (MEM_P (op0) && REG_P (op1))
7618 ret = (quad_memory_operand (op0, GET_MODE (op0))
7619 && quad_int_reg_operand (op1, GET_MODE (op1)));
7620
7621 else
7622 ret = false;
7623
7624 if (TARGET_DEBUG_ADDR)
7625 {
7626 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7627 ret ? "true" : "false");
7628 debug_rtx (gen_rtx_SET (op0, op1));
7629 }
7630
7631 return ret;
7632 }
7633
7634 /* Given an address, return a constant offset term if one exists. */
7635
7636 static rtx
7637 address_offset (rtx op)
7638 {
7639 if (GET_CODE (op) == PRE_INC
7640 || GET_CODE (op) == PRE_DEC)
7641 op = XEXP (op, 0);
7642 else if (GET_CODE (op) == PRE_MODIFY
7643 || GET_CODE (op) == LO_SUM)
7644 op = XEXP (op, 1);
7645
7646 if (GET_CODE (op) == CONST)
7647 op = XEXP (op, 0);
7648
7649 if (GET_CODE (op) == PLUS)
7650 op = XEXP (op, 1);
7651
7652 if (CONST_INT_P (op))
7653 return op;
7654
7655 return NULL_RTX;
7656 }
7657
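/* For example, address_offset returns 40 for (plus (reg) (const_int 40)),
   16 for (pre_modify (reg) (plus (reg) (const_int 16))), and NULL_RTX for
   a plain (reg) or an indexed (plus (reg) (reg)) address.  */
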
7658 /* Return true if the MEM operand is a memory operand suitable for use
7659 with a (full width, possibly multiple) gpr load/store. On
7660 powerpc64 this means the offset must be divisible by 4.
7661 Implements 'Y' constraint.
7662
7663 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7664 a constraint function we know the operand has satisfied a suitable
7665 memory predicate. Also accept some odd rtl generated by reload
7666 (see rs6000_legitimize_reload_address for various forms). It is
7667 important that reload rtl be accepted by appropriate constraints
7668 but not by the operand predicate.
7669
7670 Offsetting a lo_sum should not be allowed, except where we know by
7671 alignment that a 32k boundary is not crossed, but see the ???
7672 comment in rs6000_legitimize_reload_address. Note that by
7673 "offsetting" here we mean a further offset to access parts of the
7674 MEM. It's fine to have a lo_sum where the inner address is offset
7675 from a sym, since the same sym+offset will appear in the high part
7676 of the address calculation. */
7677
7678 bool
7679 mem_operand_gpr (rtx op, machine_mode mode)
7680 {
7681 unsigned HOST_WIDE_INT offset;
7682 int extra;
7683 rtx addr = XEXP (op, 0);
7684
7685 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7686 if (TARGET_UPDATE
7687 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7688 && mode_supports_pre_incdec_p (mode)
7689 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7690 return true;
7691
7692 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7693 if (!rs6000_offsettable_memref_p (op, mode, false))
7694 return false;
7695
7696 op = address_offset (addr);
7697 if (op == NULL_RTX)
7698 return true;
7699
7700 offset = INTVAL (op);
7701 if (TARGET_POWERPC64 && (offset & 3) != 0)
7702 return false;
7703
7704 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7705 if (extra < 0)
7706 extra = 0;
7707
7708 if (GET_CODE (addr) == LO_SUM)
7709 /* For lo_sum addresses, we must allow any offset except one that
7710 causes a wrap, so test only the low 16 bits. */
7711 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7712
7713 return offset + 0x8000 < 0x10000u - extra;
7714 }
7715
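/* The expression ((offset & 0xffff) ^ 0x8000) - 0x8000 above sign-extends
   the low 16 bits of the offset: a lo_sum offset of 0x8004, for instance,
   becomes -32764, while 0x7ffc is unchanged.  The final test
   offset + 0x8000 < 0x10000u - extra is then an unsigned range check for
   -32768 <= offset <= 32767 - extra, EXTRA reserving room for the last
   word of a multi-word access.  */
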
7716 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7717 enforce an offset divisible by 4 even for 32-bit. */
7718
7719 bool
7720 mem_operand_ds_form (rtx op, machine_mode mode)
7721 {
7722 unsigned HOST_WIDE_INT offset;
7723 int extra;
7724 rtx addr = XEXP (op, 0);
7725
7726 if (!offsettable_address_p (false, mode, addr))
7727 return false;
7728
7729 op = address_offset (addr);
7730 if (op == NULL_RTX)
7731 return true;
7732
7733 offset = INTVAL (op);
7734 if ((offset & 3) != 0)
7735 return false;
7736
7737 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7738 if (extra < 0)
7739 extra = 0;
7740
7741 if (GET_CODE (addr) == LO_SUM)
7742 /* For lo_sum addresses, we must allow any offset except one that
7743 causes a wrap, so test only the low 16 bits. */
7744 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7745
7746 return offset + 0x8000 < 0x10000u - extra;
7747 }
7748 \f
7749 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7750
7751 static bool
7752 reg_offset_addressing_ok_p (machine_mode mode)
7753 {
7754 switch (mode)
7755 {
7756 case E_V16QImode:
7757 case E_V8HImode:
7758 case E_V4SFmode:
7759 case E_V4SImode:
7760 case E_V2DFmode:
7761 case E_V2DImode:
7762 case E_V1TImode:
7763 case E_TImode:
7764 case E_TFmode:
7765 case E_KFmode:
7766 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7767 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7768 a vector mode, if we want to use the VSX registers to move it around,
7769 we need to restrict ourselves to reg+reg addressing. Similarly for
7770 IEEE 128-bit floating point that is passed in a single vector
7771 register. */
7772 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7773 return mode_supports_dq_form (mode);
7774 break;
7775
7776 case E_SDmode:
7777 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7778 addressing for the LFIWZX and STFIWX instructions. */
7779 if (TARGET_NO_SDMODE_STACK)
7780 return false;
7781 break;
7782
7783 default:
7784 break;
7785 }
7786
7787 return true;
7788 }
7789
7790 static bool
7791 virtual_stack_registers_memory_p (rtx op)
7792 {
7793 int regnum;
7794
7795 if (REG_P (op))
7796 regnum = REGNO (op);
7797
7798 else if (GET_CODE (op) == PLUS
7799 && REG_P (XEXP (op, 0))
7800 && CONST_INT_P (XEXP (op, 1)))
7801 regnum = REGNO (XEXP (op, 0));
7802
7803 else
7804 return false;
7805
7806 return (regnum >= FIRST_VIRTUAL_REGISTER
7807 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7808 }
7809
7810 /* Return true if a MODE-sized memory access to OP plus OFFSET
7811 is known to not straddle a 32k boundary. This function is used
7812 to determine whether -mcmodel=medium code can use TOC pointer
7813 relative addressing for OP. This means the alignment of the TOC
7814 pointer must also be taken into account, and unfortunately that is
7815 only 8 bytes. */
7816
7817 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7818 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7819 #endif
7820
7821 static bool
7822 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7823 machine_mode mode)
7824 {
7825 tree decl;
7826 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7827
7828 if (!SYMBOL_REF_P (op))
7829 return false;
7830
7831 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7832 SYMBOL_REF. */
7833 if (mode_supports_dq_form (mode))
7834 return false;
7835
7836 dsize = GET_MODE_SIZE (mode);
7837 decl = SYMBOL_REF_DECL (op);
7838 if (!decl)
7839 {
7840 if (dsize == 0)
7841 return false;
7842
7843 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7844 replacing memory addresses with an anchor plus offset. We
7845 could find the decl by rummaging around in the block->objects
7846 VEC for the given offset but that seems like too much work. */
7847 dalign = BITS_PER_UNIT;
7848 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7849 && SYMBOL_REF_ANCHOR_P (op)
7850 && SYMBOL_REF_BLOCK (op) != NULL)
7851 {
7852 struct object_block *block = SYMBOL_REF_BLOCK (op);
7853
7854 dalign = block->alignment;
7855 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7856 }
7857 else if (CONSTANT_POOL_ADDRESS_P (op))
7858 {
7859 /* It would be nice to have get_pool_align()... */
7860 machine_mode cmode = get_pool_mode (op);
7861
7862 dalign = GET_MODE_ALIGNMENT (cmode);
7863 }
7864 }
7865 else if (DECL_P (decl))
7866 {
7867 dalign = DECL_ALIGN (decl);
7868
7869 if (dsize == 0)
7870 {
7871 /* Allow BLKmode when the entire object is known to not
7872 cross a 32k boundary. */
7873 if (!DECL_SIZE_UNIT (decl))
7874 return false;
7875
7876 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7877 return false;
7878
7879 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7880 if (dsize > 32768)
7881 return false;
7882
7883 dalign /= BITS_PER_UNIT;
7884 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7885 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7886 return dalign >= dsize;
7887 }
7888 }
7889 else
7890 gcc_unreachable ();
7891
7892 /* Find how many bits of the alignment we know for this access. */
7893 dalign /= BITS_PER_UNIT;
7894 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7895 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7896 mask = dalign - 1;
7897 lsb = offset & -offset;
7898 mask &= lsb - 1;
7899 dalign = mask + 1;
7900
7901 return dalign >= dsize;
7902 }
7903
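/* The bit twiddling above derives the alignment guaranteed by the symbol's
   alignment combined with the low zero bits of the offset.  For instance,
   with DALIGN = 8 and OFFSET = 20: LSB = 20 & -20 = 4,
   MASK = (8 - 1) & (4 - 1) = 3, so the recomputed DALIGN is 4, and only
   accesses of at most 4 bytes are known not to cross a 32k boundary.  */
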
7904 static bool
7905 constant_pool_expr_p (rtx op)
7906 {
7907 rtx base, offset;
7908
7909 split_const (op, &base, &offset);
7910 return (SYMBOL_REF_P (base)
7911 && CONSTANT_POOL_ADDRESS_P (base)
7912 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7913 }
7914
7915 /* These are only used to pass through from print_operand/print_operand_address
7916 to rs6000_output_addr_const_extra over the intervening function
7917 output_addr_const, which is not target code. */
7918 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7919
7920 /* Return true if OP is a toc pointer relative address (the output
7921 of create_TOC_reference). If STRICT, do not match non-split
7922 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7923 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7924 TOCREL_OFFSET_RET respectively. */
7925
7926 bool
7927 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7928 const_rtx *tocrel_offset_ret)
7929 {
7930 if (!TARGET_TOC)
7931 return false;
7932
7933 if (TARGET_CMODEL != CMODEL_SMALL)
7934 {
7935 /* When strict ensure we have everything tidy. */
7936 if (strict
7937 && !(GET_CODE (op) == LO_SUM
7938 && REG_P (XEXP (op, 0))
7939 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7940 return false;
7941
7942 /* When not strict, allow non-split TOC addresses and also allow
7943 (lo_sum (high ..)) TOC addresses created during reload. */
7944 if (GET_CODE (op) == LO_SUM)
7945 op = XEXP (op, 1);
7946 }
7947
7948 const_rtx tocrel_base = op;
7949 const_rtx tocrel_offset = const0_rtx;
7950
7951 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7952 {
7953 tocrel_base = XEXP (op, 0);
7954 tocrel_offset = XEXP (op, 1);
7955 }
7956
7957 if (tocrel_base_ret)
7958 *tocrel_base_ret = tocrel_base;
7959 if (tocrel_offset_ret)
7960 *tocrel_offset_ret = tocrel_offset;
7961
7962 return (GET_CODE (tocrel_base) == UNSPEC
7963 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7964 && REG_P (XVECEXP (tocrel_base, 0, 1))
7965 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7966 }
7967
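/* A TOC pointer relative address as matched above has the shape
   (unspec [(symbol_ref) (reg toc)] UNSPEC_TOCREL), possibly wrapped in a
   (plus ... (const_int N)) carrying the offset, and for -mcmodel=medium
   or -mcmodel=large possibly wrapped again in the lo_sum of a split
   address.  */
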
7968 /* Return true if X is a constant pool address, and also for cmodel=medium
7969 if X is a toc-relative address known to be offsettable within MODE. */
7970
7971 bool
7972 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7973 bool strict)
7974 {
7975 const_rtx tocrel_base, tocrel_offset;
7976 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7977 && (TARGET_CMODEL != CMODEL_MEDIUM
7978 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7979 || mode == QImode
7980 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7981 INTVAL (tocrel_offset), mode)));
7982 }
7983
7984 static bool
7985 legitimate_small_data_p (machine_mode mode, rtx x)
7986 {
7987 return (DEFAULT_ABI == ABI_V4
7988 && !flag_pic && !TARGET_TOC
7989 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7990 && small_data_operand (x, mode));
7991 }
7992
7993 bool
7994 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7995 bool strict, bool worst_case)
7996 {
7997 unsigned HOST_WIDE_INT offset;
7998 unsigned int extra;
7999
8000 if (GET_CODE (x) != PLUS)
8001 return false;
8002 if (!REG_P (XEXP (x, 0)))
8003 return false;
8004 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8005 return false;
8006 if (mode_supports_dq_form (mode))
8007 return quad_address_p (x, mode, strict);
8008 if (!reg_offset_addressing_ok_p (mode))
8009 return virtual_stack_registers_memory_p (x);
8010 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8011 return true;
8012 if (!CONST_INT_P (XEXP (x, 1)))
8013 return false;
8014
8015 offset = INTVAL (XEXP (x, 1));
8016 extra = 0;
8017 switch (mode)
8018 {
8019 case E_DFmode:
8020 case E_DDmode:
8021 case E_DImode:
8022 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8023 addressing. */
8024 if (VECTOR_MEM_VSX_P (mode))
8025 return false;
8026
8027 if (!worst_case)
8028 break;
8029 if (!TARGET_POWERPC64)
8030 extra = 4;
8031 else if (offset & 3)
8032 return false;
8033 break;
8034
8035 case E_TFmode:
8036 case E_IFmode:
8037 case E_KFmode:
8038 case E_TDmode:
8039 case E_TImode:
8040 case E_PTImode:
8041 extra = 8;
8042 if (!worst_case)
8043 break;
8044 if (!TARGET_POWERPC64)
8045 extra = 12;
8046 else if (offset & 3)
8047 return false;
8048 break;
8049
8050 default:
8051 break;
8052 }
8053
8054 offset += 0x8000;
8055 return offset < 0x10000 - extra;
8056 }
8057
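/* EXTRA above reserves displacement range for the later words of a
   multi-word access: a TImode access on a 32-bit target, for example, may
   be split into four word loads at offset, offset+4, offset+8 and
   offset+12, so in the worst case the offset must satisfy
   offset <= 32767 - 12 for every part to remain addressable.  */
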
8058 bool
8059 legitimate_indexed_address_p (rtx x, int strict)
8060 {
8061 rtx op0, op1;
8062
8063 if (GET_CODE (x) != PLUS)
8064 return false;
8065
8066 op0 = XEXP (x, 0);
8067 op1 = XEXP (x, 1);
8068
8069 return (REG_P (op0) && REG_P (op1)
8070 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8071 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8072 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8073 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8074 }
8075
8076 bool
8077 avoiding_indexed_address_p (machine_mode mode)
8078 {
8079 /* Avoid indexed addressing for modes that have non-indexed
8080 load/store instruction forms. */
8081 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8082 }
8083
8084 bool
8085 legitimate_indirect_address_p (rtx x, int strict)
8086 {
8087 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8088 }
8089
8090 bool
8091 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8092 {
8093 if (!TARGET_MACHO || !flag_pic
8094 || mode != SImode || !MEM_P (x))
8095 return false;
8096 x = XEXP (x, 0);
8097
8098 if (GET_CODE (x) != LO_SUM)
8099 return false;
8100 if (!REG_P (XEXP (x, 0)))
8101 return false;
8102 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8103 return false;
8104 x = XEXP (x, 1);
8105
8106 return CONSTANT_P (x);
8107 }
8108
8109 static bool
8110 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8111 {
8112 if (GET_CODE (x) != LO_SUM)
8113 return false;
8114 if (!REG_P (XEXP (x, 0)))
8115 return false;
8116 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8117 return false;
8118 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8119 if (mode_supports_dq_form (mode))
8120 return false;
8121 x = XEXP (x, 1);
8122
8123 if (TARGET_ELF || TARGET_MACHO)
8124 {
8125 bool large_toc_ok;
8126
8127 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8128 return false;
8129 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8130 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8131 recognizes some LO_SUM addresses as valid although this
8132 function says the opposite. In most cases LRA can, through
8133 various transformations, generate correct code for address
8134 reloads; it is only some LO_SUM cases that it cannot manage.
8135 So we need code here, analogous to that in
8136 rs6000_legitimize_reload_address for LO_SUM, saying that some
addresses are still valid. */
8137 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8138 && small_toc_ref (x, VOIDmode));
8139 if (TARGET_TOC && ! large_toc_ok)
8140 return false;
8141 if (GET_MODE_NUNITS (mode) != 1)
8142 return false;
8143 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8144 && !(/* ??? Assume floating point reg based on mode? */
8145 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8146 return false;
8147
8148 return CONSTANT_P (x) || large_toc_ok;
8149 }
8150
8151 return false;
8152 }
8153
8154
8155 /* Try machine-dependent ways of modifying an illegitimate address
8156 to be legitimate. If we find one, return the new, valid address.
8157 This is used from only one place: `memory_address' in explow.c.
8158
8159 OLDX is the address as it was before break_out_memory_refs was
8160 called. In some cases it is useful to look at this to decide what
8161 needs to be done.
8162
8163 It is always safe for this function to do nothing. It exists to
8164 recognize opportunities to optimize the output.
8165
8166 On RS/6000, first check for the sum of a register with a constant
8167 integer that is out of range. If so, generate code to add the
8168 constant with the low-order 16 bits masked to the register and force
8169 this result into another register (this can be done with `cau').
8170 Then generate an address of REG+(CONST&0xffff), allowing for the
8171 possibility of bit 16 being a one.
8172
8173 Then check for the sum of a register and something not constant, try to
8174 load the other things into a register and return the sum. */
8175
8176 static rtx
8177 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8178 machine_mode mode)
8179 {
8180 unsigned int extra;
8181
8182 if (!reg_offset_addressing_ok_p (mode)
8183 || mode_supports_dq_form (mode))
8184 {
8185 if (virtual_stack_registers_memory_p (x))
8186 return x;
8187
8188 /* In theory we should not be seeing addresses of the form reg+0,
8189 but just in case it is generated, optimize it away. */
8190 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8191 return force_reg (Pmode, XEXP (x, 0));
8192
8193 /* For TImode with load/store quad, restrict addresses to just a single
8194 pointer, so it works with both GPRs and VSX registers. */
8195 /* Make sure both operands are registers. */
8196 else if (GET_CODE (x) == PLUS
8197 && (mode != TImode || !TARGET_VSX))
8198 return gen_rtx_PLUS (Pmode,
8199 force_reg (Pmode, XEXP (x, 0)),
8200 force_reg (Pmode, XEXP (x, 1)));
8201 else
8202 return force_reg (Pmode, x);
8203 }
8204 if (SYMBOL_REF_P (x))
8205 {
8206 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8207 if (model != 0)
8208 return rs6000_legitimize_tls_address (x, model);
8209 }
8210
8211 extra = 0;
8212 switch (mode)
8213 {
8214 case E_TFmode:
8215 case E_TDmode:
8216 case E_TImode:
8217 case E_PTImode:
8218 case E_IFmode:
8219 case E_KFmode:
8220 /* As in rs6000_legitimate_offset_address_p, we do not assume
8221 worst-case. The mode here is just a hint as to the registers
8222 used. A TImode is usually in gprs, but may actually be in
8223 fprs. Leave worst-case scenario for reload to handle via
8224 insn constraints. PTImode is only GPRs. */
8225 extra = 8;
8226 break;
8227 default:
8228 break;
8229 }
8230
8231 if (GET_CODE (x) == PLUS
8232 && REG_P (XEXP (x, 0))
8233 && CONST_INT_P (XEXP (x, 1))
8234 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8235 >= 0x10000 - extra))
8236 {
8237 HOST_WIDE_INT high_int, low_int;
8238 rtx sum;
8239 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8240 if (low_int >= 0x8000 - extra)
8241 low_int = 0;
8242 high_int = INTVAL (XEXP (x, 1)) - low_int;
8243 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8244 GEN_INT (high_int)), 0);
8245 return plus_constant (Pmode, sum, low_int);
8246 }
8247 else if (GET_CODE (x) == PLUS
8248 && REG_P (XEXP (x, 0))
8249 && !CONST_INT_P (XEXP (x, 1))
8250 && GET_MODE_NUNITS (mode) == 1
8251 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8252 || (/* ??? Assume floating point reg based on mode? */
8253 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8254 && !avoiding_indexed_address_p (mode))
8255 {
8256 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8257 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8258 }
8259 else if ((TARGET_ELF
8260 #if TARGET_MACHO
8261 || !MACHO_DYNAMIC_NO_PIC_P
8262 #endif
8263 )
8264 && TARGET_32BIT
8265 && TARGET_NO_TOC
8266 && !flag_pic
8267 && !CONST_INT_P (x)
8268 && !CONST_WIDE_INT_P (x)
8269 && !CONST_DOUBLE_P (x)
8270 && CONSTANT_P (x)
8271 && GET_MODE_NUNITS (mode) == 1
8272 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8273 || (/* ??? Assume floating point reg based on mode? */
8274 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8275 {
8276 rtx reg = gen_reg_rtx (Pmode);
8277 if (TARGET_ELF)
8278 emit_insn (gen_elf_high (reg, x));
8279 else
8280 emit_insn (gen_macho_high (reg, x));
8281 return gen_rtx_LO_SUM (Pmode, reg, x);
8282 }
8283 else if (TARGET_TOC
8284 && SYMBOL_REF_P (x)
8285 && constant_pool_expr_p (x)
8286 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8287 return create_TOC_reference (x, NULL_RTX);
8288 else
8289 return x;
8290 }
8291
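/* As a worked example of the high/low split above: legitimizing
   (plus (reg r9) (const_int 0x12345)) for SImode computes
   LOW_INT = 0x2345 and HIGH_INT = 0x10000, so the high part is added
   first (typically with a single ADDIS) and the returned address is
   reg+0x2345, which fits the 16-bit D field.  An offset of 0x9000
   instead splits into HIGH_INT = 0x10000 and LOW_INT = -0x7000,
   exploiting the sign extension of the D field.  */
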
8292 /* Debug version of rs6000_legitimize_address. */
8293 static rtx
8294 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8295 {
8296 rtx ret;
8297 rtx_insn *insns;
8298
8299 start_sequence ();
8300 ret = rs6000_legitimize_address (x, oldx, mode);
8301 insns = get_insns ();
8302 end_sequence ();
8303
8304 if (ret != x)
8305 {
8306 fprintf (stderr,
8307 "\nrs6000_legitimize_address: mode %s, old code %s, "
8308 "new code %s, modified\n",
8309 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8310 GET_RTX_NAME (GET_CODE (ret)));
8311
8312 fprintf (stderr, "Original address:\n");
8313 debug_rtx (x);
8314
8315 fprintf (stderr, "oldx:\n");
8316 debug_rtx (oldx);
8317
8318 fprintf (stderr, "New address:\n");
8319 debug_rtx (ret);
8320
8321 if (insns)
8322 {
8323 fprintf (stderr, "Insns added:\n");
8324 debug_rtx_list (insns, 20);
8325 }
8326 }
8327 else
8328 {
8329 fprintf (stderr,
8330 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8331 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8332
8333 debug_rtx (x);
8334 }
8335
8336 if (insns)
8337 emit_insn (insns);
8338
8339 return ret;
8340 }
8341
8342 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8343 We need to emit DTP-relative relocations. */
8344
8345 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8346 static void
8347 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8348 {
8349 switch (size)
8350 {
8351 case 4:
8352 fputs ("\t.long\t", file);
8353 break;
8354 case 8:
8355 fputs (DOUBLE_INT_ASM_OP, file);
8356 break;
8357 default:
8358 gcc_unreachable ();
8359 }
8360 output_addr_const (file, x);
8361 if (TARGET_ELF)
8362 fputs ("@dtprel+0x8000", file);
8363 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8364 {
8365 switch (SYMBOL_REF_TLS_MODEL (x))
8366 {
8367 case 0:
8368 break;
8369 case TLS_MODEL_LOCAL_EXEC:
8370 fputs ("@le", file);
8371 break;
8372 case TLS_MODEL_INITIAL_EXEC:
8373 fputs ("@ie", file);
8374 break;
8375 case TLS_MODEL_GLOBAL_DYNAMIC:
8376 case TLS_MODEL_LOCAL_DYNAMIC:
8377 fputs ("@m", file);
8378 break;
8379 default:
8380 gcc_unreachable ();
8381 }
8382 }
8383 }
8384
8385 /* Return true if X is a symbol that refers to real (rather than emulated)
8386 TLS. */
8387
8388 static bool
8389 rs6000_real_tls_symbol_ref_p (rtx x)
8390 {
8391 return (SYMBOL_REF_P (x)
8392 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8393 }
8394
8395 /* In the name of slightly smaller debug output, and to cater to
8396 general assembler lossage, recognize various UNSPEC sequences
8397 and turn them back into a direct symbol reference. */
8398
8399 static rtx
8400 rs6000_delegitimize_address (rtx orig_x)
8401 {
8402 rtx x, y, offset;
8403
8404 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8405 orig_x = XVECEXP (orig_x, 0, 0);
8406
8407 orig_x = delegitimize_mem_from_attrs (orig_x);
8408
8409 x = orig_x;
8410 if (MEM_P (x))
8411 x = XEXP (x, 0);
8412
8413 y = x;
8414 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8415 y = XEXP (y, 1);
8416
8417 offset = NULL_RTX;
8418 if (GET_CODE (y) == PLUS
8419 && GET_MODE (y) == Pmode
8420 && CONST_INT_P (XEXP (y, 1)))
8421 {
8422 offset = XEXP (y, 1);
8423 y = XEXP (y, 0);
8424 }
8425
8426 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8427 {
8428 y = XVECEXP (y, 0, 0);
8429
8430 #ifdef HAVE_AS_TLS
8431 /* Do not associate thread-local symbols with the original
8432 constant pool symbol. */
8433 if (TARGET_XCOFF
8434 && SYMBOL_REF_P (y)
8435 && CONSTANT_POOL_ADDRESS_P (y)
8436 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8437 return orig_x;
8438 #endif
8439
8440 if (offset != NULL_RTX)
8441 y = gen_rtx_PLUS (Pmode, y, offset);
8442 if (!MEM_P (orig_x))
8443 return y;
8444 else
8445 return replace_equiv_address_nv (orig_x, y);
8446 }
8447
8448 if (TARGET_MACHO
8449 && GET_CODE (orig_x) == LO_SUM
8450 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8451 {
8452 y = XEXP (XEXP (orig_x, 1), 0);
8453 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8454 return XVECEXP (y, 0, 0);
8455 }
8456
8457 return orig_x;
8458 }
8459
8460 /* Return true if X shouldn't be emitted into the debug info.
8461 The linker doesn't like .toc section references from
8462 .debug_* sections, so reject .toc section symbols. */
8463
8464 static bool
8465 rs6000_const_not_ok_for_debug_p (rtx x)
8466 {
8467 if (GET_CODE (x) == UNSPEC)
8468 return true;
8469 if (SYMBOL_REF_P (x)
8470 && CONSTANT_POOL_ADDRESS_P (x))
8471 {
8472 rtx c = get_pool_constant (x);
8473 machine_mode cmode = get_pool_mode (x);
8474 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8475 return true;
8476 }
8477
8478 return false;
8479 }
8480
8481 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8482
8483 static bool
8484 rs6000_legitimate_combined_insn (rtx_insn *insn)
8485 {
8486 int icode = INSN_CODE (insn);
8487
8488 /* Reject creating doloop insns. Combine should not be allowed
8489 to create these for a number of reasons:
8490 1) In a nested loop, if combine creates one of these in an
8491 outer loop and the register allocator happens to allocate ctr
8492 to the outer loop insn, then the inner loop can't use ctr.
8493 Inner loops ought to be more highly optimized.
8494 2) Combine often wants to create one of these from what was
8495 originally a three insn sequence, first combining the three
8496 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8497 allocated ctr, the splitter takes us back to the three insn
8498 sequence. It's better to stop combine at the two insn
8499 sequence.
8500 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8501 insns, the register allocator sometimes uses floating point
8502 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8503 jump insn and output reloads are not implemented for jumps,
8504 the ctrsi/ctrdi splitters need to handle all possible cases.
8505 That's a pain, and it gets to be seriously difficult when a
8506 splitter that runs after reload needs memory to transfer from
8507 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8508 for the difficult case. It's better to not create problems
8509 in the first place. */
8510 if (icode != CODE_FOR_nothing
8511 && (icode == CODE_FOR_bdz_si
8512 || icode == CODE_FOR_bdz_di
8513 || icode == CODE_FOR_bdnz_si
8514 || icode == CODE_FOR_bdnz_di
8515 || icode == CODE_FOR_bdztf_si
8516 || icode == CODE_FOR_bdztf_di
8517 || icode == CODE_FOR_bdnztf_si
8518 || icode == CODE_FOR_bdnztf_di))
8519 return false;
8520
8521 return true;
8522 }
8523
8524 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8525
8526 static GTY(()) rtx rs6000_tls_symbol;
8527 static rtx
8528 rs6000_tls_get_addr (void)
8529 {
8530 if (!rs6000_tls_symbol)
8531 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8532
8533 return rs6000_tls_symbol;
8534 }
8535
8536 /* Construct the SYMBOL_REF for TLS GOT references. */
8537
8538 static GTY(()) rtx rs6000_got_symbol;
8539 static rtx
8540 rs6000_got_sym (void)
8541 {
8542 if (!rs6000_got_symbol)
8543 {
8544 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8545 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8546 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8547 }
8548
8549 return rs6000_got_symbol;
8550 }
8551
8552 /* AIX Thread-Local Address support. */
8553
8554 static rtx
8555 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8556 {
8557 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8558 const char *name;
8559 char *tlsname;
8560
8561 name = XSTR (addr, 0);
8562 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8563 or the symbol will be in TLS private data section. */
8564 if (name[strlen (name) - 1] != ']'
8565 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8566 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8567 {
8568 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8569 strcpy (tlsname, name);
8570 strcat (tlsname,
8571 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8572 tlsaddr = copy_rtx (addr);
8573 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8574 }
8575 else
8576 tlsaddr = addr;
8577
8578 /* Place addr into TOC constant pool. */
8579 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8580
8581 /* Output the TOC entry and create the MEM referencing the value. */
8582 if (constant_pool_expr_p (XEXP (sym, 0))
8583 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8584 {
8585 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8586 mem = gen_const_mem (Pmode, tocref);
8587 set_mem_alias_set (mem, get_TOC_alias_set ());
8588 }
8589 else
8590 return sym;
8591
8592 /* Use global-dynamic for local-dynamic. */
8593 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8594 || model == TLS_MODEL_LOCAL_DYNAMIC)
8595 {
8596 /* Create new TOC reference for @m symbol. */
8597 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8598 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8599 strcpy (tlsname, "*LCM");
8600 strcat (tlsname, name + 3);
8601 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8602 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8603 tocref = create_TOC_reference (modaddr, NULL_RTX);
8604 rtx modmem = gen_const_mem (Pmode, tocref);
8605 set_mem_alias_set (modmem, get_TOC_alias_set ());
8606
8607 rtx modreg = gen_reg_rtx (Pmode);
8608 emit_insn (gen_rtx_SET (modreg, modmem));
8609
8610 tmpreg = gen_reg_rtx (Pmode);
8611 emit_insn (gen_rtx_SET (tmpreg, mem));
8612
8613 dest = gen_reg_rtx (Pmode);
8614 if (TARGET_32BIT)
8615 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8616 else
8617 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8618 return dest;
8619 }
8620 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8621 else if (TARGET_32BIT)
8622 {
8623 tlsreg = gen_reg_rtx (SImode);
8624 emit_insn (gen_tls_get_tpointer (tlsreg));
8625 }
8626 else
8627 tlsreg = gen_rtx_REG (DImode, 13);
8628
8629 /* Load the TOC value into temporary register. */
8630 tmpreg = gen_reg_rtx (Pmode);
8631 emit_insn (gen_rtx_SET (tmpreg, mem));
8632 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8633 gen_rtx_MINUS (Pmode, addr, tlsreg));
8634
8635 /* Add TOC symbol value to TLS pointer. */
8636 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8637
8638 return dest;
8639 }
8640
8641 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8642 __tls_get_addr call. */
8643
8644 void
8645 rs6000_output_tlsargs (rtx *operands)
8646 {
8647 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8648 rtx op[3];
8649
8650 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8651 op[0] = operands[0];
8652 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8653 op[1] = XVECEXP (operands[2], 0, 0);
8654 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8655 {
8656 /* The GOT register. */
8657 op[2] = XVECEXP (operands[2], 0, 1);
8658 if (TARGET_CMODEL != CMODEL_SMALL)
8659 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8660 "addi %0,%0,%1@got@tlsgd@l", op);
8661 else
8662 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8663 }
8664 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8665 {
8666 if (TARGET_CMODEL != CMODEL_SMALL)
8667 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8668 "addi %0,%0,%&@got@tlsld@l", op);
8669 else
8670 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8671 }
8672 else
8673 gcc_unreachable ();
8674 }
8675
8676 /* Passes the tls arg value for global dynamic and local dynamic
8677 emit_library_call_value in rs6000_legitimize_tls_address to
8678 rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8679 marker relocs put on __tls_get_addr calls. */
8680 static rtx global_tlsarg;
8681
8682 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8683 this (thread-local) address. */
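/* A rough sketch of the sequences generated for the ELF cases below
   (illustrative 64-bit assembly; the actual insns come from the tls_*
   patterns in the machine description):
     local-exec, -mtls-size=16:  addi  rD,r13,x@tprel
     local-exec, -mtls-size=32:  addis rT,r13,x@tprel@ha
                                 addi  rD,rT,x@tprel@l
     initial-exec:               ld    rT,x@got@tprel(r2)
                                 add   rD,rT,x@tls  */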
8684
8685 static rtx
8686 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8687 {
8688 rtx dest, insn;
8689
8690 if (TARGET_XCOFF)
8691 return rs6000_legitimize_tls_address_aix (addr, model);
8692
8693 dest = gen_reg_rtx (Pmode);
8694 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8695 {
8696 rtx tlsreg;
8697
8698 if (TARGET_64BIT)
8699 {
8700 tlsreg = gen_rtx_REG (Pmode, 13);
8701 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8702 }
8703 else
8704 {
8705 tlsreg = gen_rtx_REG (Pmode, 2);
8706 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8707 }
8708 emit_insn (insn);
8709 }
8710 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8711 {
8712 rtx tlsreg, tmp;
8713
8714 tmp = gen_reg_rtx (Pmode);
8715 if (TARGET_64BIT)
8716 {
8717 tlsreg = gen_rtx_REG (Pmode, 13);
8718 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8719 }
8720 else
8721 {
8722 tlsreg = gen_rtx_REG (Pmode, 2);
8723 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8724 }
8725 emit_insn (insn);
8726 if (TARGET_64BIT)
8727 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8728 else
8729 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8730 emit_insn (insn);
8731 }
8732 else
8733 {
8734 rtx got, tga, tmp1, tmp2;
8735
8736 /* We currently use relocations like @got@tlsgd for tls, which
8737 means the linker will handle allocation of tls entries, placing
8738 them in the .got section. So use a pointer to the .got section,
8739 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8740 or to secondary GOT sections used by 32-bit -fPIC. */
8741 if (TARGET_64BIT)
8742 got = gen_rtx_REG (Pmode, 2);
8743 else
8744 {
8745 if (flag_pic == 1)
8746 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8747 else
8748 {
8749 rtx gsym = rs6000_got_sym ();
8750 got = gen_reg_rtx (Pmode);
8751 if (flag_pic == 0)
8752 rs6000_emit_move (got, gsym, Pmode);
8753 else
8754 {
8755 rtx mem, lab;
8756
8757 tmp1 = gen_reg_rtx (Pmode);
8758 tmp2 = gen_reg_rtx (Pmode);
8759 mem = gen_const_mem (Pmode, tmp1);
8760 lab = gen_label_rtx ();
8761 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8762 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8763 if (TARGET_LINK_STACK)
8764 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8765 emit_move_insn (tmp2, mem);
8766 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8767 set_unique_reg_note (last, REG_EQUAL, gsym);
8768 }
8769 }
8770 }
8771
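      /* With TLS markers, the global-dynamic sequence emitted below
	 amounts to (illustrative, 64-bit small code model):
	    addi 3,2,x@got@tlsgd
	    bl __tls_get_addr(x@tlsgd)
	    nop  */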
8772 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8773 {
8774 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8775 UNSPEC_TLSGD);
8776 tga = rs6000_tls_get_addr ();
8777 global_tlsarg = arg;
8778 if (TARGET_TLS_MARKERS)
8779 {
8780 rtx argreg = gen_rtx_REG (Pmode, 3);
8781 emit_insn (gen_rtx_SET (argreg, arg));
8782 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8783 argreg, Pmode);
8784 }
8785 else
8786 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8787 global_tlsarg = NULL_RTX;
8788
8789 /* Make a note so that the result of this call can be CSEd. */
8790 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8791 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8792 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8793 }
8794 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8795 {
8796 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8797 tga = rs6000_tls_get_addr ();
8798 tmp1 = gen_reg_rtx (Pmode);
8799 global_tlsarg = arg;
8800 if (TARGET_TLS_MARKERS)
8801 {
8802 rtx argreg = gen_rtx_REG (Pmode, 3);
8803 emit_insn (gen_rtx_SET (argreg, arg));
8804 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8805 argreg, Pmode);
8806 }
8807 else
8808 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8809 global_tlsarg = NULL_RTX;
8810
8811 /* Make a note so that the result of this call can be CSEd. */
8812 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8813 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8814 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8815
8816 if (rs6000_tls_size == 16)
8817 {
8818 if (TARGET_64BIT)
8819 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8820 else
8821 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8822 }
8823 else if (rs6000_tls_size == 32)
8824 {
8825 tmp2 = gen_reg_rtx (Pmode);
8826 if (TARGET_64BIT)
8827 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8828 else
8829 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8830 emit_insn (insn);
8831 if (TARGET_64BIT)
8832 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8833 else
8834 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8835 }
8836 else
8837 {
8838 tmp2 = gen_reg_rtx (Pmode);
8839 if (TARGET_64BIT)
8840 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8841 else
8842 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8843 emit_insn (insn);
8844 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8845 }
8846 emit_insn (insn);
8847 }
8848 else
8849 {
8850 /* IE, or 64-bit offset LE. */
8851 tmp2 = gen_reg_rtx (Pmode);
8852 if (TARGET_64BIT)
8853 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8854 else
8855 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8856 emit_insn (insn);
8857 if (TARGET_64BIT)
8858 insn = gen_tls_tls_64 (dest, tmp2, addr);
8859 else
8860 insn = gen_tls_tls_32 (dest, tmp2, addr);
8861 emit_insn (insn);
8862 }
8863 }
8864
8865 return dest;
8866 }
8867
8868 /* Only create the global variable for the stack protect guard if we are using
8869 the global flavor of that guard. */
8870 static tree
8871 rs6000_init_stack_protect_guard (void)
8872 {
8873 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8874 return default_stack_protect_guard ();
8875
8876 return NULL_TREE;
8877 }
8878
8879 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8880
8881 static bool
8882 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8883 {
8884 if (GET_CODE (x) == HIGH
8885 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8886 return true;
8887
8888 /* A TLS symbol in the TOC cannot contain a sum. */
8889 if (GET_CODE (x) == CONST
8890 && GET_CODE (XEXP (x, 0)) == PLUS
8891 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8892 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8893 return true;
8894
8895 /* Do not place an ELF TLS symbol in the constant pool. */
8896 return TARGET_ELF && tls_referenced_p (x);
8897 }
8898
8899 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8900 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8901 can be addressed relative to the toc pointer. */
8902
8903 static bool
8904 use_toc_relative_ref (rtx sym, machine_mode mode)
8905 {
8906 return ((constant_pool_expr_p (sym)
8907 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8908 get_pool_mode (sym)))
8909 || (TARGET_CMODEL == CMODEL_MEDIUM
8910 && SYMBOL_REF_LOCAL_P (sym)
8911 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8912 }
8913
8914 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8915 replace the input X, or the original X if no replacement is called for.
8916 The output parameter *WIN is 1 if the calling macro should goto WIN,
8917 0 if it should not.
8918
8919 For RS/6000, we wish to handle large displacements off a base
8920 	   register by splitting the addend across an addi/addis pair and the mem insn.
8921 	   This cuts the number of extra insns needed from 3 to 1.
8922
8923 On Darwin, we use this to generate code for floating point constants.
8924 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8925 The Darwin code is inside #if TARGET_MACHO because only then are the
8926 machopic_* functions defined. */
8927 static rtx
8928 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8929 int opnum, int type,
8930 int ind_levels ATTRIBUTE_UNUSED, int *win)
8931 {
8932 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8933 bool quad_offset_p = mode_supports_dq_form (mode);
8934
8935 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8936 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8937 if (reg_offset_p
8938 && opnum == 1
8939 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8940 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8941 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8942 && TARGET_P9_VECTOR)
8943 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8944 && TARGET_P9_VECTOR)))
8945 reg_offset_p = false;
8946
8947 /* We must recognize output that we have already generated ourselves. */
8948 if (GET_CODE (x) == PLUS
8949 && GET_CODE (XEXP (x, 0)) == PLUS
8950 && REG_P (XEXP (XEXP (x, 0), 0))
8951 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8952 && CONST_INT_P (XEXP (x, 1)))
8953 {
8954 if (TARGET_DEBUG_ADDR)
8955 {
8956 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8957 debug_rtx (x);
8958 }
8959 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8960 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8961 opnum, (enum reload_type) type);
8962 *win = 1;
8963 return x;
8964 }
8965
8966 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8967 if (GET_CODE (x) == LO_SUM
8968 && GET_CODE (XEXP (x, 0)) == HIGH)
8969 {
8970 if (TARGET_DEBUG_ADDR)
8971 {
8972 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8973 debug_rtx (x);
8974 }
8975 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8976 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8977 opnum, (enum reload_type) type);
8978 *win = 1;
8979 return x;
8980 }
8981
8982 #if TARGET_MACHO
8983 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8984 && GET_CODE (x) == LO_SUM
8985 && GET_CODE (XEXP (x, 0)) == PLUS
8986 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8987 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8988 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8989 && machopic_operand_p (XEXP (x, 1)))
8990 {
8991 /* Result of previous invocation of this function on Darwin
8992 floating point constant. */
8993 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8994 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8995 opnum, (enum reload_type) type);
8996 *win = 1;
8997 return x;
8998 }
8999 #endif
9000
9001 if (TARGET_CMODEL != CMODEL_SMALL
9002 && reg_offset_p
9003 && !quad_offset_p
9004 && small_toc_ref (x, VOIDmode))
9005 {
9006 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9007 x = gen_rtx_LO_SUM (Pmode, hi, x);
9008 if (TARGET_DEBUG_ADDR)
9009 {
9010 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9011 debug_rtx (x);
9012 }
9013 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9014 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9015 opnum, (enum reload_type) type);
9016 *win = 1;
9017 return x;
9018 }
9019
9020 if (GET_CODE (x) == PLUS
9021 && REG_P (XEXP (x, 0))
9022 && HARD_REGISTER_P (XEXP (x, 0))
9023 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9024 && CONST_INT_P (XEXP (x, 1))
9025 && reg_offset_p
9026 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9027 {
9028 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9029 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9030 HOST_WIDE_INT high
9031 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
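      /* Worked example (illustrative): val = 0x12348000 gives
	 low = -0x8000 and high = 0x12350000, so the address becomes
	 (plus (plus reg 0x12350000) -0x8000) and only the high part
	 needs a reload.  */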
9032
9033 /* Check for 32-bit overflow or quad addresses with one of the
9034 four least significant bits set. */
9035 if (high + low != val
9036 || (quad_offset_p && (low & 0xf)))
9037 {
9038 *win = 0;
9039 return x;
9040 }
9041
9042 /* Reload the high part into a base reg; leave the low part
9043 in the mem directly. */
9044
9045 x = gen_rtx_PLUS (GET_MODE (x),
9046 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9047 GEN_INT (high)),
9048 GEN_INT (low));
9049
9050 if (TARGET_DEBUG_ADDR)
9051 {
9052 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9053 debug_rtx (x);
9054 }
9055 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9056 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9057 opnum, (enum reload_type) type);
9058 *win = 1;
9059 return x;
9060 }
9061
9062 if (SYMBOL_REF_P (x)
9063 && reg_offset_p
9064 && !quad_offset_p
9065 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9066 #if TARGET_MACHO
9067 && DEFAULT_ABI == ABI_DARWIN
9068 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9069 && machopic_symbol_defined_p (x)
9070 #else
9071 && DEFAULT_ABI == ABI_V4
9072 && !flag_pic
9073 #endif
9074 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9075 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9076 without fprs.
9077 ??? Assume floating point reg based on mode? This assumption is
9078 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9079 where reload ends up doing a DFmode load of a constant from
9080 mem using two gprs. Unfortunately, at this point reload
9081 hasn't yet selected regs so poking around in reload data
9082 won't help and even if we could figure out the regs reliably,
9083 we'd still want to allow this transformation when the mem is
9084 naturally aligned. Since we say the address is good here, we
9085 can't disable offsets from LO_SUMs in mem_operand_gpr.
9086 FIXME: Allow offset from lo_sum for other modes too, when
9087 mem is sufficiently aligned.
9088
9089 Also disallow this if the type can go in VMX/Altivec registers, since
9090 those registers do not have d-form (reg+offset) address modes. */
9091 && !reg_addr[mode].scalar_in_vmx_p
9092 && mode != TFmode
9093 && mode != TDmode
9094 && mode != IFmode
9095 && mode != KFmode
9096 && (mode != TImode || !TARGET_VSX)
9097 && mode != PTImode
9098 && (mode != DImode || TARGET_POWERPC64)
9099 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9100 || TARGET_HARD_FLOAT))
9101 {
9102 #if TARGET_MACHO
9103 if (flag_pic)
9104 {
9105 rtx offset = machopic_gen_offset (x);
9106 x = gen_rtx_LO_SUM (GET_MODE (x),
9107 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9108 gen_rtx_HIGH (Pmode, offset)), offset);
9109 }
9110 else
9111 #endif
9112 x = gen_rtx_LO_SUM (GET_MODE (x),
9113 gen_rtx_HIGH (Pmode, x), x);
9114
9115 if (TARGET_DEBUG_ADDR)
9116 {
9117 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9118 debug_rtx (x);
9119 }
9120 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9121 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9122 opnum, (enum reload_type) type);
9123 *win = 1;
9124 return x;
9125 }
9126
9127 /* Reload an offset address wrapped by an AND that represents the
9128 masking of the lower bits. Strip the outer AND and let reload
9129 convert the offset address into an indirect address. For VSX,
9130 force reload to create the address with an AND in a separate
9131 register, because we can't guarantee an altivec register will
9132 be used. */
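  /* E.g. (illustrative): (and (plus (reg 9) (const_int 32)) (const_int -16))
     becomes (plus (reg 9) (const_int 32)), and reload then converts the
     offset address into an indirect address.  */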
9133 if (VECTOR_MEM_ALTIVEC_P (mode)
9134 && GET_CODE (x) == AND
9135 && GET_CODE (XEXP (x, 0)) == PLUS
9136 && REG_P (XEXP (XEXP (x, 0), 0))
9137 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9138 && CONST_INT_P (XEXP (x, 1))
9139 && INTVAL (XEXP (x, 1)) == -16)
9140 {
9141 x = XEXP (x, 0);
9142 *win = 1;
9143 return x;
9144 }
9145
9146 if (TARGET_TOC
9147 && reg_offset_p
9148 && !quad_offset_p
9149 && SYMBOL_REF_P (x)
9150 && use_toc_relative_ref (x, mode))
9151 {
9152 x = create_TOC_reference (x, NULL_RTX);
9153 if (TARGET_CMODEL != CMODEL_SMALL)
9154 {
9155 if (TARGET_DEBUG_ADDR)
9156 {
9157 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9158 debug_rtx (x);
9159 }
9160 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9161 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9162 opnum, (enum reload_type) type);
9163 }
9164 *win = 1;
9165 return x;
9166 }
9167 *win = 0;
9168 return x;
9169 }
9170
9171 /* Debug version of rs6000_legitimize_reload_address. */
9172 static rtx
9173 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9174 int opnum, int type,
9175 int ind_levels, int *win)
9176 {
9177 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9178 ind_levels, win);
9179 fprintf (stderr,
9180 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9181 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9182 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9183 debug_rtx (x);
9184
9185 if (x == ret)
9186 fprintf (stderr, "Same address returned\n");
9187 else if (!ret)
9188 fprintf (stderr, "NULL returned\n");
9189 else
9190 {
9191 fprintf (stderr, "New address:\n");
9192 debug_rtx (ret);
9193 }
9194
9195 return ret;
9196 }
9197
9198 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9199 that is a valid memory address for an instruction.
9200 The MODE argument is the machine mode for the MEM expression
9201 that wants to use this address.
9202
9203 	   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9204 refers to a constant pool entry of an address (or the sum of it
9205 plus a constant), a short (16-bit signed) constant plus a register,
9206 the sum of two registers, or a register indirect, possibly with an
9207 auto-increment. For DFmode, DDmode and DImode with a constant plus
9208 	   register, we must ensure that both words are addressable, or on
9209 	   PowerPC64 that the offset is word aligned.
9210
9211 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9212 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9213 because adjacent memory cells are accessed by adding word-sized offsets
9214 during assembly output. */
9215 static bool
9216 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9217 {
9218 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9219 bool quad_offset_p = mode_supports_dq_form (mode);
9220
9221 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9222 if (VECTOR_MEM_ALTIVEC_P (mode)
9223 && GET_CODE (x) == AND
9224 && CONST_INT_P (XEXP (x, 1))
9225 && INTVAL (XEXP (x, 1)) == -16)
9226 x = XEXP (x, 0);
9227
9228 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9229 return 0;
9230 if (legitimate_indirect_address_p (x, reg_ok_strict))
9231 return 1;
9232 if (TARGET_UPDATE
9233 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9234 && mode_supports_pre_incdec_p (mode)
9235 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9236 return 1;
9237 /* Handle restricted vector d-form offsets in ISA 3.0. */
9238 if (quad_offset_p)
9239 {
9240 if (quad_address_p (x, mode, reg_ok_strict))
9241 return 1;
9242 }
9243 else if (virtual_stack_registers_memory_p (x))
9244 return 1;
9245
9246 else if (reg_offset_p)
9247 {
9248 if (legitimate_small_data_p (mode, x))
9249 return 1;
9250 if (legitimate_constant_pool_address_p (x, mode,
9251 reg_ok_strict || lra_in_progress))
9252 return 1;
9253 }
9254
9255 /* For TImode, if we have TImode in VSX registers, only allow register
9256 indirect addresses. This will allow the values to go in either GPRs
9257 or VSX registers without reloading. The vector types would tend to
9258 go into VSX registers, so we allow REG+REG, while TImode seems
9259 somewhat split, in that some uses are GPR based, and some VSX based. */
9260 /* FIXME: We could loosen this by changing the following to
9261 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9262 but currently we cannot allow REG+REG addressing for TImode. See
9263 PR72827 for complete details on how this ends up hoodwinking DSE. */
9264 if (mode == TImode && TARGET_VSX)
9265 return 0;
9266 	  /* If not REG_OK_STRICT (before reload), allow any stack offset.  */
9267 if (! reg_ok_strict
9268 && reg_offset_p
9269 && GET_CODE (x) == PLUS
9270 && REG_P (XEXP (x, 0))
9271 && (XEXP (x, 0) == virtual_stack_vars_rtx
9272 || XEXP (x, 0) == arg_pointer_rtx)
9273 && CONST_INT_P (XEXP (x, 1)))
9274 return 1;
9275 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9276 return 1;
9277 if (!FLOAT128_2REG_P (mode)
9278 && (TARGET_HARD_FLOAT
9279 || TARGET_POWERPC64
9280 || (mode != DFmode && mode != DDmode))
9281 && (TARGET_POWERPC64 || mode != DImode)
9282 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9283 && mode != PTImode
9284 && !avoiding_indexed_address_p (mode)
9285 && legitimate_indexed_address_p (x, reg_ok_strict))
9286 return 1;
9287 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9288 && mode_supports_pre_modify_p (mode)
9289 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9290 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9291 reg_ok_strict, false)
9292 || (!avoiding_indexed_address_p (mode)
9293 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9294 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9295 return 1;
9296 if (reg_offset_p && !quad_offset_p
9297 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9298 return 1;
9299 return 0;
9300 }
9301
9302 /* Debug version of rs6000_legitimate_address_p. */
9303 static bool
9304 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9305 bool reg_ok_strict)
9306 {
9307 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9308 fprintf (stderr,
9309 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9310 "strict = %d, reload = %s, code = %s\n",
9311 ret ? "true" : "false",
9312 GET_MODE_NAME (mode),
9313 reg_ok_strict,
9314 (reload_completed ? "after" : "before"),
9315 GET_RTX_NAME (GET_CODE (x)));
9316 debug_rtx (x);
9317
9318 return ret;
9319 }
9320
9321 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9322
9323 static bool
9324 rs6000_mode_dependent_address_p (const_rtx addr,
9325 addr_space_t as ATTRIBUTE_UNUSED)
9326 {
9327 return rs6000_mode_dependent_address_ptr (addr);
9328 }
9329
9330 /* Go to LABEL if ADDR (a legitimate address expression)
9331 has an effect that depends on the machine mode it is used for.
9332
9333 On the RS/6000 this is true of all integral offsets (since AltiVec
9334 and VSX modes don't allow them) or is a pre-increment or decrement.
9335
9336 ??? Except that due to conceptual problems in offsettable_address_p
9337 we can't really report the problems of integral offsets. So leave
9338 this assuming that the adjustable offset must be valid for the
9339 sub-words of a TFmode operand, which is what we had before. */
9340
9341 static bool
9342 rs6000_mode_dependent_address (const_rtx addr)
9343 {
9344 switch (GET_CODE (addr))
9345 {
9346 case PLUS:
9347 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9348 is considered a legitimate address before reload, so there
9349 are no offset restrictions in that case. Note that this
9350 condition is safe in strict mode because any address involving
9351 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9352 been rejected as illegitimate. */
9353 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9354 && XEXP (addr, 0) != arg_pointer_rtx
9355 && CONST_INT_P (XEXP (addr, 1)))
9356 {
9357 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
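	  /* The test below flags the offset as mode-dependent when the
	     offset plus the largest sub-word displacement used during
	     assembly output (8 bytes, or 12 in 32-bit mode) no longer
	     fits the signed 16-bit field; e.g. (illustrative) val =
	     0x7ff8 fails on 64-bit because 0x7ff8 + 8 overflows.  */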
9358 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9359 }
9360 break;
9361
9362 case LO_SUM:
9363 /* Anything in the constant pool is sufficiently aligned that
9364 all bytes have the same high part address. */
9365 return !legitimate_constant_pool_address_p (addr, QImode, false);
9366
9367 /* Auto-increment cases are now treated generically in recog.c. */
9368 case PRE_MODIFY:
9369 return TARGET_UPDATE;
9370
9371 /* AND is only allowed in Altivec loads. */
9372 case AND:
9373 return true;
9374
9375 default:
9376 break;
9377 }
9378
9379 return false;
9380 }
9381
9382 /* Debug version of rs6000_mode_dependent_address. */
9383 static bool
9384 rs6000_debug_mode_dependent_address (const_rtx addr)
9385 {
9386 bool ret = rs6000_mode_dependent_address (addr);
9387
9388 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9389 ret ? "true" : "false");
9390 debug_rtx (addr);
9391
9392 return ret;
9393 }
9394
9395 /* Implement FIND_BASE_TERM. */
9396
9397 rtx
9398 rs6000_find_base_term (rtx op)
9399 {
9400 rtx base;
9401
9402 base = op;
9403 if (GET_CODE (base) == CONST)
9404 base = XEXP (base, 0);
9405 if (GET_CODE (base) == PLUS)
9406 base = XEXP (base, 0);
9407 if (GET_CODE (base) == UNSPEC)
9408 switch (XINT (base, 1))
9409 {
9410 case UNSPEC_TOCREL:
9411 case UNSPEC_MACHOPIC_OFFSET:
9412 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9413 for aliasing purposes. */
9414 return XVECEXP (base, 0, 0);
9415 }
9416
9417 return op;
9418 }
9419
9420 /* More elaborate version of recog's offsettable_memref_p predicate
9421 that works around the ??? note of rs6000_mode_dependent_address.
9422 In particular it accepts
9423
9424 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9425
9426 in 32-bit mode, that the recog predicate rejects. */
9427
9428 static bool
9429 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9430 {
9431 bool worst_case;
9432
9433 if (!MEM_P (op))
9434 return false;
9435
9436 /* First mimic offsettable_memref_p. */
9437 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9438 return true;
9439
9440 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9441 the latter predicate knows nothing about the mode of the memory
9442 reference and, therefore, assumes that it is the largest supported
9443 mode (TFmode). As a consequence, legitimate offsettable memory
9444 references are rejected. rs6000_legitimate_offset_address_p contains
9445 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9446 at least with a little bit of help here given that we know the
9447 actual registers used. */
9448 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9449 || GET_MODE_SIZE (reg_mode) == 4);
9450 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9451 strict, worst_case);
9452 }
9453
9454 /* Determine the reassociation width to be used in reassociate_bb.
9455 This takes into account how many parallel operations we
9456 can actually do of a given type, and also the latency.
9457 P8:
9458 int add/sub 6/cycle
9459 mul 2/cycle
9460 vect add/sub/mul 2/cycle
9461 fp add/sub/mul 2/cycle
9462 dfp 1/cycle
9463 */
9464
9465 static int
9466 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9467 machine_mode mode)
9468 {
9469 switch (rs6000_tune)
9470 {
9471 case PROCESSOR_POWER8:
9472 case PROCESSOR_POWER9:
9473 if (DECIMAL_FLOAT_MODE_P (mode))
9474 return 1;
9475 if (VECTOR_MODE_P (mode))
9476 return 4;
9477 if (INTEGRAL_MODE_P (mode))
9478 return 1;
9479 if (FLOAT_MODE_P (mode))
9480 return 4;
9481 break;
9482 default:
9483 break;
9484 }
9485 return 1;
9486 }
9487
9488 /* Change register usage conditional on target flags. */
9489 static void
9490 rs6000_conditional_register_usage (void)
9491 {
9492 int i;
9493
9494 if (TARGET_DEBUG_TARGET)
9495 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9496
9497 /* Set MQ register fixed (already call_used) so that it will not be
9498 allocated. */
9499 fixed_regs[64] = 1;
9500
9501 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9502 if (TARGET_64BIT)
9503 fixed_regs[13] = call_used_regs[13]
9504 = call_really_used_regs[13] = 1;
9505
9506 /* Conditionally disable FPRs. */
9507 if (TARGET_SOFT_FLOAT)
9508 for (i = 32; i < 64; i++)
9509 fixed_regs[i] = call_used_regs[i]
9510 = call_really_used_regs[i] = 1;
9511
9512 /* The TOC register is not killed across calls in a way that is
9513 visible to the compiler. */
9514 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9515 call_really_used_regs[2] = 0;
9516
9517 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9518 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9519
9520 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9521 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9522 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9523 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9524
9525 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9526 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9527 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9528 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9529
9530 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9531 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9532 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9533
9534 if (!TARGET_ALTIVEC && !TARGET_VSX)
9535 {
9536 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9537 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9538 call_really_used_regs[VRSAVE_REGNO] = 1;
9539 }
9540
9541 if (TARGET_ALTIVEC || TARGET_VSX)
9542 global_regs[VSCR_REGNO] = 1;
9543
9544 if (TARGET_ALTIVEC_ABI)
9545 {
9546 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9547 call_used_regs[i] = call_really_used_regs[i] = 1;
9548
9549 /* AIX reserves VR20:31 in non-extended ABI mode. */
9550 if (TARGET_XCOFF)
9551 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9552 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9553 }
9554 }
9555
9556 \f
9557 /* Output insns to set DEST equal to the constant SOURCE as a series of
9558 lis, ori and shl instructions and return TRUE. */
9559
9560 bool
9561 rs6000_emit_set_const (rtx dest, rtx source)
9562 {
9563 machine_mode mode = GET_MODE (dest);
9564 rtx temp, set;
9565 rtx_insn *insn;
9566 HOST_WIDE_INT c;
9567
9568 gcc_checking_assert (CONST_INT_P (source));
9569 c = INTVAL (source);
9570 switch (mode)
9571 {
9572 case E_QImode:
9573 case E_HImode:
9574 emit_insn (gen_rtx_SET (dest, source));
9575 return true;
9576
9577 case E_SImode:
9578 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9579
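      /* Illustrative: c = 0x12345678 is synthesized as
	   lis rT,0x1234      # c & ~0xffff
	   ori rD,rT,0x5678   # rT | (c & 0xffff)  */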
9580 emit_insn (gen_rtx_SET (copy_rtx (temp),
9581 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9582 emit_insn (gen_rtx_SET (dest,
9583 gen_rtx_IOR (SImode, copy_rtx (temp),
9584 GEN_INT (c & 0xffff))));
9585 break;
9586
9587 case E_DImode:
9588 if (!TARGET_POWERPC64)
9589 {
9590 rtx hi, lo;
9591
9592 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9593 DImode);
9594 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9595 DImode);
9596 emit_move_insn (hi, GEN_INT (c >> 32));
9597 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9598 emit_move_insn (lo, GEN_INT (c));
9599 }
9600 else
9601 rs6000_emit_set_long_const (dest, c);
9602 break;
9603
9604 default:
9605 gcc_unreachable ();
9606 }
9607
9608 insn = get_last_insn ();
9609 set = single_set (insn);
9610 if (! CONSTANT_P (SET_SRC (set)))
9611 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9612
9613 return true;
9614 }
9615
9616 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9617 Output insns to set DEST equal to the constant C as a series of
9618 lis, ori and shl instructions. */
9619
9620 static void
9621 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9622 {
9623 rtx temp;
9624 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9625
9626 ud1 = c & 0xffff;
9627 c = c >> 16;
9628 ud2 = c & 0xffff;
9629 c = c >> 16;
9630 ud3 = c & 0xffff;
9631 c = c >> 16;
9632 ud4 = c & 0xffff;
9633
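  /* Illustrative worked example: c = 0x123456789abcdef0 splits into
     ud4 = 0x1234, ud3 = 0x5678, ud2 = 0x9abc, ud1 = 0xdef0, and the
     general case at the end emits the equivalent of
	lis   rT,0x1234
	ori   rT,rT,0x5678
	sldi  rT,rT,32
	oris  rT,rT,0x9abc
	ori   rD,rT,0xdef0  */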
9634 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9635 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9636 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9637
9638 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9639 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9640 {
9641 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9642
9643 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9644 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9645 if (ud1 != 0)
9646 emit_move_insn (dest,
9647 gen_rtx_IOR (DImode, copy_rtx (temp),
9648 GEN_INT (ud1)));
9649 }
9650 else if (ud3 == 0 && ud4 == 0)
9651 {
9652 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9653
9654 gcc_assert (ud2 & 0x8000);
9655 emit_move_insn (copy_rtx (temp),
9656 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9657 if (ud1 != 0)
9658 emit_move_insn (copy_rtx (temp),
9659 gen_rtx_IOR (DImode, copy_rtx (temp),
9660 GEN_INT (ud1)));
9661 emit_move_insn (dest,
9662 gen_rtx_ZERO_EXTEND (DImode,
9663 gen_lowpart (SImode,
9664 copy_rtx (temp))));
9665 }
9666 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9667 || (ud4 == 0 && ! (ud3 & 0x8000)))
9668 {
9669 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9670
9671 emit_move_insn (copy_rtx (temp),
9672 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9673 if (ud2 != 0)
9674 emit_move_insn (copy_rtx (temp),
9675 gen_rtx_IOR (DImode, copy_rtx (temp),
9676 GEN_INT (ud2)));
9677 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9678 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9679 GEN_INT (16)));
9680 if (ud1 != 0)
9681 emit_move_insn (dest,
9682 gen_rtx_IOR (DImode, copy_rtx (temp),
9683 GEN_INT (ud1)));
9684 }
9685 else
9686 {
9687 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9688
9689 emit_move_insn (copy_rtx (temp),
9690 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9691 if (ud3 != 0)
9692 emit_move_insn (copy_rtx (temp),
9693 gen_rtx_IOR (DImode, copy_rtx (temp),
9694 GEN_INT (ud3)));
9695
9696 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9697 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9698 GEN_INT (32)));
9699 if (ud2 != 0)
9700 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9701 gen_rtx_IOR (DImode, copy_rtx (temp),
9702 GEN_INT (ud2 << 16)));
9703 if (ud1 != 0)
9704 emit_move_insn (dest,
9705 gen_rtx_IOR (DImode, copy_rtx (temp),
9706 GEN_INT (ud1)));
9707 }
9708 }
9709
9710 /* Helper for the following. Get rid of [r+r] memory refs
9711 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9712
9713 static void
9714 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9715 {
9716 if (MEM_P (operands[0])
9717 && !REG_P (XEXP (operands[0], 0))
9718 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9719 GET_MODE (operands[0]), false))
9720 operands[0]
9721 = replace_equiv_address (operands[0],
9722 copy_addr_to_reg (XEXP (operands[0], 0)));
9723
9724 if (MEM_P (operands[1])
9725 && !REG_P (XEXP (operands[1], 0))
9726 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9727 GET_MODE (operands[1]), false))
9728 operands[1]
9729 = replace_equiv_address (operands[1],
9730 copy_addr_to_reg (XEXP (operands[1], 0)));
9731 }
9732
9733 /* Generate a vector of constants to permute MODE for a little-endian
9734 storage operation by swapping the two halves of a vector. */
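/* E.g. (illustrative): for V4SImode the result is the selector
   { 2, 3, 0, 1 }, and for V16QImode it is { 8, ..., 15, 0, ..., 7 }.  */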
9735 static rtvec
9736 rs6000_const_vec (machine_mode mode)
9737 {
9738 int i, subparts;
9739 rtvec v;
9740
9741 switch (mode)
9742 {
9743 case E_V1TImode:
9744 subparts = 1;
9745 break;
9746 case E_V2DFmode:
9747 case E_V2DImode:
9748 subparts = 2;
9749 break;
9750 case E_V4SFmode:
9751 case E_V4SImode:
9752 subparts = 4;
9753 break;
9754 case E_V8HImode:
9755 subparts = 8;
9756 break;
9757 case E_V16QImode:
9758 subparts = 16;
9759 break;
9760 default:
9761 gcc_unreachable();
9762 }
9763
9764 v = rtvec_alloc (subparts);
9765
9766 for (i = 0; i < subparts / 2; ++i)
9767 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9768 for (i = subparts / 2; i < subparts; ++i)
9769 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9770
9771 return v;
9772 }
9773
9774 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9775 store operation. */
9776 void
9777 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9778 {
9779 /* Scalar permutations are easier to express in integer modes rather than
9780 floating-point modes, so cast them here. We use V1TImode instead
9781 of TImode to ensure that the values don't go through GPRs. */
9782 if (FLOAT128_VECTOR_P (mode))
9783 {
9784 dest = gen_lowpart (V1TImode, dest);
9785 source = gen_lowpart (V1TImode, source);
9786 mode = V1TImode;
9787 }
9788
9789 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9790 scalar. */
9791 if (mode == TImode || mode == V1TImode)
9792 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9793 GEN_INT (64))));
9794 else
9795 {
9796 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9797 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9798 }
9799 }
9800
9801 /* Emit a little-endian load from vector memory location SOURCE to VSX
9802 register DEST in mode MODE. The load is done with two permuting
9803 	   insns that represent an lxvd2x and xxpermdi.  */
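/* Illustrative: on a little-endian target a V4SImode load expands to
     lxvd2x   vsxT,0,rADDR     # doubleword-swapped load
     xxpermdi vsxD,vsxT,vsxT,2 # swap the doublewords back  */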
9804 void
9805 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9806 {
9807 	  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9808 V1TImode). */
9809 if (mode == TImode || mode == V1TImode)
9810 {
9811 mode = V2DImode;
9812 dest = gen_lowpart (V2DImode, dest);
9813 source = adjust_address (source, V2DImode, 0);
9814 }
9815
9816 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9817 rs6000_emit_le_vsx_permute (tmp, source, mode);
9818 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9819 }
9820
9821 /* Emit a little-endian store to vector memory location DEST from VSX
9822 register SOURCE in mode MODE. The store is done with two permuting
9823 	   insns that represent an xxpermdi and an stxvd2x.  */
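/* Illustrative: the mirror image of the load case, e.g.
     xxpermdi vsxT,vsxS,vsxS,2 # swap the doublewords
     stxvd2x  vsxT,0,rADDR     # doubleword-swapped store  */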
9824 void
9825 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9826 {
9827 /* This should never be called during or after LRA, because it does
9828 not re-permute the source register. It is intended only for use
9829 during expand. */
9830 gcc_assert (!lra_in_progress && !reload_completed);
9831
9832 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9833 V1TImode). */
9834 if (mode == TImode || mode == V1TImode)
9835 {
9836 mode = V2DImode;
9837 dest = adjust_address (dest, V2DImode, 0);
9838 source = gen_lowpart (V2DImode, source);
9839 }
9840
9841 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9842 rs6000_emit_le_vsx_permute (tmp, source, mode);
9843 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9844 }
9845
9846 /* Emit a sequence representing a little-endian VSX load or store,
9847 moving data from SOURCE to DEST in mode MODE. This is done
9848 separately from rs6000_emit_move to ensure it is called only
9849 during expand. LE VSX loads and stores introduced later are
9850 handled with a split. The expand-time RTL generation allows
9851 us to optimize away redundant pairs of register-permutes. */
9852 void
9853 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9854 {
9855 gcc_assert (!BYTES_BIG_ENDIAN
9856 && VECTOR_MEM_VSX_P (mode)
9857 && !TARGET_P9_VECTOR
9858 && !gpr_or_gpr_p (dest, source)
9859 && (MEM_P (source) ^ MEM_P (dest)));
9860
9861 if (MEM_P (source))
9862 {
9863 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9864 rs6000_emit_le_vsx_load (dest, source, mode);
9865 }
9866 else
9867 {
9868 if (!REG_P (source))
9869 source = force_reg (mode, source);
9870 rs6000_emit_le_vsx_store (dest, source, mode);
9871 }
9872 }
9873
9874 /* Return whether an SFmode or SImode move can be done without converting one
9875 	   mode to another.  This arises when we have:
9876
9877 (SUBREG:SF (REG:SI ...))
9878 (SUBREG:SI (REG:SF ...))
9879
9880 and one of the values is in a floating point/vector register, where SFmode
9881 scalars are stored in DFmode format. */
9882
9883 bool
9884 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9885 {
9886 if (TARGET_ALLOW_SF_SUBREG)
9887 return true;
9888
9889 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9890 return true;
9891
9892 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9893 return true;
9894
9895 	  /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
9896 if (SUBREG_P (dest))
9897 {
9898 rtx dest_subreg = SUBREG_REG (dest);
9899 rtx src_subreg = SUBREG_REG (src);
9900 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9901 }
9902
9903 return false;
9904 }
9905
9906
9907 /* Helper function to change moves with:
9908
9909 (SUBREG:SF (REG:SI)) and
9910 (SUBREG:SI (REG:SF))
9911
9912 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9913 values are stored as DFmode values in the VSX registers. We need to convert
9914 the bits before we can use a direct move or operate on the bits in the
9915 vector register as an integer type.
9916
9917 	   Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))).  */
9918
9919 static bool
9920 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9921 {
9922 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9923 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9924 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9925 {
9926 rtx inner_source = SUBREG_REG (source);
9927 machine_mode inner_mode = GET_MODE (inner_source);
9928
9929 if (mode == SImode && inner_mode == SFmode)
9930 {
9931 emit_insn (gen_movsi_from_sf (dest, inner_source));
9932 return true;
9933 }
9934
9935 if (mode == SFmode && inner_mode == SImode)
9936 {
9937 emit_insn (gen_movsf_from_si (dest, inner_source));
9938 return true;
9939 }
9940 }
9941
9942 return false;
9943 }
9944
9945 /* Emit a move from SOURCE to DEST in mode MODE. */
9946 void
9947 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9948 {
9949 rtx operands[2];
9950 operands[0] = dest;
9951 operands[1] = source;
9952
9953 if (TARGET_DEBUG_ADDR)
9954 {
9955 fprintf (stderr,
9956 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9957 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9958 GET_MODE_NAME (mode),
9959 lra_in_progress,
9960 reload_completed,
9961 can_create_pseudo_p ());
9962 debug_rtx (dest);
9963 fprintf (stderr, "source:\n");
9964 debug_rtx (source);
9965 }
9966
9967 /* Check that we get CONST_WIDE_INT only when we should. */
9968 if (CONST_WIDE_INT_P (operands[1])
9969 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9970 gcc_unreachable ();
9971
9972 #ifdef HAVE_AS_GNU_ATTRIBUTE
9973 /* If we use a long double type, set the flags in .gnu_attribute that say
9974 what the long double type is. This is to allow the linker's warning
9975 message for the wrong long double to be useful, even if the function does
9976 	     not do a call (for example, doing a 128-bit add on power9 if the long
9977 	     double type is IEEE 128-bit).  Do not set this if __ibm128 or __float128
9978 	     are used when they aren't the default long double type.  */
9979 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9980 {
9981 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9982 rs6000_passes_float = rs6000_passes_long_double = true;
9983
9984 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9985 rs6000_passes_float = rs6000_passes_long_double = true;
9986 }
9987 #endif
9988
9989 /* See if we need to special case SImode/SFmode SUBREG moves. */
9990 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9991 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9992 return;
9993
9994 /* Check if GCC is setting up a block move that will end up using FP
9995 registers as temporaries. We must make sure this is acceptable. */
9996 if (MEM_P (operands[0])
9997 && MEM_P (operands[1])
9998 && mode == DImode
9999 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
10000 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
10001 && ! (rs6000_slow_unaligned_access (SImode,
10002 (MEM_ALIGN (operands[0]) > 32
10003 ? 32 : MEM_ALIGN (operands[0])))
10004 || rs6000_slow_unaligned_access (SImode,
10005 (MEM_ALIGN (operands[1]) > 32
10006 ? 32 : MEM_ALIGN (operands[1]))))
10007 && ! MEM_VOLATILE_P (operands [0])
10008 && ! MEM_VOLATILE_P (operands [1]))
10009 {
10010 emit_move_insn (adjust_address (operands[0], SImode, 0),
10011 adjust_address (operands[1], SImode, 0));
10012 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10013 adjust_address (copy_rtx (operands[1]), SImode, 4));
10014 return;
10015 }
10016
10017 if (can_create_pseudo_p () && MEM_P (operands[0])
10018 && !gpc_reg_operand (operands[1], mode))
10019 operands[1] = force_reg (mode, operands[1]);
10020
10021 /* Recognize the case where operand[1] is a reference to thread-local
10022 data and load its address to a register. */
10023 if (tls_referenced_p (operands[1]))
10024 {
10025 enum tls_model model;
10026 rtx tmp = operands[1];
10027 rtx addend = NULL;
10028
10029 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10030 {
10031 addend = XEXP (XEXP (tmp, 0), 1);
10032 tmp = XEXP (XEXP (tmp, 0), 0);
10033 }
10034
10035 gcc_assert (SYMBOL_REF_P (tmp));
10036 model = SYMBOL_REF_TLS_MODEL (tmp);
10037 gcc_assert (model != 0);
10038
10039 tmp = rs6000_legitimize_tls_address (tmp, model);
10040 if (addend)
10041 {
10042 tmp = gen_rtx_PLUS (mode, tmp, addend);
10043 tmp = force_operand (tmp, operands[0]);
10044 }
10045 operands[1] = tmp;
10046 }
10047
10048 /* 128-bit constant floating-point values on Darwin should really be loaded
10049 as two parts. However, this premature splitting is a problem when DFmode
10050 values can go into Altivec registers. */
10051 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
10052 && !reg_addr[DFmode].scalar_in_vmx_p)
10053 {
10054 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10055 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10056 DFmode);
10057 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10058 GET_MODE_SIZE (DFmode)),
10059 simplify_gen_subreg (DFmode, operands[1], mode,
10060 GET_MODE_SIZE (DFmode)),
10061 DFmode);
10062 return;
10063 }
10064
10065 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10066 p1:SD) if p1 is not of floating point class and p0 is spilled as
10067 we can have no analogous movsd_store for this. */
10068 if (lra_in_progress && mode == DDmode
10069 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10070 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10071 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
10072 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10073 {
10074 enum reg_class cl;
10075 int regno = REGNO (SUBREG_REG (operands[1]));
10076
10077 if (!HARD_REGISTER_NUM_P (regno))
10078 {
10079 cl = reg_preferred_class (regno);
10080 regno = reg_renumber[regno];
10081 if (regno < 0)
10082 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10083 }
10084 if (regno >= 0 && ! FP_REGNO_P (regno))
10085 {
10086 mode = SDmode;
10087 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10088 operands[1] = SUBREG_REG (operands[1]);
10089 }
10090 }
10091 if (lra_in_progress
10092 && mode == SDmode
10093 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10094 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10095 && (REG_P (operands[1])
10096 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
10097 {
10098 int regno = reg_or_subregno (operands[1]);
10099 enum reg_class cl;
10100
10101 if (!HARD_REGISTER_NUM_P (regno))
10102 {
10103 cl = reg_preferred_class (regno);
10104 gcc_assert (cl != NO_REGS);
10105 regno = reg_renumber[regno];
10106 if (regno < 0)
10107 regno = ira_class_hard_regs[cl][0];
10108 }
10109 if (FP_REGNO_P (regno))
10110 {
10111 if (GET_MODE (operands[0]) != DDmode)
10112 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10113 emit_insn (gen_movsd_store (operands[0], operands[1]));
10114 }
10115 else if (INT_REGNO_P (regno))
10116 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10117 else
10118 gcc_unreachable();
10119 return;
10120 }
10121 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10122 	     p1:DD)) if p0 is not of floating point class and p1 is spilled as
10123 we can have no analogous movsd_load for this. */
10124 if (lra_in_progress && mode == DDmode
10125 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
10126 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10127 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10128 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10129 {
10130 enum reg_class cl;
10131 int regno = REGNO (SUBREG_REG (operands[0]));
10132
10133 if (!HARD_REGISTER_NUM_P (regno))
10134 {
10135 cl = reg_preferred_class (regno);
10136 regno = reg_renumber[regno];
10137 if (regno < 0)
10138 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10139 }
10140 if (regno >= 0 && ! FP_REGNO_P (regno))
10141 {
10142 mode = SDmode;
10143 operands[0] = SUBREG_REG (operands[0]);
10144 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10145 }
10146 }
10147 if (lra_in_progress
10148 && mode == SDmode
10149 && (REG_P (operands[0])
10150 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
10151 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10152 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10153 {
10154 int regno = reg_or_subregno (operands[0]);
10155 enum reg_class cl;
10156
10157 if (!HARD_REGISTER_NUM_P (regno))
10158 {
10159 cl = reg_preferred_class (regno);
10160 gcc_assert (cl != NO_REGS);
10161 regno = reg_renumber[regno];
10162 if (regno < 0)
10163 regno = ira_class_hard_regs[cl][0];
10164 }
10165 if (FP_REGNO_P (regno))
10166 {
10167 if (GET_MODE (operands[1]) != DDmode)
10168 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10169 emit_insn (gen_movsd_load (operands[0], operands[1]));
10170 }
10171 else if (INT_REGNO_P (regno))
10172 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10173 else
10174 gcc_unreachable();
10175 return;
10176 }
10177
10178 /* FIXME: In the long term, this switch statement should go away
10179 and be replaced by a sequence of tests based on things like
10180 mode == Pmode. */
10181 switch (mode)
10182 {
10183 case E_HImode:
10184 case E_QImode:
10185 if (CONSTANT_P (operands[1])
10186 && !CONST_INT_P (operands[1]))
10187 operands[1] = force_const_mem (mode, operands[1]);
10188 break;
10189
10190 case E_TFmode:
10191 case E_TDmode:
10192 case E_IFmode:
10193 case E_KFmode:
10194 if (FLOAT128_2REG_P (mode))
10195 rs6000_eliminate_indexed_memrefs (operands);
10196 /* fall through */
10197
10198 case E_DFmode:
10199 case E_DDmode:
10200 case E_SFmode:
10201 case E_SDmode:
10202 if (CONSTANT_P (operands[1])
10203 && ! easy_fp_constant (operands[1], mode))
10204 operands[1] = force_const_mem (mode, operands[1]);
10205 break;
10206
10207 case E_V16QImode:
10208 case E_V8HImode:
10209 case E_V4SFmode:
10210 case E_V4SImode:
10211 case E_V2DFmode:
10212 case E_V2DImode:
10213 case E_V1TImode:
10214 if (CONSTANT_P (operands[1])
10215 && !easy_vector_constant (operands[1], mode))
10216 operands[1] = force_const_mem (mode, operands[1]);
10217 break;
10218
10219 case E_SImode:
10220 case E_DImode:
10221 	      /* Use the default pattern for the address of ELF small data.  */
10222 if (TARGET_ELF
10223 && mode == Pmode
10224 && DEFAULT_ABI == ABI_V4
10225 && (SYMBOL_REF_P (operands[1])
10226 || GET_CODE (operands[1]) == CONST)
10227 && small_data_operand (operands[1], mode))
10228 {
10229 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10230 return;
10231 }
10232
10233 if (DEFAULT_ABI == ABI_V4
10234 && mode == Pmode && mode == SImode
10235 && flag_pic == 1 && got_operand (operands[1], mode))
10236 {
10237 emit_insn (gen_movsi_got (operands[0], operands[1]));
10238 return;
10239 }
10240
10241 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10242 && TARGET_NO_TOC
10243 && ! flag_pic
10244 && mode == Pmode
10245 && CONSTANT_P (operands[1])
10246 && GET_CODE (operands[1]) != HIGH
10247 && !CONST_INT_P (operands[1]))
10248 {
10249 rtx target = (!can_create_pseudo_p ()
10250 ? operands[0]
10251 : gen_reg_rtx (mode));
10252
10253 /* If this is a function address on -mcall-aixdesc,
10254 convert it to the address of the descriptor. */
10255 if (DEFAULT_ABI == ABI_AIX
10256 && SYMBOL_REF_P (operands[1])
10257 && XSTR (operands[1], 0)[0] == '.')
10258 {
10259 const char *name = XSTR (operands[1], 0);
10260 rtx new_ref;
10261 while (*name == '.')
10262 name++;
10263 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10264 CONSTANT_POOL_ADDRESS_P (new_ref)
10265 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10266 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10267 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10268 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10269 operands[1] = new_ref;
10270 }
10271
10272 if (DEFAULT_ABI == ABI_DARWIN)
10273 {
10274 #if TARGET_MACHO
10275 if (MACHO_DYNAMIC_NO_PIC_P)
10276 {
10277 /* Take care of any required data indirection. */
10278 operands[1] = rs6000_machopic_legitimize_pic_address (
10279 operands[1], mode, operands[0]);
10280 if (operands[0] != operands[1])
10281 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10282 return;
10283 }
10284 #endif
10285 emit_insn (gen_macho_high (target, operands[1]));
10286 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10287 return;
10288 }
10289
10290 emit_insn (gen_elf_high (target, operands[1]));
10291 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10292 return;
10293 }
10294
10295 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10296 and we have put it in the TOC, we just need to make a TOC-relative
10297 reference to it. */
10298 if (TARGET_TOC
10299 && SYMBOL_REF_P (operands[1])
10300 && use_toc_relative_ref (operands[1], mode))
10301 operands[1] = create_TOC_reference (operands[1], operands[0]);
10302 else if (mode == Pmode
10303 && CONSTANT_P (operands[1])
10304 && GET_CODE (operands[1]) != HIGH
10305 && ((REG_P (operands[0])
10306 && FP_REGNO_P (REGNO (operands[0])))
10307 || !CONST_INT_P (operands[1])
10308 || (num_insns_constant (operands[1], mode)
10309 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10310 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10311 && (TARGET_CMODEL == CMODEL_SMALL
10312 || can_create_pseudo_p ()
10313 || (REG_P (operands[0])
10314 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10315 {
10316
10317 #if TARGET_MACHO
10318 /* Darwin uses a special PIC legitimizer. */
10319 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10320 {
10321 operands[1] =
10322 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10323 operands[0]);
10324 if (operands[0] != operands[1])
10325 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10326 return;
10327 }
10328 #endif
10329
10330 /* If we are to limit the number of things we put in the TOC and
10331 this is a symbol plus a constant we can add in one insn,
10332 just put the symbol in the TOC and add the constant. */
10333 if (GET_CODE (operands[1]) == CONST
10334 && TARGET_NO_SUM_IN_TOC
10335 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10336 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10337 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10338 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
10339 && ! side_effects_p (operands[0]))
10340 {
10341 rtx sym =
10342 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10343 rtx other = XEXP (XEXP (operands[1], 0), 1);
10344
10345 sym = force_reg (mode, sym);
10346 emit_insn (gen_add3_insn (operands[0], sym, other));
10347 return;
10348 }
10349
10350 operands[1] = force_const_mem (mode, operands[1]);
10351
10352 if (TARGET_TOC
10353 && SYMBOL_REF_P (XEXP (operands[1], 0))
10354 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10355 {
10356 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10357 operands[0]);
10358 operands[1] = gen_const_mem (mode, tocref);
10359 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10360 }
10361 }
10362 break;
10363
10364 case E_TImode:
10365 if (!VECTOR_MEM_VSX_P (TImode))
10366 rs6000_eliminate_indexed_memrefs (operands);
10367 break;
10368
10369 case E_PTImode:
10370 rs6000_eliminate_indexed_memrefs (operands);
10371 break;
10372
10373 default:
10374 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10375 }
10376
10377 /* Above, we may have called force_const_mem which may have returned
10378 an invalid address. If we can, fix this up; otherwise, reload will
10379 have to deal with it. */
10380 if (MEM_P (operands[1]))
10381 operands[1] = validize_mem (operands[1]);
10382
10383 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10384 }
10385 \f
10386 /* Nonzero if we can use a floating-point register to pass this arg. */
10387 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10388 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10389 && (CUM)->fregno <= FP_ARG_MAX_REG \
10390 && TARGET_HARD_FLOAT)
10391
10392 /* Nonzero if we can use an AltiVec register to pass this arg. */
10393 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10394 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10395 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10396 && TARGET_ALTIVEC_ABI \
10397 && (NAMED))
10398
10399 /* Walk down the type tree of TYPE counting consecutive base elements.
10400 If *MODEP is VOIDmode, then set it to the first valid floating point
10401 or vector type. If a non-floating point or vector type is found, or
10402 if a floating point or vector type that doesn't match a non-VOIDmode
10403 *MODEP is found, then return -1, otherwise return the count in the
10404 sub-tree. */
10405
10406 static int
10407 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10408 {
10409 machine_mode mode;
10410 HOST_WIDE_INT size;
10411
10412 switch (TREE_CODE (type))
10413 {
10414 case REAL_TYPE:
10415 mode = TYPE_MODE (type);
10416 if (!SCALAR_FLOAT_MODE_P (mode))
10417 return -1;
10418
10419 if (*modep == VOIDmode)
10420 *modep = mode;
10421
10422 if (*modep == mode)
10423 return 1;
10424
10425 break;
10426
10427 case COMPLEX_TYPE:
10428 mode = TYPE_MODE (TREE_TYPE (type));
10429 if (!SCALAR_FLOAT_MODE_P (mode))
10430 return -1;
10431
10432 if (*modep == VOIDmode)
10433 *modep = mode;
10434
10435 if (*modep == mode)
10436 return 2;
10437
10438 break;
10439
10440 case VECTOR_TYPE:
10441 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10442 return -1;
10443
10444 /* Use V4SImode as representative of all 128-bit vector types. */
10445 size = int_size_in_bytes (type);
10446 switch (size)
10447 {
10448 case 16:
10449 mode = V4SImode;
10450 break;
10451 default:
10452 return -1;
10453 }
10454
10455 if (*modep == VOIDmode)
10456 *modep = mode;
10457
10458 /* Vector modes are considered to be opaque: two vectors are
10459 equivalent for the purposes of being homogeneous aggregates
10460 if they are the same size. */
10461 if (*modep == mode)
10462 return 1;
10463
10464 break;
10465
10466 case ARRAY_TYPE:
10467 {
10468 int count;
10469 tree index = TYPE_DOMAIN (type);
10470
10471 /* Can't handle incomplete types nor sizes that are not
10472 fixed. */
10473 if (!COMPLETE_TYPE_P (type)
10474 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10475 return -1;
10476
10477 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10478 if (count == -1
10479 || !index
10480 || !TYPE_MAX_VALUE (index)
10481 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10482 || !TYPE_MIN_VALUE (index)
10483 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10484 || count < 0)
10485 return -1;
10486
10487 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10488 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10489
10490 /* There must be no padding. */
10491 if (wi::to_wide (TYPE_SIZE (type))
10492 != count * GET_MODE_BITSIZE (*modep))
10493 return -1;
10494
10495 return count;
10496 }
10497
10498 case RECORD_TYPE:
10499 {
10500 int count = 0;
10501 int sub_count;
10502 tree field;
10503
10504 /* Can't handle incomplete types nor sizes that are not
10505 fixed. */
10506 if (!COMPLETE_TYPE_P (type)
10507 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10508 return -1;
10509
10510 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10511 {
10512 if (TREE_CODE (field) != FIELD_DECL)
10513 continue;
10514
10515 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10516 if (sub_count < 0)
10517 return -1;
10518 count += sub_count;
10519 }
10520
10521 /* There must be no padding. */
10522 if (wi::to_wide (TYPE_SIZE (type))
10523 != count * GET_MODE_BITSIZE (*modep))
10524 return -1;
10525
10526 return count;
10527 }
10528
10529 case UNION_TYPE:
10530 case QUAL_UNION_TYPE:
10531 {
10532 /* These aren't very interesting except in a degenerate case. */
10533 int count = 0;
10534 int sub_count;
10535 tree field;
10536
10537 /* Can't handle incomplete types nor sizes that are not
10538 fixed. */
10539 if (!COMPLETE_TYPE_P (type)
10540 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10541 return -1;
10542
10543 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10544 {
10545 if (TREE_CODE (field) != FIELD_DECL)
10546 continue;
10547
10548 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10549 if (sub_count < 0)
10550 return -1;
10551 count = count > sub_count ? count : sub_count;
10552 }
10553
10554 /* There must be no padding. */
10555 if (wi::to_wide (TYPE_SIZE (type))
10556 != count * GET_MODE_BITSIZE (*modep))
10557 return -1;
10558
10559 return count;
10560 }
10561
10562 default:
10563 break;
10564 }
10565
10566 return -1;
10567 }
10568
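/* For illustration, the walk above yields (assuming 64-bit doubles):

struct { double x, y; } -> DFmode, count 2
struct { _Complex double z; } -> DFmode, count 2
struct { double a[3]; } -> DFmode, count 3
struct { double d; int i; } -> -1 (mixed element types)
union { double d; double a[2]; } -> DFmode, count 2 (the maximum)

since a RECORD_TYPE sums the counts of its fields while a union takes
the maximum over its members. */
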
10569 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10570 float or vector aggregate that shall be passed in FP/vector registers
10571 according to the ELFv2 ABI, return the homogeneous element mode in
10572 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10573
10574 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10575
10576 static bool
10577 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10578 machine_mode *elt_mode,
10579 int *n_elts)
10580 {
10581 /* Note that we do not accept complex types at the top level as
10582 homogeneous aggregates; these types are handled via the
10583 targetm.calls.split_complex_arg mechanism. Complex types
10584 can be elements of homogeneous aggregates, however. */
10585 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10586 && AGGREGATE_TYPE_P (type))
10587 {
10588 machine_mode field_mode = VOIDmode;
10589 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10590
10591 if (field_count > 0)
10592 {
10593 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10594 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10595
10596 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10597 up to AGGR_ARG_NUM_REG registers. */
10598 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10599 {
10600 if (elt_mode)
10601 *elt_mode = field_mode;
10602 if (n_elts)
10603 *n_elts = field_count;
10604 return true;
10605 }
10606 }
10607 }
10608
10609 if (elt_mode)
10610 *elt_mode = mode;
10611 if (n_elts)
10612 *n_elts = 1;
10613 return false;
10614 }
10615
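/* For example, under ELFv2 a struct of eight doubles (field_size 8,
reg_size 8) just fits in AGGR_ARG_NUM_REG registers and is accepted,
while a struct of nine doubles is rejected; a struct of two 16-byte
vectors (reg_size 16) likewise qualifies. This assumes
AGGR_ARG_NUM_REG is 8, its usual value. */
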
10616 /* Return a nonzero value to indicate that the function value should be
10617 returned in memory, just as large structures always are.  TYPE will be
10618 the data type of the value, and FNTYPE will be the type of the
10619 function doing the returning, or @code{NULL} for libcalls.
10620
10621 The AIX ABI for the RS/6000 specifies that all structures are
10622 returned in memory. The Darwin ABI does the same.
10623
10624 For the Darwin 64 Bit ABI, a function result can be returned in
10625 registers or in memory, depending on the size of the return data
10626 type. If it is returned in registers, the value occupies the same
10627 registers as it would if it were the first and only function
10628 argument. Otherwise, the function places its result in memory at
10629 the location pointed to by GPR3.
10630
10631 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10632 but a draft put them in memory, and GCC used to implement the draft
10633 instead of the final standard. Therefore, aix_struct_return
10634 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10635 compatibility can change DRAFT_V4_STRUCT_RET to override the
10636 default, and -m switches get the final word. See
10637 rs6000_option_override_internal for more details.
10638
10639 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10640 long double support is enabled. These values are returned in memory.
10641
10642 int_size_in_bytes returns -1 for variable size objects, which go in
10643 memory always. The cast to unsigned makes -1 > 8. */
10644
10645 static bool
10646 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10647 {
10648 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10649 if (TARGET_MACHO
10650 && rs6000_darwin64_abi
10651 && TREE_CODE (type) == RECORD_TYPE
10652 && int_size_in_bytes (type) > 0)
10653 {
10654 CUMULATIVE_ARGS valcum;
10655 rtx valret;
10656
10657 valcum.words = 0;
10658 valcum.fregno = FP_ARG_MIN_REG;
10659 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10660 /* Do a trial code generation as if this were going to be passed
10661 as an argument; if any part goes in memory, we return NULL. */
10662 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10663 if (valret)
10664 return false;
10665 /* Otherwise fall through to more conventional ABI rules. */
10666 }
10667
10668 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
10669 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10670 NULL, NULL))
10671 return false;
10672
10673 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
10674 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10675 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10676 return false;
10677
10678 if (AGGREGATE_TYPE_P (type)
10679 && (aix_struct_return
10680 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10681 return true;
10682
10683 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10684 modes only exist for GCC vector types if -maltivec. */
10685 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10686 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10687 return false;
10688
10689 /* Return synthetic vectors in memory. */
10690 if (TREE_CODE (type) == VECTOR_TYPE
10691 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10692 {
10693 static bool warned_for_return_big_vectors = false;
10694 if (!warned_for_return_big_vectors)
10695 {
10696 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10697 "non-standard ABI extension with no compatibility "
10698 "guarantee");
10699 warned_for_return_big_vectors = true;
10700 }
10701 return true;
10702 }
10703
10704 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10705 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10706 return true;
10707
10708 return false;
10709 }
10710
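/* Some concrete consequences of the rules above, assuming ELFv2:

struct { double x, y; } -- homogeneous aggregate, returned in FPRs
struct { int a, b, c; } -- 12 bytes <= 16, returned in GPRs
struct { char buf[20]; } -- larger than 16 bytes, returned in memory

whereas under the AIX ABI (aix_struct_return) all three would be
returned in memory. */
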
10711 /* Specify whether values returned in registers should be at the most
10712 significant end of a register. We want aggregates returned by
10713 value to match the way aggregates are passed to functions. */
10714
10715 static bool
10716 rs6000_return_in_msb (const_tree valtype)
10717 {
10718 return (DEFAULT_ABI == ABI_ELFv2
10719 && BYTES_BIG_ENDIAN
10720 && AGGREGATE_TYPE_P (valtype)
10721 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10722 == PAD_UPWARD));
10723 }
10724
10725 #ifdef HAVE_AS_GNU_ATTRIBUTE
10726 /* Return TRUE if a call to function FNDECL may be one that
10727 potentially affects the function calling ABI of the object file. */
10728
10729 static bool
10730 call_ABI_of_interest (tree fndecl)
10731 {
10732 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10733 {
10734 struct cgraph_node *c_node;
10735
10736 /* Libcalls are always interesting. */
10737 if (fndecl == NULL_TREE)
10738 return true;
10739
10740 /* Any call to an external function is interesting. */
10741 if (DECL_EXTERNAL (fndecl))
10742 return true;
10743
10744 /* Interesting functions that we are emitting in this object file. */
10745 c_node = cgraph_node::get (fndecl);
10746 c_node = c_node->ultimate_alias_target ();
10747 return !c_node->only_called_directly_p ();
10748 }
10749 return false;
10750 }
10751 #endif
10752
10753 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10754 for a call to a function whose data type is FNTYPE.
10755 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10756
10757 For incoming args we set the number of arguments in the prototype large
10758 so we never return a PARALLEL. */
10759
10760 void
10761 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10762 rtx libname ATTRIBUTE_UNUSED, int incoming,
10763 int libcall, int n_named_args,
10764 tree fndecl,
10765 machine_mode return_mode ATTRIBUTE_UNUSED)
10766 {
10767 static CUMULATIVE_ARGS zero_cumulative;
10768
10769 *cum = zero_cumulative;
10770 cum->words = 0;
10771 cum->fregno = FP_ARG_MIN_REG;
10772 cum->vregno = ALTIVEC_ARG_MIN_REG;
10773 cum->prototype = (fntype && prototype_p (fntype));
10774 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10775 ? CALL_LIBCALL : CALL_NORMAL);
10776 cum->sysv_gregno = GP_ARG_MIN_REG;
10777 cum->stdarg = stdarg_p (fntype);
10778 cum->libcall = libcall;
10779
10780 cum->nargs_prototype = 0;
10781 if (incoming || cum->prototype)
10782 cum->nargs_prototype = n_named_args;
10783
10784 /* Check for a longcall attribute. */
10785 if ((!fntype && rs6000_default_long_calls)
10786 || (fntype
10787 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10788 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10789 cum->call_cookie |= CALL_LONG;
10790 else if (DEFAULT_ABI != ABI_DARWIN)
10791 {
10792 bool is_local = (fndecl
10793 && !DECL_EXTERNAL (fndecl)
10794 && !DECL_WEAK (fndecl)
10795 && (*targetm.binds_local_p) (fndecl));
10796 if (is_local)
10797 ;
10798 else if (flag_plt)
10799 {
10800 if (fntype
10801 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10802 cum->call_cookie |= CALL_LONG;
10803 }
10804 else
10805 {
10806 if (!(fntype
10807 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10808 cum->call_cookie |= CALL_LONG;
10809 }
10810 }
10811
10812 if (TARGET_DEBUG_ARG)
10813 {
10814 fprintf (stderr, "\ninit_cumulative_args:");
10815 if (fntype)
10816 {
10817 tree ret_type = TREE_TYPE (fntype);
10818 fprintf (stderr, " ret code = %s,",
10819 get_tree_code_name (TREE_CODE (ret_type)));
10820 }
10821
10822 if (cum->call_cookie & CALL_LONG)
10823 fprintf (stderr, " longcall,");
10824
10825 fprintf (stderr, " proto = %d, nargs = %d\n",
10826 cum->prototype, cum->nargs_prototype);
10827 }
10828
10829 #ifdef HAVE_AS_GNU_ATTRIBUTE
10830 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10831 {
10832 cum->escapes = call_ABI_of_interest (fndecl);
10833 if (cum->escapes)
10834 {
10835 tree return_type;
10836
10837 if (fntype)
10838 {
10839 return_type = TREE_TYPE (fntype);
10840 return_mode = TYPE_MODE (return_type);
10841 }
10842 else
10843 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10844
10845 if (return_type != NULL)
10846 {
10847 if (TREE_CODE (return_type) == RECORD_TYPE
10848 && TYPE_TRANSPARENT_AGGR (return_type))
10849 {
10850 return_type = TREE_TYPE (first_field (return_type));
10851 return_mode = TYPE_MODE (return_type);
10852 }
10853 if (AGGREGATE_TYPE_P (return_type)
10854 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10855 <= 8))
10856 rs6000_returns_struct = true;
10857 }
10858 if (SCALAR_FLOAT_MODE_P (return_mode))
10859 {
10860 rs6000_passes_float = true;
10861 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10862 && (FLOAT128_IBM_P (return_mode)
10863 || FLOAT128_IEEE_P (return_mode)
10864 || (return_type != NULL
10865 && (TYPE_MAIN_VARIANT (return_type)
10866 == long_double_type_node))))
10867 rs6000_passes_long_double = true;
10868
10869 /* Note if we pass or return an IEEE 128-bit type.  We changed
10870 the mangling for these types, and we may need to make an alias
10871 with the old mangling. */
10872 if (FLOAT128_IEEE_P (return_mode))
10873 rs6000_passes_ieee128 = true;
10874 }
10875 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10876 rs6000_passes_vector = true;
10877 }
10878 }
10879 #endif
10880
10881 if (fntype
10882 && !TARGET_ALTIVEC
10883 && TARGET_ALTIVEC_ABI
10884 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10885 {
10886 error ("cannot return value in vector register because"
10887 " altivec instructions are disabled, use %qs"
10888 " to enable them", "-maltivec");
10889 }
10890 }
10891 \f
10892 /* The mode the ABI uses for a word. This is not the same as word_mode
10893 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10894
10895 static scalar_int_mode
10896 rs6000_abi_word_mode (void)
10897 {
10898 return TARGET_32BIT ? SImode : DImode;
10899 }
10900
10901 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10902 static char *
10903 rs6000_offload_options (void)
10904 {
10905 if (TARGET_64BIT)
10906 return xstrdup ("-foffload-abi=lp64");
10907 else
10908 return xstrdup ("-foffload-abi=ilp32");
10909 }
10910
10911 /* On rs6000, function arguments are promoted, as are function return
10912 values. */
10913
10914 static machine_mode
10915 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10916 machine_mode mode,
10917 int *punsignedp ATTRIBUTE_UNUSED,
10918 const_tree, int)
10919 {
10920 PROMOTE_MODE (mode, *punsignedp, type);
10921
10922 return mode;
10923 }
10924
10925 /* Return true if TYPE must be passed on the stack and not in registers. */
10926
10927 static bool
10928 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10929 {
10930 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10931 return must_pass_in_stack_var_size (mode, type);
10932 else
10933 return must_pass_in_stack_var_size_or_pad (mode, type);
10934 }
10935
10936 static inline bool
10937 is_complex_IBM_long_double (machine_mode mode)
10938 {
10939 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10940 }
10941
10942 /* Whether ABI_V4 passes MODE args to a function in floating point
10943 registers. */
10944
10945 static bool
10946 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10947 {
10948 if (!TARGET_HARD_FLOAT)
10949 return false;
10950 if (mode == DFmode)
10951 return true;
10952 if (mode == SFmode && named)
10953 return true;
10954 /* ABI_V4 passes complex IBM long double in 8 gprs.
10955 Stupid, but we can't change the ABI now. */
10956 if (is_complex_IBM_long_double (mode))
10957 return false;
10958 if (FLOAT128_2REG_P (mode))
10959 return true;
10960 if (DECIMAL_FLOAT_MODE_P (mode))
10961 return true;
10962 return false;
10963 }
10964
10965 /* Implement TARGET_FUNCTION_ARG_PADDING.
10966
10967 For the AIX ABI structs are always stored left shifted in their
10968 argument slot. */
10969
10970 static pad_direction
10971 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10972 {
10973 #ifndef AGGREGATE_PADDING_FIXED
10974 #define AGGREGATE_PADDING_FIXED 0
10975 #endif
10976 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10977 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10978 #endif
10979
10980 if (!AGGREGATE_PADDING_FIXED)
10981 {
10982 /* GCC used to pass structures of the same size as integer types as
10983 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10984 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10985 passed padded downward, except that -mstrict-align further
10986 muddied the water in that multi-component structures of 2 and 4
10987 bytes in size were passed padded upward.
10988
10989 The following arranges for best compatibility with previous
10990 versions of gcc, but removes the -mstrict-align dependency. */
10991 if (BYTES_BIG_ENDIAN)
10992 {
10993 HOST_WIDE_INT size = 0;
10994
10995 if (mode == BLKmode)
10996 {
10997 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10998 size = int_size_in_bytes (type);
10999 }
11000 else
11001 size = GET_MODE_SIZE (mode);
11002
11003 if (size == 1 || size == 2 || size == 4)
11004 return PAD_DOWNWARD;
11005 }
11006 return PAD_UPWARD;
11007 }
11008
11009 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11010 {
11011 if (type != 0 && AGGREGATE_TYPE_P (type))
11012 return PAD_UPWARD;
11013 }
11014
11015 /* Fall back to the default. */
11016 return default_function_arg_padding (mode, type);
11017 }
11018
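/* For example, on a big-endian target with !AGGREGATE_PADDING_FIXED a
2-byte struct is padded downward, occupying the most significant end
of its word just as a short would, while a 3-byte struct is padded
upward, since only sizes 1, 2 and 4 get the integer-like treatment. */
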
11019 /* If defined, a C expression that gives the alignment boundary, in bits,
11020 of an argument with the specified mode and type. If it is not defined,
11021 PARM_BOUNDARY is used for all arguments.
11022
11023 V.4 wants long longs and doubles to be double word aligned. Just
11024 testing the mode size is a boneheaded way to do this as it means
11025 that other types such as complex int are also double word aligned.
11026 However, we're stuck with this because changing the ABI might break
11027 existing library interfaces.
11028
11029 Quadword align Altivec/VSX vectors.
11030 Quadword align large synthetic vector types. */
11031
11032 static unsigned int
11033 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11034 {
11035 machine_mode elt_mode;
11036 int n_elts;
11037
11038 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11039
11040 if (DEFAULT_ABI == ABI_V4
11041 && (GET_MODE_SIZE (mode) == 8
11042 || (TARGET_HARD_FLOAT
11043 && !is_complex_IBM_long_double (mode)
11044 && FLOAT128_2REG_P (mode))))
11045 return 64;
11046 else if (FLOAT128_VECTOR_P (mode))
11047 return 128;
11048 else if (type && TREE_CODE (type) == VECTOR_TYPE
11049 && int_size_in_bytes (type) >= 8
11050 && int_size_in_bytes (type) < 16)
11051 return 64;
11052 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11053 || (type && TREE_CODE (type) == VECTOR_TYPE
11054 && int_size_in_bytes (type) >= 16))
11055 return 128;
11056
11057 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11058 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11059 -mcompat-align-parm is used. */
11060 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11061 || DEFAULT_ABI == ABI_ELFv2)
11062 && type && TYPE_ALIGN (type) > 64)
11063 {
11064 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11065 or homogeneous float/vector aggregates here. We already handled
11066 vector aggregates above, but still need to check for float here. */
11067 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11068 && !SCALAR_FLOAT_MODE_P (elt_mode));
11069
11070 /* We used to check for BLKmode instead of the above aggregate type
11071 check. Warn when this results in any difference to the ABI. */
11072 if (aggregate_p != (mode == BLKmode))
11073 {
11074 static bool warned;
11075 if (!warned && warn_psabi)
11076 {
11077 warned = true;
11078 inform (input_location,
11079 "the ABI of passing aggregates with %d-byte alignment"
11080 " has changed in GCC 5",
11081 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11082 }
11083 }
11084
11085 if (aggregate_p)
11086 return 128;
11087 }
11088
11089 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11090 implement the "aggregate type" check as a BLKmode check here; this
11091 means certain aggregate types are in fact not aligned. */
11092 if (TARGET_MACHO && rs6000_darwin64_abi
11093 && mode == BLKmode
11094 && type && TYPE_ALIGN (type) > 64)
11095 return 128;
11096
11097 return PARM_BOUNDARY;
11098 }
11099
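/* To illustrate the cases above: under ABI_V4 a double or long long
(size 8) is aligned to 64 bits; an Altivec/VSX vector, or a
homogeneous aggregate of such vectors, to 128 bits; and under ELFv2 a
struct declared __attribute__((aligned (16))) is also aligned to 128
bits in the parameter area. Everything else falls back to
PARM_BOUNDARY. */
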
11100 /* The offset in words to the start of the parameter save area. */
11101
11102 static unsigned int
11103 rs6000_parm_offset (void)
11104 {
11105 return (DEFAULT_ABI == ABI_V4 ? 2
11106 : DEFAULT_ABI == ABI_ELFv2 ? 4
11107 : 6);
11108 }
11109
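/* These word counts correspond to the fixed frame header that precedes
the parameter save area: 8 bytes (back chain and LR save) in the
32-bit V.4 frame, 4 doublewords in the ELFv2 frame, and 6 doublewords
(including the reserved and TOC slots) in the AIX/ELFv1 frame. */
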
11110 /* For a function parm of MODE and TYPE, return the starting word in
11111 the parameter area. NWORDS of the parameter area are already used. */
11112
11113 static unsigned int
11114 rs6000_parm_start (machine_mode mode, const_tree type,
11115 unsigned int nwords)
11116 {
11117 unsigned int align;
11118
11119 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11120 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11121 }
11122
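/* Worked example, assuming the 64-bit ELFv2 offset of 4 words: for an
argument that requires 128-bit alignment, ALIGN is 128/64 - 1 == 1,
so with NWORDS == 1 this returns 1 + (-(4 + 1) & 1) == 2, bumping the
argument to an even word whose absolute position (word 6, byte 48) is
16-byte aligned. */
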
11123 /* Compute the size (in words) of a function argument. */
11124
11125 static unsigned long
11126 rs6000_arg_size (machine_mode mode, const_tree type)
11127 {
11128 unsigned long size;
11129
11130 if (mode != BLKmode)
11131 size = GET_MODE_SIZE (mode);
11132 else
11133 size = int_size_in_bytes (type);
11134
11135 if (TARGET_32BIT)
11136 return (size + 3) >> 2;
11137 else
11138 return (size + 7) >> 3;
11139 }
11140 \f
11141 /* Use this to flush pending int fields. */
11142
11143 static void
11144 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11145 HOST_WIDE_INT bitpos, int final)
11146 {
11147 unsigned int startbit, endbit;
11148 int intregs, intoffset;
11149
11150 /* Handle the situations where a float is taking up the first half
11151 of the GPR, and the other half is empty (typically due to
11152 alignment restrictions).  We can detect this by an 8-byte-aligned
11153 int field, or by seeing that this is the final flush for this
11154 argument. Count the word and continue on. */
11155 if (cum->floats_in_gpr == 1
11156 && (cum->intoffset % 64 == 0
11157 || (cum->intoffset == -1 && final)))
11158 {
11159 cum->words++;
11160 cum->floats_in_gpr = 0;
11161 }
11162
11163 if (cum->intoffset == -1)
11164 return;
11165
11166 intoffset = cum->intoffset;
11167 cum->intoffset = -1;
11168 cum->floats_in_gpr = 0;
11169
11170 if (intoffset % BITS_PER_WORD != 0)
11171 {
11172 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11173 if (!int_mode_for_size (bits, 0).exists ())
11174 {
11175 /* We couldn't find an appropriate mode, which happens,
11176 e.g., in packed structs when there are 3 bytes to load.
11177 Move intoffset back to the beginning of the word in this
11178 case. */
11179 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11180 }
11181 }
11182
11183 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11184 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11185 intregs = (endbit - startbit) / BITS_PER_WORD;
11186 cum->words += intregs;
11187 /* words should be unsigned. */
11188 if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
11189 {
11190 int pad = (endbit / BITS_PER_WORD) - cum->words;
11191 cum->words += pad;
11192 }
11193 }
11194
11195 /* The darwin64 ABI calls for us to recurse down through structs,
11196 looking for elements passed in registers. Unfortunately, we have
11197 to track int register count here also because of misalignments
11198 in powerpc alignment mode. */
11199
11200 static void
11201 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11202 const_tree type,
11203 HOST_WIDE_INT startbitpos)
11204 {
11205 tree f;
11206
11207 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11208 if (TREE_CODE (f) == FIELD_DECL)
11209 {
11210 HOST_WIDE_INT bitpos = startbitpos;
11211 tree ftype = TREE_TYPE (f);
11212 machine_mode mode;
11213 if (ftype == error_mark_node)
11214 continue;
11215 mode = TYPE_MODE (ftype);
11216
11217 if (DECL_SIZE (f) != 0
11218 && tree_fits_uhwi_p (bit_position (f)))
11219 bitpos += int_bit_position (f);
11220
11221 /* ??? FIXME: else assume zero offset. */
11222
11223 if (TREE_CODE (ftype) == RECORD_TYPE)
11224 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11225 else if (USE_FP_FOR_ARG_P (cum, mode))
11226 {
11227 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11228 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11229 cum->fregno += n_fpregs;
11230 /* Single-precision floats present a special problem for
11231 us, because they are smaller than an 8-byte GPR, and so
11232 the structure-packing rules combined with the standard
11233 varargs behavior mean that we want to pack float/float
11234 and float/int combinations into a single register's
11235 space. This is complicated by the arg advance flushing,
11236 which works on arbitrarily large groups of int-type
11237 fields. */
11238 if (mode == SFmode)
11239 {
11240 if (cum->floats_in_gpr == 1)
11241 {
11242 /* Two floats in a word; count the word and reset
11243 the float count. */
11244 cum->words++;
11245 cum->floats_in_gpr = 0;
11246 }
11247 else if (bitpos % 64 == 0)
11248 {
11249 /* A float at the beginning of an 8-byte word;
11250 count it and put off adjusting cum->words until
11251 we see if an arg advance flush is going to do it
11252 for us. */
11253 cum->floats_in_gpr++;
11254 }
11255 else
11256 {
11257 /* The float is at the end of a word, preceded
11258 by integer fields, so the arg advance flush
11259 just above has already set cum->words and
11260 everything is taken care of. */
11261 }
11262 }
11263 else
11264 cum->words += n_fpregs;
11265 }
11266 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11267 {
11268 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11269 cum->vregno++;
11270 cum->words += 2;
11271 }
11272 else if (cum->intoffset == -1)
11273 cum->intoffset = bitpos;
11274 }
11275 }
11276
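/* For instance, for a Darwin64 struct { float a; float b; } the first
float starts an 8-byte word (bitpos % 64 == 0) and only bumps
floats_in_gpr; the second float then completes the word, so cum->words
increases by one and the pair consumes a single GPR's worth of
parameter space while still occupying two FPRs. */
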
11277 /* Check for an item that needs to be considered specially under the
11278 Darwin 64-bit ABI.  These are record types where the mode is BLKmode
11279 or the structure is 8 bytes in size. */
11280 static int
11281 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11282 {
11283 return rs6000_darwin64_abi
11284 && ((mode == BLKmode
11285 && TREE_CODE (type) == RECORD_TYPE
11286 && int_size_in_bytes (type) > 0)
11287 || (type && TREE_CODE (type) == RECORD_TYPE
11288 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11289 }
11290
11291 /* Update the data in CUM to advance over an argument
11292 of mode MODE and data type TYPE.
11293 (TYPE is null for libcalls where that information may not be available.)
11294
11295 Note that for args passed by reference, function_arg will be called
11296 with MODE and TYPE set to that of the pointer to the arg, not the arg
11297 itself. */
11298
11299 static void
11300 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11301 const_tree type, bool named, int depth)
11302 {
11303 machine_mode elt_mode;
11304 int n_elts;
11305
11306 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11307
11308 /* Only tick off an argument if we're not recursing. */
11309 if (depth == 0)
11310 cum->nargs_prototype--;
11311
11312 #ifdef HAVE_AS_GNU_ATTRIBUTE
11313 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11314 && cum->escapes)
11315 {
11316 if (SCALAR_FLOAT_MODE_P (mode))
11317 {
11318 rs6000_passes_float = true;
11319 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11320 && (FLOAT128_IBM_P (mode)
11321 || FLOAT128_IEEE_P (mode)
11322 || (type != NULL
11323 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11324 rs6000_passes_long_double = true;
11325
11326 /* Note if we pass or return an IEEE 128-bit type.  We changed the
11327 mangling for these types, and we may need to make an alias with
11328 the old mangling. */
11329 if (FLOAT128_IEEE_P (mode))
11330 rs6000_passes_ieee128 = true;
11331 }
11332 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11333 rs6000_passes_vector = true;
11334 }
11335 #endif
11336
11337 if (TARGET_ALTIVEC_ABI
11338 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11339 || (type && TREE_CODE (type) == VECTOR_TYPE
11340 && int_size_in_bytes (type) == 16)))
11341 {
11342 bool stack = false;
11343
11344 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11345 {
11346 cum->vregno += n_elts;
11347
11348 if (!TARGET_ALTIVEC)
11349 error ("cannot pass argument in vector register because"
11350 " altivec instructions are disabled, use %qs"
11351 " to enable them", "-maltivec");
11352
11353 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11354 even if it is going to be passed in a vector register.
11355 Darwin does the same for variable-argument functions. */
11356 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11357 && TARGET_64BIT)
11358 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11359 stack = true;
11360 }
11361 else
11362 stack = true;
11363
11364 if (stack)
11365 {
11366 int align;
11367
11368 /* Vector parameters must be 16-byte aligned. In 32-bit
11369 mode this means we need to take into account the offset
11370 to the parameter save area. In 64-bit mode, they just
11371 have to start on an even word, since the parameter save
11372 area is 16-byte aligned. */
11373 if (TARGET_32BIT)
11374 align = -(rs6000_parm_offset () + cum->words) & 3;
11375 else
11376 align = cum->words & 1;
11377 cum->words += align + rs6000_arg_size (mode, type);
11378
11379 if (TARGET_DEBUG_ARG)
11380 {
11381 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11382 cum->words, align);
11383 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11384 cum->nargs_prototype, cum->prototype,
11385 GET_MODE_NAME (mode));
11386 }
11387 }
11388 }
11389 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11390 {
11391 int size = int_size_in_bytes (type);
11392 /* Variable sized types have size == -1 and are
11393 treated as if consisting entirely of ints.
11394 Pad to 16 byte boundary if needed. */
11395 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11396 && (cum->words % 2) != 0)
11397 cum->words++;
11398 /* For varargs, we can just go up by the size of the struct. */
11399 if (!named)
11400 cum->words += (size + 7) / 8;
11401 else
11402 {
11403 /* It is tempting to say int register count just goes up by
11404 sizeof(type)/8, but this is wrong in a case such as
11405 { int; double; int; } [powerpc alignment]. We have to
11406 grovel through the fields for these too. */
11407 cum->intoffset = 0;
11408 cum->floats_in_gpr = 0;
11409 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11410 rs6000_darwin64_record_arg_advance_flush (cum,
11411 size * BITS_PER_UNIT, 1);
11412 }
11413 if (TARGET_DEBUG_ARG)
11414 {
11415 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11416 cum->words, TYPE_ALIGN (type), size);
11417 fprintf (stderr,
11418 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11419 cum->nargs_prototype, cum->prototype,
11420 GET_MODE_NAME (mode));
11421 }
11422 }
11423 else if (DEFAULT_ABI == ABI_V4)
11424 {
11425 if (abi_v4_pass_in_fpr (mode, named))
11426 {
11427 /* _Decimal128 must use an even/odd register pair. This assumes
11428 that the register number is odd when fregno is odd. */
11429 if (mode == TDmode && (cum->fregno % 2) == 1)
11430 cum->fregno++;
11431
11432 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11433 <= FP_ARG_V4_MAX_REG)
11434 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11435 else
11436 {
11437 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11438 if (mode == DFmode || FLOAT128_IBM_P (mode)
11439 || mode == DDmode || mode == TDmode)
11440 cum->words += cum->words & 1;
11441 cum->words += rs6000_arg_size (mode, type);
11442 }
11443 }
11444 else
11445 {
11446 int n_words = rs6000_arg_size (mode, type);
11447 int gregno = cum->sysv_gregno;
11448
11449 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11450 As is any other 2 word item such as complex int due to a
11451 historical mistake. */
11452 if (n_words == 2)
11453 gregno += (1 - gregno) & 1;
11454
11455 /* Multi-reg args are not split between registers and stack. */
11456 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11457 {
11458 /* Long long is aligned on the stack. So are other 2 word
11459 items such as complex int due to a historical mistake. */
11460 if (n_words == 2)
11461 cum->words += cum->words & 1;
11462 cum->words += n_words;
11463 }
11464
11465 /* Note: we continue to accumulate gregno even after we have started
11466 spilling to the stack; this is how expand_builtin_saveregs learns
11467 that spilling has begun. */
11468 cum->sysv_gregno = gregno + n_words;
11469 }
11470
11471 if (TARGET_DEBUG_ARG)
11472 {
11473 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11474 cum->words, cum->fregno);
11475 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11476 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11477 fprintf (stderr, "mode = %4s, named = %d\n",
11478 GET_MODE_NAME (mode), named);
11479 }
11480 }
11481 else
11482 {
11483 int n_words = rs6000_arg_size (mode, type);
11484 int start_words = cum->words;
11485 int align_words = rs6000_parm_start (mode, type, start_words);
11486
11487 cum->words = align_words + n_words;
11488
11489 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11490 {
11491 /* _Decimal128 must be passed in an even/odd float register pair.
11492 This assumes that the register number is odd when fregno is
11493 odd. */
11494 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11495 cum->fregno++;
11496 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11497 }
11498
11499 if (TARGET_DEBUG_ARG)
11500 {
11501 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11502 cum->words, cum->fregno);
11503 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11504 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11505 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11506 named, align_words - start_words, depth);
11507 }
11508 }
11509 }
11510
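/* For example, advancing over the arguments of "void f (int a,
long long b)" under 32-bit ABI_V4: a takes r3 (sysv_gregno goes from
3 to 4); b is a two-word item, so gregno is first bumped from 4 to 5
and b lands in the even/odd pair (r5,r6) rather than being split
across (r4,r5). Register numbers assume the usual GP_ARG_MIN_REG of
r3. */
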
11511 static void
11512 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11513 const_tree type, bool named)
11514 {
11515 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11516 0);
11517 }
11518
11519 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11520 structure between cum->intoffset and bitpos to integer registers. */
11521
11522 static void
11523 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11524 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11525 {
11526 machine_mode mode;
11527 unsigned int regno;
11528 unsigned int startbit, endbit;
11529 int this_regno, intregs, intoffset;
11530 rtx reg;
11531
11532 if (cum->intoffset == -1)
11533 return;
11534
11535 intoffset = cum->intoffset;
11536 cum->intoffset = -1;
11537
11538 /* If this is the trailing part of a word, try to only load that
11539 much into the register. Otherwise load the whole register. Note
11540 that in the latter case we may pick up unwanted bits. It's not a
11541 problem at the moment, but we may wish to revisit this. */
11542
11543 if (intoffset % BITS_PER_WORD != 0)
11544 {
11545 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11546 if (!int_mode_for_size (bits, 0).exists (&mode))
11547 {
11548 /* We couldn't find an appropriate mode, which happens,
11549 e.g., in packed structs when there are 3 bytes to load.
11550 Move intoffset back to the beginning of the word in this
11551 case. */
11552 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11553 mode = word_mode;
11554 }
11555 }
11556 else
11557 mode = word_mode;
11558
11559 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11560 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11561 intregs = (endbit - startbit) / BITS_PER_WORD;
11562 this_regno = cum->words + intoffset / BITS_PER_WORD;
11563
11564 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11565 cum->use_stack = 1;
11566
11567 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11568 if (intregs <= 0)
11569 return;
11570
11571 intoffset /= BITS_PER_UNIT;
11572 do
11573 {
11574 regno = GP_ARG_MIN_REG + this_regno;
11575 reg = gen_rtx_REG (mode, regno);
11576 rvec[(*k)++] =
11577 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11578
11579 this_regno += 1;
11580 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11581 mode = word_mode;
11582 intregs -= 1;
11583 }
11584 while (intregs > 0);
11585 }
11586
11587 /* Recursive workhorse for the following. */
11588
11589 static void
11590 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11591 HOST_WIDE_INT startbitpos, rtx rvec[],
11592 int *k)
11593 {
11594 tree f;
11595
11596 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11597 if (TREE_CODE (f) == FIELD_DECL)
11598 {
11599 HOST_WIDE_INT bitpos = startbitpos;
11600 tree ftype = TREE_TYPE (f);
11601 machine_mode mode;
11602 if (ftype == error_mark_node)
11603 continue;
11604 mode = TYPE_MODE (ftype);
11605
11606 if (DECL_SIZE (f) != 0
11607 && tree_fits_uhwi_p (bit_position (f)))
11608 bitpos += int_bit_position (f);
11609
11610 /* ??? FIXME: else assume zero offset. */
11611
11612 if (TREE_CODE (ftype) == RECORD_TYPE)
11613 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11614 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11615 {
11616 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11617 #if 0
11618 switch (mode)
11619 {
11620 case E_SCmode: mode = SFmode; break;
11621 case E_DCmode: mode = DFmode; break;
11622 case E_TCmode: mode = TFmode; break;
11623 default: break;
11624 }
11625 #endif
11626 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11627 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11628 {
11629 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11630 && (mode == TFmode || mode == TDmode));
11631 /* Long double or _Decimal128 split over regs and memory. */
11632 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11633 cum->use_stack = 1;
11634 }
11635 rvec[(*k)++]
11636 = gen_rtx_EXPR_LIST (VOIDmode,
11637 gen_rtx_REG (mode, cum->fregno++),
11638 GEN_INT (bitpos / BITS_PER_UNIT));
11639 if (FLOAT128_2REG_P (mode))
11640 cum->fregno++;
11641 }
11642 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11643 {
11644 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11645 rvec[(*k)++]
11646 = gen_rtx_EXPR_LIST (VOIDmode,
11647 gen_rtx_REG (mode, cum->vregno++),
11648 GEN_INT (bitpos / BITS_PER_UNIT));
11649 }
11650 else if (cum->intoffset == -1)
11651 cum->intoffset = bitpos;
11652 }
11653 }
11654
11655 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11656 the register(s) to be used for each field and subfield of a struct
11657 being passed by value, along with the offset of where the
11658 register's value may be found in the block. FP fields go in FP
11659 register, vector fields go in vector registers, and everything
11660 else goes in int registers, packed as in memory.
11661
11662 This code is also used for function return values. RETVAL indicates
11663 whether this is the case.
11664
11665 Much of this is taken from the SPARC V9 port, which has a similar
11666 calling convention. */
11667
11668 static rtx
11669 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11670 bool named, bool retval)
11671 {
11672 rtx rvec[FIRST_PSEUDO_REGISTER];
11673 int k = 1, kbase = 1;
11674 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11675 /* This is a copy; modifications are not visible to our caller. */
11676 CUMULATIVE_ARGS copy_cum = *orig_cum;
11677 CUMULATIVE_ARGS *cum = &copy_cum;
11678
11679 /* Pad to 16 byte boundary if needed. */
11680 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11681 && (cum->words % 2) != 0)
11682 cum->words++;
11683
11684 cum->intoffset = 0;
11685 cum->use_stack = 0;
11686 cum->named = named;
11687
11688 /* Put entries into rvec[] for individual FP and vector fields, and
11689 for the chunks of memory that go in int regs. Note we start at
11690 element 1; 0 is reserved for an indication of using memory, and
11691 may or may not be filled in below. */
11692 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11693 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11694
11695 /* If any part of the struct went on the stack put all of it there.
11696 This hack is because the generic code for
11697 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11698 parts of the struct are not at the beginning. */
11699 if (cum->use_stack)
11700 {
11701 if (retval)
11702 return NULL_RTX; /* doesn't go in registers at all */
11703 kbase = 0;
11704 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11705 }
11706 if (k > 1 || cum->use_stack)
11707 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11708 else
11709 return NULL_RTX;
11710 }
11711
11712 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11713
11714 static rtx
11715 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11716 int align_words)
11717 {
11718 int n_units;
11719 int i, k;
11720 rtx rvec[GP_ARG_NUM_REG + 1];
11721
11722 if (align_words >= GP_ARG_NUM_REG)
11723 return NULL_RTX;
11724
11725 n_units = rs6000_arg_size (mode, type);
11726
11727 /* Optimize the simple case where the arg fits in one gpr, except in
11728 the case of BLKmode due to assign_parms assuming that registers are
11729 BITS_PER_WORD wide. */
11730 if (n_units == 0
11731 || (n_units == 1 && mode != BLKmode))
11732 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11733
11734 k = 0;
11735 if (align_words + n_units > GP_ARG_NUM_REG)
11736 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11737 using a magic NULL_RTX component.
11738 This is not strictly correct. Only some of the arg belongs in
11739 memory, not all of it. However, the normal scheme using
11740 function_arg_partial_nregs can result in unusual subregs, e.g.
11741 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11742 store the whole arg to memory is often more efficient than code
11743 to store pieces, and we know that space is available in the right
11744 place for the whole arg. */
11745 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11746
11747 i = 0;
11748 do
11749 {
11750 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11751 rtx off = GEN_INT (i++ * 4);
11752 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11753 }
11754 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11755
11756 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11757 }
11758
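/* Example of the register/memory split above: a DFmode argument with
align_words == 7, i.e. only r10 left (assuming GP_ARG_MIN_REG == 3
and GP_ARG_NUM_REG == 8), needs two SImode units, so the result is
roughly

(parallel:DF [(expr_list (nil) (const_int 0))
(expr_list (reg:SI 10) (const_int 0))])

meaning the whole value is stored to memory while its first half is
also passed in r10. */
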
11759 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11760 but must also be copied into the parameter save area starting at
11761 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11762 to the GPRs and/or memory. Return the number of elements used. */
11763
11764 static int
11765 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11766 int align_words, rtx *rvec)
11767 {
11768 int k = 0;
11769
11770 if (align_words < GP_ARG_NUM_REG)
11771 {
11772 int n_words = rs6000_arg_size (mode, type);
11773
11774 if (align_words + n_words > GP_ARG_NUM_REG
11775 || mode == BLKmode
11776 || (TARGET_32BIT && TARGET_POWERPC64))
11777 {
11778 /* If this is partially on the stack, then we only
11779 include the portion actually in registers here. */
11780 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11781 int i = 0;
11782
11783 if (align_words + n_words > GP_ARG_NUM_REG)
11784 {
11785 /* Not all of the arg fits in gprs. Say that it goes in memory
11786 too, using a magic NULL_RTX component. Also see comment in
11787 rs6000_mixed_function_arg for why the normal
11788 function_arg_partial_nregs scheme doesn't work in this case. */
11789 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11790 }
11791
11792 do
11793 {
11794 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11795 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11796 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11797 }
11798 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11799 }
11800 else
11801 {
11802 /* The whole arg fits in gprs. */
11803 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11804 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11805 }
11806 }
11807 else
11808 {
11809 /* It's entirely in memory. */
11810 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11811 }
11812
11813 return k;
11814 }
11815
11816 /* RVEC is a vector of K components of an argument of mode MODE.
11817 Construct the final function_arg return value from it. */
11818
11819 static rtx
11820 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11821 {
11822 gcc_assert (k >= 1);
11823
11824 /* Avoid returning a PARALLEL in the trivial cases. */
11825 if (k == 1)
11826 {
11827 if (XEXP (rvec[0], 0) == NULL_RTX)
11828 return NULL_RTX;
11829
11830 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11831 return XEXP (rvec[0], 0);
11832 }
11833
11834 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11835 }
11836
11837 /* Determine where to put an argument to a function.
11838 Value is zero to push the argument on the stack,
11839 or a hard register in which to store the argument.
11840
11841 MODE is the argument's machine mode.
11842 TYPE is the data type of the argument (as a tree).
11843 This is null for libcalls where that information may
11844 not be available.
11845 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11846 the preceding args and about the function being called. It is
11847 not modified in this routine.
11848 NAMED is nonzero if this argument is a named parameter
11849 (otherwise it is an extra parameter matching an ellipsis).
11850
11851 On RS/6000 the first eight words of non-FP are normally in registers
11852 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11853 Under V.4, the first 8 FP args are in registers.
11854
11855 If this is floating-point and no prototype is specified, we use
11856 both an FP and integer register (or possibly FP reg and stack). Library
11857 functions (when CALL_LIBCALL is set) always have the proper types for args,
11858 so we can pass the FP value just in one register. emit_library_function
11859 doesn't support PARALLEL anyway.
11860
11861 Note that for args passed by reference, function_arg will be called
11862 with MODE and TYPE set to that of the pointer to the arg, not the arg
11863 itself. */
11864
11865 static rtx
11866 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11867 const_tree type, bool named)
11868 {
11869 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11870 enum rs6000_abi abi = DEFAULT_ABI;
11871 machine_mode elt_mode;
11872 int n_elts;
11873
11874 /* Return a marker to indicate whether we need to set or clear in CR1
11875 the bit that V.4 uses to say fp args were passed in registers.
11876 Assume that we don't need the marker for software floating point,
11877 or compiler generated library calls. */
11878 if (mode == VOIDmode)
11879 {
11880 if (abi == ABI_V4
11881 && (cum->call_cookie & CALL_LIBCALL) == 0
11882 && (cum->stdarg
11883 || (cum->nargs_prototype < 0
11884 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11885 && TARGET_HARD_FLOAT)
11886 return GEN_INT (cum->call_cookie
11887 | ((cum->fregno == FP_ARG_MIN_REG)
11888 ? CALL_V4_SET_FP_ARGS
11889 : CALL_V4_CLEAR_FP_ARGS));
11890
11891 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11892 }
11893
11894 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11895
11896 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11897 {
11898 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11899 if (rslt != NULL_RTX)
11900 return rslt;
11901 /* Else fall through to usual handling. */
11902 }
11903
11904 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11905 {
11906 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11907 rtx r, off;
11908 int i, k = 0;
11909
11910 /* Do we also need to pass this argument in the parameter save area?
11911 Library support functions for IEEE 128-bit are assumed to not need the
11912 value passed both in GPRs and in vector registers. */
11913 if (TARGET_64BIT && !cum->prototype
11914 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11915 {
11916 int align_words = ROUND_UP (cum->words, 2);
11917 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11918 }
11919
11920 /* Describe where this argument goes in the vector registers. */
11921 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11922 {
11923 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11924 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11925 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11926 }
11927
11928 return rs6000_finish_function_arg (mode, rvec, k);
11929 }
11930 else if (TARGET_ALTIVEC_ABI
11931 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11932 || (type && TREE_CODE (type) == VECTOR_TYPE
11933 && int_size_in_bytes (type) == 16)))
11934 {
11935 if (named || abi == ABI_V4)
11936 return NULL_RTX;
11937 else
11938 {
11939 /* Vector parameters to varargs functions under AIX or Darwin
11940 get passed in memory and possibly also in GPRs. */
11941 int align, align_words, n_words;
11942 machine_mode part_mode;
11943
11944 /* Vector parameters must be 16-byte aligned. In 32-bit
11945 mode this means we need to take into account the offset
11946 to the parameter save area. In 64-bit mode, they just
11947 have to start on an even word, since the parameter save
11948 area is 16-byte aligned. */
11949 if (TARGET_32BIT)
11950 align = -(rs6000_parm_offset () + cum->words) & 3;
11951 else
11952 align = cum->words & 1;
11953 align_words = cum->words + align;
11954
11955 /* Out of registers? Memory, then. */
11956 if (align_words >= GP_ARG_NUM_REG)
11957 return NULL_RTX;
11958
11959 if (TARGET_32BIT && TARGET_POWERPC64)
11960 return rs6000_mixed_function_arg (mode, type, align_words);
11961
11962 /* The vector value goes in GPRs. Only the part of the
11963 value in GPRs is reported here. */
11964 part_mode = mode;
11965 n_words = rs6000_arg_size (mode, type);
11966 if (align_words + n_words > GP_ARG_NUM_REG)
11967 /* Fortunately, there are only two possibilities, the value
11968 is either wholly in GPRs or half in GPRs and half not. */
11969 part_mode = DImode;
11970
11971 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11972 }
11973 }
11974
11975 else if (abi == ABI_V4)
11976 {
11977 if (abi_v4_pass_in_fpr (mode, named))
11978 {
11979 /* _Decimal128 must use an even/odd register pair. This assumes
11980 that the register number is odd when fregno is odd. */
11981 if (mode == TDmode && (cum->fregno % 2) == 1)
11982 cum->fregno++;
11983
11984 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11985 <= FP_ARG_V4_MAX_REG)
11986 return gen_rtx_REG (mode, cum->fregno);
11987 else
11988 return NULL_RTX;
11989 }
11990 else
11991 {
11992 int n_words = rs6000_arg_size (mode, type);
11993 int gregno = cum->sysv_gregno;
11994
11995 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11996 As is any other 2 word item such as complex int due to a
11997 historical mistake. */
11998 if (n_words == 2)
11999 gregno += (1 - gregno) & 1;
12000
12001 /* Multi-reg args are not split between registers and stack. */
12002 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12003 return NULL_RTX;
12004
12005 if (TARGET_32BIT && TARGET_POWERPC64)
12006 return rs6000_mixed_function_arg (mode, type,
12007 gregno - GP_ARG_MIN_REG);
12008 return gen_rtx_REG (mode, gregno);
12009 }
12010 }
12011 else
12012 {
12013 int align_words = rs6000_parm_start (mode, type, cum->words);
12014
12015 /* _Decimal128 must be passed in an even/odd float register pair.
12016 This assumes that the register number is odd when fregno is odd. */
12017 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12018 cum->fregno++;
12019
12020 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12021 && !(TARGET_AIX && !TARGET_ELF
12022 && type != NULL && AGGREGATE_TYPE_P (type)))
12023 {
12024 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12025 rtx r, off;
12026 int i, k = 0;
12027 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12028 int fpr_words;
12029
12030 /* Do we also need to pass this argument in the parameter
12031 save area? */
12032 if (type && (cum->nargs_prototype <= 0
12033 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12034 && TARGET_XL_COMPAT
12035 && align_words >= GP_ARG_NUM_REG)))
12036 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12037
12038 /* Describe where this argument goes in the fprs. */
12039 for (i = 0; i < n_elts
12040 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12041 {
12042 /* Check if the argument is split over registers and memory.
12043 This can only ever happen for long double or _Decimal128;
12044 complex types are handled via split_complex_arg. */
12045 machine_mode fmode = elt_mode;
12046 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12047 {
12048 gcc_assert (FLOAT128_2REG_P (fmode));
12049 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12050 }
12051
12052 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12053 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12054 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12055 }
12056
12057 /* If there were not enough FPRs to hold the argument, the rest
12058 usually goes into memory. However, if the current position
12059 is still within the register parameter area, a portion may
12060 actually have to go into GPRs.
12061
12062 Note that it may happen that the portion of the argument
12063 passed in the first "half" of the first GPR was already
12064 passed in the last FPR as well.
12065
12066 For unnamed arguments, we already set up GPRs to cover the
12067 whole argument in rs6000_psave_function_arg, so there is
12068 nothing further to do at this point. */
12069 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12070 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12071 && cum->nargs_prototype > 0)
12072 {
12073 static bool warned;
12074
12075 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12076 int n_words = rs6000_arg_size (mode, type);
12077
12078 align_words += fpr_words;
12079 n_words -= fpr_words;
12080
12081 do
12082 {
12083 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12084 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12085 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12086 }
12087 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12088
12089 if (!warned && warn_psabi)
12090 {
12091 warned = true;
12092 inform (input_location,
12093 "the ABI of passing homogeneous float aggregates"
12094 " has changed in GCC 5");
12095 }
12096 }
12097
12098 return rs6000_finish_function_arg (mode, rvec, k);
12099 }
12100 else if (align_words < GP_ARG_NUM_REG)
12101 {
12102 if (TARGET_32BIT && TARGET_POWERPC64)
12103 return rs6000_mixed_function_arg (mode, type, align_words);
12104
12105 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12106 }
12107 else
12108 return NULL_RTX;
12109 }
12110 }
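
/* Illustrative sketch (exposition only, not compiler code): under the
   64-bit ELFv2 ABI a homogeneous float aggregate such as

     struct hfa { double a, b, c; };
     void f (struct hfa x);        -- x.a, x.b, x.c arrive in f1..f3

   is described by the PARALLEL built above with one EXPR_LIST per
   element; GPR or memory entries are appended only once the argument
   FPRs f1..f13 run out while the argument is still (partly) inside
   the register parameter area.  */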
12111 \f
12112 /* For an arg passed partly in registers and partly in memory, this is
12113 the number of bytes passed in registers. For args passed entirely in
12114 registers or entirely in memory, zero. When an arg is described by a
12115 PARALLEL, perhaps using more than one register type, this function
12116 returns the number of bytes used by the first element of the PARALLEL. */
12117
12118 static int
12119 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12120 tree type, bool named)
12121 {
12122 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12123 bool passed_in_gprs = true;
12124 int ret = 0;
12125 int align_words;
12126 machine_mode elt_mode;
12127 int n_elts;
12128
12129 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12130
12131 if (DEFAULT_ABI == ABI_V4)
12132 return 0;
12133
12134 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12135 {
12136 /* If we are passing this arg in the fixed parameter save area (gprs or
12137 memory) as well as VRs, we do not use the partial bytes mechanism;
12138 instead, rs6000_function_arg will return a PARALLEL including a memory
12139 element as necessary. Library support functions for IEEE 128-bit are
12140 assumed to not need the value passed both in GPRs and in vector
12141 registers. */
12142 if (TARGET_64BIT && !cum->prototype
12143 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12144 return 0;
12145
12146 /* Otherwise, we pass in VRs only. Check for partial copies. */
12147 passed_in_gprs = false;
12148 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12149 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12150 }
12151
12152 /* In this complicated case we just disable the partial_nregs code. */
12153 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12154 return 0;
12155
12156 align_words = rs6000_parm_start (mode, type, cum->words);
12157
12158 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12159 && !(TARGET_AIX && !TARGET_ELF
12160 && type != NULL && AGGREGATE_TYPE_P (type)))
12161 {
12162 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12163
12164 /* If we are passing this arg in the fixed parameter save area
12165 (gprs or memory) as well as FPRs, we do not use the partial
12166 bytes mechanism; instead, rs6000_function_arg will return a
12167 PARALLEL including a memory element as necessary. */
12168 if (type
12169 && (cum->nargs_prototype <= 0
12170 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12171 && TARGET_XL_COMPAT
12172 && align_words >= GP_ARG_NUM_REG)))
12173 return 0;
12174
12175 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12176 passed_in_gprs = false;
12177 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12178 {
12179 /* Compute number of bytes / words passed in FPRs. If there
12180 is still space available in the register parameter area
12181 *after* that amount, a part of the argument will be passed
12182 in GPRs. In that case, the total amount passed in any
12183 registers is equal to the amount that would have been passed
12184 in GPRs if everything were passed there, so we fall back to
12185 the GPR code below to compute the appropriate value. */
12186 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12187 * MIN (8, GET_MODE_SIZE (elt_mode)));
12188 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12189
12190 if (align_words + fpr_words < GP_ARG_NUM_REG)
12191 passed_in_gprs = true;
12192 else
12193 ret = fpr;
12194 }
12195 }
12196
12197 if (passed_in_gprs
12198 && align_words < GP_ARG_NUM_REG
12199 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12200 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12201
12202 if (ret != 0 && TARGET_DEBUG_ARG)
12203 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12204
12205 return ret;
12206 }
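
/* Worked example (illustrative assumption): on a 64-bit target, an
   argument needing four doublewords that starts at align_words == 6
   has only r9 and r10 left, so the code above returns
   (8 - 6) * 8 == 16 partial bytes; the remaining 16 bytes of the
   argument live in the parameter save area.  */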
12207 \f
12208 /* A C expression that indicates when an argument must be passed by
12209 reference. If nonzero for an argument, a copy of that argument is
12210 made in memory and a pointer to the argument is passed instead of
12211 the argument itself. The pointer is passed in whatever way is
12212 appropriate for passing a pointer to that type.
12213
12214 Under V.4, aggregates and long double are passed by reference.
12215
12216 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12217 reference unless the AltiVec vector extension ABI is in force.
12218
12219 As an extension to all ABIs, variable sized types are passed by
12220 reference. */
12221
12222 static bool
12223 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12224 machine_mode mode, const_tree type,
12225 bool named ATTRIBUTE_UNUSED)
12226 {
12227 if (!type)
12228 return 0;
12229
12230 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12231 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12232 {
12233 if (TARGET_DEBUG_ARG)
12234 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12235 return 1;
12236 }
12237
12238 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12239 {
12240 if (TARGET_DEBUG_ARG)
12241 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12242 return 1;
12243 }
12244
12245 if (int_size_in_bytes (type) < 0)
12246 {
12247 if (TARGET_DEBUG_ARG)
12248 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12249 return 1;
12250 }
12251
12252 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12253 modes only exist for GCC vector types if -maltivec. */
12254 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12255 {
12256 if (TARGET_DEBUG_ARG)
12257 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12258 return 1;
12259 }
12260
12261 /* Pass synthetic vectors in memory. */
12262 if (TREE_CODE (type) == VECTOR_TYPE
12263 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12264 {
12265 static bool warned_for_pass_big_vectors = false;
12266 if (TARGET_DEBUG_ARG)
12267 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12268 if (!warned_for_pass_big_vectors)
12269 {
12270 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12271 "non-standard ABI extension with no compatibility "
12272 "guarantee");
12273 warned_for_pass_big_vectors = true;
12274 }
12275 return 1;
12276 }
12277
12278 return 0;
12279 }
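
/* Illustrative examples (assumptions for exposition) of the rules
   above under -mabi=v4:

     struct s { int x, y; }   -- aggregate, passed by reference
     vector int               -- 32-bit without the AltiVec ABI,
                                 passed by reference
     variably sized types     -- passed by reference on all ABIs

   while plain scalars such as int and double are passed by value.  */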
12280
12281 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12282    already processed.  Return true if the parameter must be passed
12283 (fully or partially) on the stack. */
12284
12285 static bool
12286 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12287 {
12288 machine_mode mode;
12289 int unsignedp;
12290 rtx entry_parm;
12291
12292 /* Catch errors. */
12293 if (type == NULL || type == error_mark_node)
12294 return true;
12295
12296 /* Handle types with no storage requirement. */
12297 if (TYPE_MODE (type) == VOIDmode)
12298 return false;
12299
12300   /* Handle complex types: the two identical calls below check (and
         advance ARGS_SO_FAR past) each scalar half.  */
12301 if (TREE_CODE (type) == COMPLEX_TYPE)
12302 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12303 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12304
12305 /* Handle transparent aggregates. */
12306 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12307 && TYPE_TRANSPARENT_AGGR (type))
12308 type = TREE_TYPE (first_field (type));
12309
12310 /* See if this arg was passed by invisible reference. */
12311 if (pass_by_reference (get_cumulative_args (args_so_far),
12312 TYPE_MODE (type), type, true))
12313 type = build_pointer_type (type);
12314
12315 /* Find mode as it is passed by the ABI. */
12316 unsignedp = TYPE_UNSIGNED (type);
12317 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12318
12319 /* If we must pass in stack, we need a stack. */
12320 if (rs6000_must_pass_in_stack (mode, type))
12321 return true;
12322
12323 /* If there is no incoming register, we need a stack. */
12324 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12325 if (entry_parm == NULL)
12326 return true;
12327
12328 /* Likewise if we need to pass both in registers and on the stack. */
12329 if (GET_CODE (entry_parm) == PARALLEL
12330 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12331 return true;
12332
12333 /* Also true if we're partially in registers and partially not. */
12334 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12335 return true;
12336
12337 /* Update info on where next arg arrives in registers. */
12338 rs6000_function_arg_advance (args_so_far, mode, type, true);
12339 return false;
12340 }
12341
12342 /* Return true if FUN has no prototype, has a variable argument
12343 list, or passes any parameter in memory. */
12344
12345 static bool
12346 rs6000_function_parms_need_stack (tree fun, bool incoming)
12347 {
12348 tree fntype, result;
12349 CUMULATIVE_ARGS args_so_far_v;
12350 cumulative_args_t args_so_far;
12351
12352 if (!fun)
12353 /* Must be a libcall, all of which only use reg parms. */
12354 return false;
12355
12356 fntype = fun;
12357 if (!TYPE_P (fun))
12358 fntype = TREE_TYPE (fun);
12359
12360 /* Varargs functions need the parameter save area. */
12361 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12362 return true;
12363
12364 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12365 args_so_far = pack_cumulative_args (&args_so_far_v);
12366
12367 /* When incoming, we will have been passed the function decl.
12368 It is necessary to use the decl to handle K&R style functions,
12369 where TYPE_ARG_TYPES may not be available. */
12370 if (incoming)
12371 {
12372 gcc_assert (DECL_P (fun));
12373 result = DECL_RESULT (fun);
12374 }
12375 else
12376 result = TREE_TYPE (fntype);
12377
12378 if (result && aggregate_value_p (result, fntype))
12379 {
12380 if (!TYPE_P (result))
12381 result = TREE_TYPE (result);
12382 result = build_pointer_type (result);
12383 rs6000_parm_needs_stack (args_so_far, result);
12384 }
12385
12386 if (incoming)
12387 {
12388 tree parm;
12389
12390 for (parm = DECL_ARGUMENTS (fun);
12391 parm && parm != void_list_node;
12392 parm = TREE_CHAIN (parm))
12393 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12394 return true;
12395 }
12396 else
12397 {
12398 function_args_iterator args_iter;
12399 tree arg_type;
12400
12401 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12402 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12403 return true;
12404 }
12405
12406 return false;
12407 }
12408
12409 /* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
12410 usually a constant depending on the ABI. However, in the ELFv2 ABI
12411 the register parameter area is optional when calling a function that
12412    has a prototype in scope, has no variable argument list, and passes
12413 all parameters in registers. */
12414
12415 int
12416 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12417 {
12418 int reg_parm_stack_space;
12419
12420 switch (DEFAULT_ABI)
12421 {
12422 default:
12423 reg_parm_stack_space = 0;
12424 break;
12425
12426 case ABI_AIX:
12427 case ABI_DARWIN:
12428 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12429 break;
12430
12431 case ABI_ELFv2:
12432 /* ??? Recomputing this every time is a bit expensive. Is there
12433 a place to cache this information? */
12434 if (rs6000_function_parms_need_stack (fun, incoming))
12435 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12436 else
12437 reg_parm_stack_space = 0;
12438 break;
12439 }
12440
12441 return reg_parm_stack_space;
12442 }
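
/* Illustrative consequence (assumption): on a 64-bit ELFv2 target,

     void f (int a, double b);         -- prototyped, all args in regs
     void g (const char *fmt, ...);    -- varargs

   give rs6000_reg_parm_stack_space of 0 for f but 64 for g, so calls
   to f may omit the 64-byte register parameter save area.  */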
12443
12444 static void
12445 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12446 {
12447 int i;
12448 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12449
12450 if (nregs == 0)
12451 return;
12452
12453 for (i = 0; i < nregs; i++)
12454 {
12455 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12456 if (reload_completed)
12457 {
12458 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12459 tem = NULL_RTX;
12460 else
12461 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12462 i * GET_MODE_SIZE (reg_mode));
12463 }
12464 else
12465 tem = replace_equiv_address (tem, XEXP (tem, 0));
12466
12467 gcc_assert (tem);
12468
12469 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12470 }
12471 }
12472 \f
12473 /* Perform any actions needed for a function that is receiving a
12474 variable number of arguments.
12475
12476 CUM is as above.
12477
12478 MODE and TYPE are the mode and type of the current parameter.
12479
12480 PRETEND_SIZE is a variable that should be set to the amount of stack
12481 that must be pushed by the prolog to pretend that our caller pushed
12482 it.
12483
12484 Normally, this macro will push all remaining incoming registers on the
12485 stack and set PRETEND_SIZE to the length of the registers pushed. */
12486
12487 static void
12488 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12489 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12490 int no_rtl)
12491 {
12492 CUMULATIVE_ARGS next_cum;
12493 int reg_size = TARGET_32BIT ? 4 : 8;
12494 rtx save_area = NULL_RTX, mem;
12495 int first_reg_offset;
12496 alias_set_type set;
12497
12498 /* Skip the last named argument. */
12499 next_cum = *get_cumulative_args (cum);
12500 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12501
12502 if (DEFAULT_ABI == ABI_V4)
12503 {
12504 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12505
12506 if (! no_rtl)
12507 {
12508 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12509 HOST_WIDE_INT offset = 0;
12510
12511 /* Try to optimize the size of the varargs save area.
12512 The ABI requires that ap.reg_save_area is doubleword
12513 aligned, but we don't need to allocate space for all
12514 the bytes, only those to which we actually will save
12515 anything. */
12516 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12517 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12518 if (TARGET_HARD_FLOAT
12519 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12520 && cfun->va_list_fpr_size)
12521 {
12522 if (gpr_reg_num)
12523 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12524 * UNITS_PER_FP_WORD;
12525 if (cfun->va_list_fpr_size
12526 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12527 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12528 else
12529 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12530 * UNITS_PER_FP_WORD;
12531 }
12532 if (gpr_reg_num)
12533 {
12534 offset = -((first_reg_offset * reg_size) & ~7);
12535 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12536 {
12537 gpr_reg_num = cfun->va_list_gpr_size;
12538 if (reg_size == 4 && (first_reg_offset & 1))
12539 gpr_reg_num++;
12540 }
12541 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12542 }
12543 else if (fpr_size)
12544 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12545 * UNITS_PER_FP_WORD
12546 - (int) (GP_ARG_NUM_REG * reg_size);
12547
12548 if (gpr_size + fpr_size)
12549 {
12550 rtx reg_save_area
12551 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12552 gcc_assert (MEM_P (reg_save_area));
12553 reg_save_area = XEXP (reg_save_area, 0);
12554 if (GET_CODE (reg_save_area) == PLUS)
12555 {
12556 gcc_assert (XEXP (reg_save_area, 0)
12557 == virtual_stack_vars_rtx);
12558 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12559 offset += INTVAL (XEXP (reg_save_area, 1));
12560 }
12561 else
12562 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12563 }
12564
12565 cfun->machine->varargs_save_offset = offset;
12566 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12567 }
12568 }
12569 else
12570 {
12571 first_reg_offset = next_cum.words;
12572 save_area = crtl->args.internal_arg_pointer;
12573
12574 if (targetm.calls.must_pass_in_stack (mode, type))
12575 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12576 }
12577
12578 set = get_varargs_alias_set ();
12579 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12580 && cfun->va_list_gpr_size)
12581 {
12582 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12583
12584 if (va_list_gpr_counter_field)
12585 /* V4 va_list_gpr_size counts number of registers needed. */
12586 n_gpr = cfun->va_list_gpr_size;
12587 else
12588 /* char * va_list instead counts number of bytes needed. */
12589 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12590
12591 if (nregs > n_gpr)
12592 nregs = n_gpr;
12593
12594 mem = gen_rtx_MEM (BLKmode,
12595 plus_constant (Pmode, save_area,
12596 first_reg_offset * reg_size));
12597 MEM_NOTRAP_P (mem) = 1;
12598 set_mem_alias_set (mem, set);
12599 set_mem_align (mem, BITS_PER_WORD);
12600
12601 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12602 nregs);
12603 }
12604
12605 /* Save FP registers if needed. */
12606 if (DEFAULT_ABI == ABI_V4
12607 && TARGET_HARD_FLOAT
12608 && ! no_rtl
12609 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12610 && cfun->va_list_fpr_size)
12611 {
12612 int fregno = next_cum.fregno, nregs;
12613 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12614 rtx lab = gen_label_rtx ();
12615 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12616 * UNITS_PER_FP_WORD);
12617
12618 emit_jump_insn
12619 (gen_rtx_SET (pc_rtx,
12620 gen_rtx_IF_THEN_ELSE (VOIDmode,
12621 gen_rtx_NE (VOIDmode, cr1,
12622 const0_rtx),
12623 gen_rtx_LABEL_REF (VOIDmode, lab),
12624 pc_rtx)));
12625
12626 for (nregs = 0;
12627 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12628 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12629 {
12630 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12631 plus_constant (Pmode, save_area, off));
12632 MEM_NOTRAP_P (mem) = 1;
12633 set_mem_alias_set (mem, set);
12634 set_mem_align (mem, GET_MODE_ALIGNMENT (
12635 TARGET_HARD_FLOAT ? DFmode : SFmode));
12636 emit_move_insn (mem, gen_rtx_REG (
12637 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12638 }
12639
12640 emit_label (lab);
12641 }
12642 }
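
/* Sketch of the SVR4 register save area laid out above on a 32-bit
   target (the offsets are the usual ABI values, shown for
   illustration):

     reg_save_area +  0 : r3..r10, 8 registers of 4 bytes
     reg_save_area + 32 : f1..f8,  8 registers of 8 bytes

   Only the slots that va_arg can actually reference are allocated,
   and the FPR stores are guarded by the CR1 test emitted above, per
   the SVR4 convention that the caller signals floating-point register
   arguments in a CR bit.  */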
12643
12644 /* Create the va_list data type. */
12645
12646 static tree
12647 rs6000_build_builtin_va_list (void)
12648 {
12649 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12650
12651 /* For AIX, prefer 'char *' because that's what the system
12652 header files like. */
12653 if (DEFAULT_ABI != ABI_V4)
12654 return build_pointer_type (char_type_node);
12655
12656 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12657 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12658 get_identifier ("__va_list_tag"), record);
12659
12660 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12661 unsigned_char_type_node);
12662 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12663 unsigned_char_type_node);
12664 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12665 every user file. */
12666 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12667 get_identifier ("reserved"), short_unsigned_type_node);
12668 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12669 get_identifier ("overflow_arg_area"),
12670 ptr_type_node);
12671 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12672 get_identifier ("reg_save_area"),
12673 ptr_type_node);
12674
12675 va_list_gpr_counter_field = f_gpr;
12676 va_list_fpr_counter_field = f_fpr;
12677
12678 DECL_FIELD_CONTEXT (f_gpr) = record;
12679 DECL_FIELD_CONTEXT (f_fpr) = record;
12680 DECL_FIELD_CONTEXT (f_res) = record;
12681 DECL_FIELD_CONTEXT (f_ovf) = record;
12682 DECL_FIELD_CONTEXT (f_sav) = record;
12683
12684 TYPE_STUB_DECL (record) = type_decl;
12685 TYPE_NAME (record) = type_decl;
12686 TYPE_FIELDS (record) = f_gpr;
12687 DECL_CHAIN (f_gpr) = f_fpr;
12688 DECL_CHAIN (f_fpr) = f_res;
12689 DECL_CHAIN (f_res) = f_ovf;
12690 DECL_CHAIN (f_ovf) = f_sav;
12691
12692 layout_type (record);
12693
12694 /* The correct type is an array type of one element. */
12695 return build_array_type (record, build_index_type (size_zero_node));
12696 }
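
/* Rough C equivalent of the record built above (illustrative; the
   authoritative definition is the tree constructed here):

     typedef struct __va_list_tag {
       unsigned char gpr;            -- next GPR to use, 0..8
       unsigned char fpr;            -- next FPR to use, 0..8
       unsigned short reserved;      -- named padding
       void *overflow_arg_area;      -- arguments passed on the stack
       void *reg_save_area;          -- spilled r3..r10 and f1..f8
     } va_list[1];

   matching the 32-bit SVR4 ABI definition of va_list.  */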
12697
12698 /* Implement va_start. */
12699
12700 static void
12701 rs6000_va_start (tree valist, rtx nextarg)
12702 {
12703 HOST_WIDE_INT words, n_gpr, n_fpr;
12704 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12705 tree gpr, fpr, ovf, sav, t;
12706
12707 /* Only SVR4 needs something special. */
12708 if (DEFAULT_ABI != ABI_V4)
12709 {
12710 std_expand_builtin_va_start (valist, nextarg);
12711 return;
12712 }
12713
12714 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12715 f_fpr = DECL_CHAIN (f_gpr);
12716 f_res = DECL_CHAIN (f_fpr);
12717 f_ovf = DECL_CHAIN (f_res);
12718 f_sav = DECL_CHAIN (f_ovf);
12719
12720 valist = build_simple_mem_ref (valist);
12721 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12722 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12723 f_fpr, NULL_TREE);
12724 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12725 f_ovf, NULL_TREE);
12726 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12727 f_sav, NULL_TREE);
12728
12729 /* Count number of gp and fp argument registers used. */
12730 words = crtl->args.info.words;
12731 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12732 GP_ARG_NUM_REG);
12733 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12734 FP_ARG_NUM_REG);
12735
12736 if (TARGET_DEBUG_ARG)
12737 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12738 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12739 words, n_gpr, n_fpr);
12740
12741 if (cfun->va_list_gpr_size)
12742 {
12743 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12744 build_int_cst (NULL_TREE, n_gpr));
12745 TREE_SIDE_EFFECTS (t) = 1;
12746 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12747 }
12748
12749 if (cfun->va_list_fpr_size)
12750 {
12751 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12752 build_int_cst (NULL_TREE, n_fpr));
12753 TREE_SIDE_EFFECTS (t) = 1;
12754 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12755
12756 #ifdef HAVE_AS_GNU_ATTRIBUTE
12757 if (call_ABI_of_interest (cfun->decl))
12758 rs6000_passes_float = true;
12759 #endif
12760 }
12761
12762 /* Find the overflow area. */
12763 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12764 if (words != 0)
12765 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12766 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12767 TREE_SIDE_EFFECTS (t) = 1;
12768 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12769
12770 /* If there were no va_arg invocations, don't set up the register
12771 save area. */
12772 if (!cfun->va_list_gpr_size
12773 && !cfun->va_list_fpr_size
12774 && n_gpr < GP_ARG_NUM_REG
12775 && n_fpr < FP_ARG_V4_MAX_REG)
12776 return;
12777
12778 /* Find the register save area. */
12779 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12780 if (cfun->machine->varargs_save_offset)
12781 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12782 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12783 TREE_SIDE_EFFECTS (t) = 1;
12784 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12785 }
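
/* Worked example (illustrative): for

     void f (int a, ...)    -- SVR4, 32-bit

   one GPR (r3) is consumed by `a', so va_start stores gpr = 1 and
   fpr = 0, points overflow_arg_area just past the named stack words,
   and points reg_save_area at the block spilled by
   setup_incoming_varargs.  */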
12786
12787 /* Implement va_arg. */
12788
12789 static tree
12790 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12791 gimple_seq *post_p)
12792 {
12793 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12794 tree gpr, fpr, ovf, sav, reg, t, u;
12795 int size, rsize, n_reg, sav_ofs, sav_scale;
12796 tree lab_false, lab_over, addr;
12797 int align;
12798 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12799 int regalign = 0;
12800 gimple *stmt;
12801
12802 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12803 {
12804 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12805 return build_va_arg_indirect_ref (t);
12806 }
12807
12808 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12809 earlier version of gcc, with the property that it always applied alignment
12810 adjustments to the va-args (even for zero-sized types). The cheapest way
12811 to deal with this is to replicate the effect of the part of
12812 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12813 of relevance.
12814 We don't need to check for pass-by-reference because of the test above.
12815    We can return a simplified answer, since we know there's no offset to add.  */
12816
12817 if (((TARGET_MACHO
12818 && rs6000_darwin64_abi)
12819 || DEFAULT_ABI == ABI_ELFv2
12820 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12821 && integer_zerop (TYPE_SIZE (type)))
12822 {
12823 unsigned HOST_WIDE_INT align, boundary;
12824 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12825 align = PARM_BOUNDARY / BITS_PER_UNIT;
12826 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12827 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12828 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12829 boundary /= BITS_PER_UNIT;
12830 if (boundary > align)
12831 {
12832 	  tree t;
12833 /* This updates arg ptr by the amount that would be necessary
12834 to align the zero-sized (but not zero-alignment) item. */
12835 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12836 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12837 gimplify_and_add (t, pre_p);
12838
12839 t = fold_convert (sizetype, valist_tmp);
12840 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12841 fold_convert (TREE_TYPE (valist),
12842 fold_build2 (BIT_AND_EXPR, sizetype, t,
12843 size_int (-boundary))));
12844 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12845 gimplify_and_add (t, pre_p);
12846 }
12847 /* Since it is zero-sized there's no increment for the item itself. */
12848 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12849 return build_va_arg_indirect_ref (valist_tmp);
12850 }
12851
12852 if (DEFAULT_ABI != ABI_V4)
12853 {
12854 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12855 {
12856 tree elem_type = TREE_TYPE (type);
12857 machine_mode elem_mode = TYPE_MODE (elem_type);
12858 int elem_size = GET_MODE_SIZE (elem_mode);
12859
12860 if (elem_size < UNITS_PER_WORD)
12861 {
12862 tree real_part, imag_part;
12863 gimple_seq post = NULL;
12864
12865 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12866 &post);
12867 /* Copy the value into a temporary, lest the formal temporary
12868 be reused out from under us. */
12869 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12870 gimple_seq_add_seq (pre_p, post);
12871
12872 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12873 post_p);
12874
12875 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12876 }
12877 }
12878
12879 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12880 }
12881
12882 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12883 f_fpr = DECL_CHAIN (f_gpr);
12884 f_res = DECL_CHAIN (f_fpr);
12885 f_ovf = DECL_CHAIN (f_res);
12886 f_sav = DECL_CHAIN (f_ovf);
12887
12888 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12889 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12890 f_fpr, NULL_TREE);
12891 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12892 f_ovf, NULL_TREE);
12893 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12894 f_sav, NULL_TREE);
12895
12896 size = int_size_in_bytes (type);
12897 rsize = (size + 3) / 4;
12898 int pad = 4 * rsize - size;
12899 align = 1;
12900
12901 machine_mode mode = TYPE_MODE (type);
12902 if (abi_v4_pass_in_fpr (mode, false))
12903 {
12904 /* FP args go in FP registers, if present. */
12905 reg = fpr;
12906 n_reg = (size + 7) / 8;
12907 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12908 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12909 if (mode != SFmode && mode != SDmode)
12910 align = 8;
12911 }
12912 else
12913 {
12914 /* Otherwise into GP registers. */
12915 reg = gpr;
12916 n_reg = rsize;
12917 sav_ofs = 0;
12918 sav_scale = 4;
12919 if (n_reg == 2)
12920 align = 8;
12921 }
12922
12923 /* Pull the value out of the saved registers.... */
12924
12925 lab_over = NULL;
12926 addr = create_tmp_var (ptr_type_node, "addr");
12927
12928 /* AltiVec vectors never go in registers when -mabi=altivec. */
12929 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12930 align = 16;
12931 else
12932 {
12933 lab_false = create_artificial_label (input_location);
12934 lab_over = create_artificial_label (input_location);
12935
12936       /* Long long is aligned in the registers, as is any other 2-GPR
12937 	 item such as complex int, due to a historical mistake.  */
12938 u = reg;
12939 if (n_reg == 2 && reg == gpr)
12940 {
12941 regalign = 1;
12942 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12943 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12944 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12945 unshare_expr (reg), u);
12946 }
12947 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12948 reg number is 0 for f1, so we want to make it odd. */
12949 else if (reg == fpr && mode == TDmode)
12950 {
12951 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12952 build_int_cst (TREE_TYPE (reg), 1));
12953 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12954 }
12955
12956 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12957 t = build2 (GE_EXPR, boolean_type_node, u, t);
12958 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12959 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12960 gimplify_and_add (t, pre_p);
12961
12962 t = sav;
12963 if (sav_ofs)
12964 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12965
12966 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12967 build_int_cst (TREE_TYPE (reg), n_reg));
12968 u = fold_convert (sizetype, u);
12969 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12970 t = fold_build_pointer_plus (t, u);
12971
12972 /* _Decimal32 varargs are located in the second word of the 64-bit
12973 FP register for 32-bit binaries. */
12974 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12975 t = fold_build_pointer_plus_hwi (t, size);
12976
12977 /* Args are passed right-aligned. */
12978 if (BYTES_BIG_ENDIAN)
12979 t = fold_build_pointer_plus_hwi (t, pad);
12980
12981 gimplify_assign (addr, t, pre_p);
12982
12983 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12984
12985 stmt = gimple_build_label (lab_false);
12986 gimple_seq_add_stmt (pre_p, stmt);
12987
12988 if ((n_reg == 2 && !regalign) || n_reg > 2)
12989 {
12990 /* Ensure that we don't find any more args in regs.
12991 	     Alignment has already taken care of the special cases.  */
12992 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12993 }
12994 }
12995
12996 /* ... otherwise out of the overflow area. */
12997
12998 /* Care for on-stack alignment if needed. */
12999 t = ovf;
13000 if (align != 1)
13001 {
13002 t = fold_build_pointer_plus_hwi (t, align - 1);
13003 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13004 build_int_cst (TREE_TYPE (t), -align));
13005 }
13006
13007 /* Args are passed right-aligned. */
13008 if (BYTES_BIG_ENDIAN)
13009 t = fold_build_pointer_plus_hwi (t, pad);
13010
13011 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13012
13013 gimplify_assign (unshare_expr (addr), t, pre_p);
13014
13015 t = fold_build_pointer_plus_hwi (t, size);
13016 gimplify_assign (unshare_expr (ovf), t, pre_p);
13017
13018 if (lab_over)
13019 {
13020 stmt = gimple_build_label (lab_over);
13021 gimple_seq_add_stmt (pre_p, stmt);
13022 }
13023
13024 if (STRICT_ALIGNMENT
13025 && (TYPE_ALIGN (type)
13026 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13027 {
13028 /* The value (of type complex double, for example) may not be
13029 aligned in memory in the saved registers, so copy via a
13030 temporary. (This is the same code as used for SPARC.) */
13031 tree tmp = create_tmp_var (type, "va_arg_tmp");
13032 tree dest_addr = build_fold_addr_expr (tmp);
13033
13034 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13035 3, dest_addr, addr, size_int (rsize * 4));
13036 TREE_ADDRESSABLE (tmp) = 1;
13037
13038 gimplify_and_add (copy, pre_p);
13039 addr = dest_addr;
13040 }
13041
13042 addr = fold_convert (ptrtype, addr);
13043 return build_va_arg_indirect_ref (addr);
13044 }
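
/* Hedged sketch of the V.4 lowering performed above for
   va_arg (ap, double) on a hard-float 32-bit target; this is a
   hand-written equivalent of the emitted gimple, for illustration
   (sav_ofs == 32, sav_scale == 8, align == 8 for DFmode; align8 is a
   hypothetical helper that rounds a pointer up to a multiple of 8):

     if (ap->fpr < 8)
       {
         addr = ap->reg_save_area + 32 + ap->fpr * 8;
         ap->fpr += 1;
       }
     else
       {
         ap->overflow_arg_area = align8 (ap->overflow_arg_area);
         addr = ap->overflow_arg_area;
         ap->overflow_arg_area += 8;
       }
     result = *(double *) addr;  */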
13045
13046 /* Builtins. */
13047
13048 static void
13049 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13050 {
13051 tree t;
13052 unsigned classify = rs6000_builtin_info[(int)code].attr;
13053 const char *attr_string = "";
13054
13055 gcc_assert (name != NULL);
13056   gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT - 1));
13057
13058 if (rs6000_builtin_decls[(int)code])
13059 fatal_error (input_location,
13060 "internal error: builtin function %qs already processed",
13061 name);
13062
13063 rs6000_builtin_decls[(int)code] = t =
13064 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13065
13066 /* Set any special attributes. */
13067 if ((classify & RS6000_BTC_CONST) != 0)
13068 {
13069 /* const function, function only depends on the inputs. */
13070 TREE_READONLY (t) = 1;
13071 TREE_NOTHROW (t) = 1;
13072 attr_string = ", const";
13073 }
13074 else if ((classify & RS6000_BTC_PURE) != 0)
13075 {
13076 /* pure function, function can read global memory, but does not set any
13077 external state. */
13078 DECL_PURE_P (t) = 1;
13079 TREE_NOTHROW (t) = 1;
13080 attr_string = ", pure";
13081 }
13082 else if ((classify & RS6000_BTC_FP) != 0)
13083 {
13084 /* Function is a math function. If rounding mode is on, then treat the
13085 function as not reading global memory, but it can have arbitrary side
13086 effects. If it is off, then assume the function is a const function.
13087 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13088 	 builtin-attrs.def that is used for the math functions.  */
13089 TREE_NOTHROW (t) = 1;
13090 if (flag_rounding_math)
13091 {
13092 DECL_PURE_P (t) = 1;
13093 DECL_IS_NOVOPS (t) = 1;
13094 attr_string = ", fp, pure";
13095 }
13096 else
13097 {
13098 TREE_READONLY (t) = 1;
13099 attr_string = ", fp, const";
13100 }
13101 }
13102 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13103 gcc_unreachable ();
13104
13105 if (TARGET_DEBUG_BUILTIN)
13106 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13107 (int)code, name, attr_string);
13108 }
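
/* Hypothetical usage sketch (the type name below is an assumption for
   illustration):

     def_builtin ("__builtin_altivec_vaddubm",
                  v16qi_ftype_v16qi_v16qi, ALTIVEC_BUILTIN_VADDUBM);

   registers the builtin with the front end via add_builtin_function,
   caches the decl in rs6000_builtin_decls, and applies const/pure/fp
   attributes from the rs6000_builtin_info table.  */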
13109
13110 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13111
13112 #undef RS6000_BUILTIN_0
13113 #undef RS6000_BUILTIN_1
13114 #undef RS6000_BUILTIN_2
13115 #undef RS6000_BUILTIN_3
13116 #undef RS6000_BUILTIN_A
13117 #undef RS6000_BUILTIN_D
13118 #undef RS6000_BUILTIN_H
13119 #undef RS6000_BUILTIN_P
13120 #undef RS6000_BUILTIN_X
13121
13122 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13123 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13124 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13125 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13126 { MASK, ICODE, NAME, ENUM },
13127
13128 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13129 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13130 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13131 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13132 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13133
13134 static const struct builtin_description bdesc_3arg[] =
13135 {
13136 #include "rs6000-builtin.def"
13137 };
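
/* How the X-macro tables here work (illustrative expansion following
   ordinary preprocessor behaviour): with only RS6000_BUILTIN_3
   defined to emit an initializer, a hypothetical rs6000-builtin.def
   line

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP,
                       "__builtin_altivec_vmaddfp",
                       MASK, ATTR, CODE_FOR_fmav4sf4)

   contributes

     { MASK, CODE_FOR_fmav4sf4, "__builtin_altivec_vmaddfp",
       ALTIVEC_BUILTIN_VMADDFP },

   to bdesc_3arg, while lines of every other arity expand to nothing.
   The same .def file is re-included below with different macro
   definitions to build each of the remaining tables.  */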
13138
13139 /* DST operations: void foo (void *, const int, const char). */
13140
13141 #undef RS6000_BUILTIN_0
13142 #undef RS6000_BUILTIN_1
13143 #undef RS6000_BUILTIN_2
13144 #undef RS6000_BUILTIN_3
13145 #undef RS6000_BUILTIN_A
13146 #undef RS6000_BUILTIN_D
13147 #undef RS6000_BUILTIN_H
13148 #undef RS6000_BUILTIN_P
13149 #undef RS6000_BUILTIN_X
13150
13151 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13152 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13153 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13154 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13155 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13156 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13157 { MASK, ICODE, NAME, ENUM },
13158
13159 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13160 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13161 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13162
13163 static const struct builtin_description bdesc_dst[] =
13164 {
13165 #include "rs6000-builtin.def"
13166 };
13167
13168 /* Simple binary operations: VECc = foo (VECa, VECb). */
13169
13170 #undef RS6000_BUILTIN_0
13171 #undef RS6000_BUILTIN_1
13172 #undef RS6000_BUILTIN_2
13173 #undef RS6000_BUILTIN_3
13174 #undef RS6000_BUILTIN_A
13175 #undef RS6000_BUILTIN_D
13176 #undef RS6000_BUILTIN_H
13177 #undef RS6000_BUILTIN_P
13178 #undef RS6000_BUILTIN_X
13179
13180 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13181 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13182 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13183 { MASK, ICODE, NAME, ENUM },
13184
13185 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13186 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13187 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13188 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13189 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13190 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13191
13192 static const struct builtin_description bdesc_2arg[] =
13193 {
13194 #include "rs6000-builtin.def"
13195 };
13196
13197 #undef RS6000_BUILTIN_0
13198 #undef RS6000_BUILTIN_1
13199 #undef RS6000_BUILTIN_2
13200 #undef RS6000_BUILTIN_3
13201 #undef RS6000_BUILTIN_A
13202 #undef RS6000_BUILTIN_D
13203 #undef RS6000_BUILTIN_H
13204 #undef RS6000_BUILTIN_P
13205 #undef RS6000_BUILTIN_X
13206
13207 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13208 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13209 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13210 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13211 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13212 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13213 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13214 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13215 { MASK, ICODE, NAME, ENUM },
13216
13217 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13218
13219 /* AltiVec predicates. */
13220
13221 static const struct builtin_description bdesc_altivec_preds[] =
13222 {
13223 #include "rs6000-builtin.def"
13224 };
13225
13226 /* ABS* operations. */
13227
13228 #undef RS6000_BUILTIN_0
13229 #undef RS6000_BUILTIN_1
13230 #undef RS6000_BUILTIN_2
13231 #undef RS6000_BUILTIN_3
13232 #undef RS6000_BUILTIN_A
13233 #undef RS6000_BUILTIN_D
13234 #undef RS6000_BUILTIN_H
13235 #undef RS6000_BUILTIN_P
13236 #undef RS6000_BUILTIN_X
13237
13238 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13239 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13240 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13241 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13242 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13243 { MASK, ICODE, NAME, ENUM },
13244
13245 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13246 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13247 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13248 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13249
13250 static const struct builtin_description bdesc_abs[] =
13251 {
13252 #include "rs6000-builtin.def"
13253 };
13254
13255 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13256 foo (VECa). */
13257
13258 #undef RS6000_BUILTIN_0
13259 #undef RS6000_BUILTIN_1
13260 #undef RS6000_BUILTIN_2
13261 #undef RS6000_BUILTIN_3
13262 #undef RS6000_BUILTIN_A
13263 #undef RS6000_BUILTIN_D
13264 #undef RS6000_BUILTIN_H
13265 #undef RS6000_BUILTIN_P
13266 #undef RS6000_BUILTIN_X
13267
13268 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13269 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13270 { MASK, ICODE, NAME, ENUM },
13271
13272 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13273 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13274 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13275 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13276 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13277 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13278 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13279
13280 static const struct builtin_description bdesc_1arg[] =
13281 {
13282 #include "rs6000-builtin.def"
13283 };
13284
13285 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
13286
13287 #undef RS6000_BUILTIN_0
13288 #undef RS6000_BUILTIN_1
13289 #undef RS6000_BUILTIN_2
13290 #undef RS6000_BUILTIN_3
13291 #undef RS6000_BUILTIN_A
13292 #undef RS6000_BUILTIN_D
13293 #undef RS6000_BUILTIN_H
13294 #undef RS6000_BUILTIN_P
13295 #undef RS6000_BUILTIN_X
13296
13297 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13298 { MASK, ICODE, NAME, ENUM },
13299
13300 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13301 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13302 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13303 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13304 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13305 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13306 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13307 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13308
13309 static const struct builtin_description bdesc_0arg[] =
13310 {
13311 #include "rs6000-builtin.def"
13312 };
13313
13314 /* HTM builtins. */
13315 #undef RS6000_BUILTIN_0
13316 #undef RS6000_BUILTIN_1
13317 #undef RS6000_BUILTIN_2
13318 #undef RS6000_BUILTIN_3
13319 #undef RS6000_BUILTIN_A
13320 #undef RS6000_BUILTIN_D
13321 #undef RS6000_BUILTIN_H
13322 #undef RS6000_BUILTIN_P
13323 #undef RS6000_BUILTIN_X
13324
13325 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13326 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13327 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13328 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13329 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13330 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13331 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13332 { MASK, ICODE, NAME, ENUM },
13333
13334 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13335 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13336
13337 static const struct builtin_description bdesc_htm[] =
13338 {
13339 #include "rs6000-builtin.def"
13340 };
13341
13342 #undef RS6000_BUILTIN_0
13343 #undef RS6000_BUILTIN_1
13344 #undef RS6000_BUILTIN_2
13345 #undef RS6000_BUILTIN_3
13346 #undef RS6000_BUILTIN_A
13347 #undef RS6000_BUILTIN_D
13348 #undef RS6000_BUILTIN_H
13349 #undef RS6000_BUILTIN_P
13350
13351 /* Return true if a builtin function is overloaded. */
13352 bool
13353 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13354 {
13355 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13356 }
13357
13358 const char *
13359 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13360 {
13361 return rs6000_builtin_info[(int)fncode].name;
13362 }
13363
13364 /* Expand an expression EXP that calls a builtin without arguments. */
13365 static rtx
13366 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13367 {
13368 rtx pat;
13369 machine_mode tmode = insn_data[icode].operand[0].mode;
13370
13371 if (icode == CODE_FOR_nothing)
13372 /* Builtin not supported on this processor. */
13373 return 0;
13374
13375 if (icode == CODE_FOR_rs6000_mffsl
13376 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13377 {
13378 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
13379 return const0_rtx;
13380 }
13381
13382 if (target == 0
13383 || GET_MODE (target) != tmode
13384 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13385 target = gen_reg_rtx (tmode);
13386
13387 pat = GEN_FCN (icode) (target);
13388 if (! pat)
13389 return 0;
13390 emit_insn (pat);
13391
13392 return target;
13393 }
13394
13395
13396 static rtx
13397 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13398 {
13399 rtx pat;
13400 tree arg0 = CALL_EXPR_ARG (exp, 0);
13401 tree arg1 = CALL_EXPR_ARG (exp, 1);
13402 rtx op0 = expand_normal (arg0);
13403 rtx op1 = expand_normal (arg1);
13404 machine_mode mode0 = insn_data[icode].operand[0].mode;
13405 machine_mode mode1 = insn_data[icode].operand[1].mode;
13406
13407 if (icode == CODE_FOR_nothing)
13408 /* Builtin not supported on this processor. */
13409 return 0;
13410
13411 /* If we got invalid arguments bail out before generating bad rtl. */
13412 if (arg0 == error_mark_node || arg1 == error_mark_node)
13413 return const0_rtx;
13414
13415 if (!CONST_INT_P (op0)
13416 || INTVAL (op0) > 255
13417 || INTVAL (op0) < 0)
13418 {
13419 error ("argument 1 must be an 8-bit field value");
13420 return const0_rtx;
13421 }
13422
13423 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13424 op0 = copy_to_mode_reg (mode0, op0);
13425
13426 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13427 op1 = copy_to_mode_reg (mode1, op1);
13428
13429 pat = GEN_FCN (icode) (op0, op1);
13430 if (!pat)
13431 return const0_rtx;
13432 emit_insn (pat);
13433
13434 return NULL_RTX;
13435 }
13436
13437 static rtx
13438 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13439 {
13440 rtx pat;
13441 tree arg0 = CALL_EXPR_ARG (exp, 0);
13442 rtx op0 = expand_normal (arg0);
13443
13444 if (icode == CODE_FOR_nothing)
13445 /* Builtin not supported on this processor. */
13446 return 0;
13447
13448 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13449 {
13450 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13451 "%<-msoft-float%>");
13452 return const0_rtx;
13453 }
13454
13455 /* If we got invalid arguments bail out before generating bad rtl. */
13456 if (arg0 == error_mark_node)
13457 return const0_rtx;
13458
13459 /* Only allow bit numbers 0 to 31. */
13460 if (!u5bit_cint_operand (op0, VOIDmode))
13461 {
13462 error ("Argument must be a constant between 0 and 31.");
13463 return const0_rtx;
13464 }
13465
13466 pat = GEN_FCN (icode) (op0);
13467 if (!pat)
13468 return const0_rtx;
13469 emit_insn (pat);
13470
13471 return NULL_RTX;
13472 }
13473
13474 static rtx
13475 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13476 {
13477 rtx pat;
13478 tree arg0 = CALL_EXPR_ARG (exp, 0);
13479 rtx op0 = expand_normal (arg0);
13480 machine_mode mode0 = insn_data[icode].operand[0].mode;
13481
13482 if (icode == CODE_FOR_nothing)
13483 /* Builtin not supported on this processor. */
13484 return 0;
13485
13486 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13487 {
13488 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13489 return const0_rtx;
13490 }
13491
13492 /* If we got invalid arguments bail out before generating bad rtl. */
13493 if (arg0 == error_mark_node)
13494 return const0_rtx;
13495
13496 /* If the argument is a constant, check the range. Argument can only be a
13497      2-bit value.  Unfortunately, we can't check the range of the value at
13498 compile time if the argument is a variable. The least significant two
13499 bits of the argument, regardless of type, are used to set the rounding
13500 mode. All other bits are ignored. */
13501 if (CONST_INT_P (op0) && !const_0_to_3_operand(op0, VOIDmode))
13502 {
13503 error ("Argument must be a value between 0 and 3.");
13504 return const0_rtx;
13505 }
13506
13507 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13508 op0 = copy_to_mode_reg (mode0, op0);
13509
13510 pat = GEN_FCN (icode) (op0);
13511 if (!pat)
13512 return const0_rtx;
13513 emit_insn (pat);
13514
13515 return NULL_RTX;
13516 }
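
/* Illustrative usage of the expander above (an assumption for
   exposition): the FPSCR RN field encodes 0 round-to-nearest-even,
   1 round-toward-zero, 2 round-toward-plus-infinity and
   3 round-toward-minus-infinity, so

     __builtin_set_fpscr_rn (1);    -- select truncation
     __builtin_set_fpscr_rn (n);    -- only n & 3 is significant

   and a constant argument outside 0..3 is rejected above.  */
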
13517 static rtx
13518 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13519 {
13520 rtx pat;
13521 tree arg0 = CALL_EXPR_ARG (exp, 0);
13522 rtx op0 = expand_normal (arg0);
13523 machine_mode mode0 = insn_data[icode].operand[0].mode;
13524
13525 if (TARGET_32BIT)
13526 /* Builtin not supported in 32-bit mode. */
13527 fatal_error (input_location,
13528 "%<__builtin_set_fpscr_drn%> is not supported "
13529 "in 32-bit mode.");
13530
13531 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13532 {
13533 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13534 return const0_rtx;
13535 }
13536
13537 if (icode == CODE_FOR_nothing)
13538 /* Builtin not supported on this processor. */
13539 return 0;
13540
13541 /* If we got invalid arguments bail out before generating bad rtl. */
13542 if (arg0 == error_mark_node)
13543 return const0_rtx;
13544
13545   /* If the argument is a constant, check the range.  The argument can only
13546      be a 3-bit value.  Unfortunately, we can't check the range of the value
13547      at compile time if the argument is a variable.  The least significant three
13548 bits of the argument, regardless of type, are used to set the rounding
13549 mode. All other bits are ignored. */
13550 if (CONST_INT_P (op0) && !const_0_to_7_operand(op0, VOIDmode))
13551 {
13552 error ("Argument must be a value between 0 and 7.");
13553 return const0_rtx;
13554 }
13555
13556 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13557 op0 = copy_to_mode_reg (mode0, op0);
13558
13559 pat = GEN_FCN (icode) (op0);
13560 if (! pat)
13561 return const0_rtx;
13562 emit_insn (pat);
13563
13564 return NULL_RTX;
13565 }
13566
13567 static rtx
13568 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13569 {
13570 rtx pat;
13571 tree arg0 = CALL_EXPR_ARG (exp, 0);
13572 rtx op0 = expand_normal (arg0);
13573 machine_mode tmode = insn_data[icode].operand[0].mode;
13574 machine_mode mode0 = insn_data[icode].operand[1].mode;
13575
13576 if (icode == CODE_FOR_nothing)
13577 /* Builtin not supported on this processor. */
13578 return 0;
13579
13580 /* If we got invalid arguments bail out before generating bad rtl. */
13581 if (arg0 == error_mark_node)
13582 return const0_rtx;
13583
13584 if (icode == CODE_FOR_altivec_vspltisb
13585 || icode == CODE_FOR_altivec_vspltish
13586 || icode == CODE_FOR_altivec_vspltisw)
13587 {
13588 /* Only allow 5-bit *signed* literals. */
13589 if (!CONST_INT_P (op0)
13590 || INTVAL (op0) > 15
13591 || INTVAL (op0) < -16)
13592 {
13593 error ("argument 1 must be a 5-bit signed literal");
13594 return CONST0_RTX (tmode);
13595 }
13596 }
13597
13598 if (target == 0
13599 || GET_MODE (target) != tmode
13600 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13601 target = gen_reg_rtx (tmode);
13602
13603 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13604 op0 = copy_to_mode_reg (mode0, op0);
13605
13606 pat = GEN_FCN (icode) (target, op0);
13607 if (! pat)
13608 return 0;
13609 emit_insn (pat);
13610
13611 return target;
13612 }
13613
13614 static rtx
13615 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13616 {
13617 rtx pat, scratch1, scratch2;
13618 tree arg0 = CALL_EXPR_ARG (exp, 0);
13619 rtx op0 = expand_normal (arg0);
13620 machine_mode tmode = insn_data[icode].operand[0].mode;
13621 machine_mode mode0 = insn_data[icode].operand[1].mode;
13622
13623 /* If we have invalid arguments, bail out before generating bad rtl. */
13624 if (arg0 == error_mark_node)
13625 return const0_rtx;
13626
13627 if (target == 0
13628 || GET_MODE (target) != tmode
13629 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13630 target = gen_reg_rtx (tmode);
13631
13632 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13633 op0 = copy_to_mode_reg (mode0, op0);
13634
13635 scratch1 = gen_reg_rtx (mode0);
13636 scratch2 = gen_reg_rtx (mode0);
13637
13638 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13639 if (! pat)
13640 return 0;
13641 emit_insn (pat);
13642
13643 return target;
13644 }
13645
13646 static rtx
13647 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13648 {
13649 rtx pat;
13650 tree arg0 = CALL_EXPR_ARG (exp, 0);
13651 tree arg1 = CALL_EXPR_ARG (exp, 1);
13652 rtx op0 = expand_normal (arg0);
13653 rtx op1 = expand_normal (arg1);
13654 machine_mode tmode = insn_data[icode].operand[0].mode;
13655 machine_mode mode0 = insn_data[icode].operand[1].mode;
13656 machine_mode mode1 = insn_data[icode].operand[2].mode;
13657
13658 if (icode == CODE_FOR_nothing)
13659 /* Builtin not supported on this processor. */
13660 return 0;
13661
13662 /* If we got invalid arguments bail out before generating bad rtl. */
13663 if (arg0 == error_mark_node || arg1 == error_mark_node)
13664 return const0_rtx;
13665
13666 if (icode == CODE_FOR_unpackv1ti
13667 || icode == CODE_FOR_unpackkf
13668 || icode == CODE_FOR_unpacktf
13669 || icode == CODE_FOR_unpackif
13670 || icode == CODE_FOR_unpacktd)
13671 {
13672 /* Only allow 1-bit unsigned literals. */
13673 STRIP_NOPS (arg1);
13674 if (TREE_CODE (arg1) != INTEGER_CST
13675 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13676 {
13677 error ("argument 2 must be a 1-bit unsigned literal");
13678 return CONST0_RTX (tmode);
13679 }
13680 }
13681 else if (icode == CODE_FOR_altivec_vspltw)
13682 {
13683 /* Only allow 2-bit unsigned literals. */
13684 STRIP_NOPS (arg1);
13685 if (TREE_CODE (arg1) != INTEGER_CST
13686 || TREE_INT_CST_LOW (arg1) & ~3)
13687 {
13688 error ("argument 2 must be a 2-bit unsigned literal");
13689 return CONST0_RTX (tmode);
13690 }
13691 }
13692 else if (icode == CODE_FOR_altivec_vsplth)
13693 {
13694 /* Only allow 3-bit unsigned literals. */
13695 STRIP_NOPS (arg1);
13696 if (TREE_CODE (arg1) != INTEGER_CST
13697 || TREE_INT_CST_LOW (arg1) & ~7)
13698 {
13699 error ("argument 2 must be a 3-bit unsigned literal");
13700 return CONST0_RTX (tmode);
13701 }
13702 }
13703 else if (icode == CODE_FOR_altivec_vspltb)
13704 {
13705 /* Only allow 4-bit unsigned literals. */
13706 STRIP_NOPS (arg1);
13707 if (TREE_CODE (arg1) != INTEGER_CST
13708 || TREE_INT_CST_LOW (arg1) & ~15)
13709 {
13710 error ("argument 2 must be a 4-bit unsigned literal");
13711 return CONST0_RTX (tmode);
13712 }
13713 }
13714 else if (icode == CODE_FOR_altivec_vcfux
13715 || icode == CODE_FOR_altivec_vcfsx
13716 || icode == CODE_FOR_altivec_vctsxs
13717 || icode == CODE_FOR_altivec_vctuxs)
13718 {
13719 /* Only allow 5-bit unsigned literals. */
13720 STRIP_NOPS (arg1);
13721 if (TREE_CODE (arg1) != INTEGER_CST
13722 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13723 {
13724 error ("argument 2 must be a 5-bit unsigned literal");
13725 return CONST0_RTX (tmode);
13726 }
13727 }
13728 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13729 || icode == CODE_FOR_dfptstsfi_lt_dd
13730 || icode == CODE_FOR_dfptstsfi_gt_dd
13731 || icode == CODE_FOR_dfptstsfi_unordered_dd
13732 || icode == CODE_FOR_dfptstsfi_eq_td
13733 || icode == CODE_FOR_dfptstsfi_lt_td
13734 || icode == CODE_FOR_dfptstsfi_gt_td
13735 || icode == CODE_FOR_dfptstsfi_unordered_td)
13736 {
13737 /* Only allow 6-bit unsigned literals. */
13738 STRIP_NOPS (arg0);
13739 if (TREE_CODE (arg0) != INTEGER_CST
13740 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13741 {
13742 error ("argument 1 must be a 6-bit unsigned literal");
13743 return CONST0_RTX (tmode);
13744 }
13745 }
13746 else if (icode == CODE_FOR_xststdcqp_kf
13747 || icode == CODE_FOR_xststdcqp_tf
13748 || icode == CODE_FOR_xststdcdp
13749 || icode == CODE_FOR_xststdcsp
13750 || icode == CODE_FOR_xvtstdcdp
13751 || icode == CODE_FOR_xvtstdcsp)
13752 {
13753 /* Only allow 7-bit unsigned literals. */
13754 STRIP_NOPS (arg1);
13755 if (TREE_CODE (arg1) != INTEGER_CST
13756 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13757 {
13758 error ("argument 2 must be a 7-bit unsigned literal");
13759 return CONST0_RTX (tmode);
13760 }
13761 }
13762
13763 if (target == 0
13764 || GET_MODE (target) != tmode
13765 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13766 target = gen_reg_rtx (tmode);
13767
13768 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13769 op0 = copy_to_mode_reg (mode0, op0);
13770 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13771 op1 = copy_to_mode_reg (mode1, op1);
13772
13773 pat = GEN_FCN (icode) (target, op0, op1);
13774 if (! pat)
13775 return 0;
13776 emit_insn (pat);
13777
13778 return target;
13779 }
13780
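/* Editorial example (not part of GCC): a minimal user-level sketch of the
   literal checking above.  vec_ctf expands through altivec_vcfsx, so its
   second argument must be a 5-bit unsigned literal (0..31); anything else
   is rejected with the error emitted above.  The function name is
   illustrative only; compile with -maltivec.  */
#if 0
#include <altivec.h>

vector float
scale_by_sixteenths (vector signed int v)
{
  return vec_ctf (v, 4);	/* OK: the literal 4 fits in 5 bits.  */
}
#endif

/* Expand an AltiVec predicate builtin.  The first call argument selects
   which CR6 test is emitted after the underlying compare instruction.  */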
13781 static rtx
13782 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13783 {
13784 rtx pat, scratch;
13785 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13786 tree arg0 = CALL_EXPR_ARG (exp, 1);
13787 tree arg1 = CALL_EXPR_ARG (exp, 2);
13788 rtx op0 = expand_normal (arg0);
13789 rtx op1 = expand_normal (arg1);
13790 machine_mode tmode = SImode;
13791 machine_mode mode0 = insn_data[icode].operand[1].mode;
13792 machine_mode mode1 = insn_data[icode].operand[2].mode;
13793 int cr6_form_int;
13794
13795 if (TREE_CODE (cr6_form) != INTEGER_CST)
13796 {
13797 error ("argument 1 of %qs must be a constant",
13798 "__builtin_altivec_predicate");
13799 return const0_rtx;
13800 }
13801 else
13802 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13803
13804 gcc_assert (mode0 == mode1);
13805
13806 /* If we have invalid arguments, bail out before generating bad rtl. */
13807 if (arg0 == error_mark_node || arg1 == error_mark_node)
13808 return const0_rtx;
13809
13810 if (target == 0
13811 || GET_MODE (target) != tmode
13812 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13813 target = gen_reg_rtx (tmode);
13814
13815 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13816 op0 = copy_to_mode_reg (mode0, op0);
13817 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13818 op1 = copy_to_mode_reg (mode1, op1);
13819
13820 /* Note that for many of the relevant operations (e.g. cmpne or
13821 cmpeq) with float or double operands, it would make more sense
13822 for the allocated scratch register to have a vector-of-integer
13823 mode. But the choice to copy the mode of operand 0 was made long
13824 ago, and there are no plans to change it. */
13825 scratch = gen_reg_rtx (mode0);
13826
13827 pat = GEN_FCN (icode) (scratch, op0, op1);
13828 if (! pat)
13829 return 0;
13830 emit_insn (pat);
13831
13832 /* The vec_any* and vec_all* predicates use the same opcodes for two
13833 different operations, but the bits in CR6 will be different
13834 depending on what information we want. So we have to play tricks
13835 with CR6 to get the right bits out.
13836
13837 If you think this is disgusting, look at the specs for the
13838 AltiVec predicates. */
13839
13840 switch (cr6_form_int)
13841 {
13842 case 0:
13843 emit_insn (gen_cr6_test_for_zero (target));
13844 break;
13845 case 1:
13846 emit_insn (gen_cr6_test_for_zero_reverse (target));
13847 break;
13848 case 2:
13849 emit_insn (gen_cr6_test_for_lt (target));
13850 break;
13851 case 3:
13852 emit_insn (gen_cr6_test_for_lt_reverse (target));
13853 break;
13854 default:
13855 error ("argument 1 of %qs is out of range",
13856 "__builtin_altivec_predicate");
13857 break;
13858 }
13859
13860 return target;
13861 }
13862
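/* Editorial example (not part of GCC): the CR6 handling above is what lets
   the vec_all_* and vec_any_* predicates share one compare opcode.  Both
   functions below expand to vcmpequw. on V4SI; only the CR6 bit tested
   differs.  Names are illustrative; compile with -maltivec.  */
#if 0
#include <altivec.h>

int
all_equal (vector signed int a, vector signed int b)
{
  return vec_all_eq (a, b);	/* tests the CR6 "all elements true" bit */
}

int
any_equal (vector signed int a, vector signed int b)
{
  return vec_any_eq (a, b);	/* tests the inverse of "all elements false" */
}
#endif

/* Return a V16QImode permute selector, forced into a register, that
   byte-swaps each element of a vector of MODE; used with vperm to convert
   between big-endian and little-endian element layouts.  */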
13863 rtx
13864 swap_endian_selector_for_mode (machine_mode mode)
13865 {
13866 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13867 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13868 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13869 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13870
13871 unsigned int *swaparray, i;
13872 rtx perm[16];
13873
13874 switch (mode)
13875 {
13876 case E_V1TImode:
13877 swaparray = swap1;
13878 break;
13879 case E_V2DFmode:
13880 case E_V2DImode:
13881 swaparray = swap2;
13882 break;
13883 case E_V4SFmode:
13884 case E_V4SImode:
13885 swaparray = swap4;
13886 break;
13887 case E_V8HImode:
13888 swaparray = swap8;
13889 break;
13890 default:
13891 gcc_unreachable ();
13892 }
13893
13894 for (i = 0; i < 16; ++i)
13895 perm[i] = GEN_INT (swaparray[i]);
13896
13897 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13898 gen_rtvec_v (16, perm)));
13899 }
13900
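/* Editorial example (not part of GCC): the selector built above is a vperm
   control vector in which entry i names the source byte that supplies
   result byte i.  The sketch below hard-codes the V4SImode selector to
   byte-reverse each 32-bit element; the function name is illustrative.  */
#if 0
#include <altivec.h>

vector unsigned int
bswap_each_word (vector unsigned int v)
{
  const vector unsigned char sel =
    { 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12 };
  return vec_perm (v, v, sel);
}
#endif

/* Expand an AltiVec/VSX vector load builtin.  BLK selects a BLKmode memory
   reference; it is used by the lvlx/lvrx family below.  */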
13901 static rtx
13902 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13903 {
13904 rtx pat, addr;
13905 tree arg0 = CALL_EXPR_ARG (exp, 0);
13906 tree arg1 = CALL_EXPR_ARG (exp, 1);
13907 machine_mode tmode = insn_data[icode].operand[0].mode;
13908 machine_mode mode0 = Pmode;
13909 machine_mode mode1 = Pmode;
13910 rtx op0 = expand_normal (arg0);
13911 rtx op1 = expand_normal (arg1);
13912
13913 if (icode == CODE_FOR_nothing)
13914 /* Builtin not supported on this processor. */
13915 return 0;
13916
13917 /* If we got invalid arguments, bail out before generating bad rtl. */
13918 if (arg0 == error_mark_node || arg1 == error_mark_node)
13919 return const0_rtx;
13920
13921 if (target == 0
13922 || GET_MODE (target) != tmode
13923 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13924 target = gen_reg_rtx (tmode);
13925
13926 op1 = copy_to_mode_reg (mode1, op1);
13927
13928 /* For LVX, express the RTL accurately by ANDing the address with -16.
13929 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13930 so the raw address is fine. */
13931 if (icode == CODE_FOR_altivec_lvx_v1ti
13932 || icode == CODE_FOR_altivec_lvx_v2df
13933 || icode == CODE_FOR_altivec_lvx_v2di
13934 || icode == CODE_FOR_altivec_lvx_v4sf
13935 || icode == CODE_FOR_altivec_lvx_v4si
13936 || icode == CODE_FOR_altivec_lvx_v8hi
13937 || icode == CODE_FOR_altivec_lvx_v16qi)
13938 {
13939 rtx rawaddr;
13940 if (op0 == const0_rtx)
13941 rawaddr = op1;
13942 else
13943 {
13944 op0 = copy_to_mode_reg (mode0, op0);
13945 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13946 }
13947 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13948 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13949
13950 emit_insn (gen_rtx_SET (target, addr));
13951 }
13952 else
13953 {
13954 if (op0 == const0_rtx)
13955 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13956 else
13957 {
13958 op0 = copy_to_mode_reg (mode0, op0);
13959 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13960 gen_rtx_PLUS (Pmode, op1, op0));
13961 }
13962
13963 pat = GEN_FCN (icode) (target, addr);
13964 if (! pat)
13965 return 0;
13966 emit_insn (pat);
13967 }
13968
13969 return target;
13970 }
13971
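/* Editorial example (not part of GCC): the AND with -16 above mirrors the
   hardware, since lvx ignores the low four address bits and always loads
   the containing 16-byte-aligned block.  Illustrative sketch, -maltivec:  */
#if 0
#include <altivec.h>

vector unsigned char
load_block (const unsigned char *p)
{
  return vec_ld (5, p);		/* loads from (p + 5) with the low 4 bits
				   of the address cleared */
}
#endif

/* Expand the stxvl and xst_len_r builtins, which store a vector with an
   explicit length and therefore produce no result.  */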
13972 static rtx
13973 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13974 {
13975 rtx pat;
13976 tree arg0 = CALL_EXPR_ARG (exp, 0);
13977 tree arg1 = CALL_EXPR_ARG (exp, 1);
13978 tree arg2 = CALL_EXPR_ARG (exp, 2);
13979 rtx op0 = expand_normal (arg0);
13980 rtx op1 = expand_normal (arg1);
13981 rtx op2 = expand_normal (arg2);
13982 machine_mode mode0 = insn_data[icode].operand[0].mode;
13983 machine_mode mode1 = insn_data[icode].operand[1].mode;
13984 machine_mode mode2 = insn_data[icode].operand[2].mode;
13985
13986 if (icode == CODE_FOR_nothing)
13987 /* Builtin not supported on this processor. */
13988 return NULL_RTX;
13989
13990 /* If we got invalid arguments, bail out before generating bad rtl. */
13991 if (arg0 == error_mark_node
13992 || arg1 == error_mark_node
13993 || arg2 == error_mark_node)
13994 return NULL_RTX;
13995
13996 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13997 op0 = copy_to_mode_reg (mode0, op0);
13998 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13999 op1 = copy_to_mode_reg (mode1, op1);
14000 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14001 op2 = copy_to_mode_reg (mode2, op2);
14002
14003 pat = GEN_FCN (icode) (op0, op1, op2);
14004 if (pat)
14005 emit_insn (pat);
14006
14007 return NULL_RTX;
14008 }
14009
14010 static rtx
14011 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14012 {
14013 tree arg0 = CALL_EXPR_ARG (exp, 0);
14014 tree arg1 = CALL_EXPR_ARG (exp, 1);
14015 tree arg2 = CALL_EXPR_ARG (exp, 2);
14016 rtx op0 = expand_normal (arg0);
14017 rtx op1 = expand_normal (arg1);
14018 rtx op2 = expand_normal (arg2);
14019 rtx pat, addr, rawaddr;
14020 machine_mode tmode = insn_data[icode].operand[0].mode;
14021 machine_mode smode = insn_data[icode].operand[1].mode;
14022 machine_mode mode1 = Pmode;
14023 machine_mode mode2 = Pmode;
14024
14025 /* Invalid arguments. Bail out before doing anything stupid! */
14026 if (arg0 == error_mark_node
14027 || arg1 == error_mark_node
14028 || arg2 == error_mark_node)
14029 return const0_rtx;
14030
14031 op2 = copy_to_mode_reg (mode2, op2);
14032
14033 /* For STVX, express the RTL accurately by ANDing the address with -16.
14034 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14035 so the raw address is fine. */
14036 if (icode == CODE_FOR_altivec_stvx_v2df
14037 || icode == CODE_FOR_altivec_stvx_v2di
14038 || icode == CODE_FOR_altivec_stvx_v4sf
14039 || icode == CODE_FOR_altivec_stvx_v4si
14040 || icode == CODE_FOR_altivec_stvx_v8hi
14041 || icode == CODE_FOR_altivec_stvx_v16qi)
14042 {
14043 if (op1 == const0_rtx)
14044 rawaddr = op2;
14045 else
14046 {
14047 op1 = copy_to_mode_reg (mode1, op1);
14048 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14049 }
14050
14051 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14052 addr = gen_rtx_MEM (tmode, addr);
14053
14054 op0 = copy_to_mode_reg (tmode, op0);
14055
14056 emit_insn (gen_rtx_SET (addr, op0));
14057 }
14058 else
14059 {
14060 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14061 op0 = copy_to_mode_reg (smode, op0);
14062
14063 if (op1 == const0_rtx)
14064 addr = gen_rtx_MEM (tmode, op2);
14065 else
14066 {
14067 op1 = copy_to_mode_reg (mode1, op1);
14068 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14069 }
14070
14071 pat = GEN_FCN (icode) (addr, op0);
14072 if (pat)
14073 emit_insn (pat);
14074 }
14075
14076 return NULL_RTX;
14077 }
14078
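/* Editorial example (not part of GCC): the store side mirrors the load
   side, as stvx likewise ignores the low four address bits.  Illustrative
   sketch, -maltivec:  */
#if 0
#include <altivec.h>

void
store_block (vector unsigned char v, unsigned char *p)
{
  vec_st (v, 0, p);	/* stores to the 16-byte block containing p */
}
#endif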
14079 /* Return the appropriate SPR number associated with the given builtin. */
14080 static inline HOST_WIDE_INT
14081 htm_spr_num (enum rs6000_builtins code)
14082 {
14083 if (code == HTM_BUILTIN_GET_TFHAR
14084 || code == HTM_BUILTIN_SET_TFHAR)
14085 return TFHAR_SPR;
14086 else if (code == HTM_BUILTIN_GET_TFIAR
14087 || code == HTM_BUILTIN_SET_TFIAR)
14088 return TFIAR_SPR;
14089 else if (code == HTM_BUILTIN_GET_TEXASR
14090 || code == HTM_BUILTIN_SET_TEXASR)
14091 return TEXASR_SPR;
14092 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14093 || code == HTM_BUILTIN_SET_TEXASRU);
14094 return TEXASRU_SPR;
14095 }
14096
14097 /* Return the appropriate SPR regno associated with the given builtin. */
14098 static inline HOST_WIDE_INT
14099 htm_spr_regno (enum rs6000_builtins code)
14100 {
14101 if (code == HTM_BUILTIN_GET_TFHAR
14102 || code == HTM_BUILTIN_SET_TFHAR)
14103 return TFHAR_REGNO;
14104 else if (code == HTM_BUILTIN_GET_TFIAR
14105 || code == HTM_BUILTIN_SET_TFIAR)
14106 return TFIAR_REGNO;
14107 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14108 || code == HTM_BUILTIN_SET_TEXASR
14109 || code == HTM_BUILTIN_GET_TEXASRU
14110 || code == HTM_BUILTIN_SET_TEXASRU);
14111 return TEXASR_REGNO;
14112 }
14113
14114 /* Return the correct ICODE value depending on whether we are
14115 setting or reading the HTM SPRs. */
14116 static inline enum insn_code
14117 rs6000_htm_spr_icode (bool nonvoid)
14118 {
14119 if (nonvoid)
14120 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14121 else
14122 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14123 }
14124
14125 /* Expand the HTM builtin in EXP and store the result in TARGET.
14126 Store true in *EXPANDEDP if we found a builtin to expand. */
14127 static rtx
14128 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14129 {
14130 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14131 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14132 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14133 const struct builtin_description *d;
14134 size_t i;
14135
14136 *expandedp = true;
14137
14138 if (!TARGET_POWERPC64
14139 && (fcode == HTM_BUILTIN_TABORTDC
14140 || fcode == HTM_BUILTIN_TABORTDCI))
14141 {
14142 size_t uns_fcode = (size_t)fcode;
14143 const char *name = rs6000_builtin_info[uns_fcode].name;
14144 error ("builtin %qs is only valid in 64-bit mode", name);
14145 return const0_rtx;
14146 }
14147
14148 /* Expand the HTM builtins. */
14149 d = bdesc_htm;
14150 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14151 if (d->code == fcode)
14152 {
14153 rtx op[MAX_HTM_OPERANDS], pat;
14154 int nopnds = 0;
14155 tree arg;
14156 call_expr_arg_iterator iter;
14157 unsigned attr = rs6000_builtin_info[fcode].attr;
14158 enum insn_code icode = d->icode;
14159 const struct insn_operand_data *insn_op;
14160 bool uses_spr = (attr & RS6000_BTC_SPR);
14161 rtx cr = NULL_RTX;
14162
14163 if (uses_spr)
14164 icode = rs6000_htm_spr_icode (nonvoid);
14165 insn_op = &insn_data[icode].operand[0];
14166
14167 if (nonvoid)
14168 {
14169 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14170 if (!target
14171 || GET_MODE (target) != tmode
14172 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14173 target = gen_reg_rtx (tmode);
14174 if (uses_spr)
14175 op[nopnds++] = target;
14176 }
14177
14178 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14179 {
14180 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14181 return const0_rtx;
14182
14183 insn_op = &insn_data[icode].operand[nopnds];
14184
14185 op[nopnds] = expand_normal (arg);
14186
14187 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14188 {
14189 if (!strcmp (insn_op->constraint, "n"))
14190 {
14191 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14192 if (!CONST_INT_P (op[nopnds]))
14193 error ("argument %d must be an unsigned literal", arg_num);
14194 else
14195 error ("argument %d is an unsigned literal that is "
14196 "out of range", arg_num);
14197 return const0_rtx;
14198 }
14199 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14200 }
14201
14202 nopnds++;
14203 }
14204
14205 /* Handle the builtins for extended mnemonics. These accept
14206 no arguments, but map to builtins that take arguments. */
14207 switch (fcode)
14208 {
14209 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14210 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14211 op[nopnds++] = GEN_INT (1);
14212 if (flag_checking)
14213 attr |= RS6000_BTC_UNARY;
14214 break;
14215 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14216 op[nopnds++] = GEN_INT (0);
14217 if (flag_checking)
14218 attr |= RS6000_BTC_UNARY;
14219 break;
14220 default:
14221 break;
14222 }
14223
14224 /* If this builtin accesses SPRs, then pass in the appropriate
14225 SPR number and SPR regno as the last two operands. */
14226 if (uses_spr)
14227 {
14228 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14229 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14230 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14231 }
14232 /* If this builtin accesses a CR, then pass in a scratch
14233 CR as the last operand. */
14234 else if (attr & RS6000_BTC_CR)
14235 {
cr = gen_reg_rtx (CCmode);
14236 op[nopnds++] = cr;
14237 }
14238
14239 if (flag_checking)
14240 {
14241 int expected_nopnds = 0;
14242 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14243 expected_nopnds = 1;
14244 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14245 expected_nopnds = 2;
14246 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14247 expected_nopnds = 3;
14248 if (!(attr & RS6000_BTC_VOID))
14249 expected_nopnds += 1;
14250 if (uses_spr)
14251 expected_nopnds += 2;
14252
14253 gcc_assert (nopnds == expected_nopnds
14254 && nopnds <= MAX_HTM_OPERANDS);
14255 }
14256
14257 switch (nopnds)
14258 {
14259 case 1:
14260 pat = GEN_FCN (icode) (op[0]);
14261 break;
14262 case 2:
14263 pat = GEN_FCN (icode) (op[0], op[1]);
14264 break;
14265 case 3:
14266 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14267 break;
14268 case 4:
14269 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14270 break;
14271 default:
14272 gcc_unreachable ();
14273 }
14274 if (!pat)
14275 return NULL_RTX;
14276 emit_insn (pat);
14277
14278 if (attr & RS6000_BTC_CR)
14279 {
14280 if (fcode == HTM_BUILTIN_TBEGIN)
14281 {
14282 /* Emit code to set TARGET to true or false depending on
14283 whether the tbegin. instruction succeeded or failed
14284 to start a transaction. We do this by placing the 1's
14285 complement of CR's EQ bit into TARGET. */
14286 rtx scratch = gen_reg_rtx (SImode);
14287 emit_insn (gen_rtx_SET (scratch,
14288 gen_rtx_EQ (SImode, cr,
14289 const0_rtx)));
14290 emit_insn (gen_rtx_SET (target,
14291 gen_rtx_XOR (SImode, scratch,
14292 GEN_INT (1))));
14293 }
14294 else
14295 {
14296 /* Emit code to copy the 4-bit condition register field
14297 CR into the least significant end of register TARGET. */
14298 rtx scratch1 = gen_reg_rtx (SImode);
14299 rtx scratch2 = gen_reg_rtx (SImode);
14300 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14301 emit_insn (gen_movcc (subreg, cr));
14302 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14303 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14304 }
14305 }
14306
14307 if (nonvoid)
14308 return target;
14309 return const0_rtx;
14310 }
14311
14312 *expandedp = false;
14313 return NULL_RTX;
14314 }
14315
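/* Editorial example (not part of GCC): the CR EQ-bit handling above for
   tbegin. is what makes __builtin_tbegin return 1 when a transaction
   starts and 0 when it fails or aborts.  Illustrative sketch; compile
   with -mhtm.  */
#if 0
int
add_transactionally (int *p)
{
  if (__builtin_tbegin (0))
    {
      ++*p;			/* transactional body */
      __builtin_tend (0);
      return 1;
    }
  return 0;			/* failed to start, or aborted */
}
#endif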
14316 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14317
14318 static rtx
14319 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14320 rtx target)
14321 {
14322 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14323 if (fcode == RS6000_BUILTIN_CPU_INIT)
14324 return const0_rtx;
14325
14326 if (target == 0 || GET_MODE (target) != SImode)
14327 target = gen_reg_rtx (SImode);
14328
14329 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14330 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14331 /* The target_clones attribute creates an ARRAY_REF instead of a
14332 STRING_CST; convert it back to a STRING_CST. */
14333 if (TREE_CODE (arg) == ARRAY_REF
14334 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14335 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14336 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14337 arg = TREE_OPERAND (arg, 0);
14338
14339 if (TREE_CODE (arg) != STRING_CST)
14340 {
14341 error ("builtin %qs only accepts a string argument",
14342 rs6000_builtin_info[(size_t) fcode].name);
14343 return const0_rtx;
14344 }
14345
14346 if (fcode == RS6000_BUILTIN_CPU_IS)
14347 {
14348 const char *cpu = TREE_STRING_POINTER (arg);
14349 rtx cpuid = NULL_RTX;
14350 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14351 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14352 {
14353 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14354 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14355 break;
14356 }
14357 if (cpuid == NULL_RTX)
14358 {
14359 /* Invalid CPU argument. */
14360 error ("cpu %qs is an invalid argument to builtin %qs",
14361 cpu, rs6000_builtin_info[(size_t) fcode].name);
14362 return const0_rtx;
14363 }
14364
14365 rtx platform = gen_reg_rtx (SImode);
14366 rtx tcbmem = gen_const_mem (SImode,
14367 gen_rtx_PLUS (Pmode,
14368 gen_rtx_REG (Pmode, TLS_REGNUM),
14369 GEN_INT (TCB_PLATFORM_OFFSET)));
14370 emit_move_insn (platform, tcbmem);
14371 emit_insn (gen_eqsi3 (target, platform, cpuid));
14372 }
14373 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14374 {
14375 const char *hwcap = TREE_STRING_POINTER (arg);
14376 rtx mask = NULL_RTX;
14377 int hwcap_offset;
14378 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14379 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14380 {
14381 mask = GEN_INT (cpu_supports_info[i].mask);
14382 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14383 break;
14384 }
14385 if (mask == NULL_RTX)
14386 {
14387 /* Invalid HWCAP argument. */
14388 error ("%s %qs is an invalid argument to builtin %qs",
14389 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14390 return const0_rtx;
14391 }
14392
14393 rtx tcb_hwcap = gen_reg_rtx (SImode);
14394 rtx tcbmem = gen_const_mem (SImode,
14395 gen_rtx_PLUS (Pmode,
14396 gen_rtx_REG (Pmode, TLS_REGNUM),
14397 GEN_INT (hwcap_offset)));
14398 emit_move_insn (tcb_hwcap, tcbmem);
14399 rtx scratch1 = gen_reg_rtx (SImode);
14400 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14401 rtx scratch2 = gen_reg_rtx (SImode);
14402 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14403 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14404 }
14405 else
14406 gcc_unreachable ();
14407
14408 /* Record that we have expanded a CPU builtin, so that we can later
14409 emit a reference to the special symbol exported by LIBC to ensure we
14410 do not link against an old LIBC that doesn't support this feature. */
14411 cpu_builtin_p = true;
14412
14413 #else
14414 warning (0, "builtin %qs needs GLIBC (2.23 or newer) that exports hardware "
14415 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14416
14417 /* For old LIBCs, always return FALSE. */
14418 emit_move_insn (target, GEN_INT (0));
14419 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14420
14421 return target;
14422 }
14423
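/* Editorial example (not part of GCC): typical use of the CPU builtins
   expanded above; each reduces to a load from the TCB plus a compare or a
   mask test.  Illustrative sketch, requiring glibc 2.23 or newer:  */
#if 0
#include <stdio.h>

void
report_cpu (void)
{
  __builtin_cpu_init ();	/* a nop on powerpc; see above */
  if (__builtin_cpu_is ("power9"))
    puts ("running on a POWER9");
  if (__builtin_cpu_supports ("vsx"))
    puts ("VSX is available");
}
#endif

/* Expand a builtin that takes three operands, enforcing any literal-range
   constraints the underlying instruction imposes.  */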
14424 static rtx
14425 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14426 {
14427 rtx pat;
14428 tree arg0 = CALL_EXPR_ARG (exp, 0);
14429 tree arg1 = CALL_EXPR_ARG (exp, 1);
14430 tree arg2 = CALL_EXPR_ARG (exp, 2);
14431 rtx op0 = expand_normal (arg0);
14432 rtx op1 = expand_normal (arg1);
14433 rtx op2 = expand_normal (arg2);
14434 machine_mode tmode = insn_data[icode].operand[0].mode;
14435 machine_mode mode0 = insn_data[icode].operand[1].mode;
14436 machine_mode mode1 = insn_data[icode].operand[2].mode;
14437 machine_mode mode2 = insn_data[icode].operand[3].mode;
14438
14439 if (icode == CODE_FOR_nothing)
14440 /* Builtin not supported on this processor. */
14441 return 0;
14442
14443 /* If we got invalid arguments, bail out before generating bad rtl. */
14444 if (arg0 == error_mark_node
14445 || arg1 == error_mark_node
14446 || arg2 == error_mark_node)
14447 return const0_rtx;
14448
14449 /* Check and prepare the argument, depending on the instruction code.
14450 
14451 Note that a switch statement instead of the sequence of tests
14452 would be incorrect, as many of the CODE_FOR values could be
14453 CODE_FOR_nothing, and that would yield multiple case alternatives
14454 with identical values. (We would never reach here at runtime in
14455 that case.) */
14456 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14457 || icode == CODE_FOR_altivec_vsldoi_v2df
14458 || icode == CODE_FOR_altivec_vsldoi_v4si
14459 || icode == CODE_FOR_altivec_vsldoi_v8hi
14460 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14461 {
14462 /* Only allow 4-bit unsigned literals. */
14463 STRIP_NOPS (arg2);
14464 if (TREE_CODE (arg2) != INTEGER_CST
14465 || TREE_INT_CST_LOW (arg2) & ~0xf)
14466 {
14467 error ("argument 3 must be a 4-bit unsigned literal");
14468 return CONST0_RTX (tmode);
14469 }
14470 }
14471 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14472 || icode == CODE_FOR_vsx_xxpermdi_v2di
14473 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14474 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14475 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14476 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14477 || icode == CODE_FOR_vsx_xxpermdi_v4si
14478 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14479 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14480 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14481 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14482 || icode == CODE_FOR_vsx_xxsldwi_v4si
14483 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14484 || icode == CODE_FOR_vsx_xxsldwi_v2di
14485 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14486 {
14487 /* Only allow 2-bit unsigned literals. */
14488 STRIP_NOPS (arg2);
14489 if (TREE_CODE (arg2) != INTEGER_CST
14490 || TREE_INT_CST_LOW (arg2) & ~0x3)
14491 {
14492 error ("argument 3 must be a 2-bit unsigned literal");
14493 return CONST0_RTX (tmode);
14494 }
14495 }
14496 else if (icode == CODE_FOR_vsx_set_v2df
14497 || icode == CODE_FOR_vsx_set_v2di
14498 || icode == CODE_FOR_bcdadd
14499 || icode == CODE_FOR_bcdadd_lt
14500 || icode == CODE_FOR_bcdadd_eq
14501 || icode == CODE_FOR_bcdadd_gt
14502 || icode == CODE_FOR_bcdsub
14503 || icode == CODE_FOR_bcdsub_lt
14504 || icode == CODE_FOR_bcdsub_eq
14505 || icode == CODE_FOR_bcdsub_gt)
14506 {
14507 /* Only allow 1-bit unsigned literals. */
14508 STRIP_NOPS (arg2);
14509 if (TREE_CODE (arg2) != INTEGER_CST
14510 || TREE_INT_CST_LOW (arg2) & ~0x1)
14511 {
14512 error ("argument 3 must be a 1-bit unsigned literal");
14513 return CONST0_RTX (tmode);
14514 }
14515 }
14516 else if (icode == CODE_FOR_dfp_ddedpd_dd
14517 || icode == CODE_FOR_dfp_ddedpd_td)
14518 {
14519 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14520 STRIP_NOPS (arg0);
14521 if (TREE_CODE (arg0) != INTEGER_CST
14522 || TREE_INT_CST_LOW (arg0) & ~0x3)
14523 {
14524 error ("argument 1 must be 0 or 2");
14525 return CONST0_RTX (tmode);
14526 }
14527 }
14528 else if (icode == CODE_FOR_dfp_denbcd_dd
14529 || icode == CODE_FOR_dfp_denbcd_td)
14530 {
14531 /* Only allow 1-bit unsigned literals. */
14532 STRIP_NOPS (arg0);
14533 if (TREE_CODE (arg0) != INTEGER_CST
14534 || TREE_INT_CST_LOW (arg0) & ~0x1)
14535 {
14536 error ("argument 1 must be a 1-bit unsigned literal");
14537 return CONST0_RTX (tmode);
14538 }
14539 }
14540 else if (icode == CODE_FOR_dfp_dscli_dd
14541 || icode == CODE_FOR_dfp_dscli_td
14542 || icode == CODE_FOR_dfp_dscri_dd
14543 || icode == CODE_FOR_dfp_dscri_td)
14544 {
14545 /* Only allow 6-bit unsigned literals. */
14546 STRIP_NOPS (arg1);
14547 if (TREE_CODE (arg1) != INTEGER_CST
14548 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14549 {
14550 error ("argument 2 must be a 6-bit unsigned literal");
14551 return CONST0_RTX (tmode);
14552 }
14553 }
14554 else if (icode == CODE_FOR_crypto_vshasigmaw
14555 || icode == CODE_FOR_crypto_vshasigmad)
14556 {
14557 /* Check whether the 2nd and 3rd arguments are integer constants and in
14558 range and prepare arguments. */
14559 STRIP_NOPS (arg1);
14560 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14561 {
14562 error ("argument 2 must be 0 or 1");
14563 return CONST0_RTX (tmode);
14564 }
14565
14566 STRIP_NOPS (arg2);
14567 if (TREE_CODE (arg2) != INTEGER_CST
14568 || wi::geu_p (wi::to_wide (arg2), 16))
14569 {
14570 error ("argument 3 must be in the range 0..15");
14571 return CONST0_RTX (tmode);
14572 }
14573 }
14574
14575 if (target == 0
14576 || GET_MODE (target) != tmode
14577 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14578 target = gen_reg_rtx (tmode);
14579
14580 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14581 op0 = copy_to_mode_reg (mode0, op0);
14582 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14583 op1 = copy_to_mode_reg (mode1, op1);
14584 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14585 op2 = copy_to_mode_reg (mode2, op2);
14586
14587 pat = GEN_FCN (icode) (target, op0, op1, op2);
14588 if (! pat)
14589 return 0;
14590 emit_insn (pat);
14591
14592 return target;
14593 }
14594
14595
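/* Editorial example (not part of GCC): vec_sld expands through
   altivec_vsldoi_*, so its third argument must be a 4-bit unsigned
   literal (0..15), per the check above.  Illustrative sketch, -maltivec:  */
#if 0
#include <altivec.h>

vector signed int
shift_left_four_bytes (vector signed int a, vector signed int b)
{
  return vec_sld (a, b, 4);	/* OK: literal in 0..15 */
}
#endif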
14596 /* Expand the dst builtins. */
14597 static rtx
14598 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14599 bool *expandedp)
14600 {
14601 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14602 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14603 tree arg0, arg1, arg2;
14604 machine_mode mode0, mode1;
14605 rtx pat, op0, op1, op2;
14606 const struct builtin_description *d;
14607 size_t i;
14608
14609 *expandedp = false;
14610
14611 /* Handle DST variants. */
14612 d = bdesc_dst;
14613 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14614 if (d->code == fcode)
14615 {
14616 arg0 = CALL_EXPR_ARG (exp, 0);
14617 arg1 = CALL_EXPR_ARG (exp, 1);
14618 arg2 = CALL_EXPR_ARG (exp, 2);
14619 op0 = expand_normal (arg0);
14620 op1 = expand_normal (arg1);
14621 op2 = expand_normal (arg2);
14622 mode0 = insn_data[d->icode].operand[0].mode;
14623 mode1 = insn_data[d->icode].operand[1].mode;
14624
14625 /* Invalid arguments; bail out before generating bad rtl. */
14626 if (arg0 == error_mark_node
14627 || arg1 == error_mark_node
14628 || arg2 == error_mark_node)
14629 return const0_rtx;
14630
14631 *expandedp = true;
14632 STRIP_NOPS (arg2);
14633 if (TREE_CODE (arg2) != INTEGER_CST
14634 || TREE_INT_CST_LOW (arg2) & ~0x3)
14635 {
14636 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14637 return const0_rtx;
14638 }
14639
14640 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14641 op0 = copy_to_mode_reg (Pmode, op0);
14642 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14643 op1 = copy_to_mode_reg (mode1, op1);
14644
14645 pat = GEN_FCN (d->icode) (op0, op1, op2);
14646 if (pat != 0)
14647 emit_insn (pat);
14648
14649 return NULL_RTX;
14650 }
14651
14652 return NULL_RTX;
14653 }
14654
14655 /* Expand vec_init builtin. */
14656 static rtx
14657 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14658 {
14659 machine_mode tmode = TYPE_MODE (type);
14660 machine_mode inner_mode = GET_MODE_INNER (tmode);
14661 int i, n_elt = GET_MODE_NUNITS (tmode);
14662
14663 gcc_assert (VECTOR_MODE_P (tmode));
14664 gcc_assert (n_elt == call_expr_nargs (exp));
14665
14666 if (!target || !register_operand (target, tmode))
14667 target = gen_reg_rtx (tmode);
14668
14669 /* If we have a vector composed of a single element, such as V1TImode, do
14670 the initialization directly. */
14671 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14672 {
14673 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14674 emit_move_insn (target, gen_lowpart (tmode, x));
14675 }
14676 else
14677 {
14678 rtvec v = rtvec_alloc (n_elt);
14679
14680 for (i = 0; i < n_elt; ++i)
14681 {
14682 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14683 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14684 }
14685
14686 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14687 }
14688
14689 return target;
14690 }
14691
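/* Editorial example (not part of GCC): the vec_init path above performs the
   same element-by-element initialization that a vector constructor such as
   the one below requires.  Illustrative sketch, -maltivec:  */
#if 0
#include <altivec.h>

vector signed int
make_v4si (int a, int b, int c, int d)
{
  return (vector signed int) { a, b, c, d };
}
#endif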
14692 /* Return the integer constant in ARG. Constrain it to be in the range
14693 of the subparts of VEC_TYPE; issue an error if not. */
14694
14695 static int
14696 get_element_number (tree vec_type, tree arg)
14697 {
14698 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14699
14700 if (!tree_fits_uhwi_p (arg)
14701 || (elt = tree_to_uhwi (arg), elt > max))
14702 {
14703 error ("selector must be an integer constant in the range 0..%wi", max);
14704 return 0;
14705 }
14706
14707 return elt;
14708 }
14709
14710 /* Expand vec_set builtin. */
14711 static rtx
14712 altivec_expand_vec_set_builtin (tree exp)
14713 {
14714 machine_mode tmode, mode1;
14715 tree arg0, arg1, arg2;
14716 int elt;
14717 rtx op0, op1;
14718
14719 arg0 = CALL_EXPR_ARG (exp, 0);
14720 arg1 = CALL_EXPR_ARG (exp, 1);
14721 arg2 = CALL_EXPR_ARG (exp, 2);
14722
14723 tmode = TYPE_MODE (TREE_TYPE (arg0));
14724 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14725 gcc_assert (VECTOR_MODE_P (tmode));
14726
14727 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14728 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14729 elt = get_element_number (TREE_TYPE (arg0), arg2);
14730
14731 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14732 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14733
14734 op0 = force_reg (tmode, op0);
14735 op1 = force_reg (mode1, op1);
14736
14737 rs6000_expand_vector_set (op0, op1, elt);
14738
14739 return op0;
14740 }
14741
14742 /* Expand vec_ext builtin. */
14743 static rtx
14744 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14745 {
14746 machine_mode tmode, mode0;
14747 tree arg0, arg1;
14748 rtx op0;
14749 rtx op1;
14750
14751 arg0 = CALL_EXPR_ARG (exp, 0);
14752 arg1 = CALL_EXPR_ARG (exp, 1);
14753
14754 op0 = expand_normal (arg0);
14755 op1 = expand_normal (arg1);
14756
14757 if (TREE_CODE (arg1) == INTEGER_CST)
14758 {
14759 unsigned HOST_WIDE_INT elt;
14760 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14761 unsigned int truncated_selector;
14762 /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1)
14763 returns the low-order bits of the INTEGER_CST, giving modulo indexing. */
14764 elt = TREE_INT_CST_LOW (arg1);
14765 truncated_selector = elt % size;
14766 op1 = GEN_INT (truncated_selector);
14767 }
14768
14769 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14770 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14771 gcc_assert (VECTOR_MODE_P (mode0));
14772
14773 op0 = force_reg (mode0, op0);
14774
14775 if (optimize || !target || !register_operand (target, tmode))
14776 target = gen_reg_rtx (tmode);
14777
14778 rs6000_expand_vector_extract (target, op0, op1);
14779
14780 return target;
14781 }
14782
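/* Editorial example (not part of GCC): the modulo truncation above gives
   constant vec_extract selectors wrap-around semantics, which is the
   behavior at issue in PR target/87532.  Illustrative sketch, -maltivec:  */
#if 0
#include <altivec.h>

unsigned char
extract_wrapped (vector unsigned char v)
{
  return vec_extract (v, 17);	/* same element as 17 % 16 == 1 */
}
#endif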
14783 /* Expand the builtin in EXP and store the result in TARGET. Store
14784 true in *EXPANDEDP if we found a builtin to expand. */
14785 static rtx
14786 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14787 {
14788 const struct builtin_description *d;
14789 size_t i;
14790 enum insn_code icode;
14791 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14792 tree arg0, arg1, arg2;
14793 rtx op0, pat;
14794 machine_mode tmode, mode0;
14795 enum rs6000_builtins fcode
14796 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14797
14798 if (rs6000_overloaded_builtin_p (fcode))
14799 {
14800 *expandedp = true;
14801 error ("unresolved overload for Altivec builtin %qF", fndecl);
14802
14803 /* Given it is invalid, just generate a normal call. */
14804 return expand_call (exp, target, false);
14805 }
14806
14807 target = altivec_expand_dst_builtin (exp, target, expandedp);
14808 if (*expandedp)
14809 return target;
14810
14811 *expandedp = true;
14812
14813 switch (fcode)
14814 {
14815 case ALTIVEC_BUILTIN_STVX_V2DF:
14816 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14817 case ALTIVEC_BUILTIN_STVX_V2DI:
14818 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14819 case ALTIVEC_BUILTIN_STVX_V4SF:
14820 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14821 case ALTIVEC_BUILTIN_STVX:
14822 case ALTIVEC_BUILTIN_STVX_V4SI:
14823 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14824 case ALTIVEC_BUILTIN_STVX_V8HI:
14825 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14826 case ALTIVEC_BUILTIN_STVX_V16QI:
14827 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14828 case ALTIVEC_BUILTIN_STVEBX:
14829 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14830 case ALTIVEC_BUILTIN_STVEHX:
14831 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14832 case ALTIVEC_BUILTIN_STVEWX:
14833 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14834 case ALTIVEC_BUILTIN_STVXL_V2DF:
14835 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14836 case ALTIVEC_BUILTIN_STVXL_V2DI:
14837 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14838 case ALTIVEC_BUILTIN_STVXL_V4SF:
14839 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14840 case ALTIVEC_BUILTIN_STVXL:
14841 case ALTIVEC_BUILTIN_STVXL_V4SI:
14842 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14843 case ALTIVEC_BUILTIN_STVXL_V8HI:
14844 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14845 case ALTIVEC_BUILTIN_STVXL_V16QI:
14846 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14847
14848 case ALTIVEC_BUILTIN_STVLX:
14849 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14850 case ALTIVEC_BUILTIN_STVLXL:
14851 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14852 case ALTIVEC_BUILTIN_STVRX:
14853 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14854 case ALTIVEC_BUILTIN_STVRXL:
14855 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14856
14857 case P9V_BUILTIN_STXVL:
14858 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14859
14860 case P9V_BUILTIN_XST_LEN_R:
14861 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14862
14863 case VSX_BUILTIN_STXVD2X_V1TI:
14864 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14865 case VSX_BUILTIN_STXVD2X_V2DF:
14866 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14867 case VSX_BUILTIN_STXVD2X_V2DI:
14868 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14869 case VSX_BUILTIN_STXVW4X_V4SF:
14870 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14871 case VSX_BUILTIN_STXVW4X_V4SI:
14872 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14873 case VSX_BUILTIN_STXVW4X_V8HI:
14874 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14875 case VSX_BUILTIN_STXVW4X_V16QI:
14876 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14877
14878 /* For the following on big endian, it's ok to use any appropriate
14879 unaligned-supporting store, so use a generic expander. For
14880 little-endian, the exact element-reversing instruction must
14881 be used. */
14882 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14883 {
14884 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14885 : CODE_FOR_vsx_st_elemrev_v1ti);
14886 return altivec_expand_stv_builtin (code, exp);
14887 }
14888 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14889 {
14890 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14891 : CODE_FOR_vsx_st_elemrev_v2df);
14892 return altivec_expand_stv_builtin (code, exp);
14893 }
14894 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14895 {
14896 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14897 : CODE_FOR_vsx_st_elemrev_v2di);
14898 return altivec_expand_stv_builtin (code, exp);
14899 }
14900 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14901 {
14902 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14903 : CODE_FOR_vsx_st_elemrev_v4sf);
14904 return altivec_expand_stv_builtin (code, exp);
14905 }
14906 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14907 {
14908 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14909 : CODE_FOR_vsx_st_elemrev_v4si);
14910 return altivec_expand_stv_builtin (code, exp);
14911 }
14912 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14913 {
14914 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14915 : CODE_FOR_vsx_st_elemrev_v8hi);
14916 return altivec_expand_stv_builtin (code, exp);
14917 }
14918 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14919 {
14920 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14921 : CODE_FOR_vsx_st_elemrev_v16qi);
14922 return altivec_expand_stv_builtin (code, exp);
14923 }
14924
14925 case ALTIVEC_BUILTIN_MFVSCR:
14926 icode = CODE_FOR_altivec_mfvscr;
14927 tmode = insn_data[icode].operand[0].mode;
14928
14929 if (target == 0
14930 || GET_MODE (target) != tmode
14931 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14932 target = gen_reg_rtx (tmode);
14933
14934 pat = GEN_FCN (icode) (target);
14935 if (! pat)
14936 return 0;
14937 emit_insn (pat);
14938 return target;
14939
14940 case ALTIVEC_BUILTIN_MTVSCR:
14941 icode = CODE_FOR_altivec_mtvscr;
14942 arg0 = CALL_EXPR_ARG (exp, 0);
14943 op0 = expand_normal (arg0);
14944 mode0 = insn_data[icode].operand[0].mode;
14945
14946 /* If we got invalid arguments, bail out before generating bad rtl. */
14947 if (arg0 == error_mark_node)
14948 return const0_rtx;
14949
14950 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14951 op0 = copy_to_mode_reg (mode0, op0);
14952
14953 pat = GEN_FCN (icode) (op0);
14954 if (pat)
14955 emit_insn (pat);
14956 return NULL_RTX;
14957
14958 case ALTIVEC_BUILTIN_DSSALL:
14959 emit_insn (gen_altivec_dssall ());
14960 return NULL_RTX;
14961
14962 case ALTIVEC_BUILTIN_DSS:
14963 icode = CODE_FOR_altivec_dss;
14964 arg0 = CALL_EXPR_ARG (exp, 0);
14965 STRIP_NOPS (arg0);
14966 op0 = expand_normal (arg0);
14967 mode0 = insn_data[icode].operand[0].mode;
14968
14969 /* If we got invalid arguments, bail out before generating bad rtl. */
14970 if (arg0 == error_mark_node)
14971 return const0_rtx;
14972
14973 if (TREE_CODE (arg0) != INTEGER_CST
14974 || TREE_INT_CST_LOW (arg0) & ~0x3)
14975 {
14976 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14977 return const0_rtx;
14978 }
14979
14980 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14981 op0 = copy_to_mode_reg (mode0, op0);
14982
14983 emit_insn (gen_altivec_dss (op0));
14984 return NULL_RTX;
14985
14986 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14987 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14988 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14989 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14990 case VSX_BUILTIN_VEC_INIT_V2DF:
14991 case VSX_BUILTIN_VEC_INIT_V2DI:
14992 case VSX_BUILTIN_VEC_INIT_V1TI:
14993 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14994
14995 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14996 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14997 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14998 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14999 case VSX_BUILTIN_VEC_SET_V2DF:
15000 case VSX_BUILTIN_VEC_SET_V2DI:
15001 case VSX_BUILTIN_VEC_SET_V1TI:
15002 return altivec_expand_vec_set_builtin (exp);
15003
15004 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15005 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15006 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15007 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15008 case VSX_BUILTIN_VEC_EXT_V2DF:
15009 case VSX_BUILTIN_VEC_EXT_V2DI:
15010 case VSX_BUILTIN_VEC_EXT_V1TI:
15011 return altivec_expand_vec_ext_builtin (exp, target);
15012
15013 case P9V_BUILTIN_VEC_EXTRACT4B:
15014 arg1 = CALL_EXPR_ARG (exp, 1);
15015 STRIP_NOPS (arg1);
15016
15017 /* Generate a normal call if it is invalid. */
15018 if (arg1 == error_mark_node)
15019 return expand_call (exp, target, false);
15020
15021 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15022 {
15023 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15024 return expand_call (exp, target, false);
15025 }
15026 break;
15027
15028 case P9V_BUILTIN_VEC_INSERT4B:
15029 arg2 = CALL_EXPR_ARG (exp, 2);
15030 STRIP_NOPS (arg2);
15031
15032 /* Generate a normal call if it is invalid. */
15033 if (arg2 == error_mark_node)
15034 return expand_call (exp, target, false);
15035
15036 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15037 {
15038 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15039 return expand_call (exp, target, false);
15040 }
15041 break;
15042
15043 default:
15044 break;
15046 }
15047
15048 /* Expand abs* operations. */
15049 d = bdesc_abs;
15050 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15051 if (d->code == fcode)
15052 return altivec_expand_abs_builtin (d->icode, exp, target);
15053
15054 /* Expand the AltiVec predicates. */
15055 d = bdesc_altivec_preds;
15056 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15057 if (d->code == fcode)
15058 return altivec_expand_predicate_builtin (d->icode, exp, target);
15059
15060 /* The LV* builtins are funky; we initialized them differently, so expand them here by hand. */
15061 switch (fcode)
15062 {
15063 case ALTIVEC_BUILTIN_LVSL:
15064 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15065 exp, target, false);
15066 case ALTIVEC_BUILTIN_LVSR:
15067 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15068 exp, target, false);
15069 case ALTIVEC_BUILTIN_LVEBX:
15070 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15071 exp, target, false);
15072 case ALTIVEC_BUILTIN_LVEHX:
15073 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15074 exp, target, false);
15075 case ALTIVEC_BUILTIN_LVEWX:
15076 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15077 exp, target, false);
15078 case ALTIVEC_BUILTIN_LVXL_V2DF:
15079 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15080 exp, target, false);
15081 case ALTIVEC_BUILTIN_LVXL_V2DI:
15082 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15083 exp, target, false);
15084 case ALTIVEC_BUILTIN_LVXL_V4SF:
15085 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15086 exp, target, false);
15087 case ALTIVEC_BUILTIN_LVXL:
15088 case ALTIVEC_BUILTIN_LVXL_V4SI:
15089 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15090 exp, target, false);
15091 case ALTIVEC_BUILTIN_LVXL_V8HI:
15092 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15093 exp, target, false);
15094 case ALTIVEC_BUILTIN_LVXL_V16QI:
15095 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15096 exp, target, false);
15097 case ALTIVEC_BUILTIN_LVX_V1TI:
15098 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15099 exp, target, false);
15100 case ALTIVEC_BUILTIN_LVX_V2DF:
15101 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15102 exp, target, false);
15103 case ALTIVEC_BUILTIN_LVX_V2DI:
15104 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15105 exp, target, false);
15106 case ALTIVEC_BUILTIN_LVX_V4SF:
15107 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15108 exp, target, false);
15109 case ALTIVEC_BUILTIN_LVX:
15110 case ALTIVEC_BUILTIN_LVX_V4SI:
15111 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15112 exp, target, false);
15113 case ALTIVEC_BUILTIN_LVX_V8HI:
15114 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15115 exp, target, false);
15116 case ALTIVEC_BUILTIN_LVX_V16QI:
15117 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15118 exp, target, false);
15119 case ALTIVEC_BUILTIN_LVLX:
15120 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15121 exp, target, true);
15122 case ALTIVEC_BUILTIN_LVLXL:
15123 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15124 exp, target, true);
15125 case ALTIVEC_BUILTIN_LVRX:
15126 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15127 exp, target, true);
15128 case ALTIVEC_BUILTIN_LVRXL:
15129 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15130 exp, target, true);
15131 case VSX_BUILTIN_LXVD2X_V1TI:
15132 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15133 exp, target, false);
15134 case VSX_BUILTIN_LXVD2X_V2DF:
15135 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15136 exp, target, false);
15137 case VSX_BUILTIN_LXVD2X_V2DI:
15138 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15139 exp, target, false);
15140 case VSX_BUILTIN_LXVW4X_V4SF:
15141 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15142 exp, target, false);
15143 case VSX_BUILTIN_LXVW4X_V4SI:
15144 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15145 exp, target, false);
15146 case VSX_BUILTIN_LXVW4X_V8HI:
15147 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15148 exp, target, false);
15149 case VSX_BUILTIN_LXVW4X_V16QI:
15150 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15151 exp, target, false);
15152 /* For the following on big endian, it's ok to use any appropriate
15153 unaligned-supporting load, so use a generic expander. For
15154 little-endian, the exact element-reversing instruction must
15155 be used. */
15156 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15157 {
15158 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15159 : CODE_FOR_vsx_ld_elemrev_v2df);
15160 return altivec_expand_lv_builtin (code, exp, target, false);
15161 }
15162 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15163 {
15164 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15165 : CODE_FOR_vsx_ld_elemrev_v1ti);
15166 return altivec_expand_lv_builtin (code, exp, target, false);
15167 }
15168 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15169 {
15170 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15171 : CODE_FOR_vsx_ld_elemrev_v2di);
15172 return altivec_expand_lv_builtin (code, exp, target, false);
15173 }
15174 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15175 {
15176 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15177 : CODE_FOR_vsx_ld_elemrev_v4sf);
15178 return altivec_expand_lv_builtin (code, exp, target, false);
15179 }
15180 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15181 {
15182 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15183 : CODE_FOR_vsx_ld_elemrev_v4si);
15184 return altivec_expand_lv_builtin (code, exp, target, false);
15185 }
15186 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15187 {
15188 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15189 : CODE_FOR_vsx_ld_elemrev_v8hi);
15190 return altivec_expand_lv_builtin (code, exp, target, false);
15191 }
15192 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15193 {
15194 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15195 : CODE_FOR_vsx_ld_elemrev_v16qi);
15196 return altivec_expand_lv_builtin (code, exp, target, false);
15197 }
15198 break;
15199 default:
15200 break;
15202 }
15203
15204 *expandedp = false;
15205 return NULL_RTX;
15206 }
15207
15208 /* Check whether a builtin function is supported in this target
15209 configuration. */
15210 bool
15211 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15212 {
15213 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15214 return (fnmask & rs6000_builtin_mask) == fnmask;
15218 }
15219
15220 /* Raise an error message for a builtin function that is called without the
15221 appropriate target options being set. */
15222
15223 static void
15224 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15225 {
15226 size_t uns_fncode = (size_t) fncode;
15227 const char *name = rs6000_builtin_info[uns_fncode].name;
15228 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15229
15230 gcc_assert (name != NULL);
15231 if ((fnmask & RS6000_BTM_CELL) != 0)
15232 error ("builtin function %qs is only valid for the cell processor", name);
15233 else if ((fnmask & RS6000_BTM_VSX) != 0)
15234 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15235 else if ((fnmask & RS6000_BTM_HTM) != 0)
15236 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15237 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15238 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15239 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15240 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15241 error ("builtin function %qs requires the %qs and %qs options",
15242 name, "-mhard-dfp", "-mpower8-vector");
15243 else if ((fnmask & RS6000_BTM_DFP) != 0)
15244 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15245 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15246 error ("builtin function %qs requires the %qs option", name,
15247 "-mpower8-vector");
15248 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15249 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15250 error ("builtin function %qs requires the %qs and %qs options",
15251 name, "-mcpu=power9", "-m64");
15252 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15253 error ("builtin function %qs requires the %qs option", name,
15254 "-mcpu=power9");
15255 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15256 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15257 error ("builtin function %qs requires the %qs and %qs options",
15258 name, "-mcpu=power9", "-m64");
15259 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15260 error ("builtin function %qs requires the %qs option", name,
15261 "-mcpu=power9");
15262 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15263 {
15264 if (!TARGET_HARD_FLOAT)
15265 error ("builtin function %qs requires the %qs option", name,
15266 "-mhard-float");
15267 else
15268 error ("builtin function %qs requires the %qs option", name,
15269 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15270 }
15271 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15272 error ("builtin function %qs requires the %qs option", name,
15273 "-mhard-float");
15274 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15275 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15276 name);
15277 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15278 error ("builtin function %qs requires the %qs option", name,
15279 "%<-mfloat128%>");
15280 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15281 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15282 error ("builtin function %qs requires the %qs (or newer), and "
15283 "%qs or %qs options",
15284 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15285 else
15286 error ("builtin function %qs is not supported with the current options",
15287 name);
15288 }
15289
15290 /* Target hook for early folding of built-ins, shamelessly stolen
15291 from ia64.c. */
15292
15293 static tree
15294 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15295 int n_args ATTRIBUTE_UNUSED,
15296 tree *args ATTRIBUTE_UNUSED,
15297 bool ignore ATTRIBUTE_UNUSED)
15298 {
15299 #ifdef SUBTARGET_FOLD_BUILTIN
15300 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15301 #else
15302 return NULL_TREE;
15303 #endif
15304 }
15305
15306 /* Helper function to sort out which built-ins may be valid without having
15307 a LHS. */
15308 static bool
15309 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15310 {
15311 switch (fn_code)
15312 {
15313 case ALTIVEC_BUILTIN_STVX_V16QI:
15314 case ALTIVEC_BUILTIN_STVX_V8HI:
15315 case ALTIVEC_BUILTIN_STVX_V4SI:
15316 case ALTIVEC_BUILTIN_STVX_V4SF:
15317 case ALTIVEC_BUILTIN_STVX_V2DI:
15318 case ALTIVEC_BUILTIN_STVX_V2DF:
15319 case VSX_BUILTIN_STXVW4X_V16QI:
15320 case VSX_BUILTIN_STXVW4X_V8HI:
15321 case VSX_BUILTIN_STXVW4X_V4SF:
15322 case VSX_BUILTIN_STXVW4X_V4SI:
15323 case VSX_BUILTIN_STXVD2X_V2DF:
15324 case VSX_BUILTIN_STXVD2X_V2DI:
15325 return true;
15326 default:
15327 return false;
15328 }
15329 }
15330
15331 /* Helper function to handle the gimple folding of a vector compare
15332 operation. This sets up true/false vectors, and uses the
15333 VEC_COND_EXPR operation.
15334 CODE indicates which comparison is to be made. (EQ, GT, ...).
15335 TYPE indicates the type of the result. */
15336 static tree
15337 fold_build_vec_cmp (tree_code code, tree type,
15338 tree arg0, tree arg1)
15339 {
15340 tree cmp_type = build_same_sized_truth_vector_type (type);
15341 tree zero_vec = build_zero_cst (type);
15342 tree minus_one_vec = build_minus_one_cst (type);
15343 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15344 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15345 }
15346
15347 /* Helper function to handle the in-between steps for the
15348 vector compare built-ins. */
15349 static void
15350 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15351 {
15352 tree arg0 = gimple_call_arg (stmt, 0);
15353 tree arg1 = gimple_call_arg (stmt, 1);
15354 tree lhs = gimple_call_lhs (stmt);
15355 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15356 gimple *g = gimple_build_assign (lhs, cmp);
15357 gimple_set_location (g, gimple_location (stmt));
15358 gsi_replace (gsi, g, true);
15359 }
15360
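/* Editorial example (not part of GCC): after the fold above, a compare
   builtin such as vec_cmpeq becomes a VEC_COND_EXPR selecting between
   all-ones and all-zeros, i.e. the usual element-wise mask.  Illustrative
   sketch, -maltivec:  */
#if 0
#include <altivec.h>

vector bool int
mask_equal (vector signed int a, vector signed int b)
{
  return vec_cmpeq (a, b);	/* per element: a[i] == b[i] ? -1 : 0 */
}
#endif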
15361 /* Helper function to map V2DF and V4SF types to their
15362 integral equivalents (V2DI and V4SI). */
15363 tree map_to_integral_tree_type (tree input_tree_type)
15364 {
15365 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15366 return input_tree_type;
15367 else
15368 {
15369 if (types_compatible_p (TREE_TYPE (input_tree_type),
15370 TREE_TYPE (V2DF_type_node)))
15371 return V2DI_type_node;
15372 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15373 TREE_TYPE (V4SF_type_node)))
15374 return V4SI_type_node;
15375 else
15376 gcc_unreachable ();
15377 }
15378 }
15379
15380 /* Helper function to handle the vector merge[hl] built-ins. The
15381 difference between the h and l versions is in the values used when
15382 building the permute vector for the high-word versus the low-word
15383 merge; the variance is keyed off the use_high parameter. */
15384 static void
15385 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15386 {
15387 tree arg0 = gimple_call_arg (stmt, 0);
15388 tree arg1 = gimple_call_arg (stmt, 1);
15389 tree lhs = gimple_call_lhs (stmt);
15390 tree lhs_type = TREE_TYPE (lhs);
15391 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15392 int midpoint = n_elts / 2;
15393 int offset = 0;
15394
15395 if (use_high == 1)
15396 offset = midpoint;
15397
15398 /* The permute_type will match the lhs for integral types. For double and
15399 float types, the permute type needs to map to the V2DI or V4SI type of
15400 matching size. */
15401 tree permute_type;
15402 permute_type = map_to_integral_tree_type (lhs_type);
15403 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15404
15405 for (int i = 0; i < midpoint; i++)
15406 {
15407 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15408 offset + i));
15409 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15410 offset + n_elts + i));
15411 }
15412
15413 tree permute = elts.build ();
15414
15415 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15416 gimple_set_location (g, gimple_location (stmt));
15417 gsi_replace (gsi, g, true);
15418 }
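/* Illustrative example: for a V4SI merge-high (use_high == 0) the loop above
   builds the permute selector { 0, 4, 1, 5 }, interleaving the first halves
   of the two inputs; merge-low (use_high == 1) builds { 2, 6, 3, 7 }.  */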
15419
15420 /* Helper function to handle the vector merge[eo] built-ins. */
15421 static void
15422 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15423 {
15424 tree arg0 = gimple_call_arg (stmt, 0);
15425 tree arg1 = gimple_call_arg (stmt, 1);
15426 tree lhs = gimple_call_lhs (stmt);
15427 tree lhs_type = TREE_TYPE (lhs);
15428 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15429
15430 /* The permute_type will match the lhs for integral types. For double and
15431 float types, the permute type needs to map to the V2DI or V4SI type of
15432 matching size. */
15433 tree permute_type;
15434 permute_type = map_to_integral_tree_type (lhs_type);
15435
15436 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15437
15438 /* Build the permute vector. */
15439 for (int i = 0; i < n_elts / 2; i++)
15440 {
15441 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15442 2 * i + use_odd));
15443 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15444 2 * i + use_odd + n_elts));
15445 }
15446
15447 tree permute = elts.build ();
15448
15449 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15450 gimple_set_location (g, gimple_location (stmt));
15451 gsi_replace (gsi, g, true);
15452 }
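/* Illustrative example: for a V4SI merge-even (use_odd == 0) the loop above
   builds the permute selector { 0, 4, 2, 6 }, picking the even elements of
   each input; merge-odd (use_odd == 1) builds { 1, 5, 3, 7 }.  */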
15453
15454 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15455 a constant, use rs6000_fold_builtin.) */
15456
15457 bool
15458 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15459 {
15460 gimple *stmt = gsi_stmt (*gsi);
15461 tree fndecl = gimple_call_fndecl (stmt);
15462 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15463 enum rs6000_builtins fn_code
15464 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15465 tree arg0, arg1, lhs, temp;
15466 enum tree_code bcode;
15467 gimple *g;
15468
15469 size_t uns_fncode = (size_t) fn_code;
15470 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15471 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15472 const char *fn_name2 = (icode != CODE_FOR_nothing)
15473 ? get_insn_name ((int) icode)
15474 : "nothing";
15475
15476 if (TARGET_DEBUG_BUILTIN)
15477 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15478 fn_code, fn_name1, fn_name2);
15479
15480 if (!rs6000_fold_gimple)
15481 return false;
15482
15483 /* Prevent gimple folding for code that does not have a LHS, unless it is
15484 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15485 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15486 return false;
15487
15488 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
15489 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15490 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15491 if (!func_valid_p)
15492 return false;
15493
15494 switch (fn_code)
15495 {
15496 /* Flavors of vec_add. We deliberately don't expand
15497 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15498 TImode, resulting in much poorer code generation. */
15499 case ALTIVEC_BUILTIN_VADDUBM:
15500 case ALTIVEC_BUILTIN_VADDUHM:
15501 case ALTIVEC_BUILTIN_VADDUWM:
15502 case P8V_BUILTIN_VADDUDM:
15503 case ALTIVEC_BUILTIN_VADDFP:
15504 case VSX_BUILTIN_XVADDDP:
15505 bcode = PLUS_EXPR;
15506 do_binary:
15507 arg0 = gimple_call_arg (stmt, 0);
15508 arg1 = gimple_call_arg (stmt, 1);
15509 lhs = gimple_call_lhs (stmt);
15510 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15511 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15512 {
15513 /* Ensure the binary operation is performed in a type
15514 that wraps if it is an integral type. */
15515 gimple_seq stmts = NULL;
15516 tree type = unsigned_type_for (TREE_TYPE (lhs));
15517 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15518 type, arg0);
15519 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15520 type, arg1);
15521 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15522 type, uarg0, uarg1);
15523 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15524 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15525 build1 (VIEW_CONVERT_EXPR,
15526 TREE_TYPE (lhs), res));
15527 gsi_replace (gsi, g, true);
15528 return true;
15529 }
15530 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15531 gimple_set_location (g, gimple_location (stmt));
15532 gsi_replace (gsi, g, true);
15533 return true;
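/* Illustrative example of the wrapping conversion above, for a
   vector signed int addition:
     u0 = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
     u1 = VIEW_CONVERT_EXPR <vector unsigned int> (arg1);
     t = u0 + u1;
     lhs = VIEW_CONVERT_EXPR <vector signed int> (t);
   The addition is done in the unsigned type, where overflow wraps, so
   later passes cannot treat the modulo behavior of vadduwm as undefined
   signed overflow.  */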
15534 /* Flavors of vec_sub. We deliberately don't expand
15535 P8V_BUILTIN_VSUBUQM. */
15536 case ALTIVEC_BUILTIN_VSUBUBM:
15537 case ALTIVEC_BUILTIN_VSUBUHM:
15538 case ALTIVEC_BUILTIN_VSUBUWM:
15539 case P8V_BUILTIN_VSUBUDM:
15540 case ALTIVEC_BUILTIN_VSUBFP:
15541 case VSX_BUILTIN_XVSUBDP:
15542 bcode = MINUS_EXPR;
15543 goto do_binary;
15544 case VSX_BUILTIN_XVMULSP:
15545 case VSX_BUILTIN_XVMULDP:
15546 arg0 = gimple_call_arg (stmt, 0);
15547 arg1 = gimple_call_arg (stmt, 1);
15548 lhs = gimple_call_lhs (stmt);
15549 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15550 gimple_set_location (g, gimple_location (stmt));
15551 gsi_replace (gsi, g, true);
15552 return true;
15553 /* Even element flavors of vec_mul (signed). */
15554 case ALTIVEC_BUILTIN_VMULESB:
15555 case ALTIVEC_BUILTIN_VMULESH:
15556 case P8V_BUILTIN_VMULESW:
15557 /* Even element flavors of vec_mul (unsigned). */
15558 case ALTIVEC_BUILTIN_VMULEUB:
15559 case ALTIVEC_BUILTIN_VMULEUH:
15560 case P8V_BUILTIN_VMULEUW:
15561 arg0 = gimple_call_arg (stmt, 0);
15562 arg1 = gimple_call_arg (stmt, 1);
15563 lhs = gimple_call_lhs (stmt);
15564 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15565 gimple_set_location (g, gimple_location (stmt));
15566 gsi_replace (gsi, g, true);
15567 return true;
15568 /* Odd element flavors of vec_mul (signed). */
15569 case ALTIVEC_BUILTIN_VMULOSB:
15570 case ALTIVEC_BUILTIN_VMULOSH:
15571 case P8V_BUILTIN_VMULOSW:
15572 /* Odd element flavors of vec_mul (unsigned). */
15573 case ALTIVEC_BUILTIN_VMULOUB:
15574 case ALTIVEC_BUILTIN_VMULOUH:
15575 case P8V_BUILTIN_VMULOUW:
15576 arg0 = gimple_call_arg (stmt, 0);
15577 arg1 = gimple_call_arg (stmt, 1);
15578 lhs = gimple_call_lhs (stmt);
15579 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15580 gimple_set_location (g, gimple_location (stmt));
15581 gsi_replace (gsi, g, true);
15582 return true;
15583 /* Flavors of vec_div (integer). */
15584 case VSX_BUILTIN_DIV_V2DI:
15585 case VSX_BUILTIN_UDIV_V2DI:
15586 arg0 = gimple_call_arg (stmt, 0);
15587 arg1 = gimple_call_arg (stmt, 1);
15588 lhs = gimple_call_lhs (stmt);
15589 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15590 gimple_set_location (g, gimple_location (stmt));
15591 gsi_replace (gsi, g, true);
15592 return true;
15593 /* Flavors of vec_div (float). */
15594 case VSX_BUILTIN_XVDIVSP:
15595 case VSX_BUILTIN_XVDIVDP:
15596 arg0 = gimple_call_arg (stmt, 0);
15597 arg1 = gimple_call_arg (stmt, 1);
15598 lhs = gimple_call_lhs (stmt);
15599 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15600 gimple_set_location (g, gimple_location (stmt));
15601 gsi_replace (gsi, g, true);
15602 return true;
15603 /* Flavors of vec_and. */
15604 case ALTIVEC_BUILTIN_VAND:
15605 arg0 = gimple_call_arg (stmt, 0);
15606 arg1 = gimple_call_arg (stmt, 1);
15607 lhs = gimple_call_lhs (stmt);
15608 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15609 gimple_set_location (g, gimple_location (stmt));
15610 gsi_replace (gsi, g, true);
15611 return true;
15612 /* Flavors of vec_andc. */
15613 case ALTIVEC_BUILTIN_VANDC:
15614 arg0 = gimple_call_arg (stmt, 0);
15615 arg1 = gimple_call_arg (stmt, 1);
15616 lhs = gimple_call_lhs (stmt);
15617 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15618 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15619 gimple_set_location (g, gimple_location (stmt));
15620 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15621 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15622 gimple_set_location (g, gimple_location (stmt));
15623 gsi_replace (gsi, g, true);
15624 return true;
15625 /* Flavors of vec_nand. */
15626 case P8V_BUILTIN_VEC_NAND:
15627 case P8V_BUILTIN_NAND_V16QI:
15628 case P8V_BUILTIN_NAND_V8HI:
15629 case P8V_BUILTIN_NAND_V4SI:
15630 case P8V_BUILTIN_NAND_V4SF:
15631 case P8V_BUILTIN_NAND_V2DF:
15632 case P8V_BUILTIN_NAND_V2DI:
15633 arg0 = gimple_call_arg (stmt, 0);
15634 arg1 = gimple_call_arg (stmt, 1);
15635 lhs = gimple_call_lhs (stmt);
15636 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15637 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15638 gimple_set_location (g, gimple_location (stmt));
15639 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15640 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15641 gimple_set_location (g, gimple_location (stmt));
15642 gsi_replace (gsi, g, true);
15643 return true;
15644 /* Flavors of vec_or. */
15645 case ALTIVEC_BUILTIN_VOR:
15646 arg0 = gimple_call_arg (stmt, 0);
15647 arg1 = gimple_call_arg (stmt, 1);
15648 lhs = gimple_call_lhs (stmt);
15649 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15650 gimple_set_location (g, gimple_location (stmt));
15651 gsi_replace (gsi, g, true);
15652 return true;
15653 /* Flavors of vec_orc. */
15654 case P8V_BUILTIN_ORC_V16QI:
15655 case P8V_BUILTIN_ORC_V8HI:
15656 case P8V_BUILTIN_ORC_V4SI:
15657 case P8V_BUILTIN_ORC_V4SF:
15658 case P8V_BUILTIN_ORC_V2DF:
15659 case P8V_BUILTIN_ORC_V2DI:
15660 arg0 = gimple_call_arg (stmt, 0);
15661 arg1 = gimple_call_arg (stmt, 1);
15662 lhs = gimple_call_lhs (stmt);
15663 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15664 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15665 gimple_set_location (g, gimple_location (stmt));
15666 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15667 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15668 gimple_set_location (g, gimple_location (stmt));
15669 gsi_replace (gsi, g, true);
15670 return true;
15671 /* Flavors of vec_xor. */
15672 case ALTIVEC_BUILTIN_VXOR:
15673 arg0 = gimple_call_arg (stmt, 0);
15674 arg1 = gimple_call_arg (stmt, 1);
15675 lhs = gimple_call_lhs (stmt);
15676 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15677 gimple_set_location (g, gimple_location (stmt));
15678 gsi_replace (gsi, g, true);
15679 return true;
15680 /* Flavors of vec_nor. */
15681 case ALTIVEC_BUILTIN_VNOR:
15682 arg0 = gimple_call_arg (stmt, 0);
15683 arg1 = gimple_call_arg (stmt, 1);
15684 lhs = gimple_call_lhs (stmt);
15685 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15686 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15687 gimple_set_location (g, gimple_location (stmt));
15688 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15689 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15690 gimple_set_location (g, gimple_location (stmt));
15691 gsi_replace (gsi, g, true);
15692 return true;
15693 /* Flavors of vec_abs. */
15694 case ALTIVEC_BUILTIN_ABS_V16QI:
15695 case ALTIVEC_BUILTIN_ABS_V8HI:
15696 case ALTIVEC_BUILTIN_ABS_V4SI:
15697 case ALTIVEC_BUILTIN_ABS_V4SF:
15698 case P8V_BUILTIN_ABS_V2DI:
15699 case VSX_BUILTIN_XVABSDP:
15700 arg0 = gimple_call_arg (stmt, 0);
15701 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15702 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15703 return false;
15704 lhs = gimple_call_lhs (stmt);
15705 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15706 gimple_set_location (g, gimple_location (stmt));
15707 gsi_replace (gsi, g, true);
15708 return true;
15709 /* Flavors of vec_min. */
15710 case VSX_BUILTIN_XVMINDP:
15711 case P8V_BUILTIN_VMINSD:
15712 case P8V_BUILTIN_VMINUD:
15713 case ALTIVEC_BUILTIN_VMINSB:
15714 case ALTIVEC_BUILTIN_VMINSH:
15715 case ALTIVEC_BUILTIN_VMINSW:
15716 case ALTIVEC_BUILTIN_VMINUB:
15717 case ALTIVEC_BUILTIN_VMINUH:
15718 case ALTIVEC_BUILTIN_VMINUW:
15719 case ALTIVEC_BUILTIN_VMINFP:
15720 arg0 = gimple_call_arg (stmt, 0);
15721 arg1 = gimple_call_arg (stmt, 1);
15722 lhs = gimple_call_lhs (stmt);
15723 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15724 gimple_set_location (g, gimple_location (stmt));
15725 gsi_replace (gsi, g, true);
15726 return true;
15727 /* Flavors of vec_max. */
15728 case VSX_BUILTIN_XVMAXDP:
15729 case P8V_BUILTIN_VMAXSD:
15730 case P8V_BUILTIN_VMAXUD:
15731 case ALTIVEC_BUILTIN_VMAXSB:
15732 case ALTIVEC_BUILTIN_VMAXSH:
15733 case ALTIVEC_BUILTIN_VMAXSW:
15734 case ALTIVEC_BUILTIN_VMAXUB:
15735 case ALTIVEC_BUILTIN_VMAXUH:
15736 case ALTIVEC_BUILTIN_VMAXUW:
15737 case ALTIVEC_BUILTIN_VMAXFP:
15738 arg0 = gimple_call_arg (stmt, 0);
15739 arg1 = gimple_call_arg (stmt, 1);
15740 lhs = gimple_call_lhs (stmt);
15741 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15742 gimple_set_location (g, gimple_location (stmt));
15743 gsi_replace (gsi, g, true);
15744 return true;
15745 /* Flavors of vec_eqv. */
15746 case P8V_BUILTIN_EQV_V16QI:
15747 case P8V_BUILTIN_EQV_V8HI:
15748 case P8V_BUILTIN_EQV_V4SI:
15749 case P8V_BUILTIN_EQV_V4SF:
15750 case P8V_BUILTIN_EQV_V2DF:
15751 case P8V_BUILTIN_EQV_V2DI:
15752 arg0 = gimple_call_arg (stmt, 0);
15753 arg1 = gimple_call_arg (stmt, 1);
15754 lhs = gimple_call_lhs (stmt);
15755 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15756 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15757 gimple_set_location (g, gimple_location (stmt));
15758 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15759 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15760 gimple_set_location (g, gimple_location (stmt));
15761 gsi_replace (gsi, g, true);
15762 return true;
15763 /* Flavors of vec_rotate_left. */
15764 case ALTIVEC_BUILTIN_VRLB:
15765 case ALTIVEC_BUILTIN_VRLH:
15766 case ALTIVEC_BUILTIN_VRLW:
15767 case P8V_BUILTIN_VRLD:
15768 arg0 = gimple_call_arg (stmt, 0);
15769 arg1 = gimple_call_arg (stmt, 1);
15770 lhs = gimple_call_lhs (stmt);
15771 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15772 gimple_set_location (g, gimple_location (stmt));
15773 gsi_replace (gsi, g, true);
15774 return true;
15775 /* Flavors of vector shift right algebraic.
15776 vec_sra{b,h,w} -> vsra{b,h,w}. */
15777 case ALTIVEC_BUILTIN_VSRAB:
15778 case ALTIVEC_BUILTIN_VSRAH:
15779 case ALTIVEC_BUILTIN_VSRAW:
15780 case P8V_BUILTIN_VSRAD:
15781 {
15782 arg0 = gimple_call_arg (stmt, 0);
15783 arg1 = gimple_call_arg (stmt, 1);
15784 lhs = gimple_call_lhs (stmt);
15785 tree arg1_type = TREE_TYPE (arg1);
15786 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15787 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15788 location_t loc = gimple_location (stmt);
15789 /* Force arg1 into the valid range for the arg0 element type. */
15790 /* Build a vector consisting of the max valid bit-size values. */
15791 int n_elts = VECTOR_CST_NELTS (arg1);
15792 tree element_size = build_int_cst (unsigned_element_type,
15793 128 / n_elts);
15794 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15795 for (int i = 0; i < n_elts; i++)
15796 elts.safe_push (element_size);
15797 tree modulo_tree = elts.build ();
15798 /* Modulo the provided shift value against that vector. */
15799 gimple_seq stmts = NULL;
15800 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15801 unsigned_arg1_type, arg1);
15802 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15803 unsigned_arg1_type, unsigned_arg1,
15804 modulo_tree);
15805 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15806 /* And finally, do the shift. */
15807 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15808 gimple_set_location (g, loc);
15809 gsi_replace (gsi, g, true);
15810 return true;
15811 }
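/* Illustrative example: for vec_sra on V4SI the element size is 32 bits, so
   a shift count of 35 is reduced to 35 % 32 == 3 before the RSHIFT_EXPR is
   emitted, matching the modulo semantics of the vsraw instruction.  */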
15812 /* Flavors of vector shift left.
15813 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15814 case ALTIVEC_BUILTIN_VSLB:
15815 case ALTIVEC_BUILTIN_VSLH:
15816 case ALTIVEC_BUILTIN_VSLW:
15817 case P8V_BUILTIN_VSLD:
15818 {
15819 location_t loc;
15820 gimple_seq stmts = NULL;
15821 arg0 = gimple_call_arg (stmt, 0);
15822 tree arg0_type = TREE_TYPE (arg0);
15823 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15824 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15825 return false;
15826 arg1 = gimple_call_arg (stmt, 1);
15827 tree arg1_type = TREE_TYPE (arg1);
15828 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15829 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15830 loc = gimple_location (stmt);
15831 lhs = gimple_call_lhs (stmt);
15832 /* Force arg1 into the valid range for the arg0 element type. */
15833 /* Build a vector consisting of the max valid bit-size values. */
15834 int n_elts = VECTOR_CST_NELTS (arg1);
15835 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15836 * BITS_PER_UNIT;
15837 tree element_size = build_int_cst (unsigned_element_type,
15838 tree_size_in_bits / n_elts);
15839 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15840 for (int i = 0; i < n_elts; i++)
15841 elts.safe_push (element_size);
15842 tree modulo_tree = elts.build ();
15843 /* Modulo the provided shift value against that vector. */
15844 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15845 unsigned_arg1_type, arg1);
15846 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15847 unsigned_arg1_type, unsigned_arg1,
15848 modulo_tree);
15849 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15850 /* And finally, do the shift. */
15851 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15852 gimple_set_location (g, gimple_location (stmt));
15853 gsi_replace (gsi, g, true);
15854 return true;
15855 }
15856 /* Flavors of vector shift right. */
15857 case ALTIVEC_BUILTIN_VSRB:
15858 case ALTIVEC_BUILTIN_VSRH:
15859 case ALTIVEC_BUILTIN_VSRW:
15860 case P8V_BUILTIN_VSRD:
15861 {
15862 arg0 = gimple_call_arg (stmt, 0);
15863 arg1 = gimple_call_arg (stmt, 1);
15864 lhs = gimple_call_lhs (stmt);
15865 tree arg1_type = TREE_TYPE (arg1);
15866 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15867 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15868 location_t loc = gimple_location (stmt);
15869 gimple_seq stmts = NULL;
15870 /* Convert arg0 to unsigned. */
15871 tree arg0_unsigned
15872 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15873 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15874 /* Force arg1 into the valid range for the arg0 element type. */
15875 /* Build a vector consisting of the max valid bit-size values. */
15876 int n_elts = VECTOR_CST_NELTS (arg1);
15877 tree element_size = build_int_cst (unsigned_element_type,
15878 128 / n_elts);
15879 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15880 for (int i = 0; i < n_elts; i++)
15881 elts.safe_push (element_size);
15882 tree modulo_tree = elts.build ();
15883 /* Modulo the provided shift value against that vector. */
15884 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15885 unsigned_arg1_type, arg1);
15886 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15887 unsigned_arg1_type, unsigned_arg1,
15888 modulo_tree);
15889 /* Do the shift. */
15890 tree res
15891 = gimple_build (&stmts, RSHIFT_EXPR,
15892 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15893 /* Convert result back to the lhs type. */
15894 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15895 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15896 update_call_from_tree (gsi, res);
15897 return true;
15898 }
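/* Note the contrast with the algebraic case above: here arg0 is first
   view-converted to its unsigned type, e.g. (illustrative, V4SI)
     u = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
     res = u >> (count % 32);
     lhs = VIEW_CONVERT_EXPR <vector signed int> (res);
   so the RSHIFT_EXPR performs a logical rather than an arithmetic
   shift.  */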
15899 /* Vector loads. */
15900 case ALTIVEC_BUILTIN_LVX_V16QI:
15901 case ALTIVEC_BUILTIN_LVX_V8HI:
15902 case ALTIVEC_BUILTIN_LVX_V4SI:
15903 case ALTIVEC_BUILTIN_LVX_V4SF:
15904 case ALTIVEC_BUILTIN_LVX_V2DI:
15905 case ALTIVEC_BUILTIN_LVX_V2DF:
15906 case ALTIVEC_BUILTIN_LVX_V1TI:
15907 {
15908 arg0 = gimple_call_arg (stmt, 0); // offset
15909 arg1 = gimple_call_arg (stmt, 1); // address
15910 lhs = gimple_call_lhs (stmt);
15911 location_t loc = gimple_location (stmt);
15912 /* Since arg1 may be cast to a different type, just use ptr_type_node
15913 here instead of trying to enforce TBAA on pointer types. */
15914 tree arg1_type = ptr_type_node;
15915 tree lhs_type = TREE_TYPE (lhs);
15916 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15917 the tree using the value from arg0. The resulting type will match
15918 the type of arg1. */
15919 gimple_seq stmts = NULL;
15920 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15921 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15922 arg1_type, arg1, temp_offset);
15923 /* Mask off any lower bits from the address. */
15924 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15925 arg1_type, temp_addr,
15926 build_int_cst (arg1_type, -16));
15927 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15928 if (!is_gimple_mem_ref_addr (aligned_addr))
15929 {
15930 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15931 gimple *g = gimple_build_assign (t, aligned_addr);
15932 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15933 aligned_addr = t;
15934 }
15935 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15936 take an offset, but since we've already incorporated the offset
15937 above, here we just pass in a zero. */
15938 gimple *g
15939 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15940 build_int_cst (arg1_type, 0)));
15941 gimple_set_location (g, loc);
15942 gsi_replace (gsi, g, true);
15943 return true;
15944 }
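/* Illustrative sketch of the gimple produced for an lvx load:
     addr = arg1 + (sizetype) arg0;
     addr = addr & -16;
     lhs = MEM[(vector type *) addr];
   The masking mirrors lvx itself, which ignores the low four address
   bits.  */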
15945 /* Vector stores. */
15946 case ALTIVEC_BUILTIN_STVX_V16QI:
15947 case ALTIVEC_BUILTIN_STVX_V8HI:
15948 case ALTIVEC_BUILTIN_STVX_V4SI:
15949 case ALTIVEC_BUILTIN_STVX_V4SF:
15950 case ALTIVEC_BUILTIN_STVX_V2DI:
15951 case ALTIVEC_BUILTIN_STVX_V2DF:
15952 {
15953 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15954 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15955 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15956 location_t loc = gimple_location (stmt);
15957 tree arg0_type = TREE_TYPE (arg0);
15958 /* Use ptr_type_node (no TBAA) for the arg2_type.
15959 FIXME: (Richard) "A proper fix would be to transition this type as
15960 seen from the frontend to GIMPLE, for example in a similar way we
15961 do for MEM_REFs by piggy-backing that on an extra argument, a
15962 constant zero pointer of the alias pointer type to use (which would
15963 also serve as a type indicator of the store itself). I'd use a
15964 target specific internal function for this (not sure if we can have
15965 those target specific, but I guess if it's folded away then that's
15966 fine) and get away with the overload set." */
15967 tree arg2_type = ptr_type_node;
15968 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15969 the tree using the value from arg0. The resulting type will match
15970 the type of arg2. */
15971 gimple_seq stmts = NULL;
15972 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15973 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15974 arg2_type, arg2, temp_offset);
15975 /* Mask off any lower bits from the address. */
15976 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15977 arg2_type, temp_addr,
15978 build_int_cst (arg2_type, -16));
15979 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15980 if (!is_gimple_mem_ref_addr (aligned_addr))
15981 {
15982 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15983 gimple *g = gimple_build_assign (t, aligned_addr);
15984 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15985 aligned_addr = t;
15986 }
15987 /* The desired gimple result should be similar to:
15988 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15989 gimple *g
15990 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15991 build_int_cst (arg2_type, 0)), arg0);
15992 gimple_set_location (g, loc);
15993 gsi_replace (gsi, g, true);
15994 return true;
15995 }
15996
15997 /* Unaligned vector loads. */
15998 case VSX_BUILTIN_LXVW4X_V16QI:
15999 case VSX_BUILTIN_LXVW4X_V8HI:
16000 case VSX_BUILTIN_LXVW4X_V4SF:
16001 case VSX_BUILTIN_LXVW4X_V4SI:
16002 case VSX_BUILTIN_LXVD2X_V2DF:
16003 case VSX_BUILTIN_LXVD2X_V2DI:
16004 {
16005 arg0 = gimple_call_arg (stmt, 0); // offset
16006 arg1 = gimple_call_arg (stmt, 1); // address
16007 lhs = gimple_call_lhs (stmt);
16008 location_t loc = gimple_location (stmt);
16009 /* Since arg1 may be cast to a different type, just use ptr_type_node
16010 here instead of trying to enforce TBAA on pointer types. */
16011 tree arg1_type = ptr_type_node;
16012 tree lhs_type = TREE_TYPE (lhs);
16013 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
16014 required alignment (on Power) is 4 bytes regardless of data type. */
16015 tree align_ltype = build_aligned_type (lhs_type, 4);
16016 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16017 the tree using the value from arg0. The resulting type will match
16018 the type of arg1. */
16019 gimple_seq stmts = NULL;
16020 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
16021 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16022 arg1_type, arg1, temp_offset);
16023 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16024 if (!is_gimple_mem_ref_addr (temp_addr))
16025 {
16026 tree t = make_ssa_name (TREE_TYPE (temp_addr));
16027 gimple *g = gimple_build_assign (t, temp_addr);
16028 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16029 temp_addr = t;
16030 }
16031 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
16032 take an offset, but since we've already incorporated the offset
16033 above, here we just pass in a zero. */
16034 gimple *g;
16035 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
16036 build_int_cst (arg1_type, 0)));
16037 gimple_set_location (g, loc);
16038 gsi_replace (gsi, g, true);
16039 return true;
16040 }
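/* Unlike the lvx case above, no masking of the low address bits is done
   here: lxvw4x/lxvd2x handle unaligned addresses directly, so the MEM_REF
   is merely built with the 4-byte aligned type from build_aligned_type.  */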
16041
16042 /* Unaligned vector stores. */
16043 case VSX_BUILTIN_STXVW4X_V16QI:
16044 case VSX_BUILTIN_STXVW4X_V8HI:
16045 case VSX_BUILTIN_STXVW4X_V4SF:
16046 case VSX_BUILTIN_STXVW4X_V4SI:
16047 case VSX_BUILTIN_STXVD2X_V2DF:
16048 case VSX_BUILTIN_STXVD2X_V2DI:
16049 {
16050 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16051 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16052 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16053 location_t loc = gimple_location (stmt);
16054 tree arg0_type = TREE_TYPE (arg0);
16055 /* Use ptr_type_node (no TBAA) for the arg2_type. */
16056 tree arg2_type = ptr_type_node;
16057 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
16058 required alignment (on Power) is 4 bytes regardless of data type. */
16059 tree align_stype = build_aligned_type (arg0_type, 4);
16060 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16061 the tree using the value from arg1. */
16062 gimple_seq stmts = NULL;
16063 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16064 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16065 arg2_type, arg2, temp_offset);
16066 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16067 if (!is_gimple_mem_ref_addr (temp_addr))
16068 {
16069 tree t = make_ssa_name (TREE_TYPE (temp_addr));
16070 gimple *g = gimple_build_assign (t, temp_addr);
16071 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16072 temp_addr = t;
16073 }
16074 gimple *g;
16075 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
16076 build_int_cst (arg2_type, 0)), arg0);
16077 gimple_set_location (g, loc);
16078 gsi_replace (gsi, g, true);
16079 return true;
16080 }
16081
16082 /* Vector Fused multiply-add (fma). */
16083 case ALTIVEC_BUILTIN_VMADDFP:
16084 case VSX_BUILTIN_XVMADDDP:
16085 case ALTIVEC_BUILTIN_VMLADDUHM:
16086 {
16087 arg0 = gimple_call_arg (stmt, 0);
16088 arg1 = gimple_call_arg (stmt, 1);
16089 tree arg2 = gimple_call_arg (stmt, 2);
16090 lhs = gimple_call_lhs (stmt);
16091 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
16092 gimple_call_set_lhs (g, lhs);
16093 gimple_call_set_nothrow (g, true);
16094 gimple_set_location (g, gimple_location (stmt));
16095 gsi_replace (gsi, g, true);
16096 return true;
16097 }
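/* Illustrative example: vec_madd (a, b, c) on V4SF folds to the internal
   call lhs = .FMA (a, b, c), which later expands to the fused
   multiply-add instruction appropriate for the type.  */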
16098
16099 /* Vector compares; EQ, NE, GE, GT, LE. */
16100 case ALTIVEC_BUILTIN_VCMPEQUB:
16101 case ALTIVEC_BUILTIN_VCMPEQUH:
16102 case ALTIVEC_BUILTIN_VCMPEQUW:
16103 case P8V_BUILTIN_VCMPEQUD:
16104 fold_compare_helper (gsi, EQ_EXPR, stmt);
16105 return true;
16106
16107 case P9V_BUILTIN_CMPNEB:
16108 case P9V_BUILTIN_CMPNEH:
16109 case P9V_BUILTIN_CMPNEW:
16110 fold_compare_helper (gsi, NE_EXPR, stmt);
16111 return true;
16112
16113 case VSX_BUILTIN_CMPGE_16QI:
16114 case VSX_BUILTIN_CMPGE_U16QI:
16115 case VSX_BUILTIN_CMPGE_8HI:
16116 case VSX_BUILTIN_CMPGE_U8HI:
16117 case VSX_BUILTIN_CMPGE_4SI:
16118 case VSX_BUILTIN_CMPGE_U4SI:
16119 case VSX_BUILTIN_CMPGE_2DI:
16120 case VSX_BUILTIN_CMPGE_U2DI:
16121 fold_compare_helper (gsi, GE_EXPR, stmt);
16122 return true;
16123
16124 case ALTIVEC_BUILTIN_VCMPGTSB:
16125 case ALTIVEC_BUILTIN_VCMPGTUB:
16126 case ALTIVEC_BUILTIN_VCMPGTSH:
16127 case ALTIVEC_BUILTIN_VCMPGTUH:
16128 case ALTIVEC_BUILTIN_VCMPGTSW:
16129 case ALTIVEC_BUILTIN_VCMPGTUW:
16130 case P8V_BUILTIN_VCMPGTUD:
16131 case P8V_BUILTIN_VCMPGTSD:
16132 fold_compare_helper (gsi, GT_EXPR, stmt);
16133 return true;
16134
16135 case VSX_BUILTIN_CMPLE_16QI:
16136 case VSX_BUILTIN_CMPLE_U16QI:
16137 case VSX_BUILTIN_CMPLE_8HI:
16138 case VSX_BUILTIN_CMPLE_U8HI:
16139 case VSX_BUILTIN_CMPLE_4SI:
16140 case VSX_BUILTIN_CMPLE_U4SI:
16141 case VSX_BUILTIN_CMPLE_2DI:
16142 case VSX_BUILTIN_CMPLE_U2DI:
16143 fold_compare_helper (gsi, LE_EXPR, stmt);
16144 return true;
16145
16146 /* Flavors of vec_splat_[us]{8,16,32}. */
16147 case ALTIVEC_BUILTIN_VSPLTISB:
16148 case ALTIVEC_BUILTIN_VSPLTISH:
16149 case ALTIVEC_BUILTIN_VSPLTISW:
16150 {
16151 arg0 = gimple_call_arg (stmt, 0);
16152 lhs = gimple_call_lhs (stmt);
16153
16154 /* Only fold vec_splat_*() if arg0 is a constant 5-bit signed value
16155 in the range -16 to +15. */
16156 if (TREE_CODE (arg0) != INTEGER_CST
16157 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
16158 return false;
16159 gimple_seq stmts = NULL;
16160 location_t loc = gimple_location (stmt);
16161 tree splat_value = gimple_convert (&stmts, loc,
16162 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16163 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16164 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16165 g = gimple_build_assign (lhs, splat_tree);
16166 gimple_set_location (g, gimple_location (stmt));
16167 gsi_replace (gsi, g, true);
16168 return true;
16169 }
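/* Illustrative example: vec_splat_s32 (5) folds to the vector constant
   { 5, 5, 5, 5 }; out-of-range arguments are left to the normal
   expansion path to diagnose.  */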
16170
16171 /* Flavors of vec_splat. */
16172 /* a = vec_splat (b, 0x3) becomes a = { b[3], b[3], b[3], ... }; */
16173 case ALTIVEC_BUILTIN_VSPLTB:
16174 case ALTIVEC_BUILTIN_VSPLTH:
16175 case ALTIVEC_BUILTIN_VSPLTW:
16176 case VSX_BUILTIN_XXSPLTD_V2DI:
16177 case VSX_BUILTIN_XXSPLTD_V2DF:
16178 {
16179 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
16180 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
16181 /* Only fold vec_splat () if arg1 is a constant value and a valid
16182 index into the arg0 vector. */
16183 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16184 if (TREE_CODE (arg1) != INTEGER_CST
16185 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16186 return false;
16187 lhs = gimple_call_lhs (stmt);
16188 tree lhs_type = TREE_TYPE (lhs);
16189 tree arg0_type = TREE_TYPE (arg0);
16190 tree splat;
16191 if (TREE_CODE (arg0) == VECTOR_CST)
16192 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16193 else
16194 {
16195 /* Determine (in bits) the length and start location of the
16196 splat value for a call to the tree_vec_extract helper. */
16197 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16198 * BITS_PER_UNIT / n_elts;
16199 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16200 tree len = build_int_cst (bitsizetype, splat_elem_size);
16201 tree start = build_int_cst (bitsizetype, splat_start_bit);
16202 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16203 len, start);
16204 }
16205 /* And finally, build the new vector. */
16206 tree splat_tree = build_vector_from_val (lhs_type, splat);
16207 g = gimple_build_assign (lhs, splat_tree);
16208 gimple_set_location (g, gimple_location (stmt));
16209 gsi_replace (gsi, g, true);
16210 return true;
16211 }
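/* Illustrative example: for a non-constant V4SI input, vec_splat (v, 3)
   extracts element 3 as a 32-bit value starting at bit 96 via
   tree_vec_extract, then broadcasts it with build_vector_from_val.  */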
16212
16213 /* vec_mergel (integrals). */
16214 case ALTIVEC_BUILTIN_VMRGLH:
16215 case ALTIVEC_BUILTIN_VMRGLW:
16216 case VSX_BUILTIN_XXMRGLW_4SI:
16217 case ALTIVEC_BUILTIN_VMRGLB:
16218 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16219 case VSX_BUILTIN_XXMRGLW_4SF:
16220 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16221 fold_mergehl_helper (gsi, stmt, 1);
16222 return true;
16223 /* vec_mergeh (integrals). */
16224 case ALTIVEC_BUILTIN_VMRGHH:
16225 case ALTIVEC_BUILTIN_VMRGHW:
16226 case VSX_BUILTIN_XXMRGHW_4SI:
16227 case ALTIVEC_BUILTIN_VMRGHB:
16228 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16229 case VSX_BUILTIN_XXMRGHW_4SF:
16230 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16231 fold_mergehl_helper (gsi, stmt, 0);
16232 return true;
16233
16234 /* Flavors of vec_mergee. */
16235 case P8V_BUILTIN_VMRGEW_V4SI:
16236 case P8V_BUILTIN_VMRGEW_V2DI:
16237 case P8V_BUILTIN_VMRGEW_V4SF:
16238 case P8V_BUILTIN_VMRGEW_V2DF:
16239 fold_mergeeo_helper (gsi, stmt, 0);
16240 return true;
16241 /* Flavors of vec_mergeo. */
16242 case P8V_BUILTIN_VMRGOW_V4SI:
16243 case P8V_BUILTIN_VMRGOW_V2DI:
16244 case P8V_BUILTIN_VMRGOW_V4SF:
16245 case P8V_BUILTIN_VMRGOW_V2DF:
16246 fold_mergeeo_helper (gsi, stmt, 1);
16247 return true;
16248
16249 /* d = vec_pack (a, b) */
16250 case P8V_BUILTIN_VPKUDUM:
16251 case ALTIVEC_BUILTIN_VPKUHUM:
16252 case ALTIVEC_BUILTIN_VPKUWUM:
16253 {
16254 arg0 = gimple_call_arg (stmt, 0);
16255 arg1 = gimple_call_arg (stmt, 1);
16256 lhs = gimple_call_lhs (stmt);
16257 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16258 gimple_set_location (g, gimple_location (stmt));
16259 gsi_replace (gsi, g, true);
16260 return true;
16261 }
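/* Illustrative example: packing two V2DI operands truncates each
   doubleword element to a word, yielding a single V4SI result via
   VEC_PACK_TRUNC_EXPR.  */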
16262
16263 /* d = vec_unpackh (a) */
16264 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16265 in this code is sensitive to endianness, and needs to be inverted to
16266 handle both LE and BE targets. */
16267 case ALTIVEC_BUILTIN_VUPKHSB:
16268 case ALTIVEC_BUILTIN_VUPKHSH:
16269 case P8V_BUILTIN_VUPKHSW:
16270 {
16271 arg0 = gimple_call_arg (stmt, 0);
16272 lhs = gimple_call_lhs (stmt);
16273 if (BYTES_BIG_ENDIAN)
16274 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16275 else
16276 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16277 gimple_set_location (g, gimple_location (stmt));
16278 gsi_replace (gsi, g, true);
16279 return true;
16280 }
16281 /* d = vec_unpackl (a) */
16282 case ALTIVEC_BUILTIN_VUPKLSB:
16283 case ALTIVEC_BUILTIN_VUPKLSH:
16284 case P8V_BUILTIN_VUPKLSW:
16285 {
16286 arg0 = gimple_call_arg (stmt, 0);
16287 lhs = gimple_call_lhs (stmt);
16288 if (BYTES_BIG_ENDIAN)
16289 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16290 else
16291 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16292 gimple_set_location (g, gimple_location (stmt));
16293 gsi_replace (gsi, g, true);
16294 return true;
16295 }
16296 /* There is no gimple type corresponding with pixel, so just return. */
16297 case ALTIVEC_BUILTIN_VUPKHPX:
16298 case ALTIVEC_BUILTIN_VUPKLPX:
16299 return false;
16300
16301 /* vec_perm. */
16302 case ALTIVEC_BUILTIN_VPERM_16QI:
16303 case ALTIVEC_BUILTIN_VPERM_8HI:
16304 case ALTIVEC_BUILTIN_VPERM_4SI:
16305 case ALTIVEC_BUILTIN_VPERM_2DI:
16306 case ALTIVEC_BUILTIN_VPERM_4SF:
16307 case ALTIVEC_BUILTIN_VPERM_2DF:
16308 {
16309 arg0 = gimple_call_arg (stmt, 0);
16310 arg1 = gimple_call_arg (stmt, 1);
16311 tree permute = gimple_call_arg (stmt, 2);
16312 lhs = gimple_call_lhs (stmt);
16313 location_t loc = gimple_location (stmt);
16314 gimple_seq stmts = NULL;
16315 /* Convert arg0 and arg1 to match the type of the permute for the
16316 VEC_PERM_EXPR operation. */
16317 tree permute_type = TREE_TYPE (permute);
16318 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16319 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16320 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16321 permute_type, arg0_ptype, arg1_ptype,
16322 permute);
16323 /* Convert the result back to the desired lhs type upon completion. */
16324 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16325 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16326 g = gimple_build_assign (lhs, temp);
16327 gimple_set_location (g, loc);
16328 gsi_replace (gsi, g, true);
16329 return true;
16330 }
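/* Both inputs are first converted to the selector's vector type, so the
   VEC_PERM_EXPR selects individual bytes, and the result is then
   converted back to the type of the lhs.  */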
16331
16332 default:
16333 if (TARGET_DEBUG_BUILTIN)
16334 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16335 fn_code, fn_name1, fn_name2);
16336 break;
16337 }
16338
16339 return false;
16340 }
16341
16342 /* Expand an expression EXP that calls a built-in function,
16343 with result going to TARGET if that's convenient
16344 (and in mode MODE if that's convenient).
16345 SUBTARGET may be used as the target for computing one of EXP's operands.
16346 IGNORE is nonzero if the value is to be ignored. */
16347
16348 static rtx
16349 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16350 machine_mode mode ATTRIBUTE_UNUSED,
16351 int ignore ATTRIBUTE_UNUSED)
16352 {
16353 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16354 enum rs6000_builtins fcode
16355 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16356 size_t uns_fcode = (size_t)fcode;
16357 const struct builtin_description *d;
16358 size_t i;
16359 rtx ret;
16360 bool success;
16361 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16362 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16363 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16364
16365 /* We have two different modes (KFmode, TFmode) for the IEEE 128-bit
16366 floating point type: KFmode when long double is IBM extended double, and
16367 TFmode when long double is itself IEEE 128-bit. It is simpler if
16368 we only define one variant of the built-in function, and switch the code
16369 when defining it, rather than defining two built-ins and using the
16370 overload table in rs6000-c.c to switch between the two. If we don't have
16371 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16372 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16373 if (FLOAT128_IEEE_P (TFmode))
16374 switch (icode)
16375 {
16376 default:
16377 break;
16378
16379 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16380 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16381 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16382 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16383 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16384 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16385 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16386 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16387 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16388 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16389 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16390 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16391 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16392 }
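/* Illustrative example: with -mabi=ieeelongdouble (so TFmode is the IEEE
   128-bit mode), a built-in registered with CODE_FOR_sqrtkf2_odd, such as
   __builtin_sqrtf128_round_to_odd, is remapped here to
   CODE_FOR_sqrttf2_odd.  */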
16393
16394 if (TARGET_DEBUG_BUILTIN)
16395 {
16396 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16397 const char *name2 = (icode != CODE_FOR_nothing)
16398 ? get_insn_name ((int) icode)
16399 : "nothing";
16400 const char *name3;
16401
16402 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16403 {
16404 default: name3 = "unknown"; break;
16405 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16406 case RS6000_BTC_UNARY: name3 = "unary"; break;
16407 case RS6000_BTC_BINARY: name3 = "binary"; break;
16408 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16409 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16410 case RS6000_BTC_ABS: name3 = "abs"; break;
16411 case RS6000_BTC_DST: name3 = "dst"; break;
16412 }
16413
16414
16415 fprintf (stderr,
16416 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16417 (name1) ? name1 : "---", fcode,
16418 (name2) ? name2 : "---", (int) icode,
16419 name3,
16420 func_valid_p ? "" : ", not valid");
16421 }
16422
16423 if (!func_valid_p)
16424 {
16425 rs6000_invalid_builtin (fcode);
16426
16427 /* Given it is invalid, just generate a normal call. */
16428 return expand_call (exp, target, ignore);
16429 }
16430
16431 switch (fcode)
16432 {
16433 case RS6000_BUILTIN_RECIP:
16434 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16435
16436 case RS6000_BUILTIN_RECIPF:
16437 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16438
16439 case RS6000_BUILTIN_RSQRTF:
16440 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16441
16442 case RS6000_BUILTIN_RSQRT:
16443 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16444
16445 case POWER7_BUILTIN_BPERMD:
16446 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16447 ? CODE_FOR_bpermd_di
16448 : CODE_FOR_bpermd_si), exp, target);
16449
16450 case RS6000_BUILTIN_GET_TB:
16451 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16452 target);
16453
16454 case RS6000_BUILTIN_MFTB:
16455 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16456 ? CODE_FOR_rs6000_mftb_di
16457 : CODE_FOR_rs6000_mftb_si),
16458 target);
16459
16460 case RS6000_BUILTIN_MFFS:
16461 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16462
16463 case RS6000_BUILTIN_MTFSB0:
16464 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16465
16466 case RS6000_BUILTIN_MTFSB1:
16467 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16468
16469 case RS6000_BUILTIN_SET_FPSCR_RN:
16470 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16471 exp);
16472
16473 case RS6000_BUILTIN_SET_FPSCR_DRN:
16474 return
16475 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16476 exp);
16477
16478 case RS6000_BUILTIN_MFFSL:
16479 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16480
16481 case RS6000_BUILTIN_MTFSF:
16482 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16483
16484 case RS6000_BUILTIN_CPU_INIT:
16485 case RS6000_BUILTIN_CPU_IS:
16486 case RS6000_BUILTIN_CPU_SUPPORTS:
16487 return cpu_expand_builtin (fcode, exp, target);
16488
16489 case MISC_BUILTIN_SPEC_BARRIER:
16490 {
16491 emit_insn (gen_speculation_barrier ());
16492 return NULL_RTX;
16493 }
16494
16495 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16496 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16497 {
16498 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16499 : (int) CODE_FOR_altivec_lvsl_direct);
16500 machine_mode tmode = insn_data[icode2].operand[0].mode;
16501 machine_mode mode = insn_data[icode2].operand[1].mode;
16502 tree arg;
16503 rtx op, addr, pat;
16504
16505 gcc_assert (TARGET_ALTIVEC);
16506
16507 arg = CALL_EXPR_ARG (exp, 0);
16508 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16509 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16510 addr = memory_address (mode, op);
16511 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16512 op = addr;
16513 else
16514 {
16515 /* For the load case we need to negate the address. */
16516 op = gen_reg_rtx (GET_MODE (addr));
16517 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16518 }
16519 op = gen_rtx_MEM (mode, op);
16520
16521 if (target == 0
16522 || GET_MODE (target) != tmode
16523 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16524 target = gen_reg_rtx (tmode);
16525
16526 pat = GEN_FCN (icode2) (target, op);
16527 if (!pat)
16528 return 0;
16529 emit_insn (pat);
16530
16531 return target;
16532 }
16533
16534 case ALTIVEC_BUILTIN_VCFUX:
16535 case ALTIVEC_BUILTIN_VCFSX:
16536 case ALTIVEC_BUILTIN_VCTUXS:
16537 case ALTIVEC_BUILTIN_VCTSXS:
16538 /* FIXME: There's got to be a nicer way to handle this case than
16539 constructing a new CALL_EXPR. */
16540 if (call_expr_nargs (exp) == 1)
16541 {
16542 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16543 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16544 }
16545 break;
16546
16547 /* For the pack and unpack int128 routines, fix up the builtin so it
16548 uses the correct IBM128 type. */
16549 case MISC_BUILTIN_PACK_IF:
16550 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16551 {
16552 icode = CODE_FOR_packtf;
16553 fcode = MISC_BUILTIN_PACK_TF;
16554 uns_fcode = (size_t)fcode;
16555 }
16556 break;
16557
16558 case MISC_BUILTIN_UNPACK_IF:
16559 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16560 {
16561 icode = CODE_FOR_unpacktf;
16562 fcode = MISC_BUILTIN_UNPACK_TF;
16563 uns_fcode = (size_t)fcode;
16564 }
16565 break;
16566
16567 default:
16568 break;
16569 }
16570
16571 if (TARGET_ALTIVEC)
16572 {
16573 ret = altivec_expand_builtin (exp, target, &success);
16574
16575 if (success)
16576 return ret;
16577 }
16578 if (TARGET_HTM)
16579 {
16580 ret = htm_expand_builtin (exp, target, &success);
16581
16582 if (success)
16583 return ret;
16584 }
16585
16586 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16587 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16588 gcc_assert (attr == RS6000_BTC_UNARY
16589 || attr == RS6000_BTC_BINARY
16590 || attr == RS6000_BTC_TERNARY
16591 || attr == RS6000_BTC_SPECIAL);
16592
16593 /* Handle simple unary operations. */
16594 d = bdesc_1arg;
16595 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16596 if (d->code == fcode)
16597 return rs6000_expand_unop_builtin (icode, exp, target);
16598
16599 /* Handle simple binary operations. */
16600 d = bdesc_2arg;
16601 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16602 if (d->code == fcode)
16603 return rs6000_expand_binop_builtin (icode, exp, target);
16604
16605 /* Handle simple ternary operations. */
16606 d = bdesc_3arg;
16607 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16608 if (d->code == fcode)
16609 return rs6000_expand_ternop_builtin (icode, exp, target);
16610
16611 /* Handle simple no-argument operations. */
16612 d = bdesc_0arg;
16613 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16614 if (d->code == fcode)
16615 return rs6000_expand_zeroop_builtin (icode, target);
16616
16617 gcc_unreachable ();
16618 }
16619
16620 /* Create a builtin vector type with a name, taking care not to give
16621 the canonical type a name. */
16622
16623 static tree
16624 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16625 {
16626 tree result = build_vector_type (elt_type, num_elts);
16627
16628 /* Copy so we don't give the canonical type a name. */
16629 result = build_variant_type_copy (result);
16630
16631 add_builtin_type (name, result);
16632
16633 return result;
16634 }
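/* Illustrative usage, mirroring the calls below:
     V4SF_type_node = rs6000_vector_type ("__vector float",
					  float_type_node, 4);
   gives the user-visible "__vector float" name to a variant copy while
   the canonical V4SF type stays unnamed.  */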
16635
16636 static void
16637 rs6000_init_builtins (void)
16638 {
16639 tree tdecl;
16640 tree ftype;
16641 machine_mode mode;
16642
16643 if (TARGET_DEBUG_BUILTIN)
16644 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16645 (TARGET_ALTIVEC) ? ", altivec" : "",
16646 (TARGET_VSX) ? ", vsx" : "");
16647
16648 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16649 : "__vector long long",
16650 intDI_type_node, 2);
16651 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16652 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16653 intSI_type_node, 4);
16654 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16655 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16656 intHI_type_node, 8);
16657 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16658 intQI_type_node, 16);
16659
16660 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16661 unsigned_intQI_type_node, 16);
16662 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16663 unsigned_intHI_type_node, 8);
16664 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16665 unsigned_intSI_type_node, 4);
16666 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16667 ? "__vector unsigned long"
16668 : "__vector unsigned long long",
16669 unsigned_intDI_type_node, 2);
16670
16671 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16672
16673 const_str_type_node
16674 = build_pointer_type (build_qualified_type (char_type_node,
16675 TYPE_QUAL_CONST));
16676
16677 /* We use V1TI mode as a special container to hold __int128_t items that
16678 must live in VSX registers. */
16679 if (intTI_type_node)
16680 {
16681 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16682 intTI_type_node, 1);
16683 unsigned_V1TI_type_node
16684 = rs6000_vector_type ("__vector unsigned __int128",
16685 unsigned_intTI_type_node, 1);
16686 }
16687
16688 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16689 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16690 'vector unsigned short'. */
16691
16692 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16693 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16694 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16695 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16696 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16697
16698 long_integer_type_internal_node = long_integer_type_node;
16699 long_unsigned_type_internal_node = long_unsigned_type_node;
16700 long_long_integer_type_internal_node = long_long_integer_type_node;
16701 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16702 intQI_type_internal_node = intQI_type_node;
16703 uintQI_type_internal_node = unsigned_intQI_type_node;
16704 intHI_type_internal_node = intHI_type_node;
16705 uintHI_type_internal_node = unsigned_intHI_type_node;
16706 intSI_type_internal_node = intSI_type_node;
16707 uintSI_type_internal_node = unsigned_intSI_type_node;
16708 intDI_type_internal_node = intDI_type_node;
16709 uintDI_type_internal_node = unsigned_intDI_type_node;
16710 intTI_type_internal_node = intTI_type_node;
16711 uintTI_type_internal_node = unsigned_intTI_type_node;
16712 float_type_internal_node = float_type_node;
16713 double_type_internal_node = double_type_node;
16714 long_double_type_internal_node = long_double_type_node;
16715 dfloat64_type_internal_node = dfloat64_type_node;
16716 dfloat128_type_internal_node = dfloat128_type_node;
16717 void_type_internal_node = void_type_node;
16718
16719 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16720 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16721 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16722 format that uses a pair of doubles, depending on the switches and
16723 defaults.
16724
16725 If we support neither 128-bit IBM double double nor IEEE 128-bit
16726 floating point, we need to make sure the type is non-zero or else the
16727 self-tests fail during bootstrap.
16728
16729 Always create __ibm128 as a separate type, even if the current long double
16730 format is IBM extended double.
16731
16732 For IEEE 128-bit floating point, always create the type __ieee128. If the
16733 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16734 __ieee128. */
16735 if (TARGET_FLOAT128_TYPE)
16736 {
16737 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16738 ibm128_float_type_node = long_double_type_node;
16739 else
16740 {
16741 ibm128_float_type_node = make_node (REAL_TYPE);
16742 TYPE_PRECISION (ibm128_float_type_node) = 128;
16743 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16744 layout_type (ibm128_float_type_node);
16745 }
16746
16747 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16748 "__ibm128");
16749
16750 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16751 ieee128_float_type_node = long_double_type_node;
16752 else
16753 ieee128_float_type_node = float128_type_node;
16754
16755 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16756 "__ieee128");
16757 }
16758
16759 else
16760 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16761
16762 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16763 its tree type node. */
16764 builtin_mode_to_type[QImode][0] = integer_type_node;
16765 builtin_mode_to_type[HImode][0] = integer_type_node;
16766 builtin_mode_to_type[SImode][0] = intSI_type_node;
16767 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16768 builtin_mode_to_type[DImode][0] = intDI_type_node;
16769 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16770 builtin_mode_to_type[TImode][0] = intTI_type_node;
16771 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16772 builtin_mode_to_type[SFmode][0] = float_type_node;
16773 builtin_mode_to_type[DFmode][0] = double_type_node;
16774 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16775 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16776 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16777 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16778 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16779 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16780 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16781 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16782 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16783 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16784 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16785 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16786 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16787 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16788 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16789 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16790 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16791
16792 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16793 TYPE_NAME (bool_char_type_node) = tdecl;
16794
16795 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16796 TYPE_NAME (bool_short_type_node) = tdecl;
16797
16798 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16799 TYPE_NAME (bool_int_type_node) = tdecl;
16800
16801 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16802 TYPE_NAME (pixel_type_node) = tdecl;
16803
16804 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16805 bool_char_type_node, 16);
16806 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16807 bool_short_type_node, 8);
16808 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16809 bool_int_type_node, 4);
16810 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16811 ? "__vector __bool long"
16812 : "__vector __bool long long",
16813 bool_long_long_type_node, 2);
16814 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16815 pixel_type_node, 8);
16816
16817 /* Create Altivec and VSX builtins on machines with at least the
16818 general purpose extensions (970 and newer) to allow the use of
16819 the target attribute. */
16820 if (TARGET_EXTRA_BUILTINS)
16821 altivec_init_builtins ();
16822 if (TARGET_HTM)
16823 htm_init_builtins ();
16824
16825 if (TARGET_EXTRA_BUILTINS)
16826 rs6000_common_init_builtins ();
16827
16828 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16829 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16830 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16831
16832 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16833 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16834 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16835
16836 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16837 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16838 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16839
16840 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16841 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16842 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16843
16844 mode = (TARGET_64BIT) ? DImode : SImode;
16845 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16846 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16847 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16848
16849 ftype = build_function_type_list (unsigned_intDI_type_node,
16850 NULL_TREE);
16851 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16852
16853 if (TARGET_64BIT)
16854 ftype = build_function_type_list (unsigned_intDI_type_node,
16855 NULL_TREE);
16856 else
16857 ftype = build_function_type_list (unsigned_intSI_type_node,
16858 NULL_TREE);
16859 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16860
16861 ftype = build_function_type_list (double_type_node, NULL_TREE);
16862 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16863
16864 ftype = build_function_type_list (double_type_node, NULL_TREE);
16865 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16866
16867 ftype = build_function_type_list (void_type_node,
16868 intSI_type_node,
16869 NULL_TREE);
16870 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16871
16872 ftype = build_function_type_list (void_type_node,
16873 intSI_type_node,
16874 NULL_TREE);
16875 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16876
16877 ftype = build_function_type_list (void_type_node,
16878 intDI_type_node,
16879 NULL_TREE);
16880 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16881
16882 ftype = build_function_type_list (void_type_node,
16883 intDI_type_node,
16884 NULL_TREE);
16885 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16886
16887 ftype = build_function_type_list (void_type_node,
16888 intSI_type_node, double_type_node,
16889 NULL_TREE);
16890 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16891
16892 ftype = build_function_type_list (void_type_node, NULL_TREE);
16893 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16894 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16895 MISC_BUILTIN_SPEC_BARRIER);
16896
16897 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16898 NULL_TREE);
16899 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16900 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16901
16902 /* AIX libm provides clog as __clog. */
16903 if (TARGET_XCOFF
16904     && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16905 set_user_assembler_name (tdecl, "__clog");
16906
16907 #ifdef SUBTARGET_INIT_BUILTINS
16908 SUBTARGET_INIT_BUILTINS;
16909 #endif
16910 }
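
/* A user-level sketch (illustrative, not compiled in this file) of the CPU
   and timebase builtins registered just above; assumes powerpc*-linux with
   a glibc new enough to supply the hardware-capability data.  */
#if 0
#include <stdio.h>

int
main (void)
{
  __builtin_cpu_init ();	/* part of the documented API; harmless even
				   where the runtime needs no setup.  */
  if (__builtin_cpu_is ("power9"))
    puts ("running on POWER9");
  if (__builtin_cpu_supports ("vsx"))
    puts ("VSX available");
  printf ("timebase = %llu\n",
	  (unsigned long long) __builtin_ppc_get_timebase ());
  return 0;
}
#endif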
16911
16912 /* Returns the rs6000 builtin decl for CODE. */
16913
16914 static tree
16915 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16916 {
16917 HOST_WIDE_INT fnmask;
16918
16919 if (code >= RS6000_BUILTIN_COUNT)
16920 return error_mark_node;
16921
16922 fnmask = rs6000_builtin_info[code].mask;
16923 if ((fnmask & rs6000_builtin_mask) != fnmask)
16924 {
16925 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16926 return error_mark_node;
16927 }
16928
16929 return rs6000_builtin_decls[code];
16930 }
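
/* A worked sketch of the mask test above, using RS6000_BTM_* bits from
   rs6000.h for illustration: a builtin whose descriptor demands

     fnmask = RS6000_BTM_VSX | RS6000_BTM_P9_VECTOR;

   satisfies (fnmask & rs6000_builtin_mask) == fnmask only when both feature
   bits are enabled; otherwise the decl lookup diagnoses the builtin as
   invalid instead of handing out a decl the middle end could expand on an
   unsupported target.  */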
16931
16932 static void
16933 altivec_init_builtins (void)
16934 {
16935 const struct builtin_description *d;
16936 size_t i;
16937 tree ftype;
16938 tree decl;
16939 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16940
16941 tree pvoid_type_node = build_pointer_type (void_type_node);
16942
16943 tree pcvoid_type_node
16944 = build_pointer_type (build_qualified_type (void_type_node,
16945 TYPE_QUAL_CONST));
16946
16947 tree int_ftype_opaque
16948 = build_function_type_list (integer_type_node,
16949 opaque_V4SI_type_node, NULL_TREE);
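/* The next signature is deliberately a dummy "int (void)": the builtins
   declared with it (__builtin_vec_splats, __builtin_vec_promote) are
   overloaded and are given their real types during overload resolution in
   the front end.  */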
16950 tree opaque_ftype_opaque
16951 = build_function_type_list (integer_type_node, NULL_TREE);
16952 tree opaque_ftype_opaque_int
16953 = build_function_type_list (opaque_V4SI_type_node,
16954 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16955 tree opaque_ftype_opaque_opaque_int
16956 = build_function_type_list (opaque_V4SI_type_node,
16957 opaque_V4SI_type_node, opaque_V4SI_type_node,
16958 integer_type_node, NULL_TREE);
16959 tree opaque_ftype_opaque_opaque_opaque
16960 = build_function_type_list (opaque_V4SI_type_node,
16961 opaque_V4SI_type_node, opaque_V4SI_type_node,
16962 opaque_V4SI_type_node, NULL_TREE);
16963 tree opaque_ftype_opaque_opaque
16964 = build_function_type_list (opaque_V4SI_type_node,
16965 opaque_V4SI_type_node, opaque_V4SI_type_node,
16966 NULL_TREE);
16967 tree int_ftype_int_opaque_opaque
16968 = build_function_type_list (integer_type_node,
16969 integer_type_node, opaque_V4SI_type_node,
16970 opaque_V4SI_type_node, NULL_TREE);
16971 tree int_ftype_int_v4si_v4si
16972 = build_function_type_list (integer_type_node,
16973 integer_type_node, V4SI_type_node,
16974 V4SI_type_node, NULL_TREE);
16975 tree int_ftype_int_v2di_v2di
16976 = build_function_type_list (integer_type_node,
16977 integer_type_node, V2DI_type_node,
16978 V2DI_type_node, NULL_TREE);
16979 tree void_ftype_v4si
16980 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16981 tree v8hi_ftype_void
16982 = build_function_type_list (V8HI_type_node, NULL_TREE);
16983 tree void_ftype_void
16984 = build_function_type_list (void_type_node, NULL_TREE);
16985 tree void_ftype_int
16986 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16987
16988 tree opaque_ftype_long_pcvoid
16989 = build_function_type_list (opaque_V4SI_type_node,
16990 long_integer_type_node, pcvoid_type_node,
16991 NULL_TREE);
16992 tree v16qi_ftype_long_pcvoid
16993 = build_function_type_list (V16QI_type_node,
16994 long_integer_type_node, pcvoid_type_node,
16995 NULL_TREE);
16996 tree v8hi_ftype_long_pcvoid
16997 = build_function_type_list (V8HI_type_node,
16998 long_integer_type_node, pcvoid_type_node,
16999 NULL_TREE);
17000 tree v4si_ftype_long_pcvoid
17001 = build_function_type_list (V4SI_type_node,
17002 long_integer_type_node, pcvoid_type_node,
17003 NULL_TREE);
17004 tree v4sf_ftype_long_pcvoid
17005 = build_function_type_list (V4SF_type_node,
17006 long_integer_type_node, pcvoid_type_node,
17007 NULL_TREE);
17008 tree v2df_ftype_long_pcvoid
17009 = build_function_type_list (V2DF_type_node,
17010 long_integer_type_node, pcvoid_type_node,
17011 NULL_TREE);
17012 tree v2di_ftype_long_pcvoid
17013 = build_function_type_list (V2DI_type_node,
17014 long_integer_type_node, pcvoid_type_node,
17015 NULL_TREE);
17016 tree v1ti_ftype_long_pcvoid
17017 = build_function_type_list (V1TI_type_node,
17018 long_integer_type_node, pcvoid_type_node,
17019 NULL_TREE);
17020
17021 tree void_ftype_opaque_long_pvoid
17022 = build_function_type_list (void_type_node,
17023 opaque_V4SI_type_node, long_integer_type_node,
17024 pvoid_type_node, NULL_TREE);
17025 tree void_ftype_v4si_long_pvoid
17026 = build_function_type_list (void_type_node,
17027 V4SI_type_node, long_integer_type_node,
17028 pvoid_type_node, NULL_TREE);
17029 tree void_ftype_v16qi_long_pvoid
17030 = build_function_type_list (void_type_node,
17031 V16QI_type_node, long_integer_type_node,
17032 pvoid_type_node, NULL_TREE);
17033
17034 tree void_ftype_v16qi_pvoid_long
17035 = build_function_type_list (void_type_node,
17036 V16QI_type_node, pvoid_type_node,
17037 long_integer_type_node, NULL_TREE);
17038
17039 tree void_ftype_v8hi_long_pvoid
17040 = build_function_type_list (void_type_node,
17041 V8HI_type_node, long_integer_type_node,
17042 pvoid_type_node, NULL_TREE);
17043 tree void_ftype_v4sf_long_pvoid
17044 = build_function_type_list (void_type_node,
17045 V4SF_type_node, long_integer_type_node,
17046 pvoid_type_node, NULL_TREE);
17047 tree void_ftype_v2df_long_pvoid
17048 = build_function_type_list (void_type_node,
17049 V2DF_type_node, long_integer_type_node,
17050 pvoid_type_node, NULL_TREE);
17051 tree void_ftype_v1ti_long_pvoid
17052 = build_function_type_list (void_type_node,
17053 V1TI_type_node, long_integer_type_node,
17054 pvoid_type_node, NULL_TREE);
17055 tree void_ftype_v2di_long_pvoid
17056 = build_function_type_list (void_type_node,
17057 V2DI_type_node, long_integer_type_node,
17058 pvoid_type_node, NULL_TREE);
17059 tree int_ftype_int_v8hi_v8hi
17060 = build_function_type_list (integer_type_node,
17061 integer_type_node, V8HI_type_node,
17062 V8HI_type_node, NULL_TREE);
17063 tree int_ftype_int_v16qi_v16qi
17064 = build_function_type_list (integer_type_node,
17065 integer_type_node, V16QI_type_node,
17066 V16QI_type_node, NULL_TREE);
17067 tree int_ftype_int_v4sf_v4sf
17068 = build_function_type_list (integer_type_node,
17069 integer_type_node, V4SF_type_node,
17070 V4SF_type_node, NULL_TREE);
17071 tree int_ftype_int_v2df_v2df
17072 = build_function_type_list (integer_type_node,
17073 integer_type_node, V2DF_type_node,
17074 V2DF_type_node, NULL_TREE);
17075 tree v2di_ftype_v2di
17076 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17077 tree v4si_ftype_v4si
17078 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17079 tree v8hi_ftype_v8hi
17080 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17081 tree v16qi_ftype_v16qi
17082 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17083 tree v4sf_ftype_v4sf
17084 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17085 tree v2df_ftype_v2df
17086 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17087 tree void_ftype_pcvoid_int_int
17088 = build_function_type_list (void_type_node,
17089 pcvoid_type_node, integer_type_node,
17090 integer_type_node, NULL_TREE);
17091
17092 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17093 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17094 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17095 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17096 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17097 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17098 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17099 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17100 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17101 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17102 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17103 ALTIVEC_BUILTIN_LVXL_V2DF);
17104 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17105 ALTIVEC_BUILTIN_LVXL_V2DI);
17106 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17107 ALTIVEC_BUILTIN_LVXL_V4SF);
17108 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17109 ALTIVEC_BUILTIN_LVXL_V4SI);
17110 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17111 ALTIVEC_BUILTIN_LVXL_V8HI);
17112 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17113 ALTIVEC_BUILTIN_LVXL_V16QI);
17114 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17115 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17116 ALTIVEC_BUILTIN_LVX_V1TI);
17117 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17118 ALTIVEC_BUILTIN_LVX_V2DF);
17119 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17120 ALTIVEC_BUILTIN_LVX_V2DI);
17121 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17122 ALTIVEC_BUILTIN_LVX_V4SF);
17123 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17124 ALTIVEC_BUILTIN_LVX_V4SI);
17125 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17126 ALTIVEC_BUILTIN_LVX_V8HI);
17127 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17128 ALTIVEC_BUILTIN_LVX_V16QI);
17129 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17130 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17131 ALTIVEC_BUILTIN_STVX_V2DF);
17132 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17133 ALTIVEC_BUILTIN_STVX_V2DI);
17134 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17135 ALTIVEC_BUILTIN_STVX_V4SF);
17136 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17137 ALTIVEC_BUILTIN_STVX_V4SI);
17138 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17139 ALTIVEC_BUILTIN_STVX_V8HI);
17140 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17141 ALTIVEC_BUILTIN_STVX_V16QI);
17142 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17143 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17144 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17145 ALTIVEC_BUILTIN_STVXL_V2DF);
17146 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17147 ALTIVEC_BUILTIN_STVXL_V2DI);
17148 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17149 ALTIVEC_BUILTIN_STVXL_V4SF);
17150 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17151 ALTIVEC_BUILTIN_STVXL_V4SI);
17152 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17153 ALTIVEC_BUILTIN_STVXL_V8HI);
17154 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17155 ALTIVEC_BUILTIN_STVXL_V16QI);
17156 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17157 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17158 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17159 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17160 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17161 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17162 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17163 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17164 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17165 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17166 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17167 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17168 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17169 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17170 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17171 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17172
17173 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17174 VSX_BUILTIN_LXVD2X_V2DF);
17175 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17176 VSX_BUILTIN_LXVD2X_V2DI);
17177 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17178 VSX_BUILTIN_LXVW4X_V4SF);
17179 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17180 VSX_BUILTIN_LXVW4X_V4SI);
17181 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17182 VSX_BUILTIN_LXVW4X_V8HI);
17183 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17184 VSX_BUILTIN_LXVW4X_V16QI);
17185 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17186 VSX_BUILTIN_STXVD2X_V2DF);
17187 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17188 VSX_BUILTIN_STXVD2X_V2DI);
17189 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17190 VSX_BUILTIN_STXVW4X_V4SF);
17191 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17192 VSX_BUILTIN_STXVW4X_V4SI);
17193 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17194 VSX_BUILTIN_STXVW4X_V8HI);
17195 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17196 VSX_BUILTIN_STXVW4X_V16QI);
17197
17198 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17199 VSX_BUILTIN_LD_ELEMREV_V2DF);
17200 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17201 VSX_BUILTIN_LD_ELEMREV_V2DI);
17202 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17203 VSX_BUILTIN_LD_ELEMREV_V4SF);
17204 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17205 VSX_BUILTIN_LD_ELEMREV_V4SI);
17206 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17207 VSX_BUILTIN_LD_ELEMREV_V8HI);
17208 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17209 VSX_BUILTIN_LD_ELEMREV_V16QI);
17210 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17211 VSX_BUILTIN_ST_ELEMREV_V2DF);
17212 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17213 VSX_BUILTIN_ST_ELEMREV_V1TI);
17214 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17215 VSX_BUILTIN_ST_ELEMREV_V2DI);
17216 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17217 VSX_BUILTIN_ST_ELEMREV_V4SF);
17218 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17219 VSX_BUILTIN_ST_ELEMREV_V4SI);
17220 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17221 VSX_BUILTIN_ST_ELEMREV_V8HI);
17222 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17223 VSX_BUILTIN_ST_ELEMREV_V16QI);
17224
17225 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17226 VSX_BUILTIN_VEC_LD);
17227 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17228 VSX_BUILTIN_VEC_ST);
17229 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17230 VSX_BUILTIN_VEC_XL);
17231 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17232 VSX_BUILTIN_VEC_XL_BE);
17233 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17234 VSX_BUILTIN_VEC_XST);
17235 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17236 VSX_BUILTIN_VEC_XST_BE);
17237
17238 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17239 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17240 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17241
17242 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17243 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17244 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17245 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17246 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17247 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17248 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17249 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17250 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17251 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17252 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17253 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17254
17255 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17256 ALTIVEC_BUILTIN_VEC_ADDE);
17257 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17258 ALTIVEC_BUILTIN_VEC_ADDEC);
17259 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17260 ALTIVEC_BUILTIN_VEC_CMPNE);
17261 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17262 ALTIVEC_BUILTIN_VEC_MUL);
17263 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17264 ALTIVEC_BUILTIN_VEC_SUBE);
17265 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17266 ALTIVEC_BUILTIN_VEC_SUBEC);
17267
17268 /* Cell builtins. */
17269 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17270 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17271 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17272 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17273
17274 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17275 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17276 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17277 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17278
17279 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17280 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17281 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17282 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17283
17284 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17285 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17286 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17287 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17288
17289 if (TARGET_P9_VECTOR)
17290 {
17291 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17292 P9V_BUILTIN_STXVL);
17293 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17294 P9V_BUILTIN_XST_LEN_R);
17295 }
17296
17297 /* Add the DST variants. */
17298 d = bdesc_dst;
17299 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17300 {
17301 HOST_WIDE_INT mask = d->mask;
17302
17303 /* It is expected that these dst built-in functions may have
17304 d->icode equal to CODE_FOR_nothing. */
17305 if ((mask & builtin_mask) != mask)
17306 {
17307 if (TARGET_DEBUG_BUILTIN)
17308 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17309 d->name);
17310 continue;
17311 }
17312 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17313 }
17314
17315 /* Initialize the predicates. */
17316 d = bdesc_altivec_preds;
17317 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17318 {
17319 machine_mode mode1;
17320 tree type;
17321 HOST_WIDE_INT mask = d->mask;
17322
17323 if ((mask & builtin_mask) != mask)
17324 {
17325 if (TARGET_DEBUG_BUILTIN)
17326 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17327 d->name);
17328 continue;
17329 }
17330
17331 if (rs6000_overloaded_builtin_p (d->code))
17332 mode1 = VOIDmode;
17333 else
17334 {
17335 /* Cannot define builtin if the instruction is disabled. */
17336 gcc_assert (d->icode != CODE_FOR_nothing);
17337 mode1 = insn_data[d->icode].operand[1].mode;
17338 }
17339
17340 switch (mode1)
17341 {
17342 case E_VOIDmode:
17343 type = int_ftype_int_opaque_opaque;
17344 break;
17345 case E_V2DImode:
17346 type = int_ftype_int_v2di_v2di;
17347 break;
17348 case E_V4SImode:
17349 type = int_ftype_int_v4si_v4si;
17350 break;
17351 case E_V8HImode:
17352 type = int_ftype_int_v8hi_v8hi;
17353 break;
17354 case E_V16QImode:
17355 type = int_ftype_int_v16qi_v16qi;
17356 break;
17357 case E_V4SFmode:
17358 type = int_ftype_int_v4sf_v4sf;
17359 break;
17360 case E_V2DFmode:
17361 type = int_ftype_int_v2df_v2df;
17362 break;
17363 default:
17364 gcc_unreachable ();
17365 }
17366
17367 def_builtin (d->name, type, d->code);
17368 }
17369
17370 /* Initialize the abs* operators. */
17371 d = bdesc_abs;
17372 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17373 {
17374 machine_mode mode0;
17375 tree type;
17376 HOST_WIDE_INT mask = d->mask;
17377
17378 if ((mask & builtin_mask) != mask)
17379 {
17380 if (TARGET_DEBUG_BUILTIN)
17381 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17382 d->name);
17383 continue;
17384 }
17385
17386 /* Cannot define builtin if the instruction is disabled. */
17387 gcc_assert (d->icode != CODE_FOR_nothing);
17388 mode0 = insn_data[d->icode].operand[0].mode;
17389
17390 switch (mode0)
17391 {
17392 case E_V2DImode:
17393 type = v2di_ftype_v2di;
17394 break;
17395 case E_V4SImode:
17396 type = v4si_ftype_v4si;
17397 break;
17398 case E_V8HImode:
17399 type = v8hi_ftype_v8hi;
17400 break;
17401 case E_V16QImode:
17402 type = v16qi_ftype_v16qi;
17403 break;
17404 case E_V4SFmode:
17405 type = v4sf_ftype_v4sf;
17406 break;
17407 case E_V2DFmode:
17408 type = v2df_ftype_v2df;
17409 break;
17410 default:
17411 gcc_unreachable ();
17412 }
17413
17414 def_builtin (d->name, type, d->code);
17415 }
17416
17417 /* Initialize target builtin that implements
17418 targetm.vectorize.builtin_mask_for_load. */
17419
17420 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17421 v16qi_ftype_long_pcvoid,
17422 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17423 BUILT_IN_MD, NULL, NULL_TREE);
17424 TREE_READONLY (decl) = 1;
17425 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17426 altivec_builtin_mask_for_load = decl;
17427
17428 /* Access to the vec_init patterns. */
17429 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17430 integer_type_node, integer_type_node,
17431 integer_type_node, NULL_TREE);
17432 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17433
17434 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17435 short_integer_type_node,
17436 short_integer_type_node,
17437 short_integer_type_node,
17438 short_integer_type_node,
17439 short_integer_type_node,
17440 short_integer_type_node,
17441 short_integer_type_node, NULL_TREE);
17442 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17443
17444 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17445 char_type_node, char_type_node,
17446 char_type_node, char_type_node,
17447 char_type_node, char_type_node,
17448 char_type_node, char_type_node,
17449 char_type_node, char_type_node,
17450 char_type_node, char_type_node,
17451 char_type_node, char_type_node,
17452 char_type_node, NULL_TREE);
17453 def_builtin ("__builtin_vec_init_v16qi", ftype,
17454 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17455
17456 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17457 float_type_node, float_type_node,
17458 float_type_node, NULL_TREE);
17459 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17460
17461 /* VSX builtins. */
17462 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17463 double_type_node, NULL_TREE);
17464 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17465
17466 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17467 intDI_type_node, NULL_TREE);
17468 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17469
17470 /* Access to the vec_set patterns. */
17471 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17472 intSI_type_node,
17473 integer_type_node, NULL_TREE);
17474 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17475
17476 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17477 intHI_type_node,
17478 integer_type_node, NULL_TREE);
17479 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17480
17481 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17482 intQI_type_node,
17483 integer_type_node, NULL_TREE);
17484 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17485
17486 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17487 float_type_node,
17488 integer_type_node, NULL_TREE);
17489 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17490
17491 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17492 double_type_node,
17493 integer_type_node, NULL_TREE);
17494 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17495
17496 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17497 intDI_type_node,
17498 integer_type_node, NULL_TREE);
17499 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17500
17501 /* Access to the vec_extract patterns. */
17502 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17503 integer_type_node, NULL_TREE);
17504 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17505
17506 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17507 integer_type_node, NULL_TREE);
17508 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17509
17510 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17511 integer_type_node, NULL_TREE);
17512 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17513
17514 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17515 integer_type_node, NULL_TREE);
17516 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17517
17518 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17519 integer_type_node, NULL_TREE);
17520 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17521
17522 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17523 integer_type_node, NULL_TREE);
17524 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17525
17526
17527 if (V1TI_type_node)
17528 {
17529 tree v1ti_ftype_long_pcvoid
17530 = build_function_type_list (V1TI_type_node,
17531 long_integer_type_node, pcvoid_type_node,
17532 NULL_TREE);
17533 tree void_ftype_v1ti_long_pvoid
17534 = build_function_type_list (void_type_node,
17535 V1TI_type_node, long_integer_type_node,
17536 pvoid_type_node, NULL_TREE);
17537 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17538 VSX_BUILTIN_LD_ELEMREV_V1TI);
17539 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17540 VSX_BUILTIN_LXVD2X_V1TI);
17541 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17542 VSX_BUILTIN_STXVD2X_V1TI);
17543 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17544 NULL_TREE);
17545 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17546 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17547 intTI_type_node,
17548 integer_type_node, NULL_TREE);
17549 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17550 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17551 integer_type_node, NULL_TREE);
17552 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17553 }
17554
17555 }
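
/* A user-level sketch (illustrative, not compiled here) of the overloaded
   load/store builtins defined above, reached through <altivec.h>; compile
   with -maltivec, and note that vec_ld/vec_st require 16-byte aligned
   addresses.  */
#if 0
#include <altivec.h>

vector signed int
double_elements (const signed int *src, signed int *dst)
{
  vector signed int v = vec_ld (0, src);	/* __builtin_vec_ld  */
  v = vec_add (v, v);
  vec_st (v, 0, dst);				/* __builtin_vec_st  */
  return v;
}
#endif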
17556
17557 static void
17558 htm_init_builtins (void)
17559 {
17560 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17561 const struct builtin_description *d;
17562 size_t i;
17563
17564 d = bdesc_htm;
17565 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17566 {
17567 tree op[MAX_HTM_OPERANDS], type;
17568 HOST_WIDE_INT mask = d->mask;
17569 unsigned attr = rs6000_builtin_info[d->code].attr;
17570 bool void_func = (attr & RS6000_BTC_VOID);
17571 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17572 int nopnds = 0;
17573 tree gpr_type_node;
17574 tree rettype;
17575 tree argtype;
17576
17577 /* It is expected that these htm built-in functions may have
17578 d->icode equal to CODE_FOR_nothing. */
17579
17580 if (TARGET_32BIT && TARGET_POWERPC64)
17581 gpr_type_node = long_long_unsigned_type_node;
17582 else
17583 gpr_type_node = long_unsigned_type_node;
17584
17585 if (attr & RS6000_BTC_SPR)
17586 {
17587 rettype = gpr_type_node;
17588 argtype = gpr_type_node;
17589 }
17590 else if (d->code == HTM_BUILTIN_TABORTDC
17591 || d->code == HTM_BUILTIN_TABORTDCI)
17592 {
17593 rettype = unsigned_type_node;
17594 argtype = gpr_type_node;
17595 }
17596 else
17597 {
17598 rettype = unsigned_type_node;
17599 argtype = unsigned_type_node;
17600 }
17601
17602 if ((mask & builtin_mask) != mask)
17603 {
17604 if (TARGET_DEBUG_BUILTIN)
17605 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17606 continue;
17607 }
17608
17609 if (d->name == 0)
17610 {
17611 if (TARGET_DEBUG_BUILTIN)
17612 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17613 (long unsigned) i);
17614 continue;
17615 }
17616
17617 op[nopnds++] = (void_func) ? void_type_node : rettype;
17618
17619 if (attr_args == RS6000_BTC_UNARY)
17620 op[nopnds++] = argtype;
17621 else if (attr_args == RS6000_BTC_BINARY)
17622 {
17623 op[nopnds++] = argtype;
17624 op[nopnds++] = argtype;
17625 }
17626 else if (attr_args == RS6000_BTC_TERNARY)
17627 {
17628 op[nopnds++] = argtype;
17629 op[nopnds++] = argtype;
17630 op[nopnds++] = argtype;
17631 }
17632
17633 switch (nopnds)
17634 {
17635 case 1:
17636 type = build_function_type_list (op[0], NULL_TREE);
17637 break;
17638 case 2:
17639 type = build_function_type_list (op[0], op[1], NULL_TREE);
17640 break;
17641 case 3:
17642 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17643 break;
17644 case 4:
17645 type = build_function_type_list (op[0], op[1], op[2], op[3],
17646 NULL_TREE);
17647 break;
17648 default:
17649 gcc_unreachable ();
17650 }
17651
17652 def_builtin (d->name, type, d->code);
17653 }
17654 }
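
/* A user-level sketch of the HTM builtins set up above, following the
   begin/end pattern from the GCC manual; compile with -mhtm.  */
#if 0
#include <htmintrin.h>

int
increment_transactionally (long *counter)
{
  if (__builtin_tbegin (0))
    {
      /* Transaction state initiated.  */
      ++*counter;
      __builtin_tend (0);
      return 1;
    }
  /* Transaction state failed; a real caller would retry or fall back to a
     lock here.  */
  return 0;
}
#endif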
17655
17656 /* Hash function for builtin functions with up to 3 arguments and a return
17657 type. */
17658 hashval_t
17659 builtin_hasher::hash (builtin_hash_struct *bh)
17660 {
17661 unsigned ret = 0;
17662 int i;
17663
17664 for (i = 0; i < 4; i++)
17665 {
17666 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17667 ret = (ret * 2) + bh->uns_p[i];
17668 }
17669
17670 return ret;
17671 }
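
/* Worked form of the mixing above:

     ret = ((... * MAX_MACHINE_MODE + mode[i]) * 2) + uns_p[i],  i = 0..3

   i.e. a positional encoding of the four (mode, signedness) pairs.  The
   encoding can wrap around, so equality is still confirmed field by field
   by builtin_hasher::equal below.  */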
17672
17673 /* Compare builtin hash entries H1 and H2 for equivalence. */
17674 bool
17675 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17676 {
17677 return ((p1->mode[0] == p2->mode[0])
17678 && (p1->mode[1] == p2->mode[1])
17679 && (p1->mode[2] == p2->mode[2])
17680 && (p1->mode[3] == p2->mode[3])
17681 && (p1->uns_p[0] == p2->uns_p[0])
17682 && (p1->uns_p[1] == p2->uns_p[1])
17683 && (p1->uns_p[2] == p2->uns_p[2])
17684 && (p1->uns_p[3] == p2->uns_p[3]));
17685 }
17686
17687 /* Map types for builtin functions with an explicit return type and up to 3
17688 arguments.  Functions with fewer than 3 arguments pass VOIDmode for the
17689 modes of the unused arguments. */
17690 static tree
17691 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17692 machine_mode mode_arg1, machine_mode mode_arg2,
17693 enum rs6000_builtins builtin, const char *name)
17694 {
17695 struct builtin_hash_struct h;
17696 struct builtin_hash_struct *h2;
17697 int num_args = 3;
17698 int i;
17699 tree ret_type = NULL_TREE;
17700 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17701
17702 /* Create builtin_hash_table. */
17703 if (builtin_hash_table == NULL)
17704 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17705
17706 h.type = NULL_TREE;
17707 h.mode[0] = mode_ret;
17708 h.mode[1] = mode_arg0;
17709 h.mode[2] = mode_arg1;
17710 h.mode[3] = mode_arg2;
17711 h.uns_p[0] = 0;
17712 h.uns_p[1] = 0;
17713 h.uns_p[2] = 0;
17714 h.uns_p[3] = 0;
17715
17716 /* If the builtin function produces unsigned results or takes unsigned
17717 arguments, and its decl is handed to the vectorizer (such as the widening
17718 multiplies and permutes), make sure the arguments and return value have
17719 the correct signedness. */
17720 switch (builtin)
17721 {
17722 /* unsigned 1 argument functions. */
17723 case CRYPTO_BUILTIN_VSBOX:
17724 case CRYPTO_BUILTIN_VSBOX_BE:
17725 case P8V_BUILTIN_VGBBD:
17726 case MISC_BUILTIN_CDTBCD:
17727 case MISC_BUILTIN_CBCDTD:
17728 h.uns_p[0] = 1;
17729 h.uns_p[1] = 1;
17730 break;
17731
17732 /* unsigned 2 argument functions. */
17733 case ALTIVEC_BUILTIN_VMULEUB:
17734 case ALTIVEC_BUILTIN_VMULEUH:
17735 case P8V_BUILTIN_VMULEUW:
17736 case ALTIVEC_BUILTIN_VMULOUB:
17737 case ALTIVEC_BUILTIN_VMULOUH:
17738 case P8V_BUILTIN_VMULOUW:
17739 case CRYPTO_BUILTIN_VCIPHER:
17740 case CRYPTO_BUILTIN_VCIPHER_BE:
17741 case CRYPTO_BUILTIN_VCIPHERLAST:
17742 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17743 case CRYPTO_BUILTIN_VNCIPHER:
17744 case CRYPTO_BUILTIN_VNCIPHER_BE:
17745 case CRYPTO_BUILTIN_VNCIPHERLAST:
17746 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17747 case CRYPTO_BUILTIN_VPMSUMB:
17748 case CRYPTO_BUILTIN_VPMSUMH:
17749 case CRYPTO_BUILTIN_VPMSUMW:
17750 case CRYPTO_BUILTIN_VPMSUMD:
17751 case CRYPTO_BUILTIN_VPMSUM:
17752 case MISC_BUILTIN_ADDG6S:
17753 case MISC_BUILTIN_DIVWEU:
17754 case MISC_BUILTIN_DIVDEU:
17755 case VSX_BUILTIN_UDIV_V2DI:
17756 case ALTIVEC_BUILTIN_VMAXUB:
17757 case ALTIVEC_BUILTIN_VMINUB:
17758 case ALTIVEC_BUILTIN_VMAXUH:
17759 case ALTIVEC_BUILTIN_VMINUH:
17760 case ALTIVEC_BUILTIN_VMAXUW:
17761 case ALTIVEC_BUILTIN_VMINUW:
17762 case P8V_BUILTIN_VMAXUD:
17763 case P8V_BUILTIN_VMINUD:
17764 h.uns_p[0] = 1;
17765 h.uns_p[1] = 1;
17766 h.uns_p[2] = 1;
17767 break;
17768
17769 /* unsigned 3 argument functions. */
17770 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17771 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17772 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17773 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17774 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17775 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17776 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17777 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17778 case VSX_BUILTIN_VPERM_16QI_UNS:
17779 case VSX_BUILTIN_VPERM_8HI_UNS:
17780 case VSX_BUILTIN_VPERM_4SI_UNS:
17781 case VSX_BUILTIN_VPERM_2DI_UNS:
17782 case VSX_BUILTIN_XXSEL_16QI_UNS:
17783 case VSX_BUILTIN_XXSEL_8HI_UNS:
17784 case VSX_BUILTIN_XXSEL_4SI_UNS:
17785 case VSX_BUILTIN_XXSEL_2DI_UNS:
17786 case CRYPTO_BUILTIN_VPERMXOR:
17787 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17788 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17789 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17790 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17791 case CRYPTO_BUILTIN_VSHASIGMAW:
17792 case CRYPTO_BUILTIN_VSHASIGMAD:
17793 case CRYPTO_BUILTIN_VSHASIGMA:
17794 h.uns_p[0] = 1;
17795 h.uns_p[1] = 1;
17796 h.uns_p[2] = 1;
17797 h.uns_p[3] = 1;
17798 break;
17799
17800 /* signed permute functions with unsigned char mask. */
17801 case ALTIVEC_BUILTIN_VPERM_16QI:
17802 case ALTIVEC_BUILTIN_VPERM_8HI:
17803 case ALTIVEC_BUILTIN_VPERM_4SI:
17804 case ALTIVEC_BUILTIN_VPERM_4SF:
17805 case ALTIVEC_BUILTIN_VPERM_2DI:
17806 case ALTIVEC_BUILTIN_VPERM_2DF:
17807 case VSX_BUILTIN_VPERM_16QI:
17808 case VSX_BUILTIN_VPERM_8HI:
17809 case VSX_BUILTIN_VPERM_4SI:
17810 case VSX_BUILTIN_VPERM_4SF:
17811 case VSX_BUILTIN_VPERM_2DI:
17812 case VSX_BUILTIN_VPERM_2DF:
17813 h.uns_p[3] = 1;
17814 break;
17815
17816 /* unsigned args, signed return. */
17817 case VSX_BUILTIN_XVCVUXDSP:
17818 case VSX_BUILTIN_XVCVUXDDP_UNS:
17819 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17820 h.uns_p[1] = 1;
17821 break;
17822
17823 /* signed args, unsigned return. */
17824 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17825 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17826 case MISC_BUILTIN_UNPACK_TD:
17827 case MISC_BUILTIN_UNPACK_V1TI:
17828 h.uns_p[0] = 1;
17829 break;
17830
17831 /* unsigned arguments, bool return (compares). */
17832 case ALTIVEC_BUILTIN_VCMPEQUB:
17833 case ALTIVEC_BUILTIN_VCMPEQUH:
17834 case ALTIVEC_BUILTIN_VCMPEQUW:
17835 case P8V_BUILTIN_VCMPEQUD:
17836 case VSX_BUILTIN_CMPGE_U16QI:
17837 case VSX_BUILTIN_CMPGE_U8HI:
17838 case VSX_BUILTIN_CMPGE_U4SI:
17839 case VSX_BUILTIN_CMPGE_U2DI:
17840 case ALTIVEC_BUILTIN_VCMPGTUB:
17841 case ALTIVEC_BUILTIN_VCMPGTUH:
17842 case ALTIVEC_BUILTIN_VCMPGTUW:
17843 case P8V_BUILTIN_VCMPGTUD:
17844 h.uns_p[1] = 1;
17845 h.uns_p[2] = 1;
17846 break;
17847
17848 /* unsigned arguments for 128-bit pack instructions. */
17849 case MISC_BUILTIN_PACK_TD:
17850 case MISC_BUILTIN_PACK_V1TI:
17851 h.uns_p[1] = 1;
17852 h.uns_p[2] = 1;
17853 break;
17854
17855 /* unsigned second arguments (vector shift right). */
17856 case ALTIVEC_BUILTIN_VSRB:
17857 case ALTIVEC_BUILTIN_VSRH:
17858 case ALTIVEC_BUILTIN_VSRW:
17859 case P8V_BUILTIN_VSRD:
17860 h.uns_p[2] = 1;
17861 break;
17862
17863 default:
17864 break;
17865 }
17866
17867 /* Figure out how many args are present. */
17868 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17869 num_args--;
17870
17871 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17872 if (!ret_type && h.uns_p[0])
17873 ret_type = builtin_mode_to_type[h.mode[0]][0];
17874
17875 if (!ret_type)
17876 fatal_error (input_location,
17877 "internal error: builtin function %qs had an unexpected "
17878 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17879
17880 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17881 arg_type[i] = NULL_TREE;
17882
17883 for (i = 0; i < num_args; i++)
17884 {
17885 int m = (int) h.mode[i+1];
17886 int uns_p = h.uns_p[i+1];
17887
17888 arg_type[i] = builtin_mode_to_type[m][uns_p];
17889 if (!arg_type[i] && uns_p)
17890 arg_type[i] = builtin_mode_to_type[m][0];
17891
17892 if (!arg_type[i])
17893 fatal_error (input_location,
17894 "internal error: builtin function %qs, argument %d "
17895 "had unexpected argument type %qs", name, i,
17896 GET_MODE_NAME (m));
17897 }
17898
17899 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17900 if (*found == NULL)
17901 {
17902 h2 = ggc_alloc<builtin_hash_struct> ();
17903 *h2 = h;
17904 *found = h2;
17905
17906 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17907 arg_type[2], NULL_TREE);
17908 }
17909
17910 return (*found)->type;
17911 }
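
/* Worked example: the __builtin_recipdiv registration earlier in this file
   passes (DFmode, DFmode, DFmode, VOIDmode).  No case in the signedness
   switch applies, builtin_mode_to_type[DFmode][0] is double_type_node, and
   the cached type is equivalent to:  */
#if 0
  tree t = build_function_type_list (double_type_node, double_type_node,
				     double_type_node, NULL_TREE);
  /* i.e. double (*) (double, double).  */
#endif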
17912
17913 static void
17914 rs6000_common_init_builtins (void)
17915 {
17916 const struct builtin_description *d;
17917 size_t i;
17918
17919 tree opaque_ftype_opaque = NULL_TREE;
17920 tree opaque_ftype_opaque_opaque = NULL_TREE;
17921 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17922 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17923
17924 /* Create Altivec and VSX builtins on machines with at least the
17925 general purpose extensions (970 and newer) to allow the use of
17926 the target attribute. */
17927
17928 if (TARGET_EXTRA_BUILTINS)
17929 builtin_mask |= RS6000_BTM_COMMON;
17930
17931 /* Add the ternary operators. */
17932 d = bdesc_3arg;
17933 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17934 {
17935 tree type;
17936 HOST_WIDE_INT mask = d->mask;
17937
17938 if ((mask & builtin_mask) != mask)
17939 {
17940 if (TARGET_DEBUG_BUILTIN)
17941 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17942 continue;
17943 }
17944
17945 if (rs6000_overloaded_builtin_p (d->code))
17946 {
17947 if (! (type = opaque_ftype_opaque_opaque_opaque))
17948 type = opaque_ftype_opaque_opaque_opaque
17949 = build_function_type_list (opaque_V4SI_type_node,
17950 opaque_V4SI_type_node,
17951 opaque_V4SI_type_node,
17952 opaque_V4SI_type_node,
17953 NULL_TREE);
17954 }
17955 else
17956 {
17957 enum insn_code icode = d->icode;
17958 if (d->name == 0)
17959 {
17960 if (TARGET_DEBUG_BUILTIN)
17961 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17962 (long unsigned)i);
17963
17964 continue;
17965 }
17966
17967 if (icode == CODE_FOR_nothing)
17968 {
17969 if (TARGET_DEBUG_BUILTIN)
17970 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17971 d->name);
17972
17973 continue;
17974 }
17975
17976 type = builtin_function_type (insn_data[icode].operand[0].mode,
17977 insn_data[icode].operand[1].mode,
17978 insn_data[icode].operand[2].mode,
17979 insn_data[icode].operand[3].mode,
17980 d->code, d->name);
17981 }
17982
17983 def_builtin (d->name, type, d->code);
17984 }
17985
17986 /* Add the binary operators. */
17987 d = bdesc_2arg;
17988 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17989 {
17990 machine_mode mode0, mode1, mode2;
17991 tree type;
17992 HOST_WIDE_INT mask = d->mask;
17993
17994 if ((mask & builtin_mask) != mask)
17995 {
17996 if (TARGET_DEBUG_BUILTIN)
17997 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17998 continue;
17999 }
18000
18001 if (rs6000_overloaded_builtin_p (d->code))
18002 {
18003 if (! (type = opaque_ftype_opaque_opaque))
18004 type = opaque_ftype_opaque_opaque
18005 = build_function_type_list (opaque_V4SI_type_node,
18006 opaque_V4SI_type_node,
18007 opaque_V4SI_type_node,
18008 NULL_TREE);
18009 }
18010 else
18011 {
18012 enum insn_code icode = d->icode;
18013 if (d->name == 0)
18014 {
18015 if (TARGET_DEBUG_BUILTIN)
18016 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
18017 (long unsigned)i);
18018
18019 continue;
18020 }
18021
18022 if (icode == CODE_FOR_nothing)
18023 {
18024 if (TARGET_DEBUG_BUILTIN)
18025 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18026 d->name);
18027
18028 continue;
18029 }
18030
18031 mode0 = insn_data[icode].operand[0].mode;
18032 mode1 = insn_data[icode].operand[1].mode;
18033 mode2 = insn_data[icode].operand[2].mode;
18034
18035 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18036 d->code, d->name);
18037 }
18038
18039 def_builtin (d->name, type, d->code);
18040 }
18041
18042 /* Add the simple unary operators. */
18043 d = bdesc_1arg;
18044 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18045 {
18046 machine_mode mode0, mode1;
18047 tree type;
18048 HOST_WIDE_INT mask = d->mask;
18049
18050 if ((mask & builtin_mask) != mask)
18051 {
18052 if (TARGET_DEBUG_BUILTIN)
18053 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18054 continue;
18055 }
18056
18057 if (rs6000_overloaded_builtin_p (d->code))
18058 {
18059 if (! (type = opaque_ftype_opaque))
18060 type = opaque_ftype_opaque
18061 = build_function_type_list (opaque_V4SI_type_node,
18062 opaque_V4SI_type_node,
18063 NULL_TREE);
18064 }
18065 else
18066 {
18067 enum insn_code icode = d->icode;
18068 if (d->name == 0)
18069 {
18070 if (TARGET_DEBUG_BUILTIN)
18071 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
18072 (long unsigned)i);
18073
18074 continue;
18075 }
18076
18077 if (icode == CODE_FOR_nothing)
18078 {
18079 if (TARGET_DEBUG_BUILTIN)
18080 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18081 d->name);
18082
18083 continue;
18084 }
18085
18086 mode0 = insn_data[icode].operand[0].mode;
18087 mode1 = insn_data[icode].operand[1].mode;
18088
18089 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18090 d->code, d->name);
18091 }
18092
18093 def_builtin (d->name, type, d->code);
18094 }
18095
18096 /* Add the simple no-argument operators. */
18097 d = bdesc_0arg;
18098 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18099 {
18100 machine_mode mode0;
18101 tree type;
18102 HOST_WIDE_INT mask = d->mask;
18103
18104 if ((mask & builtin_mask) != mask)
18105 {
18106 if (TARGET_DEBUG_BUILTIN)
18107 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18108 continue;
18109 }
18110 if (rs6000_overloaded_builtin_p (d->code))
18111 {
18112 if (!opaque_ftype_opaque)
18113 opaque_ftype_opaque
18114 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18115 type = opaque_ftype_opaque;
18116 }
18117 else
18118 {
18119 enum insn_code icode = d->icode;
18120 if (d->name == 0)
18121 {
18122 if (TARGET_DEBUG_BUILTIN)
18123 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18124 (long unsigned) i);
18125 continue;
18126 }
18127 if (icode == CODE_FOR_nothing)
18128 {
18129 if (TARGET_DEBUG_BUILTIN)
18130 fprintf (stderr,
18131 "rs6000_builtin, skip no-argument %s (no code)\n",
18132 d->name);
18133 continue;
18134 }
18135 mode0 = insn_data[icode].operand[0].mode;
18136 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18137 d->code, d->name);
18138 }
18139 def_builtin (d->name, type, d->code);
18140 }
18141 }
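
/* Usage note (a sketch; -mdebug= is an internal rs6000 developer option,
   and the exact spelling below is an assumption worth checking against
   rs6000.opt): the TARGET_DEBUG_BUILTIN traces in the loops above can be
   enabled with

     gcc -S -mcpu=power8 -mdebug=builtin test.c

   to list each descriptor skipped by the builtin_mask tests.  */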
18142
18143 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18144 static void
18145 init_float128_ibm (machine_mode mode)
18146 {
18147 if (!TARGET_XL_COMPAT)
18148 {
18149 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18150 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18151 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18152 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18153
18154 if (!TARGET_HARD_FLOAT)
18155 {
18156 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18157 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18158 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18159 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18160 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18161 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18162 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18163 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18164
18165 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18166 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18167 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18168 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18169 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18170 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18171 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18172 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18173 }
18174 }
18175 else
18176 {
18177 set_optab_libfunc (add_optab, mode, "_xlqadd");
18178 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18179 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18180 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18181 }
18182
18183 /* Add various conversions for IFmode to use the traditional TFmode
18184 names. */
18185 if (mode == IFmode)
18186 {
18187 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18188 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18189 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18190 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18191 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18192 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18193
18194 if (TARGET_POWERPC64)
18195 {
18196 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18197 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18198 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18199 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18200 }
18201 }
18202 }
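
/* An illustrative sketch of the non-XL libfuncs above: with IBM
   double-double long double (-mlong-double-128 -mabi=ibmlongdouble), a
   plain addition lowers to a call to libgcc's __gcc_qadd.  */
#if 0
long double
ld_add (long double a, long double b)
{
  return a + b;		/* emits: bl __gcc_qadd  */
}
#endif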
18203
18204 /* Create a decl for either complex long double multiply or complex long double
18205 divide when long double is IEEE 128-bit floating point.  We can't use
18206 __multc3 and __divtc3 because those names were already taken by the original
18207 IBM-extended-double form of long double.  The complex multiply/divide
18208 functions are encoded as builtin functions with a complex result and 4 scalar inputs. */
18209
18210 static void
18211 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18212 {
18213 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18214 name, NULL_TREE);
18215
18216 set_builtin_decl (fncode, fndecl, true);
18217
18218 if (TARGET_DEBUG_BUILTIN)
18219 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18220
18221 return;
18222 }
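
/* An illustrative sketch of what the renamed decls buy: under
   -mabi=ieeelongdouble a complex long double product must reach __mulkc3,
   since __multc3 already belongs to the IBM-format long double.  */
#if 0
_Complex long double
c_mul (_Complex long double x, _Complex long double y)
{
  return x * y;		/* lowers to: bl __mulkc3  */
}
#endif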
18223
18224 /* Set up IEEE 128-bit floating point routines. Use different names if the
18225 arguments can be passed in a vector register. The historical PowerPC
18226 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18227 continue to use that if we aren't using vector registers to pass IEEE
18228 128-bit floating point. */
18229
18230 static void
18231 init_float128_ieee (machine_mode mode)
18232 {
18233 if (FLOAT128_VECTOR_P (mode))
18234 {
18235 static bool complex_muldiv_init_p = false;
18236
18237 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18238 we have clone or target attributes, this will be called a second
18239 time. We want to create the built-in function only once. */
18240 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18241 {
18242 complex_muldiv_init_p = true;
18243 built_in_function fncode_mul =
18244 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18245 - MIN_MODE_COMPLEX_FLOAT);
18246 built_in_function fncode_div =
18247 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18248 - MIN_MODE_COMPLEX_FLOAT);
18249
18250 tree fntype = build_function_type_list (complex_long_double_type_node,
18251 long_double_type_node,
18252 long_double_type_node,
18253 long_double_type_node,
18254 long_double_type_node,
18255 NULL_TREE);
18256
18257 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18258 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18259 }
18260
18261 set_optab_libfunc (add_optab, mode, "__addkf3");
18262 set_optab_libfunc (sub_optab, mode, "__subkf3");
18263 set_optab_libfunc (neg_optab, mode, "__negkf2");
18264 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18265 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18266 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18267 set_optab_libfunc (abs_optab, mode, "__abskf2");
18268 set_optab_libfunc (powi_optab, mode, "__powikf2");
18269
18270 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18271 set_optab_libfunc (ne_optab, mode, "__nekf2");
18272 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18273 set_optab_libfunc (ge_optab, mode, "__gekf2");
18274 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18275 set_optab_libfunc (le_optab, mode, "__lekf2");
18276 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18277
18278 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18279 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18280 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18281 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18282
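/* IBM extended double <-> IEEE 128-bit conversions are neither pure
   extensions nor pure truncations (neither format's value set contains the
   other's), but registering them on the sext/trunc optabs lets the generic
   conversion machinery find libgcc's __trunctfkf2 and __extendkftf2.  */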
18283 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18284 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18285 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18286
18287 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18288 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18289 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18290
18291 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18292 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18293 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18294 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18295 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18296 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18297
18298 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18299 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18300 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18301 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18302
18303 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18304 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18305 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18306 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18307
18308 if (TARGET_POWERPC64)
18309 {
18310 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18311 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18312 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18313 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18314 }
18315 }
18316
18317 else
18318 {
18319 set_optab_libfunc (add_optab, mode, "_q_add");
18320 set_optab_libfunc (sub_optab, mode, "_q_sub");
18321 set_optab_libfunc (neg_optab, mode, "_q_neg");
18322 set_optab_libfunc (smul_optab, mode, "_q_mul");
18323 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18324 if (TARGET_PPC_GPOPT)
18325 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18326
18327 set_optab_libfunc (eq_optab, mode, "_q_feq");
18328 set_optab_libfunc (ne_optab, mode, "_q_fne");
18329 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18330 set_optab_libfunc (ge_optab, mode, "_q_fge");
18331 set_optab_libfunc (lt_optab, mode, "_q_flt");
18332 set_optab_libfunc (le_optab, mode, "_q_fle");
18333
18334 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18335 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18336 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18337 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18338 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18339 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18340 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18341 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18342 }
18343 }
18344
18345 static void
18346 rs6000_init_libfuncs (void)
18347 {
18348 /* __float128 support. */
18349 if (TARGET_FLOAT128_TYPE)
18350 {
18351 init_float128_ibm (IFmode);
18352 init_float128_ieee (KFmode);
18353 }
18354
18355 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18356 if (TARGET_LONG_DOUBLE_128)
18357 {
18358 if (!TARGET_IEEEQUAD)
18359 init_float128_ibm (TFmode);
18360
18361 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18362 else
18363 init_float128_ieee (TFmode);
18364 }
18365 }
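
/* Illustrative sketch (hypothetical user code, not part of this file): with
   the mappings registered above, IEEE 128-bit arithmetic on a target without
   hardware __float128 support lowers to libgcc calls, roughly:

     __float128
     scale (__float128 a, __float128 b)
     {
       return a * b + a;   -- emitted as calls to __mulkf3 and __addkf3
     }
*/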
18366
18367 /* Emit a potentially record-form instruction, setting DST from SRC.
18368 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18369 signed comparison of DST with zero. If DOT is 1, the generated RTL
18370 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18371 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18372 a separate COMPARE. */
18373
18374 void
18375 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18376 {
18377 if (dot == 0)
18378 {
18379 emit_move_insn (dst, src);
18380 return;
18381 }
18382
18383 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18384 {
18385 emit_move_insn (dst, src);
18386 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18387 return;
18388 }
18389
18390 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18391 if (dot == 1)
18392 {
18393 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18394 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18395 }
18396 else
18397 {
18398 rtx set = gen_rtx_SET (dst, src);
18399 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18400 }
18401 }
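
/* Illustrative sketch (assumed RTL shapes, not from the original source):
   for DOT == 1 with CCREG being CR0, the PARALLEL built above looks like

     (parallel [(set (reg:CC CR0)
		     (compare:CC (and:DI (reg:DI a) (reg:DI b))
				 (const_int 0)))
		(clobber (reg:DI dst))])

   which matches a single record-form insn such as "and.", while the
   cc_reg_not_cr0_operand path above falls back to a plain SET followed by a
   separate COMPARE.  */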
18402
18403 \f
18404 /* A validation routine: say whether CODE, a condition code, and MODE
18405 match. The other alternatives either don't make sense or should
18406 never be generated. */
18407
18408 void
18409 validate_condition_mode (enum rtx_code code, machine_mode mode)
18410 {
18411 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18412 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18413 && GET_MODE_CLASS (mode) == MODE_CC);
18414
18415 /* These don't make sense. */
18416 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18417 || mode != CCUNSmode);
18418
18419 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18420 || mode == CCUNSmode);
18421
18422 gcc_assert (mode == CCFPmode
18423 || (code != ORDERED && code != UNORDERED
18424 && code != UNEQ && code != LTGT
18425 && code != UNGT && code != UNLT
18426 && code != UNGE && code != UNLE));
18427
18428 /* These should never be generated except for
18429 flag_finite_math_only. */
18430 gcc_assert (mode != CCFPmode
18431 || flag_finite_math_only
18432 || (code != LE && code != GE
18433 && code != UNEQ && code != LTGT
18434 && code != UNGT && code != UNLT));
18435
18436 /* These are invalid; the information is not there. */
18437 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18438 }
18439
18440 \f
18441 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18442 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18443 not zero, store there the bit offset (counted from the right) where
18444 the single stretch of 1 bits begins; and similarly for B, the bit
18445 offset where it ends. */
18446
18447 bool
18448 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18449 {
18450 unsigned HOST_WIDE_INT val = INTVAL (mask);
18451 unsigned HOST_WIDE_INT bit;
18452 int nb, ne;
18453 int n = GET_MODE_PRECISION (mode);
18454
18455 if (mode != DImode && mode != SImode)
18456 return false;
18457
18458 if (INTVAL (mask) >= 0)
18459 {
18460 bit = val & -val;
18461 ne = exact_log2 (bit);
18462 nb = exact_log2 (val + bit);
18463 }
18464 else if (val + 1 == 0)
18465 {
18466 nb = n;
18467 ne = 0;
18468 }
18469 else if (val & 1)
18470 {
18471 val = ~val;
18472 bit = val & -val;
18473 nb = exact_log2 (bit);
18474 ne = exact_log2 (val + bit);
18475 }
18476 else
18477 {
18478 bit = val & -val;
18479 ne = exact_log2 (bit);
18480 if (val + bit == 0)
18481 nb = n;
18482 else
18483 nb = 0;
18484 }
18485
18486 nb--;
18487
18488 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18489 return false;
18490
18491 if (b)
18492 *b = nb;
18493 if (e)
18494 *e = ne;
18495
18496 return true;
18497 }
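
/* Minimal standalone sketch (not part of GCC) of the bit trick used above:
   VAL & -VAL isolates the lowest set bit, and adding that bit to VAL clears
   the lowest run of ones, so VAL is a single non-wrapping stretch of ones
   exactly when the sum is zero or a power of two:

     static int
     one_run_p (unsigned long long val)
     {
       unsigned long long low = val & -val;    -- lowest set bit
       return val != 0 && ((val + low) & (val + low - 1)) == 0;
     }

   e.g. 0x00ff00 passes (0x00ff00 + 0x100 == 0x10000, a power of two) while
   0xff00ff fails; the function above additionally accepts masks whose run
   wraps around, via the VAL & 1 branch.  */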
18498
18499 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18500 or rldicr instruction, to implement an AND with it in mode MODE. */
18501
18502 bool
18503 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18504 {
18505 int nb, ne;
18506
18507 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18508 return false;
18509
18510 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18511 does not wrap. */
18512 if (mode == DImode)
18513 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18514
18515 /* For SImode, rlwinm can do everything. */
18516 if (mode == SImode)
18517 return (nb < 32 && ne < 32);
18518
18519 return false;
18520 }
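
/* Worked examples (illustrative, not from the original source): in DImode,
   0xffffffff00000000 gives nb == 63, ne == 32 and is accepted, since a
   single rldicr can clear the low 32 bits; 0x00ffffff00000000 gives
   nb == 55, ne == 32 and is rejected, because it is a valid single-run mask
   but no single rldicl, rldicr, or rlwinm implements that AND.  */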
18521
18522 /* Return the instruction template for an AND with mask in mode MODE, with
18523 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18524
18525 const char *
18526 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18527 {
18528 int nb, ne;
18529
18530 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18531 gcc_unreachable ();
18532
18533 if (mode == DImode && ne == 0)
18534 {
18535 operands[3] = GEN_INT (63 - nb);
18536 if (dot)
18537 return "rldicl. %0,%1,0,%3";
18538 return "rldicl %0,%1,0,%3";
18539 }
18540
18541 if (mode == DImode && nb == 63)
18542 {
18543 operands[3] = GEN_INT (63 - ne);
18544 if (dot)
18545 return "rldicr. %0,%1,0,%3";
18546 return "rldicr %0,%1,0,%3";
18547 }
18548
18549 if (nb < 32 && ne < 32)
18550 {
18551 operands[3] = GEN_INT (31 - nb);
18552 operands[4] = GEN_INT (31 - ne);
18553 if (dot)
18554 return "rlwinm. %0,%1,0,%3,%4";
18555 return "rlwinm %0,%1,0,%3,%4";
18556 }
18557
18558 gcc_unreachable ();
18559 }
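
/* Illustrative example (not from the original source): an AND with
   0x00000000ffffffff in DImode has ne == 0, nb == 31, so the code above
   emits "rldicl %0,%1,0,32", i.e. rotate by zero and clear the upper 32
   bits.  */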
18560
18561 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18562 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18563 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18564
18565 bool
18566 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18567 {
18568 int nb, ne;
18569
18570 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18571 return false;
18572
18573 int n = GET_MODE_PRECISION (mode);
18574 int sh = -1;
18575
18576 if (CONST_INT_P (XEXP (shift, 1)))
18577 {
18578 sh = INTVAL (XEXP (shift, 1));
18579 if (sh < 0 || sh >= n)
18580 return false;
18581 }
18582
18583 rtx_code code = GET_CODE (shift);
18584
18585 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18586 if (sh == 0)
18587 code = ROTATE;
18588
18589 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18590 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18591 code = ASHIFT;
18592 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18593 {
18594 code = LSHIFTRT;
18595 sh = n - sh;
18596 }
18597
18598 /* DImode rotates need rld*. */
18599 if (mode == DImode && code == ROTATE)
18600 return (nb == 63 || ne == 0 || ne == sh);
18601
18602 /* SImode rotates need rlw*. */
18603 if (mode == SImode && code == ROTATE)
18604 return (nb < 32 && ne < 32 && sh < 32);
18605
18606 /* Wrap-around masks are only okay for rotates. */
18607 if (ne > nb)
18608 return false;
18609
18610 /* Variable shifts are only okay for rotates. */
18611 if (sh < 0)
18612 return false;
18613
18614 /* Don't allow ASHIFT if the mask is wrong for that. */
18615 if (code == ASHIFT && ne < sh)
18616 return false;
18617
18618 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18619 if the mask is wrong for that. */
18620 if (nb < 32 && ne < 32 && sh < 32
18621 && !(code == LSHIFTRT && nb >= 32 - sh))
18622 return true;
18623
18624 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18625 if the mask is wrong for that. */
18626 if (code == LSHIFTRT)
18627 sh = 64 - sh;
18628 if (nb == 63 || ne == 0 || ne == sh)
18629 return !(code == LSHIFTRT && nb >= sh);
18630
18631 return false;
18632 }
18633
18634 /* Return the instruction template for a shift with mask in mode MODE, with
18635 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18636
18637 const char *
18638 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18639 {
18640 int nb, ne;
18641
18642 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18643 gcc_unreachable ();
18644
18645 if (mode == DImode && ne == 0)
18646 {
18647 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18648 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18649 operands[3] = GEN_INT (63 - nb);
18650 if (dot)
18651 return "rld%I2cl. %0,%1,%2,%3";
18652 return "rld%I2cl %0,%1,%2,%3";
18653 }
18654
18655 if (mode == DImode && nb == 63)
18656 {
18657 operands[3] = GEN_INT (63 - ne);
18658 if (dot)
18659 return "rld%I2cr. %0,%1,%2,%3";
18660 return "rld%I2cr %0,%1,%2,%3";
18661 }
18662
18663 if (mode == DImode
18664 && GET_CODE (operands[4]) != LSHIFTRT
18665 && CONST_INT_P (operands[2])
18666 && ne == INTVAL (operands[2]))
18667 {
18668 operands[3] = GEN_INT (63 - nb);
18669 if (dot)
18670 return "rld%I2c. %0,%1,%2,%3";
18671 return "rld%I2c %0,%1,%2,%3";
18672 }
18673
18674 if (nb < 32 && ne < 32)
18675 {
18676 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18677 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18678 operands[3] = GEN_INT (31 - nb);
18679 operands[4] = GEN_INT (31 - ne);
18680 /* This insn can also be a 64-bit rotate with mask that really makes
18681 it just a shift right (with mask); the %h below are to adjust for
18682 that situation (shift count is >= 32 in that case). */
18683 if (dot)
18684 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18685 return "rlw%I2nm %0,%1,%h2,%3,%4";
18686 }
18687
18688 gcc_unreachable ();
18689 }
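
/* Illustrative example (not from the original source): for the SImode
   expression (x << 16) & 0xffff0000 the mask gives nb == 31, ne == 16, so
   the code above emits "rlwinm %0,%1,16,0,15": a rotate left by 16 under a
   mask keeping bits 0-15 (big-endian numbering), which is exactly the shift
   plus AND.  */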
18690
18691 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18692 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18693 ASHIFT, or LSHIFTRT) in mode MODE. */
18694
18695 bool
18696 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18697 {
18698 int nb, ne;
18699
18700 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18701 return false;
18702
18703 int n = GET_MODE_PRECISION (mode);
18704
18705 int sh = INTVAL (XEXP (shift, 1));
18706 if (sh < 0 || sh >= n)
18707 return false;
18708
18709 rtx_code code = GET_CODE (shift);
18710
18711 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18712 if (sh == 0)
18713 code = ROTATE;
18714
18715 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18716 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18717 code = ASHIFT;
18718 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18719 {
18720 code = LSHIFTRT;
18721 sh = n - sh;
18722 }
18723
18724 /* DImode rotates need rldimi. */
18725 if (mode == DImode && code == ROTATE)
18726 return (ne == sh);
18727
18728 /* SImode rotates need rlwimi. */
18729 if (mode == SImode && code == ROTATE)
18730 return (nb < 32 && ne < 32 && sh < 32);
18731
18732 /* Wrap-around masks are only okay for rotates. */
18733 if (ne > nb)
18734 return false;
18735
18736 /* Don't allow ASHIFT if the mask is wrong for that. */
18737 if (code == ASHIFT && ne < sh)
18738 return false;
18739
18740 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18741 if the mask is wrong for that. */
18742 if (nb < 32 && ne < 32 && sh < 32
18743 && !(code == LSHIFTRT && nb >= 32 - sh))
18744 return true;
18745
18746 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18747 if the mask is wrong for that. */
18748 if (code == LSHIFTRT)
18749 sh = 64 - sh;
18750 if (ne == sh)
18751 return !(code == LSHIFTRT && nb >= sh);
18752
18753 return false;
18754 }
18755
18756 /* Return the instruction template for an insert with mask in mode MODE, with
18757 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18758
18759 const char *
18760 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18761 {
18762 int nb, ne;
18763
18764 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18765 gcc_unreachable ();
18766
18767 /* Prefer rldimi because rlwimi is cracked. */
18768 if (TARGET_POWERPC64
18769 && (!dot || mode == DImode)
18770 && GET_CODE (operands[4]) != LSHIFTRT
18771 && ne == INTVAL (operands[2]))
18772 {
18773 operands[3] = GEN_INT (63 - nb);
18774 if (dot)
18775 return "rldimi. %0,%1,%2,%3";
18776 return "rldimi %0,%1,%2,%3";
18777 }
18778
18779 if (nb < 32 && ne < 32)
18780 {
18781 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18782 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18783 operands[3] = GEN_INT (31 - nb);
18784 operands[4] = GEN_INT (31 - ne);
18785 if (dot)
18786 return "rlwimi. %0,%1,%2,%3,%4";
18787 return "rlwimi %0,%1,%2,%3,%4";
18788 }
18789
18790 gcc_unreachable ();
18791 }
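
/* Illustrative example (not from the original source): inserting the low
   byte of one register into another, (a & ~0xff) | (b & 0xff) in SImode,
   has nb == 7, ne == 0 with a shift of 0, so the code above emits
   "rlwimi %0,%1,0,24,31" (or the equivalent rldimi form on 64-bit
   targets), replacing only mask bits 24-31 of the destination.  */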
18792
18793 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18794 using two machine instructions. */
18795
18796 bool
18797 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18798 {
18799 /* There are two kinds of AND we can handle with two insns:
18800 1) those we can do with two rl* insns;
18801 2) ori[s];xori[s].
18802
18803 We do not handle that last case yet. */
18804
18805 /* If there is just one stretch of ones, we can do it. */
18806 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18807 return true;
18808
18809 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18810 one insn, we can do the whole thing with two. */
18811 unsigned HOST_WIDE_INT val = INTVAL (c);
18812 unsigned HOST_WIDE_INT bit1 = val & -val;
18813 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18814 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18815 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18816 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18817 }
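
/* Worked example (illustrative, not from the original source): for
   C == 0xf0f0 we get bit1 == 0x10 (lowest set bit), bit2 == 0x100 (lowest
   bit of the lowest hole), and bit3 == 0x1000 (where the ones resume), so
   the filled-in value is 0xf0f0 + 0x1000 - 0x100 == 0xfff0, a single run
   of ones; hence an AND with 0xf0f0 can be done in two rl* insns.  */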
18818
18819 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18820 If EXPAND is true, split rotate-and-mask instructions we generate to
18821 their constituent parts as well (this is used during expand); if DOT
18822 is 1, make the last insn a record-form instruction clobbering the
18823 destination GPR and setting the CC reg (from operands[3]); if 2, set
18824 that GPR as well as the CC reg. */
18825
18826 void
18827 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18828 {
18829 gcc_assert (!(expand && dot));
18830
18831 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18832
18833 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18834 shift right. This generates better code than doing the masks without
18835 shifts, or shifting first right and then left. */
18836 int nb, ne;
18837 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18838 {
18839 gcc_assert (mode == DImode);
18840
18841 int shift = 63 - nb;
18842 if (expand)
18843 {
18844 rtx tmp1 = gen_reg_rtx (DImode);
18845 rtx tmp2 = gen_reg_rtx (DImode);
18846 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18847 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18848 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18849 }
18850 else
18851 {
18852 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18853 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18854 emit_move_insn (operands[0], tmp);
18855 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18856 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18857 }
18858 return;
18859 }
18860
18861 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18862 that does the rest. */
18863 unsigned HOST_WIDE_INT bit1 = val & -val;
18864 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18865 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18866 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18867
18868 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18869 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18870
18871 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18872
18873 /* Two "no-rotate"-and-mask instructions, for SImode. */
18874 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18875 {
18876 gcc_assert (mode == SImode);
18877
18878 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18879 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18880 emit_move_insn (reg, tmp);
18881 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18882 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18883 return;
18884 }
18885
18886 gcc_assert (mode == DImode);
18887
18888 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18889 insns; we have to do the first in SImode, because it wraps. */
18890 if (mask2 <= 0xffffffff
18891 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18892 {
18893 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18894 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18895 GEN_INT (mask1));
18896 rtx reg_low = gen_lowpart (SImode, reg);
18897 emit_move_insn (reg_low, tmp);
18898 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18899 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18900 return;
18901 }
18902
18903 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18904 at the top end), rotate back and clear the other hole. */
18905 int right = exact_log2 (bit3);
18906 int left = 64 - right;
18907
18908 /* Rotate the mask too. */
18909 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18910
18911 if (expand)
18912 {
18913 rtx tmp1 = gen_reg_rtx (DImode);
18914 rtx tmp2 = gen_reg_rtx (DImode);
18915 rtx tmp3 = gen_reg_rtx (DImode);
18916 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18917 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18918 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18919 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18920 }
18921 else
18922 {
18923 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18924 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18925 emit_move_insn (operands[0], tmp);
18926 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18927 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18928 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18929 }
18930 }
18931 \f
18932 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18933 for lfq and stfq insns, iff both are hard floating point registers. */
18934
18935 int
18936 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18937 {
18938 /* We might have been passed a SUBREG. */
18939 if (!REG_P (reg1) || !REG_P (reg2))
18940 return 0;
18941
18942 /* We might have been passed non floating point registers. */
18943 if (!FP_REGNO_P (REGNO (reg1))
18944 || !FP_REGNO_P (REGNO (reg2)))
18945 return 0;
18946
18947 return (REGNO (reg1) == REGNO (reg2) - 1);
18948 }
18949
18950 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18951 addr1 and addr2 must be in consecutive memory locations
18952 (addr2 == addr1 + 8). */
18953
18954 int
18955 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18956 {
18957 rtx addr1, addr2;
18958 unsigned int reg1, reg2;
18959 int offset1, offset2;
18960
18961 /* The mems cannot be volatile. */
18962 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18963 return 0;
18964
18965 addr1 = XEXP (mem1, 0);
18966 addr2 = XEXP (mem2, 0);
18967
18968 /* Extract an offset (if used) from the first addr. */
18969 if (GET_CODE (addr1) == PLUS)
18970 {
18971 /* If not a REG, return zero. */
18972 if (!REG_P (XEXP (addr1, 0)))
18973 return 0;
18974 else
18975 {
18976 reg1 = REGNO (XEXP (addr1, 0));
18977 /* The offset must be constant! */
18978 if (!CONST_INT_P (XEXP (addr1, 1)))
18979 return 0;
18980 offset1 = INTVAL (XEXP (addr1, 1));
18981 }
18982 }
18983 else if (!REG_P (addr1))
18984 return 0;
18985 else
18986 {
18987 reg1 = REGNO (addr1);
18988 /* This was a simple (mem (reg)) expression. Offset is 0. */
18989 offset1 = 0;
18990 }
18991
18992 /* And now for the second addr. */
18993 if (GET_CODE (addr2) == PLUS)
18994 {
18995 /* If not a REG, return zero. */
18996 if (!REG_P (XEXP (addr2, 0)))
18997 return 0;
18998 else
18999 {
19000 reg2 = REGNO (XEXP (addr2, 0));
19001 /* The offset must be constant. */
19002 if (!CONST_INT_P (XEXP (addr2, 1)))
19003 return 0;
19004 offset2 = INTVAL (XEXP (addr2, 1));
19005 }
19006 }
19007 else if (!REG_P (addr2))
19008 return 0;
19009 else
19010 {
19011 reg2 = REGNO (addr2);
19012 /* This was a simple (mem (reg)) expression. Offset is 0. */
19013 offset2 = 0;
19014 }
19015
19016 /* Both of these must have the same base register. */
19017 if (reg1 != reg2)
19018 return 0;
19019
19020 /* The offset for the second addr must be 8 more than the first addr. */
19021 if (offset2 != offset1 + 8)
19022 return 0;
19023
19024 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
19025 instructions. */
19026 return 1;
19027 }
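
/* Illustrative example (assumed assembly, not from the original source):
   the peephole combines a pair such as

     lfd 2,16(9)
     lfd 3,24(9)

   (same base register, offsets differing by 8, consecutive FPRs) into a
   single "lfq 2,16(9)".  */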
19028 \f
19029 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
19030 need to use DDmode; in all other cases we can use the same mode. */
19031 static machine_mode
19032 rs6000_secondary_memory_needed_mode (machine_mode mode)
19033 {
19034 if (lra_in_progress && mode == SDmode)
19035 return DDmode;
19036 return mode;
19037 }
19038
19039 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19040 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19041 only work on the traditional altivec registers, note if an altivec register
19042 was chosen. */
19043
19044 static enum rs6000_reg_type
19045 register_to_reg_type (rtx reg, bool *is_altivec)
19046 {
19047 HOST_WIDE_INT regno;
19048 enum reg_class rclass;
19049
19050 if (SUBREG_P (reg))
19051 reg = SUBREG_REG (reg);
19052
19053 if (!REG_P (reg))
19054 return NO_REG_TYPE;
19055
19056 regno = REGNO (reg);
19057 if (!HARD_REGISTER_NUM_P (regno))
19058 {
19059 if (!lra_in_progress && !reload_completed)
19060 return PSEUDO_REG_TYPE;
19061
19062 regno = true_regnum (reg);
19063 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
19064 return PSEUDO_REG_TYPE;
19065 }
19066
19067 gcc_assert (regno >= 0);
19068
19069 if (is_altivec && ALTIVEC_REGNO_P (regno))
19070 *is_altivec = true;
19071
19072 rclass = rs6000_regno_regclass[regno];
19073 return reg_class_to_reg_type[(int)rclass];
19074 }
19075
19076 /* Helper function to return the cost of adding a TOC entry address. */
19077
19078 static inline int
19079 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19080 {
19081 int ret;
19082
19083 if (TARGET_CMODEL != CMODEL_SMALL)
19084 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19085
19086 else
19087 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19088
19089 return ret;
19090 }
19091
19092 /* Helper function for rs6000_secondary_reload to determine whether the memory
19093 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19094 needs reloading. Return negative if the memory is not handled by the memory
19095 helper functions and to try a different reload method, 0 if no additional
19096 instructions are need, and positive to give the extra cost for the
19097 memory. */
19098
19099 static int
19100 rs6000_secondary_reload_memory (rtx addr,
19101 enum reg_class rclass,
19102 machine_mode mode)
19103 {
19104 int extra_cost = 0;
19105 rtx reg, and_arg, plus_arg0, plus_arg1;
19106 addr_mask_type addr_mask;
19107 const char *type = NULL;
19108 const char *fail_msg = NULL;
19109
19110 if (GPR_REG_CLASS_P (rclass))
19111 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19112
19113 else if (rclass == FLOAT_REGS)
19114 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19115
19116 else if (rclass == ALTIVEC_REGS)
19117 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19118
19119 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19120 else if (rclass == VSX_REGS)
19121 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19122 & ~RELOAD_REG_AND_M16);
19123
19124 /* If the register allocator hasn't made up its mind yet on the register
19125 class to use, fall back on defaults. */
19126 else if (rclass == NO_REGS)
19127 {
19128 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19129 & ~RELOAD_REG_AND_M16);
19130
19131 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19132 addr_mask &= ~(RELOAD_REG_INDEXED
19133 | RELOAD_REG_PRE_INCDEC
19134 | RELOAD_REG_PRE_MODIFY);
19135 }
19136
19137 else
19138 addr_mask = 0;
19139
19140 /* If the register isn't valid in this register class, just return now. */
19141 if ((addr_mask & RELOAD_REG_VALID) == 0)
19142 {
19143 if (TARGET_DEBUG_ADDR)
19144 {
19145 fprintf (stderr,
19146 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19147 "not valid in class\n",
19148 GET_MODE_NAME (mode), reg_class_names[rclass]);
19149 debug_rtx (addr);
19150 }
19151
19152 return -1;
19153 }
19154
19155 switch (GET_CODE (addr))
19156 {
19157 /* Does the register class support auto update forms for this mode? We
19158 don't need a scratch register, since the powerpc only supports
19159 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19160 case PRE_INC:
19161 case PRE_DEC:
19162 reg = XEXP (addr, 0);
19163 if (!base_reg_operand (addr, GET_MODE (reg)))
19164 {
19165 fail_msg = "no base register #1";
19166 extra_cost = -1;
19167 }
19168
19169 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19170 {
19171 extra_cost = 1;
19172 type = "update";
19173 }
19174 break;
19175
19176 case PRE_MODIFY:
19177 reg = XEXP (addr, 0);
19178 plus_arg1 = XEXP (addr, 1);
19179 if (!base_reg_operand (reg, GET_MODE (reg))
19180 || GET_CODE (plus_arg1) != PLUS
19181 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19182 {
19183 fail_msg = "bad PRE_MODIFY";
19184 extra_cost = -1;
19185 }
19186
19187 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19188 {
19189 extra_cost = 1;
19190 type = "update";
19191 }
19192 break;
19193
19194 /* Do we need to simulate AND -16 to clear the bottom address bits used
19195 in VMX load/stores? Only allow the AND for vector sizes. */
19196 case AND:
19197 and_arg = XEXP (addr, 0);
19198 if (GET_MODE_SIZE (mode) != 16
19199 || !CONST_INT_P (XEXP (addr, 1))
19200 || INTVAL (XEXP (addr, 1)) != -16)
19201 {
19202 fail_msg = "bad Altivec AND #1";
19203 extra_cost = -1;
19204 }
19205
19206 if (rclass != ALTIVEC_REGS)
19207 {
19208 if (legitimate_indirect_address_p (and_arg, false))
19209 extra_cost = 1;
19210
19211 else if (legitimate_indexed_address_p (and_arg, false))
19212 extra_cost = 2;
19213
19214 else
19215 {
19216 fail_msg = "bad Altivec AND #2";
19217 extra_cost = -1;
19218 }
19219
19220 type = "and";
19221 }
19222 break;
19223
19224 /* If this is an indirect address, make sure it is a base register. */
19225 case REG:
19226 case SUBREG:
19227 if (!legitimate_indirect_address_p (addr, false))
19228 {
19229 extra_cost = 1;
19230 type = "move";
19231 }
19232 break;
19233
19234 /* If this is an indexed address, make sure the register class can handle
19235 indexed addresses for this mode. */
19236 case PLUS:
19237 plus_arg0 = XEXP (addr, 0);
19238 plus_arg1 = XEXP (addr, 1);
19239
19240 /* (plus (plus (reg) (constant)) (constant)) is generated during
19241 push_reload processing, so handle it now. */
19242 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19243 {
19244 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19245 {
19246 extra_cost = 1;
19247 type = "offset";
19248 }
19249 }
19250
19251 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19252 push_reload processing, so handle it now. */
19253 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19254 {
19255 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19256 {
19257 extra_cost = 1;
19258 type = "indexed #2";
19259 }
19260 }
19261
19262 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19263 {
19264 fail_msg = "no base register #2";
19265 extra_cost = -1;
19266 }
19267
19268 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19269 {
19270 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19271 || !legitimate_indexed_address_p (addr, false))
19272 {
19273 extra_cost = 1;
19274 type = "indexed";
19275 }
19276 }
19277
19278 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19279 && CONST_INT_P (plus_arg1))
19280 {
19281 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19282 {
19283 extra_cost = 1;
19284 type = "vector d-form offset";
19285 }
19286 }
19287
19288 /* Make sure the register class can handle offset addresses. */
19289 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19290 {
19291 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19292 {
19293 extra_cost = 1;
19294 type = "offset #2";
19295 }
19296 }
19297
19298 else
19299 {
19300 fail_msg = "bad PLUS";
19301 extra_cost = -1;
19302 }
19303
19304 break;
19305
19306 case LO_SUM:
19307 /* Quad offsets are restricted and can't handle normal addresses. */
19308 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19309 {
19310 extra_cost = -1;
19311 type = "vector d-form lo_sum";
19312 }
19313
19314 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19315 {
19316 fail_msg = "bad LO_SUM";
19317 extra_cost = -1;
19318 }
19319
19320 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19321 {
19322 extra_cost = 1;
19323 type = "lo_sum";
19324 }
19325 break;
19326
19327 /* Static addresses need to create a TOC entry. */
19328 case CONST:
19329 case SYMBOL_REF:
19330 case LABEL_REF:
19331 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19332 {
19333 extra_cost = -1;
19334 type = "vector d-form lo_sum #2";
19335 }
19336
19337 else
19338 {
19339 type = "address";
19340 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19341 }
19342 break;
19343
19344 /* TOC references look like offsetable memory. */
19345 case UNSPEC:
19346 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19347 {
19348 fail_msg = "bad UNSPEC";
19349 extra_cost = -1;
19350 }
19351
19352 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19353 {
19354 extra_cost = -1;
19355 type = "vector d-form lo_sum #3";
19356 }
19357
19358 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19359 {
19360 extra_cost = 1;
19361 type = "toc reference";
19362 }
19363 break;
19364
19365 default:
19366 {
19367 fail_msg = "bad address";
19368 extra_cost = -1;
19369 }
19370 }
19371
19372 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19373 {
19374 if (extra_cost < 0)
19375 fprintf (stderr,
19376 "rs6000_secondary_reload_memory error: mode = %s, "
19377 "class = %s, addr_mask = '%s', %s\n",
19378 GET_MODE_NAME (mode),
19379 reg_class_names[rclass],
19380 rs6000_debug_addr_mask (addr_mask, false),
19381 (fail_msg != NULL) ? fail_msg : "<bad address>");
19382
19383 else
19384 fprintf (stderr,
19385 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19386 "addr_mask = '%s', extra cost = %d, %s\n",
19387 GET_MODE_NAME (mode),
19388 reg_class_names[rclass],
19389 rs6000_debug_addr_mask (addr_mask, false),
19390 extra_cost,
19391 (type) ? type : "<none>");
19392
19393 debug_rtx (addr);
19394 }
19395
19396 return extra_cost;
19397 }
19398
19399 /* Helper function for rs6000_secondary_reload to return true if a move to a
19400 different register class is really a simple move. */
19401
19402 static bool
19403 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19404 enum rs6000_reg_type from_type,
19405 machine_mode mode)
19406 {
19407 int size = GET_MODE_SIZE (mode);
19408
19409 /* Add support for various direct moves available. In this function, we only
19410 look at cases where we don't need any extra registers, and one or more
19411 simple move insns are issued. Originally, small integers were not allowed
19412 in FPR/VSX registers. Single precision binary floating point is not a
19413 simple move because we need to convert to the single precision memory layout.
19414 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19415 need special direct move handling, which we do not support yet. */
19416 if (TARGET_DIRECT_MOVE
19417 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19418 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19419 {
19420 if (TARGET_POWERPC64)
19421 {
19422 /* ISA 2.07: MTVSRD or MVFVSRD. */
19423 if (size == 8)
19424 return true;
19425
19426 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19427 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19428 return true;
19429 }
19430
19431 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19432 if (TARGET_P8_VECTOR)
19433 {
19434 if (mode == SImode)
19435 return true;
19436
19437 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19438 return true;
19439 }
19440
19441 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19442 if (mode == SDmode)
19443 return true;
19444 }
19445
19446 /* Power6+: MFTGPR or MFFGPR. */
19447 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19448 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19449 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19450 return true;
19451
19452 /* Move to/from SPR. */
19453 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19454 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19455 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19456 return true;
19457
19458 return false;
19459 }
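
/* Illustrative example (assumed assembly, not from the original source):
   on a 64-bit ISA 2.07 target, moving a DImode value from a GPR to a VSX
   register is the single instruction "mtvsrd vsx,gpr", so the function
   above reports it as a simple move needing no extra scratch register.  */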
19460
19461 /* Direct move helper function for rs6000_secondary_reload; handle all of the
19462 special direct moves that involve allocating an extra register. Return true
19463 if there is such a helper function, recording its insn code and extra cost
19464 in SRI, and false if not. */
19465
19466 static bool
19467 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19468 enum rs6000_reg_type from_type,
19469 machine_mode mode,
19470 secondary_reload_info *sri,
19471 bool altivec_p)
19472 {
19473 bool ret = false;
19474 enum insn_code icode = CODE_FOR_nothing;
19475 int cost = 0;
19476 int size = GET_MODE_SIZE (mode);
19477
19478 if (TARGET_POWERPC64 && size == 16)
19479 {
19480 /* Handle moving 128-bit values from GPRs to VSX registers on
19481 ISA 2.07 (power8, power9) when running in 64-bit mode using
19482 XXPERMDI to glue the two 64-bit values back together. */
19483 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19484 {
19485 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19486 icode = reg_addr[mode].reload_vsx_gpr;
19487 }
19488
19489 /* Handle moving 128-bit values from VSX registers to GPRs on
19490 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19491 bottom 64-bit value. */
19492 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19493 {
19494 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19495 icode = reg_addr[mode].reload_gpr_vsx;
19496 }
19497 }
19498
19499 else if (TARGET_POWERPC64 && mode == SFmode)
19500 {
19501 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19502 {
19503 cost = 3; /* xscvdpspn, mfvsrd, and. */
19504 icode = reg_addr[mode].reload_gpr_vsx;
19505 }
19506
19507 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19508 {
19509 cost = 2; /* mtvsrz, xscvspdpn. */
19510 icode = reg_addr[mode].reload_vsx_gpr;
19511 }
19512 }
19513
19514 else if (!TARGET_POWERPC64 && size == 8)
19515 {
19516 /* Handle moving 64-bit values from GPRs to floating point registers on
19517 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19518 32-bit values back together. Altivec register classes must be handled
19519 specially since a different instruction is used, and the secondary
19520 reload support requires a single instruction class in the scratch
19521 register constraint. However, right now TFmode is not allowed in
19522 Altivec registers, so the pattern will never match. */
19523 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19524 {
19525 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19526 icode = reg_addr[mode].reload_fpr_gpr;
19527 }
19528 }
19529
19530 if (icode != CODE_FOR_nothing)
19531 {
19532 ret = true;
19533 if (sri)
19534 {
19535 sri->icode = icode;
19536 sri->extra_cost = cost;
19537 }
19538 }
19539
19540 return ret;
19541 }
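
/* Illustrative sketch (assumed instruction sequence, not from the original
   source): the 128-bit GPR-to-VSX case above expands through the
   reload_vsx_gpr helper into roughly

     mtvsrd   vs-tmp, rA             -- one 64-bit half
     mtvsrd   vs-dst, rB             -- the other half
     xxpermdi vs-dst, vs-tmp, vs-dst, 0

   i.e. two direct moves plus one permute to glue the halves together,
   matching the cost of 3 recorded above (register names here are
   placeholders).  */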
19542
19543 /* Return whether a move between two register classes can be done either
19544 directly (a simple move) or via a pattern that uses a single extra temporary
19545 (using ISA 2.07's direct move in this case). */
19546
19547 static bool
19548 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19549 enum rs6000_reg_type from_type,
19550 machine_mode mode,
19551 secondary_reload_info *sri,
19552 bool altivec_p)
19553 {
19554 /* Fall back to load/store reloads if either type is not a register. */
19555 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19556 return false;
19557
19558 /* If we haven't allocated registers yet, assume the move can be done for the
19559 standard register types. */
19560 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19561 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19562 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19563 return true;
19564
19565 /* A move within the same set of registers is a simple move for
19566 non-specialized registers. */
19567 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19568 return true;
19569
19570 /* Check whether a simple move can be done directly. */
19571 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19572 {
19573 if (sri)
19574 {
19575 sri->icode = CODE_FOR_nothing;
19576 sri->extra_cost = 0;
19577 }
19578 return true;
19579 }
19580
19581 /* Now check if we can do it in a few steps. */
19582 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19583 altivec_p);
19584 }
19585
19586 /* Inform reload about cases where moving X with a mode MODE to a register in
19587 RCLASS requires an extra scratch or immediate register. Return the class
19588 needed for the immediate register.
19589
19590 For VSX and Altivec, we may need a register to convert sp+offset into
19591 reg+sp.
19592
19593 For misaligned 64-bit gpr loads and stores we need a register to
19594 convert an offset address to indirect. */
19595
19596 static reg_class_t
19597 rs6000_secondary_reload (bool in_p,
19598 rtx x,
19599 reg_class_t rclass_i,
19600 machine_mode mode,
19601 secondary_reload_info *sri)
19602 {
19603 enum reg_class rclass = (enum reg_class) rclass_i;
19604 reg_class_t ret = ALL_REGS;
19605 enum insn_code icode;
19606 bool default_p = false;
19607 bool done_p = false;
19608
19609 /* Allow subreg of memory before/during reload. */
19610 bool memory_p = (MEM_P (x)
19611 || (!reload_completed && SUBREG_P (x)
19612 && MEM_P (SUBREG_REG (x))));
19613
19614 sri->icode = CODE_FOR_nothing;
19615 sri->t_icode = CODE_FOR_nothing;
19616 sri->extra_cost = 0;
19617 icode = ((in_p)
19618 ? reg_addr[mode].reload_load
19619 : reg_addr[mode].reload_store);
19620
19621 if (REG_P (x) || register_operand (x, mode))
19622 {
19623 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19624 bool altivec_p = (rclass == ALTIVEC_REGS);
19625 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19626
19627 if (!in_p)
19628 std::swap (to_type, from_type);
19629
19630 /* Can we do a direct move of some sort? */
19631 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19632 altivec_p))
19633 {
19634 icode = (enum insn_code)sri->icode;
19635 default_p = false;
19636 done_p = true;
19637 ret = NO_REGS;
19638 }
19639 }
19640
19641 /* Make sure 0.0 is not reloaded or forced into memory. */
19642 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19643 {
19644 ret = NO_REGS;
19645 default_p = false;
19646 done_p = true;
19647 }
19648
19649 /* If this is a scalar floating point value and we want to load it into the
19650 traditional Altivec registers, route the move through a traditional floating
19651 point register, unless we have D-form addressing. Also make sure that
19652 non-zero constants use a FPR. */
19653 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19654 && !mode_supports_vmx_dform (mode)
19655 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19656 && (memory_p || CONST_DOUBLE_P (x)))
19657 {
19658 ret = FLOAT_REGS;
19659 default_p = false;
19660 done_p = true;
19661 }
19662
19663 /* Handle reload of load/stores if we have reload helper functions. */
19664 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19665 {
19666 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19667 mode);
19668
19669 if (extra_cost >= 0)
19670 {
19671 done_p = true;
19672 ret = NO_REGS;
19673 if (extra_cost > 0)
19674 {
19675 sri->extra_cost = extra_cost;
19676 sri->icode = icode;
19677 }
19678 }
19679 }
19680
19681 /* Handle unaligned loads and stores of integer registers. */
19682 if (!done_p && TARGET_POWERPC64
19683 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19684 && memory_p
19685 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19686 {
19687 rtx addr = XEXP (x, 0);
19688 rtx off = address_offset (addr);
19689
19690 if (off != NULL_RTX)
19691 {
19692 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19693 unsigned HOST_WIDE_INT offset = INTVAL (off);
19694
19695 /* We need a secondary reload when our legitimate_address_p
19696 says the address is good (as otherwise the entire address
19697 will be reloaded), and the offset is not a multiple of
19698 four or we have an address wrap. Address wrap will only
19699 occur for LO_SUMs since legitimate_offset_address_p
19700 rejects addresses for 16-byte mems that will wrap. */
19701 if (GET_CODE (addr) == LO_SUM
19702 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19703 && ((offset & 3) != 0
19704 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19705 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19706 && (offset & 3) != 0))
19707 {
19708 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19709 if (in_p)
19710 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19711 : CODE_FOR_reload_di_load);
19712 else
19713 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19714 : CODE_FOR_reload_di_store);
19715 sri->extra_cost = 2;
19716 ret = NO_REGS;
19717 done_p = true;
19718 }
19719 else
19720 default_p = true;
19721 }
19722 else
19723 default_p = true;
19724 }
19725
19726 if (!done_p && !TARGET_POWERPC64
19727 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19728 && memory_p
19729 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19730 {
19731 rtx addr = XEXP (x, 0);
19732 rtx off = address_offset (addr);
19733
19734 if (off != NULL_RTX)
19735 {
19736 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19737 unsigned HOST_WIDE_INT offset = INTVAL (off);
19738
19739 /* We need a secondary reload when our legitimate_address_p
19740 says the address is good (as otherwise the entire address
19741 will be reloaded), and we have a wrap.
19742
19743 legitimate_lo_sum_address_p allows LO_SUM addresses to
19744 have any offset so test for wrap in the low 16 bits.
19745
19746 legitimate_offset_address_p checks for the range
19747 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19748 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19749 [0x7ff4,0x7fff] respectively, so test for the
19750 intersection of these ranges, [0x7ffc,0x7fff] and
19751 [0x7ff4,0x7ff7] respectively.
19752
19753 Note that the address we see here may have been
19754 manipulated by legitimize_reload_address. */
19755 if (GET_CODE (addr) == LO_SUM
19756 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19757 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19758 {
19759 if (in_p)
19760 sri->icode = CODE_FOR_reload_si_load;
19761 else
19762 sri->icode = CODE_FOR_reload_si_store;
19763 sri->extra_cost = 2;
19764 ret = NO_REGS;
19765 done_p = true;
19766 }
19767 else
19768 default_p = true;
19769 }
19770 else
19771 default_p = true;
19772 }
19773
19774 if (!done_p)
19775 default_p = true;
19776
19777 if (default_p)
19778 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19779
19780 gcc_assert (ret != ALL_REGS);
19781
19782 if (TARGET_DEBUG_ADDR)
19783 {
19784 fprintf (stderr,
19785 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19786 "mode = %s",
19787 reg_class_names[ret],
19788 in_p ? "true" : "false",
19789 reg_class_names[rclass],
19790 GET_MODE_NAME (mode));
19791
19792 if (reload_completed)
19793 fputs (", after reload", stderr);
19794
19795 if (!done_p)
19796 fputs (", done_p not set", stderr);
19797
19798 if (default_p)
19799 fputs (", default secondary reload", stderr);
19800
19801 if (sri->icode != CODE_FOR_nothing)
19802 fprintf (stderr, ", reload func = %s, extra cost = %d",
19803 insn_data[sri->icode].name, sri->extra_cost);
19804
19805 else if (sri->extra_cost > 0)
19806 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19807
19808 fputs ("\n", stderr);
19809 debug_rtx (x);
19810 }
19811
19812 return ret;
19813 }
19814
19815 /* Better tracing for rs6000_secondary_reload_inner. */
19816
19817 static void
19818 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19819 bool store_p)
19820 {
19821 rtx set, clobber;
19822
19823 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19824
19825 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19826 store_p ? "store" : "load");
19827
19828 if (store_p)
19829 set = gen_rtx_SET (mem, reg);
19830 else
19831 set = gen_rtx_SET (reg, mem);
19832
19833 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19834 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19835 }
19836
19837 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19838 ATTRIBUTE_NORETURN;
19839
19840 static void
19841 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19842 bool store_p)
19843 {
19844 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19845 gcc_unreachable ();
19846 }
19847
19848 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19849 reload helper functions. These were identified in
19850 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19851 reload, it calls the insns:
19852 reload_<RELOAD:mode>_<P:mptrsize>_store
19853 reload_<RELOAD:mode>_<P:mptrsize>_load
19854
19855 which in turn calls this function, to do whatever is necessary to create
19856 valid addresses. */
19857
19858 void
19859 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19860 {
19861 int regno = true_regnum (reg);
19862 machine_mode mode = GET_MODE (reg);
19863 addr_mask_type addr_mask;
19864 rtx addr;
19865 rtx new_addr;
19866 rtx op_reg, op0, op1;
19867 rtx and_op;
19868 rtx cc_clobber;
19869 rtvec rv;
19870
19871 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19872 || !base_reg_operand (scratch, GET_MODE (scratch)))
19873 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19874
19875 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19876 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19877
19878 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19879 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19880
19881 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19882 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19883
19884 else
19885 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19886
19887 /* Make sure the mode is valid in this register class. */
19888 if ((addr_mask & RELOAD_REG_VALID) == 0)
19889 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19890
19891 if (TARGET_DEBUG_ADDR)
19892 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19893
19894 new_addr = addr = XEXP (mem, 0);
19895 switch (GET_CODE (addr))
19896 {
19897 /* Does the register class support auto update forms for this mode? If
19898 not, do the update now. We don't need a scratch register, since the
19899 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19900 case PRE_INC:
19901 case PRE_DEC:
19902 op_reg = XEXP (addr, 0);
19903 if (!base_reg_operand (op_reg, Pmode))
19904 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19905
19906 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19907 {
19908 int delta = GET_MODE_SIZE (mode);
19909 if (GET_CODE (addr) == PRE_DEC)
19910 delta = -delta;
19911 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19912 new_addr = op_reg;
19913 }
19914 break;
19915
19916 case PRE_MODIFY:
19917 op0 = XEXP (addr, 0);
19918 op1 = XEXP (addr, 1);
19919 if (!base_reg_operand (op0, Pmode)
19920 || GET_CODE (op1) != PLUS
19921 || !rtx_equal_p (op0, XEXP (op1, 0)))
19922 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19923
19924 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19925 {
19926 emit_insn (gen_rtx_SET (op0, op1));
19927 new_addr = reg;
19928 }
19929 break;
19930
19931 /* Do we need to simulate AND -16 to clear the bottom address bits used
19932 in VMX load/stores? */
19933 case AND:
19934 op0 = XEXP (addr, 0);
19935 op1 = XEXP (addr, 1);
19936 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19937 {
19938 if (REG_P (op0) || SUBREG_P (op0))
19939 op_reg = op0;
19940
19941 else if (GET_CODE (op1) == PLUS)
19942 {
19943 emit_insn (gen_rtx_SET (scratch, op1));
19944 op_reg = scratch;
19945 }
19946
19947 else
19948 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19949
19950 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19951 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19952 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19953 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19954 new_addr = scratch;
19955 }
19956 break;
19957
19958 /* If this is an indirect address, make sure it is a base register. */
19959 case REG:
19960 case SUBREG:
19961 if (!base_reg_operand (addr, GET_MODE (addr)))
19962 {
19963 emit_insn (gen_rtx_SET (scratch, addr));
19964 new_addr = scratch;
19965 }
19966 break;
19967
19968 /* If this is an indexed address, make sure the register class can handle
19969 indexed addresses for this mode. */
19970 case PLUS:
19971 op0 = XEXP (addr, 0);
19972 op1 = XEXP (addr, 1);
19973 if (!base_reg_operand (op0, Pmode))
19974 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19975
19976 else if (int_reg_operand (op1, Pmode))
19977 {
19978 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19979 {
19980 emit_insn (gen_rtx_SET (scratch, addr));
19981 new_addr = scratch;
19982 }
19983 }
19984
19985 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19986 {
19987 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19988 || !quad_address_p (addr, mode, false))
19989 {
19990 emit_insn (gen_rtx_SET (scratch, addr));
19991 new_addr = scratch;
19992 }
19993 }
19994
19995 /* Make sure the register class can handle offset addresses. */
19996 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19997 {
19998 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19999 {
20000 emit_insn (gen_rtx_SET (scratch, addr));
20001 new_addr = scratch;
20002 }
20003 }
20004
20005 else
20006 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20007
20008 break;
20009
20010 case LO_SUM:
20011 op0 = XEXP (addr, 0);
20012 op1 = XEXP (addr, 1);
20013 if (!base_reg_operand (op0, Pmode))
20014 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20015
20016 else if (int_reg_operand (op1, Pmode))
20017 {
20018 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
20019 {
20020 emit_insn (gen_rtx_SET (scratch, addr));
20021 new_addr = scratch;
20022 }
20023 }
20024
20025 /* Quad offsets are restricted and can't handle normal addresses. */
20026 else if (mode_supports_dq_form (mode))
20027 {
20028 emit_insn (gen_rtx_SET (scratch, addr));
20029 new_addr = scratch;
20030 }
20031
20032 /* Make sure the register class can handle offset addresses. */
20033 else if (legitimate_lo_sum_address_p (mode, addr, false))
20034 {
20035 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
20036 {
20037 emit_insn (gen_rtx_SET (scratch, addr));
20038 new_addr = scratch;
20039 }
20040 }
20041
20042 else
20043 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20044
20045 break;
20046
20047 case SYMBOL_REF:
20048 case CONST:
20049 case LABEL_REF:
20050 rs6000_emit_move (scratch, addr, Pmode);
20051 new_addr = scratch;
20052 break;
20053
20054 default:
20055 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20056 }
20057
20058 /* Adjust the address if it changed. */
20059 if (addr != new_addr)
20060 {
20061 mem = replace_equiv_address_nv (mem, new_addr);
20062 if (TARGET_DEBUG_ADDR)
20063 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20064 }
20065
20066 /* Now create the move. */
20067 if (store_p)
20068 emit_insn (gen_rtx_SET (mem, reg));
20069 else
20070 emit_insn (gen_rtx_SET (reg, mem));
20071
20072 return;
20073 }
20074
20075 /* Convert reloads involving 64-bit gprs and misaligned offset
20076 addressing, or multiple 32-bit gprs and offsets that are too large,
20077 to use indirect addressing. */
20078
20079 void
20080 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20081 {
20082 int regno = true_regnum (reg);
20083 enum reg_class rclass;
20084 rtx addr;
20085 rtx scratch_or_premodify = scratch;
20086
20087 if (TARGET_DEBUG_ADDR)
20088 {
20089 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20090 store_p ? "store" : "load");
20091 fprintf (stderr, "reg:\n");
20092 debug_rtx (reg);
20093 fprintf (stderr, "mem:\n");
20094 debug_rtx (mem);
20095 fprintf (stderr, "scratch:\n");
20096 debug_rtx (scratch);
20097 }
20098
20099 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
20100 gcc_assert (MEM_P (mem));
20101 rclass = REGNO_REG_CLASS (regno);
20102 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20103 addr = XEXP (mem, 0);
20104
20105 if (GET_CODE (addr) == PRE_MODIFY)
20106 {
20107 gcc_assert (REG_P (XEXP (addr, 0))
20108 && GET_CODE (XEXP (addr, 1)) == PLUS
20109 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20110 scratch_or_premodify = XEXP (addr, 0);
20111 addr = XEXP (addr, 1);
20112 }
20113 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20114
20115 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20116
20117 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20118
20119 /* Now create the move. */
20120 if (store_p)
20121 emit_insn (gen_rtx_SET (mem, reg));
20122 else
20123 emit_insn (gen_rtx_SET (reg, mem));
20124
20125 return;
20126 }
20127
20128 /* Given an rtx X being reloaded into a reg required to be
20129 in class CLASS, return the class of reg to actually use.
20130 In general this is just CLASS; but on some machines
20131 in some cases it is preferable to use a more restrictive class.
20132
20133 On the RS/6000, we have to return NO_REGS when we want to reload a
20134 floating-point CONST_DOUBLE to force it to be copied to memory.
20135
20136 We also don't want to reload integer values into floating-point
20137 registers if we can at all help it. In fact, this can
20138 cause reload to die, if it tries to generate a reload of CTR
20139 into a FP register and discovers it doesn't have the memory location
20140 required.
20141
20142 ??? Would it be a good idea to have reload do the converse, that is
20143 try to reload floating modes into FP registers if possible?
20144 */
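/* For example (derived from the cases below): reloading (const_int -1)
   for VSX_REGS stays in VSX_REGS on ISA 2.07, where XXLORC can build
   all-ones in any VSX register, but narrows to ALTIVEC_REGS on ISA
   2.06, where only VSPLTIS<x> can generate it.  */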
20145
20146 static enum reg_class
20147 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20148 {
20149 machine_mode mode = GET_MODE (x);
20150 bool is_constant = CONSTANT_P (x);
20151
20152 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20153 reload class for it. */
20154 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20155 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20156 return NO_REGS;
20157
20158 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20159 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20160 return NO_REGS;
20161
20162 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20163 the reloading of address expressions using PLUS into floating point
20164 registers. */
20165 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20166 {
20167 if (is_constant)
20168 {
20169 /* Zero is always allowed in all VSX registers. */
20170 if (x == CONST0_RTX (mode))
20171 return rclass;
20172
20173 /* If this is a vector constant that can be formed with a few Altivec
20174 instructions, we want altivec registers. */
20175 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20176 return ALTIVEC_REGS;
20177
20178 /* If this is an integer constant that can easily be loaded into
20179 vector registers, allow it. */
20180 if (CONST_INT_P (x))
20181 {
20182 HOST_WIDE_INT value = INTVAL (x);
20183
20184 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20185 2.06 can generate it in the Altivec registers with
20186 VSPLTI<x>. */
20187 if (value == -1)
20188 {
20189 if (TARGET_P8_VECTOR)
20190 return rclass;
20191 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20192 return ALTIVEC_REGS;
20193 else
20194 return NO_REGS;
20195 }
20196
20197 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20198 a sign extend in the Altivec registers. */
20199 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20200 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20201 return ALTIVEC_REGS;
20202 }
20203
20204 /* Force constant to memory. */
20205 return NO_REGS;
20206 }
20207
20208 /* D-form addressing can easily reload the value. */
20209 if (mode_supports_vmx_dform (mode)
20210 || mode_supports_dq_form (mode))
20211 return rclass;
20212
20213 /* If this is a scalar floating point value and we don't have D-form
20214 addressing, prefer the traditional floating point registers so that we
20215 can use D-form (register+offset) addressing. */
20216 if (rclass == VSX_REGS
20217 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20218 return FLOAT_REGS;
20219
20220 /* Prefer the Altivec registers if Altivec is handling the vector
20221 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20222 loads. */
20223 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20224 || mode == V1TImode)
20225 return ALTIVEC_REGS;
20226
20227 return rclass;
20228 }
20229
20230 if (is_constant || GET_CODE (x) == PLUS)
20231 {
20232 if (reg_class_subset_p (GENERAL_REGS, rclass))
20233 return GENERAL_REGS;
20234 if (reg_class_subset_p (BASE_REGS, rclass))
20235 return BASE_REGS;
20236 return NO_REGS;
20237 }
20238
20239 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
20240 return GENERAL_REGS;
20241
20242 return rclass;
20243 }
20244
20245 /* Debug version of rs6000_preferred_reload_class. */
20246 static enum reg_class
20247 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20248 {
20249 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20250
20251 fprintf (stderr,
20252 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20253 "mode = %s, x:\n",
20254 reg_class_names[ret], reg_class_names[rclass],
20255 GET_MODE_NAME (GET_MODE (x)));
20256 debug_rtx (x);
20257
20258 return ret;
20259 }
20260
20261 /* If we are copying between FP or AltiVec registers and anything else, we need
20262 a memory location. The exception is when we are targeting ppc64 and
20263 the direct-move instructions between FPRs and GPRs are available. Also, under VSX, you
20264 can copy vector registers from the FP register set to the Altivec register
20265 set and vice versa. */
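/* E.g. without direct moves, copying a DImode value between a GPR and
   an FPR has to bounce through a stack slot; on a 64-bit power8 the
   mtvsrd/mfvsrd direct-move instructions do the copy register to
   register, so no memory is needed.  */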
20266
20267 static bool
20268 rs6000_secondary_memory_needed (machine_mode mode,
20269 reg_class_t from_class,
20270 reg_class_t to_class)
20271 {
20272 enum rs6000_reg_type from_type, to_type;
20273 bool altivec_p = ((from_class == ALTIVEC_REGS)
20274 || (to_class == ALTIVEC_REGS));
20275
20276 /* If a simple/direct move is available, we don't need secondary memory. */
20277 from_type = reg_class_to_reg_type[(int)from_class];
20278 to_type = reg_class_to_reg_type[(int)to_class];
20279
20280 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20281 (secondary_reload_info *)0, altivec_p))
20282 return false;
20283
20284 /* If we have a floating point or vector register class, we need to use
20285 memory to transfer the data. */
20286 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20287 return true;
20288
20289 return false;
20290 }
20291
20292 /* Debug version of rs6000_secondary_memory_needed. */
20293 static bool
20294 rs6000_debug_secondary_memory_needed (machine_mode mode,
20295 reg_class_t from_class,
20296 reg_class_t to_class)
20297 {
20298 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20299
20300 fprintf (stderr,
20301 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20302 "to_class = %s, mode = %s\n",
20303 ret ? "true" : "false",
20304 reg_class_names[from_class],
20305 reg_class_names[to_class],
20306 GET_MODE_NAME (mode));
20307
20308 return ret;
20309 }
20310
20311 /* Return the register class of a scratch register needed to copy IN into
20312 or out of a register in RCLASS in MODE. If it can be done directly,
20313 NO_REGS is returned. */
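/* For instance, on ELF targets copying (symbol_ref "x") into a
   floating-point register needs an intermediate BASE_REGS register to
   form the address, per the first test below, while a GPR-to-GPR copy
   needs no scratch and returns NO_REGS.  */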
20314
20315 static enum reg_class
20316 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20317 rtx in)
20318 {
20319 int regno;
20320
20321 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20322 #if TARGET_MACHO
20323 && MACHOPIC_INDIRECT
20324 #endif
20325 ))
20326 {
20327 /* We cannot copy a symbolic operand directly into anything
20328 other than BASE_REGS for TARGET_ELF. So indicate that a
20329 register from BASE_REGS is needed as an intermediate
20330 register.
20331
20332 On Darwin, pic addresses require a load from memory, which
20333 needs a base register. */
20334 if (rclass != BASE_REGS
20335 && (SYMBOL_REF_P (in)
20336 || GET_CODE (in) == HIGH
20337 || GET_CODE (in) == LABEL_REF
20338 || GET_CODE (in) == CONST))
20339 return BASE_REGS;
20340 }
20341
20342 if (REG_P (in))
20343 {
20344 regno = REGNO (in);
20345 if (!HARD_REGISTER_NUM_P (regno))
20346 {
20347 regno = true_regnum (in);
20348 if (!HARD_REGISTER_NUM_P (regno))
20349 regno = -1;
20350 }
20351 }
20352 else if (SUBREG_P (in))
20353 {
20354 regno = true_regnum (in);
20355 if (!HARD_REGISTER_NUM_P (regno))
20356 regno = -1;
20357 }
20358 else
20359 regno = -1;
20360
20361 /* If we have VSX register moves, prefer moving scalar values between
20362 Altivec registers and GPRs by going via an FPR (and then via memory)
20363 instead of reloading the secondary memory address for Altivec moves. */
20364 if (TARGET_VSX
20365 && GET_MODE_SIZE (mode) < 16
20366 && !mode_supports_vmx_dform (mode)
20367 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20368 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20369 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20370 && (regno >= 0 && INT_REGNO_P (regno)))))
20371 return FLOAT_REGS;
20372
20373 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20374 into anything. */
20375 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20376 || (regno >= 0 && INT_REGNO_P (regno)))
20377 return NO_REGS;
20378
20379 /* Constants, memory, and VSX registers can go into VSX registers (both the
20380 traditional floating point and the altivec registers). */
20381 if (rclass == VSX_REGS
20382 && (regno == -1 || VSX_REGNO_P (regno)))
20383 return NO_REGS;
20384
20385 /* Constants, memory, and FP registers can go into FP registers. */
20386 if ((regno == -1 || FP_REGNO_P (regno))
20387 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
20388 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20389
20390 /* Memory and AltiVec registers can go into AltiVec registers. */
20391 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20392 && rclass == ALTIVEC_REGS)
20393 return NO_REGS;
20394
20395 /* We can copy among the CR registers. */
20396 if ((rclass == CR_REGS || rclass == CR0_REGS)
20397 && regno >= 0 && CR_REGNO_P (regno))
20398 return NO_REGS;
20399
20400 /* Otherwise, we need GENERAL_REGS. */
20401 return GENERAL_REGS;
20402 }
20403
20404 /* Debug version of rs6000_secondary_reload_class. */
20405 static enum reg_class
20406 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20407 machine_mode mode, rtx in)
20408 {
20409 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20410 fprintf (stderr,
20411 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20412 "mode = %s, input rtx:\n",
20413 reg_class_names[ret], reg_class_names[rclass],
20414 GET_MODE_NAME (mode));
20415 debug_rtx (in);
20416
20417 return ret;
20418 }
20419
20420 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20421
20422 static bool
20423 rs6000_can_change_mode_class (machine_mode from,
20424 machine_mode to,
20425 reg_class_t rclass)
20426 {
20427 unsigned from_size = GET_MODE_SIZE (from);
20428 unsigned to_size = GET_MODE_SIZE (to);
20429
20430 if (from_size != to_size)
20431 {
20432 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20433
20434 if (reg_classes_intersect_p (xclass, rclass))
20435 {
20436 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20437 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20438 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20439 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20440
20441 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20442 single register under VSX because the scalar part of the register
20443 is in the upper 64 bits, and not the lower 64 bits. Types like
20444 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20445 IEEE floating point can't overlap, and neither can small
20446 values. */
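/* Concretely, a DFmode scalar occupies the upper 64 bits of its VSX
   register, while a 64-bit subreg of a 128-bit value names the half
   that would sit at that byte offset in memory, so treating the
   subreg as a simple register rename would pick up the wrong half.  */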
20447
20448 if (to_float128_vector_p && from_float128_vector_p)
20449 return true;
20450
20451 else if (to_float128_vector_p || from_float128_vector_p)
20452 return false;
20453
20454 /* TDmode in floating-mode registers must always go into a register
20455 pair with the most significant word in the even-numbered register
20456 to match ISA requirements. In little-endian mode, this does not
20457 match subreg numbering, so we cannot allow subregs. */
20458 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20459 return false;
20460
20461 if (from_size < 8 || to_size < 8)
20462 return false;
20463
20464 if (from_size == 8 && (8 * to_nregs) != to_size)
20465 return false;
20466
20467 if (to_size == 8 && (8 * from_nregs) != from_size)
20468 return false;
20469
20470 return true;
20471 }
20472 else
20473 return true;
20474 }
20475
20476 /* Since the VSX register set includes traditional floating point registers
20477 and altivec registers, just check for the size being different instead of
20478 trying to check whether the modes are vector modes. Otherwise it won't
20479 allow say DF and DI to change classes. For types like TFmode and TDmode
20480 that take 2 64-bit registers, rather than a single 128-bit register, don't
20481 allow subregs of those types to other 128 bit types. */
20482 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20483 {
20484 unsigned num_regs = (from_size + 15) / 16;
20485 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20486 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20487 return false;
20488
20489 return (from_size == 8 || from_size == 16);
20490 }
20491
20492 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20493 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20494 return false;
20495
20496 return true;
20497 }
20498
20499 /* Debug version of rs6000_can_change_mode_class. */
20500 static bool
20501 rs6000_debug_can_change_mode_class (machine_mode from,
20502 machine_mode to,
20503 reg_class_t rclass)
20504 {
20505 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20506
20507 fprintf (stderr,
20508 "rs6000_can_change_mode_class, return %s, from = %s, "
20509 "to = %s, rclass = %s\n",
20510 ret ? "true" : "false",
20511 GET_MODE_NAME (from), GET_MODE_NAME (to),
20512 reg_class_names[rclass]);
20513
20514 return ret;
20515 }
20516 \f
20517 /* Return a string to do a move operation of 128 bits of data. */
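/* E.g. with ISA 3.0 direct moves, a 128-bit GPR-to-VSX copy comes out
   as "mtvsrdd %x0,%1,%L1" (operands swapped on little-endian), while
   older direct-move targets get "#" and the move is split later.  */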
20518
20519 const char *
20520 rs6000_output_move_128bit (rtx operands[])
20521 {
20522 rtx dest = operands[0];
20523 rtx src = operands[1];
20524 machine_mode mode = GET_MODE (dest);
20525 int dest_regno;
20526 int src_regno;
20527 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20528 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20529
20530 if (REG_P (dest))
20531 {
20532 dest_regno = REGNO (dest);
20533 dest_gpr_p = INT_REGNO_P (dest_regno);
20534 dest_fp_p = FP_REGNO_P (dest_regno);
20535 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20536 dest_vsx_p = dest_fp_p | dest_vmx_p;
20537 }
20538 else
20539 {
20540 dest_regno = -1;
20541 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20542 }
20543
20544 if (REG_P (src))
20545 {
20546 src_regno = REGNO (src);
20547 src_gpr_p = INT_REGNO_P (src_regno);
20548 src_fp_p = FP_REGNO_P (src_regno);
20549 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20550 src_vsx_p = src_fp_p | src_vmx_p;
20551 }
20552 else
20553 {
20554 src_regno = -1;
20555 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20556 }
20557
20558 /* Register moves. */
20559 if (dest_regno >= 0 && src_regno >= 0)
20560 {
20561 if (dest_gpr_p)
20562 {
20563 if (src_gpr_p)
20564 return "#";
20565
20566 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20567 return (WORDS_BIG_ENDIAN
20568 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20569 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20570
20571 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20572 return "#";
20573 }
20574
20575 else if (TARGET_VSX && dest_vsx_p)
20576 {
20577 if (src_vsx_p)
20578 return "xxlor %x0,%x1,%x1";
20579
20580 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20581 return (WORDS_BIG_ENDIAN
20582 ? "mtvsrdd %x0,%1,%L1"
20583 : "mtvsrdd %x0,%L1,%1");
20584
20585 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20586 return "#";
20587 }
20588
20589 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20590 return "vor %0,%1,%1";
20591
20592 else if (dest_fp_p && src_fp_p)
20593 return "#";
20594 }
20595
20596 /* Loads. */
20597 else if (dest_regno >= 0 && MEM_P (src))
20598 {
20599 if (dest_gpr_p)
20600 {
20601 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20602 return "lq %0,%1";
20603 else
20604 return "#";
20605 }
20606
20607 else if (TARGET_ALTIVEC && dest_vmx_p
20608 && altivec_indexed_or_indirect_operand (src, mode))
20609 return "lvx %0,%y1";
20610
20611 else if (TARGET_VSX && dest_vsx_p)
20612 {
20613 if (mode_supports_dq_form (mode)
20614 && quad_address_p (XEXP (src, 0), mode, true))
20615 return "lxv %x0,%1";
20616
20617 else if (TARGET_P9_VECTOR)
20618 return "lxvx %x0,%y1";
20619
20620 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20621 return "lxvw4x %x0,%y1";
20622
20623 else
20624 return "lxvd2x %x0,%y1";
20625 }
20626
20627 else if (TARGET_ALTIVEC && dest_vmx_p)
20628 return "lvx %0,%y1";
20629
20630 else if (dest_fp_p)
20631 return "#";
20632 }
20633
20634 /* Stores. */
20635 else if (src_regno >= 0 && MEM_P (dest))
20636 {
20637 if (src_gpr_p)
20638 {
20639 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20640 return "stq %1,%0";
20641 else
20642 return "#";
20643 }
20644
20645 else if (TARGET_ALTIVEC && src_vmx_p
20646 && altivec_indexed_or_indirect_operand (dest, mode))
20647 return "stvx %1,%y0";
20648
20649 else if (TARGET_VSX && src_vsx_p)
20650 {
20651 if (mode_supports_dq_form (mode)
20652 && quad_address_p (XEXP (dest, 0), mode, true))
20653 return "stxv %x1,%0";
20654
20655 else if (TARGET_P9_VECTOR)
20656 return "stxvx %x1,%y0";
20657
20658 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20659 return "stxvw4x %x1,%y0";
20660
20661 else
20662 return "stxvd2x %x1,%y0";
20663 }
20664
20665 else if (TARGET_ALTIVEC && src_vmx_p)
20666 return "stvx %1,%y0";
20667
20668 else if (src_fp_p)
20669 return "#";
20670 }
20671
20672 /* Constants. */
20673 else if (dest_regno >= 0
20674 && (CONST_INT_P (src)
20675 || CONST_WIDE_INT_P (src)
20676 || CONST_DOUBLE_P (src)
20677 || GET_CODE (src) == CONST_VECTOR))
20678 {
20679 if (dest_gpr_p)
20680 return "#";
20681
20682 else if ((dest_vmx_p && TARGET_ALTIVEC)
20683 || (dest_vsx_p && TARGET_VSX))
20684 return output_vec_const_move (operands);
20685 }
20686
20687 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20688 }
20689
20690 /* Validate a 128-bit move. */
20691 bool
20692 rs6000_move_128bit_ok_p (rtx operands[])
20693 {
20694 machine_mode mode = GET_MODE (operands[0]);
20695 return (gpc_reg_operand (operands[0], mode)
20696 || gpc_reg_operand (operands[1], mode));
20697 }
20698
20699 /* Return true if a 128-bit move needs to be split. */
20700 bool
20701 rs6000_split_128bit_ok_p (rtx operands[])
20702 {
20703 if (!reload_completed)
20704 return false;
20705
20706 if (!gpr_or_gpr_p (operands[0], operands[1]))
20707 return false;
20708
20709 if (quad_load_store_p (operands[0], operands[1]))
20710 return false;
20711
20712 return true;
20713 }
20714
20715 \f
20716 /* Given a comparison operation, return the bit number in CCR to test. We
20717 know this is a valid comparison.
20718
20719 SCC_P is 1 if this is for an scc. That means that %D will have been
20720 used instead of %C, so the bits will be in different places.
20721
20722 Return -1 if OP isn't a valid comparison for some reason. */
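/* Worked example (taking CR0_REGNO as 68, so register 69 is CR1):
   for (gt (reg:CC 69) (const_int 0)), base_bit is 4 * (69 - 68) = 4,
   and GT is bit 1 within the field, so the result is 5.  */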
20723
20724 int
20725 ccr_bit (rtx op, int scc_p)
20726 {
20727 enum rtx_code code = GET_CODE (op);
20728 machine_mode cc_mode;
20729 int cc_regnum;
20730 int base_bit;
20731 rtx reg;
20732
20733 if (!COMPARISON_P (op))
20734 return -1;
20735
20736 reg = XEXP (op, 0);
20737
20738 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20739 return -1;
20740
20741 cc_mode = GET_MODE (reg);
20742 cc_regnum = REGNO (reg);
20743 base_bit = 4 * (cc_regnum - CR0_REGNO);
20744
20745 validate_condition_mode (code, cc_mode);
20746
20747 /* When generating a sCOND operation, only positive conditions are
20748 allowed. */
20749 if (scc_p)
20750 switch (code)
20751 {
20752 case EQ:
20753 case GT:
20754 case LT:
20755 case UNORDERED:
20756 case GTU:
20757 case LTU:
20758 break;
20759 default:
20760 return -1;
20761 }
20762
20763 switch (code)
20764 {
20765 case NE:
20766 return scc_p ? base_bit + 3 : base_bit + 2;
20767 case EQ:
20768 return base_bit + 2;
20769 case GT: case GTU: case UNLE:
20770 return base_bit + 1;
20771 case LT: case LTU: case UNGE:
20772 return base_bit;
20773 case ORDERED: case UNORDERED:
20774 return base_bit + 3;
20775
20776 case GE: case GEU:
20777 /* If scc, we will have done a cror to put the bit in the
20778 unordered position. So test that bit. For integer, this is ! LT
20779 unless this is an scc insn. */
20780 return scc_p ? base_bit + 3 : base_bit;
20781
20782 case LE: case LEU:
20783 return scc_p ? base_bit + 3 : base_bit + 1;
20784
20785 default:
20786 return -1;
20787 }
20788 }
20789 \f
20790 /* Return the GOT register. */
20791
20792 rtx
20793 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20794 {
20795 /* The second flow pass currently (June 1999) can't update
20796 regs_ever_live without disturbing other parts of the compiler, so
20797 update it here to make the prolog/epilogue code happy. */
20798 if (!can_create_pseudo_p ()
20799 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20800 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20801
20802 crtl->uses_pic_offset_table = 1;
20803
20804 return pic_offset_table_rtx;
20805 }
20806 \f
20807 static rs6000_stack_t stack_info;
20808
20809 /* Function to init struct machine_function.
20810 This will be called, via a pointer variable,
20811 from push_function_context. */
20812
20813 static struct machine_function *
20814 rs6000_init_machine_status (void)
20815 {
20816 stack_info.reload_completed = 0;
20817 return ggc_cleared_alloc<machine_function> ();
20818 }
20819 \f
20820 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20821
20822 /* Write out a function code label. */
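/* E.g. under ABI_AIX with dot-symbols the code label for function
   "foo" is emitted as ".foo"; the plain name "foo" labels its
   function descriptor instead.  */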
20823
20824 void
20825 rs6000_output_function_entry (FILE *file, const char *fname)
20826 {
20827 if (fname[0] != '.')
20828 {
20829 switch (DEFAULT_ABI)
20830 {
20831 default:
20832 gcc_unreachable ();
20833
20834 case ABI_AIX:
20835 if (DOT_SYMBOLS)
20836 putc ('.', file);
20837 else
20838 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20839 break;
20840
20841 case ABI_ELFv2:
20842 case ABI_V4:
20843 case ABI_DARWIN:
20844 break;
20845 }
20846 }
20847
20848 RS6000_OUTPUT_BASENAME (file, fname);
20849 }
20850
20851 /* Print an operand. Recognize special options, documented below. */
20852
20853 #if TARGET_ELF
20854 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20855 only introduced by the linker, when applying the sda21
20856 relocation. */
20857 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20858 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20859 #else
20860 #define SMALL_DATA_RELOC "sda21"
20861 #define SMALL_DATA_REG 0
20862 #endif
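/* A few illustrative cases, assuming operand 0 is r3 and the default
   numeric register names are in use: "%0" prints "3", "%L0" prints
   "4" (the register holding the second word of a DImode value), and
   "%U0" appends "u" when the memory operand uses a pre-increment,
   pre-decrement or pre-modify address.  */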
20863
20864 void
20865 print_operand (FILE *file, rtx x, int code)
20866 {
20867 int i;
20868 unsigned HOST_WIDE_INT uval;
20869
20870 switch (code)
20871 {
20872 /* %a is output_address. */
20873
20874 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20875 output_operand. */
20876
20877 case 'D':
20878 /* Like 'J' but get to the GT bit only. */
20879 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20880 {
20881 output_operand_lossage ("invalid %%D value");
20882 return;
20883 }
20884
20885 /* Bit 1 is GT bit. */
20886 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20887
20888 /* Add one for shift count in rlinm for scc. */
20889 fprintf (file, "%d", i + 1);
20890 return;
20891
20892 case 'e':
20893 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20894 if (! INT_P (x))
20895 {
20896 output_operand_lossage ("invalid %%e value");
20897 return;
20898 }
20899
20900 uval = INTVAL (x);
20901 if ((uval & 0xffff) == 0 && uval != 0)
20902 putc ('s', file);
20903 return;
20904
20905 case 'E':
20906 /* X is a CR register. Print the number of the EQ bit of the CR. */
20907 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20908 output_operand_lossage ("invalid %%E value");
20909 else
20910 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20911 return;
20912
20913 case 'f':
20914 /* X is a CR register. Print the shift count needed to move it
20915 to the high-order four bits. */
20916 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20917 output_operand_lossage ("invalid %%f value");
20918 else
20919 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20920 return;
20921
20922 case 'F':
20923 /* Similar, but print the count for the rotate in the opposite
20924 direction. */
20925 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20926 output_operand_lossage ("invalid %%F value");
20927 else
20928 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20929 return;
20930
20931 case 'G':
20932 /* X is a constant integer. If it is negative, print "m",
20933 otherwise print "z". This is to make an aze or ame insn. */
20934 if (!CONST_INT_P (x))
20935 output_operand_lossage ("invalid %%G value");
20936 else if (INTVAL (x) >= 0)
20937 putc ('z', file);
20938 else
20939 putc ('m', file);
20940 return;
20941
20942 case 'h':
20943 /* If constant, output low-order five bits. Otherwise, write
20944 normally. */
20945 if (INT_P (x))
20946 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20947 else
20948 print_operand (file, x, 0);
20949 return;
20950
20951 case 'H':
20952 /* If constant, output low-order six bits. Otherwise, write
20953 normally. */
20954 if (INT_P (x))
20955 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20956 else
20957 print_operand (file, x, 0);
20958 return;
20959
20960 case 'I':
20961 /* Print `i' if this is a constant, else nothing. */
20962 if (INT_P (x))
20963 putc ('i', file);
20964 return;
20965
20966 case 'j':
20967 /* Write the bit number in CCR for jump. */
20968 i = ccr_bit (x, 0);
20969 if (i == -1)
20970 output_operand_lossage ("invalid %%j code");
20971 else
20972 fprintf (file, "%d", i);
20973 return;
20974
20975 case 'J':
20976 /* Similar, but add one for shift count in rlinm for scc and pass
20977 scc flag to `ccr_bit'. */
20978 i = ccr_bit (x, 1);
20979 if (i == -1)
20980 output_operand_lossage ("invalid %%J code");
20981 else
20982 /* If we want bit 31, write a shift count of zero, not 32. */
20983 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20984 return;
20985
20986 case 'k':
20987 /* X must be a constant. Write the 1's complement of the
20988 constant. */
20989 if (! INT_P (x))
20990 output_operand_lossage ("invalid %%k value");
20991 else
20992 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20993 return;
20994
20995 case 'K':
20996 /* X must be a symbolic constant on ELF. Write an
20997 expression suitable for an 'addi' that adds in the low 16
20998 bits of the MEM. */
20999 if (GET_CODE (x) == CONST)
21000 {
21001 if (GET_CODE (XEXP (x, 0)) != PLUS
21002 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
21003 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
21004 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
21005 output_operand_lossage ("invalid %%K value");
21006 }
21007 print_operand_address (file, x);
21008 fputs ("@l", file);
21009 return;
21010
21011 /* %l is output_asm_label. */
21012
21013 case 'L':
21014 /* Write second word of DImode or DFmode reference. Works on register
21015 or non-indexed memory only. */
21016 if (REG_P (x))
21017 fputs (reg_names[REGNO (x) + 1], file);
21018 else if (MEM_P (x))
21019 {
21020 machine_mode mode = GET_MODE (x);
21021 /* Handle possible auto-increment. Since it is pre-increment and
21022 we have already done it, we can just use an offset of one word. */
21023 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21024 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21025 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21026 UNITS_PER_WORD));
21027 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21028 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
21029 UNITS_PER_WORD));
21030 else
21031 output_address (mode, XEXP (adjust_address_nv (x, SImode,
21032 UNITS_PER_WORD),
21033 0));
21034
21035 if (small_data_operand (x, GET_MODE (x)))
21036 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21037 reg_names[SMALL_DATA_REG]);
21038 }
21039 return;
21040
21041 case 'N': /* Unused */
21042 /* Write the number of elements in the vector times 4. */
21043 if (GET_CODE (x) != PARALLEL)
21044 output_operand_lossage ("invalid %%N value");
21045 else
21046 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21047 return;
21048
21049 case 'O': /* Unused */
21050 /* Similar, but subtract 1 first. */
21051 if (GET_CODE (x) != PARALLEL)
21052 output_operand_lossage ("invalid %%O value");
21053 else
21054 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21055 return;
21056
21057 case 'p':
21058 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21059 if (! INT_P (x)
21060 || INTVAL (x) < 0
21061 || (i = exact_log2 (INTVAL (x))) < 0)
21062 output_operand_lossage ("invalid %%p value");
21063 else
21064 fprintf (file, "%d", i);
21065 return;
21066
21067 case 'P':
21068 /* The operand must be an indirect memory reference. The result
21069 is the register name. */
21070 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
21071 || REGNO (XEXP (x, 0)) >= 32)
21072 output_operand_lossage ("invalid %%P value");
21073 else
21074 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21075 return;
21076
21077 case 'q':
21078 /* This outputs the logical code corresponding to a boolean
21079 expression. The expression may have one or both operands
21080 negated (if one, only the first one). For condition register
21081 logical operations, it will also treat the negated
21082 CR codes as NOTs, but not handle NOTs of them. */
21083 {
21084 const char *const *t = 0;
21085 const char *s;
21086 enum rtx_code code = GET_CODE (x);
21087 static const char * const tbl[3][3] = {
21088 { "and", "andc", "nor" },
21089 { "or", "orc", "nand" },
21090 { "xor", "eqv", "xor" } };
21091
21092 if (code == AND)
21093 t = tbl[0];
21094 else if (code == IOR)
21095 t = tbl[1];
21096 else if (code == XOR)
21097 t = tbl[2];
21098 else
21099 output_operand_lossage ("invalid %%q value");
21100
21101 if (GET_CODE (XEXP (x, 0)) != NOT)
21102 s = t[0];
21103 else
21104 {
21105 if (GET_CODE (XEXP (x, 1)) == NOT)
21106 s = t[2];
21107 else
21108 s = t[1];
21109 }
21110
21111 fputs (s, file);
21112 }
21113 return;
21114
21115 case 'Q':
21116 if (! TARGET_MFCRF)
21117 return;
21118 fputc (',', file);
21119 /* FALLTHRU */
21120
21121 case 'R':
21122 /* X is a CR register. Print the mask for `mtcrf'. */
21123 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21124 output_operand_lossage ("invalid %%R value");
21125 else
21126 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21127 return;
21128
21129 case 's':
21130 /* Low 5 bits of 32 - value. */
21131 if (! INT_P (x))
21132 output_operand_lossage ("invalid %%s value");
21133 else
21134 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21135 return;
21136
21137 case 't':
21138 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21139 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21140 {
21141 output_operand_lossage ("invalid %%t value");
21142 return;
21143 }
21144
21145 /* Bit 3 is OV bit. */
21146 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21147
21148 /* If we want bit 31, write a shift count of zero, not 32. */
21149 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21150 return;
21151
21152 case 'T':
21153 /* Print the symbolic name of a branch target register. */
21154 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21155 x = XVECEXP (x, 0, 0);
21156 if (!REG_P (x) || (REGNO (x) != LR_REGNO
21157 && REGNO (x) != CTR_REGNO))
21158 output_operand_lossage ("invalid %%T value");
21159 else if (REGNO (x) == LR_REGNO)
21160 fputs ("lr", file);
21161 else
21162 fputs ("ctr", file);
21163 return;
21164
21165 case 'u':
21166 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21167 for use in unsigned operand. */
21168 if (! INT_P (x))
21169 {
21170 output_operand_lossage ("invalid %%u value");
21171 return;
21172 }
21173
21174 uval = INTVAL (x);
21175 if ((uval & 0xffff) == 0)
21176 uval >>= 16;
21177
21178 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21179 return;
21180
21181 case 'v':
21182 /* High-order 16 bits of constant for use in signed operand. */
21183 if (! INT_P (x))
21184 output_operand_lossage ("invalid %%v value");
21185 else
21186 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21187 (INTVAL (x) >> 16) & 0xffff);
21188 return;
21189
21190 case 'U':
21191 /* Print `u' if this has an auto-increment or auto-decrement. */
21192 if (MEM_P (x)
21193 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21194 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21195 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21196 putc ('u', file);
21197 return;
21198
21199 case 'V':
21200 /* Print the trap code for this operand. */
21201 switch (GET_CODE (x))
21202 {
21203 case EQ:
21204 fputs ("eq", file); /* 4 */
21205 break;
21206 case NE:
21207 fputs ("ne", file); /* 24 */
21208 break;
21209 case LT:
21210 fputs ("lt", file); /* 16 */
21211 break;
21212 case LE:
21213 fputs ("le", file); /* 20 */
21214 break;
21215 case GT:
21216 fputs ("gt", file); /* 8 */
21217 break;
21218 case GE:
21219 fputs ("ge", file); /* 12 */
21220 break;
21221 case LTU:
21222 fputs ("llt", file); /* 2 */
21223 break;
21224 case LEU:
21225 fputs ("lle", file); /* 6 */
21226 break;
21227 case GTU:
21228 fputs ("lgt", file); /* 1 */
21229 break;
21230 case GEU:
21231 fputs ("lge", file); /* 5 */
21232 break;
21233 default:
21234 output_operand_lossage ("invalid %%V value");
21235 }
21236 break;
21237
21238 case 'w':
21239 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21240 normally. */
21241 if (INT_P (x))
21242 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21243 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21244 else
21245 print_operand (file, x, 0);
21246 return;
21247
21248 case 'x':
21249 /* X is a FPR or Altivec register used in a VSX context. */
21250 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
21251 output_operand_lossage ("invalid %%x value");
21252 else
21253 {
21254 int reg = REGNO (x);
21255 int vsx_reg = (FP_REGNO_P (reg)
21256 ? reg - 32
21257 : reg - FIRST_ALTIVEC_REGNO + 32);
21258
21259 #ifdef TARGET_REGNAMES
21260 if (TARGET_REGNAMES)
21261 fprintf (file, "%%vs%d", vsx_reg);
21262 else
21263 #endif
21264 fprintf (file, "%d", vsx_reg);
21265 }
21266 return;
21267
21268 case 'X':
21269 if (MEM_P (x)
21270 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21271 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21272 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21273 putc ('x', file);
21274 return;
21275
21276 case 'Y':
21277 /* Like 'L', for third word of TImode/PTImode. */
21278 if (REG_P (x))
21279 fputs (reg_names[REGNO (x) + 2], file);
21280 else if (MEM_P (x))
21281 {
21282 machine_mode mode = GET_MODE (x);
21283 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21284 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21285 output_address (mode, plus_constant (Pmode,
21286 XEXP (XEXP (x, 0), 0), 8));
21287 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21288 output_address (mode, plus_constant (Pmode,
21289 XEXP (XEXP (x, 0), 0), 8));
21290 else
21291 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21292 if (small_data_operand (x, GET_MODE (x)))
21293 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21294 reg_names[SMALL_DATA_REG]);
21295 }
21296 return;
21297
21298 case 'z':
21299 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21300 x = XVECEXP (x, 0, 1);
21301 /* X is a SYMBOL_REF. Write out the name preceded by a
21302 period and without any trailing data in brackets. Used for function
21303 names. If we are configured for System V (or the embedded ABI) on
21304 the PowerPC, do not emit the period, since those systems do not use
21305 TOCs and the like. */
21306 if (!SYMBOL_REF_P (x))
21307 {
21308 output_operand_lossage ("invalid %%z value");
21309 return;
21310 }
21311
21312 /* For macho, check to see if we need a stub. */
21313 if (TARGET_MACHO)
21314 {
21315 const char *name = XSTR (x, 0);
21316 #if TARGET_MACHO
21317 if (darwin_emit_branch_islands
21318 && MACHOPIC_INDIRECT
21319 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21320 name = machopic_indirection_name (x, /*stub_p=*/true);
21321 #endif
21322 assemble_name (file, name);
21323 }
21324 else if (!DOT_SYMBOLS)
21325 assemble_name (file, XSTR (x, 0));
21326 else
21327 rs6000_output_function_entry (file, XSTR (x, 0));
21328 return;
21329
21330 case 'Z':
21331 /* Like 'L', for last word of TImode/PTImode. */
21332 if (REG_P (x))
21333 fputs (reg_names[REGNO (x) + 3], file);
21334 else if (MEM_P (x))
21335 {
21336 machine_mode mode = GET_MODE (x);
21337 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21338 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21339 output_address (mode, plus_constant (Pmode,
21340 XEXP (XEXP (x, 0), 0), 12));
21341 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21342 output_address (mode, plus_constant (Pmode,
21343 XEXP (XEXP (x, 0), 0), 12));
21344 else
21345 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21346 if (small_data_operand (x, GET_MODE (x)))
21347 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21348 reg_names[SMALL_DATA_REG]);
21349 }
21350 return;
21351
21352 /* Print AltiVec memory operand. */
21353 case 'y':
21354 {
21355 rtx tmp;
21356
21357 gcc_assert (MEM_P (x));
21358
21359 tmp = XEXP (x, 0);
21360
21361 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21362 && GET_CODE (tmp) == AND
21363 && CONST_INT_P (XEXP (tmp, 1))
21364 && INTVAL (XEXP (tmp, 1)) == -16)
21365 tmp = XEXP (tmp, 0);
21366 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21367 && GET_CODE (tmp) == PRE_MODIFY)
21368 tmp = XEXP (tmp, 1);
21369 if (REG_P (tmp))
21370 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21371 else
21372 {
21373 if (GET_CODE (tmp) != PLUS
21374 || !REG_P (XEXP (tmp, 0))
21375 || !REG_P (XEXP (tmp, 1)))
21376 {
21377 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21378 break;
21379 }
21380
21381 if (REGNO (XEXP (tmp, 0)) == 0)
21382 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21383 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21384 else
21385 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21386 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21387 }
21388 break;
21389 }
21390
21391 case 0:
21392 if (REG_P (x))
21393 fprintf (file, "%s", reg_names[REGNO (x)]);
21394 else if (MEM_P (x))
21395 {
21396 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21397 know the width from the mode. */
21398 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21399 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21400 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21401 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21402 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21403 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21404 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21405 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21406 else
21407 output_address (GET_MODE (x), XEXP (x, 0));
21408 }
21409 else if (toc_relative_expr_p (x, false,
21410 &tocrel_base_oac, &tocrel_offset_oac))
21411 /* This hack along with a corresponding hack in
21412 rs6000_output_addr_const_extra arranges to output addends
21413 where the assembler expects to find them. E.g.
21414 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21415 without this hack would be output as "x@toc+4". We
21416 want "x+4@toc". */
21417 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21418 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21419 output_addr_const (file, XVECEXP (x, 0, 0));
21420 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21421 output_addr_const (file, XVECEXP (x, 0, 1));
21422 else
21423 output_addr_const (file, x);
21424 return;
21425
21426 case '&':
21427 if (const char *name = get_some_local_dynamic_name ())
21428 assemble_name (file, name);
21429 else
21430 output_operand_lossage ("'%%&' used without any "
21431 "local dynamic TLS references");
21432 return;
21433
21434 default:
21435 output_operand_lossage ("invalid %%xn code");
21436 }
21437 }
21438 \f
21439 /* Print the address of an operand. */
21440
21441 void
21442 print_operand_address (FILE *file, rtx x)
21443 {
21444 if (REG_P (x))
21445 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21446 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21447 || GET_CODE (x) == LABEL_REF)
21448 {
21449 output_addr_const (file, x);
21450 if (small_data_operand (x, GET_MODE (x)))
21451 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21452 reg_names[SMALL_DATA_REG]);
21453 else
21454 gcc_assert (!TARGET_TOC);
21455 }
21456 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21457 && REG_P (XEXP (x, 1)))
21458 {
21459 if (REGNO (XEXP (x, 0)) == 0)
21460 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21461 reg_names[ REGNO (XEXP (x, 0)) ]);
21462 else
21463 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21464 reg_names[ REGNO (XEXP (x, 1)) ]);
21465 }
21466 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21467 && CONST_INT_P (XEXP (x, 1)))
21468 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21469 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21470 #if TARGET_MACHO
21471 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21472 && CONSTANT_P (XEXP (x, 1)))
21473 {
21474 fprintf (file, "lo16(");
21475 output_addr_const (file, XEXP (x, 1));
21476 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21477 }
21478 #endif
21479 #if TARGET_ELF
21480 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21481 && CONSTANT_P (XEXP (x, 1)))
21482 {
21483 output_addr_const (file, XEXP (x, 1));
21484 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21485 }
21486 #endif
21487 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21488 {
21489 /* This hack along with a corresponding hack in
21490 rs6000_output_addr_const_extra arranges to output addends
21491 where the assembler expects to find them. E.g.
21492 (lo_sum (reg 9)
21493 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21494 without this hack would be output as "x@toc+8@l(9)". We
21495 want "x+8@toc@l(9)". */
21496 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21497 if (GET_CODE (x) == LO_SUM)
21498 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21499 else
21500 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21501 }
21502 else
21503 output_addr_const (file, x);
21504 }
21505 \f
21506 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21507
21508 static bool
21509 rs6000_output_addr_const_extra (FILE *file, rtx x)
21510 {
21511 if (GET_CODE (x) == UNSPEC)
21512 switch (XINT (x, 1))
21513 {
21514 case UNSPEC_TOCREL:
21515 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21516 && REG_P (XVECEXP (x, 0, 1))
21517 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21518 output_addr_const (file, XVECEXP (x, 0, 0));
21519 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21520 {
21521 if (INTVAL (tocrel_offset_oac) >= 0)
21522 fprintf (file, "+");
21523 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21524 }
21525 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21526 {
21527 putc ('-', file);
21528 assemble_name (file, toc_label_name);
21529 need_toc_init = 1;
21530 }
21531 else if (TARGET_ELF)
21532 fputs ("@toc", file);
21533 return true;
21534
21535 #if TARGET_MACHO
21536 case UNSPEC_MACHOPIC_OFFSET:
21537 output_addr_const (file, XVECEXP (x, 0, 0));
21538 putc ('-', file);
21539 machopic_output_function_base_name (file);
21540 return true;
21541 #endif
21542 }
21543 return false;
21544 }
21545 \f
21546 /* Target hook for assembling integer objects. The PowerPC version has
21547 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21548 is defined. It also needs to handle DI-mode objects on 64-bit
21549 targets. */
21550
21551 static bool
21552 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21553 {
21554 #ifdef RELOCATABLE_NEEDS_FIXUP
21555 /* Special handling for SI values. */
21556 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21557 {
21558 static int recurse = 0;
21559
21560 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21561 the .fixup section. Since the TOC section is already relocated, we
21562 don't need to mark it here. We used to skip the text section, but it
21563 should never be valid for relocated addresses to be placed in the text
21564 section. */
21565 if (DEFAULT_ABI == ABI_V4
21566 && (TARGET_RELOCATABLE || flag_pic > 1)
21567 && in_section != toc_section
21568 && !recurse
21569 && !CONST_SCALAR_INT_P (x)
21570 && CONSTANT_P (x))
21571 {
21572 char buf[256];
21573
21574 recurse = 1;
21575 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21576 fixuplabelno++;
21577 ASM_OUTPUT_LABEL (asm_out_file, buf);
21578 fprintf (asm_out_file, "\t.long\t(");
21579 output_addr_const (asm_out_file, x);
21580 fprintf (asm_out_file, ")@fixup\n");
21581 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21582 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21583 fprintf (asm_out_file, "\t.long\t");
21584 assemble_name (asm_out_file, buf);
21585 fprintf (asm_out_file, "\n\t.previous\n");
21586 recurse = 0;
21587 return true;
21588 }
21589 /* Remove initial .'s to turn a -mcall-aixdesc function
21590 address into the address of the descriptor, not the function
21591 itself. */
21592 else if (SYMBOL_REF_P (x)
21593 && XSTR (x, 0)[0] == '.'
21594 && DEFAULT_ABI == ABI_AIX)
21595 {
21596 const char *name = XSTR (x, 0);
21597 while (*name == '.')
21598 name++;
21599
21600 fprintf (asm_out_file, "\t.long\t%s\n", name);
21601 return true;
21602 }
21603 }
21604 #endif /* RELOCATABLE_NEEDS_FIXUP */
21605 return default_assemble_integer (x, size, aligned_p);
21606 }
21607
21608 /* Return a template string for assembly to emit when making an
21609 external call. FUNOP is the call mem argument operand number. */
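/* For example, a non-sibling call to a hypothetical "foo" under
   ABI_AIX or ABI_ELFv2 comes out as "bl foo\n\tnop", the nop filling
   the slot the linker may turn into a TOC restore, while ABI_V4 with
   -fPIC produces "bl foo@plt".  */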
21610
21611 static const char *
21612 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21613 {
21614 /* -Wformat-overflow workaround, without which gcc thinks that %u
21615 might produce 10 digits. */
21616 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21617
21618 char arg[12];
21619 arg[0] = 0;
21620 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21621 {
21622 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21623 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21624 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21625 sprintf (arg, "(%%&@tlsld)");
21626 else
21627 gcc_unreachable ();
21628 }
21629
21630 /* The magic 32768 offset here corresponds to the offset of
21631 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21632 char z[11];
21633 sprintf (z, "%%z%u%s", funop,
21634 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21635 ? "+32768" : ""));
21636
21637 static char str[32]; /* 2 spare */
21638 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21639 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21640 sibcall ? "" : "\n\tnop");
21641 else if (DEFAULT_ABI == ABI_V4)
21642 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21643 flag_pic ? "@plt" : "");
21644 #if TARGET_MACHO
21645 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21646 else if (DEFAULT_ABI == ABI_DARWIN)
21647 {
21648 /* The cookie is in operand func+2. */
21649 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21650 int cookie = INTVAL (operands[funop + 2]);
21651 if (cookie & CALL_LONG)
21652 {
21653 tree funname = get_identifier (XSTR (operands[funop], 0));
21654 tree labelname = get_prev_label (funname);
21655 gcc_checking_assert (labelname && !sibcall);
21656
21657 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21658 instruction will reach 'foo', otherwise link as 'bl L42'".
21659 "L42" should be a 'branch island', that will do a far jump to
21660 'foo'. Branch islands are generated in
21661 macho_branch_islands(). */
21662 sprintf (str, "jbsr %%z%u,%.10s", funop,
21663 IDENTIFIER_POINTER (labelname));
21664 }
21665 else
21666 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21667 after the call. */
21668 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21669 }
21670 #endif
21671 else
21672 gcc_unreachable ();
21673 return str;
21674 }
21675
21676 const char *
21677 rs6000_call_template (rtx *operands, unsigned int funop)
21678 {
21679 return rs6000_call_template_1 (operands, funop, false);
21680 }
21681
21682 const char *
21683 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21684 {
21685 return rs6000_call_template_1 (operands, funop, true);
21686 }
21687
21688 /* As above, for indirect calls. */
21689
21690 static const char *
21691 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21692 bool sibcall)
21693 {
21694 /* -Wformat-overflow workaround, without which gcc thinks that %u
21695 might produce 10 digits. Note that -Wformat-overflow will not
21696 currently warn here for str[], so do not rely on a warning to
21697 ensure str[] is correctly sized. */
21698 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21699
21700 /* Currently, funop is either 0 or 1. The maximum string is always
21701 a !speculate 64-bit __tls_get_addr call.
21702
21703 ABI_AIX:
21704 . 9 ld 2,%3\n\t
21705 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21706 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21707 . 9 crset 2\n\t
21708 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21709 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21710 . 10 beq%T1l-\n\t
21711 . 10 ld 2,%4(1)
21712 .---
21713 .151
21714
21715 ABI_ELFv2:
21716 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21717 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21718 . 9 crset 2\n\t
21719 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21720 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21721 . 10 beq%T1l-\n\t
21722 . 10 ld 2,%3(1)
21723 .---
21724 .142
21725
21726 ABI_V4:
21727 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21728 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21729 . 9 crset 2\n\t
21730 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21731 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21732 . 8 beq%T1l-
21733 .---
21734 .141 */
21735 static char str[160]; /* 8 spare */
21736 char *s = str;
21737 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21738
21739 if (DEFAULT_ABI == ABI_AIX)
21740 s += sprintf (s,
21741 "l%s 2,%%%u\n\t",
21742 ptrload, funop + 2);
21743
21744 /* We don't need the extra code to stop indirect call speculation if
21745 calling via LR. */
21746 bool speculate = (TARGET_MACHO
21747 || rs6000_speculate_indirect_jumps
21748 || (REG_P (operands[funop])
21749 && REGNO (operands[funop]) == LR_REGNO));
21750
21751 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21752 {
21753 const char *rel64 = TARGET_64BIT ? "64" : "";
21754 char tls[29];
21755 tls[0] = 0;
21756 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21757 {
21758 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21759 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21760 rel64, funop + 1);
21761 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21762 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21763 rel64);
21764 else
21765 gcc_unreachable ();
21766 }
21767
21768 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21769 && flag_pic == 2 ? "+32768" : "");
21770 if (!speculate)
21771 {
21772 s += sprintf (s,
21773 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21774 tls, rel64, funop, addend);
21775 s += sprintf (s, "crset 2\n\t");
21776 }
21777 s += sprintf (s,
21778 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21779 tls, rel64, funop, addend);
21780 }
21781 else if (!speculate)
21782 s += sprintf (s, "crset 2\n\t");
21783
21784 if (DEFAULT_ABI == ABI_AIX)
21785 {
21786 if (speculate)
21787 sprintf (s,
21788 "b%%T%ul\n\t"
21789 "l%s 2,%%%u(1)",
21790 funop, ptrload, funop + 3);
21791 else
21792 sprintf (s,
21793 "beq%%T%ul-\n\t"
21794 "l%s 2,%%%u(1)",
21795 funop, ptrload, funop + 3);
21796 }
21797 else if (DEFAULT_ABI == ABI_ELFv2)
21798 {
21799 if (speculate)
21800 sprintf (s,
21801 "b%%T%ul\n\t"
21802 "l%s 2,%%%u(1)",
21803 funop, ptrload, funop + 2);
21804 else
21805 sprintf (s,
21806 "beq%%T%ul-\n\t"
21807 "l%s 2,%%%u(1)",
21808 funop, ptrload, funop + 2);
21809 }
21810 else
21811 {
21812 if (speculate)
21813 sprintf (s,
21814 "b%%T%u%s",
21815 funop, sibcall ? "" : "l");
21816 else
21817 sprintf (s,
21818 "beq%%T%u%s-%s",
21819 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21820 }
21821 return str;
21822 }
21823
21824 const char *
21825 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21826 {
21827 return rs6000_indirect_call_template_1 (operands, funop, false);
21828 }
21829
21830 const char *
21831 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21832 {
21833 return rs6000_indirect_call_template_1 (operands, funop, true);
21834 }
21835
21836 #if HAVE_AS_PLTSEQ
21837 /* Output indirect call insns.
21838 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
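/* The four pieces chain into one inline PLT call sequence, each insn
   carrying a .reloc marker; a 64-bit ELFv2 sketch (register numbers
   are illustrative) is:
       std 2,24(1)      <- 0: tocsave   (R_PPC64_PLTSEQ)
       addis 11,2,0     <- 1: plt16_ha  (R_PPC64_PLT16_HA)
       ld 12,0(11)      <- 2: plt16_lo  (R_PPC64_PLT16_LO_DS)
       mtctr 12         <- 3: mtctr     (R_PPC64_PLTSEQ)  */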
21839 const char *
21840 rs6000_pltseq_template (rtx *operands, int which)
21841 {
21842 const char *rel64 = TARGET_64BIT ? "64" : "";
21843 char tls[28];
21844 tls[0] = 0;
21845 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21846 {
21847 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21848 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21849 rel64);
21850 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21851 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21852 rel64);
21853 else
21854 gcc_unreachable ();
21855 }
21856
21857 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21858 static char str[96]; /* 15 spare */
21859 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21860 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21861 && flag_pic == 2 ? "+32768" : "");
21862 switch (which)
21863 {
21864 case 0:
21865 sprintf (str,
21866 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21867 "st%s",
21868 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21869 break;
21870 case 1:
21871 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21872 sprintf (str,
21873 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21874 "lis %%0,0",
21875 tls, off, rel64);
21876 else
21877 sprintf (str,
21878 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21879 "addis %%0,%%1,0",
21880 tls, off, rel64, addend);
21881 break;
21882 case 2:
21883 sprintf (str,
21884 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21885 "l%s %%0,0(%%1)",
21886 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21887 TARGET_64BIT ? "d" : "wz");
21888 break;
21889 case 3:
21890 sprintf (str,
21891 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21892 "mtctr %%1",
21893 tls, rel64, addend);
21894 break;
21895 default:
21896 gcc_unreachable ();
21897 }
21898 return str;
21899 }
21900 #endif
21901
21902 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21903 /* Emit an assembler directive to set symbol visibility for DECL to
21904 VIS. */
21905
21906 static void
21907 rs6000_assemble_visibility (tree decl, int vis)
21908 {
21909 if (TARGET_XCOFF)
21910 return;
21911
21912 /* Functions need to have their entry point symbol visibility set as
21913 well as their descriptor symbol visibility. */
21914 if (DEFAULT_ABI == ABI_AIX
21915 && DOT_SYMBOLS
21916 && TREE_CODE (decl) == FUNCTION_DECL)
21917 {
21918 static const char * const visibility_types[] = {
21919 NULL, "protected", "hidden", "internal"
21920 };
21921
21922 const char *name, *type;
21923
21924 name = ((* targetm.strip_name_encoding)
21925 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21926 type = visibility_types[vis];
21927
21928 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21929 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21930 }
21931 else
21932 default_assemble_visibility (decl, vis);
21933 }
21934 #endif
21935 \f
21936 enum rtx_code
21937 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21938 {
21939 /* Reversing an FP compare needs care -- an ordered compare
21940 becomes an unordered compare and vice versa. */
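/* E.g. reversing GE must yield UNLT, not LT: with a NaN operand both
   GE and LT are false, so the plain reversal would give the wrong
   answer for unordered inputs.  */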
21941 if (mode == CCFPmode
21942 && (!flag_finite_math_only
21943 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21944 || code == UNEQ || code == LTGT))
21945 return reverse_condition_maybe_unordered (code);
21946 else
21947 return reverse_condition (code);
21948 }
21949
21950 /* Generate a compare for CODE. Return a brand-new rtx that
21951 represents the result of the compare. */
21952
21953 static rtx
21954 rs6000_generate_compare (rtx cmp, machine_mode mode)
21955 {
21956 machine_mode comp_mode;
21957 rtx compare_result;
21958 enum rtx_code code = GET_CODE (cmp);
21959 rtx op0 = XEXP (cmp, 0);
21960 rtx op1 = XEXP (cmp, 1);
21961
21962 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21963 comp_mode = CCmode;
21964 else if (FLOAT_MODE_P (mode))
21965 comp_mode = CCFPmode;
21966 else if (code == GTU || code == LTU
21967 || code == GEU || code == LEU)
21968 comp_mode = CCUNSmode;
21969 else if ((code == EQ || code == NE)
21970 && unsigned_reg_p (op0)
21971 && (unsigned_reg_p (op1)
21972 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21973 /* These are unsigned values; perhaps there will be a later
21974 ordering compare that can be shared with this one. */
21975 comp_mode = CCUNSmode;
21976 else
21977 comp_mode = CCmode;
21978
21979 /* If we have an unsigned compare, make sure we don't have a signed value as
21980 an immediate. */
21981 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21982 && INTVAL (op1) < 0)
21983 {
21984 op0 = copy_rtx_if_shared (op0);
21985 op1 = force_reg (GET_MODE (op0), op1);
21986 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21987 }
21988
21989 /* First, the compare. */
21990 compare_result = gen_reg_rtx (comp_mode);
21991
21992 /* IEEE 128-bit support in VSX registers when we do not have hardware
21993 support. */
21994 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21995 {
21996 rtx libfunc = NULL_RTX;
21997 bool check_nan = false;
21998 rtx dest;
21999
22000 switch (code)
22001 {
22002 case EQ:
22003 case NE:
22004 libfunc = optab_libfunc (eq_optab, mode);
22005 break;
22006
22007 case GT:
22008 case GE:
22009 libfunc = optab_libfunc (ge_optab, mode);
22010 break;
22011
22012 case LT:
22013 case LE:
22014 libfunc = optab_libfunc (le_optab, mode);
22015 break;
22016
22017 case UNORDERED:
22018 case ORDERED:
22019 libfunc = optab_libfunc (unord_optab, mode);
22020 code = (code == UNORDERED) ? NE : EQ;
22021 break;
22022
22023 case UNGE:
22024 case UNGT:
22025 check_nan = true;
22026 libfunc = optab_libfunc (ge_optab, mode);
22027 code = (code == UNGE) ? GE : GT;
22028 break;
22029
22030 case UNLE:
22031 case UNLT:
22032 check_nan = true;
22033 libfunc = optab_libfunc (le_optab, mode);
22034 code = (code == UNLE) ? LE : LT;
22035 break;
22036
22037 case UNEQ:
22038 case LTGT:
22039 check_nan = true;
22040 libfunc = optab_libfunc (eq_optab, mode);
	  code = (code == UNEQ) ? EQ : NE;
22042 break;
22043
22044 default:
22045 gcc_unreachable ();
22046 }
22047
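      /* For KFmode these optabs resolve to the libgcc soft-float routines
	 (__eqkf2, __gekf2, __lekf2 and __unordkf2), each returning an
	 SImode value that is then tested against zero with the adjusted
	 CODE.  */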
22048 gcc_assert (libfunc);
22049
22050 if (!check_nan)
22051 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22052 SImode, op0, mode, op1, mode);
22053
22054 /* The library signals an exception for signalling NaNs, so we need to
22055 handle isgreater, etc. by first checking isordered. */
22056 else
22057 {
22058 rtx ne_rtx, normal_dest, unord_dest;
22059 rtx unord_func = optab_libfunc (unord_optab, mode);
22060 rtx join_label = gen_label_rtx ();
22061 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22062 rtx unord_cmp = gen_reg_rtx (comp_mode);
22063
22064
22065 /* Test for either value being a NaN. */
22066 gcc_assert (unord_func);
22067 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22068 SImode, op0, mode, op1, mode);
22069
	  /* Set value (1) if either value is a NaN, and jump to the join
	     label.  */
22072 dest = gen_reg_rtx (SImode);
22073 emit_move_insn (dest, const1_rtx);
22074 emit_insn (gen_rtx_SET (unord_cmp,
22075 gen_rtx_COMPARE (comp_mode, unord_dest,
22076 const0_rtx)));
22077
22078 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22079 emit_jump_insn (gen_rtx_SET (pc_rtx,
22080 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22081 join_ref,
22082 pc_rtx)));
22083
22084 /* Do the normal comparison, knowing that the values are not
22085 NaNs. */
22086 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22087 SImode, op0, mode, op1, mode);
22088
22089 emit_insn (gen_cstoresi4 (dest,
22090 gen_rtx_fmt_ee (code, SImode, normal_dest,
22091 const0_rtx),
22092 normal_dest, const0_rtx));
22093
	  /* Join NaN and non-NaN paths.  Compare dest against 0.  */
22095 emit_label (join_label);
22096 code = NE;
22097 }
22098
22099 emit_insn (gen_rtx_SET (compare_result,
22100 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22101 }
22102
22103 else
22104 {
22105 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22106 CLOBBERs to match cmptf_internal2 pattern. */
22107 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22108 && FLOAT128_IBM_P (GET_MODE (op0))
22109 && TARGET_HARD_FLOAT)
22110 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22111 gen_rtvec (10,
22112 gen_rtx_SET (compare_result,
22113 gen_rtx_COMPARE (comp_mode, op0, op1)),
22114 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22115 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22116 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22117 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22118 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22119 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22120 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22121 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22122 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22123 else if (GET_CODE (op1) == UNSPEC
22124 && XINT (op1, 1) == UNSPEC_SP_TEST)
22125 {
22126 rtx op1b = XVECEXP (op1, 0, 0);
22127 comp_mode = CCEQmode;
22128 compare_result = gen_reg_rtx (CCEQmode);
22129 if (TARGET_64BIT)
22130 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22131 else
22132 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22133 }
22134 else
22135 emit_insn (gen_rtx_SET (compare_result,
22136 gen_rtx_COMPARE (comp_mode, op0, op1)));
22137 }
22138
22139 /* Some kinds of FP comparisons need an OR operation;
22140 under flag_finite_math_only we don't bother. */
22141 if (FLOAT_MODE_P (mode)
22142 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22143 && !flag_finite_math_only
22144 && (code == LE || code == GE
22145 || code == UNEQ || code == LTGT
22146 || code == UNGT || code == UNLT))
22147 {
22148 enum rtx_code or1, or2;
22149 rtx or1_rtx, or2_rtx, compare2_rtx;
22150 rtx or_result = gen_reg_rtx (CCEQmode);
22151
22152 switch (code)
22153 {
22154 case LE: or1 = LT; or2 = EQ; break;
22155 case GE: or1 = GT; or2 = EQ; break;
22156 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22157 case LTGT: or1 = LT; or2 = GT; break;
22158 case UNGT: or1 = UNORDERED; or2 = GT; break;
22159 case UNLT: or1 = UNORDERED; or2 = LT; break;
22160 default: gcc_unreachable ();
22161 }
22162 validate_condition_mode (or1, comp_mode);
22163 validate_condition_mode (or2, comp_mode);
22164 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22165 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22166 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22167 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22168 const_true_rtx);
22169 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22170
22171 compare_result = or_result;
22172 code = EQ;
22173 }
22174
22175 validate_condition_mode (code, GET_MODE (compare_result));
22176
22177 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22178 }
22179
22180 \f
22181 /* Return the diagnostic message string if the binary operation OP is
22182 not permitted on TYPE1 and TYPE2, NULL otherwise. */
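/* For example, without -mfloat128-convert, adding a __float128 value to a
   __ibm128 value is diagnosed with the first message below.  */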
22183
22184 static const char*
22185 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22186 const_tree type1,
22187 const_tree type2)
22188 {
22189 machine_mode mode1 = TYPE_MODE (type1);
22190 machine_mode mode2 = TYPE_MODE (type2);
22191
22192 /* For complex modes, use the inner type. */
22193 if (COMPLEX_MODE_P (mode1))
22194 mode1 = GET_MODE_INNER (mode1);
22195
22196 if (COMPLEX_MODE_P (mode2))
22197 mode2 = GET_MODE_INNER (mode2);
22198
22199 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22200 double to intermix unless -mfloat128-convert. */
22201 if (mode1 == mode2)
22202 return NULL;
22203
22204 if (!TARGET_FLOAT128_CVT)
22205 {
22206 if ((mode1 == KFmode && mode2 == IFmode)
22207 || (mode1 == IFmode && mode2 == KFmode))
22208 return N_("__float128 and __ibm128 cannot be used in the same "
22209 "expression");
22210
22211 if (TARGET_IEEEQUAD
22212 && ((mode1 == IFmode && mode2 == TFmode)
22213 || (mode1 == TFmode && mode2 == IFmode)))
22214 return N_("__ibm128 and long double cannot be used in the same "
22215 "expression");
22216
22217 if (!TARGET_IEEEQUAD
22218 && ((mode1 == KFmode && mode2 == TFmode)
22219 || (mode1 == TFmode && mode2 == KFmode)))
22220 return N_("__float128 and long double cannot be used in the same "
22221 "expression");
22222 }
22223
22224 return NULL;
22225 }
22226
22227 \f
22228 /* Expand floating point conversion to/from __float128 and __ibm128. */
22229
22230 void
22231 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22232 {
22233 machine_mode dest_mode = GET_MODE (dest);
22234 machine_mode src_mode = GET_MODE (src);
22235 convert_optab cvt = unknown_optab;
22236 bool do_move = false;
22237 rtx libfunc = NULL_RTX;
22238 rtx dest2;
22239 typedef rtx (*rtx_2func_t) (rtx, rtx);
22240 rtx_2func_t hw_convert = (rtx_2func_t)0;
22241 size_t kf_or_tf;
22242
22243 struct hw_conv_t {
22244 rtx_2func_t from_df;
22245 rtx_2func_t from_sf;
22246 rtx_2func_t from_si_sign;
22247 rtx_2func_t from_si_uns;
22248 rtx_2func_t from_di_sign;
22249 rtx_2func_t from_di_uns;
22250 rtx_2func_t to_df;
22251 rtx_2func_t to_sf;
22252 rtx_2func_t to_si_sign;
22253 rtx_2func_t to_si_uns;
22254 rtx_2func_t to_di_sign;
22255 rtx_2func_t to_di_uns;
22256 } hw_conversions[2] = {
    /* Conversions to/from KFmode.  */
22258 {
22259 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22260 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22261 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22262 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22263 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22264 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22265 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22266 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22267 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22268 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22269 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22270 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22271 },
22272
    /* Conversions to/from TFmode.  */
22274 {
22275 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22276 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22277 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22278 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22279 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22280 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22281 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22282 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22283 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22284 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22285 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22286 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22287 },
22288 };
22289
22290 if (dest_mode == src_mode)
22291 gcc_unreachable ();
22292
22293 /* Eliminate memory operations. */
22294 if (MEM_P (src))
22295 src = force_reg (src_mode, src);
22296
22297 if (MEM_P (dest))
22298 {
22299 rtx tmp = gen_reg_rtx (dest_mode);
22300 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22301 rs6000_emit_move (dest, tmp, dest_mode);
22302 return;
22303 }
22304
22305 /* Convert to IEEE 128-bit floating point. */
22306 if (FLOAT128_IEEE_P (dest_mode))
22307 {
22308 if (dest_mode == KFmode)
22309 kf_or_tf = 0;
22310 else if (dest_mode == TFmode)
22311 kf_or_tf = 1;
22312 else
22313 gcc_unreachable ();
22314
22315 switch (src_mode)
22316 {
22317 case E_DFmode:
22318 cvt = sext_optab;
22319 hw_convert = hw_conversions[kf_or_tf].from_df;
22320 break;
22321
22322 case E_SFmode:
22323 cvt = sext_optab;
22324 hw_convert = hw_conversions[kf_or_tf].from_sf;
22325 break;
22326
22327 case E_KFmode:
22328 case E_IFmode:
22329 case E_TFmode:
22330 if (FLOAT128_IBM_P (src_mode))
22331 cvt = sext_optab;
22332 else
22333 do_move = true;
22334 break;
22335
22336 case E_SImode:
22337 if (unsigned_p)
22338 {
22339 cvt = ufloat_optab;
22340 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22341 }
22342 else
22343 {
22344 cvt = sfloat_optab;
22345 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22346 }
22347 break;
22348
22349 case E_DImode:
22350 if (unsigned_p)
22351 {
22352 cvt = ufloat_optab;
22353 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22354 }
22355 else
22356 {
22357 cvt = sfloat_optab;
22358 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22359 }
22360 break;
22361
22362 default:
22363 gcc_unreachable ();
22364 }
22365 }
22366
22367 /* Convert from IEEE 128-bit floating point. */
22368 else if (FLOAT128_IEEE_P (src_mode))
22369 {
22370 if (src_mode == KFmode)
22371 kf_or_tf = 0;
22372 else if (src_mode == TFmode)
22373 kf_or_tf = 1;
22374 else
22375 gcc_unreachable ();
22376
22377 switch (dest_mode)
22378 {
22379 case E_DFmode:
22380 cvt = trunc_optab;
22381 hw_convert = hw_conversions[kf_or_tf].to_df;
22382 break;
22383
22384 case E_SFmode:
22385 cvt = trunc_optab;
22386 hw_convert = hw_conversions[kf_or_tf].to_sf;
22387 break;
22388
22389 case E_KFmode:
22390 case E_IFmode:
22391 case E_TFmode:
22392 if (FLOAT128_IBM_P (dest_mode))
22393 cvt = trunc_optab;
22394 else
22395 do_move = true;
22396 break;
22397
22398 case E_SImode:
22399 if (unsigned_p)
22400 {
22401 cvt = ufix_optab;
22402 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22403 }
22404 else
22405 {
22406 cvt = sfix_optab;
22407 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22408 }
22409 break;
22410
22411 case E_DImode:
22412 if (unsigned_p)
22413 {
22414 cvt = ufix_optab;
22415 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22416 }
22417 else
22418 {
22419 cvt = sfix_optab;
22420 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22421 }
22422 break;
22423
22424 default:
22425 gcc_unreachable ();
22426 }
22427 }
22428
22429 /* Both IBM format. */
22430 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22431 do_move = true;
22432
22433 else
22434 gcc_unreachable ();
22435
22436 /* Handle conversion between TFmode/KFmode/IFmode. */
22437 if (do_move)
22438 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22439
22440 /* Handle conversion if we have hardware support. */
22441 else if (TARGET_FLOAT128_HW && hw_convert)
22442 emit_insn ((hw_convert) (dest, src));
22443
22444 /* Call an external function to do the conversion. */
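  /* (For example, the DFmode <-> KFmode pair uses the libgcc routines
     __extenddfkf2 and __trunckfdf2.)  */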
22445 else if (cvt != unknown_optab)
22446 {
22447 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22448 gcc_assert (libfunc != NULL_RTX);
22449
22450 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22451 src, src_mode);
22452
22453 gcc_assert (dest2 != NULL_RTX);
22454 if (!rtx_equal_p (dest, dest2))
22455 emit_move_insn (dest, dest2);
22456 }
22457
22458 else
22459 gcc_unreachable ();
22460
22461 return;
22462 }
22463
22464 \f
22465 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22466 can be used as that dest register. Return the dest register. */
22467
22468 rtx
22469 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22470 {
22471 if (op2 == const0_rtx)
22472 return op1;
22473
22474 if (GET_CODE (scratch) == SCRATCH)
22475 scratch = gen_reg_rtx (mode);
22476
22477 if (logical_operand (op2, mode))
22478 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22479 else
22480 emit_insn (gen_rtx_SET (scratch,
22481 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22482
22483 return scratch;
22484 }
22485
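/* Expand a "setcc": set OPERANDS[0] to 1 if the comparison OPERANDS[1],
   whose operands have mode MODE, is true, and to 0 otherwise.  Condition
   codes that the CR field cannot test with a single bit are handled by
   testing the reverse condition for equality with zero.  */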
22486 void
22487 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22488 {
22489 rtx condition_rtx;
22490 machine_mode op_mode;
22491 enum rtx_code cond_code;
22492 rtx result = operands[0];
22493
22494 condition_rtx = rs6000_generate_compare (operands[1], mode);
22495 cond_code = GET_CODE (condition_rtx);
22496
22497 if (cond_code == NE
22498 || cond_code == GE || cond_code == LE
22499 || cond_code == GEU || cond_code == LEU
22500 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22501 {
22502 rtx not_result = gen_reg_rtx (CCEQmode);
22503 rtx not_op, rev_cond_rtx;
22504 machine_mode cc_mode;
22505
22506 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22507
22508 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22509 SImode, XEXP (condition_rtx, 0), const0_rtx);
22510 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22511 emit_insn (gen_rtx_SET (not_result, not_op));
22512 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22513 }
22514
22515 op_mode = GET_MODE (XEXP (operands[1], 0));
22516 if (op_mode == VOIDmode)
22517 op_mode = GET_MODE (XEXP (operands[1], 1));
22518
22519 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22520 {
22521 PUT_MODE (condition_rtx, DImode);
22522 convert_move (result, condition_rtx, 0);
22523 }
22524 else
22525 {
22526 PUT_MODE (condition_rtx, SImode);
22527 emit_insn (gen_rtx_SET (result, condition_rtx));
22528 }
22529 }
22530
/* Emit a conditional branch to the label in OPERANDS[3], testing the
   comparison in OPERANDS[0], whose operands have mode MODE.  */
22532
22533 void
22534 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22535 {
22536 rtx condition_rtx, loc_ref;
22537
22538 condition_rtx = rs6000_generate_compare (operands[0], mode);
22539 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22540 emit_jump_insn (gen_rtx_SET (pc_rtx,
22541 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22542 loc_ref, pc_rtx)));
22543 }
22544
22545 /* Return the string to output a conditional branch to LABEL, which is
22546 the operand template of the label, or NULL if the branch is really a
22547 conditional return.
22548
22549 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22550 condition code register and its mode specifies what kind of
22551 comparison we made.
22552
22553 REVERSED is nonzero if we should reverse the sense of the comparison.
22554
22555 INSN is the insn. */
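/* For example, a predicted-taken EQ branch on CR0 is emitted as
   "beq+ 0,.L2" with the default register names, while a branch whose
   target is out of range is reversed and emitted as, e.g.,
   "bne- 0,$+8" followed by "b .L2".  */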
22556
22557 char *
22558 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22559 {
22560 static char string[64];
22561 enum rtx_code code = GET_CODE (op);
22562 rtx cc_reg = XEXP (op, 0);
22563 machine_mode mode = GET_MODE (cc_reg);
22564 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22565 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22566 int really_reversed = reversed ^ need_longbranch;
22567 char *s = string;
22568 const char *ccode;
22569 const char *pred;
22570 rtx note;
22571
22572 validate_condition_mode (code, mode);
22573
  /* Work out which way this really branches.  We could always use
     reverse_condition_maybe_unordered here, but distinguishing the cases
     makes the resulting assembler clearer.  */
22577 if (really_reversed)
22578 {
22579 /* Reversal of FP compares takes care -- an ordered compare
22580 becomes an unordered compare and vice versa. */
22581 if (mode == CCFPmode)
22582 code = reverse_condition_maybe_unordered (code);
22583 else
22584 code = reverse_condition (code);
22585 }
22586
22587 switch (code)
22588 {
22589 /* Not all of these are actually distinct opcodes, but
22590 we distinguish them for clarity of the resulting assembler. */
22591 case NE: case LTGT:
22592 ccode = "ne"; break;
22593 case EQ: case UNEQ:
22594 ccode = "eq"; break;
22595 case GE: case GEU:
22596 ccode = "ge"; break;
22597 case GT: case GTU: case UNGT:
22598 ccode = "gt"; break;
22599 case LE: case LEU:
22600 ccode = "le"; break;
22601 case LT: case LTU: case UNLT:
22602 ccode = "lt"; break;
22603 case UNORDERED: ccode = "un"; break;
22604 case ORDERED: ccode = "nu"; break;
22605 case UNGE: ccode = "nl"; break;
22606 case UNLE: ccode = "ng"; break;
22607 default:
22608 gcc_unreachable ();
22609 }
22610
22611 /* Maybe we have a guess as to how likely the branch is. */
22612 pred = "";
22613 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22614 if (note != NULL_RTX)
22615 {
22616 /* PROB is the difference from 50%. */
22617 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22618 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22619
22620 /* Only hint for highly probable/improbable branches on newer cpus when
22621 we have real profile data, as static prediction overrides processor
22622 dynamic prediction. For older cpus we may as well always hint, but
22623 assume not taken for branches that are very close to 50% as a
22624 mispredicted taken branch is more expensive than a
22625 mispredicted not-taken branch. */
22626 if (rs6000_always_hint
22627 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22628 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22629 && br_prob_note_reliable_p (note)))
22630 {
22631 if (abs (prob) > REG_BR_PROB_BASE / 20
22632 && ((prob > 0) ^ need_longbranch))
22633 pred = "+";
22634 else
22635 pred = "-";
22636 }
22637 }
22638
22639 if (label == NULL)
22640 s += sprintf (s, "b%slr%s ", ccode, pred);
22641 else
22642 s += sprintf (s, "b%s%s ", ccode, pred);
22643
22644 /* We need to escape any '%' characters in the reg_names string.
22645 Assume they'd only be the first character.... */
22646 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22647 *s++ = '%';
22648 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22649
22650 if (label != NULL)
22651 {
22652 /* If the branch distance was too far, we may have to use an
22653 unconditional branch to go the distance. */
22654 if (need_longbranch)
22655 s += sprintf (s, ",$+8\n\tb %s", label);
22656 else
22657 s += sprintf (s, ",%s", label);
22658 }
22659
22660 return string;
22661 }
22662
/* Emit a single VSX or Altivec vector comparison and return the register
   holding the result mask, or NULL_RTX if the comparison is not directly
   supported by the hardware.  */
22664
22665 static rtx
22666 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22667 {
22668 rtx mask;
22669 machine_mode mode = GET_MODE (op0);
22670
22671 switch (code)
22672 {
22673 default:
22674 break;
22675
22676 case GE:
22677 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22678 return NULL_RTX;
22679 /* FALLTHRU */
22680
22681 case EQ:
22682 case GT:
22683 case GTU:
22684 case ORDERED:
22685 case UNORDERED:
22686 case UNEQ:
22687 case LTGT:
22688 mask = gen_reg_rtx (mode);
22689 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22690 return mask;
22691 }
22692
22693 return NULL_RTX;
22694 }
22695
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is the expected destination mode.  This is a recursive function.  */
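/* Comparisons the hardware lacks are decomposed into ones it has: LT and
   LTU swap their operands and use GT/GTU; NE and the UN* codes emit the
   reversed comparison and take its one's complement; GE/GEU/LE/LEU OR the
   corresponding strict comparison with EQ.  */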
22698
22699 static rtx
22700 rs6000_emit_vector_compare (enum rtx_code rcode,
22701 rtx op0, rtx op1,
22702 machine_mode dmode)
22703 {
22704 rtx mask;
22705 bool swap_operands = false;
22706 bool try_again = false;
22707
22708 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22709 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22710
22711 /* See if the comparison works as is. */
22712 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22713 if (mask)
22714 return mask;
22715
22716 switch (rcode)
22717 {
22718 case LT:
22719 rcode = GT;
22720 swap_operands = true;
22721 try_again = true;
22722 break;
22723 case LTU:
22724 rcode = GTU;
22725 swap_operands = true;
22726 try_again = true;
22727 break;
22728 case NE:
22729 case UNLE:
22730 case UNLT:
22731 case UNGE:
22732 case UNGT:
22733 /* Invert condition and try again.
22734 e.g., A != B becomes ~(A==B). */
22735 {
22736 enum rtx_code rev_code;
22737 enum insn_code nor_code;
22738 rtx mask2;
22739
22740 rev_code = reverse_condition_maybe_unordered (rcode);
22741 if (rev_code == UNKNOWN)
22742 return NULL_RTX;
22743
22744 nor_code = optab_handler (one_cmpl_optab, dmode);
22745 if (nor_code == CODE_FOR_nothing)
22746 return NULL_RTX;
22747
22748 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22749 if (!mask2)
22750 return NULL_RTX;
22751
22752 mask = gen_reg_rtx (dmode);
22753 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22754 return mask;
22755 }
22756 break;
22757 case GE:
22758 case GEU:
22759 case LE:
22760 case LEU:
      /* Try GT/GTU/LT/LTU OR EQ.  */
22762 {
22763 rtx c_rtx, eq_rtx;
22764 enum insn_code ior_code;
22765 enum rtx_code new_code;
22766
22767 switch (rcode)
22768 {
22769 case GE:
22770 new_code = GT;
22771 break;
22772
22773 case GEU:
22774 new_code = GTU;
22775 break;
22776
22777 case LE:
22778 new_code = LT;
22779 break;
22780
22781 case LEU:
22782 new_code = LTU;
22783 break;
22784
22785 default:
22786 gcc_unreachable ();
22787 }
22788
22789 ior_code = optab_handler (ior_optab, dmode);
22790 if (ior_code == CODE_FOR_nothing)
22791 return NULL_RTX;
22792
22793 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22794 if (!c_rtx)
22795 return NULL_RTX;
22796
22797 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22798 if (!eq_rtx)
22799 return NULL_RTX;
22800
22801 mask = gen_reg_rtx (dmode);
22802 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22803 return mask;
22804 }
22805 break;
22806 default:
22807 return NULL_RTX;
22808 }
22809
22810 if (try_again)
22811 {
22812 if (swap_operands)
22813 std::swap (op0, op1);
22814
22815 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22816 if (mask)
22817 return mask;
22818 }
22819
22820 /* You only get two chances. */
22821 return NULL_RTX;
22822 }
22823
22824 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22825 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22826 operands for the relation operation COND. */
22827
22828 int
22829 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22830 rtx cond, rtx cc_op0, rtx cc_op1)
22831 {
22832 machine_mode dest_mode = GET_MODE (dest);
22833 machine_mode mask_mode = GET_MODE (cc_op0);
22834 enum rtx_code rcode = GET_CODE (cond);
22835 machine_mode cc_mode = CCmode;
22836 rtx mask;
22837 rtx cond2;
22838 bool invert_move = false;
22839
22840 if (VECTOR_UNIT_NONE_P (dest_mode))
22841 return 0;
22842
22843 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22844 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22845
22846 switch (rcode)
22847 {
    /* For these codes, reverse the condition and swap the TRUE and FALSE
       arms of the conditional move rather than emitting a compound test.  */
22850 case NE:
22851 case UNLE:
22852 case UNLT:
22853 case UNGE:
22854 case UNGT:
22855 /* Invert condition and try again.
22856 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22857 invert_move = true;
22858 rcode = reverse_condition_maybe_unordered (rcode);
22859 if (rcode == UNKNOWN)
22860 return 0;
22861 break;
22862
22863 case GE:
22864 case LE:
22865 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22866 {
22867 /* Invert condition to avoid compound test. */
22868 invert_move = true;
22869 rcode = reverse_condition (rcode);
22870 }
22871 break;
22872
22873 case GTU:
22874 case GEU:
22875 case LTU:
22876 case LEU:
22877 /* Mark unsigned tests with CCUNSmode. */
22878 cc_mode = CCUNSmode;
22879
22880 /* Invert condition to avoid compound test if necessary. */
22881 if (rcode == GEU || rcode == LEU)
22882 {
22883 invert_move = true;
22884 rcode = reverse_condition (rcode);
22885 }
22886 break;
22887
22888 default:
22889 break;
22890 }
22891
22892 /* Get the vector mask for the given relational operations. */
22893 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22894
22895 if (!mask)
22896 return 0;
22897
22898 if (invert_move)
22899 std::swap (op_true, op_false);
22900
  /* The comparison mask has -1 in elements where the test is true and 0
     where it is false; exploit that when either arm is such a constant.  */
22902 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22903 && (GET_CODE (op_true) == CONST_VECTOR
22904 || GET_CODE (op_false) == CONST_VECTOR))
22905 {
22906 rtx constant_0 = CONST0_RTX (dest_mode);
22907 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22908
22909 if (op_true == constant_m1 && op_false == constant_0)
22910 {
22911 emit_move_insn (dest, mask);
22912 return 1;
22913 }
22914
22915 else if (op_true == constant_0 && op_false == constant_m1)
22916 {
22917 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22918 return 1;
22919 }
22920
22921 /* If we can't use the vector comparison directly, perhaps we can use
22922 the mask for the true or false fields, instead of loading up a
22923 constant. */
22924 if (op_true == constant_m1)
22925 op_true = mask;
22926
22927 if (op_false == constant_0)
22928 op_false = mask;
22929 }
22930
22931 if (!REG_P (op_true) && !SUBREG_P (op_true))
22932 op_true = force_reg (dest_mode, op_true);
22933
22934 if (!REG_P (op_false) && !SUBREG_P (op_false))
22935 op_false = force_reg (dest_mode, op_false);
22936
22937 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22938 CONST0_RTX (dest_mode));
22939 emit_insn (gen_rtx_SET (dest,
22940 gen_rtx_IF_THEN_ELSE (dest_mode,
22941 cond2,
22942 op_true,
22943 op_false)));
22944 return 1;
22945 }
22946
/* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP
   instruction for SF/DF scalars.  Move TRUE_COND to DEST if OP applied to
   the operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */
22951
22952 static int
22953 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22954 {
22955 enum rtx_code code = GET_CODE (op);
22956 rtx op0 = XEXP (op, 0);
22957 rtx op1 = XEXP (op, 1);
22958 machine_mode compare_mode = GET_MODE (op0);
22959 machine_mode result_mode = GET_MODE (dest);
22960 bool max_p = false;
22961
22962 if (result_mode != compare_mode)
22963 return 0;
22964
22965 if (code == GE || code == GT)
22966 max_p = true;
22967 else if (code == LE || code == LT)
22968 max_p = false;
22969 else
22970 return 0;
22971
22972 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22973 ;
22974
22975 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22976 max_p = !max_p;
22977
22978 else
22979 return 0;
22980
22981 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22982 return 1;
22983 }
22984
/* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP
   and XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if OP
   applied to the operands of the last comparison is nonzero/true,
   FALSE_COND if it is zero/false.  Return 0 if the hardware has no such
   operation.  */
22989
22990 static int
22991 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22992 {
22993 enum rtx_code code = GET_CODE (op);
22994 rtx op0 = XEXP (op, 0);
22995 rtx op1 = XEXP (op, 1);
22996 machine_mode result_mode = GET_MODE (dest);
22997 rtx compare_rtx;
22998 rtx cmove_rtx;
22999 rtx clobber_rtx;
23000
23001 if (!can_create_pseudo_p ())
23002 return 0;
23003
23004 switch (code)
23005 {
23006 case EQ:
23007 case GE:
23008 case GT:
23009 break;
23010
23011 case NE:
23012 case LT:
23013 case LE:
23014 code = swap_condition (code);
23015 std::swap (op0, op1);
23016 break;
23017
23018 default:
23019 return 0;
23020 }
23021
23022 /* Generate: [(parallel [(set (dest)
23023 (if_then_else (op (cmp1) (cmp2))
23024 (true)
23025 (false)))
23026 (clobber (scratch))])]. */
23027
23028 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
23029 cmove_rtx = gen_rtx_SET (dest,
23030 gen_rtx_IF_THEN_ELSE (result_mode,
23031 compare_rtx,
23032 true_cond,
23033 false_cond));
23034
23035 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
23036 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23037 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23038
23039 return 1;
23040 }
23041
/* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
   operands of the last comparison is nonzero/true, FALSE_COND if it is
   zero/false.  Return 0 if the hardware has no such operation.  */
23045
23046 int
23047 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23048 {
23049 enum rtx_code code = GET_CODE (op);
23050 rtx op0 = XEXP (op, 0);
23051 rtx op1 = XEXP (op, 1);
23052 machine_mode compare_mode = GET_MODE (op0);
23053 machine_mode result_mode = GET_MODE (dest);
23054 rtx temp;
23055 bool is_against_zero;
23056
23057 /* These modes should always match. */
23058 if (GET_MODE (op1) != compare_mode
23059 /* In the isel case however, we can use a compare immediate, so
23060 op1 may be a small constant. */
23061 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23062 return 0;
23063 if (GET_MODE (true_cond) != result_mode)
23064 return 0;
23065 if (GET_MODE (false_cond) != result_mode)
23066 return 0;
23067
23068 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23069 if (TARGET_P9_MINMAX
23070 && (compare_mode == SFmode || compare_mode == DFmode)
23071 && (result_mode == SFmode || result_mode == DFmode))
23072 {
23073 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23074 return 1;
23075
23076 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23077 return 1;
23078 }
23079
23080 /* Don't allow using floating point comparisons for integer results for
23081 now. */
23082 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23083 return 0;
23084
23085 /* First, work out if the hardware can do this at all, or
23086 if it's too slow.... */
23087 if (!FLOAT_MODE_P (compare_mode))
23088 {
23089 if (TARGET_ISEL)
23090 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23091 return 0;
23092 }
23093
23094 is_against_zero = op1 == CONST0_RTX (compare_mode);
23095
23096 /* A floating-point subtract might overflow, underflow, or produce
23097 an inexact result, thus changing the floating-point flags, so it
23098 can't be generated if we care about that. It's safe if one side
23099 of the construct is zero, since then no subtract will be
23100 generated. */
23101 if (SCALAR_FLOAT_MODE_P (compare_mode)
23102 && flag_trapping_math && ! is_against_zero)
23103 return 0;
23104
23105 /* Eliminate half of the comparisons by switching operands, this
23106 makes the remaining code simpler. */
23107 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23108 || code == LTGT || code == LT || code == UNLE)
23109 {
23110 code = reverse_condition_maybe_unordered (code);
23111 temp = true_cond;
23112 true_cond = false_cond;
23113 false_cond = temp;
23114 }
23115
  /* UNEQ and LTGT take four instructions for a comparison with zero;
     it'll probably be faster to use a branch here too.  */
23118 if (code == UNEQ && HONOR_NANS (compare_mode))
23119 return 0;
23120
  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ differently from UNORDERED, we can't do it.  */
23126 if (HONOR_INFINITIES (compare_mode)
23127 && code != GT && code != UNGE
23128 && (!CONST_DOUBLE_P (op1)
23129 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23130 /* Constructs of the form (a OP b ? a : b) are safe. */
23131 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23132 || (! rtx_equal_p (op0, true_cond)
23133 && ! rtx_equal_p (op1, true_cond))))
23134 return 0;
23135
23136 /* At this point we know we can use fsel. */
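  /* fsel computes DEST = (OP0 >= 0.0) ? TRUE_COND : FALSE_COND, so the
     code below massages every remaining case into a GE comparison
     against zero.  */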
23137
23138 /* Reduce the comparison to a comparison against zero. */
23139 if (! is_against_zero)
23140 {
23141 temp = gen_reg_rtx (compare_mode);
23142 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23143 op0 = temp;
23144 op1 = CONST0_RTX (compare_mode);
23145 }
23146
23147 /* If we don't care about NaNs we can reduce some of the comparisons
23148 down to faster ones. */
23149 if (! HONOR_NANS (compare_mode))
23150 switch (code)
23151 {
23152 case GT:
23153 code = LE;
23154 temp = true_cond;
23155 true_cond = false_cond;
23156 false_cond = temp;
23157 break;
23158 case UNGE:
23159 code = GE;
23160 break;
23161 case UNEQ:
23162 code = EQ;
23163 break;
23164 default:
23165 break;
23166 }
23167
23168 /* Now, reduce everything down to a GE. */
23169 switch (code)
23170 {
23171 case GE:
23172 break;
23173
23174 case LE:
23175 temp = gen_reg_rtx (compare_mode);
23176 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23177 op0 = temp;
23178 break;
23179
23180 case ORDERED:
23181 temp = gen_reg_rtx (compare_mode);
23182 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23183 op0 = temp;
23184 break;
23185
23186 case EQ:
23187 temp = gen_reg_rtx (compare_mode);
23188 emit_insn (gen_rtx_SET (temp,
23189 gen_rtx_NEG (compare_mode,
23190 gen_rtx_ABS (compare_mode, op0))));
23191 op0 = temp;
23192 break;
23193
23194 case UNGE:
23195 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23196 temp = gen_reg_rtx (result_mode);
23197 emit_insn (gen_rtx_SET (temp,
23198 gen_rtx_IF_THEN_ELSE (result_mode,
23199 gen_rtx_GE (VOIDmode,
23200 op0, op1),
23201 true_cond, false_cond)));
23202 false_cond = true_cond;
23203 true_cond = temp;
23204
23205 temp = gen_reg_rtx (compare_mode);
23206 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23207 op0 = temp;
23208 break;
23209
23210 case GT:
23211 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23212 temp = gen_reg_rtx (result_mode);
23213 emit_insn (gen_rtx_SET (temp,
23214 gen_rtx_IF_THEN_ELSE (result_mode,
23215 gen_rtx_GE (VOIDmode,
23216 op0, op1),
23217 true_cond, false_cond)));
23218 true_cond = false_cond;
23219 false_cond = temp;
23220
23221 temp = gen_reg_rtx (compare_mode);
23222 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23223 op0 = temp;
23224 break;
23225
23226 default:
23227 gcc_unreachable ();
23228 }
23229
23230 emit_insn (gen_rtx_SET (dest,
23231 gen_rtx_IF_THEN_ELSE (result_mode,
23232 gen_rtx_GE (VOIDmode,
23233 op0, op1),
23234 true_cond, false_cond)));
23235 return 1;
23236 }
23237
23238 /* Same as above, but for ints (isel). */
23239
23240 int
23241 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23242 {
23243 rtx condition_rtx, cr;
23244 machine_mode mode = GET_MODE (dest);
23245 enum rtx_code cond_code;
23246 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23247 bool signedp;
23248
23249 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23250 return 0;
23251
23252 /* We still have to do the compare, because isel doesn't do a
23253 compare, it just looks at the CRx bits set by a previous compare
23254 instruction. */
23255 condition_rtx = rs6000_generate_compare (op, mode);
23256 cond_code = GET_CODE (condition_rtx);
23257 cr = XEXP (condition_rtx, 0);
23258 signedp = GET_MODE (cr) == CCmode;
23259
23260 isel_func = (mode == SImode
23261 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23262 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23263
23264 switch (cond_code)
23265 {
23266 case LT: case GT: case LTU: case GTU: case EQ:
23267 /* isel handles these directly. */
23268 break;
23269
23270 default:
23271 /* We need to swap the sense of the comparison. */
23272 {
23273 std::swap (false_cond, true_cond);
23274 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23275 }
23276 break;
23277 }
23278
23279 false_cond = force_reg (mode, false_cond);
23280 if (true_cond != const0_rtx)
23281 true_cond = force_reg (mode, true_cond);
23282
23283 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23284
23285 return 1;
23286 }
23287
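/* Emit the min/max operation CODE (SMIN, SMAX, UMIN or UMAX) of OP0 and
   OP1 into DEST, using a direct vector min/max instruction when one is
   available and a conditional move otherwise.  */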
23288 void
23289 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23290 {
23291 machine_mode mode = GET_MODE (op0);
23292 enum rtx_code c;
23293 rtx target;
23294
23295 /* VSX/altivec have direct min/max insns. */
23296 if ((code == SMAX || code == SMIN)
23297 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23298 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23299 {
23300 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23301 return;
23302 }
23303
23304 if (code == SMAX || code == SMIN)
23305 c = GE;
23306 else
23307 c = GEU;
23308
23309 if (code == SMAX || code == UMAX)
23310 target = emit_conditional_move (dest, c, op0, op1, mode,
23311 op0, op1, mode, 0);
23312 else
23313 target = emit_conditional_move (dest, c, op0, op1, mode,
23314 op1, op0, mode, 0);
23315 gcc_assert (target);
23316 if (target != dest)
23317 emit_move_insn (dest, target);
23318 }
23319
23320 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23321 COND is true. Mark the jump as unlikely to be taken. */
23322
23323 static void
23324 emit_unlikely_jump (rtx cond, rtx label)
23325 {
23326 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23327 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23328 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23329 }
23330
23331 /* A subroutine of the atomic operation splitters. Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
23333 the zero_extend operation. */
23334
23335 static void
23336 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23337 {
23338 rtx (*fn) (rtx, rtx) = NULL;
23339
23340 switch (mode)
23341 {
23342 case E_QImode:
23343 fn = gen_load_lockedqi;
23344 break;
23345 case E_HImode:
23346 fn = gen_load_lockedhi;
23347 break;
23348 case E_SImode:
23349 if (GET_MODE (mem) == QImode)
23350 fn = gen_load_lockedqi_si;
23351 else if (GET_MODE (mem) == HImode)
23352 fn = gen_load_lockedhi_si;
23353 else
23354 fn = gen_load_lockedsi;
23355 break;
23356 case E_DImode:
23357 fn = gen_load_lockeddi;
23358 break;
23359 case E_TImode:
23360 fn = gen_load_lockedti;
23361 break;
23362 default:
23363 gcc_unreachable ();
23364 }
23365 emit_insn (fn (reg, mem));
23366 }
23367
23368 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23369 instruction in MODE. */
23370
23371 static void
23372 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23373 {
23374 rtx (*fn) (rtx, rtx, rtx) = NULL;
23375
23376 switch (mode)
23377 {
23378 case E_QImode:
23379 fn = gen_store_conditionalqi;
23380 break;
23381 case E_HImode:
23382 fn = gen_store_conditionalhi;
23383 break;
23384 case E_SImode:
23385 fn = gen_store_conditionalsi;
23386 break;
23387 case E_DImode:
23388 fn = gen_store_conditionaldi;
23389 break;
23390 case E_TImode:
23391 fn = gen_store_conditionalti;
23392 break;
23393 default:
23394 gcc_unreachable ();
23395 }
23396
  /* Emit sync before stwcx. to address PPC405 Erratum 77.  */
23398 if (PPC405_ERRATUM77)
23399 emit_insn (gen_hwsync ());
23400
23401 emit_insn (fn (res, mem, val));
23402 }
23403
23404 /* Expand barriers before and after a load_locked/store_cond sequence. */
23405
23406 static rtx
23407 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23408 {
23409 rtx addr = XEXP (mem, 0);
23410
23411 if (!legitimate_indirect_address_p (addr, reload_completed)
23412 && !legitimate_indexed_address_p (addr, reload_completed))
23413 {
23414 addr = force_reg (Pmode, addr);
23415 mem = replace_equiv_address_nv (mem, addr);
23416 }
23417
23418 switch (model)
23419 {
23420 case MEMMODEL_RELAXED:
23421 case MEMMODEL_CONSUME:
23422 case MEMMODEL_ACQUIRE:
23423 break;
23424 case MEMMODEL_RELEASE:
23425 case MEMMODEL_ACQ_REL:
23426 emit_insn (gen_lwsync ());
23427 break;
23428 case MEMMODEL_SEQ_CST:
23429 emit_insn (gen_hwsync ());
23430 break;
23431 default:
23432 gcc_unreachable ();
23433 }
23434 return mem;
23435 }
23436
23437 static void
23438 rs6000_post_atomic_barrier (enum memmodel model)
23439 {
23440 switch (model)
23441 {
23442 case MEMMODEL_RELAXED:
23443 case MEMMODEL_CONSUME:
23444 case MEMMODEL_RELEASE:
23445 break;
23446 case MEMMODEL_ACQUIRE:
23447 case MEMMODEL_ACQ_REL:
23448 case MEMMODEL_SEQ_CST:
23449 emit_insn (gen_isync ());
23450 break;
23451 default:
23452 gcc_unreachable ();
23453 }
23454 }
23455
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned SImode MEM.  Also build and return the quantities
   by which to shift and mask.  */
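/* For example, a QImode access to byte 2 of the containing word yields,
   on a little-endian target, shift = 2 * 8 = 16 and mask = 0xff << 16;
   on a big-endian target the shift is XORed with 24 so that it counts
   from the most-significant end, giving shift = 8.  */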
23460
23461 static rtx
23462 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23463 {
23464 rtx addr, align, shift, mask, mem;
23465 HOST_WIDE_INT shift_mask;
23466 machine_mode mode = GET_MODE (orig_mem);
23467
23468 /* For smaller modes, we have to implement this via SImode. */
23469 shift_mask = (mode == QImode ? 0x18 : 0x10);
23470
23471 addr = XEXP (orig_mem, 0);
23472 addr = force_reg (GET_MODE (addr), addr);
23473
23474 /* Aligned memory containing subword. Generate a new memory. We
23475 do not want any of the existing MEM_ATTR data, as we're now
23476 accessing memory outside the original object. */
23477 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23478 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23479 mem = gen_rtx_MEM (SImode, align);
23480 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23481 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23482 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23483
23484 /* Shift amount for subword relative to aligned word. */
23485 shift = gen_reg_rtx (SImode);
23486 addr = gen_lowpart (SImode, addr);
23487 rtx tmp = gen_reg_rtx (SImode);
23488 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23489 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23490 if (BYTES_BIG_ENDIAN)
23491 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23492 shift, 1, OPTAB_LIB_WIDEN);
23493 *pshift = shift;
23494
23495 /* Mask for insertion. */
23496 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23497 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23498 *pmask = mask;
23499
23500 return mem;
23501 }
23502
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK: the result is (OLDVAL & ~MASK)
   ored with NEWVAL, which must have no bits set outside the field.
   Returns a new pseudo.  */
23505
23506 static rtx
23507 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23508 {
23509 rtx x;
23510
23511 x = gen_reg_rtx (SImode);
23512 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23513 gen_rtx_NOT (SImode, mask),
23514 oldval)));
23515
23516 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23517
23518 return x;
23519 }
23520
23521 /* A subroutine of the various atomic expanders. For sub-word operands,
23522 extract WIDE to NARROW via SHIFT. */
23523
23524 static void
23525 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23526 {
23527 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23528 wide, 1, OPTAB_LIB_WIDEN);
23529 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23530 }
23531
23532 /* Expand an atomic compare and swap operation. */
23533
23534 void
23535 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23536 {
23537 rtx boolval, retval, mem, oldval, newval, cond;
23538 rtx label1, label2, x, mask, shift;
23539 machine_mode mode, orig_mode;
23540 enum memmodel mod_s, mod_f;
23541 bool is_weak;
23542
23543 boolval = operands[0];
23544 retval = operands[1];
23545 mem = operands[2];
23546 oldval = operands[3];
23547 newval = operands[4];
23548 is_weak = (INTVAL (operands[5]) != 0);
23549 mod_s = memmodel_base (INTVAL (operands[6]));
23550 mod_f = memmodel_base (INTVAL (operands[7]));
23551 orig_mode = mode = GET_MODE (mem);
23552
23553 mask = shift = NULL_RTX;
23554 if (mode == QImode || mode == HImode)
23555 {
      /* Before power8, we didn't have access to lbarx/lharx, so generate a
	 lwarx plus shift/mask operations.  With power8, we need to do the
	 comparison in SImode, but the store is still done in QI/HImode.  */
23559 oldval = convert_modes (SImode, mode, oldval, 1);
23560
23561 if (!TARGET_SYNC_HI_QI)
23562 {
23563 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23564
	  /* Shift and mask OLDVAL into position within the word.  */
23566 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23567 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23568
23569 /* Shift and mask NEWVAL into position within the word. */
23570 newval = convert_modes (SImode, mode, newval, 1);
23571 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23572 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23573 }
23574
23575 /* Prepare to adjust the return value. */
23576 retval = gen_reg_rtx (SImode);
23577 mode = SImode;
23578 }
23579 else if (reg_overlap_mentioned_p (retval, oldval))
23580 oldval = copy_to_reg (oldval);
23581
23582 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23583 oldval = copy_to_mode_reg (mode, oldval);
23584
23585 if (reg_overlap_mentioned_p (retval, newval))
23586 newval = copy_to_reg (newval);
23587
23588 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23589
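  /* The code below emits, for a strong word-sized compare-and-swap,
     roughly

	.L1:	lwarx   retval,0,mem
		cmpw    cr0,retval,oldval
		bne-    cr0,.L2
		stwcx.  newval,0,mem
		bne-    cr0,.L1
	.L2:

     bracketed by the memory-model barriers; a weak compare-and-swap
     omits the backward branch to .L1.  */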
23590 label1 = NULL_RTX;
23591 if (!is_weak)
23592 {
23593 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23594 emit_label (XEXP (label1, 0));
23595 }
23596 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23597
23598 emit_load_locked (mode, retval, mem);
23599
23600 x = retval;
23601 if (mask)
23602 x = expand_simple_binop (SImode, AND, retval, mask,
23603 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23604
23605 cond = gen_reg_rtx (CCmode);
23606 /* If we have TImode, synthesize a comparison. */
23607 if (mode != TImode)
23608 x = gen_rtx_COMPARE (CCmode, x, oldval);
23609 else
23610 {
23611 rtx xor1_result = gen_reg_rtx (DImode);
23612 rtx xor2_result = gen_reg_rtx (DImode);
23613 rtx or_result = gen_reg_rtx (DImode);
23614 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23615 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23616 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23617 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23618
23619 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23620 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23621 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23622 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23623 }
23624
23625 emit_insn (gen_rtx_SET (cond, x));
23626
23627 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23628 emit_unlikely_jump (x, label2);
23629
23630 x = newval;
23631 if (mask)
23632 x = rs6000_mask_atomic_subword (retval, newval, mask);
23633
23634 emit_store_conditional (orig_mode, cond, mem, x);
23635
23636 if (!is_weak)
23637 {
23638 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23639 emit_unlikely_jump (x, label1);
23640 }
23641
23642 if (!is_mm_relaxed (mod_f))
23643 emit_label (XEXP (label2, 0));
23644
23645 rs6000_post_atomic_barrier (mod_s);
23646
23647 if (is_mm_relaxed (mod_f))
23648 emit_label (XEXP (label2, 0));
23649
23650 if (shift)
23651 rs6000_finish_atomic_subword (operands[1], retval, shift);
23652 else if (mode != GET_MODE (operands[1]))
23653 convert_move (operands[1], retval, 1);
23654
23655 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23656 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23657 emit_insn (gen_rtx_SET (boolval, x));
23658 }
23659
23660 /* Expand an atomic exchange operation. */
23661
23662 void
23663 rs6000_expand_atomic_exchange (rtx operands[])
23664 {
23665 rtx retval, mem, val, cond;
23666 machine_mode mode;
23667 enum memmodel model;
23668 rtx label, x, mask, shift;
23669
23670 retval = operands[0];
23671 mem = operands[1];
23672 val = operands[2];
23673 model = memmodel_base (INTVAL (operands[3]));
23674 mode = GET_MODE (mem);
23675
23676 mask = shift = NULL_RTX;
23677 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23678 {
23679 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23680
      /* Shift and mask VAL into position within the word.  */
23682 val = convert_modes (SImode, mode, val, 1);
23683 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23684 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23685
23686 /* Prepare to adjust the return value. */
23687 retval = gen_reg_rtx (SImode);
23688 mode = SImode;
23689 }
23690
23691 mem = rs6000_pre_atomic_barrier (mem, model);
23692
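  /* Emit the exchange loop, roughly

	.L1:	lwarx   retval,0,mem
		stwcx.  val,0,mem
		bne-    cr0,.L1

     for the word-sized case.  */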
23693 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23694 emit_label (XEXP (label, 0));
23695
23696 emit_load_locked (mode, retval, mem);
23697
23698 x = val;
23699 if (mask)
23700 x = rs6000_mask_atomic_subword (retval, val, mask);
23701
23702 cond = gen_reg_rtx (CCmode);
23703 emit_store_conditional (mode, cond, mem, x);
23704
23705 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23706 emit_unlikely_jump (x, label);
23707
23708 rs6000_post_atomic_barrier (model);
23709
23710 if (shift)
23711 rs6000_finish_atomic_subword (operands[0], retval, shift);
23712 }
23713
23714 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23715 to perform. MEM is the memory on which to operate. VAL is the second
23716 operand of the binary operator. BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
23718 is a CONST_INT containing the memory model to use. */
23719
23720 void
23721 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23722 rtx orig_before, rtx orig_after, rtx model_rtx)
23723 {
23724 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23725 machine_mode mode = GET_MODE (mem);
23726 machine_mode store_mode = mode;
23727 rtx label, x, cond, mask, shift;
23728 rtx before = orig_before, after = orig_after;
23729
23730 mask = shift = NULL_RTX;
  /* On power8, the lbarx/lharx patterns operate directly on the subword,
     but we still want to do the computation in SImode.  On previous
     systems, do the operation on a full SImode word and shift/mask to get
     at the proper byte or halfword.  */
23734 if (mode == QImode || mode == HImode)
23735 {
23736 if (TARGET_SYNC_HI_QI)
23737 {
23738 val = convert_modes (SImode, mode, val, 1);
23739
23740 /* Prepare to adjust the return value. */
23741 before = gen_reg_rtx (SImode);
23742 if (after)
23743 after = gen_reg_rtx (SImode);
23744 mode = SImode;
23745 }
23746 else
23747 {
23748 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23749
	  /* Shift and mask VAL into position within the word.  */
23751 val = convert_modes (SImode, mode, val, 1);
23752 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23753 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23754
23755 switch (code)
23756 {
23757 case IOR:
23758 case XOR:
23759 /* We've already zero-extended VAL. That is sufficient to
23760 make certain that it does not affect other bits. */
23761 mask = NULL;
23762 break;
23763
23764 case AND:
23765 /* If we make certain that all of the other bits in VAL are
23766 set, that will be sufficient to not affect other bits. */
23767 x = gen_rtx_NOT (SImode, mask);
23768 x = gen_rtx_IOR (SImode, x, val);
23769 emit_insn (gen_rtx_SET (val, x));
23770 mask = NULL;
23771 break;
23772
23773 case NOT:
23774 case PLUS:
23775 case MINUS:
23776 /* These will all affect bits outside the field and need
23777 adjustment via MASK within the loop. */
23778 break;
23779
23780 default:
23781 gcc_unreachable ();
23782 }
23783
23784 /* Prepare to adjust the return value. */
23785 before = gen_reg_rtx (SImode);
23786 if (after)
23787 after = gen_reg_rtx (SImode);
23788 store_mode = mode = SImode;
23789 }
23790 }
23791
23792 mem = rs6000_pre_atomic_barrier (mem, model);
23793
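  /* Emit the retry loop, roughly

	.L1:	lwarx   before,0,mem
		<op>    after,before,val
		stwcx.  after,0,mem
		bne-    cr0,.L1

     for the word-sized case, with the subword shift/mask adjustments
     folded in where needed.  */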
23794 label = gen_label_rtx ();
23795 emit_label (label);
23796 label = gen_rtx_LABEL_REF (VOIDmode, label);
23797
23798 if (before == NULL_RTX)
23799 before = gen_reg_rtx (mode);
23800
23801 emit_load_locked (mode, before, mem);
23802
23803 if (code == NOT)
23804 {
23805 x = expand_simple_binop (mode, AND, before, val,
23806 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23807 after = expand_simple_unop (mode, NOT, x, after, 1);
23808 }
23809 else
23810 {
23811 after = expand_simple_binop (mode, code, before, val,
23812 after, 1, OPTAB_LIB_WIDEN);
23813 }
23814
23815 x = after;
23816 if (mask)
23817 {
23818 x = expand_simple_binop (SImode, AND, after, mask,
23819 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23820 x = rs6000_mask_atomic_subword (before, x, mask);
23821 }
23822 else if (store_mode != mode)
23823 x = convert_modes (store_mode, mode, x, 1);
23824
23825 cond = gen_reg_rtx (CCmode);
23826 emit_store_conditional (store_mode, cond, mem, x);
23827
23828 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23829 emit_unlikely_jump (x, label);
23830
23831 rs6000_post_atomic_barrier (model);
23832
23833 if (shift)
23834 {
23835 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
	 then do the calculations in a SImode register.  */
23837 if (orig_before)
23838 rs6000_finish_atomic_subword (orig_before, before, shift);
23839 if (orig_after)
23840 rs6000_finish_atomic_subword (orig_after, after, shift);
23841 }
23842 else if (store_mode != mode)
23843 {
23844 /* QImode/HImode on machines with lbarx/lharx where we do the native
	 operation and then do the calculations in a SImode register.  */
23846 if (orig_before)
23847 convert_move (orig_before, before, 1);
23848 if (orig_after)
23849 convert_move (orig_after, after, 1);
23850 }
23851 else if (orig_after && after != orig_after)
23852 emit_move_insn (orig_after, after);
23853 }
23854
23855 /* Emit instructions to move SRC to DST. Called by splitters for
23856 multi-register moves. It will emit at most one instruction for
23857 each register that is accessed; that is, it won't emit li/lis pairs
23858 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23859 register. */
23860
23861 void
23862 rs6000_split_multireg_move (rtx dst, rtx src)
23863 {
23864 /* The register number of the first register being moved. */
23865 int reg;
23866 /* The mode that is to be moved. */
23867 machine_mode mode;
23868 /* The mode that the move is being done in, and its size. */
23869 machine_mode reg_mode;
23870 int reg_mode_size;
23871 /* The number of registers that will be moved. */
23872 int nregs;
23873
23874 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23875 mode = GET_MODE (dst);
23876 nregs = hard_regno_nregs (reg, mode);
23877 if (FP_REGNO_P (reg))
23878 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23879 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23880 else if (ALTIVEC_REGNO_P (reg))
23881 reg_mode = V16QImode;
23882 else
23883 reg_mode = word_mode;
23884 reg_mode_size = GET_MODE_SIZE (reg_mode);
23885
23886 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23887
23888 /* TDmode residing in FP registers is special, since the ISA requires that
23889 the lower-numbered word of a register pair is always the most significant
23890 word, even in little-endian mode. This does not match the usual subreg
23891 	 semantics, so we cannot use simplify_gen_subreg in those cases.  Access
23892 the appropriate constituent registers "by hand" in little-endian mode.
23893
23894 Note we do not need to check for destructive overlap here since TDmode
23895 can only reside in even/odd register pairs. */
23896 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23897 {
23898 rtx p_src, p_dst;
23899 int i;
23900
23901 for (i = 0; i < nregs; i++)
23902 {
23903 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23904 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23905 else
23906 p_src = simplify_gen_subreg (reg_mode, src, mode,
23907 i * reg_mode_size);
23908
23909 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23910 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23911 else
23912 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23913 i * reg_mode_size);
23914
23915 emit_insn (gen_rtx_SET (p_dst, p_src));
23916 }
23917
23918 return;
23919 }
23920
23921 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23922 {
23923 /* Move register range backwards, if we might have destructive
23924 overlap. */
23925 int i;
23926 for (i = nregs - 1; i >= 0; i--)
23927 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23928 i * reg_mode_size),
23929 simplify_gen_subreg (reg_mode, src, mode,
23930 i * reg_mode_size)));
23931 }
23932 else
23933 {
23934 int i;
23935 int j = -1;
23936 bool used_update = false;
23937 rtx restore_basereg = NULL_RTX;
23938
23939 if (MEM_P (src) && INT_REGNO_P (reg))
23940 {
23941 rtx breg;
23942
23943 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23944 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23945 {
23946 rtx delta_rtx;
23947 breg = XEXP (XEXP (src, 0), 0);
23948 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23949 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23950 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23951 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23952 src = replace_equiv_address (src, breg);
23953 }
23954 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23955 {
23956 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23957 {
23958 rtx basereg = XEXP (XEXP (src, 0), 0);
23959 if (TARGET_UPDATE)
23960 {
23961 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23962 emit_insn (gen_rtx_SET (ndst,
23963 gen_rtx_MEM (reg_mode,
23964 XEXP (src, 0))));
23965 used_update = true;
23966 }
23967 else
23968 emit_insn (gen_rtx_SET (basereg,
23969 XEXP (XEXP (src, 0), 1)));
23970 src = replace_equiv_address (src, basereg);
23971 }
23972 else
23973 {
23974 rtx basereg = gen_rtx_REG (Pmode, reg);
23975 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23976 src = replace_equiv_address (src, basereg);
23977 }
23978 }
23979
23980 breg = XEXP (src, 0);
23981 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23982 breg = XEXP (breg, 0);
23983
23984 /* If the base register we are using to address memory is
23985 also a destination reg, then change that register last. */
23986 if (REG_P (breg)
23987 && REGNO (breg) >= REGNO (dst)
23988 && REGNO (breg) < REGNO (dst) + nregs)
23989 j = REGNO (breg) - REGNO (dst);
23990 }
23991 else if (MEM_P (dst) && INT_REGNO_P (reg))
23992 {
23993 rtx breg;
23994
23995 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23996 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23997 {
23998 rtx delta_rtx;
23999 breg = XEXP (XEXP (dst, 0), 0);
24000 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
24001 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
24002 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
24003
24004 /* We have to update the breg before doing the store.
24005 Use store with update, if available. */
24006
24007 if (TARGET_UPDATE)
24008 {
24009 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24010 emit_insn (TARGET_32BIT
24011 ? (TARGET_POWERPC64
24012 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
24013 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
24014 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
24015 used_update = true;
24016 }
24017 else
24018 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
24019 dst = replace_equiv_address (dst, breg);
24020 }
24021 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
24022 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
24023 {
24024 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
24025 {
24026 rtx basereg = XEXP (XEXP (dst, 0), 0);
24027 if (TARGET_UPDATE)
24028 {
24029 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
24030 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
24031 XEXP (dst, 0)),
24032 nsrc));
24033 used_update = true;
24034 }
24035 else
24036 emit_insn (gen_rtx_SET (basereg,
24037 XEXP (XEXP (dst, 0), 1)));
24038 dst = replace_equiv_address (dst, basereg);
24039 }
24040 else
24041 {
24042 rtx basereg = XEXP (XEXP (dst, 0), 0);
24043 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24044 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24045 && REG_P (basereg)
24046 && REG_P (offsetreg)
24047 && REGNO (basereg) != REGNO (offsetreg));
24048 if (REGNO (basereg) == 0)
24049 {
24050 rtx tmp = offsetreg;
24051 offsetreg = basereg;
24052 basereg = tmp;
24053 }
24054 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24055 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24056 dst = replace_equiv_address (dst, basereg);
24057 }
24058 }
24059 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24060 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
24061 }
24062
24063 for (i = 0; i < nregs; i++)
24064 {
24065 /* Calculate index to next subword. */
24066 ++j;
24067 if (j == nregs)
24068 j = 0;
24069
24070 	  /* If the compiler already emitted the move of the first word by
24071 	     store with update, there is no need to do anything.  */
24072 if (j == 0 && used_update)
24073 continue;
24074
24075 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24076 j * reg_mode_size),
24077 simplify_gen_subreg (reg_mode, src, mode,
24078 j * reg_mode_size)));
24079 }
24080 if (restore_basereg != NULL_RTX)
24081 emit_insn (restore_basereg);
24082 }
24083 }
24084
24085 \f
24086 /* This page contains routines that are used to determine what the
24087 function prologue and epilogue code will do and write them out. */
24088
24089 /* Determine whether the REG is really used. */
24090
24091 static bool
24092 save_reg_p (int reg)
24093 {
24094 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24095 {
24096 /* When calling eh_return, we must return true for all the cases
24097 where conditional_register_usage marks the PIC offset reg
24098 call used or fixed. */
24099 if (crtl->calls_eh_return
24100 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24101 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24102 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24103 return true;
24104
24105 /* We need to mark the PIC offset register live for the same
24106 conditions as it is set up in rs6000_emit_prologue, or
24107 otherwise it won't be saved before we clobber it. */
24108 if (TARGET_TOC && TARGET_MINIMAL_TOC
24109 && !constant_pool_empty_p ())
24110 return true;
24111
24112 if (DEFAULT_ABI == ABI_V4
24113 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
24114 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
24115 return true;
24116
24117 if (DEFAULT_ABI == ABI_DARWIN
24118 && flag_pic && crtl->uses_pic_offset_table)
24119 return true;
24120 }
24121
24122 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24123 }
24124
24125 /* Return the first fixed-point register that is required to be
24126 saved. 32 if none. */
24127
24128 int
24129 first_reg_to_save (void)
24130 {
24131 int first_reg;
24132
24133 /* Find lowest numbered live register. */
24134 for (first_reg = 13; first_reg <= 31; first_reg++)
24135 if (save_reg_p (first_reg))
24136 break;
24137
24138 return first_reg;
24139 }
24140
24141 /* Similar, for FP regs. */
24142
24143 int
24144 first_fp_reg_to_save (void)
24145 {
24146 int first_reg;
24147
24148 /* Find lowest numbered live register. */
24149 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24150 if (save_reg_p (first_reg))
24151 break;
24152
24153 return first_reg;
24154 }
24155
24156 /* Similar, for AltiVec regs. */
24157
24158 static int
24159 first_altivec_reg_to_save (void)
24160 {
24161 int i;
24162
24163 /* Stack frame remains as is unless we are in AltiVec ABI. */
24164 if (! TARGET_ALTIVEC_ABI)
24165 return LAST_ALTIVEC_REGNO + 1;
24166
24167 /* On Darwin, the unwind routines are compiled without
24168 TARGET_ALTIVEC, and use save_world to save/restore the
24169 altivec registers when necessary. */
24170 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24171 && ! TARGET_ALTIVEC)
24172 return FIRST_ALTIVEC_REGNO + 20;
24173
24174 /* Find lowest numbered live register. */
24175 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24176 if (save_reg_p (i))
24177 break;
24178
24179 return i;
24180 }
24181
24182 /* Return a 32-bit mask of the AltiVec registers we need to set in
24183 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
24184 the 32-bit word is 0. */
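/* Worked example (illustrative): if only V20 and V31 are live, the
   returned mask is (0x80000000 >> 20) | (0x80000000 >> 31)
   = 0x800 | 0x1 = 0x801, i.e. bits 20 and 31 counting from the MSB.  */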
24185
24186 static unsigned int
24187 compute_vrsave_mask (void)
24188 {
24189 unsigned int i, mask = 0;
24190
24191 /* On Darwin, the unwind routines are compiled without
24192 TARGET_ALTIVEC, and use save_world to save/restore the
24193 call-saved altivec registers when necessary. */
24194 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24195 && ! TARGET_ALTIVEC)
24196 mask |= 0xFFF;
24197
24198 /* First, find out if we use _any_ altivec registers. */
24199 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24200 if (df_regs_ever_live_p (i))
24201 mask |= ALTIVEC_REG_BIT (i);
24202
24203 if (mask == 0)
24204 return mask;
24205
24206 /* Next, remove the argument registers from the set. These must
24207 be in the VRSAVE mask set by the caller, so we don't need to add
24208 them in again. More importantly, the mask we compute here is
24209 used to generate CLOBBERs in the set_vrsave insn, and we do not
24210 wish the argument registers to die. */
24211 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24212 mask &= ~ALTIVEC_REG_BIT (i);
24213
24214 /* Similarly, remove the return value from the set. */
24215 {
24216 bool yes = false;
24217 diddle_return_value (is_altivec_return_reg, &yes);
24218 if (yes)
24219 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24220 }
24221
24222 return mask;
24223 }
24224
24225 /* For a very restricted set of circumstances, we can cut down the
24226 size of prologues/epilogues by calling our own save/restore-the-world
24227 routines. */
24228
24229 static void
24230 compute_save_world_info (rs6000_stack_t *info)
24231 {
24232 info->world_save_p = 1;
24233 info->world_save_p
24234 = (WORLD_SAVE_P (info)
24235 && DEFAULT_ABI == ABI_DARWIN
24236 && !cfun->has_nonlocal_label
24237 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24238 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24239 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24240 && info->cr_save_p);
24241
24242 /* This will not work in conjunction with sibcalls. Make sure there
24243 are none. (This check is expensive, but seldom executed.) */
24244 if (WORLD_SAVE_P (info))
24245 {
24246 rtx_insn *insn;
24247 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24248 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24249 {
24250 info->world_save_p = 0;
24251 break;
24252 }
24253 }
24254
24255 if (WORLD_SAVE_P (info))
24256 {
24257 /* Even if we're not touching VRsave, make sure there's room on the
24258 stack for it, if it looks like we're calling SAVE_WORLD, which
24259 will attempt to save it. */
24260 info->vrsave_size = 4;
24261
24262 /* If we are going to save the world, we need to save the link register too. */
24263 info->lr_save_p = 1;
24264
24265 /* "Save" the VRsave register too if we're saving the world. */
24266 if (info->vrsave_mask == 0)
24267 info->vrsave_mask = compute_vrsave_mask ();
24268
24269 /* Because the Darwin register save/restore routines only handle
24270 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24271 check. */
24272 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24273 && (info->first_altivec_reg_save
24274 >= FIRST_SAVED_ALTIVEC_REGNO));
24275 }
24276
24277 return;
24278 }
24279
24280
24281 static void
24282 is_altivec_return_reg (rtx reg, void *xyes)
24283 {
24284 bool *yes = (bool *) xyes;
24285 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24286 *yes = true;
24287 }
24288
24289 \f
24290 /* Return whether REG is a global user reg or has been specified by
24291 -ffixed-REG. We should not restore these, and so cannot use
24292 lmw or out-of-line restore functions if there are any. We also
24293 can't save them (well, emit frame notes for them), because frame
24294 unwinding during exception handling will restore saved registers. */
24295
24296 static bool
24297 fixed_reg_p (int reg)
24298 {
24299 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24300 backend sets it, overriding anything the user might have given. */
24301 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24302 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24303 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24304 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24305 return false;
24306
24307 return fixed_regs[reg];
24308 }
24309
24310 /* Determine the strategy for saving/restoring registers.  */
24311
24312 enum {
24313 SAVE_MULTIPLE = 0x1,
24314 SAVE_INLINE_GPRS = 0x2,
24315 SAVE_INLINE_FPRS = 0x4,
24316 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24317 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24318 SAVE_INLINE_VRS = 0x20,
24319 REST_MULTIPLE = 0x100,
24320 REST_INLINE_GPRS = 0x200,
24321 REST_INLINE_FPRS = 0x400,
24322 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24323 REST_INLINE_VRS = 0x1000
24324 };
24325
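/* Illustrative sketch (not part of the original file): how the paired bits
   are read in the tests below.  The *_MULTIPLE bits only qualify the
   corresponding *_INLINE_GPRS bit; the helper name is hypothetical.  */
#if 0
static bool
restores_each_gpr_individually (int strategy)
{
  /* Inline restore without lmw: every gpr gets its own load.  */
  return (strategy & (REST_INLINE_GPRS | REST_MULTIPLE))
	 == REST_INLINE_GPRS;
}
#endif
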
24326 static int
24327 rs6000_savres_strategy (rs6000_stack_t *info,
24328 bool using_static_chain_p)
24329 {
24330 int strategy = 0;
24331
24332 /* Select between in-line and out-of-line save and restore of regs.
24333 First, all the obvious cases where we don't use out-of-line. */
24334 if (crtl->calls_eh_return
24335 || cfun->machine->ra_need_lr)
24336 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24337 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24338 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24339
24340 if (info->first_gp_reg_save == 32)
24341 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24342
24343 if (info->first_fp_reg_save == 64)
24344 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24345
24346 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24347 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24348
24349 /* Define cutoff for using out-of-line functions to save registers. */
24350 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24351 {
24352 if (!optimize_size)
24353 {
24354 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24355 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24356 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24357 }
24358 else
24359 {
24360 /* Prefer out-of-line restore if it will exit. */
24361 if (info->first_fp_reg_save > 61)
24362 strategy |= SAVE_INLINE_FPRS;
24363 if (info->first_gp_reg_save > 29)
24364 {
24365 if (info->first_fp_reg_save == 64)
24366 strategy |= SAVE_INLINE_GPRS;
24367 else
24368 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24369 }
24370 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24371 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24372 }
24373 }
24374 else if (DEFAULT_ABI == ABI_DARWIN)
24375 {
24376 if (info->first_fp_reg_save > 60)
24377 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24378 if (info->first_gp_reg_save > 29)
24379 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24380 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24381 }
24382 else
24383 {
24384 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24385 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24386 || info->first_fp_reg_save > 61)
24387 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24388 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24389 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24390 }
24391
24392 /* Don't bother to try to save things out-of-line if r11 is occupied
24393 by the static chain. It would require too much fiddling and the
24394 static chain is rarely used anyway. FPRs are saved w.r.t the stack
24395 pointer on Darwin, and AIX uses r1 or r12. */
24396 if (using_static_chain_p
24397 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24398 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24399 | SAVE_INLINE_GPRS
24400 | SAVE_INLINE_VRS);
24401
24402 /* Don't ever restore fixed regs. That means we can't use the
24403 out-of-line register restore functions if a fixed reg is in the
24404 range of regs restored. */
24405 if (!(strategy & REST_INLINE_FPRS))
24406 for (int i = info->first_fp_reg_save; i < 64; i++)
24407 if (fixed_regs[i])
24408 {
24409 strategy |= REST_INLINE_FPRS;
24410 break;
24411 }
24412
24413 /* We can only use the out-of-line routines to restore fprs if we've
24414 saved all the registers from first_fp_reg_save in the prologue.
24415 Otherwise, we risk loading garbage. Of course, if we have saved
24416 out-of-line then we know we haven't skipped any fprs. */
24417 if ((strategy & SAVE_INLINE_FPRS)
24418 && !(strategy & REST_INLINE_FPRS))
24419 for (int i = info->first_fp_reg_save; i < 64; i++)
24420 if (!save_reg_p (i))
24421 {
24422 strategy |= REST_INLINE_FPRS;
24423 break;
24424 }
24425
24426 /* Similarly, for altivec regs. */
24427 if (!(strategy & REST_INLINE_VRS))
24428 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24429 if (fixed_regs[i])
24430 {
24431 strategy |= REST_INLINE_VRS;
24432 break;
24433 }
24434
24435 if ((strategy & SAVE_INLINE_VRS)
24436 && !(strategy & REST_INLINE_VRS))
24437 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24438 if (!save_reg_p (i))
24439 {
24440 strategy |= REST_INLINE_VRS;
24441 break;
24442 }
24443
24444 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24445 saved is an out-of-line save or restore. Set up the value for
24446 the next test (excluding out-of-line gprs). */
24447 bool lr_save_p = (info->lr_save_p
24448 || !(strategy & SAVE_INLINE_FPRS)
24449 || !(strategy & SAVE_INLINE_VRS)
24450 || !(strategy & REST_INLINE_FPRS)
24451 || !(strategy & REST_INLINE_VRS));
24452
24453 if (TARGET_MULTIPLE
24454 && !TARGET_POWERPC64
24455 && info->first_gp_reg_save < 31
24456 && !(flag_shrink_wrap
24457 && flag_shrink_wrap_separate
24458 && optimize_function_for_speed_p (cfun)))
24459 {
24460 int count = 0;
24461 for (int i = info->first_gp_reg_save; i < 32; i++)
24462 if (save_reg_p (i))
24463 count++;
24464
24465 if (count <= 1)
24466 /* Don't use store multiple if only one reg needs to be
24467 saved. This can occur for example when the ABI_V4 pic reg
24468 (r30) needs to be saved to make calls, but r31 is not
24469 used. */
24470 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24471 else
24472 {
24473 /* Prefer store multiple for saves over out-of-line
24474 routines, since the store-multiple instruction will
24475 always be smaller. */
24476 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24477
24478 /* The situation is more complicated with load multiple.
24479 We'd prefer to use the out-of-line routines for restores,
24480 since the "exit" out-of-line routines can handle the
24481 	     restore of LR and the frame teardown.  However it doesn't
24482 make sense to use the out-of-line routine if that is the
24483 only reason we'd need to save LR, and we can't use the
24484 "exit" out-of-line gpr restore if we have saved some
24485 	     fprs; in those cases it is advantageous to use load
24486 multiple when available. */
24487 if (info->first_fp_reg_save != 64 || !lr_save_p)
24488 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24489 }
24490 }
24491
24492 /* Using the "exit" out-of-line routine does not improve code size
24493 if using it would require lr to be saved and if only saving one
24494 or two gprs. */
24495 else if (!lr_save_p && info->first_gp_reg_save > 29)
24496 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24497
24498 /* Don't ever restore fixed regs. */
24499 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24500 for (int i = info->first_gp_reg_save; i < 32; i++)
24501 if (fixed_reg_p (i))
24502 {
24503 strategy |= REST_INLINE_GPRS;
24504 strategy &= ~REST_MULTIPLE;
24505 break;
24506 }
24507
24508 /* We can only use load multiple or the out-of-line routines to
24509 restore gprs if we've saved all the registers from
24510 first_gp_reg_save. Otherwise, we risk loading garbage.
24511 Of course, if we have saved out-of-line or used stmw then we know
24512 we haven't skipped any gprs. */
24513 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24514 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24515 for (int i = info->first_gp_reg_save; i < 32; i++)
24516 if (!save_reg_p (i))
24517 {
24518 strategy |= REST_INLINE_GPRS;
24519 strategy &= ~REST_MULTIPLE;
24520 break;
24521 }
24522
24523 if (TARGET_ELF && TARGET_64BIT)
24524 {
24525 if (!(strategy & SAVE_INLINE_FPRS))
24526 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24527 else if (!(strategy & SAVE_INLINE_GPRS)
24528 && info->first_fp_reg_save == 64)
24529 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24530 }
24531 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24532 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24533
24534 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24535 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24536
24537 return strategy;
24538 }
24539
24540 /* Calculate the stack information for the current function. This is
24541 complicated by having two separate calling sequences, the AIX calling
24542 sequence and the V.4 calling sequence.
24543
24544 AIX (and Darwin/Mac OS X) stack frames look like:
24545 32-bit 64-bit
24546 SP----> +---------------------------------------+
24547 | back chain to caller | 0 0
24548 +---------------------------------------+
24549 | saved CR | 4 8 (8-11)
24550 +---------------------------------------+
24551 | saved LR | 8 16
24552 +---------------------------------------+
24553 | reserved for compilers | 12 24
24554 +---------------------------------------+
24555 | reserved for binders | 16 32
24556 +---------------------------------------+
24557 | saved TOC pointer | 20 40
24558 +---------------------------------------+
24559 | Parameter save area (+padding*) (P) | 24 48
24560 +---------------------------------------+
24561 | Alloca space (A) | 24+P etc.
24562 +---------------------------------------+
24563 | Local variable space (L) | 24+P+A
24564 +---------------------------------------+
24565 | Float/int conversion temporary (X) | 24+P+A+L
24566 +---------------------------------------+
24567 | Save area for AltiVec registers (W) | 24+P+A+L+X
24568 +---------------------------------------+
24569 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24570 +---------------------------------------+
24571 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24572 +---------------------------------------+
24573 	| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
24574 	+---------------------------------------+
24575 	| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
24576 +---------------------------------------+
24577 old SP->| back chain to caller's caller |
24578 +---------------------------------------+
24579
24580 * If the alloca area is present, the parameter save area is
24581 padded so that the former starts 16-byte aligned.
24582
24583 The required alignment for AIX configurations is two words (i.e., 8
24584 or 16 bytes).
24585
24586 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24587
24588 SP----> +---------------------------------------+
24589 | Back chain to caller | 0
24590 +---------------------------------------+
24591 | Save area for CR | 8
24592 +---------------------------------------+
24593 | Saved LR | 16
24594 +---------------------------------------+
24595 | Saved TOC pointer | 24
24596 +---------------------------------------+
24597 | Parameter save area (+padding*) (P) | 32
24598 +---------------------------------------+
24599 | Alloca space (A) | 32+P
24600 +---------------------------------------+
24601 | Local variable space (L) | 32+P+A
24602 +---------------------------------------+
24603 | Save area for AltiVec registers (W) | 32+P+A+L
24604 +---------------------------------------+
24605 | AltiVec alignment padding (Y) | 32+P+A+L+W
24606 +---------------------------------------+
24607 | Save area for GP registers (G) | 32+P+A+L+W+Y
24608 +---------------------------------------+
24609 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24610 +---------------------------------------+
24611 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24612 +---------------------------------------+
24613
24614 * If the alloca area is present, the parameter save area is
24615 padded so that the former starts 16-byte aligned.
24616
24617 V.4 stack frames look like:
24618
24619 SP----> +---------------------------------------+
24620 | back chain to caller | 0
24621 +---------------------------------------+
24622 | caller's saved LR | 4
24623 +---------------------------------------+
24624 | Parameter save area (+padding*) (P) | 8
24625 +---------------------------------------+
24626 | Alloca space (A) | 8+P
24627 +---------------------------------------+
24628 | Varargs save area (V) | 8+P+A
24629 +---------------------------------------+
24630 | Local variable space (L) | 8+P+A+V
24631 +---------------------------------------+
24632 | Float/int conversion temporary (X) | 8+P+A+V+L
24633 +---------------------------------------+
24634 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24635 +---------------------------------------+
24636 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24637 +---------------------------------------+
24638 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24639 +---------------------------------------+
24640 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24641 +---------------------------------------+
24642 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24643 +---------------------------------------+
24644 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24645 +---------------------------------------+
24646 old SP->| back chain to caller's caller |
24647 +---------------------------------------+
24648
24649 * If the alloca area is present and the required alignment is
24650 16 bytes, the parameter save area is padded so that the
24651 alloca area starts 16-byte aligned.
24652
24653 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24654 given. (But note below and in sysv4.h that we require only 8 and
24655 	 may round up the size of our stack frame anyway.  The historical
24656 reason is early versions of powerpc-linux which didn't properly
24657 align the stack at program startup. A happy side-effect is that
24658 -mno-eabi libraries can be used with -meabi programs.)
24659
24660 The EABI configuration defaults to the V.4 layout. However,
24661 the stack alignment requirements may differ. If -mno-eabi is not
24662 given, the required stack alignment is 8 bytes; if -mno-eabi is
24663 given, the required alignment is 16 bytes. (But see V.4 comment
24664 above.) */
24665
24666 #ifndef ABI_STACK_BOUNDARY
24667 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24668 #endif
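/* Illustrative sketch (not part of the original file): the align-up
   rounding used throughout the frame layout below.  RS6000_ALIGN is
   assumed to round its first argument up to a multiple of its second.  */
#if 0
static long
align_up (long n, long align)	/* align must be a power of two */
{
  return (n + align - 1) & -align;
}
/* e.g. 20 bytes of locals with 16-byte alignment:
   align_up (20, 16) == 32.  */
#endif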
24669
24670 static rs6000_stack_t *
24671 rs6000_stack_info (void)
24672 {
24673 /* We should never be called for thunks, we are not set up for that. */
24674 gcc_assert (!cfun->is_thunk);
24675
24676 rs6000_stack_t *info = &stack_info;
24677 int reg_size = TARGET_32BIT ? 4 : 8;
24678 int ehrd_size;
24679 int ehcr_size;
24680 int save_align;
24681 int first_gp;
24682 HOST_WIDE_INT non_fixed_size;
24683 bool using_static_chain_p;
24684
24685 if (reload_completed && info->reload_completed)
24686 return info;
24687
24688 memset (info, 0, sizeof (*info));
24689 info->reload_completed = reload_completed;
24690
24691 /* Select which calling sequence. */
24692 info->abi = DEFAULT_ABI;
24693
24694 /* Calculate which registers need to be saved & save area size. */
24695 info->first_gp_reg_save = first_reg_to_save ();
24696 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24697 even if it currently looks like we won't. Reload may need it to
24698 get at a constant; if so, it will have already created a constant
24699 pool entry for it. */
24700 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24701 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24702 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24703 && crtl->uses_const_pool
24704 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24705 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24706 else
24707 first_gp = info->first_gp_reg_save;
24708
24709 info->gp_size = reg_size * (32 - first_gp);
24710
24711 info->first_fp_reg_save = first_fp_reg_to_save ();
24712 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24713
24714 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24715 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24716 - info->first_altivec_reg_save);
24717
24718 /* Does this function call anything? */
24719 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24720
24721 /* Determine if we need to save the condition code registers. */
24722 if (save_reg_p (CR2_REGNO)
24723 || save_reg_p (CR3_REGNO)
24724 || save_reg_p (CR4_REGNO))
24725 {
24726 info->cr_save_p = 1;
24727 if (DEFAULT_ABI == ABI_V4)
24728 info->cr_size = reg_size;
24729 }
24730
24731 /* If the current function calls __builtin_eh_return, then we need
24732 to allocate stack space for registers that will hold data for
24733 the exception handler. */
24734 if (crtl->calls_eh_return)
24735 {
24736 unsigned int i;
24737 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24738 continue;
24739
24740 ehrd_size = i * UNITS_PER_WORD;
24741 }
24742 else
24743 ehrd_size = 0;
24744
24745 /* In the ELFv2 ABI, we also need to allocate space for separate
24746 CR field save areas if the function calls __builtin_eh_return. */
24747 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24748 {
24749 /* This hard-codes that we have three call-saved CR fields. */
24750 ehcr_size = 3 * reg_size;
24751 /* We do *not* use the regular CR save mechanism. */
24752 info->cr_save_p = 0;
24753 }
24754 else
24755 ehcr_size = 0;
24756
24757 /* Determine various sizes. */
24758 info->reg_size = reg_size;
24759 info->fixed_size = RS6000_SAVE_AREA;
24760 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24761 if (cfun->calls_alloca)
24762 info->parm_size =
24763 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24764 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24765 else
24766 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24767 TARGET_ALTIVEC ? 16 : 8);
24768 if (FRAME_GROWS_DOWNWARD)
24769 info->vars_size
24770 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24771 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24772 - (info->fixed_size + info->vars_size + info->parm_size);
24773
24774 if (TARGET_ALTIVEC_ABI)
24775 info->vrsave_mask = compute_vrsave_mask ();
24776
24777 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24778 info->vrsave_size = 4;
24779
24780 compute_save_world_info (info);
24781
24782 /* Calculate the offsets. */
24783 switch (DEFAULT_ABI)
24784 {
24785 case ABI_NONE:
24786 default:
24787 gcc_unreachable ();
24788
24789 case ABI_AIX:
24790 case ABI_ELFv2:
24791 case ABI_DARWIN:
24792 info->fp_save_offset = -info->fp_size;
24793 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24794
24795 if (TARGET_ALTIVEC_ABI)
24796 {
24797 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24798
24799 /* Align stack so vector save area is on a quadword boundary.
24800 The padding goes above the vectors. */
24801 if (info->altivec_size != 0)
24802 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24803
24804 info->altivec_save_offset = info->vrsave_save_offset
24805 - info->altivec_padding_size
24806 - info->altivec_size;
24807 gcc_assert (info->altivec_size == 0
24808 || info->altivec_save_offset % 16 == 0);
24809
24810 /* Adjust for AltiVec case. */
24811 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24812 }
24813 else
24814 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24815
24816 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24817 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24818 info->lr_save_offset = 2*reg_size;
24819 break;
24820
24821 case ABI_V4:
24822 info->fp_save_offset = -info->fp_size;
24823 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24824 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24825
24826 if (TARGET_ALTIVEC_ABI)
24827 {
24828 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24829
24830 /* Align stack so vector save area is on a quadword boundary. */
24831 if (info->altivec_size != 0)
24832 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24833
24834 info->altivec_save_offset = info->vrsave_save_offset
24835 - info->altivec_padding_size
24836 - info->altivec_size;
24837
24838 /* Adjust for AltiVec case. */
24839 info->ehrd_offset = info->altivec_save_offset;
24840 }
24841 else
24842 info->ehrd_offset = info->cr_save_offset;
24843
24844 info->ehrd_offset -= ehrd_size;
24845 info->lr_save_offset = reg_size;
24846 }
24847
24848 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24849 info->save_size = RS6000_ALIGN (info->fp_size
24850 + info->gp_size
24851 + info->altivec_size
24852 + info->altivec_padding_size
24853 + ehrd_size
24854 + ehcr_size
24855 + info->cr_size
24856 + info->vrsave_size,
24857 save_align);
24858
24859 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24860
24861 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24862 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24863
24864 /* Determine if we need to save the link register. */
24865 if (info->calls_p
24866 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24867 && crtl->profile
24868 && !TARGET_PROFILE_KERNEL)
24869 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24870 #ifdef TARGET_RELOCATABLE
24871 || (DEFAULT_ABI == ABI_V4
24872 && (TARGET_RELOCATABLE || flag_pic > 1)
24873 && !constant_pool_empty_p ())
24874 #endif
24875 || rs6000_ra_ever_killed ())
24876 info->lr_save_p = 1;
24877
24878 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24879 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24880 && call_used_regs[STATIC_CHAIN_REGNUM]);
24881 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24882
24883 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24884 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24885 || !(info->savres_strategy & SAVE_INLINE_VRS)
24886 || !(info->savres_strategy & REST_INLINE_GPRS)
24887 || !(info->savres_strategy & REST_INLINE_FPRS)
24888 || !(info->savres_strategy & REST_INLINE_VRS))
24889 info->lr_save_p = 1;
24890
24891 if (info->lr_save_p)
24892 df_set_regs_ever_live (LR_REGNO, true);
24893
24894 /* Determine if we need to allocate any stack frame:
24895
24896 For AIX we need to push the stack if a frame pointer is needed
24897 (because the stack might be dynamically adjusted), if we are
24898 debugging, if we make calls, or if the sum of fp_save, gp_save,
24899 	 and local variables is more than the space needed to save all
24900 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24901 + 18*8 = 288 (GPR13 reserved).
24902
24903 For V.4 we don't have the stack cushion that AIX uses, but assume
24904 that the debugger can handle stackless frames. */
24905
24906 if (info->calls_p)
24907 info->push_p = 1;
24908
24909 else if (DEFAULT_ABI == ABI_V4)
24910 info->push_p = non_fixed_size != 0;
24911
24912 else if (frame_pointer_needed)
24913 info->push_p = 1;
24914
24915 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24916 info->push_p = 1;
24917
24918 else
24919 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24920
24921 return info;
24922 }
24923
24924 static void
24925 debug_stack_info (rs6000_stack_t *info)
24926 {
24927 const char *abi_string;
24928
24929 if (! info)
24930 info = rs6000_stack_info ();
24931
24932 fprintf (stderr, "\nStack information for function %s:\n",
24933 ((current_function_decl && DECL_NAME (current_function_decl))
24934 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24935 : "<unknown>"));
24936
24937 switch (info->abi)
24938 {
24939 default: abi_string = "Unknown"; break;
24940 case ABI_NONE: abi_string = "NONE"; break;
24941 case ABI_AIX: abi_string = "AIX"; break;
24942 case ABI_ELFv2: abi_string = "ELFv2"; break;
24943 case ABI_DARWIN: abi_string = "Darwin"; break;
24944 case ABI_V4: abi_string = "V.4"; break;
24945 }
24946
24947 fprintf (stderr, "\tABI = %5s\n", abi_string);
24948
24949 if (TARGET_ALTIVEC_ABI)
24950 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24951
24952 if (info->first_gp_reg_save != 32)
24953 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24954
24955 if (info->first_fp_reg_save != 64)
24956 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24957
24958 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24959 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24960 info->first_altivec_reg_save);
24961
24962 if (info->lr_save_p)
24963 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24964
24965 if (info->cr_save_p)
24966 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24967
24968 if (info->vrsave_mask)
24969 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24970
24971 if (info->push_p)
24972 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24973
24974 if (info->calls_p)
24975 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24976
24977 if (info->gp_size)
24978 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24979
24980 if (info->fp_size)
24981 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24982
24983 if (info->altivec_size)
24984 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24985 info->altivec_save_offset);
24986
24987 if (info->vrsave_size)
24988 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24989 info->vrsave_save_offset);
24990
24991 if (info->lr_save_p)
24992 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24993
24994 if (info->cr_save_p)
24995 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24996
24997 if (info->varargs_save_offset)
24998 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24999
25000 if (info->total_size)
25001 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25002 info->total_size);
25003
25004 if (info->vars_size)
25005 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
25006 info->vars_size);
25007
25008 if (info->parm_size)
25009 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
25010
25011 if (info->fixed_size)
25012 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
25013
25014 if (info->gp_size)
25015 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
25016
25017 if (info->fp_size)
25018 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
25019
25020 if (info->altivec_size)
25021 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
25022
25023 if (info->vrsave_size)
25024 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
25025
25026 if (info->altivec_padding_size)
25027 fprintf (stderr, "\taltivec_padding_size= %5d\n",
25028 info->altivec_padding_size);
25029
25030 if (info->cr_size)
25031 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
25032
25033 if (info->save_size)
25034 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
25035
25036 if (info->reg_size != 4)
25037 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25038
25039 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25040
25041 fprintf (stderr, "\n");
25042 }
25043
25044 rtx
25045 rs6000_return_addr (int count, rtx frame)
25046 {
25047 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25048 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25049 if (count != 0
25050 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25051 {
25052 cfun->machine->ra_needs_full_frame = 1;
25053
25054 if (count == 0)
25055 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25056 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25057 frame = stack_pointer_rtx;
25058 rtx prev_frame_addr = memory_address (Pmode, frame);
25059 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25060 rtx lr_save_off = plus_constant (Pmode,
25061 prev_frame, RETURN_ADDRESS_OFFSET);
25062 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25063 return gen_rtx_MEM (Pmode, lr_save_addr);
25064 }
25065
25066 cfun->machine->ra_need_lr = 1;
25067 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25068 }
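/* Illustrative sketch (not part of the original file): the backchain walk
   that rs6000_return_addr emits for the full-frame case, in plain C.
   RETURN_ADDRESS_OFFSET is the byte offset of the LR save slot within a
   frame; the helper name is hypothetical.  */
#if 0
static void *
return_addr_via_backchain (void **frame)
{
  char *prev_frame = *(char **) frame;		/* load the back chain */
  return *(void **) (prev_frame + RETURN_ADDRESS_OFFSET);
}
#endif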
25069
25070 /* Say whether a function is a candidate for sibcall handling or not. */
25071
25072 static bool
25073 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25074 {
25075 tree fntype;
25076
25077 /* The sibcall epilogue may clobber the static chain register.
25078 ??? We could work harder and avoid that, but it's probably
25079 not worth the hassle in practice. */
25080 if (CALL_EXPR_STATIC_CHAIN (exp))
25081 return false;
25082
25083 if (decl)
25084 fntype = TREE_TYPE (decl);
25085 else
25086 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25087
25088 /* We can't do it if the called function has more vector parameters
25089 than the current function; there's nowhere to put the VRsave code. */
25090 if (TARGET_ALTIVEC_ABI
25091 && TARGET_ALTIVEC_VRSAVE
25092 && !(decl && decl == current_function_decl))
25093 {
25094 function_args_iterator args_iter;
25095 tree type;
25096 int nvreg = 0;
25097
25098 /* Functions with vector parameters are required to have a
25099 prototype, so the argument type info must be available
25100 here. */
25101 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25102 if (TREE_CODE (type) == VECTOR_TYPE
25103 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25104 nvreg++;
25105
25106 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25107 if (TREE_CODE (type) == VECTOR_TYPE
25108 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25109 nvreg--;
25110
25111 if (nvreg > 0)
25112 return false;
25113 }
25114
25115 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25116 functions, because the callee may have a different TOC pointer to
25117 the caller and there's no way to ensure we restore the TOC when
25118 we return. With the secure-plt SYSV ABI we can't make non-local
25119 calls when -fpic/PIC because the plt call stubs use r30. */
25120 if (DEFAULT_ABI == ABI_DARWIN
25121 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25122 && decl
25123 && !DECL_EXTERNAL (decl)
25124 && !DECL_WEAK (decl)
25125 && (*targetm.binds_local_p) (decl))
25126 || (DEFAULT_ABI == ABI_V4
25127 && (!TARGET_SECURE_PLT
25128 || !flag_pic
25129 || (decl
25130 && (*targetm.binds_local_p) (decl)))))
25131 {
25132 tree attr_list = TYPE_ATTRIBUTES (fntype);
25133
25134 if (!lookup_attribute ("longcall", attr_list)
25135 || lookup_attribute ("shortcall", attr_list))
25136 return true;
25137 }
25138
25139 return false;
25140 }
25141
25142 static int
25143 rs6000_ra_ever_killed (void)
25144 {
25145 rtx_insn *top;
25146 rtx reg;
25147 rtx_insn *insn;
25148
25149 if (cfun->is_thunk)
25150 return 0;
25151
25152 if (cfun->machine->lr_save_state)
25153 return cfun->machine->lr_save_state - 1;
25154
25155 /* regs_ever_live has LR marked as used if any sibcalls are present,
25156 but this should not force saving and restoring in the
25157 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25158 clobbers LR, so that is inappropriate. */
25159
25160 /* Also, the prologue can generate a store into LR that
25161 doesn't really count, like this:
25162
25163 move LR->R0
25164 bcl to set PIC register
25165 move LR->R31
25166 move R0->LR
25167
25168 When we're called from the epilogue, we need to avoid counting
25169 this as a store. */
25170
25171 push_topmost_sequence ();
25172 top = get_insns ();
25173 pop_topmost_sequence ();
25174 reg = gen_rtx_REG (Pmode, LR_REGNO);
25175
25176 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25177 {
25178 if (INSN_P (insn))
25179 {
25180 if (CALL_P (insn))
25181 {
25182 if (!SIBLING_CALL_P (insn))
25183 return 1;
25184 }
25185 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25186 return 1;
25187 else if (set_of (reg, insn) != NULL_RTX
25188 && !prologue_epilogue_contains (insn))
25189 return 1;
25190 }
25191 }
25192 return 0;
25193 }
25194 \f
25195 /* Emit instructions needed to load the TOC register.
25196 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
25197 a constant pool; or for SVR4 -fpic. */
25198
25199 void
25200 rs6000_emit_load_toc_table (int fromprolog)
25201 {
25202 rtx dest;
25203 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25204
25205 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25206 {
25207 char buf[30];
25208 rtx lab, tmp1, tmp2, got;
25209
25210 lab = gen_label_rtx ();
25211 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25212 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25213 if (flag_pic == 2)
25214 {
25215 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25216 need_toc_init = 1;
25217 }
25218 else
25219 got = rs6000_got_sym ();
25220 tmp1 = tmp2 = dest;
25221 if (!fromprolog)
25222 {
25223 tmp1 = gen_reg_rtx (Pmode);
25224 tmp2 = gen_reg_rtx (Pmode);
25225 }
25226 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25227 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25228 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25229 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25230 }
25231 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25232 {
25233 emit_insn (gen_load_toc_v4_pic_si ());
25234 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25235 }
25236 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25237 {
25238 char buf[30];
25239 rtx temp0 = (fromprolog
25240 ? gen_rtx_REG (Pmode, 0)
25241 : gen_reg_rtx (Pmode));
25242
25243 if (fromprolog)
25244 {
25245 rtx symF, symL;
25246
25247 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25248 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25249
25250 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25251 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25252
25253 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25254 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25255 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25256 }
25257 else
25258 {
25259 rtx tocsym, lab;
25260
25261 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25262 need_toc_init = 1;
25263 lab = gen_label_rtx ();
25264 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25265 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25266 if (TARGET_LINK_STACK)
25267 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25268 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25269 }
25270 emit_insn (gen_addsi3 (dest, temp0, dest));
25271 }
25272 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25273 {
25274 /* This is for AIX code running in non-PIC ELF32. */
25275 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25276
25277 need_toc_init = 1;
25278 emit_insn (gen_elf_high (dest, realsym));
25279 emit_insn (gen_elf_low (dest, dest, realsym));
25280 }
25281 else
25282 {
25283 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25284
25285 if (TARGET_32BIT)
25286 emit_insn (gen_load_toc_aix_si (dest));
25287 else
25288 emit_insn (gen_load_toc_aix_di (dest));
25289 }
25290 }
25291
25292 /* Emit instructions to restore the link register after determining where
25293 its value has been stored. */
25294
25295 void
25296 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25297 {
25298 rs6000_stack_t *info = rs6000_stack_info ();
25299 rtx operands[2];
25300
25301 operands[0] = source;
25302 operands[1] = scratch;
25303
25304 if (info->lr_save_p)
25305 {
25306 rtx frame_rtx = stack_pointer_rtx;
25307 HOST_WIDE_INT sp_offset = 0;
25308 rtx tmp;
25309
25310 if (frame_pointer_needed
25311 || cfun->calls_alloca
25312 || info->total_size > 32767)
25313 {
25314 tmp = gen_frame_mem (Pmode, frame_rtx);
25315 emit_move_insn (operands[1], tmp);
25316 frame_rtx = operands[1];
25317 }
25318 else if (info->push_p)
25319 sp_offset = info->total_size;
25320
25321 tmp = plus_constant (Pmode, frame_rtx,
25322 info->lr_save_offset + sp_offset);
25323 tmp = gen_frame_mem (Pmode, tmp);
25324 emit_move_insn (tmp, operands[0]);
25325 }
25326 else
25327 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25328
25329 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25330 state of lr_save_p so any change from here on would be a bug. In
25331 particular, stop rs6000_ra_ever_killed from considering the SET
25332 of lr we may have added just above. */
25333 cfun->machine->lr_save_state = info->lr_save_p + 1;
25334 }
25335
25336 static GTY(()) alias_set_type set = -1;
25337
25338 alias_set_type
25339 get_TOC_alias_set (void)
25340 {
25341 if (set == -1)
25342 set = new_alias_set ();
25343 return set;
25344 }
25345
25346 /* This returns nonzero if the current function uses the TOC. This is
25347 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25348 is generated by the ABI_V4 load_toc_* patterns.
25349 Return 2 instead of 1 if the load_toc_* pattern is in the function
25350 partition that doesn't start the function. */
25351 #if TARGET_ELF
25352 static int
25353 uses_TOC (void)
25354 {
25355 rtx_insn *insn;
25356 int ret = 1;
25357
25358 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25359 {
25360 if (INSN_P (insn))
25361 {
25362 rtx pat = PATTERN (insn);
25363 int i;
25364
25365 if (GET_CODE (pat) == PARALLEL)
25366 for (i = 0; i < XVECLEN (pat, 0); i++)
25367 {
25368 rtx sub = XVECEXP (pat, 0, i);
25369 if (GET_CODE (sub) == USE)
25370 {
25371 sub = XEXP (sub, 0);
25372 if (GET_CODE (sub) == UNSPEC
25373 && XINT (sub, 1) == UNSPEC_TOC)
25374 return ret;
25375 }
25376 }
25377 }
25378 else if (crtl->has_bb_partition
25379 && NOTE_P (insn)
25380 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25381 ret = 2;
25382 }
25383 return 0;
25384 }
25385 #endif
25386
25387 rtx
25388 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25389 {
25390 rtx tocrel, tocreg, hi;
25391
25392 if (TARGET_DEBUG_ADDR)
25393 {
25394 if (SYMBOL_REF_P (symbol))
25395 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25396 XSTR (symbol, 0));
25397 else
25398 {
25399 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25400 GET_RTX_NAME (GET_CODE (symbol)));
25401 debug_rtx (symbol);
25402 }
25403 }
25404
25405 if (!can_create_pseudo_p ())
25406 df_set_regs_ever_live (TOC_REGISTER, true);
25407
25408 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25409 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25410 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25411 return tocrel;
25412
25413 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25414 if (largetoc_reg != NULL)
25415 {
25416 emit_move_insn (largetoc_reg, hi);
25417 hi = largetoc_reg;
25418 }
25419 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25420 }
25421
25422 /* Issue assembly directives that create a reference to the given DWARF
25423 FRAME_TABLE_LABEL from the current function section. */
25424 void
25425 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25426 {
25427 fprintf (asm_out_file, "\t.ref %s\n",
25428 (* targetm.strip_name_encoding) (frame_table_label));
25429 }
25430 \f
25431 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25432 and the change to the stack pointer. */
25433
25434 static void
25435 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25436 {
25437 rtvec p;
25438 int i;
25439 rtx regs[3];
25440
25441 i = 0;
25442 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25443 if (hard_frame_needed)
25444 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25445 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25446 || (hard_frame_needed
25447 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25448 regs[i++] = fp;
25449
25450 p = rtvec_alloc (i);
25451 while (--i >= 0)
25452 {
25453 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25454 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25455 }
25456
25457 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25458 }
25459
25460 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25461 and set the appropriate attributes for the generated insn. Return the
25462 first insn which adjusts the stack pointer or the last insn before
25463 the stack adjustment loop.
25464
25465 SIZE_INT is used to create the CFI note for the allocation.
25466
25467 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25468 since stacks grow to lower addresses its runtime value is -SIZE_INT.
25469
25470 ORIG_SP contains the backchain value that must be stored at *sp. */
25471
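/* Illustrative sketch (not part of the original file): what one
   store-with-update allocation (stwu/stdu) does, in plain C.  A single
   insn both decrements the stack pointer and stores the backchain at the
   new top of stack.  */
#if 0
static char *
allocate_with_backchain (char *sp, long size, char *orig_sp)
{
  sp -= size;			/* stacks grow down */
  *(char **) sp = orig_sp;	/* backchain lands at *sp */
  return sp;
}
#endif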
25472 static rtx_insn *
25473 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25474 {
25475 rtx_insn *insn;
25476
25477 rtx size_rtx = GEN_INT (-size_int);
25478 if (size_int > 32767)
25479 {
25480 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25481 /* Need a note here so that try_split doesn't get confused. */
25482 if (get_last_insn () == NULL_RTX)
25483 emit_note (NOTE_INSN_DELETED);
25484 insn = emit_move_insn (tmp_reg, size_rtx);
25485 try_split (PATTERN (insn), insn, 0);
25486 size_rtx = tmp_reg;
25487 }
25488
25489 if (Pmode == SImode)
25490 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25491 stack_pointer_rtx,
25492 size_rtx,
25493 orig_sp));
25494 else
25495 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25496 stack_pointer_rtx,
25497 size_rtx,
25498 orig_sp));
25499 rtx par = PATTERN (insn);
25500 gcc_assert (GET_CODE (par) == PARALLEL);
25501 rtx set = XVECEXP (par, 0, 0);
25502 gcc_assert (GET_CODE (set) == SET);
25503 rtx mem = SET_DEST (set);
25504 gcc_assert (MEM_P (mem));
25505 MEM_NOTRAP_P (mem) = 1;
25506 set_mem_alias_set (mem, get_frame_alias_set ());
25507
25508 RTX_FRAME_RELATED_P (insn) = 1;
25509 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25510 gen_rtx_SET (stack_pointer_rtx,
25511 gen_rtx_PLUS (Pmode,
25512 stack_pointer_rtx,
25513 GEN_INT (-size_int))));
25514
25515 /* Emit a blockage to ensure the allocation/probing insns are
25516 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25517 note for similar reasons. */
25518 if (flag_stack_clash_protection)
25519 {
25520 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25521 emit_insn (gen_blockage ());
25522 }
25523
25524 return insn;
25525 }
25526
25527 static HOST_WIDE_INT
25528 get_stack_clash_protection_probe_interval (void)
25529 {
25530 return (HOST_WIDE_INT_1U
25531 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25532 }
25533
25534 static HOST_WIDE_INT
25535 get_stack_clash_protection_guard_size (void)
25536 {
25537 return (HOST_WIDE_INT_1U
25538 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25539 }
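/* Worked example (illustrative, assuming the usual default parameter
   value of 12): the probe interval is 1 << 12 = 4096 bytes, i.e. one
   4 KiB page.  */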
25540
25541 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25542 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25543
25544 COPY_REG, if non-null, should contain a copy of the original
25545 stack pointer at exit from this function.
25546
25547 	 This is subtly different from the Ada probing in that it tries hard to
25548 prevent attacks that jump the stack guard. Thus it is never allowed to
25549 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25550 space without a suitable probe. */
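/* Worked example (illustrative, assuming a 4096-byte probe interval):
   ORIG_SIZE = 20000 rounds down to 16384 = 4 * 4096.  That is at most
   8 intervals, so four unrolled allocate-and-probe insns are emitted
   below, and the 3616-byte residual is allocated separately by the
   caller.  */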
25551 static rtx_insn *
25552 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25553 rtx copy_reg)
25554 {
25555 rtx orig_sp = copy_reg;
25556
25557 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25558
25559 /* Round the size down to a multiple of PROBE_INTERVAL. */
25560 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25561
25562 /* If explicitly requested,
25563 or the rounded size is not the same as the original size
25564 or the rounded size is greater than a page,
25565 then we will need a copy of the original stack pointer. */
25566 if (rounded_size != orig_size
25567 || rounded_size > probe_interval
25568 || copy_reg)
25569 {
25570 /* If the caller did not request a copy of the incoming stack
25571 pointer, then we use r0 to hold the copy. */
25572 if (!copy_reg)
25573 orig_sp = gen_rtx_REG (Pmode, 0);
25574 emit_move_insn (orig_sp, stack_pointer_rtx);
25575 }
25576
25577 /* There are three cases here.
25578
25579 The first is a single probe, which is the most common and the most
25580 efficient, as it does not need a copy of the original stack pointer
25581 if there are no residuals.
25582
25583 The second is unrolled allocation/probes, used when there are just a
25584 few of them; it saves the original stack pointer into a temporary for
25585 use as a source register in the allocation/probe.
25586
25587 The last is a loop, the least common case and the least efficient. */
25588 rtx_insn *retval = NULL;
25589 if (rounded_size == probe_interval)
25590 {
25591 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25592
25593 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25594 }
25595 else if (rounded_size <= 8 * probe_interval)
25596 {
25597 /* The ABI requires using the store-with-update insns to allocate
25598 space and store the backchain into the stack.
25599
25600 So we save the current stack pointer into a temporary, then
25601 emit the store-with-update insns to store the saved stack pointer
25602 into the right location in each new page. */
25603 for (int i = 0; i < rounded_size; i += probe_interval)
25604 {
25605 rtx_insn *insn
25606 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25607
25608 /* Save the first stack adjustment in RETVAL. */
25609 if (i == 0)
25610 retval = insn;
25611 }
25612
25613 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25614 }
25615 else
25616 {
25617 /* Compute the ending address. */
25618 rtx end_addr
25619 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25620 rtx rs = GEN_INT (-rounded_size);
25621 rtx_insn *insn;
25622 if (add_operand (rs, Pmode))
25623 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25624 else
25625 {
25626 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25627 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25628 stack_pointer_rtx));
25629 /* Describe the effect of INSN to the CFI engine. */
25630 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25631 gen_rtx_SET (end_addr,
25632 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25633 rs)));
25634 }
25635 RTX_FRAME_RELATED_P (insn) = 1;
25636
25637 /* Emit the loop. */
25638 if (TARGET_64BIT)
25639 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25640 stack_pointer_rtx, orig_sp,
25641 end_addr));
25642 else
25643 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25644 stack_pointer_rtx, orig_sp,
25645 end_addr));
25646 RTX_FRAME_RELATED_P (retval) = 1;
25647 /* Describe the effect of INSN to the CFI engine. */
25648 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25649 gen_rtx_SET (stack_pointer_rtx, end_addr));
25650
25651 /* Emit a blockage to ensure the allocation/probing insns are
25652 not optimized, combined, removed, etc. Other cases handle this
25653 within their call to rs6000_emit_allocate_stack_1. */
25654 emit_insn (gen_blockage ());
25655
25656 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25657 }
25658
25659 if (orig_size != rounded_size)
25660 {
25661 /* Allocate (and implicitly probe) any residual space. */
25662 HOST_WIDE_INT residual = orig_size - rounded_size;
25663
25664 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25665
25666 /* If the residual was the only allocation, then we can return the
25667 allocating insn. */
25668 if (!retval)
25669 retval = insn;
25670 }
25671
25672 return retval;
25673 }
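
/* A worked example, assuming a 4 KiB probe interval: for ORIG_SIZE =
   9000, ROUNDED_SIZE is 8192, so the unrolled case above emits two
   4096-byte store-with-update allocations (each implicitly probing the
   new page by storing the backchain), followed by a residual 808-byte
   allocation.  */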
25674
25675 /* Emit the correct code for allocating stack space, as insns.
25676 If COPY_REG, make sure a copy of the old frame is left there.
25677 The generated code may use hard register 0 as a temporary. */
25678
25679 static rtx_insn *
25680 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25681 {
25682 rtx_insn *insn;
25683 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25684 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25685 rtx todec = gen_int_mode (-size, Pmode);
25686
25687 if (INTVAL (todec) != -size)
25688 {
25689 warning (0, "stack frame too large");
25690 emit_insn (gen_trap ());
25691 return 0;
25692 }
25693
25694 if (crtl->limit_stack)
25695 {
25696 if (REG_P (stack_limit_rtx)
25697 && REGNO (stack_limit_rtx) > 1
25698 && REGNO (stack_limit_rtx) <= 31)
25699 {
25700 rtx_insn *insn
25701 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25702 gcc_assert (insn);
25703 emit_insn (insn);
25704 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25705 }
25706 else if (SYMBOL_REF_P (stack_limit_rtx)
25707 && TARGET_32BIT
25708 && DEFAULT_ABI == ABI_V4
25709 && !flag_pic)
25710 {
25711 rtx toload = gen_rtx_CONST (VOIDmode,
25712 gen_rtx_PLUS (Pmode,
25713 stack_limit_rtx,
25714 GEN_INT (size)));
25715
25716 emit_insn (gen_elf_high (tmp_reg, toload));
25717 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25718 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25719 const0_rtx));
25720 }
25721 else
25722 warning (0, "stack limit expression is not supported");
25723 }
25724
25725 if (flag_stack_clash_protection)
25726 {
25727 if (size < get_stack_clash_protection_guard_size ())
25728 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25729 else
25730 {
25731 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25732 copy_reg);
25733
25734 /* If we asked for a copy with an offset, then we still need to add in
25735 the offset. */
25736 if (copy_reg && copy_off)
25737 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25738 return insn;
25739 }
25740 }
25741
25742 if (copy_reg)
25743 {
25744 if (copy_off != 0)
25745 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25746 else
25747 emit_move_insn (copy_reg, stack_reg);
25748 }
25749
25750 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25751 it now and set the alias set/attributes. The gen_*_update calls
25752 made inside rs6000_emit_allocate_stack_1 generate a PARALLEL with
25753 the MEM set being the first operation. */
25754 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25755 return insn;
25756 }
25757
25758 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25759
25760 #if PROBE_INTERVAL > 32768
25761 #error Cannot use indexed addressing mode for stack probing
25762 #endif
25763
25764 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25765 inclusive. These are offsets from the current stack pointer. */
25766
25767 static void
25768 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25769 {
25770 /* See if we have a constant small number of probes to generate. If so,
25771 that's the easy case. */
25772 if (first + size <= 32768)
25773 {
25774 HOST_WIDE_INT i;
25775
25776 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25777 it exceeds SIZE. If only one probe is needed, this will not
25778 generate any code. Then probe at FIRST + SIZE. */
25779 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25780 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25781 -(first + i)));
25782
25783 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25784 -(first + size)));
25785 }
25786
25787 /* Otherwise, do the same as above, but in a loop. Note that we must be
25788 extra careful with variables wrapping around because we might be at
25789 the very top (or the very bottom) of the address space and we have
25790 to be able to handle this case properly; in particular, we use an
25791 equality test for the loop condition. */
25792 else
25793 {
25794 HOST_WIDE_INT rounded_size;
25795 rtx r12 = gen_rtx_REG (Pmode, 12);
25796 rtx r0 = gen_rtx_REG (Pmode, 0);
25797
25798 /* Sanity check for the addressing mode we're going to use. */
25799 gcc_assert (first <= 32768);
25800
25801 /* Step 1: round SIZE to the previous multiple of the interval. */
25802
25803 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25804
25805
25806 /* Step 2: compute initial and final value of the loop counter. */
25807
25808 /* TEST_ADDR = SP + FIRST. */
25809 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25810 -first)));
25811
25812 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25813 if (rounded_size > 32768)
25814 {
25815 emit_move_insn (r0, GEN_INT (-rounded_size));
25816 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25817 }
25818 else
25819 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25820 -rounded_size)));
25821
25822
25823 /* Step 3: the loop
25824
25825 do
25826 {
25827 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25828 probe at TEST_ADDR
25829 }
25830 while (TEST_ADDR != LAST_ADDR)
25831
25832 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25833 until N * PROBE_INTERVAL is equal to ROUNDED_SIZE. */
25834
25835 if (TARGET_64BIT)
25836 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25837 else
25838 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25839
25840
25841 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25842 that SIZE is equal to ROUNDED_SIZE. */
25843
25844 if (size != rounded_size)
25845 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25846 }
25847 }
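
/* A worked example, assuming the default 4 KiB PROBE_INTERVAL: with
   FIRST = 16384 and SIZE = 10000, FIRST + SIZE = 26384 <= 32768, so the
   constant case above emits probes at sp-20480, sp-24576 and finally
   sp-26384.  */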
25848
25849 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25850 addresses, not offsets. */
25851
25852 static const char *
25853 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25854 {
25855 static int labelno = 0;
25856 char loop_lab[32];
25857 rtx xops[2];
25858
25859 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25860
25861 /* Loop. */
25862 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25863
25864 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25865 xops[0] = reg1;
25866 xops[1] = GEN_INT (-PROBE_INTERVAL);
25867 output_asm_insn ("addi %0,%0,%1", xops);
25868
25869 /* Probe at TEST_ADDR. */
25870 xops[1] = gen_rtx_REG (Pmode, 0);
25871 output_asm_insn ("stw %1,0(%0)", xops);
25872
25873 /* Test if TEST_ADDR == LAST_ADDR. */
25874 xops[1] = reg2;
25875 if (TARGET_64BIT)
25876 output_asm_insn ("cmpd 0,%0,%1", xops);
25877 else
25878 output_asm_insn ("cmpw 0,%0,%1", xops);
25879
25880 /* Branch. */
25881 fputs ("\tbne 0,", asm_out_file);
25882 assemble_name_raw (asm_out_file, loop_lab);
25883 fputc ('\n', asm_out_file);
25884
25885 return "";
25886 }
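
/* For instance, with REG1 = r12, REG2 = r0 and a 4 KiB PROBE_INTERVAL,
   the loop above comes out roughly as (the label spelling is
   target-dependent):

	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpd 0,12,0
		bne 0,.LPSRL0
   */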
25887
25888 /* This function is called when rs6000_frame_related is processing
25889 SETs within a PARALLEL, and returns whether the REGNO save ought to
25890 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25891 for out-of-line register save functions, store multiple, and the
25892 Darwin world_save. They may contain registers that don't really
25893 need saving. */
25894
25895 static bool
25896 interesting_frame_related_regno (unsigned int regno)
25897 {
25898 /* Apparent saves of r0 are actually saves of LR. It doesn't make
25899 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25900 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25901 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25902 as frame related. */
25903 if (regno == 0)
25904 return true;
25905 /* If we see CR2 then we are here on a Darwin world save. Saves of
25906 CR2 signify the whole CR is being saved. This is a long-standing
25907 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25908 that CR needs to be saved. */
25909 if (regno == CR2_REGNO)
25910 return true;
25911 /* Omit frame info for any user-defined global regs. If frame info
25912 is supplied for them, frame unwinding will restore a user reg.
25913 Also omit frame info for any reg we don't need to save, as that
25914 bloats frame info and can cause problems with shrink wrapping.
25915 Since global regs won't be seen as needing to be saved, both of
25916 these conditions are covered by save_reg_p. */
25917 return save_reg_p (regno);
25918 }
25919
25920 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25921 addresses, not offsets.
25922
25923 REG2 contains the backchain that must be stored into *sp at each allocation.
25924
25925 This is subtly different from the Ada probing above in that it tries hard
25926 to prevent attacks that jump the stack guard. Thus, it is never allowed
25927 to allocate more than PROBE_INTERVAL bytes of stack space without a
25928 suitable probe. */
25929
25930 static const char *
25931 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25932 {
25933 static int labelno = 0;
25934 char loop_lab[32];
25935 rtx xops[3];
25936
25937 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25938
25939 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25940
25941 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25942
25943 /* This allocates and probes. */
25944 xops[0] = reg1;
25945 xops[1] = reg2;
25946 xops[2] = GEN_INT (-probe_interval);
25947 if (TARGET_64BIT)
25948 output_asm_insn ("stdu %1,%2(%0)", xops);
25949 else
25950 output_asm_insn ("stwu %1,%2(%0)", xops);
25951
25952 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25953 xops[0] = reg1;
25954 xops[1] = reg3;
25955 if (TARGET_64BIT)
25956 output_asm_insn ("cmpd 0,%0,%1", xops);
25957 else
25958 output_asm_insn ("cmpw 0,%0,%1", xops);
25959
25960 fputs ("\tbne 0,", asm_out_file);
25961 assemble_name_raw (asm_out_file, loop_lab);
25962 fputc ('\n', asm_out_file);
25963
25964 return "";
25965 }
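
/* For instance, on a 64-bit target with a 4 KiB probe interval, and
   with REG1 = r1, REG2 = r0 (backchain) and REG3 = r12 (end address),
   the loop above is roughly:

	.LPSRL1:
		stdu 0,-4096(1)
		cmpd 0,1,12
		bne 0,.LPSRL1

   Each stdu both allocates a page and probes it by storing the
   backchain.  */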
25966
25967 /* Wrapper around the output_probe_stack_range routines. */
25968 const char *
25969 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25970 {
25971 if (flag_stack_clash_protection)
25972 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25973 else
25974 return output_probe_stack_range_1 (reg1, reg3);
25975 }
25976
25977 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25978 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25979 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25980 deduce these equivalences by itself so it wasn't necessary to hold
25981 its hand so much. Don't be tempted to always supply d2_f_d_e with
25982 the actual CFA register, i.e. r31 when we are using a hard frame
25983 pointer. That fails when saving regs off r1, and sched moves the
25984 r31 setup past the reg saves. */
25985
25986 static rtx_insn *
25987 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25988 rtx reg2, rtx repl2)
25989 {
25990 rtx repl;
25991
25992 if (REGNO (reg) == STACK_POINTER_REGNUM)
25993 {
25994 gcc_checking_assert (val == 0);
25995 repl = NULL_RTX;
25996 }
25997 else
25998 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25999 GEN_INT (val));
26000
26001 rtx pat = PATTERN (insn);
26002 if (!repl && !reg2)
26003 {
26004 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
26005 if (GET_CODE (pat) == PARALLEL)
26006 for (int i = 0; i < XVECLEN (pat, 0); i++)
26007 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26008 {
26009 rtx set = XVECEXP (pat, 0, i);
26010
26011 if (!REG_P (SET_SRC (set))
26012 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26013 RTX_FRAME_RELATED_P (set) = 1;
26014 }
26015 RTX_FRAME_RELATED_P (insn) = 1;
26016 return insn;
26017 }
26018
26019 /* We expect that 'pat' is either a SET or a PARALLEL containing
26020 SETs (and possibly other stuff). In a PARALLEL, all the SETs
26021 are important so they all have to be marked RTX_FRAME_RELATED_P.
26022 Call simplify_replace_rtx on the SETs rather than the whole insn
26023 so as to leave the other stuff alone (for example USE of r12). */
26024
26025 set_used_flags (pat);
26026 if (GET_CODE (pat) == SET)
26027 {
26028 if (repl)
26029 pat = simplify_replace_rtx (pat, reg, repl);
26030 if (reg2)
26031 pat = simplify_replace_rtx (pat, reg2, repl2);
26032 }
26033 else if (GET_CODE (pat) == PARALLEL)
26034 {
26035 pat = shallow_copy_rtx (pat);
26036 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26037
26038 for (int i = 0; i < XVECLEN (pat, 0); i++)
26039 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26040 {
26041 rtx set = XVECEXP (pat, 0, i);
26042
26043 if (repl)
26044 set = simplify_replace_rtx (set, reg, repl);
26045 if (reg2)
26046 set = simplify_replace_rtx (set, reg2, repl2);
26047 XVECEXP (pat, 0, i) = set;
26048
26049 if (!REG_P (SET_SRC (set))
26050 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26051 RTX_FRAME_RELATED_P (set) = 1;
26052 }
26053 }
26054 else
26055 gcc_unreachable ();
26056
26057 RTX_FRAME_RELATED_P (insn) = 1;
26058 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26059
26060 return insn;
26061 }
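
/* For example, if INSN is (set (mem (plus r11 8)) (reg r31)) and this
   is called with REG = r11 and VAL = 32, the attached note describes
   the save as (set (mem (plus (reg 1) 40)) (reg r31)), i.e. relative
   to the stack pointer rather than the frame register.  */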
26062
26063 /* Returns an insn that has a vrsave set operation with the
26064 appropriate CLOBBERs. */
26065
26066 static rtx
26067 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26068 {
26069 int nclobs, i;
26070 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26071 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26072
26073 clobs[0]
26074 = gen_rtx_SET (vrsave,
26075 gen_rtx_UNSPEC_VOLATILE (SImode,
26076 gen_rtvec (2, reg, vrsave),
26077 UNSPECV_SET_VRSAVE));
26078
26079 nclobs = 1;
26080
26081 /* We need to clobber the registers in the mask so the scheduler
26082 does not move sets to VRSAVE before sets of AltiVec registers.
26083
26084 However, if the function receives nonlocal gotos, reload will set
26085 all call saved registers live. We will end up with:
26086
26087 (set (reg 999) (mem))
26088 (parallel [ (set (reg vrsave) (unspec blah))
26089 (clobber (reg 999))])
26090
26091 The clobber will cause the store into reg 999 to be dead, and
26092 flow will attempt to delete an epilogue insn. In this case, we
26093 need an unspec use/set of the register. */
26094
26095 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26096 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26097 {
26098 if (!epiloguep || call_used_regs[i])
26099 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
26100 else
26101 {
26102 rtx reg = gen_rtx_REG (V4SImode, i);
26103
26104 clobs[nclobs++]
26105 = gen_rtx_SET (reg,
26106 gen_rtx_UNSPEC (V4SImode,
26107 gen_rtvec (1, reg), 27));
26108 }
26109 }
26110
26111 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26112
26113 for (i = 0; i < nclobs; ++i)
26114 XVECEXP (insn, 0, i) = clobs[i];
26115
26116 return insn;
26117 }
26118
26119 static rtx
26120 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26121 {
26122 rtx addr, mem;
26123
26124 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26125 mem = gen_frame_mem (GET_MODE (reg), addr);
26126 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26127 }
26128
26129 static rtx
26130 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26131 {
26132 return gen_frame_set (reg, frame_reg, offset, false);
26133 }
26134
26135 static rtx
26136 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26137 {
26138 return gen_frame_set (reg, frame_reg, offset, true);
26139 }
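
/* For example, on a 64-bit target
   gen_frame_store (gen_rtx_REG (DImode, 30), sp_reg, 16) yields
   (set (mem:DI (plus:DI (reg 1) (const_int 16))) (reg:DI 30)),
   with the MEM carrying the frame alias set.  */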
26140
26141 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26142 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26143
26144 static rtx_insn *
26145 emit_frame_save (rtx frame_reg, machine_mode mode,
26146 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26147 {
26148 rtx reg;
26149
26150 /* Some cases that need register indexed addressing. */
26151 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26152 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26153
26154 reg = gen_rtx_REG (mode, regno);
26155 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26156 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26157 NULL_RTX, NULL_RTX);
26158 }
26159
26160 /* Emit an offset memory reference suitable for a frame store, while
26161 converting to a valid addressing mode. */
26162
26163 static rtx
26164 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26165 {
26166 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26167 }
26168
26169 #ifndef TARGET_FIX_AND_CONTINUE
26170 #define TARGET_FIX_AND_CONTINUE 0
26171 #endif
26172
26173 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
26174 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26175 #define LAST_SAVRES_REGISTER 31
26176 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26177
26178 enum {
26179 SAVRES_LR = 0x1,
26180 SAVRES_SAVE = 0x2,
26181 SAVRES_REG = 0x0c,
26182 SAVRES_GPR = 0,
26183 SAVRES_FPR = 4,
26184 SAVRES_VR = 8
26185 };
26186
26187 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26188
26189 /* Temporary holding space for an out-of-line register save/restore
26190 routine name. */
26191 static char savres_routine_name[30];
26192
26193 /* Return the name for an out-of-line register save/restore routine.
26194 SEL encodes the register class (GPR/FPR/VR) and save/restore variant. */
26195
26196 static char *
26197 rs6000_savres_routine_name (int regno, int sel)
26198 {
26199 const char *prefix = "";
26200 const char *suffix = "";
26201
26202 /* Different targets are supposed to define
26203 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26204 routine name could be defined with:
26205
26206 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26207
26208 This is a nice idea in theory, but in reality, things are
26209 complicated in several ways:
26210
26211 - ELF targets have save/restore routines for GPRs.
26212
26213 - PPC64 ELF targets have routines for save/restore of GPRs that
26214 differ in what they do with the link register, so having a set
26215 prefix doesn't work. (We only use one of the save routines at
26216 the moment, though.)
26217
26218 - PPC32 ELF targets have "exit" versions of the restore routines
26219 that restore the link register and can save some extra space.
26220 These require an extra suffix. (There are also "tail" versions
26221 of the restore routines and "GOT" versions of the save routines,
26222 but we don't generate those at present. Same problems apply,
26223 though.)
26224
26225 We deal with all this by synthesizing our own prefix/suffix and
26226 using that for the simple sprintf call shown above. */
26227 if (DEFAULT_ABI == ABI_V4)
26228 {
26229 if (TARGET_64BIT)
26230 goto aix_names;
26231
26232 if ((sel & SAVRES_REG) == SAVRES_GPR)
26233 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26234 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26235 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26236 else if ((sel & SAVRES_REG) == SAVRES_VR)
26237 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26238 else
26239 abort ();
26240
26241 if ((sel & SAVRES_LR))
26242 suffix = "_x";
26243 }
26244 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26245 {
26246 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26247 /* No out-of-line save/restore routines for GPRs on AIX. */
26248 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26249 #endif
26250
26251 aix_names:
26252 if ((sel & SAVRES_REG) == SAVRES_GPR)
26253 prefix = ((sel & SAVRES_SAVE)
26254 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26255 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26256 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26257 {
26258 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26259 if ((sel & SAVRES_LR))
26260 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26261 else
26262 #endif
26263 {
26264 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26265 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26266 }
26267 }
26268 else if ((sel & SAVRES_REG) == SAVRES_VR)
26269 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26270 else
26271 abort ();
26272 }
26273
26274 if (DEFAULT_ABI == ABI_DARWIN)
26275 {
26276 /* The Darwin approach is (slightly) different, in order to be
26277 compatible with code generated by the system toolchain. There is a
26278 single symbol for the start of the save sequence, and the code here
26279 embeds an offset into that code based on the first register
26280 to be saved. */
26281 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26282 if ((sel & SAVRES_REG) == SAVRES_GPR)
26283 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26284 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26285 (regno - 13) * 4, prefix, regno);
26286 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26287 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26288 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26289 else if ((sel & SAVRES_REG) == SAVRES_VR)
26290 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26291 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26292 else
26293 abort ();
26294 }
26295 else
26296 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26297
26298 return savres_routine_name;
26299 }
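
/* Typical results: "_savegpr0_14" for an ELFv2/AIX GPR save that also
   handles LR, or "_restgpr_29_x" for a PPC32 ELF "exit" restore of
   r29..r31 that also restores the link register and returns.  */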
26300
26301 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26302 SEL encodes the register class (GPR/FPR/VR) and save/restore variant. */
26303
26304 static rtx
26305 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26306 {
26307 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26308 ? info->first_gp_reg_save
26309 : (sel & SAVRES_REG) == SAVRES_FPR
26310 ? info->first_fp_reg_save - 32
26311 : (sel & SAVRES_REG) == SAVRES_VR
26312 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26313 : -1);
26314 rtx sym;
26315 int select = sel;
26316
26317 /* Don't generate bogus routine names. */
26318 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26319 && regno <= LAST_SAVRES_REGISTER
26320 && select >= 0 && select <= 12);
26321
26322 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26323
26324 if (sym == NULL)
26325 {
26326 char *name;
26327
26328 name = rs6000_savres_routine_name (regno, sel);
26329
26330 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26331 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26332 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26333 }
26334
26335 return sym;
26336 }
26337
26338 /* Emit a sequence of insns, including a stack tie if needed, for
26339 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26340 reset the stack pointer, but move the base of the frame into
26341 reg UPDT_REGNO for use by out-of-line register restore routines. */
26342
26343 static rtx
26344 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26345 unsigned updt_regno)
26346 {
26347 /* If there is nothing to do, don't do anything. */
26348 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26349 return NULL_RTX;
26350
26351 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26352
26353 /* This blockage is needed so that sched doesn't decide to move
26354 the sp change before the register restores. */
26355 if (DEFAULT_ABI == ABI_V4)
26356 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26357 GEN_INT (frame_off)));
26358
26359 /* If we are restoring registers out-of-line, we will be using the
26360 "exit" variants of the restore routines, which will reset the
26361 stack for us. But we do need to point updt_reg into the
26362 right place for those routines. */
26363 if (frame_off != 0)
26364 return emit_insn (gen_add3_insn (updt_reg_rtx,
26365 frame_reg_rtx, GEN_INT (frame_off)));
26366 else
26367 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26368
26369 return NULL_RTX;
26370 }
26371
26372 /* Return the register number used as a pointer by out-of-line
26373 save/restore functions. */
26374
26375 static inline unsigned
26376 ptr_regno_for_savres (int sel)
26377 {
26378 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26379 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26380 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26381 }
26382
26383 /* Construct a parallel rtx describing the effect of a call to an
26384 out-of-line register save/restore routine, and emit the insn
26385 or jump_insn as appropriate. */
26386
26387 static rtx_insn *
26388 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26389 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26390 machine_mode reg_mode, int sel)
26391 {
26392 int i;
26393 int offset, start_reg, end_reg, n_regs, use_reg;
26394 int reg_size = GET_MODE_SIZE (reg_mode);
26395 rtx sym;
26396 rtvec p;
26397 rtx par;
26398 rtx_insn *insn;
26399
26400 offset = 0;
26401 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26402 ? info->first_gp_reg_save
26403 : (sel & SAVRES_REG) == SAVRES_FPR
26404 ? info->first_fp_reg_save
26405 : (sel & SAVRES_REG) == SAVRES_VR
26406 ? info->first_altivec_reg_save
26407 : -1);
26408 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26409 ? 32
26410 : (sel & SAVRES_REG) == SAVRES_FPR
26411 ? 64
26412 : (sel & SAVRES_REG) == SAVRES_VR
26413 ? LAST_ALTIVEC_REGNO + 1
26414 : -1);
26415 n_regs = end_reg - start_reg;
26416 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26417 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26418 + n_regs);
26419
26420 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26421 RTVEC_ELT (p, offset++) = ret_rtx;
26422
26423 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26424
26425 sym = rs6000_savres_routine_sym (info, sel);
26426 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26427
26428 use_reg = ptr_regno_for_savres (sel);
26429 if ((sel & SAVRES_REG) == SAVRES_VR)
26430 {
26431 /* Vector regs are saved/restored using [reg+reg] addressing. */
26432 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26433 RTVEC_ELT (p, offset++)
26434 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26435 }
26436 else
26437 RTVEC_ELT (p, offset++)
26438 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26439
26440 for (i = 0; i < end_reg - start_reg; i++)
26441 RTVEC_ELT (p, i + offset)
26442 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26443 frame_reg_rtx, save_area_offset + reg_size * i,
26444 (sel & SAVRES_SAVE) != 0);
26445
26446 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26447 RTVEC_ELT (p, i + offset)
26448 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26449
26450 par = gen_rtx_PARALLEL (VOIDmode, p);
26451
26452 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26453 {
26454 insn = emit_jump_insn (par);
26455 JUMP_LABEL (insn) = ret_rtx;
26456 }
26457 else
26458 insn = emit_insn (par);
26459 return insn;
26460 }
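
/* For a GPR save with LR on ELFv2, the PARALLEL built above looks
   roughly like

     (parallel [(clobber (reg LR))
		(use (symbol_ref "_savegpr0_28"))
		(use (reg 1))
		(set (mem (plus (reg 1) A)) (reg 28))
		...
		(set (mem (plus (reg 1) B)) (reg 0))])

   with A and B standing for frame offsets; the final store saves the
   LR value previously copied to r0.  */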
26461
26462 /* Emit prologue code to store CR fields that need to be saved into REG. This
26463 function should only be called when moving the non-volatile CRs to REG;
26464 it is not a general-purpose routine to move the entire set of CRs to REG.
26465 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26466 volatile CRs. */
26467
26468 static void
26469 rs6000_emit_prologue_move_from_cr (rtx reg)
26470 {
26471 /* Only the ELFv2 ABI allows storing only selected fields. */
26472 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26473 {
26474 int i, cr_reg[8], count = 0;
26475
26476 /* Collect CR fields that must be saved. */
26477 for (i = 0; i < 8; i++)
26478 if (save_reg_p (CR0_REGNO + i))
26479 cr_reg[count++] = i;
26480
26481 /* If it's just a single one, use mfcrf. */
26482 if (count == 1)
26483 {
26484 rtvec p = rtvec_alloc (1);
26485 rtvec r = rtvec_alloc (2);
26486 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26487 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26488 RTVEC_ELT (p, 0)
26489 = gen_rtx_SET (reg,
26490 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26491
26492 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26493 return;
26494 }
26495
26496 /* ??? It might be better to handle count == 2 / 3 cases here
26497 as well, using logical operations to combine the values. */
26498 }
26499
26500 emit_insn (gen_prologue_movesi_from_cr (reg));
26501 }
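
/* For example, if CR2 is the only field that needs saving, the ELFv2
   path above emits the single-field move-from-CR form with field mask
   1 << (7 - 2) = 0x20, so only CR2 is copied into REG.  */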
26502
26503 /* Return whether the split-stack arg pointer (r12) is used. */
26504
26505 static bool
26506 split_stack_arg_pointer_used_p (void)
26507 {
26508 /* If the pseudo holding the arg pointer is no longer a pseudo,
26509 then the arg pointer is used. */
26510 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26511 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26512 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26513 return true;
26514
26515 /* Unfortunately we also need to do some code scanning, since
26516 r12 may have been substituted for the pseudo. */
26517 rtx_insn *insn;
26518 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26519 FOR_BB_INSNS (bb, insn)
26520 if (NONDEBUG_INSN_P (insn))
26521 {
26522 /* A call destroys r12. */
26523 if (CALL_P (insn))
26524 return false;
26525
26526 df_ref use;
26527 FOR_EACH_INSN_USE (use, insn)
26528 {
26529 rtx x = DF_REF_REG (use);
26530 if (REG_P (x) && REGNO (x) == 12)
26531 return true;
26532 }
26533 df_ref def;
26534 FOR_EACH_INSN_DEF (def, insn)
26535 {
26536 rtx x = DF_REF_REG (def);
26537 if (REG_P (x) && REGNO (x) == 12)
26538 return false;
26539 }
26540 }
26541 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26542 }
26543
26544 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26545
26546 static bool
26547 rs6000_global_entry_point_needed_p (void)
26548 {
26549 /* Only needed for the ELFv2 ABI. */
26550 if (DEFAULT_ABI != ABI_ELFv2)
26551 return false;
26552
26553 /* With -msingle-pic-base, we assume the whole program shares the same
26554 TOC, so no global entry point prologues are needed anywhere. */
26555 if (TARGET_SINGLE_PIC_BASE)
26556 return false;
26557
26558 /* Ensure we have a global entry point for thunks. ??? We could
26559 avoid that if the target routine doesn't need a global entry point,
26560 but we do not know whether this is the case at this point. */
26561 if (cfun->is_thunk)
26562 return true;
26563
26564 /* For regular functions, rs6000_emit_prologue sets this flag if the
26565 routine ever uses the TOC pointer. */
26566 return cfun->machine->r2_setup_needed;
26567 }
26568
26569 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26570 static sbitmap
26571 rs6000_get_separate_components (void)
26572 {
26573 rs6000_stack_t *info = rs6000_stack_info ();
26574
26575 if (WORLD_SAVE_P (info))
26576 return NULL;
26577
26578 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26579 && !(info->savres_strategy & REST_MULTIPLE));
26580
26581 /* Component 0 is the save/restore of LR (done via GPR0).
26582 Component 2 is the save of the TOC (GPR2).
26583 Components 13..31 are the save/restore of GPR13..GPR31.
26584 Components 46..63 are the save/restore of FPR14..FPR31. */
26585
26586 cfun->machine->n_components = 64;
26587
26588 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26589 bitmap_clear (components);
26590
26591 int reg_size = TARGET_32BIT ? 4 : 8;
26592 int fp_reg_size = 8;
26593
26594 /* The GPRs we need saved to the frame. */
26595 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26596 && (info->savres_strategy & REST_INLINE_GPRS))
26597 {
26598 int offset = info->gp_save_offset;
26599 if (info->push_p)
26600 offset += info->total_size;
26601
26602 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26603 {
26604 if (IN_RANGE (offset, -0x8000, 0x7fff)
26605 && save_reg_p (regno))
26606 bitmap_set_bit (components, regno);
26607
26608 offset += reg_size;
26609 }
26610 }
26611
26612 /* Don't mess with the hard frame pointer. */
26613 if (frame_pointer_needed)
26614 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26615
26616 /* Don't mess with the fixed TOC register. */
26617 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26618 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26619 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26620 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26621
26622 /* The FPRs we need saved to the frame. */
26623 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26624 && (info->savres_strategy & REST_INLINE_FPRS))
26625 {
26626 int offset = info->fp_save_offset;
26627 if (info->push_p)
26628 offset += info->total_size;
26629
26630 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26631 {
26632 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26633 bitmap_set_bit (components, regno);
26634
26635 offset += fp_reg_size;
26636 }
26637 }
26638
26639 /* Optimize LR save and restore if we can. This is component 0. Any
26640 out-of-line register save/restore routines need LR. */
26641 if (info->lr_save_p
26642 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26643 && (info->savres_strategy & SAVE_INLINE_GPRS)
26644 && (info->savres_strategy & REST_INLINE_GPRS)
26645 && (info->savres_strategy & SAVE_INLINE_FPRS)
26646 && (info->savres_strategy & REST_INLINE_FPRS)
26647 && (info->savres_strategy & SAVE_INLINE_VRS)
26648 && (info->savres_strategy & REST_INLINE_VRS))
26649 {
26650 int offset = info->lr_save_offset;
26651 if (info->push_p)
26652 offset += info->total_size;
26653 if (IN_RANGE (offset, -0x8000, 0x7fff))
26654 bitmap_set_bit (components, 0);
26655 }
26656
26657 /* Optimize saving the TOC. This is component 2. */
26658 if (cfun->machine->save_toc_in_prologue)
26659 bitmap_set_bit (components, 2);
26660
26661 return components;
26662 }
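
/* For example, a function that saves only r30, r31 and LR, all at
   in-range offsets and without out-of-line save/restore routines, gets
   components {0, 30, 31}; component 2 is added only when the TOC is
   saved in the prologue.  */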
26663
26664 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26665 static sbitmap
26666 rs6000_components_for_bb (basic_block bb)
26667 {
26668 rs6000_stack_t *info = rs6000_stack_info ();
26669
26670 bitmap in = DF_LIVE_IN (bb);
26671 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26672 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26673
26674 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26675 bitmap_clear (components);
26676
26677 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26678
26679 /* GPRs. */
26680 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26681 if (bitmap_bit_p (in, regno)
26682 || bitmap_bit_p (gen, regno)
26683 || bitmap_bit_p (kill, regno))
26684 bitmap_set_bit (components, regno);
26685
26686 /* FPRs. */
26687 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26688 if (bitmap_bit_p (in, regno)
26689 || bitmap_bit_p (gen, regno)
26690 || bitmap_bit_p (kill, regno))
26691 bitmap_set_bit (components, regno);
26692
26693 /* The link register. */
26694 if (bitmap_bit_p (in, LR_REGNO)
26695 || bitmap_bit_p (gen, LR_REGNO)
26696 || bitmap_bit_p (kill, LR_REGNO))
26697 bitmap_set_bit (components, 0);
26698
26699 /* The TOC save. */
26700 if (bitmap_bit_p (in, TOC_REGNUM)
26701 || bitmap_bit_p (gen, TOC_REGNUM)
26702 || bitmap_bit_p (kill, TOC_REGNUM))
26703 bitmap_set_bit (components, 2);
26704
26705 return components;
26706 }
26707
26708 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26709 static void
26710 rs6000_disqualify_components (sbitmap components, edge e,
26711 sbitmap edge_components, bool /*is_prologue*/)
26712 {
26713 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26714 live where we want to place that code. */
26715 if (bitmap_bit_p (edge_components, 0)
26716 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26717 {
26718 if (dump_file)
26719 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26720 "on entry to bb %d\n", e->dest->index);
26721 bitmap_clear_bit (components, 0);
26722 }
26723 }
26724
26725 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26726 static void
26727 rs6000_emit_prologue_components (sbitmap components)
26728 {
26729 rs6000_stack_t *info = rs6000_stack_info ();
26730 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26731 ? HARD_FRAME_POINTER_REGNUM
26732 : STACK_POINTER_REGNUM);
26733
26734 machine_mode reg_mode = Pmode;
26735 int reg_size = TARGET_32BIT ? 4 : 8;
26736 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26737 int fp_reg_size = 8;
26738
26739 /* Prologue for LR. */
26740 if (bitmap_bit_p (components, 0))
26741 {
26742 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26743 rtx reg = gen_rtx_REG (reg_mode, 0);
26744 rtx_insn *insn = emit_move_insn (reg, lr);
26745 RTX_FRAME_RELATED_P (insn) = 1;
26746 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26747
26748 int offset = info->lr_save_offset;
26749 if (info->push_p)
26750 offset += info->total_size;
26751
26752 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26753 RTX_FRAME_RELATED_P (insn) = 1;
26754 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26755 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26756 }
26757
26758 /* Prologue for TOC. */
26759 if (bitmap_bit_p (components, 2))
26760 {
26761 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26762 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26763 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26764 }
26765
26766 /* Prologue for the GPRs. */
26767 int offset = info->gp_save_offset;
26768 if (info->push_p)
26769 offset += info->total_size;
26770
26771 for (int i = info->first_gp_reg_save; i < 32; i++)
26772 {
26773 if (bitmap_bit_p (components, i))
26774 {
26775 rtx reg = gen_rtx_REG (reg_mode, i);
26776 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26777 RTX_FRAME_RELATED_P (insn) = 1;
26778 rtx set = copy_rtx (single_set (insn));
26779 add_reg_note (insn, REG_CFA_OFFSET, set);
26780 }
26781
26782 offset += reg_size;
26783 }
26784
26785 /* Prologue for the FPRs. */
26786 offset = info->fp_save_offset;
26787 if (info->push_p)
26788 offset += info->total_size;
26789
26790 for (int i = info->first_fp_reg_save; i < 64; i++)
26791 {
26792 if (bitmap_bit_p (components, i))
26793 {
26794 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26795 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26796 RTX_FRAME_RELATED_P (insn) = 1;
26797 rtx set = copy_rtx (single_set (insn));
26798 add_reg_note (insn, REG_CFA_OFFSET, set);
26799 }
26800
26801 offset += fp_reg_size;
26802 }
26803 }
26804
26805 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26806 static void
26807 rs6000_emit_epilogue_components (sbitmap components)
26808 {
26809 rs6000_stack_t *info = rs6000_stack_info ();
26810 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26811 ? HARD_FRAME_POINTER_REGNUM
26812 : STACK_POINTER_REGNUM);
26813
26814 machine_mode reg_mode = Pmode;
26815 int reg_size = TARGET_32BIT ? 4 : 8;
26816
26817 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26818 int fp_reg_size = 8;
26819
26820 /* Epilogue for the FPRs. */
26821 int offset = info->fp_save_offset;
26822 if (info->push_p)
26823 offset += info->total_size;
26824
26825 for (int i = info->first_fp_reg_save; i < 64; i++)
26826 {
26827 if (bitmap_bit_p (components, i))
26828 {
26829 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26830 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26831 RTX_FRAME_RELATED_P (insn) = 1;
26832 add_reg_note (insn, REG_CFA_RESTORE, reg);
26833 }
26834
26835 offset += fp_reg_size;
26836 }
26837
26838 /* Epilogue for the GPRs. */
26839 offset = info->gp_save_offset;
26840 if (info->push_p)
26841 offset += info->total_size;
26842
26843 for (int i = info->first_gp_reg_save; i < 32; i++)
26844 {
26845 if (bitmap_bit_p (components, i))
26846 {
26847 rtx reg = gen_rtx_REG (reg_mode, i);
26848 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26849 RTX_FRAME_RELATED_P (insn) = 1;
26850 add_reg_note (insn, REG_CFA_RESTORE, reg);
26851 }
26852
26853 offset += reg_size;
26854 }
26855
26856 /* Epilogue for LR. */
26857 if (bitmap_bit_p (components, 0))
26858 {
26859 int offset = info->lr_save_offset;
26860 if (info->push_p)
26861 offset += info->total_size;
26862
26863 rtx reg = gen_rtx_REG (reg_mode, 0);
26864 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26865
26866 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26867 insn = emit_move_insn (lr, reg);
26868 RTX_FRAME_RELATED_P (insn) = 1;
26869 add_reg_note (insn, REG_CFA_RESTORE, lr);
26870 }
26871 }
26872
26873 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26874 static void
26875 rs6000_set_handled_components (sbitmap components)
26876 {
26877 rs6000_stack_t *info = rs6000_stack_info ();
26878
26879 for (int i = info->first_gp_reg_save; i < 32; i++)
26880 if (bitmap_bit_p (components, i))
26881 cfun->machine->gpr_is_wrapped_separately[i] = true;
26882
26883 for (int i = info->first_fp_reg_save; i < 64; i++)
26884 if (bitmap_bit_p (components, i))
26885 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26886
26887 if (bitmap_bit_p (components, 0))
26888 cfun->machine->lr_is_wrapped_separately = true;
26889
26890 if (bitmap_bit_p (components, 2))
26891 cfun->machine->toc_is_wrapped_separately = true;
26892 }
26893
26894 /* VRSAVE is a bit vector representing which AltiVec registers
26895 are used. The OS uses this to determine which vector
26896 registers to save on a context switch. We need to save
26897 VRSAVE on the stack frame, add whatever AltiVec registers we
26898 used in this function, and do the corresponding magic in the
26899 epilogue. */
26900 static void
26901 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26902 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26903 {
26904 /* Get VRSAVE into a GPR. */
26905 rtx reg = gen_rtx_REG (SImode, save_regno);
26906 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26907 if (TARGET_MACHO)
26908 emit_insn (gen_get_vrsave_internal (reg));
26909 else
26910 emit_insn (gen_rtx_SET (reg, vrsave));
26911
26912 /* Save VRSAVE. */
26913 int offset = info->vrsave_save_offset + frame_off;
26914 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26915
26916 /* Include the registers in the mask. */
26917 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26918
26919 emit_insn (generate_set_vrsave (reg, info, 0));
26920 }
26921
26922 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26923 called, it left the arg pointer to the old stack in r29. Otherwise, the
26924 arg pointer is the top of the current frame. */
26925 static void
26926 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26927 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26928 {
26929 cfun->machine->split_stack_argp_used = true;
26930
26931 if (sp_adjust)
26932 {
26933 rtx r12 = gen_rtx_REG (Pmode, 12);
26934 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26935 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26936 emit_insn_before (set_r12, sp_adjust);
26937 }
26938 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26939 {
26940 rtx r12 = gen_rtx_REG (Pmode, 12);
26941 if (frame_off == 0)
26942 emit_move_insn (r12, frame_reg_rtx);
26943 else
26944 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26945 }
26946
26947 if (info->push_p)
26948 {
26949 rtx r12 = gen_rtx_REG (Pmode, 12);
26950 rtx r29 = gen_rtx_REG (Pmode, 29);
26951 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26952 rtx not_more = gen_label_rtx ();
26953 rtx jump;
26954
26955 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26956 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26957 gen_rtx_LABEL_REF (VOIDmode, not_more),
26958 pc_rtx);
26959 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26960 JUMP_LABEL (jump) = not_more;
26961 LABEL_NUSES (not_more) += 1;
26962 emit_move_insn (r12, r29);
26963 emit_label (not_more);
26964 }
26965 }
26966
26967 /* Emit function prologue as insns. */
26968
26969 void
26970 rs6000_emit_prologue (void)
26971 {
26972 rs6000_stack_t *info = rs6000_stack_info ();
26973 machine_mode reg_mode = Pmode;
26974 int reg_size = TARGET_32BIT ? 4 : 8;
26975 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26976 int fp_reg_size = 8;
26977 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26978 rtx frame_reg_rtx = sp_reg_rtx;
26979 unsigned int cr_save_regno;
26980 rtx cr_save_rtx = NULL_RTX;
26981 rtx_insn *insn;
26982 int strategy;
26983 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26984 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26985 && call_used_regs[STATIC_CHAIN_REGNUM]);
26986 int using_split_stack = (flag_split_stack
26987 && (lookup_attribute ("no_split_stack",
26988 DECL_ATTRIBUTES (cfun->decl))
26989 == NULL));
26990
26991 /* Offset to top of frame for frame_reg and sp respectively. */
26992 HOST_WIDE_INT frame_off = 0;
26993 HOST_WIDE_INT sp_off = 0;
26994 /* sp_adjust is the stack adjusting instruction, tracked so that the
26995 insn setting up the split-stack arg pointer can be emitted just
26996 prior to it, when r12 is not used here for other purposes. */
26997 rtx_insn *sp_adjust = 0;
26998
26999 #if CHECKING_P
27000 /* Track and check usage of r0, r11, r12. */
27001 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
27002 #define START_USE(R) do \
27003 { \
27004 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27005 reg_inuse |= 1 << (R); \
27006 } while (0)
27007 #define END_USE(R) do \
27008 { \
27009 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
27010 reg_inuse &= ~(1 << (R)); \
27011 } while (0)
27012 #define NOT_INUSE(R) do \
27013 { \
27014 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
27015 } while (0)
27016 #else
27017 #define START_USE(R) do {} while (0)
27018 #define END_USE(R) do {} while (0)
27019 #define NOT_INUSE(R) do {} while (0)
27020 #endif
27021
27022 if (DEFAULT_ABI == ABI_ELFv2
27023 && !TARGET_SINGLE_PIC_BASE)
27024 {
27025 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
27026
27027 /* With -mminimal-toc we may generate an extra use of r2 below. */
27028 if (TARGET_TOC && TARGET_MINIMAL_TOC
27029 && !constant_pool_empty_p ())
27030 cfun->machine->r2_setup_needed = true;
27031 }
27032
27033
27034 if (flag_stack_usage_info)
27035 current_function_static_stack_size = info->total_size;
27036
27037 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
27038 {
27039 HOST_WIDE_INT size = info->total_size;
27040
27041 if (crtl->is_leaf && !cfun->calls_alloca)
27042 {
27043 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27044 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27045 size - get_stack_check_protect ());
27046 }
27047 else if (size > 0)
27048 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27049 }
27050
27051 if (TARGET_FIX_AND_CONTINUE)
27052 {
27053 /* GDB on Darwin arranges to forward a function from the old
27054 address by modifying the first 5 instructions of the function
27055 to branch to the overriding function. This is necessary to
27056 permit function pointers that point to the old function to
27057 actually forward to the new function. */
27058 emit_insn (gen_nop ());
27059 emit_insn (gen_nop ());
27060 emit_insn (gen_nop ());
27061 emit_insn (gen_nop ());
27062 emit_insn (gen_nop ());
27063 }
27064
27065 /* Handle world saves specially here. */
27066 if (WORLD_SAVE_P (info))
27067 {
27068 int i, j, sz;
27069 rtx treg;
27070 rtvec p;
27071 rtx reg0;
27072
27073 /* save_world expects lr in r0. */
27074 reg0 = gen_rtx_REG (Pmode, 0);
27075 if (info->lr_save_p)
27076 {
27077 insn = emit_move_insn (reg0,
27078 gen_rtx_REG (Pmode, LR_REGNO));
27079 RTX_FRAME_RELATED_P (insn) = 1;
27080 }
27081
27082 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27083 assumptions about the offsets of various bits of the stack
27084 frame. */
27085 gcc_assert (info->gp_save_offset == -220
27086 && info->fp_save_offset == -144
27087 && info->lr_save_offset == 8
27088 && info->cr_save_offset == 4
27089 && info->push_p
27090 && info->lr_save_p
27091 && (!crtl->calls_eh_return
27092 || info->ehrd_offset == -432)
27093 && info->vrsave_save_offset == -224
27094 && info->altivec_save_offset == -416);
27095
27096 treg = gen_rtx_REG (SImode, 11);
27097 emit_move_insn (treg, GEN_INT (-info->total_size));
27098
27099 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27100 in R11. It also clobbers R12, so beware! */
27101
27102 /* Preserve CR2 for save_world prologues. */
27103 sz = 5;
27104 sz += 32 - info->first_gp_reg_save;
27105 sz += 64 - info->first_fp_reg_save;
27106 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27107 p = rtvec_alloc (sz);
27108 j = 0;
27109 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
27110 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27111 gen_rtx_SYMBOL_REF (Pmode,
27112 "*save_world"));
27113 /* We do floats first so that the instruction pattern matches
27114 properly. */
27115 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27116 RTVEC_ELT (p, j++)
27117 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27118 info->first_fp_reg_save + i),
27119 frame_reg_rtx,
27120 info->fp_save_offset + frame_off + 8 * i);
27121 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27122 RTVEC_ELT (p, j++)
27123 = gen_frame_store (gen_rtx_REG (V4SImode,
27124 info->first_altivec_reg_save + i),
27125 frame_reg_rtx,
27126 info->altivec_save_offset + frame_off + 16 * i);
27127 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27128 RTVEC_ELT (p, j++)
27129 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27130 frame_reg_rtx,
27131 info->gp_save_offset + frame_off + reg_size * i);
27132
27133 /* CR register traditionally saved as CR2. */
27134 RTVEC_ELT (p, j++)
27135 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27136 frame_reg_rtx, info->cr_save_offset + frame_off);
27137 /* Explain the use of R0. */
27138 if (info->lr_save_p)
27139 RTVEC_ELT (p, j++)
27140 = gen_frame_store (reg0,
27141 frame_reg_rtx, info->lr_save_offset + frame_off);
27142 /* Explain what happens to the stack pointer. */
27143 {
27144 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27145 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27146 }
27147
27148 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27149 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27150 treg, GEN_INT (-info->total_size));
27151 sp_off = frame_off = info->total_size;
27152 }
27153
27154 strategy = info->savres_strategy;
27155
27156 /* For V.4, update stack before we do any saving and set back pointer. */
27157 if (! WORLD_SAVE_P (info)
27158 && info->push_p
27159 && (DEFAULT_ABI == ABI_V4
27160 || crtl->calls_eh_return))
27161 {
27162 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27163 || !(strategy & SAVE_INLINE_GPRS)
27164 || !(strategy & SAVE_INLINE_VRS));
27165 int ptr_regno = -1;
27166 rtx ptr_reg = NULL_RTX;
27167 int ptr_off = 0;
27168
27169 if (info->total_size < 32767)
27170 frame_off = info->total_size;
27171 else if (need_r11)
27172 ptr_regno = 11;
27173 else if (info->cr_save_p
27174 || info->lr_save_p
27175 || info->first_fp_reg_save < 64
27176 || info->first_gp_reg_save < 32
27177 || info->altivec_size != 0
27178 || info->vrsave_size != 0
27179 || crtl->calls_eh_return)
27180 ptr_regno = 12;
27181 else
27182 {
27183 /* The prologue won't be saving any regs so there is no need
27184 to set up a frame register to access any frame save area.
27185 We also won't be using frame_off anywhere below, but set
27186 the correct value anyway to protect against future
27187 changes to this function. */
27188 frame_off = info->total_size;
27189 }
27190 if (ptr_regno != -1)
27191 {
27192 /* Set up the frame offset to that needed by the first
27193 out-of-line save function. */
27194 START_USE (ptr_regno);
27195 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27196 frame_reg_rtx = ptr_reg;
27197 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27198 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27199 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27200 ptr_off = info->gp_save_offset + info->gp_size;
27201 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27202 ptr_off = info->altivec_save_offset + info->altivec_size;
27203 frame_off = -ptr_off;
27204 }
27205 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27206 ptr_reg, ptr_off);
27207 if (REGNO (frame_reg_rtx) == 12)
27208 sp_adjust = 0;
27209 sp_off = info->total_size;
27210 if (frame_reg_rtx != sp_reg_rtx)
27211 rs6000_emit_stack_tie (frame_reg_rtx, false);
27212 }
27213
27214 /* If we use the link register, get it into r0. */
27215 if (!WORLD_SAVE_P (info) && info->lr_save_p
27216 && !cfun->machine->lr_is_wrapped_separately)
27217 {
27218 rtx addr, reg, mem;
27219
27220 reg = gen_rtx_REG (Pmode, 0);
27221 START_USE (0);
27222 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27223 RTX_FRAME_RELATED_P (insn) = 1;
27224
27225 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27226 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27227 {
27228 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27229 GEN_INT (info->lr_save_offset + frame_off));
27230 mem = gen_rtx_MEM (Pmode, addr);
27231 /* This should not be in rs6000_sr_alias_set, because of
27232 __builtin_return_address. */
27233
27234 insn = emit_move_insn (mem, reg);
27235 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27236 NULL_RTX, NULL_RTX);
27237 END_USE (0);
27238 }
27239 }
27240
27241 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27242 r12 will be needed by out-of-line gpr save. */
27243 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27244 && !(strategy & (SAVE_INLINE_GPRS
27245 | SAVE_NOINLINE_GPRS_SAVES_LR))
27246 ? 11 : 12);
27247 if (!WORLD_SAVE_P (info)
27248 && info->cr_save_p
27249 && REGNO (frame_reg_rtx) != cr_save_regno
27250 && !(using_static_chain_p && cr_save_regno == 11)
27251 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27252 {
27253 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27254 START_USE (cr_save_regno);
27255 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27256 }
27257
27258 /* Do any required saving of fpr's. If only one or two to save, do
27259 it ourselves. Otherwise, call an out-of-line save function. */
27260 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27261 {
27262 int offset = info->fp_save_offset + frame_off;
27263 for (int i = info->first_fp_reg_save; i < 64; i++)
27264 {
27265 if (save_reg_p (i)
27266 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27267 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27268 sp_off - frame_off);
27269
27270 offset += fp_reg_size;
27271 }
27272 }
27273 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27274 {
27275 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27276 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27277 unsigned ptr_regno = ptr_regno_for_savres (sel);
27278 rtx ptr_reg = frame_reg_rtx;
27279
27280 if (REGNO (frame_reg_rtx) == ptr_regno)
27281 gcc_checking_assert (frame_off == 0);
27282 else
27283 {
27284 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27285 NOT_INUSE (ptr_regno);
27286 emit_insn (gen_add3_insn (ptr_reg,
27287 frame_reg_rtx, GEN_INT (frame_off)));
27288 }
27289 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27290 info->fp_save_offset,
27291 info->lr_save_offset,
27292 DFmode, sel);
27293 rs6000_frame_related (insn, ptr_reg, sp_off,
27294 NULL_RTX, NULL_RTX);
27295 if (lr)
27296 END_USE (0);
27297 }
27298
27299 /* Save GPRs. This is done as a PARALLEL if we are using
27300 the store-multiple instructions. */
27301 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27302 {
27303 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27304 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27305 unsigned ptr_regno = ptr_regno_for_savres (sel);
27306 rtx ptr_reg = frame_reg_rtx;
27307 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27308 int end_save = info->gp_save_offset + info->gp_size;
27309 int ptr_off;
27310
27311 if (ptr_regno == 12)
27312 sp_adjust = 0;
27313 if (!ptr_set_up)
27314 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27315
27316 /* Need to adjust r11 (r12) if we saved any FPRs. */
27317 if (end_save + frame_off != 0)
27318 {
27319 rtx offset = GEN_INT (end_save + frame_off);
27320
27321 if (ptr_set_up)
27322 frame_off = -end_save;
27323 else
27324 NOT_INUSE (ptr_regno);
27325 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27326 }
27327 else if (!ptr_set_up)
27328 {
27329 NOT_INUSE (ptr_regno);
27330 emit_move_insn (ptr_reg, frame_reg_rtx);
27331 }
27332 ptr_off = -end_save;
27333 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27334 info->gp_save_offset + ptr_off,
27335 info->lr_save_offset + ptr_off,
27336 reg_mode, sel);
27337 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27338 NULL_RTX, NULL_RTX);
27339 if (lr)
27340 END_USE (0);
27341 }
27342 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27343 {
27344 rtvec p;
27345 int i;
27346 p = rtvec_alloc (32 - info->first_gp_reg_save);
27347 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27348 RTVEC_ELT (p, i)
27349 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27350 frame_reg_rtx,
27351 info->gp_save_offset + frame_off + reg_size * i);
27352 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27353 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27354 NULL_RTX, NULL_RTX);
27355 }
27356 else if (!WORLD_SAVE_P (info))
27357 {
27358 int offset = info->gp_save_offset + frame_off;
27359 for (int i = info->first_gp_reg_save; i < 32; i++)
27360 {
27361 if (save_reg_p (i)
27362 && !cfun->machine->gpr_is_wrapped_separately[i])
27363 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27364 sp_off - frame_off);
27365
27366 offset += reg_size;
27367 }
27368 }
27369
27370 if (crtl->calls_eh_return)
27371 {
27372 unsigned int i;
27373 rtvec p;
27374
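/* Count the EH data registers first so we know how large an rtvec
   to allocate for the frame-related note built below. */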
27375 for (i = 0; ; ++i)
27376 {
27377 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27378 if (regno == INVALID_REGNUM)
27379 break;
27380 }
27381
27382 p = rtvec_alloc (i);
27383
27384 for (i = 0; ; ++i)
27385 {
27386 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27387 if (regno == INVALID_REGNUM)
27388 break;
27389
27390 rtx set
27391 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27392 sp_reg_rtx,
27393 info->ehrd_offset + sp_off + reg_size * (int) i);
27394 RTVEC_ELT (p, i) = set;
27395 RTX_FRAME_RELATED_P (set) = 1;
27396 }
27397
27398 insn = emit_insn (gen_blockage ());
27399 RTX_FRAME_RELATED_P (insn) = 1;
27400 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27401 }
27402
27403 /* In the AIX ABI we need to make sure r2 is really saved. */
27404 if (TARGET_AIX && crtl->calls_eh_return)
27405 {
27406 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27407 rtx join_insn, note;
27408 rtx_insn *save_insn;
27409 long toc_restore_insn;
27410
27411 tmp_reg = gen_rtx_REG (Pmode, 11);
27412 tmp_reg_si = gen_rtx_REG (SImode, 11);
27413 if (using_static_chain_p)
27414 {
27415 START_USE (0);
27416 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27417 }
27418 else
27419 START_USE (11);
27420 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27421 /* Peek at instruction to which this function returns. If it's
27422 restoring r2, then we know we've already saved r2. We can't
27423 unconditionally save r2 because the value we have will already
27424 be updated if we arrived at this function via a plt call or
27425 toc adjusting stub. */
27426 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
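/* Image of the expected TOC restore instruction: "lwz 2,N(1)" is
   0x80410000 + N and "ld 2,N(1)" is 0xE8410000 + N, where N is
   RS6000_TOC_SAVE_SLOT.  The XOR below cancels the high half, so
   the compare against the low half tests the full 32-bit image. */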
27427 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27428 + RS6000_TOC_SAVE_SLOT);
27429 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27430 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27431 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27432 validate_condition_mode (EQ, CCUNSmode);
27433 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27434 emit_insn (gen_rtx_SET (compare_result,
27435 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27436 toc_save_done = gen_label_rtx ();
27437 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27438 gen_rtx_EQ (VOIDmode, compare_result,
27439 const0_rtx),
27440 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27441 pc_rtx);
27442 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27443 JUMP_LABEL (jump) = toc_save_done;
27444 LABEL_NUSES (toc_save_done) += 1;
27445
27446 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27447 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27448 sp_off - frame_off);
27449
27450 emit_label (toc_save_done);
27451
27452 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27453 have a CFG that has different saves along different paths.
27454 Move the note to a dummy blockage insn, which describes that
27455 R2 is unconditionally saved after the label. */
27456 /* ??? An alternate representation might be a special insn pattern
27457 containing both the branch and the store. That might give the
27458 code that minimizes the number of DW_CFA_advance opcodes more
27459 freedom in placing the annotations. */
27460 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27461 if (note)
27462 remove_note (save_insn, note);
27463 else
27464 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27465 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27466 RTX_FRAME_RELATED_P (save_insn) = 0;
27467
27468 join_insn = emit_insn (gen_blockage ());
27469 REG_NOTES (join_insn) = note;
27470 RTX_FRAME_RELATED_P (join_insn) = 1;
27471
27472 if (using_static_chain_p)
27473 {
27474 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27475 END_USE (0);
27476 }
27477 else
27478 END_USE (11);
27479 }
27480
27481 /* Save CR if we use any that must be preserved. */
27482 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27483 {
27484 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27485 GEN_INT (info->cr_save_offset + frame_off));
27486 rtx mem = gen_frame_mem (SImode, addr);
27487
27488 /* If we didn't copy cr before, do so now using r0. */
27489 if (cr_save_rtx == NULL_RTX)
27490 {
27491 START_USE (0);
27492 cr_save_rtx = gen_rtx_REG (SImode, 0);
27493 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27494 }
27495
27496 /* Saving CR requires a two-instruction sequence: one instruction
27497 to move the CR to a general-purpose register, and a second
27498 instruction that stores the GPR to memory.
27499
27500 We do not emit any DWARF CFI records for the first of these,
27501 because we cannot properly represent the fact that CR is saved in
27502 a register. One reason is that we cannot express that multiple
27503 CR fields are saved; another reason is that on 64-bit, the size
27504 of the CR register in DWARF (4 bytes) differs from the size of
27505 a general-purpose register.
27506
27507 This means if any intervening instruction were to clobber one of
27508 the call-saved CR fields, we'd have incorrect CFI. To prevent
27509 this from happening, we mark the store to memory as a use of
27510 those CR fields, which prevents any such instruction from being
27511 scheduled in between the two instructions. */
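/* Room for the one store plus up to eight CR-field USEs. */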
27512 rtx crsave_v[9];
27513 int n_crsave = 0;
27514 int i;
27515
27516 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27517 for (i = 0; i < 8; i++)
27518 if (save_reg_p (CR0_REGNO + i))
27519 crsave_v[n_crsave++]
27520 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27521
27522 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27523 gen_rtvec_v (n_crsave, crsave_v)));
27524 END_USE (REGNO (cr_save_rtx));
27525
27526 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27527 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27528 so we need to construct a frame expression manually. */
27529 RTX_FRAME_RELATED_P (insn) = 1;
27530
27531 /* Update address to be stack-pointer relative, like
27532 rs6000_frame_related would do. */
27533 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27534 GEN_INT (info->cr_save_offset + sp_off));
27535 mem = gen_frame_mem (SImode, addr);
27536
27537 if (DEFAULT_ABI == ABI_ELFv2)
27538 {
27539 /* In the ELFv2 ABI we generate separate CFI records for each
27540 CR field that was actually saved. They all point to the
27541 same 32-bit stack slot. */
27542 rtx crframe[8];
27543 int n_crframe = 0;
27544
27545 for (i = 0; i < 8; i++)
27546 if (save_reg_p (CR0_REGNO + i))
27547 {
27548 crframe[n_crframe]
27549 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27550
27551 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27552 n_crframe++;
27553 }
27554
27555 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27556 gen_rtx_PARALLEL (VOIDmode,
27557 gen_rtvec_v (n_crframe, crframe)));
27558 }
27559 else
27560 {
27561 /* In other ABIs, by convention, we use a single CR regnum to
27562 represent the fact that all call-saved CR fields are saved.
27563 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27564 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27565 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27566 }
27567 }
27568
27569 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27570 *separate* slots if the routine calls __builtin_eh_return, so
27571 that they can be independently restored by the unwinder. */
27572 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27573 {
27574 int i, cr_off = info->ehcr_offset;
27575 rtx crsave;
27576
27577 /* ??? We might get better performance by using multiple mfocrf
27578 instructions. */
27579 crsave = gen_rtx_REG (SImode, 0);
27580 emit_insn (gen_prologue_movesi_from_cr (crsave));
27581
27582 for (i = 0; i < 8; i++)
27583 if (!call_used_regs[CR0_REGNO + i])
27584 {
27585 rtvec p = rtvec_alloc (2);
27586 RTVEC_ELT (p, 0)
27587 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27588 RTVEC_ELT (p, 1)
27589 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27590
27591 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27592
27593 RTX_FRAME_RELATED_P (insn) = 1;
27594 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27595 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27596 sp_reg_rtx, cr_off + sp_off));
27597
27598 cr_off += reg_size;
27599 }
27600 }
27601
27602 /* If we are emitting stack probes but allocating no stack, just
27603 note that in the dump file. */
27604 if (flag_stack_clash_protection
27605 && dump_file
27606 && !info->push_p)
27607 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27608
27609 /* Update stack and set back pointer unless this is V.4,
27610 for which it was done previously. */
27611 if (!WORLD_SAVE_P (info) && info->push_p
27612 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27613 {
27614 rtx ptr_reg = NULL;
27615 int ptr_off = 0;
27616
27617 /* If saving AltiVec regs, we need to be able to address all save
27618 locations using a 16-bit offset. */
27619 if ((strategy & SAVE_INLINE_VRS) == 0
27620 || (info->altivec_size != 0
27621 && (info->altivec_save_offset + info->altivec_size - 16
27622 + info->total_size - frame_off) > 32767)
27623 || (info->vrsave_size != 0
27624 && (info->vrsave_save_offset
27625 + info->total_size - frame_off) > 32767))
27626 {
27627 int sel = SAVRES_SAVE | SAVRES_VR;
27628 unsigned ptr_regno = ptr_regno_for_savres (sel);
27629
27630 if (using_static_chain_p
27631 && ptr_regno == STATIC_CHAIN_REGNUM)
27632 ptr_regno = 12;
27633 if (REGNO (frame_reg_rtx) != ptr_regno)
27634 START_USE (ptr_regno);
27635 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27636 frame_reg_rtx = ptr_reg;
27637 ptr_off = info->altivec_save_offset + info->altivec_size;
27638 frame_off = -ptr_off;
27639 }
27640 else if (REGNO (frame_reg_rtx) == 1)
27641 frame_off = info->total_size;
27642 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27643 ptr_reg, ptr_off);
27644 if (REGNO (frame_reg_rtx) == 12)
27645 sp_adjust = 0;
27646 sp_off = info->total_size;
27647 if (frame_reg_rtx != sp_reg_rtx)
27648 rs6000_emit_stack_tie (frame_reg_rtx, false);
27649 }
27650
27651 /* Set frame pointer, if needed. */
27652 if (frame_pointer_needed)
27653 {
27654 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27655 sp_reg_rtx);
27656 RTX_FRAME_RELATED_P (insn) = 1;
27657 }
27658
27659 /* Save AltiVec registers if needed. Save here because the red zone does
27660 not always include AltiVec registers. */
27661 if (!WORLD_SAVE_P (info)
27662 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27663 {
27664 int end_save = info->altivec_save_offset + info->altivec_size;
27665 int ptr_off;
27666 /* Oddly, the vector save/restore functions point r0 at the end
27667 of the save area, then use r11 or r12 to load offsets for
27668 [reg+reg] addressing. */
27669 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27670 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27671 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27672
27673 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27674 NOT_INUSE (0);
27675 if (scratch_regno == 12)
27676 sp_adjust = 0;
27677 if (end_save + frame_off != 0)
27678 {
27679 rtx offset = GEN_INT (end_save + frame_off);
27680
27681 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27682 }
27683 else
27684 emit_move_insn (ptr_reg, frame_reg_rtx);
27685
27686 ptr_off = -end_save;
27687 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27688 info->altivec_save_offset + ptr_off,
27689 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27690 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27691 NULL_RTX, NULL_RTX);
27692 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27693 {
27694 /* The oddity mentioned above clobbered our frame reg. */
27695 emit_move_insn (frame_reg_rtx, ptr_reg);
27696 frame_off = ptr_off;
27697 }
27698 }
27699 else if (!WORLD_SAVE_P (info)
27700 && info->altivec_size != 0)
27701 {
27702 int i;
27703
27704 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27705 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27706 {
27707 rtx areg, savereg, mem;
27708 HOST_WIDE_INT offset;
27709
27710 offset = (info->altivec_save_offset + frame_off
27711 + 16 * (i - info->first_altivec_reg_save));
27712
27713 savereg = gen_rtx_REG (V4SImode, i);
27714
27715 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27716 {
27717 mem = gen_frame_mem (V4SImode,
27718 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27719 GEN_INT (offset)));
27720 insn = emit_insn (gen_rtx_SET (mem, savereg));
27721 areg = NULL_RTX;
27722 }
27723 else
27724 {
27725 NOT_INUSE (0);
27726 areg = gen_rtx_REG (Pmode, 0);
27727 emit_move_insn (areg, GEN_INT (offset));
27728
27729 /* AltiVec addressing mode is [reg+reg]. */
27730 mem = gen_frame_mem (V4SImode,
27731 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27732
27733 /* Rather than emitting a generic move, force use of the stvx
27734 instruction, which we always want on ISA 2.07 (power8) systems.
27735 In particular we don't want xxpermdi/stxvd2x for little
27736 endian. */
27737 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27738 }
27739
27740 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27741 areg, GEN_INT (offset));
27742 }
27743 }
27744
27745 /* VRSAVE is a bit vector representing which AltiVec registers
27746 are used. The OS uses this to determine which vector
27747 registers to save on a context switch. We need to save
27748 VRSAVE on the stack frame, add whatever AltiVec registers we
27749 used in this function, and do the corresponding magic in the
27750 epilogue. */
27751
27752 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27753 {
27754 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27755 be using r12 as frame_reg_rtx and r11 as the static chain
27756 pointer for nested functions. */
27757 int save_regno = 12;
27758 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27759 && !using_static_chain_p)
27760 save_regno = 11;
27761 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27762 {
27763 save_regno = 11;
27764 if (using_static_chain_p)
27765 save_regno = 0;
27766 }
27767 NOT_INUSE (save_regno);
27768
27769 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27770 }
27771
27772 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27773 if (!TARGET_SINGLE_PIC_BASE
27774 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27775 && !constant_pool_empty_p ())
27776 || (DEFAULT_ABI == ABI_V4
27777 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27778 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27779 {
27780 /* If emit_load_toc_table will use the link register, we need to save
27781 it. We use R12 for this purpose because emit_load_toc_table
27782 can use register 0. This allows us to use a plain 'blr' to return
27783 from the procedure more often. */
27784 int save_LR_around_toc_setup = (TARGET_ELF
27785 && DEFAULT_ABI == ABI_V4
27786 && flag_pic
27787 && ! info->lr_save_p
27788 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27789 if (save_LR_around_toc_setup)
27790 {
27791 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27792 rtx tmp = gen_rtx_REG (Pmode, 12);
27793
27794 sp_adjust = 0;
27795 insn = emit_move_insn (tmp, lr);
27796 RTX_FRAME_RELATED_P (insn) = 1;
27797
27798 rs6000_emit_load_toc_table (TRUE);
27799
27800 insn = emit_move_insn (lr, tmp);
27801 add_reg_note (insn, REG_CFA_RESTORE, lr);
27802 RTX_FRAME_RELATED_P (insn) = 1;
27803 }
27804 else
27805 rs6000_emit_load_toc_table (TRUE);
27806 }
27807
27808 #if TARGET_MACHO
27809 if (!TARGET_SINGLE_PIC_BASE
27810 && DEFAULT_ABI == ABI_DARWIN
27811 && flag_pic && crtl->uses_pic_offset_table)
27812 {
27813 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27814 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27815
27816 /* Save and restore LR locally around this call (in R0). */
27817 if (!info->lr_save_p)
27818 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27819
27820 emit_insn (gen_load_macho_picbase (src));
27821
27822 emit_move_insn (gen_rtx_REG (Pmode,
27823 RS6000_PIC_OFFSET_TABLE_REGNUM),
27824 lr);
27825
27826 if (!info->lr_save_p)
27827 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27828 }
27829 #endif
27830
27831 /* If we need to, save the TOC register after doing the stack setup.
27832 Do not emit eh frame info for this save. The unwinder wants info,
27833 conceptually attached to instructions in this function, about
27834 register values in the caller of this function. This R2 may have
27835 already been changed from the value in the caller.
27836 We don't attempt to write accurate DWARF EH frame info for R2
27837 because code emitted by gcc for a (non-pointer) function call
27838 doesn't save and restore R2. Instead, R2 is managed out-of-line
27839 by a linker generated plt call stub when the function resides in
27840 a shared library. This behavior is costly to describe in DWARF,
27841 both in terms of the size of DWARF info and the time taken in the
27842 unwinder to interpret it. R2 changes, apart from the
27843 calls_eh_return case earlier in this function, are handled by
27844 linux-unwind.h frob_update_context. */
27845 if (rs6000_save_toc_in_prologue_p ()
27846 && !cfun->machine->toc_is_wrapped_separately)
27847 {
27848 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27849 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27850 }
27851
27852 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27853 if (using_split_stack && split_stack_arg_pointer_used_p ())
27854 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27855 }
27856
27857 /* Output .extern statements for the save/restore routines we use. */
27858
27859 static void
27860 rs6000_output_savres_externs (FILE *file)
27861 {
27862 rs6000_stack_t *info = rs6000_stack_info ();
27863
27864 if (TARGET_DEBUG_STACK)
27865 debug_stack_info (info);
27866
27867 /* Write .extern for any function we will call to save and restore
27868 fp values. */
27869 if (info->first_fp_reg_save < 64
27870 && !TARGET_MACHO
27871 && !TARGET_ELF)
27872 {
27873 char *name;
27874 int regno = info->first_fp_reg_save - 32;
27875
27876 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27877 {
27878 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27879 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27880 name = rs6000_savres_routine_name (regno, sel);
27881 fprintf (file, "\t.extern %s\n", name);
27882 }
27883 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27884 {
27885 bool lr = (info->savres_strategy
27886 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27887 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27888 name = rs6000_savres_routine_name (regno, sel);
27889 fprintf (file, "\t.extern %s\n", name);
27890 }
27891 }
27892 }
27893
27894 /* Write function prologue. */
27895
27896 static void
27897 rs6000_output_function_prologue (FILE *file)
27898 {
27899 if (!cfun->is_thunk)
27900 rs6000_output_savres_externs (file);
27901
27902 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27903 immediately after the global entry point label. */
27904 if (rs6000_global_entry_point_needed_p ())
27905 {
27906 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27907
27908 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27909
27910 if (TARGET_CMODEL != CMODEL_LARGE)
27911 {
27912 /* In the small and medium code models, we assume the TOC is less
27913 than 2 GB away from the text section, so it can be computed via the
27914 following two-instruction sequence. */
27915 char buf[256];
27916
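/* With r12 holding the address of the global entry point, this
   emits, e.g.:
	0:	addis 2,12,.TOC.-.LCF0@ha
		addi 2,2,.TOC.-.LCF0@l  */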
27917 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27918 fprintf (file, "0:\taddis 2,12,.TOC.-");
27919 assemble_name (file, buf);
27920 fprintf (file, "@ha\n");
27921 fprintf (file, "\taddi 2,2,.TOC.-");
27922 assemble_name (file, buf);
27923 fprintf (file, "@l\n");
27924 }
27925 else
27926 {
27927 /* In the large code model, we allow arbitrary offsets between the
27928 TOC and the text section, so we have to load the offset from
27929 memory. The data field is emitted directly before the global
27930 entry point in rs6000_elf_declare_function_name. */
27931 char buf[256];
27932
27933 #ifdef HAVE_AS_ENTRY_MARKERS
27934 /* If supported by the linker, emit a marker relocation. If the
27935 total code size of the final executable or shared library
27936 happens to fit into 2 GB after all, the linker will replace
27937 this code sequence with the sequence for the small or medium
27938 code model. */
27939 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27940 #endif
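/* The resulting sequence is, e.g.:
	ld 2,.LCL0-.LCF0(12)
	add 2,2,12  */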
27941 fprintf (file, "\tld 2,");
27942 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27943 assemble_name (file, buf);
27944 fprintf (file, "-");
27945 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27946 assemble_name (file, buf);
27947 fprintf (file, "(12)\n");
27948 fprintf (file, "\tadd 2,2,12\n");
27949 }
27950
27951 fputs ("\t.localentry\t", file);
27952 assemble_name (file, name);
27953 fputs (",.-", file);
27954 assemble_name (file, name);
27955 fputs ("\n", file);
27956 }
27957
27958 /* Output -mprofile-kernel code. This needs to be done here instead of
27959 in output_function_profile since it must go after the ELFv2 ABI
27960 local entry point. */
27961 if (TARGET_PROFILE_KERNEL && crtl->profile)
27962 {
27963 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27964 gcc_assert (!TARGET_32BIT);
27965
27966 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27967
27968 /* In the ELFv2 ABI we have no compiler stack word. It must be
27969 the responsibility of _mcount to preserve the static chain
27970 register if required. */
27971 if (DEFAULT_ABI != ABI_ELFv2
27972 && cfun->static_chain_decl != NULL)
27973 {
27974 asm_fprintf (file, "\tstd %s,24(%s)\n",
27975 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27976 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27977 asm_fprintf (file, "\tld %s,24(%s)\n",
27978 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27979 }
27980 else
27981 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27982 }
27983
27984 rs6000_pic_labelno++;
27985 }
27986
27987 /* -mprofile-kernel code calls mcount before the function prologue,
27988 so a profiled leaf function should stay a leaf function. */
27989 static bool
27990 rs6000_keep_leaf_when_profiled ()
27991 {
27992 return TARGET_PROFILE_KERNEL;
27993 }
27994
27995 /* Non-zero if vmx regs are restored before the frame pop, zero if
27996 we restore after the pop when possible. */
27997 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27998
27999 /* Restoring cr is a two-step process: loading a reg from the frame
28000 save, then moving the reg to cr. For ABI_V4 we must let the
28001 unwinder know that the stack location is no longer valid at or
28002 before the stack deallocation, but we can't emit a cfa_restore for
28003 cr at the stack deallocation like we do for other registers.
28004 The trouble is that it is possible for the move to cr to be
28005 scheduled after the stack deallocation. So say exactly where cr
28006 is located on each of the two insns. */
28007
28008 static rtx
28009 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
28010 {
28011 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
28012 rtx reg = gen_rtx_REG (SImode, regno);
28013 rtx_insn *insn = emit_move_insn (reg, mem);
28014
28015 if (!exit_func && DEFAULT_ABI == ABI_V4)
28016 {
28017 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28018 rtx set = gen_rtx_SET (reg, cr);
28019
28020 add_reg_note (insn, REG_CFA_REGISTER, set);
28021 RTX_FRAME_RELATED_P (insn) = 1;
28022 }
28023 return reg;
28024 }
28025
28026 /* Reload CR from REG. */
28027
28028 static void
28029 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
28030 {
28031 int count = 0;
28032 int i;
28033
28034 if (using_mfcr_multiple)
28035 {
28036 for (i = 0; i < 8; i++)
28037 if (save_reg_p (CR0_REGNO + i))
28038 count++;
28039 gcc_assert (count);
28040 }
28041
28042 if (using_mfcr_multiple && count > 1)
28043 {
28044 rtx_insn *insn;
28045 rtvec p;
28046 int ndx;
28047
28048 p = rtvec_alloc (count);
28049
28050 ndx = 0;
28051 for (i = 0; i < 8; i++)
28052 if (save_reg_p (CR0_REGNO + i))
28053 {
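/* CR0 is the most significant field in the mtcrf mask, so field
   I is selected by mask bit 1 << (7 - I). */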
28054 rtvec r = rtvec_alloc (2);
28055 RTVEC_ELT (r, 0) = reg;
28056 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
28057 RTVEC_ELT (p, ndx) =
28058 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28059 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28060 ndx++;
28061 }
28062 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28063 gcc_assert (ndx == count);
28064
28065 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28066 CR field separately. */
28067 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28068 {
28069 for (i = 0; i < 8; i++)
28070 if (save_reg_p (CR0_REGNO + i))
28071 add_reg_note (insn, REG_CFA_RESTORE,
28072 gen_rtx_REG (SImode, CR0_REGNO + i));
28073
28074 RTX_FRAME_RELATED_P (insn) = 1;
28075 }
28076 }
28077 else
28078 for (i = 0; i < 8; i++)
28079 if (save_reg_p (CR0_REGNO + i))
28080 {
28081 rtx insn = emit_insn (gen_movsi_to_cr_one
28082 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28083
28084 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28085 CR field separately, attached to the insn that in fact
28086 restores this particular CR field. */
28087 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28088 {
28089 add_reg_note (insn, REG_CFA_RESTORE,
28090 gen_rtx_REG (SImode, CR0_REGNO + i));
28091
28092 RTX_FRAME_RELATED_P (insn) = 1;
28093 }
28094 }
28095
28096 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28097 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28098 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28099 {
28100 rtx_insn *insn = get_last_insn ();
28101 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28102
28103 add_reg_note (insn, REG_CFA_RESTORE, cr);
28104 RTX_FRAME_RELATED_P (insn) = 1;
28105 }
28106 }
28107
28108 /* Like cr, the move to lr instruction can be scheduled after the
28109 stack deallocation, but unlike cr, its stack frame save is still
28110 valid. So we only need to emit the cfa_restore on the correct
28111 instruction. */
28112
28113 static void
28114 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28115 {
28116 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28117 rtx reg = gen_rtx_REG (Pmode, regno);
28118
28119 emit_move_insn (reg, mem);
28120 }
28121
28122 static void
28123 restore_saved_lr (int regno, bool exit_func)
28124 {
28125 rtx reg = gen_rtx_REG (Pmode, regno);
28126 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28127 rtx_insn *insn = emit_move_insn (lr, reg);
28128
28129 if (!exit_func && flag_shrink_wrap)
28130 {
28131 add_reg_note (insn, REG_CFA_RESTORE, lr);
28132 RTX_FRAME_RELATED_P (insn) = 1;
28133 }
28134 }
28135
28136 static rtx
28137 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28138 {
28139 if (DEFAULT_ABI == ABI_ELFv2)
28140 {
28141 int i;
28142 for (i = 0; i < 8; i++)
28143 if (save_reg_p (CR0_REGNO + i))
28144 {
28145 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28146 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28147 cfa_restores);
28148 }
28149 }
28150 else if (info->cr_save_p)
28151 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28152 gen_rtx_REG (SImode, CR2_REGNO),
28153 cfa_restores);
28154
28155 if (info->lr_save_p)
28156 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28157 gen_rtx_REG (Pmode, LR_REGNO),
28158 cfa_restores);
28159 return cfa_restores;
28160 }
28161
28162 /* Return true if OFFSET from the stack pointer can be clobbered by signals.
28163 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
28164 below the stack pointer that are not clobbered by signals. */
28165
28166 static inline bool
28167 offset_below_red_zone_p (HOST_WIDE_INT offset)
28168 {
28169 return offset < (DEFAULT_ABI == ABI_V4
28170 ? 0
28171 : TARGET_32BIT ? -220 : -288);
28172 }
28173
28174 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28175
28176 static void
28177 emit_cfa_restores (rtx cfa_restores)
28178 {
28179 rtx_insn *insn = get_last_insn ();
28180 rtx *loc = &REG_NOTES (insn);
28181
28182 while (*loc)
28183 loc = &XEXP (*loc, 1);
28184 *loc = cfa_restores;
28185 RTX_FRAME_RELATED_P (insn) = 1;
28186 }
28187
28188 /* Emit function epilogue as insns. */
28189
28190 void
28191 rs6000_emit_epilogue (int sibcall)
28192 {
28193 rs6000_stack_t *info;
28194 int restoring_GPRs_inline;
28195 int restoring_FPRs_inline;
28196 int using_load_multiple;
28197 int using_mtcr_multiple;
28198 int use_backchain_to_restore_sp;
28199 int restore_lr;
28200 int strategy;
28201 HOST_WIDE_INT frame_off = 0;
28202 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28203 rtx frame_reg_rtx = sp_reg_rtx;
28204 rtx cfa_restores = NULL_RTX;
28205 rtx insn;
28206 rtx cr_save_reg = NULL_RTX;
28207 machine_mode reg_mode = Pmode;
28208 int reg_size = TARGET_32BIT ? 4 : 8;
28209 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
28210 int fp_reg_size = 8;
28211 int i;
28212 bool exit_func;
28213 unsigned ptr_regno;
28214
28215 info = rs6000_stack_info ();
28216
28217 strategy = info->savres_strategy;
28218 using_load_multiple = strategy & REST_MULTIPLE;
28219 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28220 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28221 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28222 || rs6000_tune == PROCESSOR_PPC603
28223 || rs6000_tune == PROCESSOR_PPC750
28224 || optimize_size);
28225 /* Restore via the backchain when we have a large frame, since this
28226 is more efficient than an addis, addi pair. The second condition
28227 here will not trigger at the moment; we don't actually need a
28228 frame pointer for alloca, but the generic parts of the compiler
28229 give us one anyway. */
28230 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28231 ? info->lr_save_offset
28232 : 0) > 32767
28233 || (cfun->calls_alloca
28234 && !frame_pointer_needed));
28235 restore_lr = (info->lr_save_p
28236 && (restoring_FPRs_inline
28237 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28238 && (restoring_GPRs_inline
28239 || info->first_fp_reg_save < 64)
28240 && !cfun->machine->lr_is_wrapped_separately);
28241
28242
28243 if (WORLD_SAVE_P (info))
28244 {
28245 int i, j;
28246 char rname[30];
28247 const char *alloc_rname;
28248 rtvec p;
28249
28250 /* eh_rest_world_r10 will return to the location saved in the LR
28251 stack slot (which is not likely to be our caller).
28252 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28253 rest_world is similar, except any R10 parameter is ignored.
28254 The exception-handling stuff that was here in 2.95 is no
28255 longer necessary. */
28256
28257 p = rtvec_alloc (9
28258 + 32 - info->first_gp_reg_save
28259 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28260 + 63 + 1 - info->first_fp_reg_save);
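/* The nine fixed elements are the return, the USE of the helper's
   symbol, the clobbers of r11, r0, r12, r7 and r8, the CR2 load,
   and the USE of r10. */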
28261
28262 strcpy (rname, ((crtl->calls_eh_return) ?
28263 "*eh_rest_world_r10" : "*rest_world"));
28264 alloc_rname = ggc_strdup (rname);
28265
28266 j = 0;
28267 RTVEC_ELT (p, j++) = ret_rtx;
28268 RTVEC_ELT (p, j++)
28269 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28270 /* The instruction pattern requires a clobber here;
28271 it is shared with the restVEC helper. */
28272 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
28273
28274 {
28275 /* CR register traditionally saved as CR2. */
28276 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28277 RTVEC_ELT (p, j++)
28278 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28279 if (flag_shrink_wrap)
28280 {
28281 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28282 gen_rtx_REG (Pmode, LR_REGNO),
28283 cfa_restores);
28284 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28285 }
28286 }
28287
28288 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28289 {
28290 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28291 RTVEC_ELT (p, j++)
28292 = gen_frame_load (reg,
28293 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28294 if (flag_shrink_wrap
28295 && save_reg_p (info->first_gp_reg_save + i))
28296 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28297 }
28298 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28299 {
28300 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28301 RTVEC_ELT (p, j++)
28302 = gen_frame_load (reg,
28303 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28304 if (flag_shrink_wrap
28305 && save_reg_p (info->first_altivec_reg_save + i))
28306 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28307 }
28308 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28309 {
28310 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28311 info->first_fp_reg_save + i);
28312 RTVEC_ELT (p, j++)
28313 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28314 if (flag_shrink_wrap
28315 && save_reg_p (info->first_fp_reg_save + i))
28316 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28317 }
28318 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
28319 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
28320 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
28321 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
28322 RTVEC_ELT (p, j++)
28323 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28324 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28325
28326 if (flag_shrink_wrap)
28327 {
28328 REG_NOTES (insn) = cfa_restores;
28329 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28330 RTX_FRAME_RELATED_P (insn) = 1;
28331 }
28332 return;
28333 }
28334
28335 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28336 if (info->push_p)
28337 frame_off = info->total_size;
28338
28339 /* Restore AltiVec registers if we must do so before adjusting the
28340 stack. */
28341 if (info->altivec_size != 0
28342 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28343 || (DEFAULT_ABI != ABI_V4
28344 && offset_below_red_zone_p (info->altivec_save_offset))))
28345 {
28346 int i;
28347 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28348
28349 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28350 if (use_backchain_to_restore_sp)
28351 {
28352 int frame_regno = 11;
28353
28354 if ((strategy & REST_INLINE_VRS) == 0)
28355 {
28356 /* Of r11 and r12, select the one not clobbered by an
28357 out-of-line restore function for the frame register. */
28358 frame_regno = 11 + 12 - scratch_regno;
28359 }
28360 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
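/* The back chain word lives at offset 0 from r1, so this load
   recovers the caller's stack pointer. */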
28361 emit_move_insn (frame_reg_rtx,
28362 gen_rtx_MEM (Pmode, sp_reg_rtx));
28363 frame_off = 0;
28364 }
28365 else if (frame_pointer_needed)
28366 frame_reg_rtx = hard_frame_pointer_rtx;
28367
28368 if ((strategy & REST_INLINE_VRS) == 0)
28369 {
28370 int end_save = info->altivec_save_offset + info->altivec_size;
28371 int ptr_off;
28372 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28373 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28374
28375 if (end_save + frame_off != 0)
28376 {
28377 rtx offset = GEN_INT (end_save + frame_off);
28378
28379 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28380 }
28381 else
28382 emit_move_insn (ptr_reg, frame_reg_rtx);
28383
28384 ptr_off = -end_save;
28385 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28386 info->altivec_save_offset + ptr_off,
28387 0, V4SImode, SAVRES_VR);
28388 }
28389 else
28390 {
28391 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28392 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28393 {
28394 rtx addr, areg, mem, insn;
28395 rtx reg = gen_rtx_REG (V4SImode, i);
28396 HOST_WIDE_INT offset
28397 = (info->altivec_save_offset + frame_off
28398 + 16 * (i - info->first_altivec_reg_save));
28399
28400 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28401 {
28402 mem = gen_frame_mem (V4SImode,
28403 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28404 GEN_INT (offset)));
28405 insn = gen_rtx_SET (reg, mem);
28406 }
28407 else
28408 {
28409 areg = gen_rtx_REG (Pmode, 0);
28410 emit_move_insn (areg, GEN_INT (offset));
28411
28412 /* AltiVec addressing mode is [reg+reg]. */
28413 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28414 mem = gen_frame_mem (V4SImode, addr);
28415
28416 /* Rather than emitting a generic move, force use of the
28417 lvx instruction, which we always want. In particular we
28418 don't want lxvd2x/xxpermdi for little endian. */
28419 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28420 }
28421
28422 (void) emit_insn (insn);
28423 }
28424 }
28425
28426 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28427 if (((strategy & REST_INLINE_VRS) == 0
28428 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28429 && (flag_shrink_wrap
28430 || (offset_below_red_zone_p
28431 (info->altivec_save_offset
28432 + 16 * (i - info->first_altivec_reg_save))))
28433 && save_reg_p (i))
28434 {
28435 rtx reg = gen_rtx_REG (V4SImode, i);
28436 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28437 }
28438 }
28439
28440 /* Restore VRSAVE if we must do so before adjusting the stack. */
28441 if (info->vrsave_size != 0
28442 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28443 || (DEFAULT_ABI != ABI_V4
28444 && offset_below_red_zone_p (info->vrsave_save_offset))))
28445 {
28446 rtx reg;
28447
28448 if (frame_reg_rtx == sp_reg_rtx)
28449 {
28450 if (use_backchain_to_restore_sp)
28451 {
28452 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28453 emit_move_insn (frame_reg_rtx,
28454 gen_rtx_MEM (Pmode, sp_reg_rtx));
28455 frame_off = 0;
28456 }
28457 else if (frame_pointer_needed)
28458 frame_reg_rtx = hard_frame_pointer_rtx;
28459 }
28460
28461 reg = gen_rtx_REG (SImode, 12);
28462 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28463 info->vrsave_save_offset + frame_off));
28464
28465 emit_insn (generate_set_vrsave (reg, info, 1));
28466 }
28467
28468 insn = NULL_RTX;
28469 /* If we have a large stack frame, restore the old stack pointer
28470 using the backchain. */
28471 if (use_backchain_to_restore_sp)
28472 {
28473 if (frame_reg_rtx == sp_reg_rtx)
28474 {
28475 /* Under V.4, don't reset the stack pointer until after we're done
28476 loading the saved registers. */
28477 if (DEFAULT_ABI == ABI_V4)
28478 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28479
28480 insn = emit_move_insn (frame_reg_rtx,
28481 gen_rtx_MEM (Pmode, sp_reg_rtx));
28482 frame_off = 0;
28483 }
28484 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28485 && DEFAULT_ABI == ABI_V4)
28486 /* frame_reg_rtx has been set up by the altivec restore. */
28487 ;
28488 else
28489 {
28490 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28491 frame_reg_rtx = sp_reg_rtx;
28492 }
28493 }
28494 /* If we have a frame pointer, we can restore the old stack pointer
28495 from it. */
28496 else if (frame_pointer_needed)
28497 {
28498 frame_reg_rtx = sp_reg_rtx;
28499 if (DEFAULT_ABI == ABI_V4)
28500 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28501 /* Prevent reordering memory accesses against stack pointer restore. */
28502 else if (cfun->calls_alloca
28503 || offset_below_red_zone_p (-info->total_size))
28504 rs6000_emit_stack_tie (frame_reg_rtx, true);
28505
28506 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28507 GEN_INT (info->total_size)));
28508 frame_off = 0;
28509 }
28510 else if (info->push_p
28511 && DEFAULT_ABI != ABI_V4
28512 && !crtl->calls_eh_return)
28513 {
28514 /* Prevent reordering memory accesses against stack pointer restore. */
28515 if (cfun->calls_alloca
28516 || offset_below_red_zone_p (-info->total_size))
28517 rs6000_emit_stack_tie (frame_reg_rtx, false);
28518 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28519 GEN_INT (info->total_size)));
28520 frame_off = 0;
28521 }
28522 if (insn && frame_reg_rtx == sp_reg_rtx)
28523 {
28524 if (cfa_restores)
28525 {
28526 REG_NOTES (insn) = cfa_restores;
28527 cfa_restores = NULL_RTX;
28528 }
28529 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28530 RTX_FRAME_RELATED_P (insn) = 1;
28531 }
28532
28533 /* Restore AltiVec registers if we have not done so already. */
28534 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28535 && info->altivec_size != 0
28536 && (DEFAULT_ABI == ABI_V4
28537 || !offset_below_red_zone_p (info->altivec_save_offset)))
28538 {
28539 int i;
28540
28541 if ((strategy & REST_INLINE_VRS) == 0)
28542 {
28543 int end_save = info->altivec_save_offset + info->altivec_size;
28544 int ptr_off;
28545 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28546 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28547 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28548
28549 if (end_save + frame_off != 0)
28550 {
28551 rtx offset = GEN_INT (end_save + frame_off);
28552
28553 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28554 }
28555 else
28556 emit_move_insn (ptr_reg, frame_reg_rtx);
28557
28558 ptr_off = -end_save;
28559 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28560 info->altivec_save_offset + ptr_off,
28561 0, V4SImode, SAVRES_VR);
28562 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28563 {
28564 /* Frame reg was clobbered by out-of-line save. Restore it
28565 from ptr_reg, and if we are calling out-of-line gpr or
28566 fpr restore set up the correct pointer and offset. */
28567 unsigned newptr_regno = 1;
28568 if (!restoring_GPRs_inline)
28569 {
28570 bool lr = info->gp_save_offset + info->gp_size == 0;
28571 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28572 newptr_regno = ptr_regno_for_savres (sel);
28573 end_save = info->gp_save_offset + info->gp_size;
28574 }
28575 else if (!restoring_FPRs_inline)
28576 {
28577 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28578 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28579 newptr_regno = ptr_regno_for_savres (sel);
28580 end_save = info->fp_save_offset + info->fp_size;
28581 }
28582
28583 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28584 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28585
28586 if (end_save + ptr_off != 0)
28587 {
28588 rtx offset = GEN_INT (end_save + ptr_off);
28589
28590 frame_off = -end_save;
28591 if (TARGET_32BIT)
28592 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28593 ptr_reg, offset));
28594 else
28595 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28596 ptr_reg, offset));
28597 }
28598 else
28599 {
28600 frame_off = ptr_off;
28601 emit_move_insn (frame_reg_rtx, ptr_reg);
28602 }
28603 }
28604 }
28605 else
28606 {
28607 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28608 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28609 {
28610 rtx addr, areg, mem, insn;
28611 rtx reg = gen_rtx_REG (V4SImode, i);
28612 HOST_WIDE_INT offset
28613 = (info->altivec_save_offset + frame_off
28614 + 16 * (i - info->first_altivec_reg_save));
28615
28616 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28617 {
28618 mem = gen_frame_mem (V4SImode,
28619 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28620 GEN_INT (offset)));
28621 insn = gen_rtx_SET (reg, mem);
28622 }
28623 else
28624 {
28625 areg = gen_rtx_REG (Pmode, 0);
28626 emit_move_insn (areg, GEN_INT (offset));
28627
28628 /* AltiVec addressing mode is [reg+reg]. */
28629 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28630 mem = gen_frame_mem (V4SImode, addr);
28631
28632 /* Rather than emitting a generic move, force use of the
28633 lvx instruction, which we always want. In particular we
28634 don't want lxvd2x/xxpermdi for little endian. */
28635 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28636 }
28637
28638 (void) emit_insn (insn);
28639 }
28640 }
28641
28642 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28643 if (((strategy & REST_INLINE_VRS) == 0
28644 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28645 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28646 && save_reg_p (i))
28647 {
28648 rtx reg = gen_rtx_REG (V4SImode, i);
28649 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28650 }
28651 }
28652
28653 /* Restore VRSAVE if we have not done so already. */
28654 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28655 && info->vrsave_size != 0
28656 && (DEFAULT_ABI == ABI_V4
28657 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28658 {
28659 rtx reg;
28660
28661 reg = gen_rtx_REG (SImode, 12);
28662 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28663 info->vrsave_save_offset + frame_off));
28664
28665 emit_insn (generate_set_vrsave (reg, info, 1));
28666 }
28667
28668 /* If we exit by an out-of-line restore function on ABI_V4 then that
28669 function will deallocate the stack, so we don't need to worry
28670 about the unwinder restoring cr from an invalid stack frame
28671 location. */
28672 exit_func = (!restoring_FPRs_inline
28673 || (!restoring_GPRs_inline
28674 && info->first_fp_reg_save == 64));
28675
28676 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28677 *separate* slots if the routine calls __builtin_eh_return, so
28678 that they can be independently restored by the unwinder. */
28679 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28680 {
28681 int i, cr_off = info->ehcr_offset;
28682
28683 for (i = 0; i < 8; i++)
28684 if (!call_used_regs[CR0_REGNO + i])
28685 {
28686 rtx reg = gen_rtx_REG (SImode, 0);
28687 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28688 cr_off + frame_off));
28689
28690 insn = emit_insn (gen_movsi_to_cr_one
28691 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28692
28693 if (!exit_func && flag_shrink_wrap)
28694 {
28695 add_reg_note (insn, REG_CFA_RESTORE,
28696 gen_rtx_REG (SImode, CR0_REGNO + i));
28697
28698 RTX_FRAME_RELATED_P (insn) = 1;
28699 }
28700
28701 cr_off += reg_size;
28702 }
28703 }
28704
28705 /* Get the old lr if we saved it. If we are restoring registers
28706 out-of-line, then the out-of-line routines can do this for us. */
28707 if (restore_lr && restoring_GPRs_inline)
28708 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28709
28710 /* Get the old cr if we saved it. */
28711 if (info->cr_save_p)
28712 {
28713 unsigned cr_save_regno = 12;
28714
28715 if (!restoring_GPRs_inline)
28716 {
28717 /* Ensure we don't use the register used by the out-of-line
28718 gpr register restore below. */
28719 bool lr = info->gp_save_offset + info->gp_size == 0;
28720 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28721 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28722
28723 if (gpr_ptr_regno == 12)
28724 cr_save_regno = 11;
28725 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28726 }
28727 else if (REGNO (frame_reg_rtx) == 12)
28728 cr_save_regno = 11;
28729
28730 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28731 info->cr_save_offset + frame_off,
28732 exit_func);
28733 }
28734
28735 /* Set LR here to try to overlap restores below. */
28736 if (restore_lr && restoring_GPRs_inline)
28737 restore_saved_lr (0, exit_func);
28738
28739 /* Load exception handler data registers, if needed. */
28740 if (crtl->calls_eh_return)
28741 {
28742 unsigned int i, regno;
28743
28744 if (TARGET_AIX)
28745 {
28746 rtx reg = gen_rtx_REG (reg_mode, 2);
28747 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28748 frame_off + RS6000_TOC_SAVE_SLOT));
28749 }
28750
28751 for (i = 0; ; ++i)
28752 {
28753 rtx mem;
28754
28755 regno = EH_RETURN_DATA_REGNO (i);
28756 if (regno == INVALID_REGNUM)
28757 break;
28758
28759 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28760 info->ehrd_offset + frame_off
28761 + reg_size * (int) i);
28762
28763 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28764 }
28765 }
28766
28767 /* Restore GPRs. This is done as a PARALLEL if we are using
28768 the load-multiple instructions. */
28769 if (!restoring_GPRs_inline)
28770 {
28771 /* We are jumping to an out-of-line function. */
28772 rtx ptr_reg;
28773 int end_save = info->gp_save_offset + info->gp_size;
28774 bool can_use_exit = end_save == 0;
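/* If the GPR save area reaches the frame top we can use the
   out-of-line "exit" variant, which also restores LR and
   returns to our caller. */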
28775 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28776 int ptr_off;
28777
28778 /* Emit stack reset code if we need it. */
28779 ptr_regno = ptr_regno_for_savres (sel);
28780 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28781 if (can_use_exit)
28782 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28783 else if (end_save + frame_off != 0)
28784 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28785 GEN_INT (end_save + frame_off)));
28786 else if (REGNO (frame_reg_rtx) != ptr_regno)
28787 emit_move_insn (ptr_reg, frame_reg_rtx);
28788 if (REGNO (frame_reg_rtx) == ptr_regno)
28789 frame_off = -end_save;
28790
28791 if (can_use_exit && info->cr_save_p)
28792 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28793
28794 ptr_off = -end_save;
28795 rs6000_emit_savres_rtx (info, ptr_reg,
28796 info->gp_save_offset + ptr_off,
28797 info->lr_save_offset + ptr_off,
28798 reg_mode, sel);
28799 }
28800 else if (using_load_multiple)
28801 {
28802 rtvec p;
28803 p = rtvec_alloc (32 - info->first_gp_reg_save);
28804 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28805 RTVEC_ELT (p, i)
28806 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28807 frame_reg_rtx,
28808 info->gp_save_offset + frame_off + reg_size * i);
28809 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28810 }
28811 else
28812 {
28813 int offset = info->gp_save_offset + frame_off;
28814 for (i = info->first_gp_reg_save; i < 32; i++)
28815 {
28816 if (save_reg_p (i)
28817 && !cfun->machine->gpr_is_wrapped_separately[i])
28818 {
28819 rtx reg = gen_rtx_REG (reg_mode, i);
28820 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28821 }
28822
28823 offset += reg_size;
28824 }
28825 }
28826
28827 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28828 {
28829 /* If the frame pointer was used then we can't delay emitting
28830 a REG_CFA_DEF_CFA note. This must happen on the insn that
28831 restores the frame pointer, r31. We may have already emitted
28832 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28833 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28834 be harmless if emitted. */
28835 if (frame_pointer_needed)
28836 {
28837 insn = get_last_insn ();
28838 add_reg_note (insn, REG_CFA_DEF_CFA,
28839 plus_constant (Pmode, frame_reg_rtx, frame_off));
28840 RTX_FRAME_RELATED_P (insn) = 1;
28841 }
28842
28843 /* Set up cfa_restores. We always need these when
28844 shrink-wrapping. If not shrink-wrapping then we only need
28845 the cfa_restore when the stack location is no longer valid.
28846 The cfa_restores must be emitted on or before the insn that
28847 invalidates the stack, and of course must not be emitted
28848 before the insn that actually does the restore. The latter
28849 is why it is a bad idea to emit the cfa_restores as a group
28850 on the last instruction here that actually does a restore:
28851 That insn may be reordered with respect to others doing
28852 restores. */
28853 if (flag_shrink_wrap
28854 && !restoring_GPRs_inline
28855 && info->first_fp_reg_save == 64)
28856 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28857
28858 for (i = info->first_gp_reg_save; i < 32; i++)
28859 if (save_reg_p (i)
28860 && !cfun->machine->gpr_is_wrapped_separately[i])
28861 {
28862 rtx reg = gen_rtx_REG (reg_mode, i);
28863 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28864 }
28865 }
28866
28867 if (!restoring_GPRs_inline
28868 && info->first_fp_reg_save == 64)
28869 {
28870 /* We are jumping to an out-of-line function. */
28871 if (cfa_restores)
28872 emit_cfa_restores (cfa_restores);
28873 return;
28874 }
28875
28876 if (restore_lr && !restoring_GPRs_inline)
28877 {
28878 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28879 restore_saved_lr (0, exit_func);
28880 }
28881
28882 /* Restore fpr's if we need to do it without calling a function. */
28883 if (restoring_FPRs_inline)
28884 {
28885 int offset = info->fp_save_offset + frame_off;
28886 for (i = info->first_fp_reg_save; i < 64; i++)
28887 {
28888 if (save_reg_p (i)
28889 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28890 {
28891 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28892 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28893 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28894 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28895 cfa_restores);
28896 }
28897
28898 offset += fp_reg_size;
28899 }
28900 }
28901
28902 /* If we saved cr, restore it here. Just those that were used. */
28903 if (info->cr_save_p)
28904 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28905
28906 /* If this is V.4, unwind the stack pointer after all of the loads
28907 have been done, or set up r11 if we are restoring fp out of line. */
28908 ptr_regno = 1;
28909 if (!restoring_FPRs_inline)
28910 {
28911 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28912 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28913 ptr_regno = ptr_regno_for_savres (sel);
28914 }
28915
28916 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28917 if (REGNO (frame_reg_rtx) == ptr_regno)
28918 frame_off = 0;
28919
28920 if (insn && restoring_FPRs_inline)
28921 {
28922 if (cfa_restores)
28923 {
28924 REG_NOTES (insn) = cfa_restores;
28925 cfa_restores = NULL_RTX;
28926 }
28927 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28928 RTX_FRAME_RELATED_P (insn) = 1;
28929 }
28930
28931 if (crtl->calls_eh_return)
28932 {
28933 rtx sa = EH_RETURN_STACKADJ_RTX;
28934 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28935 }
28936
28937 if (!sibcall && restoring_FPRs_inline)
28938 {
28939 if (cfa_restores)
28940 {
28941 /* We can't hang the cfa_restores off a simple return,
28942 since the shrink-wrap code sometimes uses an existing
28943 return. This means there might be a path from
28944 pre-prologue code to this return, and dwarf2cfi code
28945 wants the eh_frame unwinder state to be the same on
28946 all paths to any point. So we need to emit the
28947 cfa_restores before the return. For -m64 we really
28948 don't need epilogue cfa_restores at all, except for
28949 this irritating dwarf2cfi with shrink-wrap
28950 requirement; the stack red-zone means eh_frame info
28951 from the prologue telling the unwinder to restore
28952 from the stack is perfectly good right to the end of
28953 the function. */
28954 emit_insn (gen_blockage ());
28955 emit_cfa_restores (cfa_restores);
28956 cfa_restores = NULL_RTX;
28957 }
28958
28959 emit_jump_insn (targetm.gen_simple_return ());
28960 }
28961
28962 if (!sibcall && !restoring_FPRs_inline)
28963 {
28964 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28965 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28966 int elt = 0;
28967 RTVEC_ELT (p, elt++) = ret_rtx;
28968 if (lr)
28969 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28970
28971 /* We have to restore more than two FP registers, so branch to the
28972 restore function. It will return to our caller. */
28973 int i;
28974 int reg;
28975 rtx sym;
28976
28977 if (flag_shrink_wrap)
28978 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28979
28980 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28981 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28982 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
28983 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28984
28985 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28986 {
28987 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28988
28989 RTVEC_ELT (p, elt++)
28990 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28991 if (flag_shrink_wrap
28992 && save_reg_p (info->first_fp_reg_save + i))
28993 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28994 }
28995
28996 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28997 }
28998
28999 if (cfa_restores)
29000 {
29001 if (sibcall)
29002 /* Ensure the cfa_restores are hung off an insn that won't
29003 be reordered above other restores. */
29004 emit_insn (gen_blockage ());
29005
29006 emit_cfa_restores (cfa_restores);
29007 }
29008 }
29009
29010 /* Write function epilogue. */
29011
29012 static void
29013 rs6000_output_function_epilogue (FILE *file)
29014 {
29015 #if TARGET_MACHO
29016 macho_branch_islands ();
29017
29018 {
29019 rtx_insn *insn = get_last_insn ();
29020 rtx_insn *deleted_debug_label = NULL;
29021
29022 /* Mach-O doesn't support labels at the end of objects, so if
29023 it looks like we might want one, take special action.
29024
29025 First, collect any sequence of deleted debug labels. */
29026 while (insn
29027 && NOTE_P (insn)
29028 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
29029 {
29030 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
29031 a nop; instead set their CODE_LABEL_NUMBER to -1, since
29032 otherwise there would be code generation differences
29033 between -g and -g0. */
29034 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29035 deleted_debug_label = insn;
29036 insn = PREV_INSN (insn);
29037 }
29038
29039 /* Second, if we have:
29040 label:
29041 barrier
29042 then this needs to be detected, so skip past the barrier. */
29043
29044 if (insn && BARRIER_P (insn))
29045 insn = PREV_INSN (insn);
29046
29047 /* Up to now we've only seen notes or barriers. */
29048 if (insn)
29049 {
29050 if (LABEL_P (insn)
29051 || (NOTE_P (insn)
29052 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29053 /* Trailing label: <barrier>. */
29054 fputs ("\tnop\n", file);
29055 else
29056 {
29057 /* Lastly, see if we have a completely empty function body. */
29058 while (insn && ! INSN_P (insn))
29059 insn = PREV_INSN (insn);
29060 /* If we don't find any insns, we've got an empty function body;
29061 i.e. completely empty, without a return or branch. This is
29062 taken as the case where a function body has been removed
29063 because it contains an inline __builtin_unreachable(). GCC
29064 declares reaching __builtin_unreachable() to be undefined
29065 behavior, so we're not obliged to do anything; still, we want
29066 non-zero-sized function bodies. To meet this, and help the
29067 user out, let's trap the case. */
29068 if (insn == NULL)
29069 fputs ("\ttrap\n", file);
29070 }
29071 }
29072 else if (deleted_debug_label)
29073 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29074 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29075 CODE_LABEL_NUMBER (insn) = -1;
29076 }
29077 #endif
29078
29079 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29080 on its format.
29081
29082 We don't output a traceback table if -finhibit-size-directive was
29083 used. The documentation for -finhibit-size-directive reads
29084 ``don't output a @code{.size} assembler directive, or anything
29085 else that would cause trouble if the function is split in the
29086 middle, and the two halves are placed at locations far apart in
29087 memory.'' The traceback table has this property, since it
29088 includes the offset from the start of the function to the
29089 traceback table itself.
29090
29091 System V.4 PowerPC targets (and the embedded ABI derived from
29092 them) use a different traceback table. */
29093 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29094 && ! flag_inhibit_size_directive
29095 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29096 {
29097 const char *fname = NULL;
29098 const char *language_string = lang_hooks.name;
29099 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29100 int i;
29101 int optional_tbtab;
29102 rs6000_stack_t *info = rs6000_stack_info ();
29103
29104 if (rs6000_traceback == traceback_full)
29105 optional_tbtab = 1;
29106 else if (rs6000_traceback == traceback_part)
29107 optional_tbtab = 0;
29108 else
29109 optional_tbtab = !optimize_size && !TARGET_ELF;
29110
29111 if (optional_tbtab)
29112 {
29113 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29114 while (*fname == '.') /* V.4 encodes . in the name */
29115 fname++;
29116
29117 /* Need label immediately before tbtab, so we can compute
29118 its offset from the function start. */
29119 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29120 ASM_OUTPUT_LABEL (file, fname);
29121 }
29122
29123 /* The .tbtab pseudo-op can only be used for the first eight
29124 expressions, since it can't handle the possibly variable
29125 length fields that follow. However, if you omit the optional
29126 fields, the assembler outputs zeros for all optional fields
29127 anyway, giving each variable-length field its minimum length
29128 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29129 pseudo-op at all. */
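
/* Illustrative sketch (not from the original source) of the table
   emitted below; exact byte values depend on the function:
	.long 0			# all-zero word marking the tbtab start
	.byte 0,0,...		# format, language, then bit fields,
				# FPR/GPR save counts, parameter info
   followed, when optional_tbtab is set, by the optional fields
   (parameter types, tbtab offset, function name, alloca reg).  */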
29130
29131 /* An all-zero word flags the start of the tbtab, for debuggers
29132 that have to find it by searching forward from the entry
29133 point or from the current pc. */
29134 fputs ("\t.long 0\n", file);
29135
29136 /* Tbtab format type. Use format type 0. */
29137 fputs ("\t.byte 0,", file);
29138
29139 /* Language type. Unfortunately, there does not seem to be any
29140 official way to discover the language being compiled, so we
29141 use language_string.
29142 C is 0. Fortran is 1. Ada is 3. C++ is 9.
29143 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29144 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
29145 numbers either, so for now use 0. */
29146 if (lang_GNU_C ()
29147 || ! strcmp (language_string, "GNU GIMPLE")
29148 || ! strcmp (language_string, "GNU Go")
29149 || ! strcmp (language_string, "GNU D")
29150 || ! strcmp (language_string, "libgccjit"))
29151 i = 0;
29152 else if (! strcmp (language_string, "GNU F77")
29153 || lang_GNU_Fortran ())
29154 i = 1;
29155 else if (! strcmp (language_string, "GNU Ada"))
29156 i = 3;
29157 else if (lang_GNU_CXX ()
29158 || ! strcmp (language_string, "GNU Objective-C++"))
29159 i = 9;
29160 else if (! strcmp (language_string, "GNU Java"))
29161 i = 13;
29162 else if (! strcmp (language_string, "GNU Objective-C"))
29163 i = 14;
29164 else
29165 gcc_unreachable ();
29166 fprintf (file, "%d,", i);
29167
29168 /* 8 single bit fields: global linkage (not set for C extern linkage,
29169 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29170 from start of procedure stored in tbtab, internal function, function
29171 has controlled storage, function has no toc, function uses fp,
29172 function logs/aborts fp operations. */
29173 /* Assume that fp operations are used if any fp reg must be saved. */
29174 fprintf (file, "%d,",
29175 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
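/* e.g. (illustrative) with optional_tbtab == 1 and at least one FPR
   saved, this byte is (1 << 5) | (1 << 1) == 34.  */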
29176
29177 /* 6 bitfields: function is interrupt handler, name present in
29178 proc table, function calls alloca, on condition directives
29179 (controls stack walks, 3 bits), saves condition reg, saves
29180 link reg. */
29181 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29182 set up as a frame pointer, even when there is no alloca call. */
29183 fprintf (file, "%d,",
29184 ((optional_tbtab << 6)
29185 | ((optional_tbtab & frame_pointer_needed) << 5)
29186 | (info->cr_save_p << 1)
29187 | (info->lr_save_p)));
29188
29189 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29190 (6 bits). */
29191 fprintf (file, "%d,",
29192 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29193
29194 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29195 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29196
29197 if (optional_tbtab)
29198 {
29199 /* Compute the parameter info from the function decl argument
29200 list. */
29201 tree decl;
29202 int next_parm_info_bit = 31;
29203
29204 for (decl = DECL_ARGUMENTS (current_function_decl);
29205 decl; decl = DECL_CHAIN (decl))
29206 {
29207 rtx parameter = DECL_INCOMING_RTL (decl);
29208 machine_mode mode = GET_MODE (parameter);
29209
29210 if (REG_P (parameter))
29211 {
29212 if (SCALAR_FLOAT_MODE_P (mode))
29213 {
29214 int bits;
29215
29216 float_parms++;
29217
29218 switch (mode)
29219 {
29220 case E_SFmode:
29221 case E_SDmode:
29222 bits = 0x2;
29223 break;
29224
29225 case E_DFmode:
29226 case E_DDmode:
29227 case E_TFmode:
29228 case E_TDmode:
29229 case E_IFmode:
29230 case E_KFmode:
29231 bits = 0x3;
29232 break;
29233
29234 default:
29235 gcc_unreachable ();
29236 }
29237
29238 /* If only one bit will fit, don't or in this entry. */
29239 if (next_parm_info_bit > 0)
29240 parm_info |= (bits << (next_parm_info_bit - 1));
29241 next_parm_info_bit -= 2;
29242 }
29243 else
29244 {
29245 fixed_parms += ((GET_MODE_SIZE (mode)
29246 + (UNITS_PER_WORD - 1))
29247 / UNITS_PER_WORD);
29248 next_parm_info_bit -= 1;
29249 }
29250 }
29251 }
29252 }
29253
29254 /* Number of fixed point parameters. */
29255 /* This is actually the number of words of fixed point parameters; thus
29256 an 8 byte struct counts as 2; and thus the maximum value is 8. */
29257 fprintf (file, "%d,", fixed_parms);
29258
29259 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29260 all on stack. */
29261 /* This is actually the number of fp registers that hold parameters;
29262 and thus the maximum value is 13. */
29263 /* Set parameters on stack bit if parameters are not in their original
29264 registers, regardless of whether they are on the stack? Xlc
29265 seems to set the bit when not optimizing. */
29266 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29267
29268 if (optional_tbtab)
29269 {
29270 /* Optional fields follow. Some are variable length. */
29271
29272 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29273 float, 11 double float. */
29274 /* There is an entry for each parameter in a register, in the order
29275 that they occur in the parameter list. Any intervening arguments
29276 on the stack are ignored. If the list overflows a long (max
29277 possible length 34 bits) then completely leave off all elements
29278 that don't fit. */
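/* e.g. (illustrative) for register parameters (int, double) the
   field starts with bit 0 (fixed point) followed by bits 11
   (double float), built by the loop above.  */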
29279 /* Only emit this long if there was at least one parameter. */
29280 if (fixed_parms || float_parms)
29281 fprintf (file, "\t.long %d\n", parm_info);
29282
29283 /* Offset from start of code to tb table. */
29284 fputs ("\t.long ", file);
29285 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29286 RS6000_OUTPUT_BASENAME (file, fname);
29287 putc ('-', file);
29288 rs6000_output_function_entry (file, fname);
29289 putc ('\n', file);
29290
29291 /* Interrupt handler mask. */
29292 /* Omit this long, since we never set the interrupt handler bit
29293 above. */
29294
29295 /* Number of CTL (controlled storage) anchors. */
29296 /* Omit this long, since the has_ctl bit is never set above. */
29297
29298 /* Displacement into stack of each CTL anchor. */
29299 /* Omit this list of longs, because there are no CTL anchors. */
29300
29301 /* Length of function name. */
29302 if (*fname == '*')
29303 ++fname;
29304 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29305
29306 /* Function name. */
29307 assemble_string (fname, strlen (fname));
29308
29309 /* Register for alloca automatic storage; this is always reg 31.
29310 Only emit this if the alloca bit was set above. */
29311 if (frame_pointer_needed)
29312 fputs ("\t.byte 31\n", file);
29313
29314 fputs ("\t.align 2\n", file);
29315 }
29316 }
29317
29318 /* Arrange to define .LCTOC1 label, if not already done. */
29319 if (need_toc_init)
29320 {
29321 need_toc_init = 0;
29322 if (!toc_initialized)
29323 {
29324 switch_to_section (toc_section);
29325 switch_to_section (current_function_section ());
29326 }
29327 }
29328 }
29329
29330 /* -fsplit-stack support. */
29331
29332 /* A SYMBOL_REF for __morestack. */
29333 static GTY(()) rtx morestack_ref;
29334
29335 static rtx
29336 gen_add3_const (rtx rt, rtx ra, long c)
29337 {
29338 if (TARGET_64BIT)
29339 return gen_adddi3 (rt, ra, GEN_INT (c));
29340 else
29341 return gen_addsi3 (rt, ra, GEN_INT (c));
29342 }
29343
29344 /* Emit -fsplit-stack prologue, which goes before the regular function
29345 prologue (at local entry point in the case of ELFv2). */
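
/* Illustrative shape of the emitted check on a 64-bit target (not
   from the original source; offsets and label names are assumed):
	ld 0,<limit-offset>(13)	# load the split-stack limit into r0
	addis 12,1,<-size@ha>	# two linker-editable insns computing
	addi 12,12,<-size@l>	#   r12 = sp - frame size
	cmpld 7,12,0		# enough stack left?
	bge 7,.Lok		# yes: run the normal prologue
	mflr 0; std 0,<lr-off>(1); bl __morestack; restore LR
   .Lok:  */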
29346
29347 void
29348 rs6000_expand_split_stack_prologue (void)
29349 {
29350 rs6000_stack_t *info = rs6000_stack_info ();
29351 unsigned HOST_WIDE_INT allocate;
29352 long alloc_hi, alloc_lo;
29353 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29354 rtx_insn *insn;
29355
29356 gcc_assert (flag_split_stack && reload_completed);
29357
29358 if (!info->push_p)
29359 return;
29360
29361 if (global_regs[29])
29362 {
29363 error ("%qs uses register r29", "%<-fsplit-stack%>");
29364 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29365 "conflicts with %qD", global_regs_decl[29]);
29366 }
29367
29368 allocate = info->total_size;
29369 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29370 {
29371 sorry ("Stack frame larger than 2G is not supported for "
29372 "%<-fsplit-stack%>");
29373 return;
29374 }
29375 if (morestack_ref == NULL_RTX)
29376 {
29377 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29378 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29379 | SYMBOL_FLAG_FUNCTION);
29380 }
29381
29382 r0 = gen_rtx_REG (Pmode, 0);
29383 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29384 r12 = gen_rtx_REG (Pmode, 12);
29385 emit_insn (gen_load_split_stack_limit (r0));
29386 /* Always emit two insns here to calculate the requested stack,
29387 so that the linker can edit them when adjusting size for calling
29388 non-split-stack code. */
29389 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29390 alloc_lo = -allocate - alloc_hi;
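/* Worked example (illustrative): for allocate == 0x12345,
   -allocate == -0x12345, so alloc_hi == -0x10000 and
   alloc_lo == -0x2345; their sum is -allocate and alloc_lo fits
   the signed 16-bit addi immediate.  */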
29391 if (alloc_hi != 0)
29392 {
29393 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29394 if (alloc_lo != 0)
29395 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29396 else
29397 emit_insn (gen_nop ());
29398 }
29399 else
29400 {
29401 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29402 emit_insn (gen_nop ());
29403 }
29404
29405 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29406 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29407 ok_label = gen_label_rtx ();
29408 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29409 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29410 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29411 pc_rtx);
29412 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29413 JUMP_LABEL (insn) = ok_label;
29414 /* Mark the jump as very likely to be taken. */
29415 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29416
29417 lr = gen_rtx_REG (Pmode, LR_REGNO);
29418 insn = emit_move_insn (r0, lr);
29419 RTX_FRAME_RELATED_P (insn) = 1;
29420 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29421 RTX_FRAME_RELATED_P (insn) = 1;
29422
29423 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29424 const0_rtx, const0_rtx));
29425 call_fusage = NULL_RTX;
29426 use_reg (&call_fusage, r12);
29427 /* Say the call uses r0, even though it doesn't, to stop regrename
29428 from twiddling with the insns saving lr, trashing args for cfun.
29429 The insns restoring lr are similarly protected by making
29430 split_stack_return use r0. */
29431 use_reg (&call_fusage, r0);
29432 add_function_usage_to (insn, call_fusage);
29433 /* Indicate that this function can't jump to non-local gotos. */
29434 make_reg_eh_region_note_nothrow_nononlocal (insn);
29435 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29436 insn = emit_move_insn (lr, r0);
29437 add_reg_note (insn, REG_CFA_RESTORE, lr);
29438 RTX_FRAME_RELATED_P (insn) = 1;
29439 emit_insn (gen_split_stack_return ());
29440
29441 emit_label (ok_label);
29442 LABEL_NUSES (ok_label) = 1;
29443 }
29444
29445 /* Return the internal arg pointer used for function incoming
29446 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29447 to copy it to a pseudo in order for it to be preserved over calls
29448 and suchlike. We'd really like to use a pseudo here for the
29449 internal arg pointer but data-flow analysis is not prepared to
29450 accept pseudos as live at the beginning of a function. */
29451
29452 static rtx
29453 rs6000_internal_arg_pointer (void)
29454 {
29455 if (flag_split_stack
29456 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29457 == NULL))
29459 {
29460 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29461 {
29462 rtx pat;
29463
29464 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29465 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29466
29467 /* Put the pseudo initialization right after the note at the
29468 beginning of the function. */
29469 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29470 gen_rtx_REG (Pmode, 12));
29471 push_topmost_sequence ();
29472 emit_insn_after (pat, get_insns ());
29473 pop_topmost_sequence ();
29474 }
29475 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29476 FIRST_PARM_OFFSET (current_function_decl));
29477 return copy_to_reg (ret);
29478 }
29479 return virtual_incoming_args_rtx;
29480 }
29481
29482 /* We may have to tell the dataflow pass that the split stack prologue
29483 is initializing a register. */
29484
29485 static void
29486 rs6000_live_on_entry (bitmap regs)
29487 {
29488 if (flag_split_stack)
29489 bitmap_set_bit (regs, 12);
29490 }
29491
29492 /* Emit -fsplit-stack dynamic stack allocation space check. */
29493
29494 void
29495 rs6000_split_stack_space_check (rtx size, rtx label)
29496 {
29497 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29498 rtx limit = gen_reg_rtx (Pmode);
29499 rtx requested = gen_reg_rtx (Pmode);
29500 rtx cmp = gen_reg_rtx (CCUNSmode);
29501 rtx jump;
29502
29503 emit_insn (gen_load_split_stack_limit (limit));
29504 if (CONST_INT_P (size))
29505 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29506 else
29507 {
29508 size = force_reg (Pmode, size);
29509 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29510 }
29511 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29512 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29513 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29514 gen_rtx_LABEL_REF (VOIDmode, label),
29515 pc_rtx);
29516 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29517 JUMP_LABEL (jump) = label;
29518 }
29519 \f
29520 /* A C compound statement that outputs the assembler code for a thunk
29521 function, used to implement C++ virtual function calls with
29522 multiple inheritance. The thunk acts as a wrapper around a virtual
29523 function, adjusting the implicit object parameter before handing
29524 control off to the real function.
29525
29526 First, emit code to add the integer DELTA to the location that
29527 contains the incoming first argument. Assume that this argument
29528 contains a pointer, and is the one used to pass the `this' pointer
29529 in C++. This is the incoming argument *before* the function
29530 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29531 values of all other incoming arguments.
29532
29533 After the addition, emit code to jump to FUNCTION, which is a
29534 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29535 not touch the return address. Hence returning from FUNCTION will
29536 return to whoever called the current `thunk'.
29537
29538 The effect must be as if FUNCTION had been called directly with the
29539 adjusted first argument. This macro is responsible for emitting
29540 all of the code for a thunk function; output_function_prologue()
29541 and output_function_epilogue() are not invoked.
29542
29543 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29544 been extracted from it.) It might possibly be useful on some
29545 targets, but probably not.
29546
29547 If you do not define this macro, the target-independent code in the
29548 C++ frontend will generate a less efficient heavyweight thunk that
29549 calls FUNCTION instead of jumping to it. The generic approach does
29550 not support varargs. */
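
/* Illustrative 64-bit thunk body (not from the original source)
   for a small positive DELTA and no VCALL_OFFSET:
	addi 3,3,<delta>	# adjust the incoming "this" in r3
	b <function>		# tail call; returns to our caller  */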
29551
29552 static void
29553 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29554 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29555 tree function)
29556 {
29557 rtx this_rtx, funexp;
29558 rtx_insn *insn;
29559
29560 reload_completed = 1;
29561 epilogue_completed = 1;
29562
29563 /* Mark the end of the (empty) prologue. */
29564 emit_note (NOTE_INSN_PROLOGUE_END);
29565
29566 /* Find the "this" pointer. If the function returns a structure,
29567 the structure return pointer is in r3. */
29568 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29569 this_rtx = gen_rtx_REG (Pmode, 4);
29570 else
29571 this_rtx = gen_rtx_REG (Pmode, 3);
29572
29573 /* Apply the constant offset, if required. */
29574 if (delta)
29575 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29576
29577 /* Apply the offset from the vtable, if required. */
29578 if (vcall_offset)
29579 {
29580 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29581 rtx tmp = gen_rtx_REG (Pmode, 12);
29582
29583 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29584 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29585 {
29586 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29587 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29588 }
29589 else
29590 {
29591 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29592
29593 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29594 }
29595 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29596 }
29597
29598 /* Generate a tail call to the target function. */
29599 if (!TREE_USED (function))
29600 {
29601 assemble_external (function);
29602 TREE_USED (function) = 1;
29603 }
29604 funexp = XEXP (DECL_RTL (function), 0);
29605 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29606
29607 #if TARGET_MACHO
29608 if (MACHOPIC_INDIRECT)
29609 funexp = machopic_indirect_call_target (funexp);
29610 #endif
29611
29612 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29613 generate sibcall RTL explicitly. */
29614 insn = emit_call_insn (
29615 gen_rtx_PARALLEL (VOIDmode,
29616 gen_rtvec (3,
29617 gen_rtx_CALL (VOIDmode,
29618 funexp, const0_rtx),
29619 gen_rtx_USE (VOIDmode, const0_rtx),
29620 simple_return_rtx)));
29621 SIBLING_CALL_P (insn) = 1;
29622 emit_barrier ();
29623
29624 /* Run just enough of rest_of_compilation to get the insns emitted.
29625 There's not really enough bulk here to make other passes such as
29626 instruction scheduling worth while. Note that use_thunk calls
29627 assemble_start_function and assemble_end_function. */
29628 insn = get_insns ();
29629 shorten_branches (insn);
29630 final_start_function (insn, file, 1);
29631 final (insn, file, 1);
29632 final_end_function ();
29633
29634 reload_completed = 0;
29635 epilogue_completed = 0;
29636 }
29637 \f
29638 /* A quick summary of the various types of 'constant-pool tables'
29639 under PowerPC:
29640
29641 Target Flags Name One table per
29642 AIX (none) AIX TOC object file
29643 AIX -mfull-toc AIX TOC object file
29644 AIX -mminimal-toc AIX minimal TOC translation unit
29645 SVR4/EABI (none) SVR4 SDATA object file
29646 SVR4/EABI -fpic SVR4 pic object file
29647 SVR4/EABI -fPIC SVR4 PIC translation unit
29648 SVR4/EABI -mrelocatable EABI TOC function
29649 SVR4/EABI -maix AIX TOC object file
29650 SVR4/EABI -maix -mminimal-toc
29651 AIX minimal TOC translation unit
29652
29653 Name Reg. Set by entries contains:
29654 made by addrs? fp? sum?
29655
29656 AIX TOC 2 crt0 as Y option option
29657 AIX minimal TOC 30 prolog gcc Y Y option
29658 SVR4 SDATA 13 crt0 gcc N Y N
29659 SVR4 pic 30 prolog ld Y not yet N
29660 SVR4 PIC 30 prolog gcc Y option option
29661 EABI TOC 30 prolog gcc Y option option
29662
29663 */
29664
29665 /* Hash functions for the hash table. */
29666
29667 static unsigned
29668 rs6000_hash_constant (rtx k)
29669 {
29670 enum rtx_code code = GET_CODE (k);
29671 machine_mode mode = GET_MODE (k);
29672 unsigned result = (code << 3) ^ mode;
29673 const char *format;
29674 int flen, fidx;
29675
29676 format = GET_RTX_FORMAT (code);
29677 flen = strlen (format);
29678 fidx = 0;
29679
29680 switch (code)
29681 {
29682 case LABEL_REF:
29683 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29684
29685 case CONST_WIDE_INT:
29686 {
29687 int i;
29688 flen = CONST_WIDE_INT_NUNITS (k);
29689 for (i = 0; i < flen; i++)
29690 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29691 return result;
29692 }
29693
29694 case CONST_DOUBLE:
29695 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29696
29697 case CODE_LABEL:
29698 fidx = 3;
29699 break;
29700
29701 default:
29702 break;
29703 }
29704
29705 for (; fidx < flen; fidx++)
29706 switch (format[fidx])
29707 {
29708 case 's':
29709 {
29710 unsigned i, len;
29711 const char *str = XSTR (k, fidx);
29712 len = strlen (str);
29713 result = result * 613 + len;
29714 for (i = 0; i < len; i++)
29715 result = result * 613 + (unsigned) str[i];
29716 break;
29717 }
29718 case 'u':
29719 case 'e':
29720 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29721 break;
29722 case 'i':
29723 case 'n':
29724 result = result * 613 + (unsigned) XINT (k, fidx);
29725 break;
29726 case 'w':
29727 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29728 result = result * 613 + (unsigned) XWINT (k, fidx);
29729 else
29730 {
29731 size_t i;
29732 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29733 result = result * 613 + (unsigned) (XWINT (k, fidx)
29734 >> CHAR_BIT * i);
29735 }
29736 break;
29737 case '0':
29738 break;
29739 default:
29740 gcc_unreachable ();
29741 }
29742
29743 return result;
29744 }
29745
29746 hashval_t
29747 toc_hasher::hash (toc_hash_struct *thc)
29748 {
29749 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29750 }
29751
29752 /* Compare H1 and H2 for equivalence. */
29753
29754 bool
29755 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29756 {
29757 rtx r1 = h1->key;
29758 rtx r2 = h2->key;
29759
29760 if (h1->key_mode != h2->key_mode)
29761 return 0;
29762
29763 return rtx_equal_p (r1, r2);
29764 }
29765
29766 /* These are the names given by the C++ front-end to vtables, and
29767 vtable-like objects. Ideally, this logic should not be here;
29768 instead, there should be some programmatic way of inquiring as
29769 to whether or not an object is a vtable. */
29770
29771 #define VTABLE_NAME_P(NAME) \
29772 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29773 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29774 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29775 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29776 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29777
29778 #ifdef NO_DOLLAR_IN_LABEL
29779 /* Return a GGC-allocated character string translating dollar signs in
29780 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
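
/* For example (illustrative): "foo$bar$baz" becomes "foo_bar_baz";
   a name with no '$', or one beginning with '$', is returned
   unchanged.  */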
29781
29782 const char *
29783 rs6000_xcoff_strip_dollar (const char *name)
29784 {
29785 char *strip, *p;
29786 const char *q;
29787 size_t len;
29788
29789 q = (const char *) strchr (name, '$');
29790
29791 if (q == 0 || q == name)
29792 return name;
29793
29794 len = strlen (name);
29795 strip = XALLOCAVEC (char, len + 1);
29796 strcpy (strip, name);
29797 p = strip + (q - name);
29798 while (p)
29799 {
29800 *p = '_';
29801 p = strchr (p + 1, '$');
29802 }
29803
29804 return ggc_alloc_string (strip, len);
29805 }
29806 #endif
29807
29808 void
29809 rs6000_output_symbol_ref (FILE *file, rtx x)
29810 {
29811 const char *name = XSTR (x, 0);
29812
29813 /* Currently C++ toc references to vtables can be emitted before it
29814 is decided whether the vtable is public or private. If this is
29815 the case, then the linker will eventually complain that there is
29816 a reference to an unknown section. Thus, for vtables only,
29817 we emit the TOC reference to reference the identifier and not the
29818 symbol. */
29819 if (VTABLE_NAME_P (name))
29820 {
29821 RS6000_OUTPUT_BASENAME (file, name);
29822 }
29823 else
29824 assemble_name (file, name);
29825 }
29826
29827 /* Output a TOC entry. We derive the entry name from what is being
29828 written. */
29829
29830 void
29831 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29832 {
29833 char buf[256];
29834 const char *name = buf;
29835 rtx base = x;
29836 HOST_WIDE_INT offset = 0;
29837
29838 gcc_assert (!TARGET_NO_TOC);
29839
29840 /* When the linker won't eliminate them, don't output duplicate
29841 TOC entries (this happens on AIX if there is any kind of TOC,
29842 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29843 CODE_LABELs. */
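/* e.g. (illustrative label syntax) a duplicate entry emits only
	.set LC..5,LC..2
   instead of a second copy of the TOC word.  */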
29844 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29845 {
29846 struct toc_hash_struct *h;
29847
29848 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29849 time because GGC is not initialized at that point. */
29850 if (toc_hash_table == NULL)
29851 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29852
29853 h = ggc_alloc<toc_hash_struct> ();
29854 h->key = x;
29855 h->key_mode = mode;
29856 h->labelno = labelno;
29857
29858 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29859 if (*found == NULL)
29860 *found = h;
29861 else /* This is indeed a duplicate.
29862 Set this label equal to that label. */
29863 {
29864 fputs ("\t.set ", file);
29865 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29866 fprintf (file, "%d,", labelno);
29867 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29868 fprintf (file, "%d\n", ((*found)->labelno));
29869
29870 #ifdef HAVE_AS_TLS
29871 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29872 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29873 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29874 {
29875 fputs ("\t.set ", file);
29876 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29877 fprintf (file, "%d,", labelno);
29878 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29879 fprintf (file, "%d\n", ((*found)->labelno));
29880 }
29881 #endif
29882 return;
29883 }
29884 }
29885
29886 /* If we're going to put a double constant in the TOC, make sure it's
29887 aligned properly when strict alignment is on. */
29888 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29889 && STRICT_ALIGNMENT
29890 && GET_MODE_BITSIZE (mode) >= 64
29891 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29892 ASM_OUTPUT_ALIGN (file, 3);
29894
29895 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29896
29897 /* Handle FP constants specially. Note that if we have a minimal
29898 TOC, things we put here aren't actually in the TOC, so we can allow
29899 FP constants. */
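/* e.g. (illustrative) on 64-bit AIX the DFmode constant 1.0 is
   emitted by the code below as
	.tc FD_3ff00000_0[TC],0x3ff0000000000000  */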
29900 if (CONST_DOUBLE_P (x)
29901 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29902 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29903 {
29904 long k[4];
29905
29906 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29907 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29908 else
29909 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29910
29911 if (TARGET_64BIT)
29912 {
29913 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29914 fputs (DOUBLE_INT_ASM_OP, file);
29915 else
29916 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29917 k[0] & 0xffffffff, k[1] & 0xffffffff,
29918 k[2] & 0xffffffff, k[3] & 0xffffffff);
29919 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29920 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29921 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29922 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29923 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29924 return;
29925 }
29926 else
29927 {
29928 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29929 fputs ("\t.long ", file);
29930 else
29931 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29932 k[0] & 0xffffffff, k[1] & 0xffffffff,
29933 k[2] & 0xffffffff, k[3] & 0xffffffff);
29934 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29935 k[0] & 0xffffffff, k[1] & 0xffffffff,
29936 k[2] & 0xffffffff, k[3] & 0xffffffff);
29937 return;
29938 }
29939 }
29940 else if (CONST_DOUBLE_P (x)
29941 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29942 {
29943 long k[2];
29944
29945 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29946 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29947 else
29948 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29949
29950 if (TARGET_64BIT)
29951 {
29952 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29953 fputs (DOUBLE_INT_ASM_OP, file);
29954 else
29955 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29956 k[0] & 0xffffffff, k[1] & 0xffffffff);
29957 fprintf (file, "0x%lx%08lx\n",
29958 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29959 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29960 return;
29961 }
29962 else
29963 {
29964 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29965 fputs ("\t.long ", file);
29966 else
29967 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29968 k[0] & 0xffffffff, k[1] & 0xffffffff);
29969 fprintf (file, "0x%lx,0x%lx\n",
29970 k[0] & 0xffffffff, k[1] & 0xffffffff);
29971 return;
29972 }
29973 }
29974 else if (CONST_DOUBLE_P (x)
29975 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29976 {
29977 long l;
29978
29979 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29980 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29981 else
29982 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29983
29984 if (TARGET_64BIT)
29985 {
29986 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29987 fputs (DOUBLE_INT_ASM_OP, file);
29988 else
29989 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29990 if (WORDS_BIG_ENDIAN)
29991 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29992 else
29993 fprintf (file, "0x%lx\n", l & 0xffffffff);
29994 return;
29995 }
29996 else
29997 {
29998 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29999 fputs ("\t.long ", file);
30000 else
30001 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
30002 fprintf (file, "0x%lx\n", l & 0xffffffff);
30003 return;
30004 }
30005 }
30006 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
30007 {
30008 unsigned HOST_WIDE_INT low;
30009 HOST_WIDE_INT high;
30010
30011 low = INTVAL (x) & 0xffffffff;
30012 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
30013
30014 /* TOC entries are always Pmode-sized, so when big-endian
30015 smaller integer constants in the TOC need to be padded.
30016 (This is still a win over putting the constants in
30017 a separate constant pool, because then we'd have
30018 to have both a TOC entry _and_ the actual constant.)
30019
30020 For a 32-bit target, CONST_INT values are loaded and shifted
30021 entirely within `low' and can be stored in one TOC entry. */
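/* e.g. (illustrative) a big-endian 64-bit TOC entry for the SImode
   constant 5 is shifted below and emitted as 0x500000000, i.e. the
   value sits in the high half of the Pmode word.  */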
30022
30023 /* It would be easy to make this work, but it isn't supported now. */
30024 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
30025
30026 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
30027 {
30028 low |= high << 32;
30029 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
30030 high = (HOST_WIDE_INT) low >> 32;
30031 low &= 0xffffffff;
30032 }
30033
30034 if (TARGET_64BIT)
30035 {
30036 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30037 fputs (DOUBLE_INT_ASM_OP, file);
30038 else
30039 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30040 (long) high & 0xffffffff, (long) low & 0xffffffff);
30041 fprintf (file, "0x%lx%08lx\n",
30042 (long) high & 0xffffffff, (long) low & 0xffffffff);
30043 return;
30044 }
30045 else
30046 {
30047 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30048 {
30049 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30050 fputs ("\t.long ", file);
30051 else
30052 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30053 (long) high & 0xffffffff, (long) low & 0xffffffff);
30054 fprintf (file, "0x%lx,0x%lx\n",
30055 (long) high & 0xffffffff, (long) low & 0xffffffff);
30056 }
30057 else
30058 {
30059 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30060 fputs ("\t.long ", file);
30061 else
30062 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30063 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30064 }
30065 return;
30066 }
30067 }
30068
30069 if (GET_CODE (x) == CONST)
30070 {
30071 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30072 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
30073
30074 base = XEXP (XEXP (x, 0), 0);
30075 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30076 }
30077
30078 switch (GET_CODE (base))
30079 {
30080 case SYMBOL_REF:
30081 name = XSTR (base, 0);
30082 break;
30083
30084 case LABEL_REF:
30085 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30086 CODE_LABEL_NUMBER (XEXP (base, 0)));
30087 break;
30088
30089 case CODE_LABEL:
30090 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30091 break;
30092
30093 default:
30094 gcc_unreachable ();
30095 }
30096
30097 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30098 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30099 else
30100 {
30101 fputs ("\t.tc ", file);
30102 RS6000_OUTPUT_BASENAME (file, name);
30103
30104 if (offset < 0)
30105 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30106 else if (offset)
30107 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30108
30109 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30110 after other TOC symbols, reducing overflow of small TOC access
30111 to [TC] symbols. */
30112 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30113 ? "[TE]," : "[TC],", file);
30114 }
30115
30116 /* Currently C++ toc references to vtables can be emitted before it
30117 is decided whether the vtable is public or private. If this is
30118 the case, then the linker will eventually complain that there is
30119 a TOC reference to an unknown section. Thus, for vtables only,
30120 we emit the TOC reference to reference the symbol and not the
30121 section. */
30122 if (VTABLE_NAME_P (name))
30123 {
30124 RS6000_OUTPUT_BASENAME (file, name);
30125 if (offset < 0)
30126 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30127 else if (offset > 0)
30128 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30129 }
30130 else
30131 output_addr_const (file, x);
30132
30133 #if HAVE_AS_TLS
30134 if (TARGET_XCOFF && SYMBOL_REF_P (base))
30135 {
30136 switch (SYMBOL_REF_TLS_MODEL (base))
30137 {
30138 case 0:
30139 break;
30140 case TLS_MODEL_LOCAL_EXEC:
30141 fputs ("@le", file);
30142 break;
30143 case TLS_MODEL_INITIAL_EXEC:
30144 fputs ("@ie", file);
30145 break;
30146 /* Use global-dynamic for local-dynamic. */
30147 case TLS_MODEL_GLOBAL_DYNAMIC:
30148 case TLS_MODEL_LOCAL_DYNAMIC:
30149 putc ('\n', file);
30150 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30151 fputs ("\t.tc .", file);
30152 RS6000_OUTPUT_BASENAME (file, name);
30153 fputs ("[TC],", file);
30154 output_addr_const (file, x);
30155 fputs ("@m", file);
30156 break;
30157 default:
30158 gcc_unreachable ();
30159 }
30160 }
30161 #endif
30162
30163 putc ('\n', file);
30164 }
30165 \f
30166 /* Output an assembler pseudo-op to write an ASCII string of N characters
30167 starting at P to FILE.
30168
30169 On the RS/6000, we have to do this using the .byte operation and
30170 write out special characters outside the quoted string.
30171 Also, the assembler is broken; very long strings are truncated,
30172 so we must artificially break them up early. */
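
/* For example (illustrative), output_ascii (file, "ok\n", 3)
   emits:
	.byte "ok"
	.byte 10
   breaking the quoted string at the unprintable newline.  */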
30173
30174 void
30175 output_ascii (FILE *file, const char *p, int n)
30176 {
30177 char c;
30178 int i, count_string;
30179 const char *for_string = "\t.byte \"";
30180 const char *for_decimal = "\t.byte ";
30181 const char *to_close = NULL;
30182
30183 count_string = 0;
30184 for (i = 0; i < n; i++)
30185 {
30186 c = *p++;
30187 if (c >= ' ' && c < 0177)
30188 {
30189 if (for_string)
30190 fputs (for_string, file);
30191 putc (c, file);
30192
30193 /* Write two quotes to get one. */
30194 if (c == '"')
30195 {
30196 putc (c, file);
30197 ++count_string;
30198 }
30199
30200 for_string = NULL;
30201 for_decimal = "\"\n\t.byte ";
30202 to_close = "\"\n";
30203 ++count_string;
30204
30205 if (count_string >= 512)
30206 {
30207 fputs (to_close, file);
30208
30209 for_string = "\t.byte \"";
30210 for_decimal = "\t.byte ";
30211 to_close = NULL;
30212 count_string = 0;
30213 }
30214 }
30215 else
30216 {
30217 if (for_decimal)
30218 fputs (for_decimal, file);
30219 fprintf (file, "%d", c);
30220
30221 for_string = "\n\t.byte \"";
30222 for_decimal = ", ";
30223 to_close = "\n";
30224 count_string = 0;
30225 }
30226 }
30227
30228 /* Now close the string if we have written one. Then end the line. */
30229 if (to_close)
30230 fputs (to_close, file);
30231 }
30232 \f
30233 /* Generate a unique section name for FILENAME for a section type
30234 represented by SECTION_DESC. Output goes into BUF.
30235
30236 SECTION_DESC can be any string, as long as it is different for each
30237 possible section type.
30238
30239 We name the section in the same manner as xlc. The name begins with an
30240 underscore followed by the filename (after stripping any leading directory
30241 names) with the last period replaced by the string SECTION_DESC. If
30242 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30243 the name. */
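
/* For example (illustrative): FILENAME "dir/foo.c" with
   SECTION_DESC "data" yields "_foodata"; non-alphanumeric
   characters before the last period are dropped.  */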
30244
30245 void
30246 rs6000_gen_section_name (char **buf, const char *filename,
30247 const char *section_desc)
30248 {
30249 const char *q, *after_last_slash, *last_period = 0;
30250 char *p;
30251 int len;
30252
30253 after_last_slash = filename;
30254 for (q = filename; *q; q++)
30255 {
30256 if (*q == '/')
30257 after_last_slash = q + 1;
30258 else if (*q == '.')
30259 last_period = q;
30260 }
30261
30262 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30263 *buf = (char *) xmalloc (len);
30264
30265 p = *buf;
30266 *p++ = '_';
30267
30268 for (q = after_last_slash; *q; q++)
30269 {
30270 if (q == last_period)
30271 {
30272 strcpy (p, section_desc);
30273 p += strlen (section_desc);
30274 break;
30275 }
30276
30277 else if (ISALNUM (*q))
30278 *p++ = *q;
30279 }
30280
30281 if (last_period == 0)
30282 strcpy (p, section_desc);
30283 else
30284 *p = '\0';
30285 }
30286 \f
30287 /* Emit profile function. */
30288
30289 void
30290 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30291 {
30292 /* Non-standard profiling for kernels, which just saves LR then calls
30293 _mcount without worrying about arg saves. The idea is to change
30294 the function prologue as little as possible as it isn't easy to
30295 account for arg save/restore code added just for _mcount. */
30296 if (TARGET_PROFILE_KERNEL)
30297 return;
30298
30299 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30300 {
30301 #ifndef NO_PROFILE_COUNTERS
30302 # define NO_PROFILE_COUNTERS 0
30303 #endif
30304 if (NO_PROFILE_COUNTERS)
30305 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30306 LCT_NORMAL, VOIDmode);
30307 else
30308 {
30309 char buf[30];
30310 const char *label_name;
30311 rtx fun;
30312
30313 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30314 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30315 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30316
30317 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30318 LCT_NORMAL, VOIDmode, fun, Pmode);
30319 }
30320 }
30321 else if (DEFAULT_ABI == ABI_DARWIN)
30322 {
30323 const char *mcount_name = RS6000_MCOUNT;
30324 int caller_addr_regno = LR_REGNO;
30325
30326 /* Be conservative and always set this, at least for now. */
30327 crtl->uses_pic_offset_table = 1;
30328
30329 #if TARGET_MACHO
30330 /* For PIC code, set up a stub and collect the caller's address
30331 from r0, which is where the prologue puts it. */
30332 if (MACHOPIC_INDIRECT
30333 && crtl->uses_pic_offset_table)
30334 caller_addr_regno = 0;
30335 #endif
30336 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30337 LCT_NORMAL, VOIDmode,
30338 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30339 }
30340 }
30341
30342 /* Write function profiler code. */
30343
30344 void
30345 output_function_profiler (FILE *file, int labelno)
30346 {
30347 char buf[100];
30348
30349 switch (DEFAULT_ABI)
30350 {
30351 default:
30352 gcc_unreachable ();
30353
30354 case ABI_V4:
30355 if (!TARGET_32BIT)
30356 {
30357 warning (0, "no profiling of 64-bit code for this ABI");
30358 return;
30359 }
30360 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30361 fprintf (file, "\tmflr %s\n", reg_names[0]);
30362 if (NO_PROFILE_COUNTERS)
30363 {
30364 asm_fprintf (file, "\tstw %s,4(%s)\n",
30365 reg_names[0], reg_names[1]);
30366 }
30367 else if (TARGET_SECURE_PLT && flag_pic)
30368 {
30369 if (TARGET_LINK_STACK)
30370 {
30371 char name[32];
30372 get_ppc476_thunk_name (name);
30373 asm_fprintf (file, "\tbl %s\n", name);
30374 }
30375 else
30376 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30377 asm_fprintf (file, "\tstw %s,4(%s)\n",
30378 reg_names[0], reg_names[1]);
30379 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30380 asm_fprintf (file, "\taddis %s,%s,",
30381 reg_names[12], reg_names[12]);
30382 assemble_name (file, buf);
30383 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30384 assemble_name (file, buf);
30385 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30386 }
30387 else if (flag_pic == 1)
30388 {
30389 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30390 asm_fprintf (file, "\tstw %s,4(%s)\n",
30391 reg_names[0], reg_names[1]);
30392 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30393 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30394 assemble_name (file, buf);
30395 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30396 }
30397 else if (flag_pic > 1)
30398 {
30399 asm_fprintf (file, "\tstw %s,4(%s)\n",
30400 reg_names[0], reg_names[1]);
30401 /* Now, we need to get the address of the label. */
30402 if (TARGET_LINK_STACK)
30403 {
30404 char name[32];
30405 get_ppc476_thunk_name (name);
30406 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30407 assemble_name (file, buf);
30408 fputs ("-.\n1:", file);
30409 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30410 asm_fprintf (file, "\taddi %s,%s,4\n",
30411 reg_names[11], reg_names[11]);
30412 }
30413 else
30414 {
30415 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30416 assemble_name (file, buf);
30417 fputs ("-.\n1:", file);
30418 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30419 }
30420 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30421 reg_names[0], reg_names[11]);
30422 asm_fprintf (file, "\tadd %s,%s,%s\n",
30423 reg_names[0], reg_names[0], reg_names[11]);
30424 }
30425 else
30426 {
30427 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30428 assemble_name (file, buf);
30429 fputs ("@ha\n", file);
30430 asm_fprintf (file, "\tstw %s,4(%s)\n",
30431 reg_names[0], reg_names[1]);
30432 asm_fprintf (file, "\tla %s,", reg_names[0]);
30433 assemble_name (file, buf);
30434 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30435 }
30436
30437 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30438 fprintf (file, "\tbl %s%s\n",
30439 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30440 break;
30441
30442 case ABI_AIX:
30443 case ABI_ELFv2:
30444 case ABI_DARWIN:
30445 /* Don't do anything, done in output_profile_hook (). */
30446 break;
30447 }
30448 }
30449
30450 \f
30451
30452 /* The following variable holds the last issued insn. */
30453
30454 static rtx_insn *last_scheduled_insn;
30455
30456 /* The following variable helps to balance issuing of load and
30457 store instructions. */
30458
30459 static int load_store_pendulum;
30460
30461 /* The following variable helps pair divide insns during scheduling. */
30462 static int divide_cnt;
30463 /* The following variable helps pair and alternate vector and vector load
30464 insns during scheduling. */
30465 static int vec_pairing;
30466
30467
30468 /* Power4 load update and store update instructions are cracked into a
30469 load or store and an integer insn which are executed in the same cycle.
30470 Branches have their own dispatch slot which does not count against the
30471 GCC issue rate, but it changes the program flow so there are no other
30472 instructions to issue in this cycle. */
30473
30474 static int
30475 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30476 {
30477 last_scheduled_insn = insn;
30478 if (GET_CODE (PATTERN (insn)) == USE
30479 || GET_CODE (PATTERN (insn)) == CLOBBER)
30480 {
30481 cached_can_issue_more = more;
30482 return cached_can_issue_more;
30483 }
30484
30485 if (insn_terminates_group_p (insn, current_group))
30486 {
30487 cached_can_issue_more = 0;
30488 return cached_can_issue_more;
30489 }
30490
30491 /* If the insn has no reservation but we reach here anyway, leave the issue count unchanged. */
30492 if (recog_memoized (insn) < 0)
30493 return more;
30494
30495 if (rs6000_sched_groups)
30496 {
30497 if (is_microcoded_insn (insn))
30498 cached_can_issue_more = 0;
30499 else if (is_cracked_insn (insn))
30500 cached_can_issue_more = more > 2 ? more - 2 : 0;
30501 else
30502 cached_can_issue_more = more - 1;
30503
30504 return cached_can_issue_more;
30505 }
30506
30507 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30508 return 0;
30509
30510 cached_can_issue_more = more - 1;
30511 return cached_can_issue_more;
30512 }
30513
30514 static int
30515 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30516 {
30517 int r = rs6000_variable_issue_1 (insn, more);
30518 if (verbose)
30519 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30520 return r;
30521 }
30522
30523 /* Adjust the cost of a scheduling dependency. Return the new cost of
30524 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
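
/* e.g. (illustrative) on POWER6 an add whose result forms the
   address of a dependent store is charged cost 3 by the TYPE_STORE
   case below, keeping address-generation producers away from their
   consumers.  */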
30525
30526 static int
30527 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30528 unsigned int)
30529 {
30530 enum attr_type attr_type;
30531
30532 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30533 return cost;
30534
30535 switch (dep_type)
30536 {
30537 case REG_DEP_TRUE:
30538 {
30539 /* Data dependency; DEP_INSN writes a register that INSN reads
30540 some cycles later. */
30541
30542 /* Separate a load from a narrower, dependent store. */
30543 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30544 && GET_CODE (PATTERN (insn)) == SET
30545 && GET_CODE (PATTERN (dep_insn)) == SET
30546 && MEM_P (XEXP (PATTERN (insn), 1))
30547 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30548 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30549 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30550 return cost + 14;
30551
30552 attr_type = get_attr_type (insn);
30553
30554 switch (attr_type)
30555 {
30556 case TYPE_JMPREG:
30557 /* Tell the first scheduling pass about the latency between
30558 a mtctr and bctr (and mtlr and br/blr). The first
30559 scheduling pass will not know about this latency since
30560 the mtctr instruction, which has the latency associated
30561 with it, will be generated by reload. */
30562 return 4;
30563 case TYPE_BRANCH:
30564 /* Leave some extra cycles between a compare and its
30565 dependent branch, to inhibit expensive mispredicts. */
30566 if ((rs6000_tune == PROCESSOR_PPC603
30567 || rs6000_tune == PROCESSOR_PPC604
30568 || rs6000_tune == PROCESSOR_PPC604e
30569 || rs6000_tune == PROCESSOR_PPC620
30570 || rs6000_tune == PROCESSOR_PPC630
30571 || rs6000_tune == PROCESSOR_PPC750
30572 || rs6000_tune == PROCESSOR_PPC7400
30573 || rs6000_tune == PROCESSOR_PPC7450
30574 || rs6000_tune == PROCESSOR_PPCE5500
30575 || rs6000_tune == PROCESSOR_PPCE6500
30576 || rs6000_tune == PROCESSOR_POWER4
30577 || rs6000_tune == PROCESSOR_POWER5
30578 || rs6000_tune == PROCESSOR_POWER7
30579 || rs6000_tune == PROCESSOR_POWER8
30580 || rs6000_tune == PROCESSOR_POWER9
30581 || rs6000_tune == PROCESSOR_CELL)
30582 && recog_memoized (dep_insn)
30583 && (INSN_CODE (dep_insn) >= 0))
30584
30585 switch (get_attr_type (dep_insn))
30586 {
30587 case TYPE_CMP:
30588 case TYPE_FPCOMPARE:
30589 case TYPE_CR_LOGICAL:
30590 return cost + 2;
30591 case TYPE_EXTS:
30592 case TYPE_MUL:
30593 if (get_attr_dot (dep_insn) == DOT_YES)
30594 return cost + 2;
30595 else
30596 break;
30597 case TYPE_SHIFT:
30598 if (get_attr_dot (dep_insn) == DOT_YES
30599 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30600 return cost + 2;
30601 else
30602 break;
30603 default:
30604 break;
30605 }
30606 break;
30607
30608 case TYPE_STORE:
30609 case TYPE_FPSTORE:
30610 if ((rs6000_tune == PROCESSOR_POWER6)
30611 && recog_memoized (dep_insn)
30612 && (INSN_CODE (dep_insn) >= 0))
30613 {
30615 if (GET_CODE (PATTERN (insn)) != SET)
30616 /* If this happens, we have to extend this to schedule
30617 optimally. Return default for now. */
30618 return cost;
30619
30620 /* Adjust the cost for the case where the value written
30621 by a fixed point operation is used as the address
30622 gen value on a store. */
30623 switch (get_attr_type (dep_insn))
30624 {
30625 case TYPE_LOAD:
30626 case TYPE_CNTLZ:
30627 {
30628 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30629 return get_attr_sign_extend (dep_insn)
30630 == SIGN_EXTEND_YES ? 6 : 4;
30631 break;
30632 }
30633 case TYPE_SHIFT:
30634 {
30635 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30636 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30637 6 : 3;
30638 break;
30639 }
30640 case TYPE_INTEGER:
30641 case TYPE_ADD:
30642 case TYPE_LOGICAL:
30643 case TYPE_EXTS:
30644 case TYPE_INSERT:
30645 {
30646 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30647 return 3;
30648 break;
30649 }
30650 case TYPE_STORE:
30651 case TYPE_FPLOAD:
30652 case TYPE_FPSTORE:
30653 {
30654 if (get_attr_update (dep_insn) == UPDATE_YES
30655 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30656 return 3;
30657 break;
30658 }
30659 case TYPE_MUL:
30660 {
30661 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30662 return 17;
30663 break;
30664 }
30665 case TYPE_DIV:
30666 {
30667 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30668 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30669 break;
30670 }
30671 default:
30672 break;
30673 }
30674 }
30675 break;
30676
30677 case TYPE_LOAD:
30678 if ((rs6000_tune == PROCESSOR_POWER6)
30679 && recog_memoized (dep_insn)
30680 && (INSN_CODE (dep_insn) >= 0))
30681 {
30682
30683 /* Adjust the cost for the case where the value written
30684 by a fixed point instruction is used within the address
30685 gen portion of a subsequent load(u)(x) */
30686 switch (get_attr_type (dep_insn))
30687 {
30688 case TYPE_LOAD:
30689 case TYPE_CNTLZ:
30690 {
30691 if (set_to_load_agen (dep_insn, insn))
30692 return get_attr_sign_extend (dep_insn)
30693 == SIGN_EXTEND_YES ? 6 : 4;
30694 break;
30695 }
30696 case TYPE_SHIFT:
30697 {
30698 if (set_to_load_agen (dep_insn, insn))
30699 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30700 6 : 3;
30701 break;
30702 }
30703 case TYPE_INTEGER:
30704 case TYPE_ADD:
30705 case TYPE_LOGICAL:
30706 case TYPE_EXTS:
30707 case TYPE_INSERT:
30708 {
30709 if (set_to_load_agen (dep_insn, insn))
30710 return 3;
30711 break;
30712 }
30713 case TYPE_STORE:
30714 case TYPE_FPLOAD:
30715 case TYPE_FPSTORE:
30716 {
30717 if (get_attr_update (dep_insn) == UPDATE_YES
30718 && set_to_load_agen (dep_insn, insn))
30719 return 3;
30720 break;
30721 }
30722 case TYPE_MUL:
30723 {
30724 if (set_to_load_agen (dep_insn, insn))
30725 return 17;
30726 break;
30727 }
30728 case TYPE_DIV:
30729 {
30730 if (set_to_load_agen (dep_insn, insn))
30731 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30732 break;
30733 }
30734 default:
30735 break;
30736 }
30737 }
30738 break;
30739
30740 case TYPE_FPLOAD:
30741 if ((rs6000_tune == PROCESSOR_POWER6)
30742 && get_attr_update (insn) == UPDATE_NO
30743 && recog_memoized (dep_insn)
30744 && (INSN_CODE (dep_insn) >= 0)
30745 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30746 return 2;
30747
30748 default:
30749 break;
30750 }
30751
30752 /* Fall out to return default cost. */
30753 }
30754 break;
30755
30756 case REG_DEP_OUTPUT:
30757 /* Output dependency; DEP_INSN writes a register that INSN writes some
30758 cycles later. */
30759 if ((rs6000_tune == PROCESSOR_POWER6)
30760 && recog_memoized (dep_insn)
30761 && (INSN_CODE (dep_insn) >= 0))
30762 {
30763 attr_type = get_attr_type (insn);
30764
30765 switch (attr_type)
30766 {
30767 case TYPE_FP:
30768 case TYPE_FPSIMPLE:
30769 if (get_attr_type (dep_insn) == TYPE_FP
30770 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30771 return 1;
30772 break;
30773 case TYPE_FPLOAD:
30774 if (get_attr_update (insn) == UPDATE_NO
30775 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30776 return 2;
30777 break;
30778 default:
30779 break;
30780 }
30781 }
30782 /* Fall through, no cost for output dependency. */
30783 /* FALLTHRU */
30784
30785 case REG_DEP_ANTI:
30786 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30787 cycles later. */
30788 return 0;
30789
30790 default:
30791 gcc_unreachable ();
30792 }
30793
30794 return cost;
30795 }
30796
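/* The POWER6 store/load address-generation cases above reduce to a small
   latency table keyed by the producing instruction's class.  A stand-alone
   sketch of that table (illustrative only; the enum and function below are
   hypothetical, not GCC interfaces):  */
#if 0
enum toy_producer { TOY_SIMPLE, TOY_LOAD_EXT, TOY_LOAD, TOY_VAR_SHIFT,
                    TOY_MUL, TOY_DIV32, TOY_DIV64 };

/* Latency seen by a consumer that feeds the produced value into
   address generation, mirroring the constants used above.  */
static int
toy_agen_latency (enum toy_producer p)
{
  switch (p)
    {
    case TOY_SIMPLE:    return 3;   /* add/logical/exts/insert */
    case TOY_LOAD_EXT:  return 6;   /* sign-extending load */
    case TOY_LOAD:      return 4;   /* plain load/cntlz */
    case TOY_VAR_SHIFT: return 6;   /* variable shift; 3 otherwise */
    case TOY_MUL:       return 17;
    case TOY_DIV32:     return 45;
    case TOY_DIV64:     return 57;
    }
  return 0;
}
#endif
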
30797 /* Debug version of rs6000_adjust_cost. */
30798
30799 static int
30800 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30801 int cost, unsigned int dw)
30802 {
30803 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30804
30805 if (ret != cost)
30806 {
30807 const char *dep;
30808
30809 switch (dep_type)
30810 {
30811 default: dep = "unknown dependency"; break;
30812 case REG_DEP_TRUE: dep = "data dependency"; break;
30813 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30814 case REG_DEP_ANTI: dep = "anti dependency"; break;
30815 }
30816
30817 fprintf (stderr,
30818 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30819 "%s, insn:\n", ret, cost, dep);
30820
30821 debug_rtx (insn);
30822 }
30823
30824 return ret;
30825 }
30826
30827 /* Return true if INSN is microcoded.
30828 Return false otherwise. */
30829
30830 static bool
30831 is_microcoded_insn (rtx_insn *insn)
30832 {
30833 if (!insn || !NONDEBUG_INSN_P (insn)
30834 || GET_CODE (PATTERN (insn)) == USE
30835 || GET_CODE (PATTERN (insn)) == CLOBBER)
30836 return false;
30837
30838 if (rs6000_tune == PROCESSOR_CELL)
30839 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30840
30841 if (rs6000_sched_groups
30842 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30843 {
30844 enum attr_type type = get_attr_type (insn);
30845 if ((type == TYPE_LOAD
30846 && get_attr_update (insn) == UPDATE_YES
30847 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30848 || ((type == TYPE_LOAD || type == TYPE_STORE)
30849 && get_attr_update (insn) == UPDATE_YES
30850 && get_attr_indexed (insn) == INDEXED_YES)
30851 || type == TYPE_MFCR)
30852 return true;
30853 }
30854
30855 return false;
30856 }
30857
30858 /* The function returns true if INSN is cracked into 2 instructions
30859 by the processor (and therefore occupies 2 issue slots). */
30860
30861 static bool
30862 is_cracked_insn (rtx_insn *insn)
30863 {
30864 if (!insn || !NONDEBUG_INSN_P (insn)
30865 || GET_CODE (PATTERN (insn)) == USE
30866 || GET_CODE (PATTERN (insn)) == CLOBBER)
30867 return false;
30868
30869 if (rs6000_sched_groups
30870 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30871 {
30872 enum attr_type type = get_attr_type (insn);
30873 if ((type == TYPE_LOAD
30874 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30875 && get_attr_update (insn) == UPDATE_NO)
30876 || (type == TYPE_LOAD
30877 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30878 && get_attr_update (insn) == UPDATE_YES
30879 && get_attr_indexed (insn) == INDEXED_NO)
30880 || (type == TYPE_STORE
30881 && get_attr_update (insn) == UPDATE_YES
30882 && get_attr_indexed (insn) == INDEXED_NO)
30883 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30884 && get_attr_update (insn) == UPDATE_YES)
30885 || (type == TYPE_CR_LOGICAL
30886 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30887 || (type == TYPE_EXTS
30888 && get_attr_dot (insn) == DOT_YES)
30889 || (type == TYPE_SHIFT
30890 && get_attr_dot (insn) == DOT_YES
30891 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30892 || (type == TYPE_MUL
30893 && get_attr_dot (insn) == DOT_YES)
30894 || type == TYPE_DIV
30895 || (type == TYPE_INSERT
30896 && get_attr_size (insn) == SIZE_32))
30897 return true;
30898 }
30899
30900 return false;
30901 }
30902
30903 /* The function returns true if INSN can be issued only from
30904 the branch slot. */
30905
30906 static bool
30907 is_branch_slot_insn (rtx_insn *insn)
30908 {
30909 if (!insn || !NONDEBUG_INSN_P (insn)
30910 || GET_CODE (PATTERN (insn)) == USE
30911 || GET_CODE (PATTERN (insn)) == CLOBBER)
30912 return false;
30913
30914 if (rs6000_sched_groups)
30915 {
30916 enum attr_type type = get_attr_type (insn);
30917 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30918 return true;
30919 return false;
30920 }
30921
30922 return false;
30923 }
30924
30925 /* Return true if OUT_INSN sets a value that is
30926 used in the address generation computation of IN_INSN. */
30927 static bool
30928 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30929 {
30930 rtx out_set, in_set;
30931
30932 /* For performance reasons, only handle the simple case where
30933 both insns are a single_set. */
30934 out_set = single_set (out_insn);
30935 if (out_set)
30936 {
30937 in_set = single_set (in_insn);
30938 if (in_set)
30939 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30940 }
30941
30942 return false;
30943 }
30944
30945 /* Try to determine base/offset/size parts of the given MEM.
30946 Return true if successful, false if all the values couldn't
30947 be determined.
30948
30949 This function only looks for REG or REG+CONST address forms.
30950 REG+REG address form will return false. */
30951
30952 static bool
30953 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30954 HOST_WIDE_INT *size)
30955 {
30956 rtx addr_rtx;
30957 if (MEM_SIZE_KNOWN_P (mem))
30958 *size = MEM_SIZE (mem);
30959 else
30960 return false;
30961
30962 addr_rtx = (XEXP (mem, 0));
30963 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30964 addr_rtx = XEXP (addr_rtx, 1);
30965
30966 *offset = 0;
30967 while (GET_CODE (addr_rtx) == PLUS
30968 && CONST_INT_P (XEXP (addr_rtx, 1)))
30969 {
30970 *offset += INTVAL (XEXP (addr_rtx, 1));
30971 addr_rtx = XEXP (addr_rtx, 0);
30972 }
30973 if (!REG_P (addr_rtx))
30974 return false;
30975
30976 *base = addr_rtx;
30977 return true;
30978 }
30979
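/* Stand-alone sketch of the offset accumulation in get_memref_parts
   (illustrative only; the toy type below is hypothetical, not RTL):
   nested (plus (plus base c1) c2) forms collapse into base + (c1 + c2).  */
#if 0
struct toy_addr
{
  struct toy_addr *inner;	/* operand 0 of a PLUS, or NULL for a reg */
  long const_term;		/* operand 1 of a PLUS (a constant) */
};

static long
toy_peel_offset (struct toy_addr *a, struct toy_addr **base)
{
  long off = 0;
  while (a->inner)		/* still a PLUS with a constant term */
    {
      off += a->const_term;
      a = a->inner;
    }
  *base = a;			/* the base register at the bottom */
  return off;
}
#endif
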
30980 /* Return true if the target storage location of MEM1 is
30981 adjacent to the target storage location of MEM2. */
30983
30984 static bool
30985 adjacent_mem_locations (rtx mem1, rtx mem2)
30986 {
30987 rtx reg1, reg2;
30988 HOST_WIDE_INT off1, size1, off2, size2;
30989
30990 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30991 && get_memref_parts (mem2, &reg2, &off2, &size2))
30992 return ((REGNO (reg1) == REGNO (reg2))
30993 && ((off1 + size1 == off2)
30994 || (off2 + size2 == off1)));
30995
30996 return false;
30997 }
30998
30999 /* This function returns true if it can be determined that the two MEM
31000 locations overlap by at least 1 byte based on base reg/offset/size. */
31001
31002 static bool
31003 mem_locations_overlap (rtx mem1, rtx mem2)
31004 {
31005 rtx reg1, reg2;
31006 HOST_WIDE_INT off1, size1, off2, size2;
31007
31008 if (get_memref_parts (mem1, &reg1, &off1, &size1)
31009 && get_memref_parts (mem2, &reg2, &off2, &size2))
31010 return ((REGNO (reg1) == REGNO (reg2))
31011 && (((off1 <= off2) && (off1 + size1 > off2))
31012 || ((off2 <= off1) && (off2 + size2 > off1))));
31013
31014 return false;
31015 }
31016
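/* Both predicates above are plain interval arithmetic on half-open
   ranges [OFF, OFF + SIZE).  A stand-alone restatement with sanity
   checks (illustrative only; nothing below is a GCC interface):  */
#if 0
#include <assert.h>
#include <stdbool.h>

static bool
toy_adjacent (long off1, long size1, long off2, long size2)
{
  return off1 + size1 == off2 || off2 + size2 == off1;
}

static bool
toy_overlap (long off1, long size1, long off2, long size2)
{
  return (off1 <= off2 && off1 + size1 > off2)
	 || (off2 <= off1 && off2 + size2 > off1);
}

static void
toy_interval_checks (void)
{
  assert (toy_adjacent (0, 4, 4, 4));	/* [0,4) then [4,8) */
  assert (!toy_overlap (0, 4, 4, 4));	/* touching is not overlap */
  assert (toy_overlap (0, 8, 4, 4));	/* [0,8) covers [4,8) */
}
#endif
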
31017 /* A C statement (sans semicolon) to update the integer scheduling
31018 priority INSN_PRIORITY (INSN). Increase the priority to execute the
31019 INSN earlier, reduce the priority to execute INSN later. Do not
31020 define this macro if you do not need to adjust the scheduling
31021 priorities of insns. */
31022
31023 static int
31024 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
31025 {
31026 rtx load_mem, str_mem;
31027 /* On machines (like the 750) which have asymmetric integer units,
31028 where one integer unit can do multiply and divides and the other
31029 can't, reduce the priority of multiply/divide so it is scheduled
31030 before other integer operations. */
31031
31032 #if 0
31033 if (! INSN_P (insn))
31034 return priority;
31035
31036 if (GET_CODE (PATTERN (insn)) == USE)
31037 return priority;
31038
31039 switch (rs6000_tune) {
31040 case PROCESSOR_PPC750:
31041 switch (get_attr_type (insn))
31042 {
31043 default:
31044 break;
31045
31046 case TYPE_MUL:
31047 case TYPE_DIV:
31048 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
31049 priority, priority);
31050 if (priority >= 0 && priority < 0x01000000)
31051 priority >>= 3;
31052 break;
31053 }
31054 }
31055 #endif
31056
31057 if (insn_must_be_first_in_group (insn)
31058 && reload_completed
31059 && current_sched_info->sched_max_insns_priority
31060 && rs6000_sched_restricted_insns_priority)
31061 {
31062
31063 /* Prioritize insns that can be dispatched only in the first
31064 dispatch slot. */
31065 if (rs6000_sched_restricted_insns_priority == 1)
31066 /* Attach highest priority to insn. This means that in
31067 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31068 precede 'priority' (critical path) considerations. */
31069 return current_sched_info->sched_max_insns_priority;
31070 else if (rs6000_sched_restricted_insns_priority == 2)
31071 /* Increase priority of insn by a minimal amount. This means that in
31072 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31073 considerations precede dispatch-slot restriction considerations. */
31074 return (priority + 1);
31075 }
31076
31077 if (rs6000_tune == PROCESSOR_POWER6
31078 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
31079 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
31080 /* Attach highest priority to insn if the scheduler has just issued two
31081 stores and this instruction is a load, or two loads and this instruction
31082 is a store. Power6 wants loads and stores scheduled alternately
31083 when possible. */
31084 return current_sched_info->sched_max_insns_priority;
31085
31086 return priority;
31087 }
31088
31089 /* Return true if the instruction is nonpipelined on the Cell. */
31090 static bool
31091 is_nonpipeline_insn (rtx_insn *insn)
31092 {
31093 enum attr_type type;
31094 if (!insn || !NONDEBUG_INSN_P (insn)
31095 || GET_CODE (PATTERN (insn)) == USE
31096 || GET_CODE (PATTERN (insn)) == CLOBBER)
31097 return false;
31098
31099 type = get_attr_type (insn);
31100 if (type == TYPE_MUL
31101 || type == TYPE_DIV
31102 || type == TYPE_SDIV
31103 || type == TYPE_DDIV
31104 || type == TYPE_SSQRT
31105 || type == TYPE_DSQRT
31106 || type == TYPE_MFCR
31107 || type == TYPE_MFCRF
31108 || type == TYPE_MFJMPR)
31109 {
31110 return true;
31111 }
31112 return false;
31113 }
31114
31115
31116 /* Return how many instructions the machine can issue per cycle. */
31117
31118 static int
31119 rs6000_issue_rate (void)
31120 {
31121 /* Unless scheduling for register pressure, use issue rate of 1 for
31122 first scheduling pass to decrease degradation. */
31123 if (!reload_completed && !flag_sched_pressure)
31124 return 1;
31125
31126 switch (rs6000_tune) {
31127 case PROCESSOR_RS64A:
31128 case PROCESSOR_PPC601: /* ? */
31129 case PROCESSOR_PPC7450:
31130 return 3;
31131 case PROCESSOR_PPC440:
31132 case PROCESSOR_PPC603:
31133 case PROCESSOR_PPC750:
31134 case PROCESSOR_PPC7400:
31135 case PROCESSOR_PPC8540:
31136 case PROCESSOR_PPC8548:
31137 case PROCESSOR_CELL:
31138 case PROCESSOR_PPCE300C2:
31139 case PROCESSOR_PPCE300C3:
31140 case PROCESSOR_PPCE500MC:
31141 case PROCESSOR_PPCE500MC64:
31142 case PROCESSOR_PPCE5500:
31143 case PROCESSOR_PPCE6500:
31144 case PROCESSOR_TITAN:
31145 return 2;
31146 case PROCESSOR_PPC476:
31147 case PROCESSOR_PPC604:
31148 case PROCESSOR_PPC604e:
31149 case PROCESSOR_PPC620:
31150 case PROCESSOR_PPC630:
31151 return 4;
31152 case PROCESSOR_POWER4:
31153 case PROCESSOR_POWER5:
31154 case PROCESSOR_POWER6:
31155 case PROCESSOR_POWER7:
31156 return 5;
31157 case PROCESSOR_POWER8:
31158 return 7;
31159 case PROCESSOR_POWER9:
31160 return 6;
31161 default:
31162 return 1;
31163 }
31164 }
31165
31166 /* Return how many instructions to look ahead for better insn
31167 scheduling. */
31168
31169 static int
31170 rs6000_use_sched_lookahead (void)
31171 {
31172 switch (rs6000_tune)
31173 {
31174 case PROCESSOR_PPC8540:
31175 case PROCESSOR_PPC8548:
31176 return 4;
31177
31178 case PROCESSOR_CELL:
31179 return (reload_completed ? 8 : 0);
31180
31181 default:
31182 return 0;
31183 }
31184 }
31185
31186 /* We are choosing insn from the ready queue. Return zero if INSN can be
31187 chosen. */
31188 static int
31189 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31190 {
31191 if (ready_index == 0)
31192 return 0;
31193
31194 if (rs6000_tune != PROCESSOR_CELL)
31195 return 0;
31196
31197 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31198
31199 if (!reload_completed
31200 || is_nonpipeline_insn (insn)
31201 || is_microcoded_insn (insn))
31202 return 1;
31203
31204 return 0;
31205 }
31206
31207 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31208 and return true. */
31209
31210 static bool
31211 find_mem_ref (rtx pat, rtx *mem_ref)
31212 {
31213 const char * fmt;
31214 int i, j;
31215
31216 /* stack_tie does not produce any real memory traffic. */
31217 if (tie_operand (pat, VOIDmode))
31218 return false;
31219
31220 if (MEM_P (pat))
31221 {
31222 *mem_ref = pat;
31223 return true;
31224 }
31225
31226 /* Recursively process the pattern. */
31227 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31228
31229 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31230 {
31231 if (fmt[i] == 'e')
31232 {
31233 if (find_mem_ref (XEXP (pat, i), mem_ref))
31234 return true;
31235 }
31236 else if (fmt[i] == 'E')
31237 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31238 {
31239 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31240 return true;
31241 }
31242 }
31243
31244 return false;
31245 }
31246
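/* find_mem_ref recurses by instruction format: 'e' operands are single
   subexpressions and 'E' operands are vectors of them.  The same
   depth-first search on a toy tree (hypothetical types, illustrative
   only):  */
#if 0
#include <stdbool.h>

struct toy_node
{
  bool is_mem;			/* plays the role of MEM_P */
  int n_kids;			/* 'e' and 'E' operands, flattened */
  struct toy_node **kids;
};

static bool
toy_find_mem (struct toy_node *n, struct toy_node **found)
{
  if (n->is_mem)
    {
      *found = n;
      return true;
    }
  for (int i = n->n_kids - 1; i >= 0; i--)
    if (toy_find_mem (n->kids[i], found))
      return true;
  return false;
}
#endif
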
31247 /* Determine if PAT is a PATTERN of a load insn. */
31248
31249 static bool
31250 is_load_insn1 (rtx pat, rtx *load_mem)
31251 {
31252 if (!pat)
31253 return false;
31254
31255 if (GET_CODE (pat) == SET)
31256 return find_mem_ref (SET_SRC (pat), load_mem);
31257
31258 if (GET_CODE (pat) == PARALLEL)
31259 {
31260 int i;
31261
31262 for (i = 0; i < XVECLEN (pat, 0); i++)
31263 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31264 return true;
31265 }
31266
31267 return false;
31268 }
31269
31270 /* Determine if INSN loads from memory. */
31271
31272 static bool
31273 is_load_insn (rtx insn, rtx *load_mem)
31274 {
31275 if (!insn || !INSN_P (insn))
31276 return false;
31277
31278 if (CALL_P (insn))
31279 return false;
31280
31281 return is_load_insn1 (PATTERN (insn), load_mem);
31282 }
31283
31284 /* Determine if PAT is a PATTERN of a store insn. */
31285
31286 static bool
31287 is_store_insn1 (rtx pat, rtx *str_mem)
31288 {
31289 if (!pat)
31290 return false;
31291
31292 if (GET_CODE (pat) == SET)
31293 return find_mem_ref (SET_DEST (pat), str_mem);
31294
31295 if (GET_CODE (pat) == PARALLEL)
31296 {
31297 int i;
31298
31299 for (i = 0; i < XVECLEN (pat, 0); i++)
31300 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31301 return true;
31302 }
31303
31304 return false;
31305 }
31306
31307 /* Determine if INSN stores to memory. */
31308
31309 static bool
31310 is_store_insn (rtx insn, rtx *str_mem)
31311 {
31312 if (!insn || !INSN_P (insn))
31313 return false;
31314
31315 return is_store_insn1 (PATTERN (insn), str_mem);
31316 }
31317
31318 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31319
31320 static bool
31321 is_power9_pairable_vec_type (enum attr_type type)
31322 {
31323 switch (type)
31324 {
31325 case TYPE_VECSIMPLE:
31326 case TYPE_VECCOMPLEX:
31327 case TYPE_VECDIV:
31328 case TYPE_VECCMP:
31329 case TYPE_VECPERM:
31330 case TYPE_VECFLOAT:
31331 case TYPE_VECFDIV:
31332 case TYPE_VECDOUBLE:
31333 return true;
31334 default:
31335 break;
31336 }
31337 return false;
31338 }
31339
31340 /* Returns whether the dependence between INSN and NEXT is considered
31341 costly by the given target. */
31342
31343 static bool
31344 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31345 {
31346 rtx insn;
31347 rtx next;
31348 rtx load_mem, str_mem;
31349
31350 /* If the flag is not enabled - no dependence is considered costly;
31351 allow all dependent insns in the same group.
31352 This is the most aggressive option. */
31353 if (rs6000_sched_costly_dep == no_dep_costly)
31354 return false;
31355
31356 /* If the flag is set to 1 - a dependence is always considered costly;
31357 do not allow dependent instructions in the same group.
31358 This is the most conservative option. */
31359 if (rs6000_sched_costly_dep == all_deps_costly)
31360 return true;
31361
31362 insn = DEP_PRO (dep);
31363 next = DEP_CON (dep);
31364
31365 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31366 && is_load_insn (next, &load_mem)
31367 && is_store_insn (insn, &str_mem))
31368 /* Prevent load after store in the same group. */
31369 return true;
31370
31371 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31372 && is_load_insn (next, &load_mem)
31373 && is_store_insn (insn, &str_mem)
31374 && DEP_TYPE (dep) == REG_DEP_TRUE
31375 && mem_locations_overlap(str_mem, load_mem))
31376 /* Prevent load after store in the same group if it is a true
31377 dependence. */
31378 return true;
31379
31380 /* The flag is set to X; dependences with latency >= X are considered costly,
31381 and will not be scheduled in the same group. */
31382 if (rs6000_sched_costly_dep <= max_dep_latency
31383 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31384 return true;
31385
31386 return false;
31387 }
31388
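/* The numeric setting handled above reduces to a threshold test on the
   latency remaining after DISTANCE cycles.  Stand-alone restatement
   (illustrative only):  */
#if 0
static int
toy_costly_p (int cost, int distance, int cutoff)
{
  /* Costly when at least CUTOFF cycles of latency remain.  */
  return cost - distance >= cutoff;
}
#endif
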
31389 /* Return the next insn after INSN that is found before TAIL is reached,
31390 skipping any "non-active" insns - insns that will not actually occupy
31391 an issue slot. Return NULL_RTX if such an insn is not found. */
31392
31393 static rtx_insn *
31394 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31395 {
31396 if (insn == NULL_RTX || insn == tail)
31397 return NULL;
31398
31399 while (1)
31400 {
31401 insn = NEXT_INSN (insn);
31402 if (insn == NULL_RTX || insn == tail)
31403 return NULL;
31404
31405 if (CALL_P (insn)
31406 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31407 || (NONJUMP_INSN_P (insn)
31408 && GET_CODE (PATTERN (insn)) != USE
31409 && GET_CODE (PATTERN (insn)) != CLOBBER
31410 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31411 break;
31412 }
31413 return insn;
31414 }
31415
31416 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31417
31418 static int
31419 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31420 {
31421 int pos;
31422 int i;
31423 rtx_insn *tmp;
31424 enum attr_type type, type2;
31425
31426 type = get_attr_type (last_scheduled_insn);
31427
31428 /* Try to issue fixed point divides back-to-back in pairs so they will be
31429 routed to separate execution units and execute in parallel. */
31430 if (type == TYPE_DIV && divide_cnt == 0)
31431 {
31432 /* First divide has been scheduled. */
31433 divide_cnt = 1;
31434
31435 /* Scan the ready list looking for another divide, if found move it
31436 to the end of the list so it is chosen next. */
31437 pos = lastpos;
31438 while (pos >= 0)
31439 {
31440 if (recog_memoized (ready[pos]) >= 0
31441 && get_attr_type (ready[pos]) == TYPE_DIV)
31442 {
31443 tmp = ready[pos];
31444 for (i = pos; i < lastpos; i++)
31445 ready[i] = ready[i + 1];
31446 ready[lastpos] = tmp;
31447 break;
31448 }
31449 pos--;
31450 }
31451 }
31452 else
31453 {
31454 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31455 divide_cnt = 0;
31456
31457 /* The best dispatch throughput for vector and vector load insns can be
31458 achieved by interleaving a vector and vector load such that they'll
31459 dispatch to the same superslice. If this pairing cannot be achieved
31460 then it is best to pair vector insns together and vector load insns
31461 together.
31462
31463 To aid in this pairing, vec_pairing maintains the current state with
31464 the following values:
31465
31466 0 : Initial state, no vecload/vector pairing has been started.
31467
31468 1 : A vecload or vector insn has been issued and a candidate for
31469 pairing has been found and moved to the end of the ready
31470 list. */
31471 if (type == TYPE_VECLOAD)
31472 {
31473 /* Issued a vecload. */
31474 if (vec_pairing == 0)
31475 {
31476 int vecload_pos = -1;
31477 /* We issued a single vecload, look for a vector insn to pair it
31478 with. If one isn't found, try to pair another vecload. */
31479 pos = lastpos;
31480 while (pos >= 0)
31481 {
31482 if (recog_memoized (ready[pos]) >= 0)
31483 {
31484 type2 = get_attr_type (ready[pos]);
31485 if (is_power9_pairable_vec_type (type2))
31486 {
31487 /* Found a vector insn to pair with, move it to the
31488 end of the ready list so it is scheduled next. */
31489 tmp = ready[pos];
31490 for (i = pos; i < lastpos; i++)
31491 ready[i] = ready[i + 1];
31492 ready[lastpos] = tmp;
31493 vec_pairing = 1;
31494 return cached_can_issue_more;
31495 }
31496 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31497 /* Remember position of first vecload seen. */
31498 vecload_pos = pos;
31499 }
31500 pos--;
31501 }
31502 if (vecload_pos >= 0)
31503 {
31504 /* Didn't find a vector to pair with but did find a vecload,
31505 move it to the end of the ready list. */
31506 tmp = ready[vecload_pos];
31507 for (i = vecload_pos; i < lastpos; i++)
31508 ready[i] = ready[i + 1];
31509 ready[lastpos] = tmp;
31510 vec_pairing = 1;
31511 return cached_can_issue_more;
31512 }
31513 }
31514 }
31515 else if (is_power9_pairable_vec_type (type))
31516 {
31517 /* Issued a vector operation. */
31518 if (vec_pairing == 0)
31519 {
31520 int vec_pos = -1;
31521 /* We issued a single vector insn, look for a vecload to pair it
31522 with. If one isn't found, try to pair another vector. */
31523 pos = lastpos;
31524 while (pos >= 0)
31525 {
31526 if (recog_memoized (ready[pos]) >= 0)
31527 {
31528 type2 = get_attr_type (ready[pos]);
31529 if (type2 == TYPE_VECLOAD)
31530 {
31531 /* Found a vecload insn to pair with, move it to the
31532 end of the ready list so it is scheduled next. */
31533 tmp = ready[pos];
31534 for (i = pos; i < lastpos; i++)
31535 ready[i] = ready[i + 1];
31536 ready[lastpos] = tmp;
31537 vec_pairing = 1;
31538 return cached_can_issue_more;
31539 }
31540 else if (is_power9_pairable_vec_type (type2)
31541 && vec_pos == -1)
31542 /* Remember position of first vector insn seen. */
31543 vec_pos = pos;
31544 }
31545 pos--;
31546 }
31547 if (vec_pos >= 0)
31548 {
31549 /* Didn't find a vecload to pair with but did find a vector
31550 insn, move it to the end of the ready list. */
31551 tmp = ready[vec_pos];
31552 for (i = vec_pos; i < lastpos; i++)
31553 ready[i] = ready[i + 1];
31554 ready[lastpos] = tmp;
31555 vec_pairing = 1;
31556 return cached_can_issue_more;
31557 }
31558 }
31559 }
31560
31561 /* We've either finished a vec/vecload pair, couldn't find an insn to
31562 continue the current pair, or the last insn had nothing to do
31563 with pairing. In any case, reset the state. */
31564 vec_pairing = 0;
31565 }
31566
31567 return cached_can_issue_more;
31568 }
31569
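/* Every ready-list move above is the same rotation: slide READY[POS] to
   READY[LASTPOS], shifting the elements in between down by one.  A
   stand-alone sketch on plain ints (illustrative only):  */
#if 0
static void
toy_move_to_end (int *ready, int pos, int lastpos)
{
  int tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;		/* the slot scheduled next */
}
#endif
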
31570 /* We are about to begin issuing insns for this clock cycle. */
31571
31572 static int
31573 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31574 rtx_insn **ready ATTRIBUTE_UNUSED,
31575 int *pn_ready ATTRIBUTE_UNUSED,
31576 int clock_var ATTRIBUTE_UNUSED)
31577 {
31578 int n_ready = *pn_ready;
31579
31580 if (sched_verbose)
31581 fprintf (dump, "// rs6000_sched_reorder :\n");
31582
31583 /* Reorder the ready list, if the next insn to be scheduled
31584 is a nonpipelined insn. */
31585 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31586 {
31587 if (is_nonpipeline_insn (ready[n_ready - 1])
31588 && (recog_memoized (ready[n_ready - 2]) > 0))
31589 /* Simply swap first two insns. */
31590 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31591 }
31592
31593 if (rs6000_tune == PROCESSOR_POWER6)
31594 load_store_pendulum = 0;
31595
31596 return rs6000_issue_rate ();
31597 }
31598
31599 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31600
31601 static int
31602 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31603 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31604 {
31605 if (sched_verbose)
31606 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31607
31608 /* For Power6, we need to handle some special cases to try and keep the
31609 store queue from overflowing and triggering expensive flushes.
31610
31611 This code monitors how load and store instructions are being issued
31612 and skews the ready list one way or the other to increase the likelihood
31613 that a desired instruction is issued at the proper time.
31614
31615 A couple of things are done. First, we maintain a "load_store_pendulum"
31616 to track the current state of load/store issue.
31617
31618 - If the pendulum is at zero, then no loads or stores have been
31619 issued in the current cycle so we do nothing.
31620
31621 - If the pendulum is 1, then a single load has been issued in this
31622 cycle and we attempt to locate another load in the ready list to
31623 issue with it.
31624
31625 - If the pendulum is -2, then two stores have already been
31626 issued in this cycle, so we increase the priority of the first load
31627 in the ready list to increase its likelihood of being chosen first
31628 in the next cycle.
31629
31630 - If the pendulum is -1, then a single store has been issued in this
31631 cycle and we attempt to locate another store in the ready list to
31632 issue with it, preferring a store to an adjacent memory location to
31633 facilitate store pairing in the store queue.
31634
31635 - If the pendulum is 2, then two loads have already been
31636 issued in this cycle, so we increase the priority of the first store
31637 in the ready list to increase its likelihood of being chosen first
31638 in the next cycle.
31639
31640 - If the pendulum < -2 or > 2, then do nothing.
31641
31642 Note: This code covers the most common scenarios. There exist
31643 non-load/store instructions which make use of the LSU and which
31644 would need to be accounted for to strictly model the behavior
31645 of the machine. Those instructions are currently unaccounted
31646 for to help minimize compile time overhead of this code.
31647 */
31648 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31649 {
31650 int pos;
31651 int i;
31652 rtx_insn *tmp;
31653 rtx load_mem, str_mem;
31654
31655 if (is_store_insn (last_scheduled_insn, &str_mem))
31656 /* Issuing a store, swing the load_store_pendulum to the left */
31657 load_store_pendulum--;
31658 else if (is_load_insn (last_scheduled_insn, &load_mem))
31659 /* Issuing a load, swing the load_store_pendulum to the right */
31660 load_store_pendulum++;
31661 else
31662 return cached_can_issue_more;
31663
31664 /* If the pendulum is balanced, or there is only one instruction on
31665 the ready list, then all is well, so return. */
31666 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31667 return cached_can_issue_more;
31668
31669 if (load_store_pendulum == 1)
31670 {
31671 /* A load has been issued in this cycle. Scan the ready list
31672 for another load to issue with it */
31673 pos = *pn_ready-1;
31674
31675 while (pos >= 0)
31676 {
31677 if (is_load_insn (ready[pos], &load_mem))
31678 {
31679 /* Found a load. Move it to the head of the ready list,
31680 and adjust its priority so that it is more likely to
31681 stay there */
31682 tmp = ready[pos];
31683 for (i=pos; i<*pn_ready-1; i++)
31684 ready[i] = ready[i + 1];
31685 ready[*pn_ready-1] = tmp;
31686
31687 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31688 INSN_PRIORITY (tmp)++;
31689 break;
31690 }
31691 pos--;
31692 }
31693 }
31694 else if (load_store_pendulum == -2)
31695 {
31696 /* Two stores have been issued in this cycle. Increase the
31697 priority of the first load in the ready list to favor it for
31698 issuing in the next cycle. */
31699 pos = *pn_ready-1;
31700
31701 while (pos >= 0)
31702 {
31703 if (is_load_insn (ready[pos], &load_mem)
31704 && !sel_sched_p ()
31705 && INSN_PRIORITY_KNOWN (ready[pos]))
31706 {
31707 INSN_PRIORITY (ready[pos])++;
31708
31709 /* Adjust the pendulum to account for the fact that a load
31710 was found and increased in priority. This is to prevent
31711 increasing the priority of multiple loads */
31712 load_store_pendulum--;
31713
31714 break;
31715 }
31716 pos--;
31717 }
31718 }
31719 else if (load_store_pendulum == -1)
31720 {
31721 /* A store has been issued in this cycle. Scan the ready list for
31722 another store to issue with it, preferring a store to an adjacent
31723 memory location */
31724 int first_store_pos = -1;
31725
31726 pos = *pn_ready-1;
31727
31728 while (pos >= 0)
31729 {
31730 if (is_store_insn (ready[pos], &str_mem))
31731 {
31732 rtx str_mem2;
31733 /* Maintain the index of the first store found on the
31734 list */
31735 if (first_store_pos == -1)
31736 first_store_pos = pos;
31737
31738 if (is_store_insn (last_scheduled_insn, &str_mem2)
31739 && adjacent_mem_locations (str_mem, str_mem2))
31740 {
31741 /* Found an adjacent store. Move it to the head of the
31742 ready list, and adjust its priority so that it is
31743 more likely to stay there */
31744 tmp = ready[pos];
31745 for (i=pos; i<*pn_ready-1; i++)
31746 ready[i] = ready[i + 1];
31747 ready[*pn_ready-1] = tmp;
31748
31749 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31750 INSN_PRIORITY (tmp)++;
31751
31752 first_store_pos = -1;
31753
31754 break;
31755 }
31756 }
31757 pos--;
31758 }
31759
31760 if (first_store_pos >= 0)
31761 {
31762 /* An adjacent store wasn't found, but a non-adjacent store was,
31763 so move the non-adjacent store to the front of the ready
31764 list, and adjust its priority so that it is more likely to
31765 stay there. */
31766 tmp = ready[first_store_pos];
31767 for (i=first_store_pos; i<*pn_ready-1; i++)
31768 ready[i] = ready[i + 1];
31769 ready[*pn_ready-1] = tmp;
31770 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31771 INSN_PRIORITY (tmp)++;
31772 }
31773 }
31774 else if (load_store_pendulum == 2)
31775 {
31776 /* Two loads have been issued in this cycle. Increase the priority
31777 of the first store in the ready list to favor it for issuing in
31778 the next cycle. */
31779 pos = *pn_ready-1;
31780
31781 while (pos >= 0)
31782 {
31783 if (is_store_insn (ready[pos], &str_mem)
31784 && !sel_sched_p ()
31785 && INSN_PRIORITY_KNOWN (ready[pos]))
31786 {
31787 INSN_PRIORITY (ready[pos])++;
31788
31789 /* Adjust the pendulum to account for the fact that a store
31790 was found and increased in priority. This is to prevent
31791 increasing the priority of multiple stores */
31792 load_store_pendulum++;
31793
31794 break;
31795 }
31796 pos--;
31797 }
31798 }
31799 }
31800
31801 /* Do Power9 dependent reordering if necessary. */
31802 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31803 && recog_memoized (last_scheduled_insn) >= 0)
31804 return power9_sched_reorder2 (ready, *pn_ready - 1);
31805
31806 return cached_can_issue_more;
31807 }
31808
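/* Stand-alone model of the POWER6 load/store pendulum described above
   (illustrative only; it compresses the per-cycle reset that
   rs6000_sched_reorder performs):  */
#if 0
#include <stdio.h>

static void
toy_pendulum (const char *trace)	/* 'L' = load issued, 'S' = store */
{
  int pendulum = 0;
  for (const char *p = trace; *p; p++)
    {
      pendulum += (*p == 'L') ? 1 : -1;
      if (pendulum == 1)
	printf ("one load issued: look for a second load\n");
      else if (pendulum == -1)
	printf ("one store issued: look for an adjacent store\n");
      else if (pendulum == 2)
	printf ("two loads issued: boost the first ready store\n");
      else if (pendulum == -2)
	printf ("two stores issued: boost the first ready load\n");
    }
}
#endif
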
31809 /* Return whether the presence of INSN causes a dispatch group termination
31810 of group WHICH_GROUP.
31811
31812 If WHICH_GROUP == current_group, this function will return true if INSN
31813 causes the termination of the current group (i.e, the dispatch group to
31814 which INSN belongs). This means that INSN will be the last insn in the
31815 group it belongs to.
31816
31817 If WHICH_GROUP == previous_group, this function will return true if INSN
31818 causes the termination of the previous group (i.e, the dispatch group that
31819 precedes the group to which INSN belongs). This means that INSN will be
31820 the first insn in the group it belongs to). */
31821
31822 static bool
31823 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31824 {
31825 bool first, last;
31826
31827 if (! insn)
31828 return false;
31829
31830 first = insn_must_be_first_in_group (insn);
31831 last = insn_must_be_last_in_group (insn);
31832
31833 if (first && last)
31834 return true;
31835
31836 if (which_group == current_group)
31837 return last;
31838 else if (which_group == previous_group)
31839 return first;
31840
31841 return false;
31842 }
31843
31844
31845 static bool
31846 insn_must_be_first_in_group (rtx_insn *insn)
31847 {
31848 enum attr_type type;
31849
31850 if (!insn
31851 || NOTE_P (insn)
31852 || DEBUG_INSN_P (insn)
31853 || GET_CODE (PATTERN (insn)) == USE
31854 || GET_CODE (PATTERN (insn)) == CLOBBER)
31855 return false;
31856
31857 switch (rs6000_tune)
31858 {
31859 case PROCESSOR_POWER5:
31860 if (is_cracked_insn (insn))
31861 return true;
31862 /* FALLTHRU */
31863 case PROCESSOR_POWER4:
31864 if (is_microcoded_insn (insn))
31865 return true;
31866
31867 if (!rs6000_sched_groups)
31868 return false;
31869
31870 type = get_attr_type (insn);
31871
31872 switch (type)
31873 {
31874 case TYPE_MFCR:
31875 case TYPE_MFCRF:
31876 case TYPE_MTCR:
31877 case TYPE_CR_LOGICAL:
31878 case TYPE_MTJMPR:
31879 case TYPE_MFJMPR:
31880 case TYPE_DIV:
31881 case TYPE_LOAD_L:
31882 case TYPE_STORE_C:
31883 case TYPE_ISYNC:
31884 case TYPE_SYNC:
31885 return true;
31886 default:
31887 break;
31888 }
31889 break;
31890 case PROCESSOR_POWER6:
31891 type = get_attr_type (insn);
31892
31893 switch (type)
31894 {
31895 case TYPE_EXTS:
31896 case TYPE_CNTLZ:
31897 case TYPE_TRAP:
31898 case TYPE_MUL:
31899 case TYPE_INSERT:
31900 case TYPE_FPCOMPARE:
31901 case TYPE_MFCR:
31902 case TYPE_MTCR:
31903 case TYPE_MFJMPR:
31904 case TYPE_MTJMPR:
31905 case TYPE_ISYNC:
31906 case TYPE_SYNC:
31907 case TYPE_LOAD_L:
31908 case TYPE_STORE_C:
31909 return true;
31910 case TYPE_SHIFT:
31911 if (get_attr_dot (insn) == DOT_NO
31912 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31913 return true;
31914 else
31915 break;
31916 case TYPE_DIV:
31917 if (get_attr_size (insn) == SIZE_32)
31918 return true;
31919 else
31920 break;
31921 case TYPE_LOAD:
31922 case TYPE_STORE:
31923 case TYPE_FPLOAD:
31924 case TYPE_FPSTORE:
31925 if (get_attr_update (insn) == UPDATE_YES)
31926 return true;
31927 else
31928 break;
31929 default:
31930 break;
31931 }
31932 break;
31933 case PROCESSOR_POWER7:
31934 type = get_attr_type (insn);
31935
31936 switch (type)
31937 {
31938 case TYPE_CR_LOGICAL:
31939 case TYPE_MFCR:
31940 case TYPE_MFCRF:
31941 case TYPE_MTCR:
31942 case TYPE_DIV:
31943 case TYPE_ISYNC:
31944 case TYPE_LOAD_L:
31945 case TYPE_STORE_C:
31946 case TYPE_MFJMPR:
31947 case TYPE_MTJMPR:
31948 return true;
31949 case TYPE_MUL:
31950 case TYPE_SHIFT:
31951 case TYPE_EXTS:
31952 if (get_attr_dot (insn) == DOT_YES)
31953 return true;
31954 else
31955 break;
31956 case TYPE_LOAD:
31957 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31958 || get_attr_update (insn) == UPDATE_YES)
31959 return true;
31960 else
31961 break;
31962 case TYPE_STORE:
31963 case TYPE_FPLOAD:
31964 case TYPE_FPSTORE:
31965 if (get_attr_update (insn) == UPDATE_YES)
31966 return true;
31967 else
31968 break;
31969 default:
31970 break;
31971 }
31972 break;
31973 case PROCESSOR_POWER8:
31974 type = get_attr_type (insn);
31975
31976 switch (type)
31977 {
31978 case TYPE_CR_LOGICAL:
31979 case TYPE_MFCR:
31980 case TYPE_MFCRF:
31981 case TYPE_MTCR:
31982 case TYPE_SYNC:
31983 case TYPE_ISYNC:
31984 case TYPE_LOAD_L:
31985 case TYPE_STORE_C:
31986 case TYPE_VECSTORE:
31987 case TYPE_MFJMPR:
31988 case TYPE_MTJMPR:
31989 return true;
31990 case TYPE_SHIFT:
31991 case TYPE_EXTS:
31992 case TYPE_MUL:
31993 if (get_attr_dot (insn) == DOT_YES)
31994 return true;
31995 else
31996 break;
31997 case TYPE_LOAD:
31998 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31999 || get_attr_update (insn) == UPDATE_YES)
32000 return true;
32001 else
32002 break;
32003 case TYPE_STORE:
32004 if (get_attr_update (insn) == UPDATE_YES
32005 && get_attr_indexed (insn) == INDEXED_YES)
32006 return true;
32007 else
32008 break;
32009 default:
32010 break;
32011 }
32012 break;
32013 default:
32014 break;
32015 }
32016
32017 return false;
32018 }
32019
32020 static bool
32021 insn_must_be_last_in_group (rtx_insn *insn)
32022 {
32023 enum attr_type type;
32024
32025 if (!insn
32026 || NOTE_P (insn)
32027 || DEBUG_INSN_P (insn)
32028 || GET_CODE (PATTERN (insn)) == USE
32029 || GET_CODE (PATTERN (insn)) == CLOBBER)
32030 return false;
32031
32032 switch (rs6000_tune) {
32033 case PROCESSOR_POWER4:
32034 case PROCESSOR_POWER5:
32035 if (is_microcoded_insn (insn))
32036 return true;
32037
32038 if (is_branch_slot_insn (insn))
32039 return true;
32040
32041 break;
32042 case PROCESSOR_POWER6:
32043 type = get_attr_type (insn);
32044
32045 switch (type)
32046 {
32047 case TYPE_EXTS:
32048 case TYPE_CNTLZ:
32049 case TYPE_TRAP:
32050 case TYPE_MUL:
32051 case TYPE_FPCOMPARE:
32052 case TYPE_MFCR:
32053 case TYPE_MTCR:
32054 case TYPE_MFJMPR:
32055 case TYPE_MTJMPR:
32056 case TYPE_ISYNC:
32057 case TYPE_SYNC:
32058 case TYPE_LOAD_L:
32059 case TYPE_STORE_C:
32060 return true;
32061 case TYPE_SHIFT:
32062 if (get_attr_dot (insn) == DOT_NO
32063 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32064 return true;
32065 else
32066 break;
32067 case TYPE_DIV:
32068 if (get_attr_size (insn) == SIZE_32)
32069 return true;
32070 else
32071 break;
32072 default:
32073 break;
32074 }
32075 break;
32076 case PROCESSOR_POWER7:
32077 type = get_attr_type (insn);
32078
32079 switch (type)
32080 {
32081 case TYPE_ISYNC:
32082 case TYPE_SYNC:
32083 case TYPE_LOAD_L:
32084 case TYPE_STORE_C:
32085 return true;
32086 case TYPE_LOAD:
32087 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32088 && get_attr_update (insn) == UPDATE_YES)
32089 return true;
32090 else
32091 break;
32092 case TYPE_STORE:
32093 if (get_attr_update (insn) == UPDATE_YES
32094 && get_attr_indexed (insn) == INDEXED_YES)
32095 return true;
32096 else
32097 break;
32098 default:
32099 break;
32100 }
32101 break;
32102 case PROCESSOR_POWER8:
32103 type = get_attr_type (insn);
32104
32105 switch (type)
32106 {
32107 case TYPE_MFCR:
32108 case TYPE_MTCR:
32109 case TYPE_ISYNC:
32110 case TYPE_SYNC:
32111 case TYPE_LOAD_L:
32112 case TYPE_STORE_C:
32113 return true;
32114 case TYPE_LOAD:
32115 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32116 && get_attr_update (insn) == UPDATE_YES)
32117 return true;
32118 else
32119 break;
32120 case TYPE_STORE:
32121 if (get_attr_update (insn) == UPDATE_YES
32122 && get_attr_indexed (insn) == INDEXED_YES)
32123 return true;
32124 else
32125 break;
32126 default:
32127 break;
32128 }
32129 break;
32130 default:
32131 break;
32132 }
32133
32134 return false;
32135 }
32136
32137 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32138 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32139
32140 static bool
32141 is_costly_group (rtx *group_insns, rtx next_insn)
32142 {
32143 int i;
32144 int issue_rate = rs6000_issue_rate ();
32145
32146 for (i = 0; i < issue_rate; i++)
32147 {
32148 sd_iterator_def sd_it;
32149 dep_t dep;
32150 rtx insn = group_insns[i];
32151
32152 if (!insn)
32153 continue;
32154
32155 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32156 {
32157 rtx next = DEP_CON (dep);
32158
32159 if (next == next_insn
32160 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32161 return true;
32162 }
32163 }
32164
32165 return false;
32166 }
32167
32168 /* Utility of the function redefine_groups.
32169 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32170 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32171 to keep it "far" (in a separate group) from GROUP_INSNS, following
32172 one of the following schemes, depending on the value of the flag
32173 -minsert-sched-nops = X:
32174 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32175 in order to force NEXT_INSN into a separate group.
32176 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32177 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32178 insertion (has a group just ended, how many vacant issue slots remain in the
32179 last group, and how many dispatch groups were encountered so far). */
32180
32181 static int
32182 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32183 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32184 int *group_count)
32185 {
32186 rtx nop;
32187 bool force;
32188 int issue_rate = rs6000_issue_rate ();
32189 bool end = *group_end;
32190 int i;
32191
32192 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32193 return can_issue_more;
32194
32195 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32196 return can_issue_more;
32197
32198 force = is_costly_group (group_insns, next_insn);
32199 if (!force)
32200 return can_issue_more;
32201
32202 if (sched_verbose > 6)
32203 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
32204 *group_count ,can_issue_more);
32205
32206 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32207 {
32208 if (*group_end)
32209 can_issue_more = 0;
32210
32211 /* Since only a branch can be issued in the last issue_slot, it is
32212 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32213 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32214 in this case the last nop will start a new group and the branch
32215 will be forced to the new group. */
32216 if (can_issue_more && !is_branch_slot_insn (next_insn))
32217 can_issue_more--;
32218
32219 /* Do we have a special group ending nop? */
32220 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32221 || rs6000_tune == PROCESSOR_POWER8)
32222 {
32223 nop = gen_group_ending_nop ();
32224 emit_insn_before (nop, next_insn);
32225 can_issue_more = 0;
32226 }
32227 else
32228 while (can_issue_more > 0)
32229 {
32230 nop = gen_nop ();
32231 emit_insn_before (nop, next_insn);
32232 can_issue_more--;
32233 }
32234
32235 *group_end = true;
32236 return 0;
32237 }
32238
32239 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32240 {
32241 int n_nops = rs6000_sched_insert_nops;
32242
32243 /* Nops can't be issued from the branch slot, so the effective
32244 issue_rate for nops is 'issue_rate - 1'. */
32245 if (can_issue_more == 0)
32246 can_issue_more = issue_rate;
32247 can_issue_more--;
32248 if (can_issue_more == 0)
32249 {
32250 can_issue_more = issue_rate - 1;
32251 (*group_count)++;
32252 end = true;
32253 for (i = 0; i < issue_rate; i++)
32254 {
32255 group_insns[i] = 0;
32256 }
32257 }
32258
32259 while (n_nops > 0)
32260 {
32261 nop = gen_nop ();
32262 emit_insn_before (nop, next_insn);
32263 if (can_issue_more == issue_rate - 1) /* new group begins */
32264 end = false;
32265 can_issue_more--;
32266 if (can_issue_more == 0)
32267 {
32268 can_issue_more = issue_rate - 1;
32269 (*group_count)++;
32270 end = true;
32271 for (i = 0; i < issue_rate; i++)
32272 {
32273 group_insns[i] = 0;
32274 }
32275 }
32276 n_nops--;
32277 }
32278
32279 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32280 can_issue_more++;
32281
32282 /* Is next_insn going to start a new group? */
32283 *group_end
32284 = (end
32285 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32286 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32287 || (can_issue_more < issue_rate &&
32288 insn_terminates_group_p (next_insn, previous_group)));
32289 if (*group_end && end)
32290 (*group_count)--;
32291
32292 if (sched_verbose > 6)
32293 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32294 *group_count, can_issue_more);
32295 return can_issue_more;
32296 }
32297
32298 return can_issue_more;
32299 }
32300
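/* Under the "exact" scheme above, the number of nops needed to push
   NEXT_INSN into a new group is CAN_ISSUE_MORE - 1 when NEXT_INSN is
   not a branch (the branch slot cannot hold a nop) and CAN_ISSUE_MORE
   when it is.  Stand-alone restatement (illustrative only):  */
#if 0
static int
toy_nops_needed (int can_issue_more, int next_is_branch)
{
  if (can_issue_more == 0)
    return 0;			/* the group has already ended */
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}
#endif
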
32301 /* This function tries to synch the dispatch groups that the compiler "sees"
32302 with the dispatch groups that the processor dispatcher is expected to
32303 form in practice. It tries to achieve this synchronization by forcing the
32304 estimated processor grouping on the compiler (as opposed to the function
32305 'pad_groups' which tries to force the scheduler's grouping on the processor).
32306
32307 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32308 examines the (estimated) dispatch groups that will be formed by the processor
32309 dispatcher. It marks these group boundaries to reflect the estimated
32310 processor grouping, overriding the grouping that the scheduler had marked.
32311 Depending on the value of the flag '-minsert-sched-nops' this function can
32312 force certain insns into separate groups or force a certain distance between
32313 them by inserting nops, for example, if there exists a "costly dependence"
32314 between the insns.
32315
32316 The function estimates the group boundaries that the processor will form as
32317 follows: It keeps track of how many vacant issue slots are available after
32318 each insn. A subsequent insn will start a new group if one of the following
32319 4 cases applies:
32320 - no more vacant issue slots remain in the current dispatch group.
32321 - only the last issue slot, which is the branch slot, is vacant, but the next
32322 insn is not a branch.
32323 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32324 which means that a cracked insn (which occupies two issue slots) can't be
32325 issued in this group.
32326 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32327 start a new group. */
32328
32329 static int
32330 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32331 rtx_insn *tail)
32332 {
32333 rtx_insn *insn, *next_insn;
32334 int issue_rate;
32335 int can_issue_more;
32336 int slot, i;
32337 bool group_end;
32338 int group_count = 0;
32339 rtx *group_insns;
32340
32341 /* Initialize. */
32342 issue_rate = rs6000_issue_rate ();
32343 group_insns = XALLOCAVEC (rtx, issue_rate);
32344 for (i = 0; i < issue_rate; i++)
32345 {
32346 group_insns[i] = 0;
32347 }
32348 can_issue_more = issue_rate;
32349 slot = 0;
32350 insn = get_next_active_insn (prev_head_insn, tail);
32351 group_end = false;
32352
32353 while (insn != NULL_RTX)
32354 {
32355 slot = (issue_rate - can_issue_more);
32356 group_insns[slot] = insn;
32357 can_issue_more =
32358 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32359 if (insn_terminates_group_p (insn, current_group))
32360 can_issue_more = 0;
32361
32362 next_insn = get_next_active_insn (insn, tail);
32363 if (next_insn == NULL_RTX)
32364 return group_count + 1;
32365
32366 /* Is next_insn going to start a new group? */
32367 group_end
32368 = (can_issue_more == 0
32369 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32370 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32371 || (can_issue_more < issue_rate &&
32372 insn_terminates_group_p (next_insn, previous_group)));
32373
32374 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32375 next_insn, &group_end, can_issue_more,
32376 &group_count);
32377
32378 if (group_end)
32379 {
32380 group_count++;
32381 can_issue_more = 0;
32382 for (i = 0; i < issue_rate; i++)
32383 {
32384 group_insns[i] = 0;
32385 }
32386 }
32387
32388 if (GET_MODE (next_insn) == TImode && can_issue_more)
32389 PUT_MODE (next_insn, VOIDmode);
32390 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32391 PUT_MODE (next_insn, TImode);
32392
32393 insn = next_insn;
32394 if (can_issue_more == 0)
32395 can_issue_more = issue_rate;
32396 } /* while */
32397
32398 return group_count;
32399 }
32400
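/* The four group-termination tests used in redefine_groups can be read
   as one predicate over the vacant-slot count.  Stand-alone restatement
   (illustrative only; the names below are hypothetical):  */
#if 0
#include <stdbool.h>

static bool
toy_group_end (int can_issue_more, int issue_rate,
	       bool next_is_branch, bool next_is_cracked,
	       bool next_must_be_first)
{
  return (can_issue_more == 0			/* no slots left */
	  || (can_issue_more == 1 && !next_is_branch)
	  || (can_issue_more <= 2 && next_is_cracked)
	  || (can_issue_more < issue_rate && next_must_be_first));
}
#endif
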
32401 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32402 dispatch group boundaries that the scheduler had marked. Pad with nops
32403 any dispatch groups which have vacant issue slots, in order to force the
32404 scheduler's grouping on the processor dispatcher. The function
32405 returns the number of dispatch groups found. */
32406
32407 static int
32408 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32409 rtx_insn *tail)
32410 {
32411 rtx_insn *insn, *next_insn;
32412 rtx nop;
32413 int issue_rate;
32414 int can_issue_more;
32415 int group_end;
32416 int group_count = 0;
32417
32418 /* Initialize issue_rate. */
32419 issue_rate = rs6000_issue_rate ();
32420 can_issue_more = issue_rate;
32421
32422 insn = get_next_active_insn (prev_head_insn, tail);
32423 next_insn = get_next_active_insn (insn, tail);
32424
32425 while (insn != NULL_RTX)
32426 {
32427 can_issue_more =
32428 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32429
32430 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32431
32432 if (next_insn == NULL_RTX)
32433 break;
32434
32435 if (group_end)
32436 {
32437 /* If the scheduler had marked group termination at this location
32438 (between insn and next_insn), and neither insn nor next_insn will
32439 force group termination, pad the group with nops to force group
32440 termination. */
32441 if (can_issue_more
32442 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32443 && !insn_terminates_group_p (insn, current_group)
32444 && !insn_terminates_group_p (next_insn, previous_group))
32445 {
32446 if (!is_branch_slot_insn (next_insn))
32447 can_issue_more--;
32448
32449 while (can_issue_more)
32450 {
32451 nop = gen_nop ();
32452 emit_insn_before (nop, next_insn);
32453 can_issue_more--;
32454 }
32455 }
32456
32457 can_issue_more = issue_rate;
32458 group_count++;
32459 }
32460
32461 insn = next_insn;
32462 next_insn = get_next_active_insn (insn, tail);
32463 }
32464
32465 return group_count;
32466 }
32467
32468 /* We're beginning a new block. Initialize data structures as necessary. */
32469
32470 static void
32471 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32472 int sched_verbose ATTRIBUTE_UNUSED,
32473 int max_ready ATTRIBUTE_UNUSED)
32474 {
32475 last_scheduled_insn = NULL;
32476 load_store_pendulum = 0;
32477 divide_cnt = 0;
32478 vec_pairing = 0;
32479 }
32480
32481 /* The following function is called at the end of scheduling BB.
32482 After reload, it inserts nops to enforce insn group bundling. */
32483
32484 static void
32485 rs6000_sched_finish (FILE *dump, int sched_verbose)
32486 {
32487 int n_groups;
32488
32489 if (sched_verbose)
32490 fprintf (dump, "=== Finishing schedule.\n");
32491
32492 if (reload_completed && rs6000_sched_groups)
32493 {
32494 /* Do not run sched_finish hook when selective scheduling enabled. */
32495 if (sel_sched_p ())
32496 return;
32497
32498 if (rs6000_sched_insert_nops == sched_finish_none)
32499 return;
32500
32501 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32502 n_groups = pad_groups (dump, sched_verbose,
32503 current_sched_info->prev_head,
32504 current_sched_info->next_tail);
32505 else
32506 n_groups = redefine_groups (dump, sched_verbose,
32507 current_sched_info->prev_head,
32508 current_sched_info->next_tail);
32509
32510 if (sched_verbose >= 6)
32511 {
32512 fprintf (dump, "ngroups = %d\n", n_groups);
32513 print_rtl (dump, current_sched_info->prev_head);
32514 fprintf (dump, "Done finish_sched\n");
32515 }
32516 }
32517 }
32518
32519 struct rs6000_sched_context
32520 {
32521 short cached_can_issue_more;
32522 rtx_insn *last_scheduled_insn;
32523 int load_store_pendulum;
32524 int divide_cnt;
32525 int vec_pairing;
32526 };
32527
32528 typedef struct rs6000_sched_context rs6000_sched_context_def;
32529 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32530
32531 /* Allocate store for new scheduling context. */
32532 static void *
32533 rs6000_alloc_sched_context (void)
32534 {
32535 return xmalloc (sizeof (rs6000_sched_context_def));
32536 }
32537
32538 /* If CLEAN_P is true, initialize _SC with clean data;
32539 otherwise initialize it from the current global context. */
32540 static void
32541 rs6000_init_sched_context (void *_sc, bool clean_p)
32542 {
32543 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32544
32545 if (clean_p)
32546 {
32547 sc->cached_can_issue_more = 0;
32548 sc->last_scheduled_insn = NULL;
32549 sc->load_store_pendulum = 0;
32550 sc->divide_cnt = 0;
32551 sc->vec_pairing = 0;
32552 }
32553 else
32554 {
32555 sc->cached_can_issue_more = cached_can_issue_more;
32556 sc->last_scheduled_insn = last_scheduled_insn;
32557 sc->load_store_pendulum = load_store_pendulum;
32558 sc->divide_cnt = divide_cnt;
32559 sc->vec_pairing = vec_pairing;
32560 }
32561 }
32562
32563 /* Sets the global scheduling context to the one pointed to by _SC. */
32564 static void
32565 rs6000_set_sched_context (void *_sc)
32566 {
32567 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32568
32569 gcc_assert (sc != NULL);
32570
32571 cached_can_issue_more = sc->cached_can_issue_more;
32572 last_scheduled_insn = sc->last_scheduled_insn;
32573 load_store_pendulum = sc->load_store_pendulum;
32574 divide_cnt = sc->divide_cnt;
32575 vec_pairing = sc->vec_pairing;
32576 }
32577
32578 /* Free _SC. */
32579 static void
32580 rs6000_free_sched_context (void *_sc)
32581 {
32582 gcc_assert (_sc != NULL);
32583
32584 free (_sc);
32585 }
32586
32587 static bool
32588 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32589 {
32590 switch (get_attr_type (insn))
32591 {
32592 case TYPE_DIV:
32593 case TYPE_SDIV:
32594 case TYPE_DDIV:
32595 case TYPE_VECDIV:
32596 case TYPE_SSQRT:
32597 case TYPE_DSQRT:
32598 return false;
32599
32600 default:
32601 return true;
32602 }
32603 }
32604 \f
32605 /* Length in units of the trampoline for entering a nested function. */
32606
32607 int
32608 rs6000_trampoline_size (void)
32609 {
32610 int ret = 0;
32611
32612 switch (DEFAULT_ABI)
32613 {
32614 default:
32615 gcc_unreachable ();
32616
32617 case ABI_AIX:
32618 ret = (TARGET_32BIT) ? 12 : 24;
32619 break;
32620
32621 case ABI_ELFv2:
32622 gcc_assert (!TARGET_32BIT);
32623 ret = 32;
32624 break;
32625
32626 case ABI_DARWIN:
32627 case ABI_V4:
32628 ret = (TARGET_32BIT) ? 40 : 48;
32629 break;
32630 }
32631
32632 return ret;
32633 }
32634
32635 /* Emit RTL insns to initialize the variable parts of a trampoline.
32636 FNADDR is an RTX for the address of the function's pure code.
32637 CXT is an RTX for the static chain value for the function. */
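/* Layout sketch (added commentary): in the ABI_AIX case below, the
   trampoline is itself a 3-word function descriptor, with REGSIZE
   being 4 (32-bit) or 8 (64-bit):

       offset 0            entry address, copied from FNADDR's descriptor
       offset REGSIZE      TOC pointer, copied from FNADDR's descriptor
       offset 2*REGSIZE    static chain value (CXT)  */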
32638
32639 static void
32640 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32641 {
32642 int regsize = (TARGET_32BIT) ? 4 : 8;
32643 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32644 rtx ctx_reg = force_reg (Pmode, cxt);
32645 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32646
32647 switch (DEFAULT_ABI)
32648 {
32649 default:
32650 gcc_unreachable ();
32651
32652 /* Under AIX, just build the 3-word function descriptor.  */
32653 case ABI_AIX:
32654 {
32655 rtx fnmem, fn_reg, toc_reg;
32656
32657 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32658 error ("you cannot take the address of a nested function if you use "
32659 "the %qs option", "-mno-pointers-to-nested-functions");
32660
32661 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32662 fn_reg = gen_reg_rtx (Pmode);
32663 toc_reg = gen_reg_rtx (Pmode);
32664
32665 /* Macro to shorten the code expansions below. */
32666 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32667
32668 m_tramp = replace_equiv_address (m_tramp, addr);
32669
32670 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32671 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32672 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32673 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32674 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32675
32676 # undef MEM_PLUS
32677 }
32678 break;
32679
32680 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32681 case ABI_ELFv2:
32682 case ABI_DARWIN:
32683 case ABI_V4:
32684 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32685 LCT_NORMAL, VOIDmode,
32686 addr, Pmode,
32687 GEN_INT (rs6000_trampoline_size ()), SImode,
32688 fnaddr, Pmode,
32689 ctx_reg, Pmode);
32690 break;
32691 }
32692 }
32693
32694 \f
32695 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32696 identifier as an argument, so the front end shouldn't look it up. */
32697
32698 static bool
32699 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32700 {
32701 return is_attribute_p ("altivec", attr_id);
32702 }
32703
32704 /* Handle the "altivec" attribute. The attribute may have
32705 arguments as follows:
32706
32707 __attribute__((altivec(vector__)))
32708 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32709 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32710
32711 and may appear more than once (e.g., 'vector bool char') in a
32712 given declaration. */
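/* Worked example (added commentary): for "vector pixel", pixel__ is
   always followed by "unsigned short" as noted above; the 'p' case
   below only matches E_V8HImode, so it relies on the vector__
   attribute having already turned that "unsigned short" into a
   V8HImode vector, and then selects pixel_V8HI_type_node.  */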
32713
32714 static tree
32715 rs6000_handle_altivec_attribute (tree *node,
32716 tree name ATTRIBUTE_UNUSED,
32717 tree args,
32718 int flags ATTRIBUTE_UNUSED,
32719 bool *no_add_attrs)
32720 {
32721 tree type = *node, result = NULL_TREE;
32722 machine_mode mode;
32723 int unsigned_p;
32724 char altivec_type
32725 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32726 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32727 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32728 : '?');
32729
32730 while (POINTER_TYPE_P (type)
32731 || TREE_CODE (type) == FUNCTION_TYPE
32732 || TREE_CODE (type) == METHOD_TYPE
32733 || TREE_CODE (type) == ARRAY_TYPE)
32734 type = TREE_TYPE (type);
32735
32736 mode = TYPE_MODE (type);
32737
32738 /* Check for invalid AltiVec type qualifiers. */
32739 if (type == long_double_type_node)
32740 error ("use of %<long double%> in AltiVec types is invalid");
32741 else if (type == boolean_type_node)
32742 error ("use of boolean types in AltiVec types is invalid");
32743 else if (TREE_CODE (type) == COMPLEX_TYPE)
32744 error ("use of %<complex%> in AltiVec types is invalid");
32745 else if (DECIMAL_FLOAT_MODE_P (mode))
32746 error ("use of decimal floating point types in AltiVec types is invalid");
32747 else if (!TARGET_VSX)
32748 {
32749 if (type == long_unsigned_type_node || type == long_integer_type_node)
32750 {
32751 if (TARGET_64BIT)
32752 error ("use of %<long%> in AltiVec types is invalid for "
32753 "64-bit code without %qs", "-mvsx");
32754 else if (rs6000_warn_altivec_long)
32755 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32756 "use %<int%>");
32757 }
32758 else if (type == long_long_unsigned_type_node
32759 || type == long_long_integer_type_node)
32760 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32761 "-mvsx");
32762 else if (type == double_type_node)
32763 error ("use of %<double%> in AltiVec types is invalid without %qs",
32764 "-mvsx");
32765 }
32766
32767 switch (altivec_type)
32768 {
32769 case 'v':
32770 unsigned_p = TYPE_UNSIGNED (type);
32771 switch (mode)
32772 {
32773 case E_TImode:
32774 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32775 break;
32776 case E_DImode:
32777 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32778 break;
32779 case E_SImode:
32780 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32781 break;
32782 case E_HImode:
32783 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32784 break;
32785 case E_QImode:
32786 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32787 break;
32788 case E_SFmode: result = V4SF_type_node; break;
32789 case E_DFmode: result = V2DF_type_node; break;
32790 /* If the user says 'vector int bool', we may be handed the 'bool'
32791 attribute _before_ the 'vector' attribute, and so select the
32792 proper type in the 'b' case below. */
32793 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32794 case E_V2DImode: case E_V2DFmode:
32795 result = type;
32796 default: break;
32797 }
32798 break;
32799 case 'b':
32800 switch (mode)
32801 {
32802 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32803 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32804 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32805 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32806 default: break;
32807 }
32808 break;
32809 case 'p':
32810 switch (mode)
32811 {
32812 case E_V8HImode: result = pixel_V8HI_type_node;
32813 default: break;
32814 }
32815 default: break;
32816 }
32817
32818 /* Propagate qualifiers attached to the element type
32819 onto the vector type. */
32820 if (result && result != type && TYPE_QUALS (type))
32821 result = build_qualified_type (result, TYPE_QUALS (type));
32822
32823 *no_add_attrs = true; /* No need to hang on to the attribute. */
32824
32825 if (result)
32826 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32827
32828 return NULL_TREE;
32829 }
32830
32831 /* AltiVec defines five built-in scalar types that serve as vector
32832 elements; we must teach the compiler how to mangle them. The 128-bit
32833 floating point mangling is target-specific as well. */
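/* Example (added commentary): the AltiVec "__bool int" element type is
   mangled via the table below as "U6__booli", so it contributes that
   vendor-extended string wherever the type appears in a mangled C++
   name.  */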
32834
32835 static const char *
32836 rs6000_mangle_type (const_tree type)
32837 {
32838 type = TYPE_MAIN_VARIANT (type);
32839
32840 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32841 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32842 return NULL;
32843
32844 if (type == bool_char_type_node) return "U6__boolc";
32845 if (type == bool_short_type_node) return "U6__bools";
32846 if (type == pixel_type_node) return "u7__pixel";
32847 if (type == bool_int_type_node) return "U6__booli";
32848 if (type == bool_long_long_type_node) return "U6__boolx";
32849
32850 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32851 return "g";
32852 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32853 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32854
32855 /* For all other types, use the default mangling. */
32856 return NULL;
32857 }
32858
32859 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32860 struct attribute_spec.handler. */
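/* Usage sketch (added commentary):

       int far_func (void) __attribute__ ((longcall));

   marks calls to far_func as potentially out of range of a direct
   branch; such calls are then made through a register, as set up by
   rs6000_longcall_ref below.  */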
32861
32862 static tree
32863 rs6000_handle_longcall_attribute (tree *node, tree name,
32864 tree args ATTRIBUTE_UNUSED,
32865 int flags ATTRIBUTE_UNUSED,
32866 bool *no_add_attrs)
32867 {
32868 if (TREE_CODE (*node) != FUNCTION_TYPE
32869 && TREE_CODE (*node) != FIELD_DECL
32870 && TREE_CODE (*node) != TYPE_DECL)
32871 {
32872 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32873 name);
32874 *no_add_attrs = true;
32875 }
32876
32877 return NULL_TREE;
32878 }
32879
32880 /* Set longcall attributes on all functions declared when
32881 rs6000_default_long_calls is true. */
32882 static void
32883 rs6000_set_default_type_attributes (tree type)
32884 {
32885 if (rs6000_default_long_calls
32886 && (TREE_CODE (type) == FUNCTION_TYPE
32887 || TREE_CODE (type) == METHOD_TYPE))
32888 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32889 NULL_TREE,
32890 TYPE_ATTRIBUTES (type));
32891
32892 #if TARGET_MACHO
32893 darwin_set_default_type_attributes (type);
32894 #endif
32895 }
32896
32897 /* Return a reference suitable for calling a function with the
32898 longcall attribute. */
32899
32900 static rtx
32901 rs6000_longcall_ref (rtx call_ref, rtx arg)
32902 {
32903 /* System V adds '.' to the internal name, so skip any leading dots.  */
32904 const char *call_name = XSTR (call_ref, 0);
32905 if (*call_name == '.')
32906 {
32907 while (*call_name == '.')
32908 call_name++;
32909
32910 tree node = get_identifier (call_name);
32911 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32912 }
32913
32914 if (TARGET_PLTSEQ)
32915 {
32916 rtx base = const0_rtx;
32917 int regno;
32918 if (DEFAULT_ABI == ABI_ELFv2)
32919 {
32920 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32921 regno = 12;
32922 }
32923 else
32924 {
32925 if (flag_pic)
32926 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32927 regno = 11;
32928 }
32929 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32930 may be used by a function's global entry point.  For SysV4, r11
32931 is used by the __glink_PLTresolve lazy resolver entry.  */
32932 rtx reg = gen_rtx_REG (Pmode, regno);
32933 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32934 UNSPEC_PLT16_HA);
32935 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32936 UNSPEC_PLT16_LO);
32937 emit_insn (gen_rtx_SET (reg, hi));
32938 emit_insn (gen_rtx_SET (reg, lo));
32939 return reg;
32940 }
32941
32942 return force_reg (Pmode, call_ref);
32943 }
32944 \f
32945 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32946 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32947 #endif
32948
32949 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32950 struct attribute_spec.handler. */
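/* Usage sketch (added commentary):

       struct __attribute__ ((ms_struct)) S { char c; int i; };

   selects the Microsoft record layout convention for S; as checked
   below, it may not be combined with the opposite gcc_struct
   attribute on the same type.  */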
32951 static tree
32952 rs6000_handle_struct_attribute (tree *node, tree name,
32953 tree args ATTRIBUTE_UNUSED,
32954 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32955 {
32956 tree *type = NULL;
32957 if (DECL_P (*node))
32958 {
32959 if (TREE_CODE (*node) == TYPE_DECL)
32960 type = &TREE_TYPE (*node);
32961 }
32962 else
32963 type = node;
32964
32965 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32966 || TREE_CODE (*type) == UNION_TYPE)))
32967 {
32968 warning (OPT_Wattributes, "%qE attribute ignored", name);
32969 *no_add_attrs = true;
32970 }
32971
32972 else if ((is_attribute_p ("ms_struct", name)
32973 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32974 || ((is_attribute_p ("gcc_struct", name)
32975 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32976 {
32977 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32978 name);
32979 *no_add_attrs = true;
32980 }
32981
32982 return NULL_TREE;
32983 }
32984
32985 static bool
32986 rs6000_ms_bitfield_layout_p (const_tree record_type)
32987 {
32988 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32989 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32990 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32991 }
32992 \f
32993 #ifdef USING_ELFOS_H
32994
32995 /* A get_unnamed_section callback, used for switching to toc_section. */
32996
32997 static void
32998 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32999 {
33000 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33001 && TARGET_MINIMAL_TOC)
33002 {
33003 if (!toc_initialized)
33004 {
33005 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33006 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33007 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
33008 fprintf (asm_out_file, "\t.tc ");
33009 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
33010 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33011 fprintf (asm_out_file, "\n");
33012
33013 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33014 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33015 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33016 fprintf (asm_out_file, " = .+32768\n");
33017 toc_initialized = 1;
33018 }
33019 else
33020 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33021 }
33022 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33023 {
33024 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
33025 if (!toc_initialized)
33026 {
33027 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33028 toc_initialized = 1;
33029 }
33030 }
33031 else
33032 {
33033 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33034 if (!toc_initialized)
33035 {
33036 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33037 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33038 fprintf (asm_out_file, " = .+32768\n");
33039 toc_initialized = 1;
33040 }
33041 }
33042 }
33043
33044 /* Implement TARGET_ASM_INIT_SECTIONS. */
33045
33046 static void
33047 rs6000_elf_asm_init_sections (void)
33048 {
33049 toc_section
33050 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33051
33052 sdata2_section
33053 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33054 SDATA2_SECTION_ASM_OP);
33055 }
33056
33057 /* Implement TARGET_SELECT_RTX_SECTION. */
33058
33059 static section *
33060 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33061 unsigned HOST_WIDE_INT align)
33062 {
33063 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33064 return toc_section;
33065 else
33066 return default_elf_select_rtx_section (mode, x, align);
33067 }
33068 \f
33069 /* For a SYMBOL_REF, set generic flags and then perform some
33070 target-specific processing.
33071
33072 When the AIX ABI is requested on a non-AIX system, replace the
33073 function name with the real name (with a leading .) rather than the
33074 function descriptor name.  This saves a lot of overriding code that
33075 would otherwise be needed to handle the prefixes.  */
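/* Example (added commentary): with DEFAULT_ABI == ABI_AIX on a non-AIX
   ELF system, a FUNCTION_DECL named "foo" has its symbol rewritten to
   ".foo" by the code below.  */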
33076
33077 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33078 static void
33079 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33080 {
33081 default_encode_section_info (decl, rtl, first);
33082
33083 if (first
33084 && TREE_CODE (decl) == FUNCTION_DECL
33085 && !TARGET_AIX
33086 && DEFAULT_ABI == ABI_AIX)
33087 {
33088 rtx sym_ref = XEXP (rtl, 0);
33089 size_t len = strlen (XSTR (sym_ref, 0));
33090 char *str = XALLOCAVEC (char, len + 2);
33091 str[0] = '.';
33092 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33093 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33094 }
33095 }
33096
33097 static inline bool
33098 compare_section_name (const char *section, const char *templ)
33099 {
33100 int len;
33101
33102 len = strlen (templ);
33103 return (strncmp (section, templ, len) == 0
33104 && (section[len] == 0 || section[len] == '.'));
33105 }
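/* Examples (added commentary): compare_section_name (".sdata.x", ".sdata")
   and compare_section_name (".sdata", ".sdata") are true, while
   compare_section_name (".sdata2", ".sdata") is false, since the
   character after the matched prefix is neither NUL nor '.'.  */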
33106
33107 bool
33108 rs6000_elf_in_small_data_p (const_tree decl)
33109 {
33110 if (rs6000_sdata == SDATA_NONE)
33111 return false;
33112
33113 /* We want to merge strings, so we never consider them small data. */
33114 if (TREE_CODE (decl) == STRING_CST)
33115 return false;
33116
33117 /* Functions are never in the small data area. */
33118 if (TREE_CODE (decl) == FUNCTION_DECL)
33119 return false;
33120
33121 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33122 {
33123 const char *section = DECL_SECTION_NAME (decl);
33124 if (compare_section_name (section, ".sdata")
33125 || compare_section_name (section, ".sdata2")
33126 || compare_section_name (section, ".gnu.linkonce.s")
33127 || compare_section_name (section, ".sbss")
33128 || compare_section_name (section, ".sbss2")
33129 || compare_section_name (section, ".gnu.linkonce.sb")
33130 || strcmp (section, ".PPC.EMB.sdata0") == 0
33131 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33132 return true;
33133 }
33134 else
33135 {
33136 /* If we are told not to put readonly data in sdata, then don't. */
33137 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
33138 && !rs6000_readonly_in_sdata)
33139 return false;
33140
33141 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33142
33143 if (size > 0
33144 && size <= g_switch_value
33145 /* If it's not public, and we're not going to reference it there,
33146 there's no need to put it in the small data section. */
33147 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33148 return true;
33149 }
33150
33151 return false;
33152 }
33153
33154 #endif /* USING_ELFOS_H */
33155 \f
33156 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33157
33158 static bool
33159 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33160 {
33161 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33162 }
33163
33164 /* Do not place thread-local symbol refs in the object blocks.  */
33165
33166 static bool
33167 rs6000_use_blocks_for_decl_p (const_tree decl)
33168 {
33169 return !DECL_THREAD_LOCAL_P (decl);
33170 }
33171 \f
33172 /* Return a REG that occurs in ADDR with coefficient 1.
33173 ADDR can be effectively incremented by incrementing REG.
33174
33175 r0 is special and we must not select it as an address
33176 register by this routine since our caller will try to
33177 increment the returned register via an "la" instruction. */
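/* Example (added commentary): for ADDR of the form
   (plus (reg 9) (const_int 16)) this returns (reg 9); for nested sums
   it keeps descending into a non-constant, non-r0 operand.  */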
33178
33179 rtx
33180 find_addr_reg (rtx addr)
33181 {
33182 while (GET_CODE (addr) == PLUS)
33183 {
33184 if (REG_P (XEXP (addr, 0))
33185 && REGNO (XEXP (addr, 0)) != 0)
33186 addr = XEXP (addr, 0);
33187 else if (REG_P (XEXP (addr, 1))
33188 && REGNO (XEXP (addr, 1)) != 0)
33189 addr = XEXP (addr, 1);
33190 else if (CONSTANT_P (XEXP (addr, 0)))
33191 addr = XEXP (addr, 1);
33192 else if (CONSTANT_P (XEXP (addr, 1)))
33193 addr = XEXP (addr, 0);
33194 else
33195 gcc_unreachable ();
33196 }
33197 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
33198 return addr;
33199 }
33200
33201 void
33202 rs6000_fatal_bad_address (rtx op)
33203 {
33204 fatal_insn ("bad address", op);
33205 }
33206
33207 #if TARGET_MACHO
33208
33209 typedef struct branch_island_d {
33210 tree function_name;
33211 tree label_name;
33212 int line_number;
33213 } branch_island;
33214
33215
33216 static vec<branch_island, va_gc> *branch_islands;
33217
33218 /* Remember to generate a branch island for far calls to the given
33219 function. */
33220
33221 static void
33222 add_compiler_branch_island (tree label_name, tree function_name,
33223 int line_number)
33224 {
33225 branch_island bi = {function_name, label_name, line_number};
33226 vec_safe_push (branch_islands, bi);
33227 }
33228
33229 /* Generate far-jump branch islands for everything recorded in
33230 branch_islands. Invoked immediately after the last instruction of
33231 the epilogue has been emitted; the branch islands must be appended
33232 to, and contiguous with, the function body. Mach-O stubs are
33233 generated in machopic_output_stub(). */
33234
33235 static void
33236 macho_branch_islands (void)
33237 {
33238 char tmp_buf[512];
33239
33240 while (!vec_safe_is_empty (branch_islands))
33241 {
33242 branch_island *bi = &branch_islands->last ();
33243 const char *label = IDENTIFIER_POINTER (bi->label_name);
33244 const char *name = IDENTIFIER_POINTER (bi->function_name);
33245 char name_buf[512];
33246 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33247 if (name[0] == '*' || name[0] == '&')
33248 strcpy (name_buf, name+1);
33249 else
33250 {
33251 name_buf[0] = '_';
33252 strcpy (name_buf+1, name);
33253 }
33254 strcpy (tmp_buf, "\n");
33255 strcat (tmp_buf, label);
33256 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33257 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33258 dbxout_stabd (N_SLINE, bi->line_number);
33259 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33260 if (flag_pic)
33261 {
33262 if (TARGET_LINK_STACK)
33263 {
33264 char name[32];
33265 get_ppc476_thunk_name (name);
33266 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33267 strcat (tmp_buf, name);
33268 strcat (tmp_buf, "\n");
33269 strcat (tmp_buf, label);
33270 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33271 }
33272 else
33273 {
33274 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33275 strcat (tmp_buf, label);
33276 strcat (tmp_buf, "_pic\n");
33277 strcat (tmp_buf, label);
33278 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33279 }
33280
33281 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33282 strcat (tmp_buf, name_buf);
33283 strcat (tmp_buf, " - ");
33284 strcat (tmp_buf, label);
33285 strcat (tmp_buf, "_pic)\n");
33286
33287 strcat (tmp_buf, "\tmtlr r0\n");
33288
33289 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33290 strcat (tmp_buf, name_buf);
33291 strcat (tmp_buf, " - ");
33292 strcat (tmp_buf, label);
33293 strcat (tmp_buf, "_pic)\n");
33294
33295 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33296 }
33297 else
33298 {
33299 strcat (tmp_buf, ":\nlis r12,hi16(");
33300 strcat (tmp_buf, name_buf);
33301 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33302 strcat (tmp_buf, name_buf);
33303 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33304 }
33305 output_asm_insn (tmp_buf, 0);
33306 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33307 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33308 dbxout_stabd (N_SLINE, bi->line_number);
33309 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33310 branch_islands->pop ();
33311 }
33312 }
33313
33314 /* NO_PREVIOUS_DEF checks whether the function name already has a
33315 branch island recorded in the branch_islands vector.  */
33316
33317 static int
33318 no_previous_def (tree function_name)
33319 {
33320 branch_island *bi;
33321 unsigned ix;
33322
33323 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33324 if (function_name == bi->function_name)
33325 return 0;
33326 return 1;
33327 }
33328
33329 /* GET_PREV_LABEL gets the label name from the previous definition of
33330 the function. */
33331
33332 static tree
33333 get_prev_label (tree function_name)
33334 {
33335 branch_island *bi;
33336 unsigned ix;
33337
33338 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33339 if (function_name == bi->function_name)
33340 return bi->label_name;
33341 return NULL_TREE;
33342 }
33343
33344 /* Generate PIC and indirect symbol stubs. */
33345
33346 void
33347 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33348 {
33349 unsigned int length;
33350 char *symbol_name, *lazy_ptr_name;
33351 char *local_label_0;
33352 static int label = 0;
33353
33354 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33355 symb = (*targetm.strip_name_encoding) (symb);
33356
33357
33358 length = strlen (symb);
33359 symbol_name = XALLOCAVEC (char, length + 32);
33360 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33361
33362 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33363 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33364
33365 if (flag_pic == 2)
33366 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33367 else
33368 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33369
33370 if (flag_pic == 2)
33371 {
33372 fprintf (file, "\t.align 5\n");
33373
33374 fprintf (file, "%s:\n", stub);
33375 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33376
33377 label++;
33378 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33379 sprintf (local_label_0, "\"L%011d$spb\"", label);
33380
33381 fprintf (file, "\tmflr r0\n");
33382 if (TARGET_LINK_STACK)
33383 {
33384 char name[32];
33385 get_ppc476_thunk_name (name);
33386 fprintf (file, "\tbl %s\n", name);
33387 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33388 }
33389 else
33390 {
33391 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33392 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33393 }
33394 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33395 lazy_ptr_name, local_label_0);
33396 fprintf (file, "\tmtlr r0\n");
33397 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33398 (TARGET_64BIT ? "ldu" : "lwzu"),
33399 lazy_ptr_name, local_label_0);
33400 fprintf (file, "\tmtctr r12\n");
33401 fprintf (file, "\tbctr\n");
33402 }
33403 else
33404 {
33405 fprintf (file, "\t.align 4\n");
33406
33407 fprintf (file, "%s:\n", stub);
33408 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33409
33410 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33411 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33412 (TARGET_64BIT ? "ldu" : "lwzu"),
33413 lazy_ptr_name);
33414 fprintf (file, "\tmtctr r12\n");
33415 fprintf (file, "\tbctr\n");
33416 }
33417
33418 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33419 fprintf (file, "%s:\n", lazy_ptr_name);
33420 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33421 fprintf (file, "%sdyld_stub_binding_helper\n",
33422 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33423 }
33424
33425 /* Legitimize PIC addresses. If the address is already
33426 position-independent, we return ORIG. Newly generated
33427 position-independent addresses go into a reg.  This is REG if
33428 nonzero; otherwise we allocate register(s) as necessary.  */
33429
33430 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
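/* Worked example (added commentary): SMALL_INT accepts exactly the
   signed 16-bit range.  For X = 32767 the test is 0xffff < 0x10000
   (true); for X = -32768 the addition wraps to 0 (true); for
   X = 32768 it yields 0x10000 (false).  */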
33431
33432 rtx
33433 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33434 rtx reg)
33435 {
33436 rtx base, offset;
33437
33438 if (reg == NULL && !reload_completed)
33439 reg = gen_reg_rtx (Pmode);
33440
33441 if (GET_CODE (orig) == CONST)
33442 {
33443 rtx reg_temp;
33444
33445 if (GET_CODE (XEXP (orig, 0)) == PLUS
33446 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33447 return orig;
33448
33449 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33450
33451 /* Use a different reg for the intermediate value, as
33452 it will be marked UNCHANGING. */
33453 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33454 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33455 Pmode, reg_temp);
33456 offset =
33457 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33458 Pmode, reg);
33459
33460 if (CONST_INT_P (offset))
33461 {
33462 if (SMALL_INT (offset))
33463 return plus_constant (Pmode, base, INTVAL (offset));
33464 else if (!reload_completed)
33465 offset = force_reg (Pmode, offset);
33466 else
33467 {
33468 rtx mem = force_const_mem (Pmode, orig);
33469 return machopic_legitimize_pic_address (mem, Pmode, reg);
33470 }
33471 }
33472 return gen_rtx_PLUS (Pmode, base, offset);
33473 }
33474
33475 /* Fall back on generic machopic code. */
33476 return machopic_legitimize_pic_address (orig, mode, reg);
33477 }
33478
33479 /* Output a .machine directive for the Darwin assembler, and call
33480 the generic start_file routine. */
33481
33482 static void
33483 rs6000_darwin_file_start (void)
33484 {
33485 static const struct
33486 {
33487 const char *arg;
33488 const char *name;
33489 HOST_WIDE_INT if_set;
33490 } mapping[] = {
33491 { "ppc64", "ppc64", MASK_64BIT },
33492 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33493 { "power4", "ppc970", 0 },
33494 { "G5", "ppc970", 0 },
33495 { "7450", "ppc7450", 0 },
33496 { "7400", "ppc7400", MASK_ALTIVEC },
33497 { "G4", "ppc7400", 0 },
33498 { "750", "ppc750", 0 },
33499 { "740", "ppc750", 0 },
33500 { "G3", "ppc750", 0 },
33501 { "604e", "ppc604e", 0 },
33502 { "604", "ppc604", 0 },
33503 { "603e", "ppc603", 0 },
33504 { "603", "ppc603", 0 },
33505 { "601", "ppc601", 0 },
33506 { NULL, "ppc", 0 } };
33507 const char *cpu_id = "";
33508 size_t i;
33509
33510 rs6000_file_start ();
33511 darwin_file_start ();
33512
33513 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33514
33515 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33516 cpu_id = rs6000_default_cpu;
33517
33518 if (global_options_set.x_rs6000_cpu_index)
33519 cpu_id = processor_target_table[rs6000_cpu_index].name;
33520
33521 /* Look through the mapping array. Pick the first name that either
33522 matches the argument, has a bit set in IF_SET that is also set
33523 in the target flags, or has a NULL name. */
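/* Example (added commentary): cpu_id "7400" matches the "7400" row and
   emits ".machine ppc7400"; if nothing matches, the terminating row
   with a NULL arg emits ".machine ppc".  */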
33524
33525 i = 0;
33526 while (mapping[i].arg != NULL
33527 && strcmp (mapping[i].arg, cpu_id) != 0
33528 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33529 i++;
33530
33531 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33532 }
33533
33534 #endif /* TARGET_MACHO */
33535
33536 #if TARGET_ELF
33537 static int
33538 rs6000_elf_reloc_rw_mask (void)
33539 {
33540 if (flag_pic)
33541 return 3;
33542 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33543 return 2;
33544 else
33545 return 0;
33546 }
33547
33548 /* Record an element in the table of global constructors. SYMBOL is
33549 a SYMBOL_REF of the function to be called; PRIORITY is a number
33550 between 0 and MAX_INIT_PRIORITY.
33551
33552 This differs from default_named_section_asm_out_constructor in
33553 that we have special handling for -mrelocatable. */
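/* Worked example (added commentary): MAX_INIT_PRIORITY is 65535, so a
   constructor with priority 101 lands in section ".ctors.65434"; the
   inversion makes the linker's increasing sort match the right-to-left
   execution order described below.  */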
33554
33555 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33556 static void
33557 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33558 {
33559 const char *section = ".ctors";
33560 char buf[18];
33561
33562 if (priority != DEFAULT_INIT_PRIORITY)
33563 {
33564 sprintf (buf, ".ctors.%.5u",
33565 /* Invert the numbering so the linker puts us in the proper
33566 order; constructors are run from right to left, and the
33567 linker sorts in increasing order. */
33568 MAX_INIT_PRIORITY - priority);
33569 section = buf;
33570 }
33571
33572 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33573 assemble_align (POINTER_SIZE);
33574
33575 if (DEFAULT_ABI == ABI_V4
33576 && (TARGET_RELOCATABLE || flag_pic > 1))
33577 {
33578 fputs ("\t.long (", asm_out_file);
33579 output_addr_const (asm_out_file, symbol);
33580 fputs (")@fixup\n", asm_out_file);
33581 }
33582 else
33583 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33584 }
33585
33586 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33587 static void
33588 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33589 {
33590 const char *section = ".dtors";
33591 char buf[18];
33592
33593 if (priority != DEFAULT_INIT_PRIORITY)
33594 {
33595 sprintf (buf, ".dtors.%.5u",
33596 /* Invert the numbering so the linker puts us in the proper
33597 order; constructors are run from right to left, and the
33598 linker sorts in increasing order. */
33599 MAX_INIT_PRIORITY - priority);
33600 section = buf;
33601 }
33602
33603 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33604 assemble_align (POINTER_SIZE);
33605
33606 if (DEFAULT_ABI == ABI_V4
33607 && (TARGET_RELOCATABLE || flag_pic > 1))
33608 {
33609 fputs ("\t.long (", asm_out_file);
33610 output_addr_const (asm_out_file, symbol);
33611 fputs (")@fixup\n", asm_out_file);
33612 }
33613 else
33614 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33615 }
33616
33617 void
33618 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33619 {
33620 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33621 {
33622 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33623 ASM_OUTPUT_LABEL (file, name);
33624 fputs (DOUBLE_INT_ASM_OP, file);
33625 rs6000_output_function_entry (file, name);
33626 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33627 if (DOT_SYMBOLS)
33628 {
33629 fputs ("\t.size\t", file);
33630 assemble_name (file, name);
33631 fputs (",24\n\t.type\t.", file);
33632 assemble_name (file, name);
33633 fputs (",@function\n", file);
33634 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33635 {
33636 fputs ("\t.globl\t.", file);
33637 assemble_name (file, name);
33638 putc ('\n', file);
33639 }
33640 }
33641 else
33642 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33643 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33644 rs6000_output_function_entry (file, name);
33645 fputs (":\n", file);
33646 return;
33647 }
33648
33649 int uses_toc;
33650 if (DEFAULT_ABI == ABI_V4
33651 && (TARGET_RELOCATABLE || flag_pic > 1)
33652 && !TARGET_SECURE_PLT
33653 && (!constant_pool_empty_p () || crtl->profile)
33654 && (uses_toc = uses_TOC ()))
33655 {
33656 char buf[256];
33657
33658 if (uses_toc == 2)
33659 switch_to_other_text_partition ();
33660 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33661
33662 fprintf (file, "\t.long ");
33663 assemble_name (file, toc_label_name);
33664 need_toc_init = 1;
33665 putc ('-', file);
33666 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33667 assemble_name (file, buf);
33668 putc ('\n', file);
33669 if (uses_toc == 2)
33670 switch_to_other_text_partition ();
33671 }
33672
33673 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33674 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33675
33676 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33677 {
33678 char buf[256];
33679
33680 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33681
33682 fprintf (file, "\t.quad .TOC.-");
33683 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33684 assemble_name (file, buf);
33685 putc ('\n', file);
33686 }
33687
33688 if (DEFAULT_ABI == ABI_AIX)
33689 {
33690 const char *desc_name, *orig_name;
33691
33692 orig_name = (*targetm.strip_name_encoding) (name);
33693 desc_name = orig_name;
33694 while (*desc_name == '.')
33695 desc_name++;
33696
33697 if (TREE_PUBLIC (decl))
33698 fprintf (file, "\t.globl %s\n", desc_name);
33699
33700 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33701 fprintf (file, "%s:\n", desc_name);
33702 fprintf (file, "\t.long %s\n", orig_name);
33703 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33704 fputs ("\t.long 0\n", file);
33705 fprintf (file, "\t.previous\n");
33706 }
33707 ASM_OUTPUT_LABEL (file, name);
33708 }
33709
33710 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33711 static void
33712 rs6000_elf_file_end (void)
33713 {
33714 #ifdef HAVE_AS_GNU_ATTRIBUTE
33715 /* ??? The value emitted depends on options active at file end.
33716 Assume anyone using #pragma or attributes that might change
33717 options knows what they are doing. */
33718 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33719 && rs6000_passes_float)
33720 {
33721 int fp;
33722
33723 if (TARGET_HARD_FLOAT)
33724 fp = 1;
33725 else
33726 fp = 2;
33727 if (rs6000_passes_long_double)
33728 {
33729 if (!TARGET_LONG_DOUBLE_128)
33730 fp |= 2 * 4;
33731 else if (TARGET_IEEEQUAD)
33732 fp |= 3 * 4;
33733 else
33734 fp |= 1 * 4;
33735 }
33736 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33737 }
33738 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33739 {
33740 if (rs6000_passes_vector)
33741 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33742 (TARGET_ALTIVEC_ABI ? 2 : 1));
33743 if (rs6000_returns_struct)
33744 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33745 aix_struct_return ? 2 : 1);
33746 }
33747 #endif
33748 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33749 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33750 file_end_indicate_exec_stack ();
33751 #endif
33752
33753 if (flag_split_stack)
33754 file_end_indicate_split_stack ();
33755
33756 if (cpu_builtin_p)
33757 {
33758 /* We have expanded a CPU builtin, so we need to emit a reference to
33759 the special symbol that LIBC uses to declare that it supports the
33760 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 entries in the TCB.  */
33761 switch_to_section (data_section);
33762 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33763 fprintf (asm_out_file, "\t%s %s\n",
33764 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33765 }
33766 }
33767 #endif
33768
33769 #if TARGET_XCOFF
33770
33771 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33772 #define HAVE_XCOFF_DWARF_EXTRAS 0
33773 #endif
33774
33775 static enum unwind_info_type
33776 rs6000_xcoff_debug_unwind_info (void)
33777 {
33778 return UI_NONE;
33779 }
33780
33781 static void
33782 rs6000_xcoff_asm_output_anchor (rtx symbol)
33783 {
33784 char buffer[100];
33785
33786 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33787 SYMBOL_REF_BLOCK_OFFSET (symbol));
33788 fprintf (asm_out_file, "%s", SET_ASM_OP);
33789 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33790 fprintf (asm_out_file, ",");
33791 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33792 fprintf (asm_out_file, "\n");
33793 }
33794
33795 static void
33796 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33797 {
33798 fputs (GLOBAL_ASM_OP, stream);
33799 RS6000_OUTPUT_BASENAME (stream, name);
33800 putc ('\n', stream);
33801 }
33802
33803 /* A get_unnamed_section callback, used for read-only sections.
33804 DIRECTIVE points to the section string variable.  */
33805
33806 static void
33807 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33808 {
33809 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33810 *(const char *const *) directive,
33811 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33812 }
33813
33814 /* Likewise for read-write sections. */
33815
33816 static void
33817 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33818 {
33819 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33820 *(const char *const *) directive,
33821 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33822 }
33823
33824 static void
33825 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33826 {
33827 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33828 *(const char *const *) directive,
33829 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33830 }
33831
33832 /* A get_unnamed_section callback, used for switching to toc_section. */
33833
33834 static void
33835 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33836 {
33837 if (TARGET_MINIMAL_TOC)
33838 {
33839 /* toc_section is always selected at least once from
33840 rs6000_xcoff_file_start, so this is guaranteed to be
33841 defined exactly once in each file.  */
33842 if (!toc_initialized)
33843 {
33844 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33845 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33846 toc_initialized = 1;
33847 }
33848 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33849 (TARGET_32BIT ? "" : ",3"));
33850 }
33851 else
33852 fputs ("\t.toc\n", asm_out_file);
33853 }
33854
33855 /* Implement TARGET_ASM_INIT_SECTIONS. */
33856
33857 static void
33858 rs6000_xcoff_asm_init_sections (void)
33859 {
33860 read_only_data_section
33861 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33862 &xcoff_read_only_section_name);
33863
33864 private_data_section
33865 = get_unnamed_section (SECTION_WRITE,
33866 rs6000_xcoff_output_readwrite_section_asm_op,
33867 &xcoff_private_data_section_name);
33868
33869 read_only_private_data_section
33870 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33871 &xcoff_private_rodata_section_name);
33872
33873 tls_data_section
33874 = get_unnamed_section (SECTION_TLS,
33875 rs6000_xcoff_output_tls_section_asm_op,
33876 &xcoff_tls_data_section_name);
33877
33878 tls_private_data_section
33879 = get_unnamed_section (SECTION_TLS,
33880 rs6000_xcoff_output_tls_section_asm_op,
33881 &xcoff_private_data_section_name);
33882
33883 toc_section
33884 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33885
33886 readonly_data_section = read_only_data_section;
33887 }
33888
33889 static int
33890 rs6000_xcoff_reloc_rw_mask (void)
33891 {
33892 return 3;
33893 }
33894
33895 static void
33896 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33897 tree decl ATTRIBUTE_UNUSED)
33898 {
33899 int smclass;
33900 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33901
33902 if (flags & SECTION_EXCLUDE)
33903 smclass = 4;
33904 else if (flags & SECTION_DEBUG)
33905 {
33906 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33907 return;
33908 }
33909 else if (flags & SECTION_CODE)
33910 smclass = 0;
33911 else if (flags & SECTION_TLS)
33912 smclass = 3;
33913 else if (flags & SECTION_WRITE)
33914 smclass = 2;
33915 else
33916 smclass = 1;
33917
33918 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33919 (flags & SECTION_CODE) ? "." : "",
33920 name, suffix[smclass], flags & SECTION_ENTSIZE);
33921 }
33922
33923 #define IN_NAMED_SECTION(DECL) \
33924 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33925 && DECL_SECTION_NAME (DECL) != NULL)
33926
33927 static section *
33928 rs6000_xcoff_select_section (tree decl, int reloc,
33929 unsigned HOST_WIDE_INT align)
33930 {
33931 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33932 named section. */
33933 if (align > BIGGEST_ALIGNMENT)
33934 {
33935 resolve_unique_section (decl, reloc, true);
33936 if (IN_NAMED_SECTION (decl))
33937 return get_named_section (decl, NULL, reloc);
33938 }
33939
33940 if (decl_readonly_section (decl, reloc))
33941 {
33942 if (TREE_PUBLIC (decl))
33943 return read_only_data_section;
33944 else
33945 return read_only_private_data_section;
33946 }
33947 else
33948 {
33949 #if HAVE_AS_TLS
33950 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33951 {
33952 if (TREE_PUBLIC (decl))
33953 return tls_data_section;
33954 else if (bss_initializer_p (decl))
33955 {
33956 /* Convert to COMMON to emit in BSS. */
33957 DECL_COMMON (decl) = 1;
33958 return tls_comm_section;
33959 }
33960 else
33961 return tls_private_data_section;
33962 }
33963 else
33964 #endif
33965 if (TREE_PUBLIC (decl))
33966 return data_section;
33967 else
33968 return private_data_section;
33969 }
33970 }
33971
33972 static void
33973 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33974 {
33975 const char *name;
33976
33977 /* Use select_section for private data and uninitialized data with
33978 alignment <= BIGGEST_ALIGNMENT. */
33979 if (!TREE_PUBLIC (decl)
33980 || DECL_COMMON (decl)
33981 || (DECL_INITIAL (decl) == NULL_TREE
33982 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33983 || DECL_INITIAL (decl) == error_mark_node
33984 || (flag_zero_initialized_in_bss
33985 && initializer_zerop (DECL_INITIAL (decl))))
33986 return;
33987
33988 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33989 name = (*targetm.strip_name_encoding) (name);
33990 set_decl_section_name (decl, name);
33991 }
33992
33993 /* Select section for constant in constant pool.
33994
33995 On RS/6000, all constants are in the private read-only data area.
33996 However, if this is being placed in the TOC it must be output as a
33997 toc entry. */
33998
33999 static section *
34000 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
34001 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
34002 {
34003 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
34004 return toc_section;
34005 else
34006 return read_only_private_data_section;
34007 }
34008
34009 /* Remove any trailing [DS] or the like from the symbol name. */
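/* Example (added commentary): "foo[DS]" becomes "foo".  The code below
   assumes a bracketed mapping class is always four characters, as with
   "[DS]", "[RW]" or "[UA]".  */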
34010
34011 static const char *
34012 rs6000_xcoff_strip_name_encoding (const char *name)
34013 {
34014 size_t len;
34015 if (*name == '*')
34016 name++;
34017 len = strlen (name);
34018 if (name[len - 1] == ']')
34019 return ggc_alloc_string (name, len - 4);
34020 else
34021 return name;
34022 }
34023
34024 /* Section attributes. AIX is always PIC. */
34025
34026 static unsigned int
34027 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
34028 {
34029 unsigned int align;
34030 unsigned int flags = default_section_type_flags (decl, name, reloc);
34031
34032 /* Align to at least the unit size, MIN_UNITS_PER_WORD.  */
34033 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
34034 align = MIN_UNITS_PER_WORD;
34035 else
34036 /* Increase alignment of large objects if not already stricter. */
34037 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
34038 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
34039 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
34040
34041 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
34042 }
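/* Worked example (added commentary): a DECL with 8-byte alignment and
   size above MIN_UNITS_PER_WORD gives align = 8, and exact_log2 (8) == 3
   is encoded into the SECTION_ENTSIZE bits of the returned flags.  */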
34043
34044 /* Output at beginning of assembler file.
34045
34046 Initialize the section names for the RS/6000 at this point.
34047
34048 Specify filename, including full path, to assembler.
34049
34050 We want to go into the TOC section so at least one .toc will be emitted.
34051 Also, in order to output proper .bs/.es pairs, we need at least one static
34052 [RW] section emitted.
34053
34054 Finally, declare mcount when profiling to make the assembler happy. */
34055
34056 static void
34057 rs6000_xcoff_file_start (void)
34058 {
34059 rs6000_gen_section_name (&xcoff_bss_section_name,
34060 main_input_filename, ".bss_");
34061 rs6000_gen_section_name (&xcoff_private_data_section_name,
34062 main_input_filename, ".rw_");
34063 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
34064 main_input_filename, ".rop_");
34065 rs6000_gen_section_name (&xcoff_read_only_section_name,
34066 main_input_filename, ".ro_");
34067 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34068 main_input_filename, ".tls_");
34069 rs6000_gen_section_name (&xcoff_tbss_section_name,
34070 main_input_filename, ".tbss_[UL]");
34071
34072 fputs ("\t.file\t", asm_out_file);
34073 output_quoted_string (asm_out_file, main_input_filename);
34074 fputc ('\n', asm_out_file);
34075 if (write_symbols != NO_DEBUG)
34076 switch_to_section (private_data_section);
34077 switch_to_section (toc_section);
34078 switch_to_section (text_section);
34079 if (profile_flag)
34080 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34081 rs6000_file_start ();
34082 }
34083
34084 /* Output at end of assembler file.
34085 On the RS/6000, referencing data should automatically pull in text. */
34086
34087 static void
34088 rs6000_xcoff_file_end (void)
34089 {
34090 switch_to_section (text_section);
34091 fputs ("_section_.text:\n", asm_out_file);
34092 switch_to_section (data_section);
34093 fputs (TARGET_32BIT
34094 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34095 asm_out_file);
34096 }
34097
34098 struct declare_alias_data
34099 {
34100 FILE *file;
34101 bool function_descriptor;
34102 };
34103
34104 /* Declare alias N.  A helper function for call_for_symbol_and_aliases.  */
34105
34106 static bool
34107 rs6000_declare_alias (struct symtab_node *n, void *d)
34108 {
34109 struct declare_alias_data *data = (struct declare_alias_data *)d;
34110 /* The main symbol is output specially, because the varasm machinery does
34111 part of the job for us; we do not need to declare .globl/.lglobl and such. */
34112 if (!n->alias || n->weakref)
34113 return false;
34114
34115 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34116 return false;
34117
34118 /* Prevent assemble_alias from trying to use .set pseudo operation
34119 that does not behave as expected by the middle-end. */
34120 TREE_ASM_WRITTEN (n->decl) = true;
34121
34122 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34123 char *buffer = (char *) alloca (strlen (name) + 2);
34124 char *p;
34125 int dollar_inside = 0;
34126
34127 strcpy (buffer, name);
34128 p = strchr (buffer, '$');
34129 while (p) {
34130 *p = '_';
34131 dollar_inside++;
34132 p = strchr (p + 1, '$');
34133 }
34134 if (TREE_PUBLIC (n->decl))
34135 {
34136 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34137 {
34138 if (dollar_inside) {
34139 if (data->function_descriptor)
34140 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34141 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34142 }
34143 if (data->function_descriptor)
34144 {
34145 fputs ("\t.globl .", data->file);
34146 RS6000_OUTPUT_BASENAME (data->file, buffer);
34147 putc ('\n', data->file);
34148 }
34149 fputs ("\t.globl ", data->file);
34150 RS6000_OUTPUT_BASENAME (data->file, buffer);
34151 putc ('\n', data->file);
34152 }
34153 #ifdef ASM_WEAKEN_DECL
34154 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34155 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34156 #endif
34157 }
34158 else
34159 {
34160 if (dollar_inside)
34161 {
34162 if (data->function_descriptor)
34163 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34164 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34165 }
34166 if (data->function_descriptor)
34167 {
34168 fputs ("\t.lglobl .", data->file);
34169 RS6000_OUTPUT_BASENAME (data->file, buffer);
34170 putc ('\n', data->file);
34171 }
34172 fputs ("\t.lglobl ", data->file);
34173 RS6000_OUTPUT_BASENAME (data->file, buffer);
34174 putc ('\n', data->file);
34175 }
34176 if (data->function_descriptor)
34177 fputs (".", data->file);
34178 RS6000_OUTPUT_BASENAME (data->file, buffer);
34179 fputs (":\n", data->file);
34180 return false;
34181 }
34182
34183
34184 #ifdef HAVE_GAS_HIDDEN
34185 /* Helper function to calculate visibility of a DECL
34186 and return the value as a const string. */
34187
34188 static const char *
34189 rs6000_xcoff_visibility (tree decl)
34190 {
34191 static const char * const visibility_types[] = {
34192 "", ",protected", ",hidden", ",internal"
34193 };
34194
34195 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34196 return visibility_types[vis];
34197 }
34198 #endif
34199
34200
34201 /* This macro produces the initial definition of a function name.
34202 On the RS/6000, we need to place an extra '.' in the function name and
34203 output the function descriptor.
34204 Dollar signs are converted to underscores.
34205
34206 The csect for the function will have already been created when
34207 text_section was selected. We do have to go back to that csect, however.
34208
34209 The third and fourth parameters to the .function pseudo-op (16 and 044)
34210 are placeholders which no longer have any use.
34211
34212 Because AIX assembler's .set command has unexpected semantics, we output
34213 all aliases as alternative labels in front of the definition. */
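/* Output sketch (added commentary): for a public, non-weak 32-bit
   function "foo" with no aliases or '$' renaming, the code below emits
   roughly:

       .globl .foo
       .csect foo[DS]
   foo:
       .long .foo, TOC[tc0], 0

   followed by a switch to the function's text csect and the ".foo:"
   code label.  */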
34214
34215 void
34216 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34217 {
34218 char *buffer = (char *) alloca (strlen (name) + 1);
34219 char *p;
34220 int dollar_inside = 0;
34221 struct declare_alias_data data = {file, false};
34222
34223 strcpy (buffer, name);
34224 p = strchr (buffer, '$');
34225 while (p) {
34226 *p = '_';
34227 dollar_inside++;
34228 p = strchr (p + 1, '$');
34229 }
34230 if (TREE_PUBLIC (decl))
34231 {
34232 if (!RS6000_WEAK || !DECL_WEAK (decl))
34233 {
34234 if (dollar_inside) {
34235 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34236 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34237 }
34238 fputs ("\t.globl .", file);
34239 RS6000_OUTPUT_BASENAME (file, buffer);
34240 #ifdef HAVE_GAS_HIDDEN
34241 fputs (rs6000_xcoff_visibility (decl), file);
34242 #endif
34243 putc ('\n', file);
34244 }
34245 }
34246 else
34247 {
34248 if (dollar_inside) {
34249 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34250 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34251 }
34252 fputs ("\t.lglobl .", file);
34253 RS6000_OUTPUT_BASENAME (file, buffer);
34254 putc ('\n', file);
34255 }
34256 fputs ("\t.csect ", file);
34257 RS6000_OUTPUT_BASENAME (file, buffer);
34258 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34259 RS6000_OUTPUT_BASENAME (file, buffer);
34260 fputs (":\n", file);
34261 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34262 &data, true);
34263 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34264 RS6000_OUTPUT_BASENAME (file, buffer);
34265 fputs (", TOC[tc0], 0\n", file);
34266 in_section = NULL;
34267 switch_to_section (function_section (decl));
34268 putc ('.', file);
34269 RS6000_OUTPUT_BASENAME (file, buffer);
34270 fputs (":\n", file);
34271 data.function_descriptor = true;
34272 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34273 &data, true);
34274 if (!DECL_IGNORED_P (decl))
34275 {
34276 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34277 xcoffout_declare_function (file, decl, buffer);
34278 else if (write_symbols == DWARF2_DEBUG)
34279 {
34280 name = (*targetm.strip_name_encoding) (name);
34281 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34282 }
34283 }
34284 return;
34285 }
34286
34287
34288 /* Output assembly language to globalize a symbol from a DECL,
34289 possibly with visibility. */
34290
34291 void
34292 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34293 {
34294 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34295 fputs (GLOBAL_ASM_OP, stream);
34296 RS6000_OUTPUT_BASENAME (stream, name);
34297 #ifdef HAVE_GAS_HIDDEN
34298 fputs (rs6000_xcoff_visibility (decl), stream);
34299 #endif
34300 putc ('\n', stream);
34301 }
34302
34303 /* Output assembly language to define a symbol as COMMON from a DECL,
34304 possibly with visibility. */
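/* Worked example (added commentary): SIZE 16 with ALIGN of 128 bits
   gives align2 = floor_log2 (128 / BITS_PER_UNIT) = 4, so the emitted
   directive is COMMON_ASM_OP (typically ".comm") followed by
   "name,16,4" plus any visibility suffix.  */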
34305
34306 void
34307 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34308 tree decl ATTRIBUTE_UNUSED,
34309 const char *name,
34310 unsigned HOST_WIDE_INT size,
34311 unsigned HOST_WIDE_INT align)
34312 {
34313 unsigned HOST_WIDE_INT align2 = 2;
34314
34315 if (align > 32)
34316 align2 = floor_log2 (align / BITS_PER_UNIT);
34317 else if (size > 4)
34318 align2 = 3;
34319
34320 fputs (COMMON_ASM_OP, stream);
34321 RS6000_OUTPUT_BASENAME (stream, name);
34322
34323 fprintf (stream,
34324 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34325 size, align2);
34326
34327 #ifdef HAVE_GAS_HIDDEN
34328 if (decl != NULL)
34329 fputs (rs6000_xcoff_visibility (decl), stream);
34330 #endif
34331 putc ('\n', stream);
34332 }
34333
34334 /* This macro produces the initial definition of an object (variable) name.
34335 Because AIX assembler's .set command has unexpected semantics, we output
34336 all aliases as alternative labels in front of the definition. */
34337
34338 void
34339 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34340 {
34341 struct declare_alias_data data = {file, false};
34342 RS6000_OUTPUT_BASENAME (file, name);
34343 fputs (":\n", file);
34344 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34345 &data, true);
34346 }
34347
34348 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34349
34350 void
34351 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34352 {
34353 fputs (integer_asm_op (size, FALSE), file);
34354 assemble_name (file, label);
34355 fputs ("-$", file);
34356 }
34357
34358 /* Output a symbol offset relative to the dbase for the current object.
34359 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34360 signed offsets.
34361
34362 __gcc_unwind_dbase is embedded in all executables/libraries through
34363 libgcc/config/rs6000/crtdbase.S. */
34364
34365 void
34366 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34367 {
34368 fputs (integer_asm_op (size, FALSE), file);
34369 assemble_name (file, label);
34370 fputs("-__gcc_unwind_dbase", file);
34371 }
34372
34373 #ifdef HAVE_AS_TLS
34374 static void
34375 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34376 {
34377 rtx symbol;
34378 int flags;
34379 const char *symname;
34380
34381 default_encode_section_info (decl, rtl, first);
34382
34383 /* Careful not to prod global register variables. */
34384 if (!MEM_P (rtl))
34385 return;
34386 symbol = XEXP (rtl, 0);
34387 if (!SYMBOL_REF_P (symbol))
34388 return;
34389
34390 flags = SYMBOL_REF_FLAGS (symbol);
34391
34392 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34393 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34394
34395 SYMBOL_REF_FLAGS (symbol) = flags;
34396
34397 /* Append mapping class to extern decls. */
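/* On XCOFF the storage mapping class is encoded in the symbol name:
   "[DS]" marks a function descriptor csect and "[UA]" unclassified data,
   which is what gets appended below.  */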
34398 symname = XSTR (symbol, 0);
34399 if (decl /* sync condition with assemble_external () */
34400 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34401 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34402 || TREE_CODE (decl) == FUNCTION_DECL)
34403 && symname[strlen (symname) - 1] != ']')
34404 {
34405 char *newname = (char *) alloca (strlen (symname) + 5);
34406 strcpy (newname, symname);
34407 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34408 ? "[DS]" : "[UA]"));
34409 XSTR (symbol, 0) = ggc_strdup (newname);
34410 }
34411 }
34412 #endif /* HAVE_AS_TLS */
34413 #endif /* TARGET_XCOFF */
34414
34415 void
34416 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34417 const char *name, const char *val)
34418 {
34419 fputs ("\t.weak\t", stream);
34420 RS6000_OUTPUT_BASENAME (stream, name);
34421 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34422 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34423 {
34424 if (TARGET_XCOFF)
34425 fputs ("[DS]", stream);
34426 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34427 if (TARGET_XCOFF)
34428 fputs (rs6000_xcoff_visibility (decl), stream);
34429 #endif
34430 fputs ("\n\t.weak\t.", stream);
34431 RS6000_OUTPUT_BASENAME (stream, name);
34432 }
34433 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34434 if (TARGET_XCOFF)
34435 fputs (rs6000_xcoff_visibility (decl), stream);
34436 #endif
34437 fputc ('\n', stream);
34438 if (val)
34439 {
34440 #ifdef ASM_OUTPUT_DEF
34441 ASM_OUTPUT_DEF (stream, name, val);
34442 #endif
34443 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34444 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34445 {
34446 fputs ("\t.set\t.", stream);
34447 RS6000_OUTPUT_BASENAME (stream, name);
34448 fputs (",.", stream);
34449 RS6000_OUTPUT_BASENAME (stream, val);
34450 fputc ('\n', stream);
34451 }
34452 }
34453 }
34454
34455
34456 /* Return true if INSN should not be copied. */
34457
34458 static bool
34459 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34460 {
34461 return recog_memoized (insn) >= 0
34462 && get_attr_cannot_copy (insn);
34463 }
34464
34465 /* Compute a (partial) cost for rtx X. Return true if the complete
34466 cost has been computed, and false if subexpressions should be
34467 scanned. In either case, *TOTAL contains the cost result. */
34468
34469 static bool
34470 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34471 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34472 {
34473 int code = GET_CODE (x);
34474
34475 switch (code)
34476 {
34477 /* On the RS/6000, if it is valid in the insn, it is free. */
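/* For example, an addi immediate satisfying constraint 'I' (a signed
   16-bit value) or an addis immediate satisfying 'L' (a signed 16-bit
   value shifted left 16) folds into the arithmetic insn, so the
   constant itself costs nothing.  */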
34478 case CONST_INT:
34479 if (((outer_code == SET
34480 || outer_code == PLUS
34481 || outer_code == MINUS)
34482 && (satisfies_constraint_I (x)
34483 || satisfies_constraint_L (x)))
34484 || (outer_code == AND
34485 && (satisfies_constraint_K (x)
34486 || (mode == SImode
34487 ? satisfies_constraint_L (x)
34488 : satisfies_constraint_J (x))))
34489 || ((outer_code == IOR || outer_code == XOR)
34490 && (satisfies_constraint_K (x)
34491 || (mode == SImode
34492 ? satisfies_constraint_L (x)
34493 : satisfies_constraint_J (x))))
34494 || outer_code == ASHIFT
34495 || outer_code == ASHIFTRT
34496 || outer_code == LSHIFTRT
34497 || outer_code == ROTATE
34498 || outer_code == ROTATERT
34499 || outer_code == ZERO_EXTRACT
34500 || (outer_code == MULT
34501 && satisfies_constraint_I (x))
34502 || ((outer_code == DIV || outer_code == UDIV
34503 || outer_code == MOD || outer_code == UMOD)
34504 && exact_log2 (INTVAL (x)) >= 0)
34505 || (outer_code == COMPARE
34506 && (satisfies_constraint_I (x)
34507 || satisfies_constraint_K (x)))
34508 || ((outer_code == EQ || outer_code == NE)
34509 && (satisfies_constraint_I (x)
34510 || satisfies_constraint_K (x)
34511 || (mode == SImode
34512 ? satisfies_constraint_L (x)
34513 : satisfies_constraint_J (x))))
34514 || (outer_code == GTU
34515 && satisfies_constraint_I (x))
34516 || (outer_code == LTU
34517 && satisfies_constraint_P (x)))
34518 {
34519 *total = 0;
34520 return true;
34521 }
34522 else if ((outer_code == PLUS
34523 && reg_or_add_cint_operand (x, VOIDmode))
34524 || (outer_code == MINUS
34525 && reg_or_sub_cint_operand (x, VOIDmode))
34526 || ((outer_code == SET
34527 || outer_code == IOR
34528 || outer_code == XOR)
34529 && (INTVAL (x)
34530 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34531 {
34532 *total = COSTS_N_INSNS (1);
34533 return true;
34534 }
34535 /* FALLTHRU */
34536
34537 case CONST_DOUBLE:
34538 case CONST_WIDE_INT:
34539 case CONST:
34540 case HIGH:
34541 case SYMBOL_REF:
34542 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34543 return true;
34544
34545 case MEM:
34546 /* When optimizing for size, MEM should be slightly more expensive
34547 than generating the address, e.g., (plus (reg) (const)).
34548 L1 cache latency is about two instructions. */
34549 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34550 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34551 *total += COSTS_N_INSNS (100);
34552 return true;
34553
34554 case LABEL_REF:
34555 *total = 0;
34556 return true;
34557
34558 case PLUS:
34559 case MINUS:
34560 if (FLOAT_MODE_P (mode))
34561 *total = rs6000_cost->fp;
34562 else
34563 *total = COSTS_N_INSNS (1);
34564 return false;
34565
34566 case MULT:
34567 if (CONST_INT_P (XEXP (x, 1))
34568 && satisfies_constraint_I (XEXP (x, 1)))
34569 {
34570 if (INTVAL (XEXP (x, 1)) >= -256
34571 && INTVAL (XEXP (x, 1)) <= 255)
34572 *total = rs6000_cost->mulsi_const9;
34573 else
34574 *total = rs6000_cost->mulsi_const;
34575 }
34576 else if (mode == SFmode)
34577 *total = rs6000_cost->fp;
34578 else if (FLOAT_MODE_P (mode))
34579 *total = rs6000_cost->dmul;
34580 else if (mode == DImode)
34581 *total = rs6000_cost->muldi;
34582 else
34583 *total = rs6000_cost->mulsi;
34584 return false;
34585
34586 case FMA:
34587 if (mode == SFmode)
34588 *total = rs6000_cost->fp;
34589 else
34590 *total = rs6000_cost->dmul;
34591 break;
34592
34593 case DIV:
34594 case MOD:
34595 if (FLOAT_MODE_P (mode))
34596 {
34597 *total = mode == DFmode ? rs6000_cost->ddiv
34598 : rs6000_cost->sdiv;
34599 return false;
34600 }
34601 /* FALLTHRU */
34602
34603 case UDIV:
34604 case UMOD:
34605 if (CONST_INT_P (XEXP (x, 1))
34606 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34607 {
34608 if (code == DIV || code == MOD)
34609 /* Shift, addze */
34610 *total = COSTS_N_INSNS (2);
34611 else
34612 /* Shift */
34613 *total = COSTS_N_INSNS (1);
34614 }
34615 else
34616 {
34617 if (GET_MODE (XEXP (x, 1)) == DImode)
34618 *total = rs6000_cost->divdi;
34619 else
34620 *total = rs6000_cost->divsi;
34621 }
34622 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34623 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34624 *total += COSTS_N_INSNS (2);
34625 return false;
34626
34627 case CTZ:
34628 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34629 return false;
34630
34631 case FFS:
34632 *total = COSTS_N_INSNS (4);
34633 return false;
34634
34635 case POPCOUNT:
34636 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34637 return false;
34638
34639 case PARITY:
34640 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34641 return false;
34642
34643 case NOT:
34644 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34645 *total = 0;
34646 else
34647 *total = COSTS_N_INSNS (1);
34648 return false;
34649
34650 case AND:
34651 if (CONST_INT_P (XEXP (x, 1)))
34652 {
34653 rtx left = XEXP (x, 0);
34654 rtx_code left_code = GET_CODE (left);
34655
34656 /* rotate-and-mask: 1 insn. */
34657 if ((left_code == ROTATE
34658 || left_code == ASHIFT
34659 || left_code == LSHIFTRT)
34660 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34661 {
34662 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34663 if (!CONST_INT_P (XEXP (left, 1)))
34664 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34665 *total += COSTS_N_INSNS (1);
34666 return true;
34667 }
34668
34669 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34670 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34671 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34672 || (val & 0xffff) == val
34673 || (val & 0xffff0000) == val
34674 || ((val & 0xffff) == 0 && mode == SImode))
34675 {
34676 *total = rtx_cost (left, mode, AND, 0, speed);
34677 *total += COSTS_N_INSNS (1);
34678 return true;
34679 }
34680
34681 /* 2 insns. */
34682 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34683 {
34684 *total = rtx_cost (left, mode, AND, 0, speed);
34685 *total += COSTS_N_INSNS (2);
34686 return true;
34687 }
34688 }
34689
34690 *total = COSTS_N_INSNS (1);
34691 return false;
34692
34693 case IOR:
34694 /* FIXME */
34695 *total = COSTS_N_INSNS (1);
34696 return true;
34697
34698 case CLZ:
34699 case XOR:
34700 case ZERO_EXTRACT:
34701 *total = COSTS_N_INSNS (1);
34702 return false;
34703
34704 case ASHIFT:
34705 /* EXTSWSLI is a combined sign-extend-and-shift instruction. Don't count
34706 the sign extend and the shift separately within the insn. */
34707 if (TARGET_EXTSWSLI && mode == DImode
34708 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34709 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34710 {
34711 *total = 0;
34712 return false;
34713 }
34714 /* fall through */
34715
34716 case ASHIFTRT:
34717 case LSHIFTRT:
34718 case ROTATE:
34719 case ROTATERT:
34720 /* Handle mul_highpart. */
34721 if (outer_code == TRUNCATE
34722 && GET_CODE (XEXP (x, 0)) == MULT)
34723 {
34724 if (mode == DImode)
34725 *total = rs6000_cost->muldi;
34726 else
34727 *total = rs6000_cost->mulsi;
34728 return true;
34729 }
34730 else if (outer_code == AND)
34731 *total = 0;
34732 else
34733 *total = COSTS_N_INSNS (1);
34734 return false;
34735
34736 case SIGN_EXTEND:
34737 case ZERO_EXTEND:
34738 if (MEM_P (XEXP (x, 0)))
34739 *total = 0;
34740 else
34741 *total = COSTS_N_INSNS (1);
34742 return false;
34743
34744 case COMPARE:
34745 case NEG:
34746 case ABS:
34747 if (!FLOAT_MODE_P (mode))
34748 {
34749 *total = COSTS_N_INSNS (1);
34750 return false;
34751 }
34752 /* FALLTHRU */
34753
34754 case FLOAT:
34755 case UNSIGNED_FLOAT:
34756 case FIX:
34757 case UNSIGNED_FIX:
34758 case FLOAT_TRUNCATE:
34759 *total = rs6000_cost->fp;
34760 return false;
34761
34762 case FLOAT_EXTEND:
34763 if (mode == DFmode)
34764 *total = rs6000_cost->sfdf_convert;
34765 else
34766 *total = rs6000_cost->fp;
34767 return false;
34768
34769 case UNSPEC:
34770 switch (XINT (x, 1))
34771 {
34772 case UNSPEC_FRSP:
34773 *total = rs6000_cost->fp;
34774 return true;
34775
34776 default:
34777 break;
34778 }
34779 break;
34780
34781 case CALL:
34782 case IF_THEN_ELSE:
34783 if (!speed)
34784 {
34785 *total = COSTS_N_INSNS (1);
34786 return true;
34787 }
34788 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34789 {
34790 *total = rs6000_cost->fp;
34791 return false;
34792 }
34793 break;
34794
34795 case NE:
34796 case EQ:
34797 case GTU:
34798 case LTU:
34799 /* Carry bit requires mode == Pmode.
34800 NEG or PLUS already counted so only add one. */
34801 if (mode == Pmode
34802 && (outer_code == NEG || outer_code == PLUS))
34803 {
34804 *total = COSTS_N_INSNS (1);
34805 return true;
34806 }
34807 /* FALLTHRU */
34808
34809 case GT:
34810 case LT:
34811 case UNORDERED:
34812 if (outer_code == SET)
34813 {
34814 if (XEXP (x, 1) == const0_rtx)
34815 {
34816 *total = COSTS_N_INSNS (2);
34817 return true;
34818 }
34819 else
34820 {
34821 *total = COSTS_N_INSNS (3);
34822 return false;
34823 }
34824 }
34825 /* CC COMPARE. */
34826 if (outer_code == COMPARE)
34827 {
34828 *total = 0;
34829 return true;
34830 }
34831 break;
34832
34833 default:
34834 break;
34835 }
34836
34837 return false;
34838 }
34839
34840 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34841
34842 static bool
34843 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34844 int opno, int *total, bool speed)
34845 {
34846 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34847
34848 fprintf (stderr,
34849 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34850 "opno = %d, total = %d, speed = %s, x:\n",
34851 ret ? "complete" : "scan inner",
34852 GET_MODE_NAME (mode),
34853 GET_RTX_NAME (outer_code),
34854 opno,
34855 *total,
34856 speed ? "true" : "false");
34857
34858 debug_rtx (x);
34859
34860 return ret;
34861 }
34862
34863 static int
34864 rs6000_insn_cost (rtx_insn *insn, bool speed)
34865 {
34866 if (recog_memoized (insn) < 0)
34867 return 0;
34868
34869 if (!speed)
34870 return get_attr_length (insn);
34871
34872 int cost = get_attr_cost (insn);
34873 if (cost > 0)
34874 return cost;
34875
34876 int n = get_attr_length (insn) / 4;
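/* PowerPC instructions are a fixed 4 bytes, so the length in bytes
   divided by 4 is the number of machine instructions emitted.  */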
34877 enum attr_type type = get_attr_type (insn);
34878
34879 switch (type)
34880 {
34881 case TYPE_LOAD:
34882 case TYPE_FPLOAD:
34883 case TYPE_VECLOAD:
34884 cost = COSTS_N_INSNS (n + 1);
34885 break;
34886
34887 case TYPE_MUL:
34888 switch (get_attr_size (insn))
34889 {
34890 case SIZE_8:
34891 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34892 break;
34893 case SIZE_16:
34894 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34895 break;
34896 case SIZE_32:
34897 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34898 break;
34899 case SIZE_64:
34900 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34901 break;
34902 default:
34903 gcc_unreachable ();
34904 }
34905 break;
34906 case TYPE_DIV:
34907 switch (get_attr_size (insn))
34908 {
34909 case SIZE_32:
34910 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34911 break;
34912 case SIZE_64:
34913 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34914 break;
34915 default:
34916 gcc_unreachable ();
34917 }
34918 break;
34919
34920 case TYPE_FP:
34921 cost = n * rs6000_cost->fp;
34922 break;
34923 case TYPE_DMUL:
34924 cost = n * rs6000_cost->dmul;
34925 break;
34926 case TYPE_SDIV:
34927 cost = n * rs6000_cost->sdiv;
34928 break;
34929 case TYPE_DDIV:
34930 cost = n * rs6000_cost->ddiv;
34931 break;
34932
34933 case TYPE_SYNC:
34934 case TYPE_LOAD_L:
34935 case TYPE_MFCR:
34936 case TYPE_MFCRF:
34937 cost = COSTS_N_INSNS (n + 2);
34938 break;
34939
34940 default:
34941 cost = COSTS_N_INSNS (n);
34942 }
34943
34944 return cost;
34945 }
34946
34947 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34948
34949 static int
34950 rs6000_debug_address_cost (rtx x, machine_mode mode,
34951 addr_space_t as, bool speed)
34952 {
34953 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34954
34955 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34956 ret, speed ? "true" : "false");
34957 debug_rtx (x);
34958
34959 return ret;
34960 }
34961
34962
34963 /* A C expression returning the cost of moving data from a register of class
34964 CLASS1 to one of CLASS2. */
34965
34966 static int
34967 rs6000_register_move_cost (machine_mode mode,
34968 reg_class_t from, reg_class_t to)
34969 {
34970 int ret;
34971
34972 if (TARGET_DEBUG_COST)
34973 dbg_cost_ctrl++;
34974
34975 /* Moves from/to GENERAL_REGS. */
34976 if (reg_classes_intersect_p (to, GENERAL_REGS)
34977 || reg_classes_intersect_p (from, GENERAL_REGS))
34978 {
34979 reg_class_t rclass = from;
34980
34981 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34982 rclass = to;
34983
34984 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34985 ret = (rs6000_memory_move_cost (mode, rclass, false)
34986 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34987
34988 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34989 shift. */
34990 else if (rclass == CR_REGS)
34991 ret = 4;
34992
34993 /* For those processors that have slow LR/CTR moves, make them more
34994 expensive than memory in order to bias spills to memory. */
34995 else if ((rs6000_tune == PROCESSOR_POWER6
34996 || rs6000_tune == PROCESSOR_POWER7
34997 || rs6000_tune == PROCESSOR_POWER8
34998 || rs6000_tune == PROCESSOR_POWER9)
34999 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
35000 ret = 6 * hard_regno_nregs (0, mode);
35001
35002 else
35003 /* A move will cost one instruction per GPR moved. */
35004 ret = 2 * hard_regno_nregs (0, mode);
35005 }
35006
35007 /* If we have VSX, we can easily move between FPR or Altivec registers. */
35008 else if (VECTOR_MEM_VSX_P (mode)
35009 && reg_classes_intersect_p (to, VSX_REGS)
35010 && reg_classes_intersect_p (from, VSX_REGS))
35011 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
35012
35013 /* Moving between two similar registers is just one instruction. */
35014 else if (reg_classes_intersect_p (to, from))
35015 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
35016
35017 /* Everything else has to go through GENERAL_REGS. */
35018 else
35019 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
35020 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
35021
35022 if (TARGET_DEBUG_COST)
35023 {
35024 if (dbg_cost_ctrl == 1)
35025 fprintf (stderr,
35026 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
35027 ret, GET_MODE_NAME (mode), reg_class_names[from],
35028 reg_class_names[to]);
35029 dbg_cost_ctrl--;
35030 }
35031
35032 return ret;
35033 }
35034
35035 /* A C expression returning the cost of moving data of MODE between a register
35036 and memory. */
35037
35038 static int
35039 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
35040 bool in ATTRIBUTE_UNUSED)
35041 {
35042 int ret;
35043
35044 if (TARGET_DEBUG_COST)
35045 dbg_cost_ctrl++;
35046
35047 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35048 ret = 4 * hard_regno_nregs (0, mode);
35049 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35050 || reg_classes_intersect_p (rclass, VSX_REGS)))
35051 ret = 4 * hard_regno_nregs (32, mode);
35052 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35053 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35054 else
35055 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35056
35057 if (TARGET_DEBUG_COST)
35058 {
35059 if (dbg_cost_ctrl == 1)
35060 fprintf (stderr,
35061 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35062 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35063 dbg_cost_ctrl--;
35064 }
35065
35066 return ret;
35067 }
35068
35069 /* Returns the decl of a target-specific builtin that implements the
35070 reciprocal of the function described by FNDECL, or NULL_TREE if not available. */
35071
35072 static tree
35073 rs6000_builtin_reciprocal (tree fndecl)
35074 {
35075 switch (DECL_FUNCTION_CODE (fndecl))
35076 {
35077 case VSX_BUILTIN_XVSQRTDP:
35078 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35079 return NULL_TREE;
35080
35081 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35082
35083 case VSX_BUILTIN_XVSQRTSP:
35084 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35085 return NULL_TREE;
35086
35087 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35088
35089 default:
35090 return NULL_TREE;
35091 }
35092 }
35093
35094 /* Load up a constant. If the mode is a vector mode, splat the value across
35095 all of the vector elements. */
35096
35097 static rtx
35098 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35099 {
35100 rtx reg;
35101
35102 if (mode == SFmode || mode == DFmode)
35103 {
35104 rtx d = const_double_from_real_value (dconst, mode);
35105 reg = force_reg (mode, d);
35106 }
35107 else if (mode == V4SFmode)
35108 {
35109 rtx d = const_double_from_real_value (dconst, SFmode);
35110 rtvec v = gen_rtvec (4, d, d, d, d);
35111 reg = gen_reg_rtx (mode);
35112 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35113 }
35114 else if (mode == V2DFmode)
35115 {
35116 rtx d = const_double_from_real_value (dconst, DFmode);
35117 rtvec v = gen_rtvec (2, d, d);
35118 reg = gen_reg_rtx (mode);
35119 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35120 }
35121 else
35122 gcc_unreachable ();
35123
35124 return reg;
35125 }
35126
35127 /* Generate an FMA instruction. */
35128
35129 static void
35130 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35131 {
35132 machine_mode mode = GET_MODE (target);
35133 rtx dst;
35134
35135 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35136 gcc_assert (dst != NULL);
35137
35138 if (dst != target)
35139 emit_move_insn (target, dst);
35140 }
35141
35142 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35143
35144 static void
35145 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35146 {
35147 machine_mode mode = GET_MODE (dst);
35148 rtx r;
35149
35150 /* This is a tad more complicated, since the fnma_optab is for
35151 a different expression: fma(-m1, m2, a), which is the same
35152 thing except in the case of signed zeros.
35153
35154 Fortunately we know that if FMA is supported, FNMSUB is
35155 also supported in the ISA. Just expand it directly. */
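/* Algebraically, -fma (m1, m2, -a) = -(m1*m2 - a) = a - m1*m2, which is
   precisely the PowerPC fnmsub operation.  */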
35156
35157 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35158
35159 r = gen_rtx_NEG (mode, a);
35160 r = gen_rtx_FMA (mode, m1, m2, r);
35161 r = gen_rtx_NEG (mode, r);
35162 emit_insn (gen_rtx_SET (dst, r));
35163 }
35164
35165 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35166 add a reg_note saying that this was a division. Support both scalar and
35167 vector divide. Assumes no trapping math and finite arguments. */
35168
35169 void
35170 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35171 {
35172 machine_mode mode = GET_MODE (dst);
35173 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35174 int i;
35175
35176 /* Low precision estimates guarantee 5 bits of accuracy. High
35177 precision estimates guarantee 14 bits of accuracy. SFmode
35178 requires 23 bits of accuracy. DFmode requires 52 bits of
35179 accuracy. Each pass at least doubles the accuracy, leading
35180 to the following. */
35181 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35182 if (mode == DFmode || mode == V2DFmode)
35183 passes++;
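/* E.g. with a low-precision estimate the accuracy doubles as
   5 -> 10 -> 20 -> 40 bits, so three passes reach SFmode's 23 bits and a
   fourth reaches DFmode's 52, while the 14-bit high-precision estimate
   needs only one pass (two for DFmode).  */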
35184
35185 enum insn_code code = optab_handler (smul_optab, mode);
35186 insn_gen_fn gen_mul = GEN_FCN (code);
35187
35188 gcc_assert (code != CODE_FOR_nothing);
35189
35190 one = rs6000_load_constant_and_splat (mode, dconst1);
35191
35192 /* x0 = 1./d estimate */
35193 x0 = gen_reg_rtx (mode);
35194 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35195 UNSPEC_FRES)));
35196
35197 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35198 if (passes > 1) {
35199
35200 /* e0 = 1. - d * x0 */
35201 e0 = gen_reg_rtx (mode);
35202 rs6000_emit_nmsub (e0, d, x0, one);
35203
35204 /* x1 = x0 + e0 * x0 */
35205 x1 = gen_reg_rtx (mode);
35206 rs6000_emit_madd (x1, e0, x0, x0);
35207
35208 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35209 ++i, xprev = xnext, eprev = enext) {
35210
35211 /* enext = eprev * eprev */
35212 enext = gen_reg_rtx (mode);
35213 emit_insn (gen_mul (enext, eprev, eprev));
35214
35215 /* xnext = xprev + enext * xprev */
35216 xnext = gen_reg_rtx (mode);
35217 rs6000_emit_madd (xnext, enext, xprev, xprev);
35218 }
35219
35220 } else
35221 xprev = x0;
35222
35223 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35224
35225 /* u = n * xprev */
35226 u = gen_reg_rtx (mode);
35227 emit_insn (gen_mul (u, n, xprev));
35228
35229 /* v = n - (d * u) */
35230 v = gen_reg_rtx (mode);
35231 rs6000_emit_nmsub (v, d, u, n);
35232
35233 /* dst = (v * xprev) + u */
35234 rs6000_emit_madd (dst, v, xprev, u);
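/* Expanding the last two steps: dst = (n - d*u) * x + u with u = n * x,
   i.e. dst = n*x*(2 - d*x), so the multiplication by N is folded into
   the final correction.  */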
35235
35236 if (note_p)
35237 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35238 }
35239
35240 /* Goldschmidt's Algorithm for single/double-precision floating point
35241 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35242
35243 void
35244 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35245 {
35246 machine_mode mode = GET_MODE (src);
35247 rtx e = gen_reg_rtx (mode);
35248 rtx g = gen_reg_rtx (mode);
35249 rtx h = gen_reg_rtx (mode);
35250
35251 /* Low precision estimates guarantee 5 bits of accuracy. High
35252 precision estimates guarantee 14 bits of accuracy. SFmode
35253 requires 23 bits of accuracy. DFmode requires 52 bits of
35254 accuracy. Each pass at least doubles the accuracy, leading
35255 to the following. */
35256 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35257 if (mode == DFmode || mode == V2DFmode)
35258 passes++;
35259
35260 int i;
35261 rtx mhalf;
35262 enum insn_code code = optab_handler (smul_optab, mode);
35263 insn_gen_fn gen_mul = GEN_FCN (code);
35264
35265 gcc_assert (code != CODE_FOR_nothing);
35266
35267 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35268
35269 /* e = rsqrt estimate */
35270 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35271 UNSPEC_RSQRT)));
35272
35273 /* If SRC == 0.0, filter out the infinite estimate to prevent a NaN from sqrt(0.0). */
35274 if (!recip)
35275 {
35276 rtx zero = force_reg (mode, CONST0_RTX (mode));
35277
35278 if (mode == SFmode)
35279 {
35280 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35281 e, zero, mode, 0);
35282 if (target != e)
35283 emit_move_insn (e, target);
35284 }
35285 else
35286 {
35287 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35288 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35289 }
35290 }
35291
35292 /* g = sqrt estimate. */
35293 emit_insn (gen_mul (g, e, src));
35294 /* h = 1/(2*sqrt) estimate. */
35295 emit_insn (gen_mul (h, e, mhalf));
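/* Each Goldschmidt iteration below computes t = 1/2 - g*h and refines
   g' = g + g*t and h' = h + h*t; as g*h converges to 1/2, g converges to
   sqrt(src) and h to 1/(2*sqrt(src)).  */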
35296
35297 if (recip)
35298 {
35299 if (passes == 1)
35300 {
35301 rtx t = gen_reg_rtx (mode);
35302 rs6000_emit_nmsub (t, g, h, mhalf);
35303 /* Apply correction directly to 1/rsqrt estimate. */
35304 rs6000_emit_madd (dst, e, t, e);
35305 }
35306 else
35307 {
35308 for (i = 0; i < passes; i++)
35309 {
35310 rtx t1 = gen_reg_rtx (mode);
35311 rtx g1 = gen_reg_rtx (mode);
35312 rtx h1 = gen_reg_rtx (mode);
35313
35314 rs6000_emit_nmsub (t1, g, h, mhalf);
35315 rs6000_emit_madd (g1, g, t1, g);
35316 rs6000_emit_madd (h1, h, t1, h);
35317
35318 g = g1;
35319 h = h1;
35320 }
35321 /* Multiply by 2 for 1/rsqrt. */
35322 emit_insn (gen_add3_insn (dst, h, h));
35323 }
35324 }
35325 else
35326 {
35327 rtx t = gen_reg_rtx (mode);
35328 rs6000_emit_nmsub (t, g, h, mhalf);
35329 rs6000_emit_madd (dst, g, t, g);
35330 }
35331
35332 return;
35333 }
35334
35335 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35336 (Power7) targets. DST is the target, and SRC is the argument operand. */
35337
35338 void
35339 rs6000_emit_popcount (rtx dst, rtx src)
35340 {
35341 machine_mode mode = GET_MODE (dst);
35342 rtx tmp1, tmp2;
35343
35344 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35345 if (TARGET_POPCNTD)
35346 {
35347 if (mode == SImode)
35348 emit_insn (gen_popcntdsi2 (dst, src));
35349 else
35350 emit_insn (gen_popcntddi2 (dst, src));
35351 return;
35352 }
35353
35354 tmp1 = gen_reg_rtx (mode);
35355
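/* Without popcntw/popcntd, popcntb leaves a population count in each
   byte; multiplying by 0x01...01 sums those byte counts into the most
   significant byte, which the final right shift extracts.  */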
35356 if (mode == SImode)
35357 {
35358 emit_insn (gen_popcntbsi2 (tmp1, src));
35359 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35360 NULL_RTX, 0);
35361 tmp2 = force_reg (SImode, tmp2);
35362 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35363 }
35364 else
35365 {
35366 emit_insn (gen_popcntbdi2 (tmp1, src));
35367 tmp2 = expand_mult (DImode, tmp1,
35368 GEN_INT ((HOST_WIDE_INT)
35369 0x01010101 << 32 | 0x01010101),
35370 NULL_RTX, 0);
35371 tmp2 = force_reg (DImode, tmp2);
35372 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35373 }
35374 }
35375
35376
35377 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35378 target, and SRC is the argument operand. */
35379
35380 void
35381 rs6000_emit_parity (rtx dst, rtx src)
35382 {
35383 machine_mode mode = GET_MODE (dst);
35384 rtx tmp;
35385
35386 tmp = gen_reg_rtx (mode);
35387
35388 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35389 if (TARGET_CMPB)
35390 {
35391 if (mode == SImode)
35392 {
35393 emit_insn (gen_popcntbsi2 (tmp, src));
35394 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35395 }
35396 else
35397 {
35398 emit_insn (gen_popcntbdi2 (tmp, src));
35399 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35400 }
35401 return;
35402 }
35403
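/* Without prtyw/prtyd, reduce popcntb's per-byte counts to a single
   parity bit, either via the multiply trick used for popcount or by a
   shift/xor folding sequence, whichever the cost model says is cheaper.  */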
35404 if (mode == SImode)
35405 {
35406 /* Is mult+shift >= shift+xor+shift+xor? */
35407 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35408 {
35409 rtx tmp1, tmp2, tmp3, tmp4;
35410
35411 tmp1 = gen_reg_rtx (SImode);
35412 emit_insn (gen_popcntbsi2 (tmp1, src));
35413
35414 tmp2 = gen_reg_rtx (SImode);
35415 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35416 tmp3 = gen_reg_rtx (SImode);
35417 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35418
35419 tmp4 = gen_reg_rtx (SImode);
35420 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35421 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35422 }
35423 else
35424 rs6000_emit_popcount (tmp, src);
35425 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35426 }
35427 else
35428 {
35429 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35430 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35431 {
35432 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35433
35434 tmp1 = gen_reg_rtx (DImode);
35435 emit_insn (gen_popcntbdi2 (tmp1, src));
35436
35437 tmp2 = gen_reg_rtx (DImode);
35438 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35439 tmp3 = gen_reg_rtx (DImode);
35440 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35441
35442 tmp4 = gen_reg_rtx (DImode);
35443 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35444 tmp5 = gen_reg_rtx (DImode);
35445 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35446
35447 tmp6 = gen_reg_rtx (DImode);
35448 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35449 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35450 }
35451 else
35452 rs6000_emit_popcount (tmp, src);
35453 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35454 }
35455 }
35456
35457 /* Expand an Altivec constant permutation for little endian mode.
35458 OP0 and OP1 are the input vectors and TARGET is the output vector.
35459 SEL specifies the constant permutation vector.
35460
35461 There are two issues: First, the two input operands must be
35462 swapped so that together they form a double-wide array in LE
35463 order. Second, the vperm instruction has surprising behavior
35464 in LE mode: it interprets the elements of the source vectors
35465 in BE mode ("left to right") and interprets the elements of
35466 the destination vector in LE mode ("right to left"). To
35467 correct for this, we must subtract each element of the permute
35468 control vector from 31.
35469
35470 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35471 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35472 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35473 serve as the permute control vector. Then, in BE mode,
35474
35475 vperm 9,10,11,12
35476
35477 places the desired result in vr9. However, in LE mode the
35478 vector contents will be
35479
35480 vr10 = 00000003 00000002 00000001 00000000
35481 vr11 = 00000007 00000006 00000005 00000004
35482
35483 The result of the vperm using the same permute control vector is
35484
35485 vr9 = 05000000 07000000 01000000 03000000
35486
35487 That is, the leftmost 4 bytes of vr10 are interpreted as the
35488 source for the rightmost 4 bytes of vr9, and so on.
35489
35490 If we change the permute control vector to
35491
35492 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35493
35494 and issue
35495
35496 vperm 9,11,10,12
35497
35498 we get the desired
35499
35500 vr9 = 00000006 00000004 00000002 00000000. */
35501
35502 static void
35503 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35504 const vec_perm_indices &sel)
35505 {
35506 unsigned int i;
35507 rtx perm[16];
35508 rtx constv, unspec;
35509
35510 /* Unpack and adjust the constant selector. */
35511 for (i = 0; i < 16; ++i)
35512 {
35513 unsigned int elt = 31 - (sel[i] & 31);
35514 perm[i] = GEN_INT (elt);
35515 }
35516
35517 /* Expand to a permute, swapping the inputs and using the
35518 adjusted selector. */
35519 if (!REG_P (op0))
35520 op0 = force_reg (V16QImode, op0);
35521 if (!REG_P (op1))
35522 op1 = force_reg (V16QImode, op1);
35523
35524 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35525 constv = force_reg (V16QImode, constv);
35526 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35527 UNSPEC_VPERM);
35528 if (!REG_P (target))
35529 {
35530 rtx tmp = gen_reg_rtx (V16QImode);
35531 emit_move_insn (tmp, unspec);
35532 unspec = tmp;
35533 }
35534
35535 emit_move_insn (target, unspec);
35536 }
35537
35538 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35539 permute control vector. But here it's not a constant, so we must
35540 generate a vector NAND or NOR to do the adjustment. */
35541
35542 void
35543 altivec_expand_vec_perm_le (rtx operands[4])
35544 {
35545 rtx notx, iorx, unspec;
35546 rtx target = operands[0];
35547 rtx op0 = operands[1];
35548 rtx op1 = operands[2];
35549 rtx sel = operands[3];
35550 rtx tmp = target;
35551 rtx norreg = gen_reg_rtx (V16QImode);
35552 machine_mode mode = GET_MODE (target);
35553
35554 /* Get everything in regs so the pattern matches. */
35555 if (!REG_P (op0))
35556 op0 = force_reg (mode, op0);
35557 if (!REG_P (op1))
35558 op1 = force_reg (mode, op1);
35559 if (!REG_P (sel))
35560 sel = force_reg (V16QImode, sel);
35561 if (!REG_P (target))
35562 tmp = gen_reg_rtx (mode);
35563
35564 if (TARGET_P9_VECTOR)
35565 {
35566 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35567 UNSPEC_VPERMR);
35568 }
35569 else
35570 {
35571 /* Invert the selector with a VNAND if available, else a VNOR.
35572 The VNAND is preferred for future fusion opportunities. */
35573 notx = gen_rtx_NOT (V16QImode, sel);
35574 iorx = (TARGET_P8_VECTOR
35575 ? gen_rtx_IOR (V16QImode, notx, notx)
35576 : gen_rtx_AND (V16QImode, notx, notx));
35577 emit_insn (gen_rtx_SET (norreg, iorx));
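/* (ior (not x) (not x)) matches the vnand pattern and
   (and (not x) (not x)) matches vnor; either way the selector bytes are
   complemented, mapping each 5-bit index I to 31 - I as required.  */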
35578
35579 /* Permute with operands reversed and adjusted selector. */
35580 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35581 UNSPEC_VPERM);
35582 }
35583
35584 /* Copy into target, possibly by way of a register. */
35585 if (!REG_P (target))
35586 {
35587 emit_move_insn (tmp, unspec);
35588 unspec = tmp;
35589 }
35590
35591 emit_move_insn (target, unspec);
35592 }
35593
35594 /* Expand an Altivec constant permutation. Return true if we match
35595 an efficient implementation; false to fall back to VPERM.
35596
35597 OP0 and OP1 are the input vectors and TARGET is the output vector.
35598 SEL specifies the constant permutation vector. */
35599
35600 static bool
35601 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35602 const vec_perm_indices &sel)
35603 {
35604 struct altivec_perm_insn {
35605 HOST_WIDE_INT mask;
35606 enum insn_code impl;
35607 unsigned char perm[16];
35608 };
35609 static const struct altivec_perm_insn patterns[] = {
35610 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35611 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35612 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35613 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35614 { OPTION_MASK_ALTIVEC,
35615 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35616 : CODE_FOR_altivec_vmrglb_direct),
35617 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35618 { OPTION_MASK_ALTIVEC,
35619 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35620 : CODE_FOR_altivec_vmrglh_direct),
35621 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35622 { OPTION_MASK_ALTIVEC,
35623 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35624 : CODE_FOR_altivec_vmrglw_direct),
35625 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35626 { OPTION_MASK_ALTIVEC,
35627 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35628 : CODE_FOR_altivec_vmrghb_direct),
35629 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35630 { OPTION_MASK_ALTIVEC,
35631 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35632 : CODE_FOR_altivec_vmrghh_direct),
35633 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35634 { OPTION_MASK_ALTIVEC,
35635 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35636 : CODE_FOR_altivec_vmrghw_direct),
35637 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35638 { OPTION_MASK_P8_VECTOR,
35639 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35640 : CODE_FOR_p8_vmrgow_v4sf_direct),
35641 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35642 { OPTION_MASK_P8_VECTOR,
35643 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35644 : CODE_FOR_p8_vmrgew_v4sf_direct),
35645 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35646 };
35647
35648 unsigned int i, j, elt, which;
35649 unsigned char perm[16];
35650 rtx x;
35651 bool one_vec;
35652
35653 /* Unpack the constant selector. */
35654 for (i = which = 0; i < 16; ++i)
35655 {
35656 elt = sel[i] & 31;
35657 which |= (elt < 16 ? 1 : 2);
35658 perm[i] = elt;
35659 }
35660
35661 /* Simplify the constant selector based on operands. */
35662 switch (which)
35663 {
35664 default:
35665 gcc_unreachable ();
35666
35667 case 3:
35668 one_vec = false;
35669 if (!rtx_equal_p (op0, op1))
35670 break;
35671 /* FALLTHRU */
35672
35673 case 2:
35674 for (i = 0; i < 16; ++i)
35675 perm[i] &= 15;
35676 op0 = op1;
35677 one_vec = true;
35678 break;
35679
35680 case 1:
35681 op1 = op0;
35682 one_vec = true;
35683 break;
35684 }
35685
35686 /* Look for splat patterns. */
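/* A selector whose sixteen entries replicate one byte index (or one
   aligned 2- or 4-byte group) is a single-element splat, implementable
   with vspltb, vsplth or vspltw; little endian mirrors the lane number.  */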
35687 if (one_vec)
35688 {
35689 elt = perm[0];
35690
35691 for (i = 0; i < 16; ++i)
35692 if (perm[i] != elt)
35693 break;
35694 if (i == 16)
35695 {
35696 if (!BYTES_BIG_ENDIAN)
35697 elt = 15 - elt;
35698 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35699 return true;
35700 }
35701
35702 if (elt % 2 == 0)
35703 {
35704 for (i = 0; i < 16; i += 2)
35705 if (perm[i] != elt || perm[i + 1] != elt + 1)
35706 break;
35707 if (i == 16)
35708 {
35709 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35710 x = gen_reg_rtx (V8HImode);
35711 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35712 GEN_INT (field)));
35713 emit_move_insn (target, gen_lowpart (V16QImode, x));
35714 return true;
35715 }
35716 }
35717
35718 if (elt % 4 == 0)
35719 {
35720 for (i = 0; i < 16; i += 4)
35721 if (perm[i] != elt
35722 || perm[i + 1] != elt + 1
35723 || perm[i + 2] != elt + 2
35724 || perm[i + 3] != elt + 3)
35725 break;
35726 if (i == 16)
35727 {
35728 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35729 x = gen_reg_rtx (V4SImode);
35730 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35731 GEN_INT (field)));
35732 emit_move_insn (target, gen_lowpart (V16QImode, x));
35733 return true;
35734 }
35735 }
35736 }
35737
35738 /* Look for merge and pack patterns. */
35739 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35740 {
35741 bool swapped;
35742
35743 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35744 continue;
35745
35746 elt = patterns[j].perm[0];
35747 if (perm[0] == elt)
35748 swapped = false;
35749 else if (perm[0] == elt + 16)
35750 swapped = true;
35751 else
35752 continue;
35753 for (i = 1; i < 16; ++i)
35754 {
35755 elt = patterns[j].perm[i];
35756 if (swapped)
35757 elt = (elt >= 16 ? elt - 16 : elt + 16);
35758 else if (one_vec && elt >= 16)
35759 elt -= 16;
35760 if (perm[i] != elt)
35761 break;
35762 }
35763 if (i == 16)
35764 {
35765 enum insn_code icode = patterns[j].impl;
35766 machine_mode omode = insn_data[icode].operand[0].mode;
35767 machine_mode imode = insn_data[icode].operand[1].mode;
35768
35769 /* For little-endian, don't use vpkuwum and vpkuhum if the
35770 underlying vector type is not V4SI and V8HI, respectively.
35771 For example, using vpkuwum with a V8HI picks up the even
35772 halfwords (BE numbering) when the even halfwords (LE
35773 numbering) are what we need. */
35774 if (!BYTES_BIG_ENDIAN
35775 && icode == CODE_FOR_altivec_vpkuwum_direct
35776 && ((REG_P (op0)
35777 && GET_MODE (op0) != V4SImode)
35778 || (SUBREG_P (op0)
35779 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35780 continue;
35781 if (!BYTES_BIG_ENDIAN
35782 && icode == CODE_FOR_altivec_vpkuhum_direct
35783 && ((REG_P (op0)
35784 && GET_MODE (op0) != V8HImode)
35785 || (SUBREG_P (op0)
35786 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35787 continue;
35788
35789 /* For little-endian, the two input operands must be swapped
35790 (or swapped back) to ensure proper right-to-left numbering
35791 from 0 to 2N-1. */
35792 if (swapped ^ !BYTES_BIG_ENDIAN)
35793 std::swap (op0, op1);
35794 if (imode != V16QImode)
35795 {
35796 op0 = gen_lowpart (imode, op0);
35797 op1 = gen_lowpart (imode, op1);
35798 }
35799 if (omode == V16QImode)
35800 x = target;
35801 else
35802 x = gen_reg_rtx (omode);
35803 emit_insn (GEN_FCN (icode) (x, op0, op1));
35804 if (omode != V16QImode)
35805 emit_move_insn (target, gen_lowpart (V16QImode, x));
35806 return true;
35807 }
35808 }
35809
35810 if (!BYTES_BIG_ENDIAN)
35811 {
35812 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35813 return true;
35814 }
35815
35816 return false;
35817 }
35818
35819 /* Expand a VSX Permute Doubleword constant permutation.
35820 Return true if we match an efficient implementation. */
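/* PERM0 and PERM1 are 2-bit indices into the concatenation OP0:OP1;
   bit 1 selects the operand and bit 0 the doubleword within it, much
   like the xxpermdi DM field.  */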
35821
35822 static bool
35823 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35824 unsigned char perm0, unsigned char perm1)
35825 {
35826 rtx x;
35827
35828 /* If both selectors come from the same operand, fold to single op. */
35829 if ((perm0 & 2) == (perm1 & 2))
35830 {
35831 if (perm0 & 2)
35832 op0 = op1;
35833 else
35834 op1 = op0;
35835 }
35836 /* If both operands are equal, fold to simpler permutation. */
35837 if (rtx_equal_p (op0, op1))
35838 {
35839 perm0 = perm0 & 1;
35840 perm1 = (perm1 & 1) + 2;
35841 }
35842 /* If the first selector comes from the second operand, swap. */
35843 else if (perm0 & 2)
35844 {
35845 if (perm1 & 2)
35846 return false;
35847 perm0 -= 2;
35848 perm1 += 2;
35849 std::swap (op0, op1);
35850 }
35851 /* If the second selector does not come from the second operand, fail. */
35852 else if ((perm1 & 2) == 0)
35853 return false;
35854
35855 /* Success! */
35856 if (target != NULL)
35857 {
35858 machine_mode vmode, dmode;
35859 rtvec v;
35860
35861 vmode = GET_MODE (target);
35862 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35863 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35864 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35865 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35866 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35867 emit_insn (gen_rtx_SET (target, x));
35868 }
35869 return true;
35870 }
35871
35872 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35873
35874 static bool
35875 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35876 rtx op1, const vec_perm_indices &sel)
35877 {
35878 bool testing_p = !target;
35879
35880 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35881 if (TARGET_ALTIVEC && testing_p)
35882 return true;
35883
35884 /* Check for ps_merge* or xxpermdi insns. */
35885 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35886 {
35887 if (testing_p)
35888 {
35889 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35890 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35891 }
35892 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35893 return true;
35894 }
35895
35896 if (TARGET_ALTIVEC)
35897 {
35898 /* Force the target-independent code to lower to V16QImode. */
35899 if (vmode != V16QImode)
35900 return false;
35901 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35902 return true;
35903 }
35904
35905 return false;
35906 }
35907
35908 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35909 OP0 and OP1 are the input vectors and TARGET is the output vector.
35910 PERM specifies the constant permutation vector. */
35911
35912 static void
35913 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35914 machine_mode vmode, const vec_perm_builder &perm)
35915 {
35916 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35917 if (x != target)
35918 emit_move_insn (target, x);
35919 }
35920
35921 /* Expand an extract even operation. */
35922
35923 void
35924 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35925 {
35926 machine_mode vmode = GET_MODE (target);
35927 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35928 vec_perm_builder perm (nelt, nelt, 1);
35929
35930 for (i = 0; i < nelt; i++)
35931 perm.quick_push (i * 2);
35932
35933 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35934 }
35935
35936 /* Expand a vector interleave operation. */
35937
35938 void
35939 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35940 {
35941 machine_mode vmode = GET_MODE (target);
35942 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35943 vec_perm_builder perm (nelt, nelt, 1);
35944
35945 high = (highp ? 0 : nelt / 2);
35946 for (i = 0; i < nelt / 2; i++)
35947 {
35948 perm.quick_push (i + high);
35949 perm.quick_push (i + nelt + high);
35950 }
35951
35952 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35953 }
35954
35955 /* Scale a V2DF vector SRC by two raised to the power SCALE, placing the result in TGT. */
35956 void
35957 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35958 {
35959 HOST_WIDE_INT hwi_scale (scale);
35960 REAL_VALUE_TYPE r_pow;
35961 rtvec v = rtvec_alloc (2);
35962 rtx elt;
35963 rtx scale_vec = gen_reg_rtx (V2DFmode);
35964 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35965 elt = const_double_from_real_value (r_pow, DFmode);
35966 RTVEC_ELT (v, 0) = elt;
35967 RTVEC_ELT (v, 1) = elt;
35968 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35969 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35970 }
35971
35972 /* Return an RTX representing where to find the function value of a
35973 function returning MODE. */
35974 static rtx
35975 rs6000_complex_function_value (machine_mode mode)
35976 {
35977 unsigned int regno;
35978 rtx r1, r2;
35979 machine_mode inner = GET_MODE_INNER (mode);
35980 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35981
35982 if (TARGET_FLOAT128_TYPE
35983 && (mode == KCmode
35984 || (mode == TCmode && TARGET_IEEEQUAD)))
35985 regno = ALTIVEC_ARG_RETURN;
35986
35987 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35988 regno = FP_ARG_RETURN;
35989
35990 else
35991 {
35992 regno = GP_ARG_RETURN;
35993
35994 /* 32-bit is OK since it'll go in r3/r4. */
35995 if (TARGET_32BIT && inner_bytes >= 4)
35996 return gen_rtx_REG (mode, regno);
35997 }
35998
35999 if (inner_bytes >= 8)
36000 return gen_rtx_REG (mode, regno);
36001
36002 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
36003 const0_rtx);
36004 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
36005 GEN_INT (inner_bytes));
36006 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
36007 }
36008
36009 /* Return an rtx describing a return value of MODE as a PARALLEL
36010 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
36011 stride REG_STRIDE. */
36012
36013 static rtx
36014 rs6000_parallel_return (machine_mode mode,
36015 int n_elts, machine_mode elt_mode,
36016 unsigned int regno, unsigned int reg_stride)
36017 {
36018 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
36019
36020 int i;
36021 for (i = 0; i < n_elts; i++)
36022 {
36023 rtx r = gen_rtx_REG (elt_mode, regno);
36024 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
36025 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
36026 regno += reg_stride;
36027 }
36028
36029 return par;
36030 }
36031
36032 /* Target hook for TARGET_FUNCTION_VALUE.
36033
36034 An integer value is in r3 and a floating-point value is in fp1,
36035 unless -msoft-float. */
36036
36037 static rtx
36038 rs6000_function_value (const_tree valtype,
36039 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
36040 bool outgoing ATTRIBUTE_UNUSED)
36041 {
36042 machine_mode mode;
36043 unsigned int regno;
36044 machine_mode elt_mode;
36045 int n_elts;
36046
36047 /* Special handling for structs in darwin64. */
36048 if (TARGET_MACHO
36049 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36050 {
36051 CUMULATIVE_ARGS valcum;
36052 rtx valret;
36053
36054 valcum.words = 0;
36055 valcum.fregno = FP_ARG_MIN_REG;
36056 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36057 /* Do a trial code generation as if this were going to be passed as
36058 an argument; if any part goes in memory, we return NULL. */
36059 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36060 if (valret)
36061 return valret;
36062 /* Otherwise fall through to standard ABI rules. */
36063 }
36064
36065 mode = TYPE_MODE (valtype);
36066
36067 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36068 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36069 {
36070 int first_reg, n_regs;
36071
36072 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36073 {
36074 /* _Decimal128 must use even/odd register pairs. */
36075 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36076 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36077 }
36078 else
36079 {
36080 first_reg = ALTIVEC_ARG_RETURN;
36081 n_regs = 1;
36082 }
36083
36084 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36085 }
36086
36087 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
36088 if (TARGET_32BIT && TARGET_POWERPC64)
36089 switch (mode)
36090 {
36091 default:
36092 break;
36093 case E_DImode:
36094 case E_SCmode:
36095 case E_DCmode:
36096 case E_TCmode:
36097 int count = GET_MODE_SIZE (mode) / 4;
36098 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36099 }
36100
36101 if ((INTEGRAL_TYPE_P (valtype)
36102 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36103 || POINTER_TYPE_P (valtype))
36104 mode = TARGET_32BIT ? SImode : DImode;
36105
36106 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36107 /* _Decimal128 must use an even/odd register pair. */
36108 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36109 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36110 && !FLOAT128_VECTOR_P (mode))
36111 regno = FP_ARG_RETURN;
36112 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36113 && targetm.calls.split_complex_arg)
36114 return rs6000_complex_function_value (mode);
36115 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36116 return register is used in both cases, and we won't see V2DImode/V2DFmode
36117 for pure altivec, combine the two cases. */
36118 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36119 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36120 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36121 regno = ALTIVEC_ARG_RETURN;
36122 else
36123 regno = GP_ARG_RETURN;
36124
36125 return gen_rtx_REG (mode, regno);
36126 }
36127
36128 /* Define how to find the value returned by a library function
36129 assuming the value has mode MODE. */
36130 rtx
36131 rs6000_libcall_value (machine_mode mode)
36132 {
36133 unsigned int regno;
36134
36135 /* Long long return values need to be split in the -mpowerpc64, 32-bit ABI. */
36136 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36137 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36138
36139 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36140 /* _Decimal128 must use an even/odd register pair. */
36141 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36142 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
36143 regno = FP_ARG_RETURN;
36144 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36145 return register is used in both cases, and we won't see V2DImode/V2DFmode
36146 for pure altivec, combine the two cases. */
36147 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36148 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36149 regno = ALTIVEC_ARG_RETURN;
36150 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36151 return rs6000_complex_function_value (mode);
36152 else
36153 regno = GP_ARG_RETURN;
36154
36155 return gen_rtx_REG (mode, regno);
36156 }
36157
36158 /* Compute register pressure classes. We implement the target hook to avoid
36159 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
36160 lead to incorrect estimates of the number of available registers and therefore
36161 increased register pressure/spilling. */
36162 static int
36163 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36164 {
36165 int n;
36166
36167 n = 0;
36168 pressure_classes[n++] = GENERAL_REGS;
36169 if (TARGET_VSX)
36170 pressure_classes[n++] = VSX_REGS;
36171 else
36172 {
36173 if (TARGET_ALTIVEC)
36174 pressure_classes[n++] = ALTIVEC_REGS;
36175 if (TARGET_HARD_FLOAT)
36176 pressure_classes[n++] = FLOAT_REGS;
36177 }
36178 pressure_classes[n++] = CR_REGS;
36179 pressure_classes[n++] = SPECIAL_REGS;
36180
36181 return n;
36182 }
36183
36184 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36185 Frame pointer elimination is automatically handled.
36186
36187 For the RS/6000, if frame pointer elimination is being done, we would like
36188 to convert ap into fp, not sp.
36189
36190 We need r30 if -mminimal-toc was specified, and there are constant pool
36191 references. */
36192
36193 static bool
36194 rs6000_can_eliminate (const int from, const int to)
36195 {
36196 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36197 ? ! frame_pointer_needed
36198 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36199 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36200 || constant_pool_empty_p ()
36201 : true);
36202 }
36203
36204 /* Define the offset between two registers, FROM to be eliminated and its
36205 replacement TO, at the start of a routine. */
36206 HOST_WIDE_INT
36207 rs6000_initial_elimination_offset (int from, int to)
36208 {
36209 rs6000_stack_t *info = rs6000_stack_info ();
36210 HOST_WIDE_INT offset;
36211
36212 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36213 offset = info->push_p ? 0 : -info->total_size;
36214 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36215 {
36216 offset = info->push_p ? 0 : -info->total_size;
36217 if (FRAME_GROWS_DOWNWARD)
36218 offset += info->fixed_size + info->vars_size + info->parm_size;
36219 }
36220 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36221 offset = FRAME_GROWS_DOWNWARD
36222 ? info->fixed_size + info->vars_size + info->parm_size
36223 : 0;
36224 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36225 offset = info->total_size;
36226 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36227 offset = info->push_p ? info->total_size : 0;
36228 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36229 offset = 0;
36230 else
36231 gcc_unreachable ();
36232
36233 return offset;
36234 }
36235
36236 /* Fill in sizes of registers used by unwinder. */
36237
36238 static void
36239 rs6000_init_dwarf_reg_sizes_extra (tree address)
36240 {
36241 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36242 {
36243 int i;
36244 machine_mode mode = TYPE_MODE (char_type_node);
36245 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36246 rtx mem = gen_rtx_MEM (BLKmode, addr);
36247 rtx value = gen_int_mode (16, mode);
36248
36249 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36250 The unwinder still needs to know the size of Altivec registers. */
36251
36252 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36253 {
36254 int column = DWARF_REG_TO_UNWIND_COLUMN
36255 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36256 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36257
36258 emit_move_insn (adjust_address (mem, mode, offset), value);
36259 }
36260 }
36261 }
36262
36263 /* Map internal gcc register numbers to debug format register numbers.
36264 FORMAT specifies the type of debug register number to use:
36265 0 -- debug information, except for frame-related sections
36266 1 -- DWARF .debug_frame section
36267 2 -- DWARF .eh_frame section */
36268
36269 unsigned int
36270 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36271 {
36272 /* For non-DWARF debug information, and also for the .eh_frame section,
36273 we simply use the internal register number. */
36274 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36275 return regno;
36276
36277 /* On some platforms, we use the standard DWARF register
36278 numbering for .debug_info and .debug_frame. */
36279 #ifdef RS6000_USE_DWARF_NUMBERING
36280 if (regno <= 63)
36281 return regno;
36282 if (regno == LR_REGNO)
36283 return 108;
36284 if (regno == CTR_REGNO)
36285 return 109;
36286 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36287 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36288 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36289 to the DWARF reg for CR. */
36290 if (format == 1 && regno == CR2_REGNO)
36291 return 64;
36292 if (CR_REGNO_P (regno))
36293 return regno - CR0_REGNO + 86;
36294 if (regno == CA_REGNO)
36295 return 101; /* XER */
36296 if (ALTIVEC_REGNO_P (regno))
36297 return regno - FIRST_ALTIVEC_REGNO + 1124;
36298 if (regno == VRSAVE_REGNO)
36299 return 356;
36300 if (regno == VSCR_REGNO)
36301 return 67;
36302 #endif
36303 return regno;
36304 }
36305
36306 /* target hook eh_return_filter_mode */
36307 static scalar_int_mode
36308 rs6000_eh_return_filter_mode (void)
36309 {
36310 return TARGET_32BIT ? SImode : word_mode;
36311 }
36312
36313 /* Target hook for translate_mode_attribute. */
36314 static machine_mode
36315 rs6000_translate_mode_attribute (machine_mode mode)
36316 {
36317 if ((FLOAT128_IEEE_P (mode)
36318 && ieee128_float_type_node == long_double_type_node)
36319 || (FLOAT128_IBM_P (mode)
36320 && ibm128_float_type_node == long_double_type_node))
36321 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36322 return mode;
36323 }
36324
36325 /* Target hook for scalar_mode_supported_p. */
36326 static bool
36327 rs6000_scalar_mode_supported_p (scalar_mode mode)
36328 {
36329 /* -m32 does not support TImode. This is the default, from
36330 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36331 same ABI as for -m32. But default_scalar_mode_supported_p allows
36332 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36333 for -mpowerpc64. */
36334 if (TARGET_32BIT && mode == TImode)
36335 return false;
36336
36337 if (DECIMAL_FLOAT_MODE_P (mode))
36338 return default_decimal_float_supported_p ();
36339 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36340 return true;
36341 else
36342 return default_scalar_mode_supported_p (mode);
36343 }
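/* For reference (a sketch of the user-visible types, assuming
   TARGET_FLOAT128_TYPE is set): KFmode backs the explicit IEEE 128-bit
   __float128 type (TFmode when long double is itself IEEE 128-bit) and
   IFmode backs the IBM double-double __ibm128 type, e.g.

     __float128 x = 1.0;   // KFmode scalar
     __ibm128 y = 2.0;     // IFmode scalar  */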
36344
36345 /* Target hook for vector_mode_supported_p. */
36346 static bool
36347 rs6000_vector_mode_supported_p (machine_mode mode)
36348 {
36349 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36350 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36351 double-double. */
36352 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36353 return true;
36354
36355 else
36356 return false;
36357 }
36358
36359 /* Target hook for floatn_mode. */
36360 static opt_scalar_float_mode
36361 rs6000_floatn_mode (int n, bool extended)
36362 {
36363 if (extended)
36364 {
36365 switch (n)
36366 {
36367 case 32:
36368 return DFmode;
36369
36370 case 64:
36371 if (TARGET_FLOAT128_TYPE)
36372 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36373 else
36374 return opt_scalar_float_mode ();
36375
36376 case 128:
36377 return opt_scalar_float_mode ();
36378
36379 default:
36380 /* Those are the only valid _FloatNx types. */
36381 gcc_unreachable ();
36382 }
36383 }
36384 else
36385 {
36386 switch (n)
36387 {
36388 case 32:
36389 return SFmode;
36390
36391 case 64:
36392 return DFmode;
36393
36394 case 128:
36395 if (TARGET_FLOAT128_TYPE)
36396 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36397 else
36398 return opt_scalar_float_mode ();
36399
36400 default:
36401 return opt_scalar_float_mode ();
36402 }
36403 }
36404
36405 }
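/* For reference (a sketch of the resulting _FloatN/_FloatNx mapping,
   assuming TARGET_FLOAT128_TYPE): _Float32 is SFmode, _Float64 and
   _Float32x are DFmode, and _Float128 and _Float64x are TFmode when
   long double is IEEE 128-bit, otherwise KFmode, e.g.

     _Float128 q = 1.0f128;   // TFmode or KFmode per FLOAT128_IEEE_P  */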
36406
36407 /* Target hook for c_mode_for_suffix. */
36408 static machine_mode
36409 rs6000_c_mode_for_suffix (char suffix)
36410 {
36411 if (TARGET_FLOAT128_TYPE)
36412 {
36413 if (suffix == 'q' || suffix == 'Q')
36414 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36415
36416 /* At the moment, we are not defining a suffix for IBM extended double.
36417 If/when the default for -mabi=ieeelongdouble is changed, and we want
36418 to support __ibm128 constants in legacy library code, we may need to
36419 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36420 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36421 __float80 constants. */
36422 }
36423
36424 return VOIDmode;
36425 }
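/* Illustrative use of the 'q'/'Q' suffix handled above (a sketch; needs
   a configuration where TARGET_FLOAT128_TYPE is set, e.g. -mfloat128):

     __float128 third = 1.0q / 3.0q;
     // constants get TFmode when long double is IEEE 128-bit, else KFmode  */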
36426
36427 /* Target hook for invalid_arg_for_unprototyped_fn. */
36428 static const char *
36429 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36430 {
36431 return (!rs6000_darwin64_abi
36432 && typelist == 0
36433 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36434 && (funcdecl == NULL_TREE
36435 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36436 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36437 ? N_("AltiVec argument passed to unprototyped function")
36438 : NULL;
36439 }
36440
36441 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36442 setup by using __stack_chk_fail_local hidden function instead of
36443 calling __stack_chk_fail directly. Otherwise it is better to call
36444 __stack_chk_fail directly. */
36445
36446 static tree ATTRIBUTE_UNUSED
36447 rs6000_stack_protect_fail (void)
36448 {
36449 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36450 ? default_hidden_stack_protect_fail ()
36451 : default_external_stack_protect_fail ();
36452 }
36453
36454 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36455
36456 #if TARGET_ELF
36457 static unsigned HOST_WIDE_INT
36458 rs6000_asan_shadow_offset (void)
36459 {
36460 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36461 }
36462 #endif
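/* A sketch of how that offset is consumed (the generic ASan shadow
   mapping, nothing rs6000-specific assumed): each shadow byte covers 8
   application bytes, so

     shadow = (addr >> 3) + rs6000_asan_shadow_offset ();

   i.e. a shadow base of 1 << 41 for 64-bit and 1 << 29 for 32-bit.  */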
36463 \f
36464 /* Mask options that we want to support inside of attribute((target)) and
36465 #pragma GCC target operations. Note, we do not include things like
36466 64/32-bit, endianness, hard/soft floating point, etc. that would have
36467 different calling sequences. */
36468
36469 struct rs6000_opt_mask {
36470 const char *name; /* option name */
36471 HOST_WIDE_INT mask; /* mask to set */
36472 bool invert; /* invert sense of mask */
36473 bool valid_target; /* option is a target option */
36474 };
36475
36476 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36477 {
36478 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36479 { "cmpb", OPTION_MASK_CMPB, false, true },
36480 { "crypto", OPTION_MASK_CRYPTO, false, true },
36481 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36482 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36483 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36484 false, true },
36485 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36486 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36487 { "fprnd", OPTION_MASK_FPRND, false, true },
36488 { "hard-dfp", OPTION_MASK_DFP, false, true },
36489 { "htm", OPTION_MASK_HTM, false, true },
36490 { "isel", OPTION_MASK_ISEL, false, true },
36491 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36492 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36493 { "modulo", OPTION_MASK_MODULO, false, true },
36494 { "mulhw", OPTION_MASK_MULHW, false, true },
36495 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36496 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36497 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36498 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36499 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36500 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36501 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36502 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36503 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36504 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36505 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36506 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36507 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36508 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36509 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36510 { "string", 0, false, true },
36511 { "update", OPTION_MASK_NO_UPDATE, true , true },
36512 { "vsx", OPTION_MASK_VSX, false, true },
36513 #ifdef OPTION_MASK_64BIT
36514 #if TARGET_AIX_OS
36515 { "aix64", OPTION_MASK_64BIT, false, false },
36516 { "aix32", OPTION_MASK_64BIT, true, false },
36517 #else
36518 { "64", OPTION_MASK_64BIT, false, false },
36519 { "32", OPTION_MASK_64BIT, true, false },
36520 #endif
36521 #endif
36522 #ifdef OPTION_MASK_EABI
36523 { "eabi", OPTION_MASK_EABI, false, false },
36524 #endif
36525 #ifdef OPTION_MASK_LITTLE_ENDIAN
36526 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36527 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36528 #endif
36529 #ifdef OPTION_MASK_RELOCATABLE
36530 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36531 #endif
36532 #ifdef OPTION_MASK_STRICT_ALIGN
36533 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36534 #endif
36535 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36536 { "string", 0, false, false },
36537 };
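/* A usage sketch for the mask names above (names from the table, with
   the "no-" prefix and the "cpu="/"tune=" forms handled in
   rs6000_inner_target_options below):

     __attribute__((__target__("vsx,no-crypto")))
     void f (void);

     #pragma GCC target ("cpu=power9,htm")  */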
36538
36539 /* Builtin mask mapping for printing the flags. */
36540 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36541 {
36542 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36543 { "vsx", RS6000_BTM_VSX, false, false },
36544 { "fre", RS6000_BTM_FRE, false, false },
36545 { "fres", RS6000_BTM_FRES, false, false },
36546 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36547 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36548 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36549 { "cell", RS6000_BTM_CELL, false, false },
36550 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36551 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36552 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36553 { "crypto", RS6000_BTM_CRYPTO, false, false },
36554 { "htm", RS6000_BTM_HTM, false, false },
36555 { "hard-dfp", RS6000_BTM_DFP, false, false },
36556 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36557 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36558 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36559 { "float128", RS6000_BTM_FLOAT128, false, false },
36560 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36561 };
36562
36563 /* Option variables that we want to support inside attribute((target)) and
36564 #pragma GCC target operations. */
36565
36566 struct rs6000_opt_var {
36567 const char *name; /* option name */
36568 size_t global_offset; /* offset of the option in global_options. */
36569 size_t target_offset; /* offset of the option in target options. */
36570 };
36571
36572 static struct rs6000_opt_var const rs6000_opt_vars[] =
36573 {
36574 { "friz",
36575 offsetof (struct gcc_options, x_TARGET_FRIZ),
36576 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36577 { "avoid-indexed-addresses",
36578 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36579 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36580 { "longcall",
36581 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36582 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36583 { "optimize-swaps",
36584 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36585 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36586 { "allow-movmisalign",
36587 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36588 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36589 { "sched-groups",
36590 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36591 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36592 { "always-hint",
36593 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36594 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36595 { "align-branch-targets",
36596 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36597 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36598 { "tls-markers",
36599 offsetof (struct gcc_options, x_tls_markers),
36600 offsetof (struct cl_target_option, x_tls_markers), },
36601 { "sched-prolog",
36602 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36603 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36604 { "sched-epilog",
36605 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36606 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36607 { "speculate-indirect-jumps",
36608 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36609 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36610 };
36611
36612 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36613 parsing. Return true if there were no errors. */
36614
36615 static bool
36616 rs6000_inner_target_options (tree args, bool attr_p)
36617 {
36618 bool ret = true;
36619
36620 if (args == NULL_TREE)
36621 ;
36622
36623 else if (TREE_CODE (args) == STRING_CST)
36624 {
36625 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36626 char *q;
36627
36628 while ((q = strtok (p, ",")) != NULL)
36629 {
36630 bool error_p = false;
36631 bool not_valid_p = false;
36632 const char *cpu_opt = NULL;
36633
36634 p = NULL;
36635 if (strncmp (q, "cpu=", 4) == 0)
36636 {
36637 int cpu_index = rs6000_cpu_name_lookup (q+4);
36638 if (cpu_index >= 0)
36639 rs6000_cpu_index = cpu_index;
36640 else
36641 {
36642 error_p = true;
36643 cpu_opt = q+4;
36644 }
36645 }
36646 else if (strncmp (q, "tune=", 5) == 0)
36647 {
36648 int tune_index = rs6000_cpu_name_lookup (q+5);
36649 if (tune_index >= 0)
36650 rs6000_tune_index = tune_index;
36651 else
36652 {
36653 error_p = true;
36654 cpu_opt = q+5;
36655 }
36656 }
36657 else
36658 {
36659 size_t i;
36660 bool invert = false;
36661 char *r = q;
36662
36663 error_p = true;
36664 if (strncmp (r, "no-", 3) == 0)
36665 {
36666 invert = true;
36667 r += 3;
36668 }
36669
36670 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36671 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36672 {
36673 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36674
36675 if (!rs6000_opt_masks[i].valid_target)
36676 not_valid_p = true;
36677 else
36678 {
36679 error_p = false;
36680 rs6000_isa_flags_explicit |= mask;
36681
36682 /* VSX needs altivec, so -mvsx automagically sets
36683 altivec and disables -mavoid-indexed-addresses. */
36684 if (!invert)
36685 {
36686 if (mask == OPTION_MASK_VSX)
36687 {
36688 mask |= OPTION_MASK_ALTIVEC;
36689 TARGET_AVOID_XFORM = 0;
36690 }
36691 }
36692
36693 if (rs6000_opt_masks[i].invert)
36694 invert = !invert;
36695
36696 if (invert)
36697 rs6000_isa_flags &= ~mask;
36698 else
36699 rs6000_isa_flags |= mask;
36700 }
36701 break;
36702 }
36703
36704 if (error_p && !not_valid_p)
36705 {
36706 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36707 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36708 {
36709 size_t j = rs6000_opt_vars[i].global_offset;
36710 *((int *) ((char *)&global_options + j)) = !invert;
36711 error_p = false;
36712 not_valid_p = false;
36713 break;
36714 }
36715 }
36716 }
36717
36718 if (error_p)
36719 {
36720 const char *eprefix, *esuffix;
36721
36722 ret = false;
36723 if (attr_p)
36724 {
36725 eprefix = "__attribute__((__target__(";
36726 esuffix = ")))";
36727 }
36728 else
36729 {
36730 eprefix = "#pragma GCC target ";
36731 esuffix = "";
36732 }
36733
36734 if (cpu_opt)
36735 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36736 q, esuffix);
36737 else if (not_valid_p)
36738 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36739 else
36740 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36741 }
36742 }
36743 }
36744
36745 else if (TREE_CODE (args) == TREE_LIST)
36746 {
36747 do
36748 {
36749 tree value = TREE_VALUE (args);
36750 if (value)
36751 {
36752 bool ret2 = rs6000_inner_target_options (value, attr_p);
36753 if (!ret2)
36754 ret = false;
36755 }
36756 args = TREE_CHAIN (args);
36757 }
36758 while (args != NULL_TREE);
36759 }
36760
36761 else
36762 {
36763 error ("attribute %<target%> argument not a string");
36764 return false;
36765 }
36766
36767 return ret;
36768 }
36769
36770 /* Print out the target options as a list for -mdebug=target. */
36771
36772 static void
36773 rs6000_debug_target_options (tree args, const char *prefix)
36774 {
36775 if (args == NULL_TREE)
36776 fprintf (stderr, "%s<NULL>", prefix);
36777
36778 else if (TREE_CODE (args) == STRING_CST)
36779 {
36780 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36781 char *q;
36782
36783 while ((q = strtok (p, ",")) != NULL)
36784 {
36785 p = NULL;
36786 fprintf (stderr, "%s\"%s\"", prefix, q);
36787 prefix = ", ";
36788 }
36789 }
36790
36791 else if (TREE_CODE (args) == TREE_LIST)
36792 {
36793 do
36794 {
36795 tree value = TREE_VALUE (args);
36796 if (value)
36797 {
36798 rs6000_debug_target_options (value, prefix);
36799 prefix = ", ";
36800 }
36801 args = TREE_CHAIN (args);
36802 }
36803 while (args != NULL_TREE);
36804 }
36805
36806 else
36807 gcc_unreachable ();
36808
36809 return;
36810 }
36811
36812 \f
36813 /* Hook to validate attribute((target("..."))). */
36814
36815 static bool
36816 rs6000_valid_attribute_p (tree fndecl,
36817 tree ARG_UNUSED (name),
36818 tree args,
36819 int flags)
36820 {
36821 struct cl_target_option cur_target;
36822 bool ret;
36823 tree old_optimize;
36824 tree new_target, new_optimize;
36825 tree func_optimize;
36826
36827 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36828
36829 if (TARGET_DEBUG_TARGET)
36830 {
36831 tree tname = DECL_NAME (fndecl);
36832 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36833 if (tname)
36834 fprintf (stderr, "function: %.*s\n",
36835 (int) IDENTIFIER_LENGTH (tname),
36836 IDENTIFIER_POINTER (tname));
36837 else
36838 fprintf (stderr, "function: unknown\n");
36839
36840 fprintf (stderr, "args:");
36841 rs6000_debug_target_options (args, " ");
36842 fprintf (stderr, "\n");
36843
36844 if (flags)
36845 fprintf (stderr, "flags: 0x%x\n", flags);
36846
36847 fprintf (stderr, "--------------------\n");
36848 }
36849
36850 /* attribute((target("default"))) does nothing, beyond
36851 affecting multi-versioning. */
36852 if (TREE_VALUE (args)
36853 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36854 && TREE_CHAIN (args) == NULL_TREE
36855 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36856 return true;
36857
36858 old_optimize = build_optimization_node (&global_options);
36859 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36860
36861 /* If the function changed the optimization levels as well as setting target
36862 options, start with the optimizations specified. */
36863 if (func_optimize && func_optimize != old_optimize)
36864 cl_optimization_restore (&global_options,
36865 TREE_OPTIMIZATION (func_optimize));
36866
36867 /* The target attributes may also change some optimization flags, so update
36868 the optimization options if necessary. */
36869 cl_target_option_save (&cur_target, &global_options);
36870 rs6000_cpu_index = rs6000_tune_index = -1;
36871 ret = rs6000_inner_target_options (args, true);
36872
36873 /* Set up any additional state. */
36874 if (ret)
36875 {
36876 ret = rs6000_option_override_internal (false);
36877 new_target = build_target_option_node (&global_options);
36878 }
36879 else
36880 new_target = NULL;
36881
36882 new_optimize = build_optimization_node (&global_options);
36883
36884 if (!new_target)
36885 ret = false;
36886
36887 else if (fndecl)
36888 {
36889 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36890
36891 if (old_optimize != new_optimize)
36892 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36893 }
36894
36895 cl_target_option_restore (&global_options, &cur_target);
36896
36897 if (old_optimize != new_optimize)
36898 cl_optimization_restore (&global_options,
36899 TREE_OPTIMIZATION (old_optimize));
36900
36901 return ret;
36902 }
36903
36904 \f
36905 /* Hook to validate the current #pragma GCC target and set the state, and
36906 update the macros based on what was changed. If ARGS is NULL, then
36907 POP_TARGET is used to reset the options. */
36908
36909 bool
36910 rs6000_pragma_target_parse (tree args, tree pop_target)
36911 {
36912 tree prev_tree = build_target_option_node (&global_options);
36913 tree cur_tree;
36914 struct cl_target_option *prev_opt, *cur_opt;
36915 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36916 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36917
36918 if (TARGET_DEBUG_TARGET)
36919 {
36920 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36921 fprintf (stderr, "args:");
36922 rs6000_debug_target_options (args, " ");
36923 fprintf (stderr, "\n");
36924
36925 if (pop_target)
36926 {
36927 fprintf (stderr, "pop_target:\n");
36928 debug_tree (pop_target);
36929 }
36930 else
36931 fprintf (stderr, "pop_target: <NULL>\n");
36932
36933 fprintf (stderr, "--------------------\n");
36934 }
36935
36936 if (! args)
36937 {
36938 cur_tree = ((pop_target)
36939 ? pop_target
36940 : target_option_default_node);
36941 cl_target_option_restore (&global_options,
36942 TREE_TARGET_OPTION (cur_tree));
36943 }
36944 else
36945 {
36946 rs6000_cpu_index = rs6000_tune_index = -1;
36947 if (!rs6000_inner_target_options (args, false)
36948 || !rs6000_option_override_internal (false)
36949 || (cur_tree = build_target_option_node (&global_options))
36950 == NULL_TREE)
36951 {
36952 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36953 fprintf (stderr, "invalid pragma\n");
36954
36955 return false;
36956 }
36957 }
36958
36959 target_option_current_node = cur_tree;
36960 rs6000_activate_target_options (target_option_current_node);
36961
36962 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36963 change the macros that are defined. */
36964 if (rs6000_target_modify_macros_ptr)
36965 {
36966 prev_opt = TREE_TARGET_OPTION (prev_tree);
36967 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36968 prev_flags = prev_opt->x_rs6000_isa_flags;
36969
36970 cur_opt = TREE_TARGET_OPTION (cur_tree);
36971 cur_flags = cur_opt->x_rs6000_isa_flags;
36972 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36973
36974 diff_bumask = (prev_bumask ^ cur_bumask);
36975 diff_flags = (prev_flags ^ cur_flags);
36976
36977 if ((diff_flags != 0) || (diff_bumask != 0))
36978 {
36979 /* Delete old macros. */
36980 rs6000_target_modify_macros_ptr (false,
36981 prev_flags & diff_flags,
36982 prev_bumask & diff_bumask);
36983
36984 /* Define new macros. */
36985 rs6000_target_modify_macros_ptr (true,
36986 cur_flags & diff_flags,
36987 cur_bumask & diff_bumask);
36988 }
36989 }
36990
36991 return true;
36992 }
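/* A usage sketch for the pragma path above (standard GCC pragmas, not
   rs6000 extensions):

     #pragma GCC push_options
     #pragma GCC target ("altivec")
     // code here sees the AltiVec macros and ISA flags
     #pragma GCC pop_options

   The pop reaches this function with ARGS == NULL and the previously
   saved option node as POP_TARGET.  */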
36993
36994 \f
36995 /* Remember the last target of rs6000_set_current_function. */
36996 static GTY(()) tree rs6000_previous_fndecl;
36997
36998 /* Restore target's globals from NEW_TREE and invalidate the
36999 rs6000_previous_fndecl cache. */
37000
37001 void
37002 rs6000_activate_target_options (tree new_tree)
37003 {
37004 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
37005 if (TREE_TARGET_GLOBALS (new_tree))
37006 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
37007 else if (new_tree == target_option_default_node)
37008 restore_target_globals (&default_target_globals);
37009 else
37010 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
37011 rs6000_previous_fndecl = NULL_TREE;
37012 }
37013
37014 /* Establish appropriate back-end context for processing the function
37015 FNDECL. The argument might be NULL to indicate processing at top
37016 level, outside of any function scope. */
37017 static void
37018 rs6000_set_current_function (tree fndecl)
37019 {
37020 if (TARGET_DEBUG_TARGET)
37021 {
37022 fprintf (stderr, "\n==================== rs6000_set_current_function");
37023
37024 if (fndecl)
37025 fprintf (stderr, ", fndecl %s (%p)",
37026 (DECL_NAME (fndecl)
37027 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
37028 : "<unknown>"), (void *)fndecl);
37029
37030 if (rs6000_previous_fndecl)
37031 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
37032
37033 fprintf (stderr, "\n");
37034 }
37035
37036 /* Only change the context if the function changes. This hook is called
37037 several times in the course of compiling a function, and we don't want to
37038 slow things down too much or call target_reinit when it isn't safe. */
37039 if (fndecl == rs6000_previous_fndecl)
37040 return;
37041
37042 tree old_tree;
37043 if (rs6000_previous_fndecl == NULL_TREE)
37044 old_tree = target_option_current_node;
37045 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37046 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37047 else
37048 old_tree = target_option_default_node;
37049
37050 tree new_tree;
37051 if (fndecl == NULL_TREE)
37052 {
37053 if (old_tree != target_option_current_node)
37054 new_tree = target_option_current_node;
37055 else
37056 new_tree = NULL_TREE;
37057 }
37058 else
37059 {
37060 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37061 if (new_tree == NULL_TREE)
37062 new_tree = target_option_default_node;
37063 }
37064
37065 if (TARGET_DEBUG_TARGET)
37066 {
37067 if (new_tree)
37068 {
37069 fprintf (stderr, "\nnew fndecl target specific options:\n");
37070 debug_tree (new_tree);
37071 }
37072
37073 if (old_tree)
37074 {
37075 fprintf (stderr, "\nold fndecl target specific options:\n");
37076 debug_tree (old_tree);
37077 }
37078
37079 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37080 fprintf (stderr, "--------------------\n");
37081 }
37082
37083 if (new_tree && old_tree != new_tree)
37084 rs6000_activate_target_options (new_tree);
37085
37086 if (fndecl)
37087 rs6000_previous_fndecl = fndecl;
37088 }
37089
37090 \f
37091 /* Save the current options */
37092
37093 static void
37094 rs6000_function_specific_save (struct cl_target_option *ptr,
37095 struct gcc_options *opts)
37096 {
37097 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37098 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37099 }
37100
37101 /* Restore the current options */
37102
37103 static void
37104 rs6000_function_specific_restore (struct gcc_options *opts,
37105 struct cl_target_option *ptr)
37106
37107 {
37108 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37109 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37110 (void) rs6000_option_override_internal (false);
37111 }
37112
37113 /* Print the current options */
37114
37115 static void
37116 rs6000_function_specific_print (FILE *file, int indent,
37117 struct cl_target_option *ptr)
37118 {
37119 rs6000_print_isa_options (file, indent, "Isa options set",
37120 ptr->x_rs6000_isa_flags);
37121
37122 rs6000_print_isa_options (file, indent, "Isa options explicit",
37123 ptr->x_rs6000_isa_flags_explicit);
37124 }
37125
37126 /* Helper function to print the current isa or misc options on a line. */
37127
37128 static void
37129 rs6000_print_options_internal (FILE *file,
37130 int indent,
37131 const char *string,
37132 HOST_WIDE_INT flags,
37133 const char *prefix,
37134 const struct rs6000_opt_mask *opts,
37135 size_t num_elements)
37136 {
37137 size_t i;
37138 size_t start_column = 0;
37139 size_t cur_column;
37140 size_t max_column = 120;
37141 size_t prefix_len = strlen (prefix);
37142 size_t comma_len = 0;
37143 const char *comma = "";
37144
37145 if (indent)
37146 start_column += fprintf (file, "%*s", indent, "");
37147
37148 if (!flags)
37149 {
37150 fprintf (file, DEBUG_FMT_S, string, "<none>");
37151 return;
37152 }
37153
37154 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37155
37156 /* Print the various mask options. */
37157 cur_column = start_column;
37158 for (i = 0; i < num_elements; i++)
37159 {
37160 bool invert = opts[i].invert;
37161 const char *name = opts[i].name;
37162 const char *no_str = "";
37163 HOST_WIDE_INT mask = opts[i].mask;
37164 size_t len = comma_len + prefix_len + strlen (name);
37165
37166 if (!invert)
37167 {
37168 if ((flags & mask) == 0)
37169 {
37170 no_str = "no-";
37171 len += sizeof ("no-") - 1;
37172 }
37173
37174 flags &= ~mask;
37175 }
37176
37177 else
37178 {
37179 if ((flags & mask) != 0)
37180 {
37181 no_str = "no-";
37182 len += sizeof ("no-") - 1;
37183 }
37184
37185 flags |= mask;
37186 }
37187
37188 cur_column += len;
37189 if (cur_column > max_column)
37190 {
37191 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
37192 cur_column = start_column + len;
37193 comma = "";
37194 }
37195
37196 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37197 comma = ", ";
37198 comma_len = sizeof (", ") - 1;
37199 }
37200
37201 fputs ("\n", file);
37202 }
37203
37204 /* Helper function to print the current isa options on a line. */
37205
37206 static void
37207 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37208 HOST_WIDE_INT flags)
37209 {
37210 rs6000_print_options_internal (file, indent, string, flags, "-m",
37211 &rs6000_opt_masks[0],
37212 ARRAY_SIZE (rs6000_opt_masks));
37213 }
37214
37215 static void
37216 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37217 HOST_WIDE_INT flags)
37218 {
37219 rs6000_print_options_internal (file, indent, string, flags, "",
37220 &rs6000_builtin_mask_names[0],
37221 ARRAY_SIZE (rs6000_builtin_mask_names));
37222 }
37223
37224 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37225 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37226 -mupper-regs-df, etc.).
37227
37228 If the user used -mno-power8-vector, we need to turn off all of the implicit
37229 ISA 2.07 and 3.0 options that relate to the vector unit.
37230
37231 If the user used -mno-power9-vector, we need to turn off all of the implicit
37232 ISA 3.0 options that relate to the vector unit.
37233
37234 This function does not handle explicit options such as the user specifying
37235 -mdirect-move. These are handled in rs6000_option_override_internal, and
37236 the appropriate error is given if needed.
37237
37238 We return a mask of all of the implicit options that should not be enabled
37239 by default. */
37240
37241 static HOST_WIDE_INT
37242 rs6000_disable_incompatible_switches (void)
37243 {
37244 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37245 size_t i, j;
37246
37247 static const struct {
37248 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37249 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37250 const char *const name; /* name of the switch. */
37251 } flags[] = {
37252 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37253 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37254 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37255 };
37256
37257 for (i = 0; i < ARRAY_SIZE (flags); i++)
37258 {
37259 HOST_WIDE_INT no_flag = flags[i].no_flag;
37260
37261 if ((rs6000_isa_flags & no_flag) == 0
37262 && (rs6000_isa_flags_explicit & no_flag) != 0)
37263 {
37264 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37265 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37266 & rs6000_isa_flags
37267 & dep_flags);
37268
37269 if (set_flags)
37270 {
37271 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37272 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37273 {
37274 set_flags &= ~rs6000_opt_masks[j].mask;
37275 error ("%<-mno-%s%> turns off %<-m%s%>",
37276 flags[i].name,
37277 rs6000_opt_masks[j].name);
37278 }
37279
37280 gcc_assert (!set_flags);
37281 }
37282
37283 rs6000_isa_flags &= ~dep_flags;
37284 ignore_masks |= no_flag | dep_flags;
37285 }
37286 }
37287
37288 return ignore_masks;
37289 }
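/* For example (a sketch of the interaction above): "-mno-vsx
   -mpower8-vector" explicitly disables VSX while explicitly enabling an
   option that depends on it, so the loop above emits
   "'-mno-vsx' turns off '-mpower8-vector'" and adds both masks to the
   ignore set that is returned.  */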
37290
37291 \f
37292 /* Helper function for printing the function name when debugging. */
37293
37294 static const char *
37295 get_decl_name (tree fn)
37296 {
37297 tree name;
37298
37299 if (!fn)
37300 return "<null>";
37301
37302 name = DECL_NAME (fn);
37303 if (!name)
37304 return "<no-name>";
37305
37306 return IDENTIFIER_POINTER (name);
37307 }
37308
37309 /* Return the clone id of the target we are compiling code for in a target
37310 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37311 the priority list for the target clones (ordered from lowest to
37312 highest). */
37313
37314 static int
37315 rs6000_clone_priority (tree fndecl)
37316 {
37317 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37318 HOST_WIDE_INT isa_masks;
37319 int ret = CLONE_DEFAULT;
37320 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37321 const char *attrs_str = NULL;
37322
37323 attrs = TREE_VALUE (TREE_VALUE (attrs));
37324 attrs_str = TREE_STRING_POINTER (attrs);
37325
37326 /* Return priority zero for default function. Return the ISA needed for the
37327 function if it is not the default. */
37328 if (strcmp (attrs_str, "default") != 0)
37329 {
37330 if (fn_opts == NULL_TREE)
37331 fn_opts = target_option_default_node;
37332
37333 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37334 isa_masks = rs6000_isa_flags;
37335 else
37336 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37337
37338 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37339 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37340 break;
37341 }
37342
37343 if (TARGET_DEBUG_TARGET)
37344 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37345 get_decl_name (fndecl), ret);
37346
37347 return ret;
37348 }
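/* A usage sketch for the priorities above (attribute syntax as
   documented for target_clones; the ISA names come from
   rs6000_clone_map):

     __attribute__((target_clones("cpu=power9,default")))
     double scale (double x) { return x * 2.0; }

   The "default" clone gets CLONE_DEFAULT (0); the power9 clone gets the
   highest clone-map entry whose ISA mask it satisfies.  */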
37349
37350 /* This compares the priority of target features in function DECL1 and DECL2.
37351 It returns positive value if DECL1 is higher priority, negative value if
37352 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37353 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37354
37355 static int
37356 rs6000_compare_version_priority (tree decl1, tree decl2)
37357 {
37358 int priority1 = rs6000_clone_priority (decl1);
37359 int priority2 = rs6000_clone_priority (decl2);
37360 int ret = priority1 - priority2;
37361
37362 if (TARGET_DEBUG_TARGET)
37363 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37364 get_decl_name (decl1), get_decl_name (decl2), ret);
37365
37366 return ret;
37367 }
37368
37369 /* Make a dispatcher declaration for the multi-versioned function DECL.
37370 Calls to DECL function will be replaced with calls to the dispatcher
37371 by the front-end. Returns the decl of the dispatcher function. */
37372
37373 static tree
37374 rs6000_get_function_versions_dispatcher (void *decl)
37375 {
37376 tree fn = (tree) decl;
37377 struct cgraph_node *node = NULL;
37378 struct cgraph_node *default_node = NULL;
37379 struct cgraph_function_version_info *node_v = NULL;
37380 struct cgraph_function_version_info *first_v = NULL;
37381
37382 tree dispatch_decl = NULL;
37383
37384 struct cgraph_function_version_info *default_version_info = NULL;
37385 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37386
37387 if (TARGET_DEBUG_TARGET)
37388 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37389 get_decl_name (fn));
37390
37391 node = cgraph_node::get (fn);
37392 gcc_assert (node != NULL);
37393
37394 node_v = node->function_version ();
37395 gcc_assert (node_v != NULL);
37396
37397 if (node_v->dispatcher_resolver != NULL)
37398 return node_v->dispatcher_resolver;
37399
37400 /* Find the default version and make it the first node. */
37401 first_v = node_v;
37402 /* Go to the beginning of the chain. */
37403 while (first_v->prev != NULL)
37404 first_v = first_v->prev;
37405
37406 default_version_info = first_v;
37407 while (default_version_info != NULL)
37408 {
37409 const tree decl2 = default_version_info->this_node->decl;
37410 if (is_function_default_version (decl2))
37411 break;
37412 default_version_info = default_version_info->next;
37413 }
37414
37415 /* If there is no default node, just return NULL. */
37416 if (default_version_info == NULL)
37417 return NULL;
37418
37419 /* Make default info the first node. */
37420 if (first_v != default_version_info)
37421 {
37422 default_version_info->prev->next = default_version_info->next;
37423 if (default_version_info->next)
37424 default_version_info->next->prev = default_version_info->prev;
37425 first_v->prev = default_version_info;
37426 default_version_info->next = first_v;
37427 default_version_info->prev = NULL;
37428 }
37429
37430 default_node = default_version_info->this_node;
37431
37432 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37433 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37434 "target_clones attribute needs GLIBC (2.23 and newer) that "
37435 "exports hardware capability bits");
37436 #else
37437
37438 if (targetm.has_ifunc_p ())
37439 {
37440 struct cgraph_function_version_info *it_v = NULL;
37441 struct cgraph_node *dispatcher_node = NULL;
37442 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37443
37444 /* Right now, the dispatching is done via ifunc. */
37445 dispatch_decl = make_dispatcher_decl (default_node->decl);
37446
37447 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37448 gcc_assert (dispatcher_node != NULL);
37449 dispatcher_node->dispatcher_function = 1;
37450 dispatcher_version_info
37451 = dispatcher_node->insert_new_function_version ();
37452 dispatcher_version_info->next = default_version_info;
37453 dispatcher_node->definition = 1;
37454
37455 /* Set the dispatcher for all the versions. */
37456 it_v = default_version_info;
37457 while (it_v != NULL)
37458 {
37459 it_v->dispatcher_resolver = dispatch_decl;
37460 it_v = it_v->next;
37461 }
37462 }
37463 else
37464 {
37465 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37466 "multiversioning needs ifunc which is not supported "
37467 "on this target");
37468 }
37469 #endif
37470
37471 return dispatch_decl;
37472 }
37473
37474 /* Make the resolver function decl to dispatch the versions of a multi-
37475 versioned function, DEFAULT_DECL. Create an empty basic block in the
37476 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37477 function. */
37478
37479 static tree
37480 make_resolver_func (const tree default_decl,
37481 const tree dispatch_decl,
37482 basic_block *empty_bb)
37483 {
37484 /* Make the resolver function static. The resolver function returns
37485 void *. */
37486 tree decl_name = clone_function_name (default_decl, "resolver");
37487 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37488 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37489 tree decl = build_fn_decl (resolver_name, type);
37490 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37491
37492 DECL_NAME (decl) = decl_name;
37493 TREE_USED (decl) = 1;
37494 DECL_ARTIFICIAL (decl) = 1;
37495 DECL_IGNORED_P (decl) = 0;
37496 TREE_PUBLIC (decl) = 0;
37497 DECL_UNINLINABLE (decl) = 1;
37498
37499 /* Resolver is not external, body is generated. */
37500 DECL_EXTERNAL (decl) = 0;
37501 DECL_EXTERNAL (dispatch_decl) = 0;
37502
37503 DECL_CONTEXT (decl) = NULL_TREE;
37504 DECL_INITIAL (decl) = make_node (BLOCK);
37505 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37506
37507 /* Build result decl and add to function_decl. */
37508 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37509 DECL_CONTEXT (t) = decl;
37510 DECL_ARTIFICIAL (t) = 1;
37511 DECL_IGNORED_P (t) = 1;
37512 DECL_RESULT (decl) = t;
37513
37514 gimplify_function_tree (decl);
37515 push_cfun (DECL_STRUCT_FUNCTION (decl));
37516 *empty_bb = init_lowered_empty_function (decl, false,
37517 profile_count::uninitialized ());
37518
37519 cgraph_node::add_new_function (decl, true);
37520 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37521
37522 pop_cfun ();
37523
37524 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37525 DECL_ATTRIBUTES (dispatch_decl)
37526 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37527
37528 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37529
37530 return decl;
37531 }
37532
37533 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37534 return a pointer to VERSION_DECL if we are running on a machine that
37535 supports the index CLONE_ISA hardware architecture bits. This function will
37536 be called during version dispatch to decide which function version to
37537 execute. It returns the basic block at the end, to which more conditions
37538 can be added. */
37539
37540 static basic_block
37541 add_condition_to_bb (tree function_decl, tree version_decl,
37542 int clone_isa, basic_block new_bb)
37543 {
37544 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37545
37546 gcc_assert (new_bb != NULL);
37547 gimple_seq gseq = bb_seq (new_bb);
37548
37549
37550 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37551 build_fold_addr_expr (version_decl));
37552 tree result_var = create_tmp_var (ptr_type_node);
37553 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37554 gimple *return_stmt = gimple_build_return (result_var);
37555
37556 if (clone_isa == CLONE_DEFAULT)
37557 {
37558 gimple_seq_add_stmt (&gseq, convert_stmt);
37559 gimple_seq_add_stmt (&gseq, return_stmt);
37560 set_bb_seq (new_bb, gseq);
37561 gimple_set_bb (convert_stmt, new_bb);
37562 gimple_set_bb (return_stmt, new_bb);
37563 pop_cfun ();
37564 return new_bb;
37565 }
37566
37567 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37568 tree cond_var = create_tmp_var (bool_int_type_node);
37569 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37570 const char *arg_str = rs6000_clone_map[clone_isa].name;
37571 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37572 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37573 gimple_call_set_lhs (call_cond_stmt, cond_var);
37574
37575 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37576 gimple_set_bb (call_cond_stmt, new_bb);
37577 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37578
37579 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37580 NULL_TREE, NULL_TREE);
37581 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37582 gimple_set_bb (if_else_stmt, new_bb);
37583 gimple_seq_add_stmt (&gseq, if_else_stmt);
37584
37585 gimple_seq_add_stmt (&gseq, convert_stmt);
37586 gimple_seq_add_stmt (&gseq, return_stmt);
37587 set_bb_seq (new_bb, gseq);
37588
37589 basic_block bb1 = new_bb;
37590 edge e12 = split_block (bb1, if_else_stmt);
37591 basic_block bb2 = e12->dest;
37592 e12->flags &= ~EDGE_FALLTHRU;
37593 e12->flags |= EDGE_TRUE_VALUE;
37594
37595 edge e23 = split_block (bb2, return_stmt);
37596 gimple_set_bb (convert_stmt, bb2);
37597 gimple_set_bb (return_stmt, bb2);
37598
37599 basic_block bb3 = e23->dest;
37600 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37601
37602 remove_edge (e23);
37603 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37604
37605 pop_cfun ();
37606 return bb3;
37607 }
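/* Chained together, the blocks built above give a resolver that behaves
   roughly like this C sketch (illustrative only; f_power9, f_power8 and
   f_default are hypothetical clone symbols, and the real body is
   lowered GIMPLE):

     void *
     f_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))
	 return (void *) f_power9;
       if (__builtin_cpu_supports ("arch_2_07"))
	 return (void *) f_power8;
       return (void *) f_default;
     }  */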
37608
37609 /* This function generates the dispatch function for multi-versioned functions.
37610 DISPATCH_DECL is the function which will contain the dispatch logic.
37611 FNDECLS are the function choices for dispatch, and is a tree chain.
37612 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37613 code is generated. */
37614
37615 static int
37616 dispatch_function_versions (tree dispatch_decl,
37617 void *fndecls_p,
37618 basic_block *empty_bb)
37619 {
37620 int ix;
37621 tree ele;
37622 vec<tree> *fndecls;
37623 tree clones[CLONE_MAX];
37624
37625 if (TARGET_DEBUG_TARGET)
37626 fputs ("dispatch_function_versions, top\n", stderr);
37627
37628 gcc_assert (dispatch_decl != NULL
37629 && fndecls_p != NULL
37630 && empty_bb != NULL);
37631
37632 /* fndecls_p is actually a vector. */
37633 fndecls = static_cast<vec<tree> *> (fndecls_p);
37634
37635 /* At least one more version other than the default. */
37636 gcc_assert (fndecls->length () >= 2);
37637
37638 /* The first version in the vector is the default decl. */
37639 memset ((void *) clones, '\0', sizeof (clones));
37640 clones[CLONE_DEFAULT] = (*fndecls)[0];
37641
37642 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37643 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37644 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37645 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37646 to insert the code here to do the call. */
37647
37648 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37649 {
37650 int priority = rs6000_clone_priority (ele);
37651 if (!clones[priority])
37652 clones[priority] = ele;
37653 }
37654
37655 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37656 if (clones[ix])
37657 {
37658 if (TARGET_DEBUG_TARGET)
37659 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37660 ix, get_decl_name (clones[ix]));
37661
37662 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37663 *empty_bb);
37664 }
37665
37666 return 0;
37667 }
37668
37669 /* Generate the dispatching code body to dispatch multi-versioned function
37670 DECL. The target hook is called to process the "target" attributes and
37671 provide the code to dispatch the right function at run-time. NODE points
37672 to the dispatcher decl whose body will be created. */
37673
37674 static tree
37675 rs6000_generate_version_dispatcher_body (void *node_p)
37676 {
37677 tree resolver;
37678 basic_block empty_bb;
37679 struct cgraph_node *node = (cgraph_node *) node_p;
37680 struct cgraph_function_version_info *ninfo = node->function_version ();
37681
37682 if (ninfo->dispatcher_resolver)
37683 return ninfo->dispatcher_resolver;
37684
37685 /* node is going to be an alias, so remove the finalized bit. */
37686 node->definition = false;
37687
37688 /* The first version in the chain corresponds to the default version. */
37689 ninfo->dispatcher_resolver = resolver
37690 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37691
37692 if (TARGET_DEBUG_TARGET)
37693 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37694 get_decl_name (resolver));
37695
37696 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37697 auto_vec<tree, 2> fn_ver_vec;
37698
37699 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37700 vinfo;
37701 vinfo = vinfo->next)
37702 {
37703 struct cgraph_node *version = vinfo->this_node;
37704 /* Check for virtual functions here again, as by this time it should
37705 have been determined if this function needs a vtable index or
37706 not. This happens for methods in derived classes that override
37707 virtual methods in base classes but are not explicitly marked as
37708 virtual. */
37709 if (DECL_VINDEX (version->decl))
37710 sorry ("Virtual function multiversioning not supported");
37711
37712 fn_ver_vec.safe_push (version->decl);
37713 }
37714
37715 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37716 cgraph_edge::rebuild_edges ();
37717 pop_cfun ();
37718 return resolver;
37719 }
37720
37721 \f
37722 /* Hook to determine if one function can safely inline another. */
37723
37724 static bool
37725 rs6000_can_inline_p (tree caller, tree callee)
37726 {
37727 bool ret = false;
37728 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37729 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37730
37731 /* If callee has no option attributes, then it is ok to inline. */
37732 if (!callee_tree)
37733 ret = true;
37734
37735 /* If caller has no option attributes, but callee does then it is not ok to
37736 inline. */
37737 else if (!caller_tree)
37738 ret = false;
37739
37740 else
37741 {
37742 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37743 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37744
37745 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37746 can inline an altivec function but a non-vsx function can't inline a
37747 vsx function. */
37748 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37749 == callee_opts->x_rs6000_isa_flags)
37750 ret = true;
37751 }
37752
37753 if (TARGET_DEBUG_TARGET)
37754 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37755 get_decl_name (caller), get_decl_name (callee),
37756 (ret ? "can" : "cannot"));
37757
37758 return ret;
37759 }
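/* For example (a sketch of the subset test above): a callee marked
   target("altivec") can be inlined into a caller marked target("vsx")
   when VSX implies AltiVec in the caller's flags, since the callee's
   ISA bits are then a subset of the caller's; a caller without VSX
   cannot inline a target("vsx") callee.  */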
37760 \f
37761 /* Allocate a stack temp and fixup the address so it meets the particular
37762 memory requirements (either offsettable or REG+REG addressing). */
37763
37764 rtx
37765 rs6000_allocate_stack_temp (machine_mode mode,
37766 bool offsettable_p,
37767 bool reg_reg_p)
37768 {
37769 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37770 rtx addr = XEXP (stack, 0);
37771 int strict_p = reload_completed;
37772
37773 if (!legitimate_indirect_address_p (addr, strict_p))
37774 {
37775 if (offsettable_p
37776 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37777 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37778
37779 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37780 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37781 }
37782
37783 return stack;
37784 }
37785
37786 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37787 convert to such a form to deal with memory reference instructions
37788 like STFIWX and LDBRX that only take reg+reg addressing. */
37789
37790 rtx
37791 rs6000_force_indexed_or_indirect_mem (rtx x)
37792 {
37793 machine_mode mode = GET_MODE (x);
37794
37795 gcc_assert (MEM_P (x));
37796 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37797 {
37798 rtx addr = XEXP (x, 0);
37799 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37800 {
37801 rtx reg = XEXP (addr, 0);
37802 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37803 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37804 gcc_assert (REG_P (reg));
37805 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37806 addr = reg;
37807 }
37808 else if (GET_CODE (addr) == PRE_MODIFY)
37809 {
37810 rtx reg = XEXP (addr, 0);
37811 rtx expr = XEXP (addr, 1);
37812 gcc_assert (REG_P (reg));
37813 gcc_assert (GET_CODE (expr) == PLUS);
37814 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37815 addr = reg;
37816 }
37817
37818 x = replace_equiv_address (x, force_reg (Pmode, addr));
37819 }
37820
37821 return x;
37822 }
37823
37824 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37825
37826 On the RS/6000, all integer constants are acceptable, most won't be valid
37827 for particular insns, though. Only easy FP constants are acceptable. */
37828
37829 static bool
37830 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37831 {
37832 if (TARGET_ELF && tls_referenced_p (x))
37833 return false;
37834
37835 if (CONST_DOUBLE_P (x))
37836 return easy_fp_constant (x, mode);
37837
37838 if (GET_CODE (x) == CONST_VECTOR)
37839 return easy_vector_constant (x, mode);
37840
37841 return true;
37842 }
37843
37844 \f
37845 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37846
37847 static bool
37848 chain_already_loaded (rtx_insn *last)
37849 {
37850 for (; last != NULL; last = PREV_INSN (last))
37851 {
37852 if (NONJUMP_INSN_P (last))
37853 {
37854 rtx patt = PATTERN (last);
37855
37856 if (GET_CODE (patt) == SET)
37857 {
37858 rtx lhs = XEXP (patt, 0);
37859
37860 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37861 return true;
37862 }
37863 }
37864 }
37865 return false;
37866 }
37867
37868 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37869
37870 void
37871 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37872 {
37873 rtx func = func_desc;
37874 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37875 rtx toc_load = NULL_RTX;
37876 rtx toc_restore = NULL_RTX;
37877 rtx func_addr;
37878 rtx abi_reg = NULL_RTX;
37879 rtx call[4];
37880 int n_call;
37881 rtx insn;
37882 bool is_pltseq_longcall;
37883
37884 if (global_tlsarg)
37885 tlsarg = global_tlsarg;
37886
37887 /* Handle longcall attributes. */
37888 is_pltseq_longcall = false;
37889 if ((INTVAL (cookie) & CALL_LONG) != 0
37890 && GET_CODE (func_desc) == SYMBOL_REF)
37891 {
37892 func = rs6000_longcall_ref (func_desc, tlsarg);
37893 if (TARGET_PLTSEQ)
37894 is_pltseq_longcall = true;
37895 }
37896
37897 /* Handle indirect calls. */
37898 if (!SYMBOL_REF_P (func)
37899 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37900 {
37901 /* Save the TOC into its reserved slot before the call,
37902 and prepare to restore it after the call. */
37903 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37904 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37905 gen_rtvec (1, stack_toc_offset),
37906 UNSPEC_TOCSLOT);
37907 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37908
37909 /* Can we optimize saving the TOC in the prologue or
37910 do we need to do it at every call? */
37911 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37912 cfun->machine->save_toc_in_prologue = true;
37913 else
37914 {
37915 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37916 rtx stack_toc_mem = gen_frame_mem (Pmode,
37917 gen_rtx_PLUS (Pmode, stack_ptr,
37918 stack_toc_offset));
37919 MEM_VOLATILE_P (stack_toc_mem) = 1;
37920 if (is_pltseq_longcall)
37921 {
37922 /* Use UNSPEC_PLTSEQ here to emit every instruction in an
37923 inline PLT call sequence with a reloc, enabling the
37924 linker to edit the sequence back to a direct call
37925 when that makes sense. */
37926 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37927 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37928 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37929 }
37930 else
37931 emit_move_insn (stack_toc_mem, toc_reg);
37932 }
37933
37934 if (DEFAULT_ABI == ABI_ELFv2)
37935 {
37936 /* A function pointer in the ELFv2 ABI is just a plain address, but
37937 the ABI requires it to be loaded into r12 before the call. */
37938 func_addr = gen_rtx_REG (Pmode, 12);
37939 if (!rtx_equal_p (func_addr, func))
37940 emit_move_insn (func_addr, func);
37941 abi_reg = func_addr;
37942 /* Indirect calls via CTR are strongly preferred over indirect
37943 calls via LR, so move the address there. Needed to mark
37944 this insn for linker plt sequence editing too. */
37945 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37946 if (is_pltseq_longcall)
37947 {
37948 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37949 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37950 emit_insn (gen_rtx_SET (func_addr, mark_func));
37951 v = gen_rtvec (2, func_addr, func_desc);
37952 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37953 }
37954 else
37955 emit_move_insn (func_addr, abi_reg);
37956 }
37957 else
37958 {
37959 /* A function pointer under AIX is a pointer to a data area whose
37960 first word contains the actual address of the function, whose
37961 second word contains a pointer to its TOC, and whose third word
37962 contains a value to place in the static chain register (r11).
37963 Note that if we load the static chain, our "trampoline" need
37964 not have any executable code. */
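/* A sketch of that layout (no such type appears in this file; the
   loads below simply index off FUNC at words 0, 1 and 2):

     struct aix_func_desc { void *entry; void *toc; void *chain; };  */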
37965
37966 /* Load up address of the actual function. */
37967 func = force_reg (Pmode, func);
37968 func_addr = gen_reg_rtx (Pmode);
37969 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37970
37971 /* Indirect calls via CTR are strongly preferred over indirect
37972 calls via LR, so move the address there. */
37973 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37974 emit_move_insn (ctr_reg, func_addr);
37975 func_addr = ctr_reg;
37976
37977 /* Prepare to load the TOC of the called function. Note that the
37978 TOC load must happen immediately before the actual call so
37979 that unwinding the TOC registers works correctly. See the
37980 comment in frob_update_context. */
37981 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37982 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37983 gen_rtx_PLUS (Pmode, func,
37984 func_toc_offset));
37985 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37986
37987 /* If we have a static chain, load it up. But, if the call was
37988 originally direct, the 3rd word has not been written since no
37989 trampoline has been built, so we ought not to load it, lest we
37990 override a static chain value. */
37991 if (!(GET_CODE (func_desc) == SYMBOL_REF
37992 && SYMBOL_REF_FUNCTION_P (func_desc))
37993 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37994 && !chain_already_loaded (get_current_sequence ()->next->last))
37995 {
37996 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37997 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37998 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37999 gen_rtx_PLUS (Pmode, func,
38000 func_sc_offset));
38001 emit_move_insn (sc_reg, func_sc_mem);
38002 abi_reg = sc_reg;
38003 }
38004 }
38005 }
38006 else
38007 {
38008 /* Direct calls use the TOC: for local calls, the callee will
38009 assume the TOC register is set; for non-local calls, the
38010 PLT stub needs the TOC register. */
38011 abi_reg = toc_reg;
38012 func_addr = func;
38013 }
38014
38015 /* Create the call. */
38016 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38017 if (value != NULL_RTX)
38018 call[0] = gen_rtx_SET (value, call[0]);
38019 n_call = 1;
38020
38021 if (toc_load)
38022 call[n_call++] = toc_load;
38023 if (toc_restore)
38024 call[n_call++] = toc_restore;
38025
38026 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38027
38028 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
38029 insn = emit_call_insn (insn);
38030
38031 /* Mention all registers defined by the ABI to hold information
38032 as uses in CALL_INSN_FUNCTION_USAGE. */
38033 if (abi_reg)
38034 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38035 }
38036
38037 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
38038
38039 void
38040 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38041 {
38042 rtx call[2];
38043 rtx insn;
38044
38045 gcc_assert (INTVAL (cookie) == 0);
38046
38047 if (global_tlsarg)
38048 tlsarg = global_tlsarg;
38049
38050 /* Create the call. */
38051 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
38052 if (value != NULL_RTX)
38053 call[0] = gen_rtx_SET (value, call[0]);
38054
38055 call[1] = simple_return_rtx;
38056
38057 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38058 insn = emit_call_insn (insn);
38059
38060 /* Note use of the TOC register. */
38061 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38062 }
38063
38064 /* Expand code to perform a call under the SYSV4 ABI. */
38065
38066 void
38067 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38068 {
38069 rtx func = func_desc;
38070 rtx func_addr;
38071 rtx call[4];
38072 rtx insn;
38073 rtx abi_reg = NULL_RTX;
38074 int n;
38075
38076 if (global_tlsarg)
38077 tlsarg = global_tlsarg;
38078
38079 /* Handle longcall attributes. */
38080 if ((INTVAL (cookie) & CALL_LONG) != 0
38081 && GET_CODE (func_desc) == SYMBOL_REF)
38082 {
38083 func = rs6000_longcall_ref (func_desc, tlsarg);
38084 /* If the longcall was implemented as an inline PLT call using
38085 PLT unspecs then func will be REG:r11. If not, func will be
38086 a pseudo reg. The inline PLT call sequence supports lazy
38087 linking (and longcalls to functions in dlopen'd libraries).
38088 The other style of longcalls doesn't. The lazy linking entry
38089 to the dynamic symbol resolver requires r11 be the function
38090 address (as it is for linker generated PLT stubs). Ensure
38091 r11 stays valid to the bctrl by marking r11 used by the call. */
38092 if (TARGET_PLTSEQ)
38093 abi_reg = func;
38094 }
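/* Illustrative sketch only (operand syntax is a placeholder): the
   inline PLT call sequence loads the PLT entry into r11, moves it to
   CTR and calls through it, conceptually
     lis 11,<plt>@ha
     lwz 11,<plt>@l(11)
     mtctr 11
     bctrl
   with each insn carrying a PLTSEQ-style relocation so the linker can
   rewrite the whole sequence back to a direct "bl sym" when that
   makes sense.  */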
38095
38096 /* Handle indirect calls. */
38097 if (GET_CODE (func) != SYMBOL_REF)
38098 {
38099 func = force_reg (Pmode, func);
38100
38101 /* Indirect calls via CTR are strongly preferred over indirect
38102 calls via LR, so move the address there. That can't be left
38103 to reload because we want to mark every instruction in an
38104 inline PLT call sequence with a reloc, enabling the linker to
38105 edit the sequence back to a direct call when that makes sense. */
38106 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38107 if (abi_reg)
38108 {
38109 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38110 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38111 emit_insn (gen_rtx_SET (func_addr, mark_func));
38112 v = gen_rtvec (2, func_addr, func_desc);
38113 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38114 }
38115 else
38116 emit_move_insn (func_addr, func);
38117 }
38118 else
38119 func_addr = func;
38120
38121 /* Create the call. */
38122 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38123 if (value != NULL_RTX)
38124 call[0] = gen_rtx_SET (value, call[0]);
38125
38126 call[1] = gen_rtx_USE (VOIDmode, cookie);
38127 n = 2;
38128 if (TARGET_SECURE_PLT
38129 && flag_pic
38130 && GET_CODE (func_addr) == SYMBOL_REF
38131 && !SYMBOL_REF_LOCAL_P (func_addr))
38132 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
38133
38134 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38135
38136 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
38137 insn = emit_call_insn (insn);
38138 if (abi_reg)
38139 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38140 }
38141
38142 /* Expand code to perform a sibling call under the SysV4 ABI. */
38143
38144 void
38145 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38146 {
38147 rtx func = func_desc;
38148 rtx func_addr;
38149 rtx call[3];
38150 rtx insn;
38151 rtx abi_reg = NULL_RTX;
38152
38153 if (global_tlsarg)
38154 tlsarg = global_tlsarg;
38155
38156 /* Handle longcall attributes. */
38157 if ((INTVAL (cookie) & CALL_LONG) != 0
38158 && GET_CODE (func_desc) == SYMBOL_REF)
38159 {
38160 func = rs6000_longcall_ref (func_desc, tlsarg);
38161 /* If the longcall was implemented as an inline PLT call using
38162 PLT unspecs then func will be REG:r11. If not, func will be
38163 a pseudo reg. The inline PLT call sequence supports lazy
38164 linking (and longcalls to functions in dlopen'd libraries).
38165 The other style of longcalls doesn't. The lazy linking entry
38166 to the dynamic symbol resolver requires r11 be the function
38167 address (as it is for linker generated PLT stubs). Ensure
38168 r11 stays valid to the bctr by marking r11 used by the call. */
38169 if (TARGET_PLTSEQ)
38170 abi_reg = func;
38171 }
38172
38173 /* Handle indirect calls. */
38174 if (GET_CODE (func) != SYMBOL_REF)
38175 {
38176 func = force_reg (Pmode, func);
38177
38178 /* Indirect sibcalls must go via CTR. That can't be left to
38179 reload because we want to mark every instruction in an inline
38180 PLT call sequence with a reloc, enabling the linker to edit
38181 the sequence back to a direct call when that makes sense. */
38182 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38183 if (abi_reg)
38184 {
38185 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38186 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38187 emit_insn (gen_rtx_SET (func_addr, mark_func));
38188 v = gen_rtvec (2, func_addr, func_desc);
38189 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38190 }
38191 else
38192 emit_move_insn (func_addr, func);
38193 }
38194 else
38195 func_addr = func;
38196
38197 /* Create the call. */
38198 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38199 if (value != NULL_RTX)
38200 call[0] = gen_rtx_SET (value, call[0]);
38201
38202 call[1] = gen_rtx_USE (VOIDmode, cookie);
38203 call[2] = simple_return_rtx;
38204
38205 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38206 insn = emit_call_insn (insn);
38207 if (abi_reg)
38208 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38209 }
38210
38211 #if TARGET_MACHO
38212
38213 /* Expand code to perform a call under the Darwin ABI.
38214 Modulo handling of mlongcall, this is much the same as sysv.
38215 If/when the longcall optimisation is removed, we could drop this
38216 code and use the sysv case (taking care to avoid the tls stuff).
38217
38218 We can use this for sibcalls too, if needed. */
38219
38220 void
38221 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38222 rtx cookie, bool sibcall)
38223 {
38224 rtx func = func_desc;
38225 rtx func_addr;
38226 rtx call[3];
38227 rtx insn;
38228 int cookie_val = INTVAL (cookie);
38229 bool make_island = false;
38230
38231 /* Handle longcall attributes, there are two cases for Darwin:
38232 1) Newer linkers are capable of synthesising any branch islands needed.
38233 2) We need a helper branch island synthesised by the compiler.
38234 The second case has mostly been retired and we don't use it for m64.
38235 In fact, it's an optimisation; we could just indirect as sysv does,
38236 but we keep it for backwards compatibility for now.
38237 If we're going to use this, then we need to keep the CALL_LONG bit set,
38238 so that we can pick up the special insn form later. */
38239 if ((cookie_val & CALL_LONG) != 0
38240 && GET_CODE (func_desc) == SYMBOL_REF)
38241 {
38242 if (darwin_emit_branch_islands && TARGET_32BIT)
38243 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38244 else
38245 {
38246 /* The linker is capable of doing this, but the user explicitly
38247 asked for -mlongcall, so we'll do the 'normal' version. */
38248 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38249 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38250 }
38251 }
38252
38253 /* Handle indirect calls. */
38254 if (GET_CODE (func) != SYMBOL_REF)
38255 {
38256 func = force_reg (Pmode, func);
38257
38258 /* Indirect calls via CTR are strongly preferred over indirect
38259 calls via LR, and are required for indirect sibcalls, so move
38260 the address there. */
38261 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38262 emit_move_insn (func_addr, func);
38263 }
38264 else
38265 func_addr = func;
38266
38267 /* Create the call. */
38268 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38269 if (value != NULL_RTX)
38270 call[0] = gen_rtx_SET (value, call[0]);
38271
38272 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38273
38274 if (sibcall)
38275 call[2] = simple_return_rtx;
38276 else
38277 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38278
38279 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38280 insn = emit_call_insn (insn);
38281 /* Now we have the debug info in the insn, we can set up the branch island
38282 if we're using one. */
38283 if (make_island)
38284 {
38285 tree funname = get_identifier (XSTR (func_desc, 0));
38286
38287 if (no_previous_def (funname))
38288 {
38289 rtx label_rtx = gen_label_rtx ();
38290 char *label_buf, temp_buf[256];
38291 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38292 CODE_LABEL_NUMBER (label_rtx));
38293 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38294 tree labelname = get_identifier (label_buf);
38295 add_compiler_branch_island (labelname, funname,
38296 insn_line ((const rtx_insn*)insn));
38297 }
38298 }
38299 }
38300 #endif
38301
38302 void
38303 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38304 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38305 {
38306 #if TARGET_MACHO
38307 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38308 #else
38309 gcc_unreachable ();
38310 #endif
38311 }
38312
38313
38314 void
38315 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38316 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38317 {
38318 #if TARGET_MACHO
38319 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38320 #else
38321 gcc_unreachable ();
38322 #endif
38323 }
38324
38325
38326 /* Return whether we need to always update the saved TOC pointer when we update
38327 the stack pointer. */
38328
38329 static bool
38330 rs6000_save_toc_in_prologue_p (void)
38331 {
38332 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38333 }
38334
38335 #ifdef HAVE_GAS_HIDDEN
38336 # define USE_HIDDEN_LINKONCE 1
38337 #else
38338 # define USE_HIDDEN_LINKONCE 0
38339 #endif
38340
38341 /* Fills in the label name that should be used for a 476 link stack thunk. */
38342
38343 void
38344 get_ppc476_thunk_name (char name[32])
38345 {
38346 gcc_assert (TARGET_LINK_STACK);
38347
38348 if (USE_HIDDEN_LINKONCE)
38349 sprintf (name, "__ppc476.get_thunk");
38350 else
38351 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38352 }
38353
38354 /* This function emits the simple thunk routine that is used to preserve
38355 the link stack on the 476 cpu. */
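/* Illustrative only (added annotation): rather than the usual
   "bcl 20,31,.+4" trick for obtaining the PC, call sites branch to
   this real routine, and it immediately returns, so the hardware
   branch link stack on the 476 sees a matched call/return pair.  */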
38356
38357 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38358 static void
38359 rs6000_code_end (void)
38360 {
38361 char name[32];
38362 tree decl;
38363
38364 if (!TARGET_LINK_STACK)
38365 return;
38366
38367 get_ppc476_thunk_name (name);
38368
38369 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38370 build_function_type_list (void_type_node, NULL_TREE));
38371 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38372 NULL_TREE, void_type_node);
38373 TREE_PUBLIC (decl) = 1;
38374 TREE_STATIC (decl) = 1;
38375
38376 #if RS6000_WEAK
38377 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38378 {
38379 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38380 targetm.asm_out.unique_section (decl, 0);
38381 switch_to_section (get_named_section (decl, NULL, 0));
38382 DECL_WEAK (decl) = 1;
38383 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38384 targetm.asm_out.globalize_label (asm_out_file, name);
38385 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38386 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38387 }
38388 else
38389 #endif
38390 {
38391 switch_to_section (text_section);
38392 ASM_OUTPUT_LABEL (asm_out_file, name);
38393 }
38394
38395 DECL_INITIAL (decl) = make_node (BLOCK);
38396 current_function_decl = decl;
38397 allocate_struct_function (decl, false);
38398 init_function_start (decl);
38399 first_function_block_is_cold = false;
38400 /* Make sure unwind info is emitted for the thunk if needed. */
38401 final_start_function (emit_barrier (), asm_out_file, 1);
38402
38403 fputs ("\tblr\n", asm_out_file);
38404
38405 final_end_function ();
38406 init_insn_lengths ();
38407 free_after_compilation (cfun);
38408 set_cfun (NULL);
38409 current_function_decl = NULL;
38410 }
38411
38412 /* Add r30 to hard reg set if the prologue sets it up and it is not
38413 pic_offset_table_rtx. */
38414
38415 static void
38416 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38417 {
38418 if (!TARGET_SINGLE_PIC_BASE
38419 && TARGET_TOC
38420 && TARGET_MINIMAL_TOC
38421 && !constant_pool_empty_p ())
38422 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38423 if (cfun->machine->split_stack_argp_used)
38424 add_to_hard_reg_set (&set->set, Pmode, 12);
38425
38426 /* Make sure the hard reg set doesn't include r2, which was possibly added
38427 via PIC_OFFSET_TABLE_REGNUM. */
38428 if (TARGET_TOC)
38429 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38430 }
38431
38432 \f
38433 /* Helper function for rs6000_split_logical to emit a logical instruction after
38434 splitting the operation into single GPR registers.
38435
38436 DEST is the destination register.
38437 OP1 and OP2 are the input source registers.
38438 CODE is the base operation (AND, IOR, XOR, NOT).
38439 MODE is the machine mode.
38440 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38441 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38442 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
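/* For instance (illustrative): an "andc"-style operation a = b & ~c
   reaches this function as CODE == AND with COMPLEMENT_OP2_P set; the
   canonicalisation below then moves the NOT onto the first operand so
   the emitted RTL matches the andc patterns.  */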
38443
38444 static void
38445 rs6000_split_logical_inner (rtx dest,
38446 rtx op1,
38447 rtx op2,
38448 enum rtx_code code,
38449 machine_mode mode,
38450 bool complement_final_p,
38451 bool complement_op1_p,
38452 bool complement_op2_p)
38453 {
38454 rtx bool_rtx;
38455
38456 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38457 if (op2 && CONST_INT_P (op2)
38458 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38459 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38460 {
38461 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38462 HOST_WIDE_INT value = INTVAL (op2) & mask;
38463
38464 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38465 if (code == AND)
38466 {
38467 if (value == 0)
38468 {
38469 emit_insn (gen_rtx_SET (dest, const0_rtx));
38470 return;
38471 }
38472
38473 else if (value == mask)
38474 {
38475 if (!rtx_equal_p (dest, op1))
38476 emit_insn (gen_rtx_SET (dest, op1));
38477 return;
38478 }
38479 }
38480
38481 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38482 into separate ORI/ORIS or XORI/XORIS instructions. */
38483 else if (code == IOR || code == XOR)
38484 {
38485 if (value == 0)
38486 {
38487 if (!rtx_equal_p (dest, op1))
38488 emit_insn (gen_rtx_SET (dest, op1));
38489 return;
38490 }
38491 }
38492 }
38493
38494 if (code == AND && mode == SImode
38495 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38496 {
38497 emit_insn (gen_andsi3 (dest, op1, op2));
38498 return;
38499 }
38500
38501 if (complement_op1_p)
38502 op1 = gen_rtx_NOT (mode, op1);
38503
38504 if (complement_op2_p)
38505 op2 = gen_rtx_NOT (mode, op2);
38506
38507 /* For canonical RTL, if only one arm is inverted it is the first. */
38508 if (!complement_op1_p && complement_op2_p)
38509 std::swap (op1, op2);
38510
38511 bool_rtx = ((code == NOT)
38512 ? gen_rtx_NOT (mode, op1)
38513 : gen_rtx_fmt_ee (code, mode, op1, op2));
38514
38515 if (complement_final_p)
38516 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38517
38518 emit_insn (gen_rtx_SET (dest, bool_rtx));
38519 }
38520
38521 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38522 operations are split immediately during RTL generation to allow for more
38523 optimizations of the AND/IOR/XOR.
38524
38525 OPERANDS is an array containing the destination and two input operands.
38526 CODE is the base operation (AND, IOR, XOR, NOT).
38527 MODE is the machine mode.
38528 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38529 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38530 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
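/* Worked example (illustrative): on a 32-bit target, a DImode
   a = b | 0x12345678 splits as hi = 0, lo = 0x12345678. The hi word
   becomes a plain register move (IOR with 0), and the lo word, which
   is not a 16-bit logical constant, is split further into
     tmp = b.lo | 0x12340000   -- oris
     a.lo = tmp | 0x00005678   -- ori  */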
38533
38534 static void
38535 rs6000_split_logical_di (rtx operands[3],
38536 enum rtx_code code,
38537 bool complement_final_p,
38538 bool complement_op1_p,
38539 bool complement_op2_p)
38540 {
38541 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38542 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38543 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38544 enum hi_lo { hi = 0, lo = 1 };
38545 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38546 size_t i;
38547
38548 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38549 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38550 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38551 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38552
38553 if (code == NOT)
38554 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38555 else
38556 {
38557 if (!CONST_INT_P (operands[2]))
38558 {
38559 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38560 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38561 }
38562 else
38563 {
38564 HOST_WIDE_INT value = INTVAL (operands[2]);
38565 HOST_WIDE_INT value_hi_lo[2];
38566
38567 gcc_assert (!complement_final_p);
38568 gcc_assert (!complement_op1_p);
38569 gcc_assert (!complement_op2_p);
38570
38571 value_hi_lo[hi] = value >> 32;
38572 value_hi_lo[lo] = value & lower_32bits;
38573
38574 for (i = 0; i < 2; i++)
38575 {
38576 HOST_WIDE_INT sub_value = value_hi_lo[i];
38577
38578 if (sub_value & sign_bit)
38579 sub_value |= upper_32bits;
38580
38581 op2_hi_lo[i] = GEN_INT (sub_value);
38582
38583 /* If this is an AND instruction, check to see if we need to load
38584 the value in a register. */
38585 if (code == AND && sub_value != -1 && sub_value != 0
38586 && !and_operand (op2_hi_lo[i], SImode))
38587 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38588 }
38589 }
38590 }
38591
38592 for (i = 0; i < 2; i++)
38593 {
38594 /* Split large IOR/XOR operations. */
38595 if ((code == IOR || code == XOR)
38596 && CONST_INT_P (op2_hi_lo[i])
38597 && !complement_final_p
38598 && !complement_op1_p
38599 && !complement_op2_p
38600 && !logical_const_operand (op2_hi_lo[i], SImode))
38601 {
38602 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38603 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38604 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38605 rtx tmp = gen_reg_rtx (SImode);
38606
38607 /* Make sure the constant is sign extended. */
38608 if ((hi_16bits & sign_bit) != 0)
38609 hi_16bits |= upper_32bits;
38610
38611 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38612 code, SImode, false, false, false);
38613
38614 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38615 code, SImode, false, false, false);
38616 }
38617 else
38618 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38619 code, SImode, complement_final_p,
38620 complement_op1_p, complement_op2_p);
38621 }
38622
38623 return;
38624 }
38625
38626 /* Split the insns that make up boolean operations operating on multiple GPR
38627 registers. The boolean MD patterns ensure that the inputs either are
38628 exactly the same as the output registers, or there is no overlap.
38629
38630 OPERANDS is an array containing the destination and two input operands.
38631 CODE is the base operation (AND, IOR, XOR, NOT).
38632 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38633 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38634 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38635
38636 void
38637 rs6000_split_logical (rtx operands[3],
38638 enum rtx_code code,
38639 bool complement_final_p,
38640 bool complement_op1_p,
38641 bool complement_op2_p)
38642 {
38643 machine_mode mode = GET_MODE (operands[0]);
38644 machine_mode sub_mode;
38645 rtx op0, op1, op2;
38646 int sub_size, regno0, regno1, nregs, i;
38647
38648 /* If this is DImode, use the specialized version that can run before
38649 register allocation. */
38650 if (mode == DImode && !TARGET_POWERPC64)
38651 {
38652 rs6000_split_logical_di (operands, code, complement_final_p,
38653 complement_op1_p, complement_op2_p);
38654 return;
38655 }
38656
38657 op0 = operands[0];
38658 op1 = operands[1];
38659 op2 = (code == NOT) ? NULL_RTX : operands[2];
38660 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38661 sub_size = GET_MODE_SIZE (sub_mode);
38662 regno0 = REGNO (op0);
38663 regno1 = REGNO (op1);
38664
38665 gcc_assert (reload_completed);
38666 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38667 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38668
38669 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38670 gcc_assert (nregs > 1);
38671
38672 if (op2 && REG_P (op2))
38673 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38674
38675 for (i = 0; i < nregs; i++)
38676 {
38677 int offset = i * sub_size;
38678 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38679 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38680 rtx sub_op2 = ((code == NOT)
38681 ? NULL_RTX
38682 : simplify_subreg (sub_mode, op2, mode, offset));
38683
38684 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38685 complement_final_p, complement_op1_p,
38686 complement_op2_p);
38687 }
38688
38689 return;
38690 }
38691
38692 \f
38693 /* Return true if the peephole2 can combine an addis instruction with a
38694 D-form load whose offset allows the pair to be fused together on a
38695 power8. */
38696
38697 bool
38698 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38699 rtx addis_value, /* addis value. */
38700 rtx target, /* target register that is loaded. */
38701 rtx mem) /* bottom part of the memory addr. */
38702 {
38703 rtx addr;
38704 rtx base_reg;
38705
38706 /* Validate arguments. */
38707 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38708 return false;
38709
38710 if (!base_reg_operand (target, GET_MODE (target)))
38711 return false;
38712
38713 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38714 return false;
38715
38716 /* Allow sign/zero extension. */
38717 if (GET_CODE (mem) == ZERO_EXTEND
38718 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38719 mem = XEXP (mem, 0);
38720
38721 if (!MEM_P (mem))
38722 return false;
38723
38724 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38725 return false;
38726
38727 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38728 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38729 return false;
38730
38731 /* Validate that the register used to load the high value is either the
38732 register being loaded, or we can safely replace its use.
38733
38734 This function is only called from the peephole2 pass and we assume that
38735 there are 2 instructions in the peephole (addis and load), so we want to
38736 check if the target register was not used in the memory address and the
38737 register to hold the addis result is dead after the peephole. */
38738 if (REGNO (addis_reg) != REGNO (target))
38739 {
38740 if (reg_mentioned_p (target, mem))
38741 return false;
38742
38743 if (!peep2_reg_dead_p (2, addis_reg))
38744 return false;
38745
38746 /* If the target register being loaded is the stack pointer, we must
38747 avoid loading any other value into it, even temporarily. */
38748 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38749 return false;
38750 }
38751
38752 base_reg = XEXP (addr, 0);
38753 return REGNO (addis_reg) == REGNO (base_reg);
38754 }
38755
38756 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38757 sequence. We adjust the addis register to use the target register. If the
38758 load sign extends, we change it to a zero-extending load followed by an
38759 explicit sign extension, since the fusion only covers zero-extending
38760 loads.
38761
38762 The operands are:
38763 operands[0] register set with addis (to be replaced with target)
38764 operands[1] value set via addis
38765 operands[2] target register being loaded
38766 operands[3] D-form memory reference using operands[0]. */
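/* Illustrative sketch of the sign-extending case (register and symbol
   names are placeholders): a fused sign-extending HImode load comes
   out as a zero-extending pair plus a separate extension,
     addis 9,2,sym@toc@ha
     lhz 9,sym@toc@l(9)
     extsh 9,9
   rather than a single lha, because only zero-extending loads fuse.  */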
38767
38768 void
38769 expand_fusion_gpr_load (rtx *operands)
38770 {
38771 rtx addis_value = operands[1];
38772 rtx target = operands[2];
38773 rtx orig_mem = operands[3];
38774 rtx new_addr, new_mem, orig_addr, offset;
38775 enum rtx_code plus_or_lo_sum;
38776 machine_mode target_mode = GET_MODE (target);
38777 machine_mode extend_mode = target_mode;
38778 machine_mode ptr_mode = Pmode;
38779 enum rtx_code extend = UNKNOWN;
38780
38781 if (GET_CODE (orig_mem) == ZERO_EXTEND
38782 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38783 {
38784 extend = GET_CODE (orig_mem);
38785 orig_mem = XEXP (orig_mem, 0);
38786 target_mode = GET_MODE (orig_mem);
38787 }
38788
38789 gcc_assert (MEM_P (orig_mem));
38790
38791 orig_addr = XEXP (orig_mem, 0);
38792 plus_or_lo_sum = GET_CODE (orig_addr);
38793 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38794
38795 offset = XEXP (orig_addr, 1);
38796 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38797 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38798
38799 if (extend != UNKNOWN)
38800 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38801
38802 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38803 UNSPEC_FUSION_GPR);
38804 emit_insn (gen_rtx_SET (target, new_mem));
38805
38806 if (extend == SIGN_EXTEND)
38807 {
38808 int sub_off = ((BYTES_BIG_ENDIAN)
38809 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38810 : 0);
38811 rtx sign_reg
38812 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38813
38814 emit_insn (gen_rtx_SET (target,
38815 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38816 }
38817
38818 return;
38819 }
38820
38821 /* Emit the addis instruction that will be part of a fused instruction
38822 sequence. */
38823
38824 void
38825 emit_fusion_addis (rtx target, rtx addis_value)
38826 {
38827 rtx fuse_ops[10];
38828 const char *addis_str = NULL;
38829
38830 /* Emit the addis instruction. */
38831 fuse_ops[0] = target;
38832 if (satisfies_constraint_L (addis_value))
38833 {
38834 fuse_ops[1] = addis_value;
38835 addis_str = "lis %0,%v1";
38836 }
38837
38838 else if (GET_CODE (addis_value) == PLUS)
38839 {
38840 rtx op0 = XEXP (addis_value, 0);
38841 rtx op1 = XEXP (addis_value, 1);
38842
38843 if (REG_P (op0) && CONST_INT_P (op1)
38844 && satisfies_constraint_L (op1))
38845 {
38846 fuse_ops[1] = op0;
38847 fuse_ops[2] = op1;
38848 addis_str = "addis %0,%1,%v2";
38849 }
38850 }
38851
38852 else if (GET_CODE (addis_value) == HIGH)
38853 {
38854 rtx value = XEXP (addis_value, 0);
38855 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38856 {
38857 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38858 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38859 if (TARGET_ELF)
38860 addis_str = "addis %0,%2,%1@toc@ha";
38861
38862 else if (TARGET_XCOFF)
38863 addis_str = "addis %0,%1@u(%2)";
38864
38865 else
38866 gcc_unreachable ();
38867 }
38868
38869 else if (GET_CODE (value) == PLUS)
38870 {
38871 rtx op0 = XEXP (value, 0);
38872 rtx op1 = XEXP (value, 1);
38873
38874 if (GET_CODE (op0) == UNSPEC
38875 && XINT (op0, 1) == UNSPEC_TOCREL
38876 && CONST_INT_P (op1))
38877 {
38878 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38879 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38880 fuse_ops[3] = op1;
38881 if (TARGET_ELF)
38882 addis_str = "addis %0,%2,%1+%3@toc@ha";
38883
38884 else if (TARGET_XCOFF)
38885 addis_str = "addis %0,%1+%3@u(%2)";
38886
38887 else
38888 gcc_unreachable ();
38889 }
38890 }
38891
38892 else if (satisfies_constraint_L (value))
38893 {
38894 fuse_ops[1] = value;
38895 addis_str = "lis %0,%v1";
38896 }
38897
38898 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38899 {
38900 fuse_ops[1] = value;
38901 addis_str = "lis %0,%1@ha";
38902 }
38903 }
38904
38905 if (!addis_str)
38906 fatal_insn ("Could not generate addis value for fusion", addis_value);
38907
38908 output_asm_insn (addis_str, fuse_ops);
38909 }
38910
38911 /* Emit a D-form load or store instruction that is the second instruction
38912 of a fusion sequence. */
38913
38914 static void
38915 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38916 {
38917 rtx fuse_ops[10];
38918 char insn_template[80];
38919
38920 fuse_ops[0] = load_reg;
38921 fuse_ops[1] = addis_reg;
38922
38923 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38924 {
38925 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38926 fuse_ops[2] = offset;
38927 output_asm_insn (insn_template, fuse_ops);
38928 }
38929
38930 else if (GET_CODE (offset) == UNSPEC
38931 && XINT (offset, 1) == UNSPEC_TOCREL)
38932 {
38933 if (TARGET_ELF)
38934 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38935
38936 else if (TARGET_XCOFF)
38937 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38938
38939 else
38940 gcc_unreachable ();
38941
38942 fuse_ops[2] = XVECEXP (offset, 0, 0);
38943 output_asm_insn (insn_template, fuse_ops);
38944 }
38945
38946 else if (GET_CODE (offset) == PLUS
38947 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38948 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38949 && CONST_INT_P (XEXP (offset, 1)))
38950 {
38951 rtx tocrel_unspec = XEXP (offset, 0);
38952 if (TARGET_ELF)
38953 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38954
38955 else if (TARGET_XCOFF)
38956 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38957
38958 else
38959 gcc_unreachable ();
38960
38961 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38962 fuse_ops[3] = XEXP (offset, 1);
38963 output_asm_insn (insn_template, fuse_ops);
38964 }
38965
38966 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38967 {
38968 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38969
38970 fuse_ops[2] = offset;
38971 output_asm_insn (insn_template, fuse_ops);
38972 }
38973
38974 else
38975 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38976
38977 return;
38978 }
38979
38980 /* Given an address, convert it into the addis and load offset parts. Addresses
38981 created during the peephole2 process look like:
38982 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38983 (unspec [(...)] UNSPEC_TOCREL)) */
38984
38985 static void
38986 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38987 {
38988 rtx hi, lo;
38989
38990 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38991 {
38992 hi = XEXP (addr, 0);
38993 lo = XEXP (addr, 1);
38994 }
38995 else
38996 gcc_unreachable ();
38997
38998 *p_hi = hi;
38999 *p_lo = lo;
39000 }
39001
39002 /* Return a string to fuse an addis instruction with a gpr load to the same
39003 register that the addis instruction set up. The address used is the
39004 logical address that was formed during peephole2:
39005 (lo_sum (high) (low-part))
39006
39007 The code is complicated, so we call output_asm_insn directly, and just
39008 return "". */
39009
39010 const char *
39011 emit_fusion_gpr_load (rtx target, rtx mem)
39012 {
39013 rtx addis_value;
39014 rtx addr;
39015 rtx load_offset;
39016 const char *load_str = NULL;
39017 machine_mode mode;
39018
39019 if (GET_CODE (mem) == ZERO_EXTEND)
39020 mem = XEXP (mem, 0);
39021
39022 gcc_assert (REG_P (target) && MEM_P (mem));
39023
39024 addr = XEXP (mem, 0);
39025 fusion_split_address (addr, &addis_value, &load_offset);
39026
39027 /* Now emit the load instruction to the same register. */
39028 mode = GET_MODE (mem);
39029 switch (mode)
39030 {
39031 case E_QImode:
39032 load_str = "lbz";
39033 break;
39034
39035 case E_HImode:
39036 load_str = "lhz";
39037 break;
39038
39039 case E_SImode:
39040 case E_SFmode:
39041 load_str = "lwz";
39042 break;
39043
39044 case E_DImode:
39045 case E_DFmode:
39046 gcc_assert (TARGET_POWERPC64);
39047 load_str = "ld";
39048 break;
39049
39050 default:
39051 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
39052 }
39053
39054 /* Emit the addis instruction. */
39055 emit_fusion_addis (target, addis_value);
39056
39057 /* Emit the D-form load instruction. */
39058 emit_fusion_load (target, target, load_offset, load_str);
39059
39060 return "";
39061 }
39062 \f
39063
39064 #ifdef RS6000_GLIBC_ATOMIC_FENV
39065 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39066 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39067 #endif
39068
39069 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39070
39071 static void
39072 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39073 {
39074 if (!TARGET_HARD_FLOAT)
39075 {
39076 #ifdef RS6000_GLIBC_ATOMIC_FENV
39077 if (atomic_hold_decl == NULL_TREE)
39078 {
39079 atomic_hold_decl
39080 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39081 get_identifier ("__atomic_feholdexcept"),
39082 build_function_type_list (void_type_node,
39083 double_ptr_type_node,
39084 NULL_TREE));
39085 TREE_PUBLIC (atomic_hold_decl) = 1;
39086 DECL_EXTERNAL (atomic_hold_decl) = 1;
39087 }
39088
39089 if (atomic_clear_decl == NULL_TREE)
39090 {
39091 atomic_clear_decl
39092 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39093 get_identifier ("__atomic_feclearexcept"),
39094 build_function_type_list (void_type_node,
39095 NULL_TREE));
39096 TREE_PUBLIC (atomic_clear_decl) = 1;
39097 DECL_EXTERNAL (atomic_clear_decl) = 1;
39098 }
39099
39100 tree const_double = build_qualified_type (double_type_node,
39101 TYPE_QUAL_CONST);
39102 tree const_double_ptr = build_pointer_type (const_double);
39103 if (atomic_update_decl == NULL_TREE)
39104 {
39105 atomic_update_decl
39106 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39107 get_identifier ("__atomic_feupdateenv"),
39108 build_function_type_list (void_type_node,
39109 const_double_ptr,
39110 NULL_TREE));
39111 TREE_PUBLIC (atomic_update_decl) = 1;
39112 DECL_EXTERNAL (atomic_update_decl) = 1;
39113 }
39114
39115 tree fenv_var = create_tmp_var_raw (double_type_node);
39116 TREE_ADDRESSABLE (fenv_var) = 1;
39117 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39118
39119 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39120 *clear = build_call_expr (atomic_clear_decl, 0);
39121 *update = build_call_expr (atomic_update_decl, 1,
39122 fold_convert (const_double_ptr, fenv_addr));
39123 #endif
39124 return;
39125 }
39126
39127 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39128 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39129 tree call_mffs = build_call_expr (mffs, 0);
39130
39131 /* Generates the equivalent of feholdexcept (&fenv_var)
39132
39133 *fenv_var = __builtin_mffs ();
39134 double fenv_hold;
39135 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
39136 __builtin_mtfsf (0xff, fenv_hold); */
39137
39138 /* Mask to clear everything except for the rounding modes and non-IEEE
39139 arithmetic flag. */
39140 const unsigned HOST_WIDE_INT hold_exception_mask =
39141 HOST_WIDE_INT_C (0xffffffff00000007);
39142
39143 tree fenv_var = create_tmp_var_raw (double_type_node);
39144
39145 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39146
39147 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39148 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39149 build_int_cst (uint64_type_node,
39150 hold_exception_mask));
39151
39152 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39153 fenv_llu_and);
39154
39155 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39156 build_int_cst (unsigned_type_node, 0xff),
39157 fenv_hold_mtfsf);
39158
39159 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39160
39161 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39162
39163 double fenv_clear = __builtin_mffs ();
39164 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39165 __builtin_mtfsf (0xff, fenv_clear); */
39166
39167 /* Mask to clear the entire lower word of the FPSCR (all exception,
39168 enable, and rounding-mode bits). */
39169 const unsigned HOST_WIDE_INT clear_exception_mask =
39170 HOST_WIDE_INT_C (0xffffffff00000000);
39171
39172 tree fenv_clear = create_tmp_var_raw (double_type_node);
39173
39174 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39175
39176 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39177 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39178 fenv_clean_llu,
39179 build_int_cst (uint64_type_node,
39180 clear_exception_mask));
39181
39182 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39183 fenv_clear_llu_and);
39184
39185 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39186 build_int_cst (unsigned_type_node, 0xff),
39187 fenv_clear_mtfsf);
39188
39189 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39190
39191 /* Generates the equivalent of feupdateenv (&fenv_var)
39192
39193 double old_fenv = __builtin_mffs ();
39194 double fenv_update;
39195 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39196 (*(uint64_t*)fenv_var & 0x1ff80fff);
39197 __builtin_mtfsf (0xff, fenv_update); */
39198
39199 const unsigned HOST_WIDE_INT update_exception_mask =
39200 HOST_WIDE_INT_C (0xffffffff1fffff00);
39201 const unsigned HOST_WIDE_INT new_exception_mask =
39202 HOST_WIDE_INT_C (0x1ff80fff);
39203
39204 tree old_fenv = create_tmp_var_raw (double_type_node);
39205 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39206
39207 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39208 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39209 build_int_cst (uint64_type_node,
39210 update_exception_mask));
39211
39212 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39213 build_int_cst (uint64_type_node,
39214 new_exception_mask));
39215
39216 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39217 old_llu_and, new_llu_and);
39218
39219 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39220 new_llu_mask);
39221
39222 tree update_mtfsf = build_call_expr (mtfsf, 2,
39223 build_int_cst (unsigned_type_node, 0xff),
39224 fenv_update_mtfsf);
39225
39226 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39227 }
39228
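/* Summary added for clarity (inferred from the xxpermdi/vmrgew
   sequence below): convert the two doubles in SRC1 and the two in
   SRC2 to single precision and pack the results into DST as
   { src1[0], src1[1], src2[0], src2[1] }.  */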
39229 void
39230 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39231 {
39232 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39233
39234 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39235 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39236
39237 /* The destination of the vmrgew instruction layout is:
39238 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39239 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39240 vmrgew instruction will be correct. */
39241 if (BYTES_BIG_ENDIAN)
39242 {
39243 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39244 GEN_INT (0)));
39245 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39246 GEN_INT (3)));
39247 }
39248 else
39249 {
39250 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39251 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39252 }
39253
39254 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39255 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39256
39257 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39258 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39259
39260 if (BYTES_BIG_ENDIAN)
39261 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39262 else
39263 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39264 }
39265
39266 void
39267 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39268 {
39269 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39270
39271 rtx_tmp0 = gen_reg_rtx (V2DImode);
39272 rtx_tmp1 = gen_reg_rtx (V2DImode);
39273
39274 /* The destination of the vmrgew instruction layout is:
39275 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39276 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39277 vmrgew instruction will be correct. */
39278 if (BYTES_BIG_ENDIAN)
39279 {
39280 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39281 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39282 }
39283 else
39284 {
39285 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39286 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39287 }
39288
39289 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39290 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39291
39292 if (signed_convert)
39293 {
39294 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39295 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39296 }
39297 else
39298 {
39299 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39300 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39301 }
39302
39303 if (BYTES_BIG_ENDIAN)
39304 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39305 else
39306 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39307 }
39308
39309 void
39310 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39311 rtx src2)
39312 {
39313 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39314
39315 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39316 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39317
39318 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39319 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39320
39321 rtx_tmp2 = gen_reg_rtx (V4SImode);
39322 rtx_tmp3 = gen_reg_rtx (V4SImode);
39323
39324 if (signed_convert)
39325 {
39326 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39327 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39328 }
39329 else
39330 {
39331 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39332 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39333 }
39334
39335 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39336 }
39337
39338 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39339
39340 static bool
39341 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39342 optimization_type opt_type)
39343 {
39344 switch (op)
39345 {
39346 case rsqrt_optab:
39347 return (opt_type == OPTIMIZE_FOR_SPEED
39348 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39349
39350 default:
39351 return true;
39352 }
39353 }
39354
39355 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39356
39357 static HOST_WIDE_INT
39358 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39359 {
39360 if (TREE_CODE (exp) == STRING_CST
39361 && (STRICT_ALIGNMENT || !optimize_size))
39362 return MAX (align, BITS_PER_WORD);
39363 return align;
39364 }
39365
39366 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39367
39368 static HOST_WIDE_INT
39369 rs6000_starting_frame_offset (void)
39370 {
39371 if (FRAME_GROWS_DOWNWARD)
39372 return 0;
39373 return RS6000_STARTING_FRAME_OFFSET;
39374 }
39375 \f
39376
39377 /* Create an alias for a mangled name where we have changed the mangling (in
39378 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39379 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39380
39381 #if TARGET_ELF && RS6000_WEAK
39382 static void
39383 rs6000_globalize_decl_name (FILE * stream, tree decl)
39384 {
39385 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39386
39387 targetm.asm_out.globalize_label (stream, name);
39388
39389 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39390 {
39391 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39392 const char *old_name;
39393
39394 ieee128_mangling_gcc_8_1 = true;
39395 lang_hooks.set_decl_assembler_name (decl);
39396 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39397 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39398 ieee128_mangling_gcc_8_1 = false;
39399
39400 if (strcmp (name, old_name) != 0)
39401 {
39402 fprintf (stream, "\t.weak %s\n", old_name);
39403 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39404 }
39405 }
39406 }
39407 #endif
39408
39409 \f
39410 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39411 function names from <foo>l to <foo>f128 if the default long double type is
39412 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39413 include file switches the names on systems that support long double as IEEE
39414 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39415 In the future, glibc will export names like __ieee128_sinf128 and we can
39416 switch to using those instead of using sinf128, which pollutes the user's
39417 namespace.
39418
39419 This will switch the names for Fortran math functions as well (which doesn't
39420 use math.h). However, Fortran needs other changes to the compiler and
39421 library before you can switch the real*16 type at compile time.
39422
39423 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39424 only do this if the default is that long double is IBM extended double, and
39425 the user asked for IEEE 128-bit. */
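/* For example: with IBM extended double as the configured default and
   the user selecting IEEE 128-bit, a built-in call that would have
   gone to "sinl" is redirected below to "sinf128" by replacing the
   trailing 'l' of the name with "f128".  */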
39426
39427 static tree
39428 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39429 {
39430 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39431 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39432 {
39433 size_t len = IDENTIFIER_LENGTH (id);
39434 const char *name = IDENTIFIER_POINTER (id);
39435
39436 if (name[len - 1] == 'l')
39437 {
39438 bool uses_ieee128_p = false;
39439 tree type = TREE_TYPE (decl);
39440 machine_mode ret_mode = TYPE_MODE (type);
39441
39442 /* See if the function returns an IEEE 128-bit floating point type or
39443 complex type. */
39444 if (ret_mode == TFmode || ret_mode == TCmode)
39445 uses_ieee128_p = true;
39446 else
39447 {
39448 function_args_iterator args_iter;
39449 tree arg;
39450
39451 /* See if the function passes an IEEE 128-bit floating point type
39452 or complex type. */
39453 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39454 {
39455 machine_mode arg_mode = TYPE_MODE (arg);
39456 if (arg_mode == TFmode || arg_mode == TCmode)
39457 {
39458 uses_ieee128_p = true;
39459 break;
39460 }
39461 }
39462 }
39463
39464 /* If we passed or returned an IEEE 128-bit floating point type,
39465 change the name. */
39466 if (uses_ieee128_p)
39467 {
39468 char *name2 = (char *) alloca (len + 4);
39469 memcpy (name2, name, len - 1);
39470 strcpy (name2 + len - 1, "f128");
39471 id = get_identifier (name2);
39472 }
39473 }
39474 }
39475
39476 return id;
39477 }
39478
39479 \f
39480 struct gcc_target targetm = TARGET_INITIALIZER;
39481
39482 #include "gt-rs6000.h"