/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif
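
/* For instance, on powerpc64le-linux POWERPC_LINUX is defined, so the
   default computed above is 0 and long double stays IBM double-double
   unless -mabi=ieeelongdouble is given; on targets where neither macro
   is defined, the default flips to 1 (IEEE 128-bit).  */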

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;
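
/* As a rough sketch of how rs6000_stack_info (declared below) fills this
   in, the major components add up to the frame size, ignoring the
   ABI-required rounding and padding it also applies:

     total_size = vars_size + parm_size + save_size + fixed_size

   with save_size covering the GP/FP/AltiVec save areas sized by the
   *_size fields above.  */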

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call to
   obtain the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use a variant of the AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) instructions for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV = 0x001,		/* Use divide estimate.  */
  RECIP_DF_DIV = 0x002,
  RECIP_V4SF_DIV = 0x004,
  RECIP_V2DF_DIV = 0x008,

  RECIP_SF_RSQRT = 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE = 0,
  RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	       | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
	       | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all", RECIP_ALL },
  { "none", RECIP_NONE },
  { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	    | RECIP_V2DF_DIV) },
  { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
	      | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
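
/* E.g. "-mrecip=divf,rsqrtd" ORs together the "divf" and "rsqrtd" masks
   above, enabling single-precision divide estimates (scalar and V4SF)
   plus double-precision reciprocal-sqrt estimates (scalar and V2DF).  */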

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9", PPC_PLATFORM_POWER9 },
  { "power8", PPC_PLATFORM_POWER8 },
  { "power7", PPC_PLATFORM_POWER7 },
  { "power6x", PPC_PLATFORM_POWER6X },
  { "power6", PPC_PLATFORM_POWER6 },
  { "power5+", PPC_PLATFORM_POWER5_PLUS },
  { "power5", PPC_PLATFORM_POWER5 },
  { "ppc970", PPC_PLATFORM_PPC970 },
  { "power4", PPC_PLATFORM_POWER4 },
  { "ppca2", PPC_PLATFORM_PPCA2 },
  { "ppc476", PPC_PLATFORM_PPC476 },
  { "ppc464", PPC_PLATFORM_PPC464 },
  { "ppc440", PPC_PLATFORM_PPC440 },
  { "ppc405", PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
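
/* User code tests these names directly, e.g.

     if (__builtin_cpu_is ("power9"))
       ...

   which compares against the AT_PLATFORM value the runtime caches in
   the TCB (see tcb_verification_symbol below).  */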

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
  { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
  { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
  { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
  { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
  { "booke", PPC_FEATURE_BOOKE, 0 },
  { "cellbe", PPC_FEATURE_CELL_BE, 0 },
  { "dfp", PPC_FEATURE_HAS_DFP, 0 },
  { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
  { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
  { "fpu", PPC_FEATURE_HAS_FPU, 0 },
  { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
  { "mmu", PPC_FEATURE_HAS_MMU, 0 },
  { "notb", PPC_FEATURE_NO_TB, 0 },
  { "pa6t", PPC_FEATURE_PA6T, 0 },
  { "power4", PPC_FEATURE_POWER4, 0 },
  { "power5", PPC_FEATURE_POWER5, 0 },
  { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
  { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
  { "ppc32", PPC_FEATURE_32, 0 },
  { "ppc601", PPC_FEATURE_601_INSTR, 0 },
  { "ppc64", PPC_FEATURE_64, 0 },
  { "ppcle", PPC_FEATURE_PPC_LE, 0 },
  { "smt", PPC_FEATURE_SMT, 0 },
  { "spe", PPC_FEATURE_HAS_SPE, 0 },
  { "true_le", PPC_FEATURE_TRUE_LE, 0 },
  { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
  { "vsx", PPC_FEATURE_HAS_VSX, 0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
  { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
  { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
  { "htm", PPC_FEATURE2_HAS_HTM, 1 },
  { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
  { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
  { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
  { "tar", PPC_FEATURE2_HAS_TAR, 1 },
  { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
  { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
  { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
  { "darn", PPC_FEATURE2_DARN, 1 },
  { "scv", PPC_FEATURE2_SCV, 1 }
};
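
/* Likewise, user code tests these HWCAP/HWCAP2 names with
   __builtin_cpu_supports, e.g.

     if (__builtin_cpu_supports ("vsx"))
       ...

   where the id field above selects between AT_HWCAP (0) and
   AT_HWCAP2 (1).  */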

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },				 /* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },	 /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },	 /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" },  /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" },  /* ISA 3.00 (power9).  */
};
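
/* This table backs function multiversioning via the "target_clones"
   attribute: the generated resolver picks the newest clone whose HWCAP
   name passes __builtin_cpu_supports.  A sketch of user code:

     __attribute__ ((target_clones ("cpu=power9,default")))
     long mod (long a, long b) { return a % b; }
*/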


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
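
/* For instance, IS_STD_REG_TYPE (VSX_REG_TYPE) holds because VSX_REG_TYPE
   sits between GPR_REG_TYPE and FPR_REG_TYPE in the enum above, while
   IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) does not.  */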


/* Register classes we care about in secondary reload or in the legitimate
   address hooks.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr", FIRST_GPR_REGNO },		/* RELOAD_REG_GPR.  */
  { "Fpr", FIRST_FPR_REGNO },		/* RELOAD_REG_FPR.  */
  { "VMX", FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any", -1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
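
/* So, e.g., a mode allows reg+offset addressing in GPRs exactly when
   (reg_addr[mode].addr_mask[RELOAD_REG_GPR] & RELOAD_REG_OFFSET) != 0,
   using the reg_addr table defined below.  */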

/* Masks of valid addressing modes, indexed by register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	/* INSN to reload for loading.  */
  enum insn_code reload_store;	/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;		/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}
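
/* For example, if OUT_INSN sets r9 and IN_INSN is a store through r9
   (say "mem[r9+8] = r3"), the produced value feeds the *address* of the
   store rather than its data, so store_data_bypass_p returns false
   here and no store-data bypass applies.  */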

\f
/* Processor costs (relative to an add).  */

const struct processor_costs *rs6000_cost;
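
/* COSTS_N_INSNS (N) is GCC's generic "cost of N instructions" unit
   (defined in rtl.h), so e.g. COSTS_N_INSNS (18) for divsi on POWER4
   below models a divide roughly eighteen times as expensive as an
   add.  */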

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
  0,				/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
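
/* rs6000-builtin.def is an X-macro file: each entry there expands through
   one of the RS6000_BUILTIN_* macros defined above.  A hypothetical entry

     RS6000_BUILTIN_2 (FOO, "foo", MASK, ATTR, CODE_FOR_foo)

   would expand to the initializer { "foo", CODE_FOR_foo, MASK, ATTR }
   in rs6000_builtin_info.  */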

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec", 1, 1, false, true, false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
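
/* E.g. ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (the %v0 bit)
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 1) is 0x40000000 (%v1).  */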
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
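
/* These op strings are directive prefixes: assemble_integer emits the
   string followed by the value, so e.g. an unaligned 4-byte constant 42
   comes out as "\t.vbyte\t4,42" on XCOFF and "\t.long\t42" on Darwin.  */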

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1674
1675 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1676 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1677
1678 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1679 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1680 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1681 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1682 rs6000_builtin_support_vector_misalignment
1683 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1684 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1685 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1686 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1687 rs6000_builtin_vectorization_cost
1688 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1689 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1690 rs6000_preferred_simd_mode
1691 #undef TARGET_VECTORIZE_INIT_COST
1692 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1693 #undef TARGET_VECTORIZE_ADD_STMT_COST
1694 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1695 #undef TARGET_VECTORIZE_FINISH_COST
1696 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1697 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1698 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1699
1700 #undef TARGET_INIT_BUILTINS
1701 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1702 #undef TARGET_BUILTIN_DECL
1703 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1704
1705 #undef TARGET_FOLD_BUILTIN
1706 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1707 #undef TARGET_GIMPLE_FOLD_BUILTIN
1708 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1709
1710 #undef TARGET_EXPAND_BUILTIN
1711 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1712
1713 #undef TARGET_MANGLE_TYPE
1714 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1715
1716 #undef TARGET_INIT_LIBFUNCS
1717 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1718
1719 #if TARGET_MACHO
1720 #undef TARGET_BINDS_LOCAL_P
1721 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1722 #endif
1723
1724 #undef TARGET_MS_BITFIELD_LAYOUT_P
1725 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1726
1727 #undef TARGET_ASM_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1729
1730 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1731 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1732
1733 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1734 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1735
1736 #undef TARGET_REGISTER_MOVE_COST
1737 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1738 #undef TARGET_MEMORY_MOVE_COST
1739 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1740 #undef TARGET_CANNOT_COPY_INSN_P
1741 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1742 #undef TARGET_RTX_COSTS
1743 #define TARGET_RTX_COSTS rs6000_rtx_costs
1744 #undef TARGET_ADDRESS_COST
1745 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1746 #undef TARGET_INSN_COST
1747 #define TARGET_INSN_COST rs6000_insn_cost
1748
1749 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1750 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1751
1752 #undef TARGET_PROMOTE_FUNCTION_MODE
1753 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1754
1755 #undef TARGET_RETURN_IN_MEMORY
1756 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1757
1758 #undef TARGET_RETURN_IN_MSB
1759 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1760
1761 #undef TARGET_SETUP_INCOMING_VARARGS
1762 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1763
1764 /* Always strict argument naming on rs6000. */
1765 #undef TARGET_STRICT_ARGUMENT_NAMING
1766 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1767 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1768 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1769 #undef TARGET_SPLIT_COMPLEX_ARG
1770 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1771 #undef TARGET_MUST_PASS_IN_STACK
1772 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1773 #undef TARGET_PASS_BY_REFERENCE
1774 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1775 #undef TARGET_ARG_PARTIAL_BYTES
1776 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1777 #undef TARGET_FUNCTION_ARG_ADVANCE
1778 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1779 #undef TARGET_FUNCTION_ARG
1780 #define TARGET_FUNCTION_ARG rs6000_function_arg
1781 #undef TARGET_FUNCTION_ARG_PADDING
1782 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1783 #undef TARGET_FUNCTION_ARG_BOUNDARY
1784 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1785
1786 #undef TARGET_BUILD_BUILTIN_VA_LIST
1787 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1788
1789 #undef TARGET_EXPAND_BUILTIN_VA_START
1790 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1791
1792 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1793 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1794
1795 #undef TARGET_EH_RETURN_FILTER_MODE
1796 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1797
1798 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1799 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1800
1801 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1802 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1803
1804 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1805 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1806
1807 #undef TARGET_FLOATN_MODE
1808 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1809
1810 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1811 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1812
1813 #undef TARGET_MD_ASM_ADJUST
1814 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1815
1816 #undef TARGET_OPTION_OVERRIDE
1817 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1818
1819 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1820 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1821 rs6000_builtin_vectorized_function
1822
1823 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1824 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1825 rs6000_builtin_md_vectorized_function
1826
1827 #undef TARGET_STACK_PROTECT_GUARD
1828 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1829
1830 #if !TARGET_MACHO
1831 #undef TARGET_STACK_PROTECT_FAIL
1832 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1833 #endif
1834
1835 #ifdef HAVE_AS_TLS
1836 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1837 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1838 #endif
1839
1840 /* Use a 32-bit anchor range. This leads to sequences like:
1841
1842 addis tmp,anchor,high
1843 add dest,tmp,low
1844
1845 where tmp itself acts as an anchor, and can be shared between
1846 accesses to the same 64k page. */
1847 #undef TARGET_MIN_ANCHOR_OFFSET
1848 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1849 #undef TARGET_MAX_ANCHOR_OFFSET
1850 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1851 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1852 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1853 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1854 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1855
1856 #undef TARGET_BUILTIN_RECIPROCAL
1857 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1858
1859 #undef TARGET_SECONDARY_RELOAD
1860 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1861 #undef TARGET_SECONDARY_MEMORY_NEEDED
1862 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1863 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1864 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1865
1866 #undef TARGET_LEGITIMATE_ADDRESS_P
1867 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1868
1869 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1870 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1871
1872 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1873 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1874
1875 #undef TARGET_CAN_ELIMINATE
1876 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1877
1878 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1879 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1880
1881 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1882 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1883
1884 #undef TARGET_TRAMPOLINE_INIT
1885 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1886
1887 #undef TARGET_FUNCTION_VALUE
1888 #define TARGET_FUNCTION_VALUE rs6000_function_value
1889
1890 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1891 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1892
1893 #undef TARGET_OPTION_SAVE
1894 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1895
1896 #undef TARGET_OPTION_RESTORE
1897 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1898
1899 #undef TARGET_OPTION_PRINT
1900 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1901
1902 #undef TARGET_CAN_INLINE_P
1903 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1904
1905 #undef TARGET_SET_CURRENT_FUNCTION
1906 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1907
1908 #undef TARGET_LEGITIMATE_CONSTANT_P
1909 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1910
1911 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1912 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1913
1914 #undef TARGET_CAN_USE_DOLOOP_P
1915 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1916
1917 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1918 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1919
1920 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1921 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1922 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1923 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1924 #undef TARGET_UNWIND_WORD_MODE
1925 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1926
1927 #undef TARGET_OFFLOAD_OPTIONS
1928 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1929
1930 #undef TARGET_C_MODE_FOR_SUFFIX
1931 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1932
1933 #undef TARGET_INVALID_BINARY_OP
1934 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1935
1936 #undef TARGET_OPTAB_SUPPORTED_P
1937 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1938
1939 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1940 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1941
1942 #undef TARGET_COMPARE_VERSION_PRIORITY
1943 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1944
1945 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1946 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1947 rs6000_generate_version_dispatcher_body
1948
1949 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1950 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1951 rs6000_get_function_versions_dispatcher
1952
1953 #undef TARGET_OPTION_FUNCTION_VERSIONS
1954 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1955
1956 #undef TARGET_HARD_REGNO_NREGS
1957 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1958 #undef TARGET_HARD_REGNO_MODE_OK
1959 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1960
1961 #undef TARGET_MODES_TIEABLE_P
1962 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1963
1964 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1965 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1966 rs6000_hard_regno_call_part_clobbered
1967
1968 #undef TARGET_SLOW_UNALIGNED_ACCESS
1969 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1970
1971 #undef TARGET_CAN_CHANGE_MODE_CLASS
1972 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1973
1974 #undef TARGET_CONSTANT_ALIGNMENT
1975 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1976
1977 #undef TARGET_STARTING_FRAME_OFFSET
1978 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1979
1980 #if TARGET_ELF && RS6000_WEAK
1981 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1982 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1983 #endif
1984
1985 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1986 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1987
1988 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1989 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1990 \f
1991
1992 /* Processor table. */
1993 struct rs6000_ptt
1994 {
1995 const char *const name; /* Canonical processor name. */
1996 const enum processor_type processor; /* Processor type enum value. */
1997 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1998 };
1999
2000 static struct rs6000_ptt const processor_target_table[] =
2001 {
2002 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2003 #include "rs6000-cpus.def"
2004 #undef RS6000_CPU
2005 };
2006
2007 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2008 name is invalid. */
2009
2010 static int
2011 rs6000_cpu_name_lookup (const char *name)
2012 {
2013 size_t i;
2014
2015 if (name != NULL)
2016 {
2017 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2018 if (! strcmp (name, processor_target_table[i].name))
2019 return (int)i;
2020 }
2021
2022 return -1;
2023 }
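/* For example (editor's note), rs6000_cpu_name_lookup ("power9") returns
   the index of the "power9" entry in processor_target_table, while a NULL
   or unrecognized name returns -1.  */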
2024
2025 \f
2026 /* Return number of consecutive hard regs needed starting at reg REGNO
2027 to hold something of mode MODE.
2028 This is ordinarily the length in words of a value of mode MODE
2029 but can be less for certain modes in special long registers.
2030
2031 POWER and PowerPC GPRs hold 32 bits worth;
2032 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2033
2034 static int
2035 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2036 {
2037 unsigned HOST_WIDE_INT reg_size;
2038
2039 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2040 128-bit floating point that can go in vector registers, which has VSX
2041 memory addressing. */
2042 if (FP_REGNO_P (regno))
2043 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2044 ? UNITS_PER_VSX_WORD
2045 : UNITS_PER_FP_WORD);
2046
2047 else if (ALTIVEC_REGNO_P (regno))
2048 reg_size = UNITS_PER_ALTIVEC_WORD;
2049
2050 else
2051 reg_size = UNITS_PER_WORD;
2052
2053 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2054 }
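/* A minimal standalone sketch (editor's illustration, not part of GCC) of
   the round-up division above; mode_size and reg_size are hypothetical
   stand-ins for GET_MODE_SIZE (mode) and the per-register-file sizes.  */
#if 0
#include <stdio.h>

static int
nregs_for (unsigned int mode_size, unsigned int reg_size)
{
  /* Registers of reg_size bytes needed for mode_size bytes, rounding up.  */
  return (mode_size + reg_size - 1) / reg_size;
}

int
main (void)
{
  /* A 16-byte vector needs 4 GPRs at 4 bytes each, 2 GPRs at 8 bytes,
     or a single 16-byte VSX register.  */
  printf ("%d %d %d\n", nregs_for (16, 4), nregs_for (16, 8),
          nregs_for (16, 16));
  return 0;
}
#endif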
2055
2056 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2057 MODE. */
2058 static int
2059 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2060 {
2061 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2062
2063 if (COMPLEX_MODE_P (mode))
2064 mode = GET_MODE_INNER (mode);
2065
2066 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2067 register combinations; we use PTImode wherever we need to deal with quad
2068 word memory operations. Don't allow quad words in the argument or frame
2069 pointer registers, just registers 0..31. */
2070 if (mode == PTImode)
2071 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2072 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2073 && ((regno & 1) == 0));
2074
2075 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2076 implementations. Don't allow an item to be split between a FP register
2077 and an Altivec register. Allow TImode in all VSX registers if the user
2078 asked for it. */
2079 if (TARGET_VSX && VSX_REGNO_P (regno)
2080 && (VECTOR_MEM_VSX_P (mode)
2081 || FLOAT128_VECTOR_P (mode)
2082 || reg_addr[mode].scalar_in_vmx_p
2083 || mode == TImode
2084 || (TARGET_VADDUQM && mode == V1TImode)))
2085 {
2086 if (FP_REGNO_P (regno))
2087 return FP_REGNO_P (last_regno);
2088
2089 if (ALTIVEC_REGNO_P (regno))
2090 {
2091 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2092 return 0;
2093
2094 return ALTIVEC_REGNO_P (last_regno);
2095 }
2096 }
2097
2098 /* The GPRs can hold any mode, but values bigger than one register
2099 cannot go past R31. */
2100 if (INT_REGNO_P (regno))
2101 return INT_REGNO_P (last_regno);
2102
2103 /* The float registers (except for VSX vector modes) can only hold floating
2104 modes and DImode. */
2105 if (FP_REGNO_P (regno))
2106 {
2107 if (FLOAT128_VECTOR_P (mode))
2108 return false;
2109
2110 if (SCALAR_FLOAT_MODE_P (mode)
2111 && (mode != TDmode || (regno % 2) == 0)
2112 && FP_REGNO_P (last_regno))
2113 return 1;
2114
2115 if (GET_MODE_CLASS (mode) == MODE_INT)
2116 {
2117 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2118 return 1;
2119
2120 if (TARGET_P8_VECTOR && (mode == SImode))
2121 return 1;
2122
2123 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2124 return 1;
2125 }
2126
2127 return 0;
2128 }
2129
2130 /* The CR register can only hold CC modes. */
2131 if (CR_REGNO_P (regno))
2132 return GET_MODE_CLASS (mode) == MODE_CC;
2133
2134 if (CA_REGNO_P (regno))
2135 return mode == Pmode || mode == SImode;
2136
2137 /* AltiVec modes can only go in AltiVec registers. */
2138 if (ALTIVEC_REGNO_P (regno))
2139 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2140 || mode == V1TImode);
2141
2142 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2143 registers, and the value must fit within the register set. */
2144
2145 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2146 }
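/* For example, under the PTImode rule above a quad word may start at r10
   (the even half of the r10/r11 pair) but not at r11, and never in the
   argument or frame pointer registers.  */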
2147
2148 /* Implement TARGET_HARD_REGNO_NREGS. */
2149
2150 static unsigned int
2151 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2152 {
2153 return rs6000_hard_regno_nregs[mode][regno];
2154 }
2155
2156 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2157
2158 static bool
2159 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2160 {
2161 return rs6000_hard_regno_mode_ok_p[mode][regno];
2162 }
2163
2164 /* Implement TARGET_MODES_TIEABLE_P.
2165
2166 PTImode cannot tie with other modes because PTImode is restricted to even
2167 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2168 57744).
2169
2170 The Altivec/VSX vector tests come before the scalar float mode tests, so
2171 that IEEE 128-bit floating point on VSX systems ties with other vectors. */
2172
2173 static bool
2174 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2175 {
2176 if (mode1 == PTImode)
2177 return mode2 == PTImode;
2178 if (mode2 == PTImode)
2179 return false;
2180
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2182 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2183 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2184 return false;
2185
2186 if (SCALAR_FLOAT_MODE_P (mode1))
2187 return SCALAR_FLOAT_MODE_P (mode2);
2188 if (SCALAR_FLOAT_MODE_P (mode2))
2189 return false;
2190
2191 if (GET_MODE_CLASS (mode1) == MODE_CC)
2192 return GET_MODE_CLASS (mode2) == MODE_CC;
2193 if (GET_MODE_CLASS (mode2) == MODE_CC)
2194 return false;
2195
2196 return true;
2197 }
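/* For example, V2DFmode ties with V4SFmode (both Altivec/VSX vector modes)
   and SFmode ties with DFmode (both scalar float), but DFmode does not tie
   with V2DFmode, and PTImode ties only with itself.  */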
2198
2199 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2200
2201 static bool
2202 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2203 unsigned int regno, machine_mode mode)
2204 {
2205 if (TARGET_32BIT
2206 && TARGET_POWERPC64
2207 && GET_MODE_SIZE (mode) > 4
2208 && INT_REGNO_P (regno))
2209 return true;
2210
2211 if (TARGET_VSX
2212 && FP_REGNO_P (regno)
2213 && GET_MODE_SIZE (mode) > 8
2214 && !FLOAT128_2REG_P (mode))
2215 return true;
2216
2217 return false;
2218 }
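/* For example, with -m32 -mpowerpc64 only the low 32 bits of a GPR are
   call-saved, so a DImode value in a GPR is partially clobbered; likewise
   with VSX only the low 64 bits of an FPR are call-saved, so a 128-bit
   value in a traditional FPR is partially clobbered across calls.  */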
2219
2220 /* Print interesting facts about registers. */
2221 static void
2222 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2223 {
2224 int r, m;
2225
2226 for (r = first_regno; r <= last_regno; ++r)
2227 {
2228 const char *comma = "";
2229 int len;
2230
2231 if (first_regno == last_regno)
2232 fprintf (stderr, "%s:\t", reg_name);
2233 else
2234 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2235
2236 len = 8;
2237 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2238 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2239 {
2240 if (len > 70)
2241 {
2242 fprintf (stderr, ",\n\t");
2243 len = 8;
2244 comma = "";
2245 }
2246
2247 if (rs6000_hard_regno_nregs[m][r] > 1)
2248 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2249 rs6000_hard_regno_nregs[m][r]);
2250 else
2251 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2252
2253 comma = ", ";
2254 }
2255
2256 if (call_used_regs[r])
2257 {
2258 if (len > 70)
2259 {
2260 fprintf (stderr, ",\n\t");
2261 len = 8;
2262 comma = "";
2263 }
2264
2265 len += fprintf (stderr, "%s%s", comma, "call-used");
2266 comma = ", ";
2267 }
2268
2269 if (fixed_regs[r])
2270 {
2271 if (len > 70)
2272 {
2273 fprintf (stderr, ",\n\t");
2274 len = 8;
2275 comma = "";
2276 }
2277
2278 len += fprintf (stderr, "%s%s", comma, "fixed");
2279 comma = ", ";
2280 }
2281
2282 if (len > 70)
2283 {
2284 fprintf (stderr, ",\n\t");
2285 comma = "";
2286 }
2287
2288 len += fprintf (stderr, "%sreg-class = %s", comma,
2289 reg_class_names[(int)rs6000_regno_regclass[r]]);
2290 comma = ", ";
2291
2292 if (len > 70)
2293 {
2294 fprintf (stderr, ",\n\t");
2295 comma = "";
2296 }
2297
2298 fprintf (stderr, "%sregno = %d\n", comma, r);
2299 }
2300 }
2301
2302 static const char *
2303 rs6000_debug_vector_unit (enum rs6000_vector v)
2304 {
2305 const char *ret;
2306
2307 switch (v)
2308 {
2309 case VECTOR_NONE: ret = "none"; break;
2310 case VECTOR_ALTIVEC: ret = "altivec"; break;
2311 case VECTOR_VSX: ret = "vsx"; break;
2312 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2313 default: ret = "unknown"; break;
2314 }
2315
2316 return ret;
2317 }
2318
2319 /* Inner function printing just the address mask for a particular reload
2320 register class. */
2321 DEBUG_FUNCTION char *
2322 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2323 {
2324 static char ret[8];
2325 char *p = ret;
2326
2327 if ((mask & RELOAD_REG_VALID) != 0)
2328 *p++ = 'v';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2333 *p++ = 'm';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_INDEXED) != 0)
2338 *p++ = 'i';
2339 else if (keep_spaces)
2340 *p++ = ' ';
2341
2342 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2343 *p++ = 'O';
2344 else if ((mask & RELOAD_REG_OFFSET) != 0)
2345 *p++ = 'o';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2355 *p++ = '+';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 if ((mask & RELOAD_REG_AND_M16) != 0)
2360 *p++ = '&';
2361 else if (keep_spaces)
2362 *p++ = ' ';
2363
2364 *p = '\0';
2365
2366 return ret;
2367 }
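/* Legend for the characters emitted above: v = valid, m = multiple
   registers, i = indexed (REG+REG), o = offsettable, O = quad-word offset,
   + = pre-increment/decrement or pre-modify, & = AND -16 addressing.  */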
2368
2369 /* Print the address masks in a human readable fashion. */
2370 DEBUG_FUNCTION void
2371 rs6000_debug_print_mode (ssize_t m)
2372 {
2373 ssize_t rc;
2374 int spaces = 0;
2375
2376 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2377 for (rc = 0; rc < N_RELOAD_REG; rc++)
2378 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2379 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2380
2381 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2382 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2383 {
2384 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2385 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2386 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2387 spaces = 0;
2388 }
2389 else
2390 spaces += sizeof (" Reload=sl") - 1;
2391
2392 if (reg_addr[m].scalar_in_vmx_p)
2393 {
2394 fprintf (stderr, "%*s Upper=y", spaces, "");
2395 spaces = 0;
2396 }
2397 else
2398 spaces += sizeof (" Upper=y") - 1;
2399
2400 if (rs6000_vector_unit[m] != VECTOR_NONE
2401 || rs6000_vector_mem[m] != VECTOR_NONE)
2402 {
2403 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2404 spaces, "",
2405 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2406 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2407 }
2408
2409 fputs ("\n", stderr);
2410 }
2411
2412 #define DEBUG_FMT_ID "%-32s= "
2413 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2414 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2415 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2416
2417 /* Print various interesting information with -mdebug=reg. */
2418 static void
2419 rs6000_debug_reg_global (void)
2420 {
2421 static const char *const tf[2] = { "false", "true" };
2422 const char *nl = (const char *)0;
2423 int m;
2424 size_t m1, m2, v;
2425 char costly_num[20];
2426 char nop_num[20];
2427 char flags_buffer[40];
2428 const char *costly_str;
2429 const char *nop_str;
2430 const char *trace_str;
2431 const char *abi_str;
2432 const char *cmodel_str;
2433 struct cl_target_option cl_opts;
2434
2435 /* Modes we want tieable information on. */
2436 static const machine_mode print_tieable_modes[] = {
2437 QImode,
2438 HImode,
2439 SImode,
2440 DImode,
2441 TImode,
2442 PTImode,
2443 SFmode,
2444 DFmode,
2445 TFmode,
2446 IFmode,
2447 KFmode,
2448 SDmode,
2449 DDmode,
2450 TDmode,
2451 V16QImode,
2452 V8HImode,
2453 V4SImode,
2454 V2DImode,
2455 V1TImode,
2456 V32QImode,
2457 V16HImode,
2458 V8SImode,
2459 V4DImode,
2460 V2TImode,
2461 V4SFmode,
2462 V2DFmode,
2463 V8SFmode,
2464 V4DFmode,
2465 CCmode,
2466 CCUNSmode,
2467 CCEQmode,
2468 };
2469
2470 /* Virtual regs we are interested in. */
2471 static const struct {
2472 int regno; /* register number. */
2473 const char *name; /* register name. */
2474 } virtual_regs[] = {
2475 { STACK_POINTER_REGNUM, "stack pointer:" },
2476 { TOC_REGNUM, "toc: " },
2477 { STATIC_CHAIN_REGNUM, "static chain: " },
2478 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2479 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2480 { ARG_POINTER_REGNUM, "arg pointer: " },
2481 { FRAME_POINTER_REGNUM, "frame pointer:" },
2482 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2483 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2484 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2485 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2486 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2487 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2488 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2489 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2490 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2491 };
2492
2493 fputs ("\nHard register information:\n", stderr);
2494 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2495 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2496 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2497 LAST_ALTIVEC_REGNO,
2498 "vs");
2499 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2500 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2501 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2502 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2503 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2504 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2505
2506 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2507 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2508 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2509
2510 fprintf (stderr,
2511 "\n"
2512 "d reg_class = %s\n"
2513 "f reg_class = %s\n"
2514 "v reg_class = %s\n"
2515 "wa reg_class = %s\n"
2516 "wb reg_class = %s\n"
2517 "wd reg_class = %s\n"
2518 "we reg_class = %s\n"
2519 "wf reg_class = %s\n"
2520 "wg reg_class = %s\n"
2521 "wh reg_class = %s\n"
2522 "wi reg_class = %s\n"
2523 "wj reg_class = %s\n"
2524 "wk reg_class = %s\n"
2525 "wl reg_class = %s\n"
2526 "wm reg_class = %s\n"
2527 "wo reg_class = %s\n"
2528 "wp reg_class = %s\n"
2529 "wq reg_class = %s\n"
2530 "wr reg_class = %s\n"
2531 "ws reg_class = %s\n"
2532 "wt reg_class = %s\n"
2533 "wu reg_class = %s\n"
2534 "wv reg_class = %s\n"
2535 "ww reg_class = %s\n"
2536 "wx reg_class = %s\n"
2537 "wy reg_class = %s\n"
2538 "wz reg_class = %s\n"
2539 "wA reg_class = %s\n"
2540 "wH reg_class = %s\n"
2541 "wI reg_class = %s\n"
2542 "wJ reg_class = %s\n"
2543 "wK reg_class = %s\n"
2544 "\n",
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2574 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2575 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2576 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2577
2578 nl = "\n";
2579 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2580 rs6000_debug_print_mode (m);
2581
2582 fputs ("\n", stderr);
2583
2584 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2585 {
2586 machine_mode mode1 = print_tieable_modes[m1];
2587 bool first_time = true;
2588
2589 nl = (const char *)0;
2590 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2591 {
2592 machine_mode mode2 = print_tieable_modes[m2];
2593 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2594 {
2595 if (first_time)
2596 {
2597 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2598 nl = "\n";
2599 first_time = false;
2600 }
2601
2602 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2603 }
2604 }
2605
2606 if (!first_time)
2607 fputs ("\n", stderr);
2608 }
2609
2610 if (nl)
2611 fputs (nl, stderr);
2612
2613 if (rs6000_recip_control)
2614 {
2615 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2616
2617 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2618 if (rs6000_recip_bits[m])
2619 {
2620 fprintf (stderr,
2621 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2622 GET_MODE_NAME (m),
2623 (RS6000_RECIP_AUTO_RE_P (m)
2624 ? "auto"
2625 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2626 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2627 ? "auto"
2628 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2629 }
2630
2631 fputs ("\n", stderr);
2632 }
2633
2634 if (rs6000_cpu_index >= 0)
2635 {
2636 const char *name = processor_target_table[rs6000_cpu_index].name;
2637 HOST_WIDE_INT flags
2638 = processor_target_table[rs6000_cpu_index].target_enable;
2639
2640 sprintf (flags_buffer, "-mcpu=%s flags", name);
2641 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2642 }
2643 else
2644 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2645
2646 if (rs6000_tune_index >= 0)
2647 {
2648 const char *name = processor_target_table[rs6000_tune_index].name;
2649 HOST_WIDE_INT flags
2650 = processor_target_table[rs6000_tune_index].target_enable;
2651
2652 sprintf (flags_buffer, "-mtune=%s flags", name);
2653 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2654 }
2655 else
2656 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2657
2658 cl_target_option_save (&cl_opts, &global_options);
2659 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2660 rs6000_isa_flags);
2661
2662 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2663 rs6000_isa_flags_explicit);
2664
2665 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2666 rs6000_builtin_mask);
2667
2668 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2669
2670 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2671 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2672
2673 switch (rs6000_sched_costly_dep)
2674 {
2675 case max_dep_latency:
2676 costly_str = "max_dep_latency";
2677 break;
2678
2679 case no_dep_costly:
2680 costly_str = "no_dep_costly";
2681 break;
2682
2683 case all_deps_costly:
2684 costly_str = "all_deps_costly";
2685 break;
2686
2687 case true_store_to_load_dep_costly:
2688 costly_str = "true_store_to_load_dep_costly";
2689 break;
2690
2691 case store_to_load_dep_costly:
2692 costly_str = "store_to_load_dep_costly";
2693 break;
2694
2695 default:
2696 costly_str = costly_num;
2697 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2698 break;
2699 }
2700
2701 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2702
2703 switch (rs6000_sched_insert_nops)
2704 {
2705 case sched_finish_regroup_exact:
2706 nop_str = "sched_finish_regroup_exact";
2707 break;
2708
2709 case sched_finish_pad_groups:
2710 nop_str = "sched_finish_pad_groups";
2711 break;
2712
2713 case sched_finish_none:
2714 nop_str = "sched_finish_none";
2715 break;
2716
2717 default:
2718 nop_str = nop_num;
2719 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2720 break;
2721 }
2722
2723 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2724
2725 switch (rs6000_sdata)
2726 {
2727 default:
2728 case SDATA_NONE:
2729 break;
2730
2731 case SDATA_DATA:
2732 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2733 break;
2734
2735 case SDATA_SYSV:
2736 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2737 break;
2738
2739 case SDATA_EABI:
2740 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2741 break;
2742
2743 }
2744
2745 switch (rs6000_traceback)
2746 {
2747 case traceback_default: trace_str = "default"; break;
2748 case traceback_none: trace_str = "none"; break;
2749 case traceback_part: trace_str = "part"; break;
2750 case traceback_full: trace_str = "full"; break;
2751 default: trace_str = "unknown"; break;
2752 }
2753
2754 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2755
2756 switch (rs6000_current_cmodel)
2757 {
2758 case CMODEL_SMALL: cmodel_str = "small"; break;
2759 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2760 case CMODEL_LARGE: cmodel_str = "large"; break;
2761 default: cmodel_str = "unknown"; break;
2762 }
2763
2764 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2765
2766 switch (rs6000_current_abi)
2767 {
2768 case ABI_NONE: abi_str = "none"; break;
2769 case ABI_AIX: abi_str = "aix"; break;
2770 case ABI_ELFv2: abi_str = "ELFv2"; break;
2771 case ABI_V4: abi_str = "V4"; break;
2772 case ABI_DARWIN: abi_str = "darwin"; break;
2773 default: abi_str = "unknown"; break;
2774 }
2775
2776 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2777
2778 if (rs6000_altivec_abi)
2779 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2780
2781 if (rs6000_darwin64_abi)
2782 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2783
2784 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2785 (TARGET_SOFT_FLOAT ? "true" : "false"));
2786
2787 if (TARGET_LINK_STACK)
2788 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2789
2790 if (TARGET_P8_FUSION)
2791 {
2792 char options[80];
2793
2794 strcpy (options, "power8");
2795 if (TARGET_P8_FUSION_SIGN)
2796 strcat (options, ", sign");
2797
2798 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2799 }
2800
2801 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2802 TARGET_SECURE_PLT ? "secure" : "bss");
2803 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2804 aix_struct_return ? "aix" : "sysv");
2805 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2806 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2807 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2808 tf[!!rs6000_align_branch_targets]);
2809 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2810 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2811 rs6000_long_double_type_size);
2812 if (rs6000_long_double_type_size > 64)
2813 {
2814 fprintf (stderr, DEBUG_FMT_S, "long double type",
2815 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2816 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2817 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2818 }
2819 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2820 (int)rs6000_sched_restricted_insns_priority);
2821 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2822 (int)END_BUILTINS);
2823 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2824 (int)RS6000_BUILTIN_COUNT);
2825
2826 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2827 (int)TARGET_FLOAT128_ENABLE_TYPE);
2828
2829 if (TARGET_VSX)
2830 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2831 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2832
2833 if (TARGET_DIRECT_MOVE_128)
2834 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2835 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2836 }
2837
2838 \f
2839 /* Update the addr mask bits in reg_addr to help secondary reload and
2840 legitimate address support figure out the appropriate addressing to
2841 use. */
2842
2843 static void
2844 rs6000_setup_reg_addr_masks (void)
2845 {
2846 ssize_t rc, reg, m, nregs;
2847 addr_mask_type any_addr_mask, addr_mask;
2848
2849 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2850 {
2851 machine_mode m2 = (machine_mode) m;
2852 bool complex_p = false;
2853 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2854 size_t msize;
2855
2856 if (COMPLEX_MODE_P (m2))
2857 {
2858 complex_p = true;
2859 m2 = GET_MODE_INNER (m2);
2860 }
2861
2862 msize = GET_MODE_SIZE (m2);
2863
2864 /* SDmode is special in that we want to access it only via REG+REG
2865 addressing on power7 and above, since we want to use the LFIWZX and
2866 STFIWZX instructions to load it. */
2867 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2868
2869 any_addr_mask = 0;
2870 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2871 {
2872 addr_mask = 0;
2873 reg = reload_reg_map[rc].reg;
2874
2875 /* Can mode values go in the GPR/FPR/Altivec registers? */
2876 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2877 {
2878 bool small_int_vsx_p = (small_int_p
2879 && (rc == RELOAD_REG_FPR
2880 || rc == RELOAD_REG_VMX));
2881
2882 nregs = rs6000_hard_regno_nregs[m][reg];
2883 addr_mask |= RELOAD_REG_VALID;
2884
2885 /* Indicate if the mode takes more than 1 physical register. If
2886 it takes a single register, indicate it can do REG+REG
2887 addressing. Small integers in VSX registers can only do
2888 REG+REG addressing. */
2889 if (small_int_vsx_p)
2890 addr_mask |= RELOAD_REG_INDEXED;
2891 else if (nregs > 1 || m == BLKmode || complex_p)
2892 addr_mask |= RELOAD_REG_MULTIPLE;
2893 else
2894 addr_mask |= RELOAD_REG_INDEXED;
2895
2896 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2897 addressing. If we allow scalars into Altivec registers,
2898 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2899
2900 For VSX systems, we don't allow update addressing for
2901 DFmode/SFmode if those registers can go in both the
2902 traditional floating point registers and Altivec registers.
2903 The load/store instructions for the Altivec registers do not
2904 have update forms. If we allowed update addressing, it seems
2905 to break IV-OPT code using floating point if the index type is
2906 int instead of long (PR target/81550 and target/84042). */
2907
2908 if (TARGET_UPDATE
2909 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2910 && msize <= 8
2911 && !VECTOR_MODE_P (m2)
2912 && !FLOAT128_VECTOR_P (m2)
2913 && !complex_p
2914 && (m != E_DFmode || !TARGET_VSX)
2915 && (m != E_SFmode || !TARGET_P8_VECTOR)
2916 && !small_int_vsx_p)
2917 {
2918 addr_mask |= RELOAD_REG_PRE_INCDEC;
2919
2920 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2921 we don't allow PRE_MODIFY for some multi-register
2922 operations. */
2923 switch (m)
2924 {
2925 default:
2926 addr_mask |= RELOAD_REG_PRE_MODIFY;
2927 break;
2928
2929 case E_DImode:
2930 if (TARGET_POWERPC64)
2931 addr_mask |= RELOAD_REG_PRE_MODIFY;
2932 break;
2933
2934 case E_DFmode:
2935 case E_DDmode:
2936 if (TARGET_HARD_FLOAT)
2937 addr_mask |= RELOAD_REG_PRE_MODIFY;
2938 break;
2939 }
2940 }
2941 }
2942
2943 /* GPR and FPR registers can do REG+OFFSET addressing, except
2944 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2945 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2946 if ((addr_mask != 0) && !indexed_only_p
2947 && msize <= 8
2948 && (rc == RELOAD_REG_GPR
2949 || ((msize == 8 || m2 == SFmode)
2950 && (rc == RELOAD_REG_FPR
2951 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2952 addr_mask |= RELOAD_REG_OFFSET;
2953
2954 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2955 instructions are enabled. The offset for 128-bit VSX registers is
2956 only 12 bits. While GPRs can handle the full offset range, VSX
2957 registers can only handle the restricted range. */
2958 else if ((addr_mask != 0) && !indexed_only_p
2959 && msize == 16 && TARGET_P9_VECTOR
2960 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2961 || (m2 == TImode && TARGET_VSX)))
2962 {
2963 addr_mask |= RELOAD_REG_OFFSET;
2964 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2965 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2966 }
2967
2968 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2969 addressing on 128-bit types. */
2970 if (rc == RELOAD_REG_VMX && msize == 16
2971 && (addr_mask & RELOAD_REG_VALID) != 0)
2972 addr_mask |= RELOAD_REG_AND_M16;
2973
2974 reg_addr[m].addr_mask[rc] = addr_mask;
2975 any_addr_mask |= addr_mask;
2976 }
2977
2978 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2979 }
2980 }
2981
2982 \f
2983 /* Initialize the various global tables that are based on register size. */
2984 static void
2985 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2986 {
2987 ssize_t r, m, c;
2988 int align64;
2989 int align32;
2990
2991 /* Precalculate REGNO_REG_CLASS. */
2992 rs6000_regno_regclass[0] = GENERAL_REGS;
2993 for (r = 1; r < 32; ++r)
2994 rs6000_regno_regclass[r] = BASE_REGS;
2995
2996 for (r = 32; r < 64; ++r)
2997 rs6000_regno_regclass[r] = FLOAT_REGS;
2998
2999 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
3000 rs6000_regno_regclass[r] = NO_REGS;
3001
3002 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3003 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3004
3005 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3006 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3007 rs6000_regno_regclass[r] = CR_REGS;
3008
3009 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3010 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3011 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3012 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3013 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3014 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3015 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3016 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3017 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3018 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3019
3020 /* Precalculate the mapping from register class to the simpler reload
3021 register classes. We don't need all the combination classes, just the
3022 simple ones that have constraint letters. */
3023 for (c = 0; c < N_REG_CLASSES; c++)
3024 reg_class_to_reg_type[c] = NO_REG_TYPE;
3025
3026 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3029 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3031 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3032 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3033 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3034 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3035 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3036
3037 if (TARGET_VSX)
3038 {
3039 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3040 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3041 }
3042 else
3043 {
3044 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3045 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3046 }
3047
3048 /* Precalculate the valid memory formats as well as the vector information;
3049 this must be set up before the rs6000_hard_regno_nregs_internal calls
3050 below. */
3051 gcc_assert ((int)VECTOR_NONE == 0);
3052 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3053 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3054
3055 gcc_assert ((int)CODE_FOR_nothing == 0);
3056 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3057
3058 gcc_assert ((int)NO_REGS == 0);
3059 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3060
3061 /* The VSX hardware allows native vector alignment, but TARGET_VSX_ALIGN_128
3062 controls whether the compiler may use it or must keep 128-bit alignment. */
3063 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3064 {
3065 align64 = 64;
3066 align32 = 32;
3067 }
3068 else
3069 {
3070 align64 = 128;
3071 align32 = 128;
3072 }
3073
3074 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3075 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3076 if (TARGET_FLOAT128_TYPE)
3077 {
3078 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3079 rs6000_vector_align[KFmode] = 128;
3080
3081 if (FLOAT128_IEEE_P (TFmode))
3082 {
3083 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3084 rs6000_vector_align[TFmode] = 128;
3085 }
3086 }
3087
3088 /* V2DF mode, VSX only. */
3089 if (TARGET_VSX)
3090 {
3091 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3092 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3093 rs6000_vector_align[V2DFmode] = align64;
3094 }
3095
3096 /* V4SF mode, either VSX or Altivec. */
3097 if (TARGET_VSX)
3098 {
3099 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3100 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3101 rs6000_vector_align[V4SFmode] = align32;
3102 }
3103 else if (TARGET_ALTIVEC)
3104 {
3105 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3106 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3107 rs6000_vector_align[V4SFmode] = align32;
3108 }
3109
3110 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3111 and stores. */
3112 if (TARGET_ALTIVEC)
3113 {
3114 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3115 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3116 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3117 rs6000_vector_align[V4SImode] = align32;
3118 rs6000_vector_align[V8HImode] = align32;
3119 rs6000_vector_align[V16QImode] = align32;
3120
3121 if (TARGET_VSX)
3122 {
3123 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3124 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3125 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3126 }
3127 else
3128 {
3129 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3130 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3131 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3132 }
3133 }
3134
3135 /* V2DImode: full arithmetic support depends on the ISA 2.07 vector unit.
3136 Allow under VSX for insert/splat/extract; Altivec lacks 64-bit integers. */
3137 if (TARGET_VSX)
3138 {
3139 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3140 rs6000_vector_unit[V2DImode]
3141 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3142 rs6000_vector_align[V2DImode] = align64;
3143
3144 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3145 rs6000_vector_unit[V1TImode]
3146 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3147 rs6000_vector_align[V1TImode] = 128;
3148 }
3149
3150 /* DFmode, see if we want to use the VSX unit. Memory is handled
3151 differently, so don't set rs6000_vector_mem. */
3152 if (TARGET_VSX)
3153 {
3154 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3155 rs6000_vector_align[DFmode] = 64;
3156 }
3157
3158 /* SFmode, see if we want to use the VSX unit. */
3159 if (TARGET_P8_VECTOR)
3160 {
3161 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3162 rs6000_vector_align[SFmode] = 32;
3163 }
3164
3165 /* Allow TImode in VSX registers and set the VSX memory macros. */
3166 if (TARGET_VSX)
3167 {
3168 rs6000_vector_mem[TImode] = VECTOR_VSX;
3169 rs6000_vector_align[TImode] = align64;
3170 }
3171
3172 /* Register class constraints for the constraints that depend on compile
3173 switches. When the VSX code was added, different constraints were added
3174 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3175 of the VSX registers are used. The register classes for scalar floating
3176 point types are set based on whether we allow that type into the upper
3177 (Altivec) registers. GCC has register classes to target the Altivec
3178 registers for load/store operations, so it can select a VSX memory
3179 operation instead of the traditional floating point operation. The
3180 constraints are:
3181
3182 d - Register class to use with traditional DFmode instructions.
3183 f - Register class to use with traditional SFmode instructions.
3184 v - Altivec register.
3185 wa - Any VSX register.
3186 wc - Reserved to represent individual CR bits (used in LLVM).
3187 wd - Preferred register class for V2DFmode.
3188 wf - Preferred register class for V4SFmode.
3189 wg - Float register for power6x move insns.
3190 wh - FP register for direct move instructions.
3191 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3192 wj - FP or VSX register to hold 64-bit integers for direct moves.
3193 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3194 wl - Float register if we can do 32-bit signed int loads.
3195 wm - VSX register for ISA 2.07 direct move operations.
3196 wn - always NO_REGS.
3197 wr - GPR if 64-bit mode is permitted.
3198 ws - Register class to do ISA 2.06 DF operations.
3199 wt - VSX register for TImode in VSX registers.
3200 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3201 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3202 ww - Register class to do SF conversions in with VSX operations.
3203 wx - Float register if we can do 32-bit int stores.
3204 wy - Register class to do ISA 2.07 SF operations.
3205 wz - Float register if we can do 32-bit unsigned int loads.
3206 wH - Altivec register if SImode is allowed in VSX registers.
3207 wI - VSX register if SImode is allowed in VSX registers.
3208 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3209 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
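/* Editor's note: the letters above are used as operand constraints in the
   machine description; a class left at its NO_REGS default (see the memset
   above) disables every insn alternative that uses that letter.  */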
3210
3211 if (TARGET_HARD_FLOAT)
3212 {
3213 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3214 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3215 }
3216
3217 if (TARGET_VSX)
3218 {
3219 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3220 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3222 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3223 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3224 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3225 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3226 }
3227
3228 /* Add conditional constraints based on various options, to allow us to
3229 collapse multiple insn patterns. */
3230 if (TARGET_ALTIVEC)
3231 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3232
3233 if (TARGET_MFPGPR) /* DFmode */
3234 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3235
3236 if (TARGET_LFIWAX)
3237 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3238
3239 if (TARGET_DIRECT_MOVE)
3240 {
3241 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3242 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3243 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3244 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3245 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3246 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3247 }
3248
3249 if (TARGET_POWERPC64)
3250 {
3251 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3252 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3253 }
3254
3255 if (TARGET_P8_VECTOR) /* SFmode */
3256 {
3257 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3258 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3259 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3260 }
3261 else if (TARGET_VSX)
3262 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3263
3264 if (TARGET_STFIWX)
3265 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3266
3267 if (TARGET_LFIWZX)
3268 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3269
3270 if (TARGET_FLOAT128_TYPE)
3271 {
3272 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3273 if (FLOAT128_IEEE_P (TFmode))
3274 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3275 }
3276
3277 if (TARGET_P9_VECTOR)
3278 {
3279 /* Support for new D-form instructions. */
3280 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3281
3282 /* Support for ISA 3.0 (power9) vectors. */
3283 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3284 }
3285
3286 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3287 if (TARGET_DIRECT_MOVE_128)
3288 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3289
3290 /* Support small integers in VSX registers. */
3291 if (TARGET_P8_VECTOR)
3292 {
3293 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3294 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3295 if (TARGET_P9_VECTOR)
3296 {
3297 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3298 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3299 }
3300 }
3301
3302 /* Set up the reload helper and direct move functions. */
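/* Sketch of how these are consumed (the exact flow lives in
   rs6000_secondary_reload and related helpers): when reload must move,
   say, a V2DFmode value to or from an address the vector unit cannot
   encode directly, it emits the insn named by
   reg_addr[V2DFmode].reload_load or .reload_store, which rebuilds the
   address in a scratch register first.  */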
3303 if (TARGET_VSX || TARGET_ALTIVEC)
3304 {
3305 if (TARGET_64BIT)
3306 {
3307 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3308 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3309 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3310 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3311 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3312 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3313 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3314 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3315 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3316 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3317 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3318 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3319 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3320 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3321 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3322 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3323 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3324 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3325 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3326 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3327
3328 if (FLOAT128_VECTOR_P (KFmode))
3329 {
3330 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3331 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3332 }
3333
3334 if (FLOAT128_VECTOR_P (TFmode))
3335 {
3336 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3337 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3338 }
3339
3340 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3341 available. */
3342 if (TARGET_NO_SDMODE_STACK)
3343 {
3344 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3345 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3346 }
3347
3348 if (TARGET_VSX)
3349 {
3350 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3351 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3352 }
3353
3354 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3355 {
3356 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3357 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3358 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3359 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3360 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3361 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3362 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3363 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3364 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3365
3366 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3367 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3368 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3369 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3370 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3371 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3372 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3373 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3374 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3375
3376 if (FLOAT128_VECTOR_P (KFmode))
3377 {
3378 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3379 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3380 }
3381
3382 if (FLOAT128_VECTOR_P (TFmode))
3383 {
3384 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3385 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3386 }
3387 }
3388 }
3389 else
3390 {
3391 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3392 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3393 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3394 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3395 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3396 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3397 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3398 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3399 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3400 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3401 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3402 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3403 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3404 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3405 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3406 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3407 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3408 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3409 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3410 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3411
3412 if (FLOAT128_VECTOR_P (KFmode))
3413 {
3414 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3415 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3416 }
3417
3418 if (FLOAT128_IEEE_P (TFmode))
3419 {
3420 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3421 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3422 }
3423
3424 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3425 available. */
3426 if (TARGET_NO_SDMODE_STACK)
3427 {
3428 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3429 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3430 }
3431
3432 if (TARGET_VSX)
3433 {
3434 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3435 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3436 }
3437
3438 if (TARGET_DIRECT_MOVE)
3439 {
3440 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3441 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3442 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3443 }
3444 }
3445
3446 reg_addr[DFmode].scalar_in_vmx_p = true;
3447 reg_addr[DImode].scalar_in_vmx_p = true;
3448
3449 if (TARGET_P8_VECTOR)
3450 {
3451 reg_addr[SFmode].scalar_in_vmx_p = true;
3452 reg_addr[SImode].scalar_in_vmx_p = true;
3453
3454 if (TARGET_P9_VECTOR)
3455 {
3456 reg_addr[HImode].scalar_in_vmx_p = true;
3457 reg_addr[QImode].scalar_in_vmx_p = true;
3458 }
3459 }
3460 }
3461
3462 /* Precalculate HARD_REGNO_NREGS. */
3463 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3464 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3465 rs6000_hard_regno_nregs[m][r]
3466 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3467
3468 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3469 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3470 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3471 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3472 rs6000_hard_regno_mode_ok_p[m][r] = true;
3473
3474 /* Precalculate CLASS_MAX_NREGS sizes. */
3475 for (c = 0; c < LIM_REG_CLASSES; ++c)
3476 {
3477 int reg_size;
3478
3479 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3480 reg_size = UNITS_PER_VSX_WORD;
3481
3482 else if (c == ALTIVEC_REGS)
3483 reg_size = UNITS_PER_ALTIVEC_WORD;
3484
3485 else if (c == FLOAT_REGS)
3486 reg_size = UNITS_PER_FP_WORD;
3487
3488 else
3489 reg_size = UNITS_PER_WORD;
3490
3491 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3492 {
3493 machine_mode m2 = (machine_mode)m;
3494 int reg_size2 = reg_size;
3495
3496 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3497 in VSX. */
3498 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3499 reg_size2 = UNITS_PER_FP_WORD;
3500
3501 rs6000_class_max_nregs[m][c]
3502 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3503 }
3504 }
3505
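/* Worked example of the computation above: V2DFmode is 16 bytes, so in
   FLOAT_REGS (reg_size 8) it needs (16 + 8 - 1) / 8 = 2 registers, while
   in a VSX class (reg_size 16) it needs only 1.  TDmode, being
   FLOAT128_2REG_P, is forced back to the FP reg_size and thus always
   occupies 2 registers, even in a VSX register class.  */
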
3506 /* Calculate for which modes to automatically generate code that uses the
3507 reciprocal divide and square root instructions. In the future, possibly
3508 automatically generate the instructions even if the user did not specify
3509 -mrecip. The older machines' double-precision reciprocal sqrt estimate is
3510 not accurate enough. */
3511 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3512 if (TARGET_FRES)
3513 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3514 if (TARGET_FRE)
3515 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3516 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3517 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3518 if (VECTOR_UNIT_VSX_P (V2DFmode))
3519 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3520
3521 if (TARGET_FRSQRTES)
3522 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3523 if (TARGET_FRSQRTE)
3524 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3525 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3526 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3527 if (VECTOR_UNIT_VSX_P (V2DFmode))
3528 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3529
3530 if (rs6000_recip_control)
3531 {
3532 if (!flag_finite_math_only)
3533 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3534 "-ffast-math");
3535 if (flag_trapping_math)
3536 warning (0, "%qs requires %qs or %qs", "-mrecip",
3537 "-fno-trapping-math", "-ffast-math");
3538 if (!flag_reciprocal_math)
3539 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3540 "-ffast-math");
3541 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3542 {
3543 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3544 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3545 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3548 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3549 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3552 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3553 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3554
3555 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3556 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3557 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3560 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3561 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3564 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3565 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566
3567 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3568 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3569 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3570
3571 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3572 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3573 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3574 }
3575 }
3576
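/* Usage sketch for the bits set above: with -mrecip -ffast-math on a CPU
   that has fres/frsqrte, the expander can turn x / y into x * fres (y)
   refined by a Newton-Raphson step instead of a slow fdiv; the *_AUTO_RE
   and *_AUTO_RSQRTE bits are what permit that expansion.  */
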
3577 /* Update the addr mask bits in reg_addr to help secondary reload and the
3578 legitimate address support figure out the appropriate addressing to
3579 use. */
3580 rs6000_setup_reg_addr_masks ();
3581
3582 if (global_init_p || TARGET_DEBUG_TARGET)
3583 {
3584 if (TARGET_DEBUG_REG)
3585 rs6000_debug_reg_global ();
3586
3587 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3588 fprintf (stderr,
3589 "SImode variable mult cost = %d\n"
3590 "SImode constant mult cost = %d\n"
3591 "SImode short constant mult cost = %d\n"
3592 "DImode multiplication cost = %d\n"
3593 "SImode division cost = %d\n"
3594 "DImode division cost = %d\n"
3595 "Simple fp operation cost = %d\n"
3596 "DFmode multiplication cost = %d\n"
3597 "SFmode division cost = %d\n"
3598 "DFmode division cost = %d\n"
3599 "cache line size = %d\n"
3600 "l1 cache size = %d\n"
3601 "l2 cache size = %d\n"
3602 "simultaneous prefetches = %d\n"
3603 "\n",
3604 rs6000_cost->mulsi,
3605 rs6000_cost->mulsi_const,
3606 rs6000_cost->mulsi_const9,
3607 rs6000_cost->muldi,
3608 rs6000_cost->divsi,
3609 rs6000_cost->divdi,
3610 rs6000_cost->fp,
3611 rs6000_cost->dmul,
3612 rs6000_cost->sdiv,
3613 rs6000_cost->ddiv,
3614 rs6000_cost->cache_line_size,
3615 rs6000_cost->l1_cache_size,
3616 rs6000_cost->l2_cache_size,
3617 rs6000_cost->simultaneous_prefetches);
3618 }
3619 }
3620
3621 #if TARGET_MACHO
3622 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3623
3624 static void
3625 darwin_rs6000_override_options (void)
3626 {
3627 /* The Darwin ABI always includes AltiVec; it cannot be (validly) turned
3628 off. */
3629 rs6000_altivec_abi = 1;
3630 TARGET_ALTIVEC_VRSAVE = 1;
3631 rs6000_current_abi = ABI_DARWIN;
3632
3633 if (DEFAULT_ABI == ABI_DARWIN
3634 && TARGET_64BIT)
3635 darwin_one_byte_bool = 1;
3636
3637 if (TARGET_64BIT && ! TARGET_POWERPC64)
3638 {
3639 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3640 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3641 }
3642 if (flag_mkernel)
3643 {
3644 rs6000_default_long_calls = 1;
3645 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3646 }
3647
3648 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3649 Altivec. */
3650 if (!flag_mkernel && !flag_apple_kext
3651 && TARGET_64BIT
3652 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3653 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3654
3655 /* Unless the user (not the configurer) has explicitly overridden
3656 it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
3657 G4 unless targeting the kernel. */
3658 if (!flag_mkernel
3659 && !flag_apple_kext
3660 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3661 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3662 && ! global_options_set.x_rs6000_cpu_index)
3663 {
3664 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3665 }
3666 }
3667 #endif
3668
3669 /* If not otherwise specified by a target, make 'long double' equivalent to
3670 'double'. */
3671
3672 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3673 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3674 #endif
3675
3676 /* Return the builtin mask of the various options that could affect which
3677 builtins are enabled. In the past we used target_flags, but we've run out
3678 of bits, and some options are no longer in target_flags. */
3679
3680 HOST_WIDE_INT
3681 rs6000_builtin_mask_calculate (void)
3682 {
3683 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3684 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3685 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3686 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3687 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3688 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3689 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3690 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3691 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3692 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3693 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3694 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3695 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3696 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3697 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3698 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3699 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3700 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3701 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3702 | ((TARGET_LONG_DOUBLE_128
3703 && TARGET_HARD_FLOAT
3704 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3705 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3706 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3707 }
3708
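/* For example, -mcpu=power8 would typically produce a mask including
   RS6000_BTM_ALTIVEC, RS6000_BTM_VSX, RS6000_BTM_P8_VECTOR and
   RS6000_BTM_CRYPTO, so only built-ins backed by those features become
   available.  (A sketch of the intent, not an exhaustive list of the
   bits that cpu enables.)  */
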
3709 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3710 to clobber the XER[CA] bit because clobbering that bit without telling
3711 the compiler worked just fine with versions of GCC before GCC 5, and
3712 breaking a lot of older code in ways that are hard to track down is
3713 not such a great idea. */
3714
3715 static rtx_insn *
3716 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3717 vec<const char *> &/*constraints*/,
3718 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3719 {
3720 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3721 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3722 return NULL;
3723 }
3724
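/* A minimal example of the user code this protects (hypothetical asm):

     asm ("addic %0,%1,-1\n\tsubfe %0,%0,%1" : "=r" (x) : "r" (y));

   The addic/subfe pair modifies XER[CA] without declaring it, which
   pre-GCC 5 compilers tolerated; clobbering CA on every asm keeps such
   code working.  */
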
3725 /* Override command line options.
3726
3727 Combine build-specific configuration information with options
3728 specified on the command line to set various state variables which
3729 influence code generation, optimization, and expansion of built-in
3730 functions. Assure that command-line configuration preferences are
3731 compatible with each other and with the build configuration; issue
3732 warnings while adjusting configuration or error messages while
3733 rejecting configuration.
3734
3735 Upon entry to this function:
3736
3737 This function is called once at the beginning of
3738 compilation, and then again at the start and end of compiling
3739 each section of code that has a different configuration, as
3740 indicated, for example, by adding the
3741
3742 __attribute__((__target__("cpu=power9")))
3743
3744 qualifier to a function definition or, for example, by bracketing
3745 code between
3746
3747 #pragma GCC target("altivec")
3748
3749 and
3750
3751 #pragma GCC reset_options
3752
3753 directives. Parameter global_init_p is true for the initial
3754 invocation, which initializes global variables, and false for all
3755 subsequent invocations.
3756
3757
3758 Various global state information is assumed to be valid. This
3759 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3760 default CPU specified at build configure time, TARGET_DEFAULT,
3761 representing the default set of option flags for the default
3762 target, and global_options_set.x_rs6000_isa_flags, representing
3763 which options were requested on the command line.
3764
3765 Upon return from this function:
3766
3767 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3768 was set by name on the command line. Additionally, if certain
3769 attributes are automatically enabled or disabled by this function
3770 in order to assure compatibility between options and
3771 configuration, the flags associated with those attributes are
3772 also set. By setting these "explicit bits", we avoid the risk
3773 that other code might accidentally overwrite these particular
3774 attributes with "default values".
3775
3776 The various bits of rs6000_isa_flags are set to indicate the
3777 target options that have been selected for the most current
3778 compilation efforts. This has the effect of also turning on the
3779 associated TARGET_XXX values since these are macros which are
3780 generally defined to test the corresponding bit of the
3781 rs6000_isa_flags variable.
3782
3783 The variable rs6000_builtin_mask is set to represent the target
3784 options for the most current compilation efforts, consistent with
3785 the current contents of rs6000_isa_flags. This variable controls
3786 expansion of built-in functions.
3787
3788 Various other global variables and fields of global structures
3789 (over 50 in all) are initialized to reflect the desired options
3790 for the most current compilation efforts. */
3791
3792 static bool
3793 rs6000_option_override_internal (bool global_init_p)
3794 {
3795 bool ret = true;
3796
3797 HOST_WIDE_INT set_masks;
3798 HOST_WIDE_INT ignore_masks;
3799 int cpu_index = -1;
3800 int tune_index;
3801 struct cl_target_option *main_target_opt
3802 = ((global_init_p || target_option_default_node == NULL)
3803 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3804
3805 /* Print defaults. */
3806 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3807 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3808
3809 /* Remember the explicit arguments. */
3810 if (global_init_p)
3811 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3812
3813 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3814 library functions, so warn about it. The flag may be useful for
3815 performance studies from time to time though, so don't disable it
3816 entirely. */
3817 if (global_options_set.x_rs6000_alignment_flags
3818 && rs6000_alignment_flags == MASK_ALIGN_POWER
3819 && DEFAULT_ABI == ABI_DARWIN
3820 && TARGET_64BIT)
3821 warning (0, "%qs is not supported for 64-bit Darwin;"
3822 " it is incompatible with the installed C and C++ libraries",
3823 "-malign-power");
3824
3825 /* Numerous experiments show that IRA-based loop pressure
3826 calculation works better for RTL loop invariant motion on targets
3827 with enough (>= 32) registers. It is an expensive optimization,
3828 so it is enabled only for peak performance. */
3829 if (optimize >= 3 && global_init_p
3830 && !global_options_set.x_flag_ira_loop_pressure)
3831 flag_ira_loop_pressure = 1;
3832
3833 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3834 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3835 options were already specified. */
3836 if (flag_sanitize & SANITIZE_USER_ADDRESS
3837 && !global_options_set.x_flag_asynchronous_unwind_tables)
3838 flag_asynchronous_unwind_tables = 1;
3839
3840 /* Set the pointer size. */
3841 if (TARGET_64BIT)
3842 {
3843 rs6000_pmode = DImode;
3844 rs6000_pointer_size = 64;
3845 }
3846 else
3847 {
3848 rs6000_pmode = SImode;
3849 rs6000_pointer_size = 32;
3850 }
3851
3852 /* Some OSs don't support saving the high part of 64-bit registers on context
3853 switch. Other OSs don't support saving Altivec registers. On those OSs,
3854 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3855 if the user wants either, the user must explicitly specify them and we
3856 won't interfere with the user's specification. */
3857
3858 set_masks = POWERPC_MASKS;
3859 #ifdef OS_MISSING_POWERPC64
3860 if (OS_MISSING_POWERPC64)
3861 set_masks &= ~OPTION_MASK_POWERPC64;
3862 #endif
3863 #ifdef OS_MISSING_ALTIVEC
3864 if (OS_MISSING_ALTIVEC)
3865 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3866 | OTHER_VSX_VECTOR_MASKS);
3867 #endif
3868
3869 /* Don't let the processor default override options given explicitly. */
3870 set_masks &= ~rs6000_isa_flags_explicit;
3871
3872 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
3873 the cpu in a target attribute or pragma, but did not specify a tuning
3874 option, use the cpu for the tuning option rather than the option specified
3875 with -mtune on the command line. Process a '--with-cpu' configuration
3876 request as an implicit --cpu. */
3877 if (rs6000_cpu_index >= 0)
3878 cpu_index = rs6000_cpu_index;
3879 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3880 cpu_index = main_target_opt->x_rs6000_cpu_index;
3881 else if (OPTION_TARGET_CPU_DEFAULT)
3882 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3883
3884 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3885 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3886 with those from the cpu, except for options that were explicitly set. If
3887 we don't have a cpu, do not override the target bits set in
3888 TARGET_DEFAULT. */
3889 if (cpu_index >= 0)
3890 {
3891 rs6000_cpu_index = cpu_index;
3892 rs6000_isa_flags &= ~set_masks;
3893 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3894 & set_masks);
3895 }
3896 else
3897 {
3898 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3899 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3900 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3901 to using rs6000_isa_flags, we need to do the initialization here.
3902
3903 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3904 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3905 HOST_WIDE_INT flags;
3906 if (TARGET_DEFAULT)
3907 flags = TARGET_DEFAULT;
3908 else
3909 {
3910 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3911 const char *default_cpu = (!TARGET_POWERPC64
3912 ? "powerpc"
3913 : (BYTES_BIG_ENDIAN
3914 ? "powerpc64"
3915 : "powerpc64le"));
3916 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3917 flags = processor_target_table[default_cpu_index].target_enable;
3918 }
3919 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3920 }
3921
3922 if (rs6000_tune_index >= 0)
3923 tune_index = rs6000_tune_index;
3924 else if (cpu_index >= 0)
3925 rs6000_tune_index = tune_index = cpu_index;
3926 else
3927 {
3928 size_t i;
3929 enum processor_type tune_proc
3930 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3931
3932 tune_index = -1;
3933 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3934 if (processor_target_table[i].processor == tune_proc)
3935 {
3936 tune_index = i;
3937 break;
3938 }
3939 }
3940
3941 if (cpu_index >= 0)
3942 rs6000_cpu = processor_target_table[cpu_index].processor;
3943 else
3944 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3945
3946 gcc_assert (tune_index >= 0);
3947 rs6000_tune = processor_target_table[tune_index].processor;
3948
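/* Example of the cpu/tune interplay above: "-mcpu=power8 -mtune=power9"
   selects POWER8 ISA bits but POWER9 scheduling, while plain
   "-mcpu=power9" (or an equivalent target attribute) also retunes for
   power9, because cpu_index doubles as the tuning default.  */
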
3949 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3950 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3951 || rs6000_cpu == PROCESSOR_PPCE5500)
3952 {
3953 if (TARGET_ALTIVEC)
3954 error ("AltiVec not supported in this target");
3955 }
3956
3957 /* If we are optimizing big endian systems for space, use the load/store
3958 multiple instructions. */
3959 if (BYTES_BIG_ENDIAN && optimize_size)
3960 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3961
3962 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3963 because the hardware doesn't support the instructions used in little
3964 endian mode, and using them causes an alignment trap. The 750 does not
3965 cause an alignment trap (except when the target is unaligned). */
3966
3967 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3968 {
3969 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3970 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3971 warning (0, "%qs is not supported on little endian systems",
3972 "-mmultiple");
3973 }
3974
3975 /* If little-endian, default to -mstrict-align on older processors.
3976 Testing for htm matches power8 and later. */
3977 if (!BYTES_BIG_ENDIAN
3978 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3979 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3980
3981 if (!rs6000_fold_gimple)
3982 fprintf (stderr,
3983 "gimple folding of rs6000 builtins has been disabled.\n");
3984
3985 /* Add some warnings for VSX. */
3986 if (TARGET_VSX)
3987 {
3988 const char *msg = NULL;
3989 if (!TARGET_HARD_FLOAT)
3990 {
3991 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3992 msg = N_("-mvsx requires hardware floating point");
3993 else
3994 {
3995 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3996 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3997 }
3998 }
3999 else if (TARGET_AVOID_XFORM > 0)
4000 msg = N_("-mvsx needs indexed addressing");
4001 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4002 & OPTION_MASK_ALTIVEC))
4003 {
4004 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4005 msg = N_("-mvsx and -mno-altivec are incompatible");
4006 else
4007 msg = N_("-mno-altivec disables vsx");
4008 }
4009
4010 if (msg)
4011 {
4012 warning (0, msg);
4013 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4014 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4015 }
4016 }
4017
4018 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4019 the -mcpu setting to enable options that conflict. */
4020 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4021 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4022 | OPTION_MASK_ALTIVEC
4023 | OPTION_MASK_VSX)) != 0)
4024 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4025 | OPTION_MASK_DIRECT_MOVE)
4026 & ~rs6000_isa_flags_explicit);
4027
4028 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4029 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4030
4031 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4032 off all of the options that depend on those flags. */
4033 ignore_masks = rs6000_disable_incompatible_switches ();
4034
4035 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4036 unless the user explicitly used the -mno-<option> to disable the code. */
4037 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4038 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4039 else if (TARGET_P9_MINMAX)
4040 {
4041 if (cpu_index >= 0)
4042 {
4043 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4044 {
4045 /* Legacy behavior: allow -mcpu=power9 with certain
4046 capabilities explicitly disabled. */
4047 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4048 }
4049 else
4050 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4051 "for <xxx> less than power9", "-mcpu");
4052 }
4053 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4054 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4055 & rs6000_isa_flags_explicit))
4056 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4057 were explicitly cleared. */
4058 error ("%qs incompatible with explicitly disabled options",
4059 "-mpower9-minmax");
4060 else
4061 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4062 }
4063 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4064 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4065 else if (TARGET_VSX)
4066 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4067 else if (TARGET_POPCNTD)
4068 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4069 else if (TARGET_DFP)
4070 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4071 else if (TARGET_CMPB)
4072 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4073 else if (TARGET_FPRND)
4074 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4075 else if (TARGET_POPCNTB)
4076 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4077 else if (TARGET_ALTIVEC)
4078 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4079
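/* Sketch of the cascade above: plain -mvsx pulls in ISA_2_6_MASKS_SERVER
   (and with it -maltivec, -mpopcntd, etc.), while -mpower9-vector pulls
   in the full ISA_3_0_MASKS_SERVER set; any explicit -mno-<option>
   recorded in ignore_masks always wins over these implications.  */
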
4080 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4081 {
4082 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4083 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4084 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4085 }
4086
4087 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4088 {
4089 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4090 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4091 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4092 }
4093
4094 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4095 {
4096 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4097 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4098 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4099 }
4100
4101 if (TARGET_P8_VECTOR && !TARGET_VSX)
4102 {
4103 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4104 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4105 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4106 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4107 {
4108 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4109 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4110 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4111 }
4112 else
4113 {
4114 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4115 not explicit. */
4116 rs6000_isa_flags |= OPTION_MASK_VSX;
4117 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4118 }
4119 }
4120
4121 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4122 {
4123 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4124 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4125 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4126 }
4127
4128 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4129 silently turn off quad memory mode. */
4130 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4131 {
4132 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4133 warning (0, N_("-mquad-memory requires 64-bit mode"));
4134
4135 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4136 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4137
4138 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4139 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4140 }
4141
4142 /* Non-atomic quad memory load/store are disabled for little endian, since
4143 the words are reversed, but atomic operations can still be done by
4144 swapping the words. */
4145 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4146 {
4147 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4148 warning (0, N_("-mquad-memory is not available in little endian "
4149 "mode"));
4150
4151 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4152 }
4153
4154 /* Assume that if the user asked for normal quad memory instructions, they
4155 want the atomic versions as well, unless they explicitly told us not to
4156 use quad word atomic instructions. */
4157 if (TARGET_QUAD_MEMORY
4158 && !TARGET_QUAD_MEMORY_ATOMIC
4159 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4160 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4161
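/* Usage sketch: on 64-bit big endian, -mquad-memory enables lq/stq for
   16-byte moves and implies -mquad-memory-atomic; on little endian only
   the atomic form survives, since lqarx/stqcx. can tolerate the word
   swap while plain lq/stq cannot.  */
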
4162 /* If we can shrink-wrap the TOC register save separately, then use
4163 -msave-toc-indirect unless explicitly disabled. */
4164 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4165 && flag_shrink_wrap_separate
4166 && optimize_function_for_speed_p (cfun))
4167 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4168
4169 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4170 generating power8 instructions. Power9 does not optimize power8 fusion
4171 cases. */
4172 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4173 {
4174 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4175 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4176 else
4177 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4178 }
4179
4180 /* Setting additional fusion flags turns on base fusion. */
4181 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4182 {
4183 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4184 {
4185 if (TARGET_P8_FUSION_SIGN)
4186 error ("%qs requires %qs", "-mpower8-fusion-sign",
4187 "-mpower8-fusion");
4188
4189 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4190 }
4191 else
4192 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4193 }
4194
4195 /* Power8 does not fuse sign extended loads with the addis. If we are
4196 optimizing at high levels for speed, convert a sign extended load into a
4197 zero extending load, and an explicit sign extension. */
4198 if (TARGET_P8_FUSION
4199 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4200 && optimize_function_for_speed_p (cfun)
4201 && optimize >= 3)
4202 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4203
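/* Illustrative effect of -mpower8-fusion-sign (hypothetical registers):
   rather than the unfusable "addis 9,2,x@toc@ha; lha 3,x@toc@l(9)", the
   compiler emits the zero-extending lhz, which power8 can fuse with the
   addis, followed by an explicit extsh to restore the sign extension.  */
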
4204 /* ISA 3.0 vector instructions include ISA 2.07. */
4205 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4206 {
4207 /* We prefer to not mention undocumented options in
4208 error messages. However, if users have managed to select
4209 power9-vector without selecting power8-vector, they
4210 already know about undocumented flags. */
4211 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4212 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4213 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4214 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4215 {
4216 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4217 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4218 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4219 }
4220 else
4221 {
4222 /* OPTION_MASK_P9_VECTOR is explicit and
4223 OPTION_MASK_P8_VECTOR is not explicit. */
4224 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4225 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4226 }
4227 }
4228
4229 /* Set -mallow-movmisalign explicitly to on if we have full ISA 2.07
4230 support. If we only have ISA 2.06 support, and the user did not specify
4231 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4232 but we don't enable the full vectorization support. */
4233 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4234 TARGET_ALLOW_MOVMISALIGN = 1;
4235
4236 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4237 {
4238 if (TARGET_ALLOW_MOVMISALIGN > 0
4239 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4240 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4241
4242 TARGET_ALLOW_MOVMISALIGN = 0;
4243 }
4244
4245 /* Determine when unaligned vector accesses are permitted, and when
4246 they are preferred over masked Altivec loads. Note that if
4247 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4248 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4249 not true. */
4250 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4251 {
4252 if (!TARGET_VSX)
4253 {
4254 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4255 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4256
4257 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4258 }
4259
4260 else if (!TARGET_ALLOW_MOVMISALIGN)
4261 {
4262 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4263 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4264 "-mallow-movmisalign");
4265
4266 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4267 }
4268 }
4269
4270 /* Use long double size to select the appropriate long double. We use
4271 TYPE_PRECISION to differentiate the 3 different long double types. We map
4272 128 into the precision used for TFmode. */
4273 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4274 ? 64
4275 : FLOAT_PRECISION_TFmode);
4276
4277 /* Set long double size before the IEEE 128-bit tests. */
4278 if (!global_options_set.x_rs6000_long_double_type_size)
4279 {
4280 if (main_target_opt != NULL
4281 && (main_target_opt->x_rs6000_long_double_type_size
4282 != default_long_double_size))
4283 error ("target attribute or pragma changes long double size");
4284 else
4285 rs6000_long_double_type_size = default_long_double_size;
4286 }
4287 else if (rs6000_long_double_type_size == 128)
4288 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4289 else if (global_options_set.x_rs6000_ieeequad)
4290 {
4291 if (global_options.x_rs6000_ieeequad)
4292 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4293 else
4294 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4295 }
4296
4297 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4298 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4299 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4300 those systems will not pick up this default. Warn if the user changes the
4301 default unless -Wno-psabi. */
4302 if (!global_options_set.x_rs6000_ieeequad)
4303 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4304
4305 else
4306 {
4307 if (global_options.x_rs6000_ieeequad
4308 && (!TARGET_POPCNTD || !TARGET_VSX))
4309 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4310
4311 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4312 {
4313 static bool warned_change_long_double;
4314 if (!warned_change_long_double)
4315 {
4316 warned_change_long_double = true;
4317 if (TARGET_IEEEQUAD)
4318 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4319 else
4320 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4321 }
4322 }
4323 }
4324
4325 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4326 systems. In GCC 7, we enabled the IEEE 128-bit floating point
4327 infrastructure (-mfloat128-type) but did not enable the actual __float128
4328 type unless the user used the explicit -mfloat128. In GCC 8, we enable
4329 both the keyword and the type. */
4330 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4331
4332 /* IEEE 128-bit floating point requires VSX support. */
4333 if (TARGET_FLOAT128_KEYWORD)
4334 {
4335 if (!TARGET_VSX)
4336 {
4337 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4338 error ("%qs requires VSX support", "-mfloat128");
4339
4340 TARGET_FLOAT128_TYPE = 0;
4341 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4342 | OPTION_MASK_FLOAT128_HW);
4343 }
4344 else if (!TARGET_FLOAT128_TYPE)
4345 {
4346 TARGET_FLOAT128_TYPE = 1;
4347 warning (0, "The -mfloat128 option may not be fully supported");
4348 }
4349 }
4350
4351 /* Enable the __float128 keyword under Linux by default. */
4352 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4353 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4354 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4355
4356 /* If we are supporting the float128 type and have full ISA 3.0 support,
4357 enable -mfloat128-hardware by default. However, don't enable
4358 -mfloat128-hardware if it was explicitly turned off. 64-bit mode is needed
4359 because sometimes the compiler wants to put things in an integer
4360 container, and if we don't have __int128 support, it is impossible. */
4361 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4362 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4363 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4364 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4365
4366 if (TARGET_FLOAT128_HW
4367 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4368 {
4369 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4370 error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");
4371
4372 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4373 }
4374
4375 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4376 {
4377 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4378 error ("%qs requires %qs", "-mfloat128-hardware", "-m64");
4379
4380 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4381 }
4382
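/* End-user view of the defaults above (a sketch): on powerpc64le Linux
   with -mcpu=power9, __float128 is available and maps onto the ISA 3.0
   quad-precision instructions (xsaddqp and friends); with -mcpu=power8
   the type still exists, but its arithmetic is routed through libgcc
   software emulation.  */
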
4383 /* Print the options after updating the defaults. */
4384 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4385 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4386
4387 /* E500mc does "better" if we inline more aggressively. Respect the
4388 user's opinion, though. */
4389 if (rs6000_block_move_inline_limit == 0
4390 && (rs6000_tune == PROCESSOR_PPCE500MC
4391 || rs6000_tune == PROCESSOR_PPCE500MC64
4392 || rs6000_tune == PROCESSOR_PPCE5500
4393 || rs6000_tune == PROCESSOR_PPCE6500))
4394 rs6000_block_move_inline_limit = 128;
4395
4396 /* store_one_arg depends on expand_block_move to handle at least the
4397 size of reg_parm_stack_space. */
4398 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4399 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4400
4401 if (global_init_p)
4402 {
4403 /* If the appropriate debug option is enabled, replace the target hooks
4404 with debug versions that call the real version and then print
4405 debugging information. */
4406 if (TARGET_DEBUG_COST)
4407 {
4408 targetm.rtx_costs = rs6000_debug_rtx_costs;
4409 targetm.address_cost = rs6000_debug_address_cost;
4410 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4411 }
4412
4413 if (TARGET_DEBUG_ADDR)
4414 {
4415 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4416 targetm.legitimize_address = rs6000_debug_legitimize_address;
4417 rs6000_secondary_reload_class_ptr
4418 = rs6000_debug_secondary_reload_class;
4419 targetm.secondary_memory_needed
4420 = rs6000_debug_secondary_memory_needed;
4421 targetm.can_change_mode_class
4422 = rs6000_debug_can_change_mode_class;
4423 rs6000_preferred_reload_class_ptr
4424 = rs6000_debug_preferred_reload_class;
4425 rs6000_legitimize_reload_address_ptr
4426 = rs6000_debug_legitimize_reload_address;
4427 rs6000_mode_dependent_address_ptr
4428 = rs6000_debug_mode_dependent_address;
4429 }
4430
4431 if (rs6000_veclibabi_name)
4432 {
4433 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4434 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4435 else
4436 {
4437 error ("unknown vectorization library ABI type (%qs) for "
4438 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4439 ret = false;
4440 }
4441 }
4442 }
4443
4444 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4445 target attribute or pragma which automatically enables both options,
4446 unless the altivec ABI was set. This is set by default for 64-bit, but
4447 not for 32-bit. */
4448 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4449 {
4450 TARGET_FLOAT128_TYPE = 0;
4451 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4452 | OPTION_MASK_FLOAT128_KEYWORD)
4453 & ~rs6000_isa_flags_explicit);
4454 }
4455
4456 /* Enable Altivec ABI for AIX -maltivec. */
4457 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4458 {
4459 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4460 error ("target attribute or pragma changes AltiVec ABI");
4461 else
4462 rs6000_altivec_abi = 1;
4463 }
4464
4465 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4466 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4467 be explicitly overridden in either case. */
4468 if (TARGET_ELF)
4469 {
4470 if (!global_options_set.x_rs6000_altivec_abi
4471 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4472 {
4473 if (main_target_opt != NULL
4474 && !main_target_opt->x_rs6000_altivec_abi)
4475 error ("target attribute or pragma changes AltiVec ABI");
4476 else
4477 rs6000_altivec_abi = 1;
4478 }
4479 }
4480
4481 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4482 So far, the only darwin64 targets are also Mach-O. */
4483 if (TARGET_MACHO
4484 && DEFAULT_ABI == ABI_DARWIN
4485 && TARGET_64BIT)
4486 {
4487 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4488 error ("target attribute or pragma changes darwin64 ABI");
4489 else
4490 {
4491 rs6000_darwin64_abi = 1;
4492 /* Default to natural alignment, for better performance. */
4493 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4494 }
4495 }
4496
4497 /* Place FP constants in the constant pool instead of the TOC
4498 if section anchors are enabled. */
4499 if (flag_section_anchors
4500 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4501 TARGET_NO_FP_IN_TOC = 1;
4502
4503 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4504 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4505
4506 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4507 SUBTARGET_OVERRIDE_OPTIONS;
4508 #endif
4509 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4510 SUBSUBTARGET_OVERRIDE_OPTIONS;
4511 #endif
4512 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4513 SUB3TARGET_OVERRIDE_OPTIONS;
4514 #endif
4515
4516 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4517 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4518
4519 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4520 && rs6000_tune != PROCESSOR_POWER5
4521 && rs6000_tune != PROCESSOR_POWER6
4522 && rs6000_tune != PROCESSOR_POWER7
4523 && rs6000_tune != PROCESSOR_POWER8
4524 && rs6000_tune != PROCESSOR_POWER9
4525 && rs6000_tune != PROCESSOR_PPCA2
4526 && rs6000_tune != PROCESSOR_CELL
4527 && rs6000_tune != PROCESSOR_PPC476);
4528 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4529 || rs6000_tune == PROCESSOR_POWER5
4530 || rs6000_tune == PROCESSOR_POWER7
4531 || rs6000_tune == PROCESSOR_POWER8);
4532 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4533 || rs6000_tune == PROCESSOR_POWER5
4534 || rs6000_tune == PROCESSOR_POWER6
4535 || rs6000_tune == PROCESSOR_POWER7
4536 || rs6000_tune == PROCESSOR_POWER8
4537 || rs6000_tune == PROCESSOR_POWER9
4538 || rs6000_tune == PROCESSOR_PPCE500MC
4539 || rs6000_tune == PROCESSOR_PPCE500MC64
4540 || rs6000_tune == PROCESSOR_PPCE5500
4541 || rs6000_tune == PROCESSOR_PPCE6500);
4542
4543 /* Allow debug switches to override the above settings. These are set to -1
4544 in rs6000.opt to indicate the user hasn't directly set the switch. */
4545 if (TARGET_ALWAYS_HINT >= 0)
4546 rs6000_always_hint = TARGET_ALWAYS_HINT;
4547
4548 if (TARGET_SCHED_GROUPS >= 0)
4549 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4550
4551 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4552 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4553
4554 rs6000_sched_restricted_insns_priority
4555 = (rs6000_sched_groups ? 1 : 0);
4556
4557 /* Handle -msched-costly-dep option. */
4558 rs6000_sched_costly_dep
4559 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4560
4561 if (rs6000_sched_costly_dep_str)
4562 {
4563 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4564 rs6000_sched_costly_dep = no_dep_costly;
4565 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4566 rs6000_sched_costly_dep = all_deps_costly;
4567 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4568 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4569 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4570 rs6000_sched_costly_dep = store_to_load_dep_costly;
4571 else
4572 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4573 atoi (rs6000_sched_costly_dep_str));
4574 }
4575
4576 /* Handle -minsert-sched-nops option. */
4577 rs6000_sched_insert_nops
4578 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4579
4580 if (rs6000_sched_insert_nops_str)
4581 {
4582 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4583 rs6000_sched_insert_nops = sched_finish_none;
4584 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4585 rs6000_sched_insert_nops = sched_finish_pad_groups;
4586 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4587 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4588 else
4589 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4590 atoi (rs6000_sched_insert_nops_str));
4591 }
4592
4593 /* Handle stack protector */
4594 if (!global_options_set.x_rs6000_stack_protector_guard)
4595 #ifdef TARGET_THREAD_SSP_OFFSET
4596 rs6000_stack_protector_guard = SSP_TLS;
4597 #else
4598 rs6000_stack_protector_guard = SSP_GLOBAL;
4599 #endif
4600
4601 #ifdef TARGET_THREAD_SSP_OFFSET
4602 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4603 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4604 #endif
4605
4606 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4607 {
4608 char *endp;
4609 const char *str = rs6000_stack_protector_guard_offset_str;
4610
4611 errno = 0;
4612 long offset = strtol (str, &endp, 0);
4613 if (!*str || *endp || errno)
4614 error ("%qs is not a valid number in %qs", str,
4615 "-mstack-protector-guard-offset=");
4616
4617 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4618 || (TARGET_64BIT && (offset & 3)))
4619 error ("%qs is not a valid offset in %qs", str,
4620 "-mstack-protector-guard-offset=");
4621
4622 rs6000_stack_protector_guard_offset = offset;
4623 }
4624
4625 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4626 {
4627 const char *str = rs6000_stack_protector_guard_reg_str;
4628 int reg = decode_reg_name (str);
4629
4630 if (!IN_RANGE (reg, 1, 31))
4631 error ("%qs is not a valid base register in %qs", str,
4632 "-mstack-protector-guard-reg=");
4633
4634 rs6000_stack_protector_guard_reg = reg;
4635 }
4636
4637 if (rs6000_stack_protector_guard == SSP_TLS
4638 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4639 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4640
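/* Example invocation exercising the checks above (the values are
   illustrative, matching the usual 64-bit glibc TCB layout):

     gcc -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
         -mstack-protector-guard-offset=-28688 ...

   An offset outside [-0x8000, 0x7fff] (or one that is not a multiple of
   4 in 64-bit mode) and a register outside r1..r31 are rejected by the
   errors above.  */
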
4641 if (global_init_p)
4642 {
4643 #ifdef TARGET_REGNAMES
4644 /* If the user desires alternate register names, copy in the
4645 alternate names now. */
4646 if (TARGET_REGNAMES)
4647 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4648 #endif
4649
4650 /* Set aix_struct_return last, after the ABI is determined.
4651 If -maix-struct-return or -msvr4-struct-return was explicitly
4652 used, don't override with the ABI default. */
4653 if (!global_options_set.x_aix_struct_return)
4654 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4655
4656 #if 0
4657 /* IBM XL compiler defaults to unsigned bitfields. */
4658 if (TARGET_XL_COMPAT)
4659 flag_signed_bitfields = 0;
4660 #endif
4661
4662 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4663 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4664
4665 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4666
4667 /* We can only guarantee the availability of DI pseudo-ops when
4668 assembling for 64-bit targets. */
4669 if (!TARGET_64BIT)
4670 {
4671 targetm.asm_out.aligned_op.di = NULL;
4672 targetm.asm_out.unaligned_op.di = NULL;
4673 }
4674
4676 /* Set branch target alignment, if not optimizing for size. */
4677 if (!optimize_size)
4678 {
4679 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4680 8-byte aligned to avoid misprediction by the branch predictor. */
4681 if (rs6000_tune == PROCESSOR_TITAN
4682 || rs6000_tune == PROCESSOR_CELL)
4683 {
4684 if (flag_align_functions && !str_align_functions)
4685 str_align_functions = "8";
4686 if (flag_align_jumps && !str_align_jumps)
4687 str_align_jumps = "8";
4688 if (flag_align_loops && !str_align_loops)
4689 str_align_loops = "8";
4690 }
4691 if (rs6000_align_branch_targets)
4692 {
4693 if (flag_align_functions && !str_align_functions)
4694 str_align_functions = "16";
4695 if (flag_align_jumps && !str_align_jumps)
4696 str_align_jumps = "16";
4697 if (flag_align_loops && !str_align_loops)
4698 {
4699 can_override_loop_align = 1;
4700 str_align_loops = "16";
4701 }
4702 }
4703
4704 if (flag_align_jumps && !str_align_jumps)
4705 str_align_jumps = "16";
4706 if (flag_align_loops && !str_align_loops)
4707 str_align_loops = "16";
4708 }
4709
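/* Net effect (a sketch): without explicit -falign-* options, tuning for
   a branch-target-aligning CPU behaves like "-falign-functions=16
   -falign-jumps=16 -falign-loops=16", while Cell and Titan keep the
   8-byte values set for them above.  */
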
4710 /* Arrange to save and restore machine status around nested functions. */
4711 init_machine_status = rs6000_init_machine_status;
4712
4713 /* We should always be splitting complex arguments, but we can't break
4714 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4715 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4716 targetm.calls.split_complex_arg = NULL;
4717
4718 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4719 if (DEFAULT_ABI == ABI_AIX)
4720 targetm.calls.custom_function_descriptors = 0;
4721 }
4722
4723 /* Initialize rs6000_cost with the appropriate target costs. */
4724 if (optimize_size)
4725 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4726 else
4727 switch (rs6000_tune)
4728 {
4729 case PROCESSOR_RS64A:
4730 rs6000_cost = &rs64a_cost;
4731 break;
4732
4733 case PROCESSOR_MPCCORE:
4734 rs6000_cost = &mpccore_cost;
4735 break;
4736
4737 case PROCESSOR_PPC403:
4738 rs6000_cost = &ppc403_cost;
4739 break;
4740
4741 case PROCESSOR_PPC405:
4742 rs6000_cost = &ppc405_cost;
4743 break;
4744
4745 case PROCESSOR_PPC440:
4746 rs6000_cost = &ppc440_cost;
4747 break;
4748
4749 case PROCESSOR_PPC476:
4750 rs6000_cost = &ppc476_cost;
4751 break;
4752
4753 case PROCESSOR_PPC601:
4754 rs6000_cost = &ppc601_cost;
4755 break;
4756
4757 case PROCESSOR_PPC603:
4758 rs6000_cost = &ppc603_cost;
4759 break;
4760
4761 case PROCESSOR_PPC604:
4762 rs6000_cost = &ppc604_cost;
4763 break;
4764
4765 case PROCESSOR_PPC604e:
4766 rs6000_cost = &ppc604e_cost;
4767 break;
4768
4769 case PROCESSOR_PPC620:
4770 rs6000_cost = &ppc620_cost;
4771 break;
4772
4773 case PROCESSOR_PPC630:
4774 rs6000_cost = &ppc630_cost;
4775 break;
4776
4777 case PROCESSOR_CELL:
4778 rs6000_cost = &ppccell_cost;
4779 break;
4780
4781 case PROCESSOR_PPC750:
4782 case PROCESSOR_PPC7400:
4783 rs6000_cost = &ppc750_cost;
4784 break;
4785
4786 case PROCESSOR_PPC7450:
4787 rs6000_cost = &ppc7450_cost;
4788 break;
4789
4790 case PROCESSOR_PPC8540:
4791 case PROCESSOR_PPC8548:
4792 rs6000_cost = &ppc8540_cost;
4793 break;
4794
4795 case PROCESSOR_PPCE300C2:
4796 case PROCESSOR_PPCE300C3:
4797 rs6000_cost = &ppce300c2c3_cost;
4798 break;
4799
4800 case PROCESSOR_PPCE500MC:
4801 rs6000_cost = &ppce500mc_cost;
4802 break;
4803
4804 case PROCESSOR_PPCE500MC64:
4805 rs6000_cost = &ppce500mc64_cost;
4806 break;
4807
4808 case PROCESSOR_PPCE5500:
4809 rs6000_cost = &ppce5500_cost;
4810 break;
4811
4812 case PROCESSOR_PPCE6500:
4813 rs6000_cost = &ppce6500_cost;
4814 break;
4815
4816 case PROCESSOR_TITAN:
4817 rs6000_cost = &titan_cost;
4818 break;
4819
4820 case PROCESSOR_POWER4:
4821 case PROCESSOR_POWER5:
4822 rs6000_cost = &power4_cost;
4823 break;
4824
4825 case PROCESSOR_POWER6:
4826 rs6000_cost = &power6_cost;
4827 break;
4828
4829 case PROCESSOR_POWER7:
4830 rs6000_cost = &power7_cost;
4831 break;
4832
4833 case PROCESSOR_POWER8:
4834 rs6000_cost = &power8_cost;
4835 break;
4836
4837 case PROCESSOR_POWER9:
4838 rs6000_cost = &power9_cost;
4839 break;
4840
4841 case PROCESSOR_PPCA2:
4842 rs6000_cost = &ppca2_cost;
4843 break;
4844
4845 default:
4846 gcc_unreachable ();
4847 }
4848
4849 if (global_init_p)
4850 {
4851 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4852 rs6000_cost->simultaneous_prefetches,
4853 global_options.x_param_values,
4854 global_options_set.x_param_values);
4855 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4856 global_options.x_param_values,
4857 global_options_set.x_param_values);
4858 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4859 rs6000_cost->cache_line_size,
4860 global_options.x_param_values,
4861 global_options_set.x_param_values);
4862 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4863 global_options.x_param_values,
4864 global_options_set.x_param_values);
4865
4866 /* Increase loop peeling limits based on performance analysis. */
4867 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4868 global_options.x_param_values,
4869 global_options_set.x_param_values);
4870 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4871 global_options.x_param_values,
4872 global_options_set.x_param_values);
4873
4874 /* Use the 'model' -fsched-pressure algorithm by default. */
4875 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4876 SCHED_PRESSURE_MODEL,
4877 global_options.x_param_values,
4878 global_options_set.x_param_values);
4879
4880 /* If using typedef char *va_list, signal that
4881 __builtin_va_start (&ap, 0) can be optimized to
4882 ap = __builtin_next_arg (0). */
4883 if (DEFAULT_ABI != ABI_V4)
4884 targetm.expand_builtin_va_start = NULL;
4885 }
4886
4887 /* If not explicitly specified via option, decide whether to generate indexed
4888 load/store instructions. A value of -1 indicates that the
4889 initial value of this variable has not been overwritten. During
4890 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4891 if (TARGET_AVOID_XFORM == -1)
4892 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4893 DERAT mispredict penalty. However, the LVE and STVE altivec instructions
4894 need indexed accesses and the type used is the scalar type of the element
4895 being loaded or stored. */
4896 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4897 && !TARGET_ALTIVEC);
4898
4899 /* Set the -mrecip options. */
4900 if (rs6000_recip_name)
4901 {
4902 char *p = ASTRDUP (rs6000_recip_name);
4903 char *q;
4904 unsigned int mask, i;
4905 bool invert;
4906
4907 while ((q = strtok (p, ",")) != NULL)
4908 {
4909 p = NULL;
4910 if (*q == '!')
4911 {
4912 invert = true;
4913 q++;
4914 }
4915 else
4916 invert = false;
4917
4918 if (!strcmp (q, "default"))
4919 mask = ((TARGET_RECIP_PRECISION)
4920 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4921 else
4922 {
4923 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4924 if (!strcmp (q, recip_options[i].string))
4925 {
4926 mask = recip_options[i].mask;
4927 break;
4928 }
4929
4930 if (i == ARRAY_SIZE (recip_options))
4931 {
4932 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4933 invert = false;
4934 mask = 0;
4935 ret = false;
4936 }
4937 }
4938
4939 if (invert)
4940 rs6000_recip_control &= ~mask;
4941 else
4942 rs6000_recip_control |= mask;
4943 }
4944 }
4945
4946 /* Set the builtin mask from the various options in effect, since they can
4947 affect which builtins are available. In the past we used target_flags, but
4948 we've run out of bits, and some options are no longer in target_flags. */
4949 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4950 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4951 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4952 rs6000_builtin_mask);
4953
4954 /* Initialize all of the registers. */
4955 rs6000_init_hard_regno_mode_ok (global_init_p);
4956
4957 /* Save the initial options in case the user uses function-specific options. */
4958 if (global_init_p)
4959 target_option_default_node = target_option_current_node
4960 = build_target_option_node (&global_options);
4961
4962 /* If not explicitly specified via option, decide whether to generate the
4963 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4964 if (TARGET_LINK_STACK == -1)
4965 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4966
4967 /* Deprecate use of -mno-speculate-indirect-jumps. */
4968 if (!rs6000_speculate_indirect_jumps)
4969 warning (0, "%qs is deprecated and not recommended in any circumstances",
4970 "-mno-speculate-indirect-jumps");
4971
4972 return ret;
4973 }
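
/* A minimal standalone sketch (not GCC code) of the -mrecip parsing scheme
   above: a comma-separated list in which a leading '!' clears the named
   mask bits instead of setting them.  The option names and mask values
   here are invented for illustration.  */
#if 0
#include <string.h>

#define DEMO_RECIP_DIV  0x1
#define DEMO_RECIP_SQRT 0x2

static unsigned int
demo_parse_recip (char *p)
{
  unsigned int control = 0;
  char *q;

  while ((q = strtok (p, ",")) != NULL)
    {
      int invert = (*q == '!');
      unsigned int mask;

      p = NULL;	/* Continue tokenizing the same string.  */
      if (invert)
	q++;
      mask = (!strcmp (q, "div") ? DEMO_RECIP_DIV
	      : !strcmp (q, "sqrt") ? DEMO_RECIP_SQRT : 0);
      if (invert)
	control &= ~mask;
      else
	control |= mask;
    }

  /* demo_parse_recip ("div,sqrt,!div") yields DEMO_RECIP_SQRT.  */
  return control;
}
#endif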
4974
4975 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4976 define the target cpu type. */
4977
4978 static void
4979 rs6000_option_override (void)
4980 {
4981 (void) rs6000_option_override_internal (true);
4982 }
4983
4984 \f
4985 /* Implement targetm.vectorize.builtin_mask_for_load. */
4986 static tree
4987 rs6000_builtin_mask_for_load (void)
4988 {
4989 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4990 if ((TARGET_ALTIVEC && !TARGET_VSX)
4991 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4992 return altivec_builtin_mask_for_load;
4993 else
4994 return 0;
4995 }
4996
4997 /* Implement LOOP_ALIGN. */
4998 align_flags
4999 rs6000_loop_align (rtx label)
5000 {
5001 basic_block bb;
5002 int ninsns;
5003
5004 /* Don't override loop alignment if -falign-loops was specified. */
5005 if (!can_override_loop_align)
5006 return align_loops;
5007
5008 bb = BLOCK_FOR_INSN (label);
5009 ninsns = num_loop_insns (bb->loop_father);
5010
5011 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
5012 if (ninsns > 4 && ninsns <= 8
5013 && (rs6000_tune == PROCESSOR_POWER4
5014 || rs6000_tune == PROCESSOR_POWER5
5015 || rs6000_tune == PROCESSOR_POWER6
5016 || rs6000_tune == PROCESSOR_POWER7
5017 || rs6000_tune == PROCESSOR_POWER8))
5018 return align_flags (5);
5019 else
5020 return align_loops;
5021 }
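
/* A standalone sketch (not GCC code) of the decision above, with the
   align_flags encoding spelled out: align_flags (5) requests 2**5 = 32-byte
   alignment, enough to keep a 5 to 8 instruction loop within one 32-byte
   icache sector on the listed processors.  */
#if 0
static int
demo_loop_align_bytes (int ninsns, int power4_to_power8_p)
{
  if (ninsns > 4 && ninsns <= 8 && power4_to_power8_p)
    return 32;	/* align_flags (5), i.e. 1 << 5 bytes.  */
  return -1;	/* Fall back to the -falign-loops default.  */
}
#endif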
5022
5023 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5024 after applying N iterations. This routine does not determine how many
5025 iterations are required to reach the desired alignment. */
5026
5027 static bool
5028 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5029 {
5030 if (is_packed)
5031 return false;
5032
5033 if (TARGET_32BIT)
5034 {
5035 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5036 return true;
5037
5038 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5039 return true;
5040
5041 return false;
5042 }
5043 else
5044 {
5045 if (TARGET_MACHO)
5046 return false;
5047
5048 /* Assuming that all other types are naturally aligned. CHECKME! */
5049 return true;
5050 }
5051 }
5052
5053 /* Return true if the vector misalignment factor is supported by the
5054 target. */
5055 static bool
5056 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5057 const_tree type,
5058 int misalignment,
5059 bool is_packed)
5060 {
5061 if (TARGET_VSX)
5062 {
5063 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5064 return true;
5065
5066 /* Return false if the movmisalign pattern is not supported for this mode. */
5067 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5068 return false;
5069
5070 if (misalignment == -1)
5071 {
5072 /* Misalignment factor is unknown at compile time but we know
5073 it's word aligned. */
5074 if (rs6000_vector_alignment_reachable (type, is_packed))
5075 {
5076 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5077
5078 if (element_size == 64 || element_size == 32)
5079 return true;
5080 }
5081
5082 return false;
5083 }
5084
5085 /* VSX supports word-aligned vectors. */
5086 if (misalignment % 4 == 0)
5087 return true;
5088 }
5089 return false;
5090 }
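
/* Worked examples for the hook above, assuming VSX without efficient
   unaligned accesses: a known misalignment of 4, 8 or 12 bytes is accepted
   (word aligned), a misalignment of 1 or 2 bytes is rejected, and an
   unknown misalignment (-1) is accepted only for 32-bit or 64-bit elements
   whose natural alignment is reachable.  */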
5091
5092 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5093 static int
5094 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5095 tree vectype, int misalign)
5096 {
5097 unsigned elements;
5098 tree elem_type;
5099
5100 switch (type_of_cost)
5101 {
5102 case scalar_stmt:
5103 case scalar_load:
5104 case scalar_store:
5105 case vector_stmt:
5106 case vector_load:
5107 case vector_store:
5108 case vec_to_scalar:
5109 case scalar_to_vec:
5110 case cond_branch_not_taken:
5111 return 1;
5112
5113 case vec_perm:
5114 if (TARGET_VSX)
5115 return 3;
5116 else
5117 return 1;
5118
5119 case vec_promote_demote:
5120 if (TARGET_VSX)
5121 return 4;
5122 else
5123 return 1;
5124
5125 case cond_branch_taken:
5126 return 3;
5127
5128 case unaligned_load:
5129 case vector_gather_load:
5130 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5131 return 1;
5132
5133 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5134 {
5135 elements = TYPE_VECTOR_SUBPARTS (vectype);
5136 if (elements == 2)
5137 /* Double word aligned. */
5138 return 2;
5139
5140 if (elements == 4)
5141 {
5142 switch (misalign)
5143 {
5144 case 8:
5145 /* Double word aligned. */
5146 return 2;
5147
5148 case -1:
5149 /* Unknown misalignment. */
5150 case 4:
5151 case 12:
5152 /* Word aligned. */
5153 return 22;
5154
5155 default:
5156 gcc_unreachable ();
5157 }
5158 }
5159 }
5160
5161 if (TARGET_ALTIVEC)
5162 /* Misaligned loads are not supported. */
5163 gcc_unreachable ();
5164
5165 return 2;
5166
5167 case unaligned_store:
5168 case vector_scatter_store:
5169 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5170 return 1;
5171
5172 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5173 {
5174 elements = TYPE_VECTOR_SUBPARTS (vectype);
5175 if (elements == 2)
5176 /* Double word aligned. */
5177 return 2;
5178
5179 if (elements == 4)
5180 {
5181 switch (misalign)
5182 {
5183 case 8:
5184 /* Double word aligned. */
5185 return 2;
5186
5187 case -1:
5188 /* Unknown misalignment. */
5189 case 4:
5190 case 12:
5191 /* Word aligned. */
5192 return 23;
5193
5194 default:
5195 gcc_unreachable ();
5196 }
5197 }
5198 }
5199
5200 if (TARGET_ALTIVEC)
5201 /* Misaligned stores are not supported. */
5202 gcc_unreachable ();
5203
5204 return 2;
5205
5206 case vec_construct:
5207 /* This is a rough approximation assuming non-constant elements
5208 constructed into a vector via element insertion. FIXME:
5209 vec_construct is not granular enough for uniformly good
5210 decisions. If the initialization is a splat, this is
5211 cheaper than we estimate. Improve this someday. */
5212 elem_type = TREE_TYPE (vectype);
5213 /* 32-bit floats loaded into registers are stored as double
5214 precision, so we need 2 permutes, 2 converts, and 1 merge
5215 to construct a vector of short floats from them. */
5216 if (SCALAR_FLOAT_TYPE_P (elem_type)
5217 && TYPE_PRECISION (elem_type) == 32)
5218 return 5;
5219 /* On POWER9, integer vector types are built up in GPRs and then
5220 use a direct move (2 cycles). For POWER8 this is even worse,
5221 as we need two direct moves and a merge, and the direct moves
5222 are five cycles. */
5223 else if (INTEGRAL_TYPE_P (elem_type))
5224 {
5225 if (TARGET_P9_VECTOR)
5226 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5227 else
5228 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5229 }
5230 else
5231 /* V2DFmode doesn't need a direct move. */
5232 return 2;
5233
5234 default:
5235 gcc_unreachable ();
5236 }
5237 }
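
/* Worked examples for the cost table above, assuming VSX with
   -mallow-movmisalign but without efficient unaligned accesses:

     vector_stmt                       -> 1
     vec_perm                          -> 3
     unaligned_load, V4SI, misalign 8  -> 2   (double word aligned)
     unaligned_load, V4SI, misalign 4  -> 22  (word aligned only)
     unaligned_store, V4SI, misalign 4 -> 23

   On TARGET_EFFICIENT_UNALIGNED_VSX machines every unaligned case
   costs 1.  */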
5238
5239 /* Implement targetm.vectorize.preferred_simd_mode. */
5240
5241 static machine_mode
5242 rs6000_preferred_simd_mode (scalar_mode mode)
5243 {
5244 if (TARGET_VSX)
5245 switch (mode)
5246 {
5247 case E_DFmode:
5248 return V2DFmode;
5249 default:;
5250 }
5251 if (TARGET_ALTIVEC || TARGET_VSX)
5252 switch (mode)
5253 {
5254 case E_SFmode:
5255 return V4SFmode;
5256 case E_TImode:
5257 return V1TImode;
5258 case E_DImode:
5259 return V2DImode;
5260 case E_SImode:
5261 return V4SImode;
5262 case E_HImode:
5263 return V8HImode;
5264 case E_QImode:
5265 return V16QImode;
5266 default:;
5267 }
5268 return word_mode;
5269 }
5270
5271 typedef struct _rs6000_cost_data
5272 {
5273 struct loop *loop_info;
5274 unsigned cost[3];
5275 } rs6000_cost_data;
5276
5277 /* Test for likely overcommitment of vector hardware resources. If a
5278 loop iteration is relatively large, and too large a percentage of
5279 instructions in the loop are vectorized, the cost model may not
5280 adequately reflect delays from unavailable vector resources.
5281 Penalize the loop body cost for this case. */
5282
5283 static void
5284 rs6000_density_test (rs6000_cost_data *data)
5285 {
5286 const int DENSITY_PCT_THRESHOLD = 85;
5287 const int DENSITY_SIZE_THRESHOLD = 70;
5288 const int DENSITY_PENALTY = 10;
5289 struct loop *loop = data->loop_info;
5290 basic_block *bbs = get_loop_body (loop);
5291 int nbbs = loop->num_nodes;
5292 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5293 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5294 int i, density_pct;
5295
5296 for (i = 0; i < nbbs; i++)
5297 {
5298 basic_block bb = bbs[i];
5299 gimple_stmt_iterator gsi;
5300
5301 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5302 {
5303 gimple *stmt = gsi_stmt (gsi);
5304 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5305
5306 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5307 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5308 not_vec_cost++;
5309 }
5310 }
5311
5312 free (bbs);
5313 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5314
5315 if (density_pct > DENSITY_PCT_THRESHOLD
5316 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5317 {
5318 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5319 if (dump_enabled_p ())
5320 dump_printf_loc (MSG_NOTE, vect_location,
5321 "density %d%%, cost %d exceeds threshold, penalizing "
5322 "loop body cost by %d%%", density_pct,
5323 vec_cost + not_vec_cost, DENSITY_PENALTY);
5324 }
5325 }
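
/* A standalone sketch (not GCC code) of the penalty arithmetic above, with
   the thresholds inlined.  For example, vec_cost = 90 and not_vec_cost = 10
   give density_pct = 90; since 90 > 85 and 90 + 10 > 70, the body cost
   becomes 90 * 110 / 100 = 99.  */
#if 0
static int
demo_density_penalty (int vec_cost, int not_vec_cost)
{
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
  if (density_pct > 85 && vec_cost + not_vec_cost > 70)
    return vec_cost * (100 + 10) / 100;
  return vec_cost;
}
#endif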
5326
5327 /* Implement targetm.vectorize.init_cost. */
5328
5329 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5330 instruction is needed by the vectorization. */
5331 static bool rs6000_vect_nonmem;
5332
5333 static void *
5334 rs6000_init_cost (struct loop *loop_info)
5335 {
5336 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5337 data->loop_info = loop_info;
5338 data->cost[vect_prologue] = 0;
5339 data->cost[vect_body] = 0;
5340 data->cost[vect_epilogue] = 0;
5341 rs6000_vect_nonmem = false;
5342 return data;
5343 }
5344
5345 /* Implement targetm.vectorize.add_stmt_cost. */
5346
5347 static unsigned
5348 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5349 struct _stmt_vec_info *stmt_info, int misalign,
5350 enum vect_cost_model_location where)
5351 {
5352 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5353 unsigned retval = 0;
5354
5355 if (flag_vect_cost_model)
5356 {
5357 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5358 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5359 misalign);
5360 /* Statements in an inner loop relative to the loop being
5361 vectorized are weighted more heavily. The value here is
5362 arbitrary and could potentially be improved with analysis. */
5363 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5364 count *= 50; /* FIXME. */
5365
5366 retval = (unsigned) (count * stmt_cost);
5367 cost_data->cost[where] += retval;
5368
5369 /* Check whether we're doing something other than just a copy loop.
5370 Not all such loops may be profitably vectorized; see
5371 rs6000_finish_cost. */
5372 if ((kind == vec_to_scalar || kind == vec_perm
5373 || kind == vec_promote_demote || kind == vec_construct
5374 || kind == scalar_to_vec)
5375 || (where == vect_body && kind == vector_stmt))
5376 rs6000_vect_nonmem = true;
5377 }
5378
5379 return retval;
5380 }
5381
5382 /* Implement targetm.vectorize.finish_cost. */
5383
5384 static void
5385 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5386 unsigned *body_cost, unsigned *epilogue_cost)
5387 {
5388 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5389
5390 if (cost_data->loop_info)
5391 rs6000_density_test (cost_data);
5392
5393 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5394 that require versioning for any reason. The vectorization is at
5395 best a wash inside the loop, and the versioning checks make
5396 profitability highly unlikely and potentially quite harmful. */
5397 if (cost_data->loop_info)
5398 {
5399 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5400 if (!rs6000_vect_nonmem
5401 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5402 && LOOP_REQUIRES_VERSIONING (vec_info))
5403 cost_data->cost[vect_body] += 10000;
5404 }
5405
5406 *prologue_cost = cost_data->cost[vect_prologue];
5407 *body_cost = cost_data->cost[vect_body];
5408 *epilogue_cost = cost_data->cost[vect_epilogue];
5409 }
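
/* For example, a vectorization-factor-2 loop that merely copies memory but
   requires versioning gets its body cost inflated by 10000, which reliably
   makes the cost model reject vectorizing it.  */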
5410
5411 /* Implement targetm.vectorize.destroy_cost_data. */
5412
5413 static void
5414 rs6000_destroy_cost_data (void *data)
5415 {
5416 free (data);
5417 }
5418
5419 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5420 library with vectorized intrinsics. */
5421
5422 static tree
5423 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5424 tree type_in)
5425 {
5426 char name[32];
5427 const char *suffix = NULL;
5428 tree fntype, new_fndecl, bdecl = NULL_TREE;
5429 int n_args = 1;
5430 const char *bname;
5431 machine_mode el_mode, in_mode;
5432 int n, in_n;
5433
5434 /* Libmass is suitable for unsafe math only, as it does not correctly support
5435 parts of IEEE (such as denormals) with the required precision. Only support
5436 it if we have VSX to use the simd d2 or f4 functions.
5437 XXX: Add variable length support. */
5438 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5439 return NULL_TREE;
5440
5441 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5442 n = TYPE_VECTOR_SUBPARTS (type_out);
5443 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5444 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5445 if (el_mode != in_mode
5446 || n != in_n)
5447 return NULL_TREE;
5448
5449 switch (fn)
5450 {
5451 CASE_CFN_ATAN2:
5452 CASE_CFN_HYPOT:
5453 CASE_CFN_POW:
5454 n_args = 2;
5455 gcc_fallthrough ();
5456
5457 CASE_CFN_ACOS:
5458 CASE_CFN_ACOSH:
5459 CASE_CFN_ASIN:
5460 CASE_CFN_ASINH:
5461 CASE_CFN_ATAN:
5462 CASE_CFN_ATANH:
5463 CASE_CFN_CBRT:
5464 CASE_CFN_COS:
5465 CASE_CFN_COSH:
5466 CASE_CFN_ERF:
5467 CASE_CFN_ERFC:
5468 CASE_CFN_EXP2:
5469 CASE_CFN_EXP:
5470 CASE_CFN_EXPM1:
5471 CASE_CFN_LGAMMA:
5472 CASE_CFN_LOG10:
5473 CASE_CFN_LOG1P:
5474 CASE_CFN_LOG2:
5475 CASE_CFN_LOG:
5476 CASE_CFN_SIN:
5477 CASE_CFN_SINH:
5478 CASE_CFN_SQRT:
5479 CASE_CFN_TAN:
5480 CASE_CFN_TANH:
5481 if (el_mode == DFmode && n == 2)
5482 {
5483 bdecl = mathfn_built_in (double_type_node, fn);
5484 suffix = "d2"; /* pow -> powd2 */
5485 }
5486 else if (el_mode == SFmode && n == 4)
5487 {
5488 bdecl = mathfn_built_in (float_type_node, fn);
5489 suffix = "4"; /* powf -> powf4 */
5490 }
5491 else
5492 return NULL_TREE;
5493 if (!bdecl)
5494 return NULL_TREE;
5495 break;
5496
5497 default:
5498 return NULL_TREE;
5499 }
5500
5501 gcc_assert (suffix != NULL);
5502 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5503 if (!bname)
5504 return NULL_TREE;
5505
5506 strcpy (name, bname + sizeof ("__builtin_") - 1);
5507 strcat (name, suffix);
5508
5509 if (n_args == 1)
5510 fntype = build_function_type_list (type_out, type_in, NULL);
5511 else if (n_args == 2)
5512 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5513 else
5514 gcc_unreachable ();
5515
5516 /* Build a function declaration for the vectorized function. */
5517 new_fndecl = build_decl (BUILTINS_LOCATION,
5518 FUNCTION_DECL, get_identifier (name), fntype);
5519 TREE_PUBLIC (new_fndecl) = 1;
5520 DECL_EXTERNAL (new_fndecl) = 1;
5521 DECL_IS_NOVOPS (new_fndecl) = 1;
5522 TREE_READONLY (new_fndecl) = 1;
5523
5524 return new_fndecl;
5525 }
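
/* A standalone sketch (not GCC code) of the MASS name mangling above: strip
   the "__builtin_" prefix from the scalar builtin's name and append the
   vector suffix.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  char name[32];
  const char *bname = "__builtin_pow";

  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, "d2");
  puts (name);	/* Prints "powd2"; "__builtin_powf" + "4" gives "powf4".  */
  return 0;
}
#endif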
5526
5527 /* Returns a function decl for a vectorized version of the builtin function
5528 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5529 if it is not available. */
5530
5531 static tree
5532 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5533 tree type_in)
5534 {
5535 machine_mode in_mode, out_mode;
5536 int in_n, out_n;
5537
5538 if (TARGET_DEBUG_BUILTIN)
5539 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5540 combined_fn_name (combined_fn (fn)),
5541 GET_MODE_NAME (TYPE_MODE (type_out)),
5542 GET_MODE_NAME (TYPE_MODE (type_in)));
5543
5544 if (TREE_CODE (type_out) != VECTOR_TYPE
5545 || TREE_CODE (type_in) != VECTOR_TYPE)
5546 return NULL_TREE;
5547
5548 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5549 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5550 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5551 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5552
5553 switch (fn)
5554 {
5555 CASE_CFN_COPYSIGN:
5556 if (VECTOR_UNIT_VSX_P (V2DFmode)
5557 && out_mode == DFmode && out_n == 2
5558 && in_mode == DFmode && in_n == 2)
5559 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5560 if (VECTOR_UNIT_VSX_P (V4SFmode)
5561 && out_mode == SFmode && out_n == 4
5562 && in_mode == SFmode && in_n == 4)
5563 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5564 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5565 && out_mode == SFmode && out_n == 4
5566 && in_mode == SFmode && in_n == 4)
5567 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5568 break;
5569 CASE_CFN_CEIL:
5570 if (VECTOR_UNIT_VSX_P (V2DFmode)
5571 && out_mode == DFmode && out_n == 2
5572 && in_mode == DFmode && in_n == 2)
5573 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5574 if (VECTOR_UNIT_VSX_P (V4SFmode)
5575 && out_mode == SFmode && out_n == 4
5576 && in_mode == SFmode && in_n == 4)
5577 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5578 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5579 && out_mode == SFmode && out_n == 4
5580 && in_mode == SFmode && in_n == 4)
5581 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5582 break;
5583 CASE_CFN_FLOOR:
5584 if (VECTOR_UNIT_VSX_P (V2DFmode)
5585 && out_mode == DFmode && out_n == 2
5586 && in_mode == DFmode && in_n == 2)
5587 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5588 if (VECTOR_UNIT_VSX_P (V4SFmode)
5589 && out_mode == SFmode && out_n == 4
5590 && in_mode == SFmode && in_n == 4)
5591 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5592 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5593 && out_mode == SFmode && out_n == 4
5594 && in_mode == SFmode && in_n == 4)
5595 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5596 break;
5597 CASE_CFN_FMA:
5598 if (VECTOR_UNIT_VSX_P (V2DFmode)
5599 && out_mode == DFmode && out_n == 2
5600 && in_mode == DFmode && in_n == 2)
5601 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5602 if (VECTOR_UNIT_VSX_P (V4SFmode)
5603 && out_mode == SFmode && out_n == 4
5604 && in_mode == SFmode && in_n == 4)
5605 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5606 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5607 && out_mode == SFmode && out_n == 4
5608 && in_mode == SFmode && in_n == 4)
5609 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5610 break;
5611 CASE_CFN_TRUNC:
5612 if (VECTOR_UNIT_VSX_P (V2DFmode)
5613 && out_mode == DFmode && out_n == 2
5614 && in_mode == DFmode && in_n == 2)
5615 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5616 if (VECTOR_UNIT_VSX_P (V4SFmode)
5617 && out_mode == SFmode && out_n == 4
5618 && in_mode == SFmode && in_n == 4)
5619 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5620 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5621 && out_mode == SFmode && out_n == 4
5622 && in_mode == SFmode && in_n == 4)
5623 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5624 break;
5625 CASE_CFN_NEARBYINT:
5626 if (VECTOR_UNIT_VSX_P (V2DFmode)
5627 && flag_unsafe_math_optimizations
5628 && out_mode == DFmode && out_n == 2
5629 && in_mode == DFmode && in_n == 2)
5630 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5631 if (VECTOR_UNIT_VSX_P (V4SFmode)
5632 && flag_unsafe_math_optimizations
5633 && out_mode == SFmode && out_n == 4
5634 && in_mode == SFmode && in_n == 4)
5635 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5636 break;
5637 CASE_CFN_RINT:
5638 if (VECTOR_UNIT_VSX_P (V2DFmode)
5639 && !flag_trapping_math
5640 && out_mode == DFmode && out_n == 2
5641 && in_mode == DFmode && in_n == 2)
5642 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5643 if (VECTOR_UNIT_VSX_P (V4SFmode)
5644 && !flag_trapping_math
5645 && out_mode == SFmode && out_n == 4
5646 && in_mode == SFmode && in_n == 4)
5647 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5648 break;
5649 default:
5650 break;
5651 }
5652
5653 /* Generate calls to libmass if appropriate. */
5654 if (rs6000_veclib_handler)
5655 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5656
5657 return NULL_TREE;
5658 }
5659
5660 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5661
5662 static tree
5663 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5664 tree type_in)
5665 {
5666 machine_mode in_mode, out_mode;
5667 int in_n, out_n;
5668
5669 if (TARGET_DEBUG_BUILTIN)
5670 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5671 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5672 GET_MODE_NAME (TYPE_MODE (type_out)),
5673 GET_MODE_NAME (TYPE_MODE (type_in)));
5674
5675 if (TREE_CODE (type_out) != VECTOR_TYPE
5676 || TREE_CODE (type_in) != VECTOR_TYPE)
5677 return NULL_TREE;
5678
5679 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5680 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5681 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5682 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5683
5684 enum rs6000_builtins fn
5685 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5686 switch (fn)
5687 {
5688 case RS6000_BUILTIN_RSQRTF:
5689 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5690 && out_mode == SFmode && out_n == 4
5691 && in_mode == SFmode && in_n == 4)
5692 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5693 break;
5694 case RS6000_BUILTIN_RSQRT:
5695 if (VECTOR_UNIT_VSX_P (V2DFmode)
5696 && out_mode == DFmode && out_n == 2
5697 && in_mode == DFmode && in_n == 2)
5698 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5699 break;
5700 case RS6000_BUILTIN_RECIPF:
5701 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5702 && out_mode == SFmode && out_n == 4
5703 && in_mode == SFmode && in_n == 4)
5704 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5705 break;
5706 case RS6000_BUILTIN_RECIP:
5707 if (VECTOR_UNIT_VSX_P (V2DFmode)
5708 && out_mode == DFmode && out_n == 2
5709 && in_mode == DFmode && in_n == 2)
5710 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5711 break;
5712 default:
5713 break;
5714 }
5715 return NULL_TREE;
5716 }
5717 \f
5718 /* Default CPU string for rs6000*_file_start functions. */
5719 static const char *rs6000_default_cpu;
5720
5721 /* Do anything needed at the start of the asm file. */
5722
5723 static void
5724 rs6000_file_start (void)
5725 {
5726 char buffer[80];
5727 const char *start = buffer;
5728 FILE *file = asm_out_file;
5729
5730 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5731
5732 default_file_start ();
5733
5734 if (flag_verbose_asm)
5735 {
5736 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5737
5738 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5739 {
5740 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5741 start = "";
5742 }
5743
5744 if (global_options_set.x_rs6000_cpu_index)
5745 {
5746 fprintf (file, "%s -mcpu=%s", start,
5747 processor_target_table[rs6000_cpu_index].name);
5748 start = "";
5749 }
5750
5751 if (global_options_set.x_rs6000_tune_index)
5752 {
5753 fprintf (file, "%s -mtune=%s", start,
5754 processor_target_table[rs6000_tune_index].name);
5755 start = "";
5756 }
5757
5758 if (PPC405_ERRATUM77)
5759 {
5760 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5761 start = "";
5762 }
5763
5764 #ifdef USING_ELFOS_H
5765 switch (rs6000_sdata)
5766 {
5767 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5768 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5769 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5770 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5771 }
5772
5773 if (rs6000_sdata && g_switch_value)
5774 {
5775 fprintf (file, "%s -G %d", start,
5776 g_switch_value);
5777 start = "";
5778 }
5779 #endif
5780
5781 if (*start == '\0')
5782 putc ('\n', file);
5783 }
5784
5785 #ifdef USING_ELFOS_H
5786 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5787 && !global_options_set.x_rs6000_cpu_index)
5788 {
5789 fputs ("\t.machine ", asm_out_file);
5790 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5791 fputs ("power9\n", asm_out_file);
5792 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5793 fputs ("power8\n", asm_out_file);
5794 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5795 fputs ("power7\n", asm_out_file);
5796 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5797 fputs ("power6\n", asm_out_file);
5798 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5799 fputs ("power5\n", asm_out_file);
5800 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5801 fputs ("power4\n", asm_out_file);
5802 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5803 fputs ("ppc64\n", asm_out_file);
5804 else
5805 fputs ("ppc\n", asm_out_file);
5806 }
5807 #endif
5808
5809 if (DEFAULT_ABI == ABI_ELFv2)
5810 fprintf (file, "\t.abiversion 2\n");
5811 }
5812
5813 \f
5814 /* Return nonzero if this function is known to have a null epilogue. */
5815
5816 int
5817 direct_return (void)
5818 {
5819 if (reload_completed)
5820 {
5821 rs6000_stack_t *info = rs6000_stack_info ();
5822
5823 if (info->first_gp_reg_save == 32
5824 && info->first_fp_reg_save == 64
5825 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5826 && ! info->lr_save_p
5827 && ! info->cr_save_p
5828 && info->vrsave_size == 0
5829 && ! info->push_p)
5830 return 1;
5831 }
5832
5833 return 0;
5834 }
5835
5836 /* Helper for num_insns_constant. Calculate number of instructions to
5837 load VALUE to a single gpr using combinations of addi, addis, ori,
5838 oris and sldi instructions. */
5839
5840 static int
5841 num_insns_constant_gpr (HOST_WIDE_INT value)
5842 {
5843 /* signed constant loadable with addi */
5844 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5845 return 1;
5846
5847 /* constant loadable with addis */
5848 else if ((value & 0xffff) == 0
5849 && (value >> 31 == -1 || value >> 31 == 0))
5850 return 1;
5851
5852 else if (TARGET_POWERPC64)
5853 {
5854 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5855 HOST_WIDE_INT high = value >> 31;
5856
5857 if (high == 0 || high == -1)
5858 return 2;
5859
5860 high >>= 1;
5861
5862 if (low == 0)
5863 return num_insns_constant_gpr (high) + 1;
5864 else if (high == 0)
5865 return num_insns_constant_gpr (low) + 1;
5866 else
5867 return (num_insns_constant_gpr (high)
5868 + num_insns_constant_gpr (low) + 1);
5869 }
5870
5871 else
5872 return 2;
5873 }
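
/* Worked examples for the helper above (64-bit target):

     0x7fff             -> 1 insn   (addi)
     0x12340000         -> 1 insn   (addis)
     0x12345678         -> 2 insns  (addis; ori)
     0x1234567812345678 -> 5 insns  (addis; ori; sldi 32; oris; ori)

   The last case splits the value into its high and low 32-bit halves,
   costing 2 + 2 insns plus one sldi to combine them.  */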
5874
5875 /* Helper for num_insns_constant. Allow constants formed by the
5876 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5877 and handle modes that require multiple gprs. */
5878
5879 static int
5880 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5881 {
5882 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5883 int total = 0;
5884 while (nregs-- > 0)
5885 {
5886 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5887 int insns = num_insns_constant_gpr (low);
5888 if (insns > 2
5889 /* We won't get more than 2 from num_insns_constant_gpr
5890 except when TARGET_POWERPC64 and mode is DImode or
5891 wider, so the register mode must be DImode. */
5892 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5893 insns = 2;
5894 total += insns;
5895 value >>= BITS_PER_WORD;
5896 }
5897 return total;
5898 }
5899
5900 /* Return the number of instructions it takes to form a constant in as
5901 many gprs as are needed for MODE. */
5902
5903 int
5904 num_insns_constant (rtx op, machine_mode mode)
5905 {
5906 HOST_WIDE_INT val;
5907
5908 switch (GET_CODE (op))
5909 {
5910 case CONST_INT:
5911 val = INTVAL (op);
5912 break;
5913
5914 case CONST_WIDE_INT:
5915 {
5916 int insns = 0;
5917 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5918 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5919 DImode);
5920 return insns;
5921 }
5922
5923 case CONST_DOUBLE:
5924 {
5925 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5926
5927 if (mode == SFmode || mode == SDmode)
5928 {
5929 long l;
5930
5931 if (mode == SDmode)
5932 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5933 else
5934 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5935 /* See the first define_split in rs6000.md handling a
5936 const_double_operand. */
5937 val = l;
5938 mode = SImode;
5939 }
5940 else if (mode == DFmode || mode == DDmode)
5941 {
5942 long l[2];
5943
5944 if (mode == DDmode)
5945 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5946 else
5947 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5948
5949 /* See the second (32-bit) and third (64-bit) define_split
5950 in rs6000.md handling a const_double_operand. */
5951 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5952 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5953 mode = DImode;
5954 }
5955 else if (mode == TFmode || mode == TDmode
5956 || mode == KFmode || mode == IFmode)
5957 {
5958 long l[4];
5959 int insns;
5960
5961 if (mode == TDmode)
5962 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5963 else
5964 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5965
5966 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5967 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5968 insns = num_insns_constant_multi (val, DImode);
5969 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5970 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5971 insns += num_insns_constant_multi (val, DImode);
5972 return insns;
5973 }
5974 else
5975 gcc_unreachable ();
5976 }
5977 break;
5978
5979 default:
5980 gcc_unreachable ();
5981 }
5982
5983 return num_insns_constant_multi (val, mode);
5984 }
5985
5986 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5987 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5988 corresponding element of the vector, but for V4SFmode, the
5989 corresponding "float" is interpreted as an SImode integer. */
5990
5991 HOST_WIDE_INT
5992 const_vector_elt_as_int (rtx op, unsigned int elt)
5993 {
5994 rtx tmp;
5995
5996 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5997 gcc_assert (GET_MODE (op) != V2DImode
5998 && GET_MODE (op) != V2DFmode);
5999
6000 tmp = CONST_VECTOR_ELT (op, elt);
6001 if (GET_MODE (op) == V4SFmode)
6002 tmp = gen_lowpart (SImode, tmp);
6003 return INTVAL (tmp);
6004 }
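
/* For example, the V4SF element 1.0f reads back as the SImode value
   0x3f800000, its IEEE single-precision bit pattern.  A standalone
   illustration (not GCC code) of the same reinterpretation:  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  float f = 1.0f;
  unsigned int bits;

  memcpy (&bits, &f, sizeof bits);	/* Like gen_lowpart (SImode, x).  */
  printf ("%#x\n", bits);		/* Prints 0x3f800000.  */
  return 0;
}
#endif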
6005
6006 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6007 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6008 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6009 all items are set to the same value and contain COPIES replicas of the
6010 vsplt's operand; if STEP > 1, one in every STEP elements is set to the vsplt's
6011 operand and the others are set to the value of the operand's msb. */
6012
6013 static bool
6014 vspltis_constant (rtx op, unsigned step, unsigned copies)
6015 {
6016 machine_mode mode = GET_MODE (op);
6017 machine_mode inner = GET_MODE_INNER (mode);
6018
6019 unsigned i;
6020 unsigned nunits;
6021 unsigned bitsize;
6022 unsigned mask;
6023
6024 HOST_WIDE_INT val;
6025 HOST_WIDE_INT splat_val;
6026 HOST_WIDE_INT msb_val;
6027
6028 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6029 return false;
6030
6031 nunits = GET_MODE_NUNITS (mode);
6032 bitsize = GET_MODE_BITSIZE (inner);
6033 mask = GET_MODE_MASK (inner);
6034
6035 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6036 splat_val = val;
6037 msb_val = val >= 0 ? 0 : -1;
6038
6039 /* Construct the value to be splatted, if possible. If not, return false. */
6040 for (i = 2; i <= copies; i *= 2)
6041 {
6042 HOST_WIDE_INT small_val;
6043 bitsize /= 2;
6044 small_val = splat_val >> bitsize;
6045 mask >>= bitsize;
6046 if (splat_val != ((HOST_WIDE_INT)
6047 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6048 | (small_val & mask)))
6049 return false;
6050 splat_val = small_val;
6051 }
6052
6053 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6054 if (EASY_VECTOR_15 (splat_val))
6055 ;
6056
6057 /* Also check if we can splat, and then add the result to itself. Do so if
6058 the value is positive, or if the splat instruction is using OP's mode;
6059 for splat_val < 0, the splat and the add should use the same mode. */
6060 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6061 && (splat_val >= 0 || (step == 1 && copies == 1)))
6062 ;
6063
6064 /* Also check if we are loading up the most significant bit which can be done
6065 loading up -1 and shifting the value left by -1. */
6066 else if (EASY_VECTOR_MSB (splat_val, inner))
6067 ;
6068
6069 else
6070 return false;
6071
6072 /* Check if VAL is present in every STEP-th element, and the
6073 other elements are filled with its most significant bit. */
6074 for (i = 1; i < nunits; ++i)
6075 {
6076 HOST_WIDE_INT desired_val;
6077 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6078 if ((i & (step - 1)) == 0)
6079 desired_val = val;
6080 else
6081 desired_val = msb_val;
6082
6083 if (desired_val != const_vector_elt_as_int (op, elt))
6084 return false;
6085 }
6086
6087 return true;
6088 }
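
/* Worked examples for the predicate above:

     V4SI { 5, 5, 5, 5 }: with step = 1 and copies = 1 the splat value 5 is
     a valid vspltisw immediate (-16..15), so "vspltisw 5" works.

     V4SI { 0x70007, 0x70007, 0x70007, 0x70007 }: with copies = 2 each
     32-bit element is two replicas of the halfword 7, so the constant is
     really a V8HI splat and "vspltish 7" generates it.  */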
6089
6090 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6091 instruction, filling in the bottom elements with 0 or -1.
6092
6093 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6094 for the number of zeroes to shift in, or negative for the number of 0xff
6095 bytes to shift in.
6096
6097 OP is a CONST_VECTOR. */
6098
6099 int
6100 vspltis_shifted (rtx op)
6101 {
6102 machine_mode mode = GET_MODE (op);
6103 machine_mode inner = GET_MODE_INNER (mode);
6104
6105 unsigned i, j;
6106 unsigned nunits;
6107 unsigned mask;
6108
6109 HOST_WIDE_INT val;
6110
6111 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6112 return 0;
6113
6114 /* We need to create pseudo registers to do the shift, so don't recognize
6115 shift vector constants after reload. */
6116 if (!can_create_pseudo_p ())
6117 return 0;
6118
6119 nunits = GET_MODE_NUNITS (mode);
6120 mask = GET_MODE_MASK (inner);
6121
6122 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6123
6124 /* Check if the value can really be the operand of a vspltis[bhw]. */
6125 if (EASY_VECTOR_15 (val))
6126 ;
6127
6128 /* Also check if we are loading up the most significant bit which can be done
6129 by loading up -1 and shifting the value left by -1. */
6130 else if (EASY_VECTOR_MSB (val, inner))
6131 ;
6132
6133 else
6134 return 0;
6135
6136 /* Check if VAL is present in every element until we find elements
6137 that are 0 or all 1 bits. */
6138 for (i = 1; i < nunits; ++i)
6139 {
6140 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6141 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6142
6143 /* If the value isn't the splat value, check for the remaining elements
6144 being 0/-1. */
6145 if (val != elt_val)
6146 {
6147 if (elt_val == 0)
6148 {
6149 for (j = i+1; j < nunits; ++j)
6150 {
6151 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6152 if (const_vector_elt_as_int (op, elt2) != 0)
6153 return 0;
6154 }
6155
6156 return (nunits - i) * GET_MODE_SIZE (inner);
6157 }
6158
6159 else if ((elt_val & mask) == mask)
6160 {
6161 for (j = i+1; j < nunits; ++j)
6162 {
6163 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6164 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6165 return 0;
6166 }
6167
6168 return -((nunits - i) * GET_MODE_SIZE (inner));
6169 }
6170
6171 else
6172 return 0;
6173 }
6174 }
6175
6176 /* If all elements are equal, we don't need to do VSLDOI. */
6177 return 0;
6178 }
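
/* Worked examples of the return convention above, for big-endian V4SI:
   { 7, 0, 0, 0 } splats 7 and shifts in three zero words, so the function
   returns 12 (bytes of zeros); { 7, -1, -1, -1 } returns -12 (bytes of
   0xff); { 7, 0, 7, 0 } returns 0, since it is not expressible this way.  */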
6179
6180
6181 /* Return true if OP is of the given MODE and can be synthesized
6182 with a vspltisb, vspltish or vspltisw. */
6183
6184 bool
6185 easy_altivec_constant (rtx op, machine_mode mode)
6186 {
6187 unsigned step, copies;
6188
6189 if (mode == VOIDmode)
6190 mode = GET_MODE (op);
6191 else if (mode != GET_MODE (op))
6192 return false;
6193
6194 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6195 constants. */
6196 if (mode == V2DFmode)
6197 return zero_constant (op, mode);
6198
6199 else if (mode == V2DImode)
6200 {
6201 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6202 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6203 return false;
6204
6205 if (zero_constant (op, mode))
6206 return true;
6207
6208 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6209 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6210 return true;
6211
6212 return false;
6213 }
6214
6215 /* V1TImode is a special container for TImode. Ignore for now. */
6216 else if (mode == V1TImode)
6217 return false;
6218
6219 /* Start with a vspltisw. */
6220 step = GET_MODE_NUNITS (mode) / 4;
6221 copies = 1;
6222
6223 if (vspltis_constant (op, step, copies))
6224 return true;
6225
6226 /* Then try with a vspltish. */
6227 if (step == 1)
6228 copies <<= 1;
6229 else
6230 step >>= 1;
6231
6232 if (vspltis_constant (op, step, copies))
6233 return true;
6234
6235 /* And finally a vspltisb. */
6236 if (step == 1)
6237 copies <<= 1;
6238 else
6239 step >>= 1;
6240
6241 if (vspltis_constant (op, step, copies))
6242 return true;
6243
6244 if (vspltis_shifted (op) != 0)
6245 return true;
6246
6247 return false;
6248 }
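
/* The progression above for a V16QI constant: first try it as a V4SI splat
   (step 4, copies 1, vspltisw), then as a V8HI splat (step 2, vspltish),
   then as a V16QI splat (step 1, vspltisb), and finally as a shifted
   splat.  */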
6249
6250 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6251 result is OP. Abort if it is not possible. */
6252
6253 rtx
6254 gen_easy_altivec_constant (rtx op)
6255 {
6256 machine_mode mode = GET_MODE (op);
6257 int nunits = GET_MODE_NUNITS (mode);
6258 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6259 unsigned step = nunits / 4;
6260 unsigned copies = 1;
6261
6262 /* Start with a vspltisw. */
6263 if (vspltis_constant (op, step, copies))
6264 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6265
6266 /* Then try with a vspltish. */
6267 if (step == 1)
6268 copies <<= 1;
6269 else
6270 step >>= 1;
6271
6272 if (vspltis_constant (op, step, copies))
6273 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6274
6275 /* And finally a vspltisb. */
6276 if (step == 1)
6277 copies <<= 1;
6278 else
6279 step >>= 1;
6280
6281 if (vspltis_constant (op, step, copies))
6282 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6283
6284 gcc_unreachable ();
6285 }
6286
6287 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6288 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6289
6290 Return the number of instructions needed (1 or 2) in the location pointed
6291 to by NUM_INSNS_PTR.
6292
6293 Return the constant that is being split via CONSTANT_PTR. */
6294
6295 bool
6296 xxspltib_constant_p (rtx op,
6297 machine_mode mode,
6298 int *num_insns_ptr,
6299 int *constant_ptr)
6300 {
6301 size_t nunits = GET_MODE_NUNITS (mode);
6302 size_t i;
6303 HOST_WIDE_INT value;
6304 rtx element;
6305
6306 /* Set the returned values to out of bound values. */
6307 *num_insns_ptr = -1;
6308 *constant_ptr = 256;
6309
6310 if (!TARGET_P9_VECTOR)
6311 return false;
6312
6313 if (mode == VOIDmode)
6314 mode = GET_MODE (op);
6315
6316 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6317 return false;
6318
6319 /* Handle (vec_duplicate <constant>). */
6320 if (GET_CODE (op) == VEC_DUPLICATE)
6321 {
6322 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6323 && mode != V2DImode)
6324 return false;
6325
6326 element = XEXP (op, 0);
6327 if (!CONST_INT_P (element))
6328 return false;
6329
6330 value = INTVAL (element);
6331 if (!IN_RANGE (value, -128, 127))
6332 return false;
6333 }
6334
6335 /* Handle (const_vector [...]). */
6336 else if (GET_CODE (op) == CONST_VECTOR)
6337 {
6338 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6339 && mode != V2DImode)
6340 return false;
6341
6342 element = CONST_VECTOR_ELT (op, 0);
6343 if (!CONST_INT_P (element))
6344 return false;
6345
6346 value = INTVAL (element);
6347 if (!IN_RANGE (value, -128, 127))
6348 return false;
6349
6350 for (i = 1; i < nunits; i++)
6351 {
6352 element = CONST_VECTOR_ELT (op, i);
6353 if (!CONST_INT_P (element))
6354 return false;
6355
6356 if (value != INTVAL (element))
6357 return false;
6358 }
6359 }
6360
6361 /* Handle integer constants being loaded into the upper part of the VSX
6362 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6363 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6364 else if (CONST_INT_P (op))
6365 {
6366 if (!SCALAR_INT_MODE_P (mode))
6367 return false;
6368
6369 value = INTVAL (op);
6370 if (!IN_RANGE (value, -128, 127))
6371 return false;
6372
6373 if (!IN_RANGE (value, -1, 0))
6374 {
6375 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6376 return false;
6377
6378 if (EASY_VECTOR_15 (value))
6379 return false;
6380 }
6381 }
6382
6383 else
6384 return false;
6385
6386 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6387 sign extend. Special case 0/-1 to allow getting any VSX register instead
6388 of an Altivec register. */
6389 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6390 && EASY_VECTOR_15 (value))
6391 return false;
6392
6393 /* Return # of instructions and the constant byte for XXSPLTIB. */
6394 if (mode == V16QImode)
6395 *num_insns_ptr = 1;
6396
6397 else if (IN_RANGE (value, -1, 0))
6398 *num_insns_ptr = 1;
6399
6400 else
6401 *num_insns_ptr = 2;
6402
6403 *constant_ptr = (int) value;
6404 return true;
6405 }
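
/* Worked examples for the predicate above (requires ISA 3.0):

     V16QI splat of 12  -> 1 insn  (xxspltib)
     V8HI splat of 100  -> 2 insns (xxspltib; vupkhsb sign extension)
     V8HI splat of 7    -> rejected here, since vspltish 7 is cheaper
     splat of 0 or -1   -> 1 insn in any handled mode
     splat of 300       -> rejected, outside the -128..127 byte range.  */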
6406
6407 const char *
6408 output_vec_const_move (rtx *operands)
6409 {
6410 int shift;
6411 machine_mode mode;
6412 rtx dest, vec;
6413
6414 dest = operands[0];
6415 vec = operands[1];
6416 mode = GET_MODE (dest);
6417
6418 if (TARGET_VSX)
6419 {
6420 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6421 int xxspltib_value = 256;
6422 int num_insns = -1;
6423
6424 if (zero_constant (vec, mode))
6425 {
6426 if (TARGET_P9_VECTOR)
6427 return "xxspltib %x0,0";
6428
6429 else if (dest_vmx_p)
6430 return "vspltisw %0,0";
6431
6432 else
6433 return "xxlxor %x0,%x0,%x0";
6434 }
6435
6436 if (all_ones_constant (vec, mode))
6437 {
6438 if (TARGET_P9_VECTOR)
6439 return "xxspltib %x0,255";
6440
6441 else if (dest_vmx_p)
6442 return "vspltisw %0,-1";
6443
6444 else if (TARGET_P8_VECTOR)
6445 return "xxlorc %x0,%x0,%x0";
6446
6447 else
6448 gcc_unreachable ();
6449 }
6450
6451 if (TARGET_P9_VECTOR
6452 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6453 {
6454 if (num_insns == 1)
6455 {
6456 operands[2] = GEN_INT (xxspltib_value & 0xff);
6457 return "xxspltib %x0,%2";
6458 }
6459
6460 return "#";
6461 }
6462 }
6463
6464 if (TARGET_ALTIVEC)
6465 {
6466 rtx splat_vec;
6467
6468 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6469 if (zero_constant (vec, mode))
6470 return "vspltisw %0,0";
6471
6472 if (all_ones_constant (vec, mode))
6473 return "vspltisw %0,-1";
6474
6475 /* Do we need to construct a value using VSLDOI? */
6476 shift = vspltis_shifted (vec);
6477 if (shift != 0)
6478 return "#";
6479
6480 splat_vec = gen_easy_altivec_constant (vec);
6481 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6482 operands[1] = XEXP (splat_vec, 0);
6483 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6484 return "#";
6485
6486 switch (GET_MODE (splat_vec))
6487 {
6488 case E_V4SImode:
6489 return "vspltisw %0,%1";
6490
6491 case E_V8HImode:
6492 return "vspltish %0,%1";
6493
6494 case E_V16QImode:
6495 return "vspltisb %0,%1";
6496
6497 default:
6498 gcc_unreachable ();
6499 }
6500 }
6501
6502 gcc_unreachable ();
6503 }
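
/* Examples of the selection above: an all-zero vector becomes
   "xxspltib %x0,0" on ISA 3.0, "vspltisw %0,0" in an Altivec register, or
   "xxlxor %x0,%x0,%x0" otherwise; an all-ones vector becomes
   "xxspltib %x0,255", "vspltisw %0,-1" or "xxlorc %x0,%x0,%x0".  */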
6504
6505 /* Initialize vector TARGET to VALS. */
6506
6507 void
6508 rs6000_expand_vector_init (rtx target, rtx vals)
6509 {
6510 machine_mode mode = GET_MODE (target);
6511 machine_mode inner_mode = GET_MODE_INNER (mode);
6512 int n_elts = GET_MODE_NUNITS (mode);
6513 int n_var = 0, one_var = -1;
6514 bool all_same = true, all_const_zero = true;
6515 rtx x, mem;
6516 int i;
6517
6518 for (i = 0; i < n_elts; ++i)
6519 {
6520 x = XVECEXP (vals, 0, i);
6521 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6522 ++n_var, one_var = i;
6523 else if (x != CONST0_RTX (inner_mode))
6524 all_const_zero = false;
6525
6526 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6527 all_same = false;
6528 }
6529
6530 if (n_var == 0)
6531 {
6532 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6533 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6534 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6535 {
6536 /* Zero register. */
6537 emit_move_insn (target, CONST0_RTX (mode));
6538 return;
6539 }
6540 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6541 {
6542 /* Splat immediate. */
6543 emit_insn (gen_rtx_SET (target, const_vec));
6544 return;
6545 }
6546 else
6547 {
6548 /* Load from constant pool. */
6549 emit_move_insn (target, const_vec);
6550 return;
6551 }
6552 }
6553
6554 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6555 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6556 {
6557 rtx op[2];
6558 size_t i;
6559 size_t num_elements = all_same ? 1 : 2;
6560 for (i = 0; i < num_elements; i++)
6561 {
6562 op[i] = XVECEXP (vals, 0, i);
6563 /* Just in case there is a SUBREG with a smaller mode, do a
6564 conversion. */
6565 if (GET_MODE (op[i]) != inner_mode)
6566 {
6567 rtx tmp = gen_reg_rtx (inner_mode);
6568 convert_move (tmp, op[i], 0);
6569 op[i] = tmp;
6570 }
6571 /* Allow load with splat double word. */
6572 else if (MEM_P (op[i]))
6573 {
6574 if (!all_same)
6575 op[i] = force_reg (inner_mode, op[i]);
6576 }
6577 else if (!REG_P (op[i]))
6578 op[i] = force_reg (inner_mode, op[i]);
6579 }
6580
6581 if (all_same)
6582 {
6583 if (mode == V2DFmode)
6584 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6585 else
6586 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6587 }
6588 else
6589 {
6590 if (mode == V2DFmode)
6591 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6592 else
6593 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6594 }
6595 return;
6596 }
6597
6598 /* Special case initializing vector int if we are on 64-bit systems with
6599 direct move or we have the ISA 3.0 instructions. */
6600 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6601 && TARGET_DIRECT_MOVE_64BIT)
6602 {
6603 if (all_same)
6604 {
6605 rtx element0 = XVECEXP (vals, 0, 0);
6606 if (MEM_P (element0))
6607 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6608 else
6609 element0 = force_reg (SImode, element0);
6610
6611 if (TARGET_P9_VECTOR)
6612 emit_insn (gen_vsx_splat_v4si (target, element0));
6613 else
6614 {
6615 rtx tmp = gen_reg_rtx (DImode);
6616 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6617 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6618 }
6619 return;
6620 }
6621 else
6622 {
6623 rtx elements[4];
6624 size_t i;
6625
6626 for (i = 0; i < 4; i++)
6627 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6628
6629 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6630 elements[2], elements[3]));
6631 return;
6632 }
6633 }
6634
6635 /* With single precision floating point on VSX, note that internally single
6636 precision is actually represented as double precision. Either make 2 V2DF
6637 vectors and convert those vectors to single precision, or do one
6638 conversion and splat the result to the other elements. */
6639 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6640 {
6641 if (all_same)
6642 {
6643 rtx element0 = XVECEXP (vals, 0, 0);
6644
6645 if (TARGET_P9_VECTOR)
6646 {
6647 if (MEM_P (element0))
6648 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6649
6650 emit_insn (gen_vsx_splat_v4sf (target, element0));
6651 }
6652
6653 else
6654 {
6655 rtx freg = gen_reg_rtx (V4SFmode);
6656 rtx sreg = force_reg (SFmode, element0);
6657 rtx cvt = (TARGET_XSCVDPSPN
6658 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6659 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6660
6661 emit_insn (cvt);
6662 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6663 const0_rtx));
6664 }
6665 }
6666 else
6667 {
6668 rtx dbl_even = gen_reg_rtx (V2DFmode);
6669 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6670 rtx flt_even = gen_reg_rtx (V4SFmode);
6671 rtx flt_odd = gen_reg_rtx (V4SFmode);
6672 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6673 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6674 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6675 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6676
6677 /* Use VMRGEW if we can instead of doing a permute. */
6678 if (TARGET_P8_VECTOR)
6679 {
6680 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6681 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6682 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6683 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6684 if (BYTES_BIG_ENDIAN)
6685 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6686 else
6687 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6688 }
6689 else
6690 {
6691 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6692 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6693 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6694 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6695 rs6000_expand_extract_even (target, flt_even, flt_odd);
6696 }
6697 }
6698 return;
6699 }
6700
6701 /* Special case initializing vector short/char that are splats if we are on
6702 64-bit systems with direct move. */
6703 if (all_same && TARGET_DIRECT_MOVE_64BIT
6704 && (mode == V16QImode || mode == V8HImode))
6705 {
6706 rtx op0 = XVECEXP (vals, 0, 0);
6707 rtx di_tmp = gen_reg_rtx (DImode);
6708
6709 if (!REG_P (op0))
6710 op0 = force_reg (GET_MODE_INNER (mode), op0);
6711
6712 if (mode == V16QImode)
6713 {
6714 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6715 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6716 return;
6717 }
6718
6719 if (mode == V8HImode)
6720 {
6721 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6722 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6723 return;
6724 }
6725 }
6726
6727 /* Store value to stack temp. Load vector element. Splat. However, splat
6728 of 64-bit items is not supported on Altivec. */
6729 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6730 {
6731 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6732 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6733 XVECEXP (vals, 0, 0));
6734 x = gen_rtx_UNSPEC (VOIDmode,
6735 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6736 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6737 gen_rtvec (2,
6738 gen_rtx_SET (target, mem),
6739 x)));
6740 x = gen_rtx_VEC_SELECT (inner_mode, target,
6741 gen_rtx_PARALLEL (VOIDmode,
6742 gen_rtvec (1, const0_rtx)));
6743 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6744 return;
6745 }
6746
6747 /* One field is non-constant. Load constant then overwrite
6748 varying field. */
6749 if (n_var == 1)
6750 {
6751 rtx copy = copy_rtx (vals);
6752
6753 /* Load constant part of vector, substitute neighboring value for
6754 varying element. */
6755 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6756 rs6000_expand_vector_init (target, copy);
6757
6758 /* Insert variable. */
6759 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6760 return;
6761 }
6762
6763 /* Construct the vector in memory one field at a time
6764 and load the whole vector. */
6765 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6766 for (i = 0; i < n_elts; i++)
6767 emit_move_insn (adjust_address_nv (mem, inner_mode,
6768 i * GET_MODE_SIZE (inner_mode)),
6769 XVECEXP (vals, 0, i));
6770 emit_move_insn (target, mem);
6771 }
6772
6773 /* Set field ELT of TARGET to VAL. */
6774
6775 void
6776 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6777 {
6778 machine_mode mode = GET_MODE (target);
6779 machine_mode inner_mode = GET_MODE_INNER (mode);
6780 rtx reg = gen_reg_rtx (mode);
6781 rtx mask, mem, x;
6782 int width = GET_MODE_SIZE (inner_mode);
6783 int i;
6784
6785 val = force_reg (GET_MODE (val), val);
6786
6787 if (VECTOR_MEM_VSX_P (mode))
6788 {
6789 rtx insn = NULL_RTX;
6790 rtx elt_rtx = GEN_INT (elt);
6791
6792 if (mode == V2DFmode)
6793 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6794
6795 else if (mode == V2DImode)
6796 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6797
6798 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6799 {
6800 if (mode == V4SImode)
6801 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6802 else if (mode == V8HImode)
6803 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6804 else if (mode == V16QImode)
6805 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6806 else if (mode == V4SFmode)
6807 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6808 }
6809
6810 if (insn)
6811 {
6812 emit_insn (insn);
6813 return;
6814 }
6815 }
6816
6817 /* Simplify setting single element vectors like V1TImode. */
6818 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6819 {
6820 emit_move_insn (target, gen_lowpart (mode, val));
6821 return;
6822 }
6823
6824 /* Load single variable value. */
6825 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6826 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6827 x = gen_rtx_UNSPEC (VOIDmode,
6828 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6829 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6830 gen_rtvec (2,
6831 gen_rtx_SET (reg, mem),
6832 x)));
6833
6834 /* Linear sequence. */
6835 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6836 for (i = 0; i < 16; ++i)
6837 XVECEXP (mask, 0, i) = GEN_INT (i);
6838
6839 /* Set permute mask to insert element into target. */
6840 for (i = 0; i < width; ++i)
6841 XVECEXP (mask, 0, elt*width + i)
6842 = GEN_INT (i + 0x10);
6843 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6844
6845 if (BYTES_BIG_ENDIAN)
6846 x = gen_rtx_UNSPEC (mode,
6847 gen_rtvec (3, target, reg,
6848 force_reg (V16QImode, x)),
6849 UNSPEC_VPERM);
6850 else
6851 {
6852 if (TARGET_P9_VECTOR)
6853 x = gen_rtx_UNSPEC (mode,
6854 gen_rtvec (3, reg, target,
6855 force_reg (V16QImode, x)),
6856 UNSPEC_VPERMR);
6857 else
6858 {
6859 /* Invert selector. We prefer to generate VNAND on P8 so
6860 that future fusion opportunities can kick in, but must
6861 generate VNOR elsewhere. */
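/* (ior (not x) (not x)) is the canonical RTL form of nand, and
   (and (not x) (not x)) the canonical form of nor, so the RTXes
   built below match the vnand/vnor patterns even though both
   operands are the same.  */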
6862 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6863 rtx iorx = (TARGET_P8_VECTOR
6864 ? gen_rtx_IOR (V16QImode, notx, notx)
6865 : gen_rtx_AND (V16QImode, notx, notx));
6866 rtx tmp = gen_reg_rtx (V16QImode);
6867 emit_insn (gen_rtx_SET (tmp, iorx));
6868
6869 /* Permute with operands reversed and adjusted selector. */
6870 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6871 UNSPEC_VPERM);
6872 }
6873 }
6874
6875 emit_insn (gen_rtx_SET (target, x));
6876 }
6877
6878 /* Extract field ELT from VEC into TARGET. */
6879
6880 void
6881 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6882 {
6883 machine_mode mode = GET_MODE (vec);
6884 machine_mode inner_mode = GET_MODE_INNER (mode);
6885 rtx mem;
6886
6887 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6888 {
6889 switch (mode)
6890 {
6891 default:
6892 break;
6893 case E_V1TImode:
6894 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6895 emit_move_insn (target, gen_lowpart (TImode, vec));
6896 break;
6897 case E_V2DFmode:
6898 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6899 return;
6900 case E_V2DImode:
6901 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6902 return;
6903 case E_V4SFmode:
6904 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6905 return;
6906 case E_V16QImode:
6907 if (TARGET_DIRECT_MOVE_64BIT)
6908 {
6909 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6910 return;
6911 }
6912 else
6913 break;
6914 case E_V8HImode:
6915 if (TARGET_DIRECT_MOVE_64BIT)
6916 {
6917 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6918 return;
6919 }
6920 else
6921 break;
6922 case E_V4SImode:
6923 if (TARGET_DIRECT_MOVE_64BIT)
6924 {
6925 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6926 return;
6927 }
6928 break;
6929 }
6930 }
6931 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6932 && TARGET_DIRECT_MOVE_64BIT)
6933 {
6934 if (GET_MODE (elt) != DImode)
6935 {
6936 rtx tmp = gen_reg_rtx (DImode);
6937 convert_move (tmp, elt, 0);
6938 elt = tmp;
6939 }
6940 else if (!REG_P (elt))
6941 elt = force_reg (DImode, elt);
6942
6943 switch (mode)
6944 {
6945 case E_V2DFmode:
6946 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6947 return;
6948
6949 case E_V2DImode:
6950 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6951 return;
6952
6953 case E_V4SFmode:
6954 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6955 return;
6956
6957 case E_V4SImode:
6958 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6959 return;
6960
6961 case E_V8HImode:
6962 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6963 return;
6964
6965 case E_V16QImode:
6966 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6967 return;
6968
6969 default:
6970 gcc_unreachable ();
6971 }
6972 }
6973
6974 gcc_assert (CONST_INT_P (elt));
6975
6976 /* Allocate mode-sized buffer. */
6977 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6978
6979 emit_move_insn (mem, vec);
6980
6981 /* Add offset to field within buffer matching vector element. */
6982 mem = adjust_address_nv (mem, inner_mode,
6983 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6984
6985 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6986 }
6987
6988 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6989 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6990 temporary (BASE_TMP) to fix up the address. Return the new memory address
6991 that is valid for reads or writes to a given register (SCALAR_REG). */
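/* For example (a sketch, not from the sources): extracting SImode
   element 2 from a V4SI vector stored at (r9 + 16) yields the element
   address (r9 + 24), which is folded into the D-form offset as long as
   it stays in the signed 16-bit range; for a variable element number N,
   the code below instead shifts N left by log2 (element size) into
   BASE_TMP and adds that to the base address.  */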
6992
6993 rtx
6994 rs6000_adjust_vec_address (rtx scalar_reg,
6995 rtx mem,
6996 rtx element,
6997 rtx base_tmp,
6998 machine_mode scalar_mode)
6999 {
7000 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7001 rtx addr = XEXP (mem, 0);
7002 rtx element_offset;
7003 rtx new_addr;
7004 bool valid_addr_p;
7005
7006 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7007 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7008
7009 /* Calculate what we need to add to the address to get the element
7010 address. */
7011 if (CONST_INT_P (element))
7012 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7013 else
7014 {
7015 int byte_shift = exact_log2 (scalar_size);
7016 gcc_assert (byte_shift >= 0);
7017
7018 if (byte_shift == 0)
7019 element_offset = element;
7020
7021 else
7022 {
7023 if (TARGET_POWERPC64)
7024 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7025 else
7026 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7027
7028 element_offset = base_tmp;
7029 }
7030 }
7031
7032 /* Create the new address pointing to the element within the vector. If we
7033 are adding 0, we don't have to change the address. */
7034 if (element_offset == const0_rtx)
7035 new_addr = addr;
7036
7037 /* A simple indirect address can be converted into a reg + offset
7038 address. */
7039 else if (REG_P (addr) || SUBREG_P (addr))
7040 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7041
7042 /* Optimize D-FORM addresses with a constant offset and a constant element
7043 number, folding the element offset into the address directly. */
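/* E.g. (hypothetical numbers) a base address (r9 + 32760) with element
   offset 16 sums to 32776, which is outside the signed 16-bit D-form
   range, so the combined offset is loaded into BASE_TMP instead.  */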
7044 else if (GET_CODE (addr) == PLUS)
7045 {
7046 rtx op0 = XEXP (addr, 0);
7047 rtx op1 = XEXP (addr, 1);
7048 rtx insn;
7049
7050 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7051 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7052 {
7053 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7054 rtx offset_rtx = GEN_INT (offset);
7055
7056 if (IN_RANGE (offset, -32768, 32767)
7057 && (scalar_size < 8 || (offset & 0x3) == 0))
7058 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7059 else
7060 {
7061 emit_move_insn (base_tmp, offset_rtx);
7062 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7063 }
7064 }
7065 else
7066 {
7067 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7068 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7069
7070 /* Note that ADDI requires the register being added to be a base
7071 register. If the register is R0, load it into the temporary
7072 and do the add. */
7073 if (op1_reg_p
7074 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7075 {
7076 insn = gen_add3_insn (base_tmp, op1, element_offset);
7077 gcc_assert (insn != NULL_RTX);
7078 emit_insn (insn);
7079 }
7080
7081 else if (ele_reg_p
7082 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7083 {
7084 insn = gen_add3_insn (base_tmp, element_offset, op1);
7085 gcc_assert (insn != NULL_RTX);
7086 emit_insn (insn);
7087 }
7088
7089 else
7090 {
7091 emit_move_insn (base_tmp, op1);
7092 emit_insn (gen_add2_insn (base_tmp, element_offset));
7093 }
7094
7095 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7096 }
7097 }
7098
7099 else
7100 {
7101 emit_move_insn (base_tmp, addr);
7102 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7103 }
7104
7105 /* If we have a PLUS, we need to see whether the particular register class
7106 allows for D-FORM or X-FORM addressing. */
7107 if (GET_CODE (new_addr) == PLUS)
7108 {
7109 rtx op1 = XEXP (new_addr, 1);
7110 addr_mask_type addr_mask;
7111 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7112
7113 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7114 if (INT_REGNO_P (scalar_regno))
7115 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7116
7117 else if (FP_REGNO_P (scalar_regno))
7118 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7119
7120 else if (ALTIVEC_REGNO_P (scalar_regno))
7121 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7122
7123 else
7124 gcc_unreachable ();
7125
7126 if (REG_P (op1) || SUBREG_P (op1))
7127 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7128 else
7129 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7130 }
7131
7132 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7133 valid_addr_p = true;
7134
7135 else
7136 valid_addr_p = false;
7137
7138 if (!valid_addr_p)
7139 {
7140 emit_move_insn (base_tmp, new_addr);
7141 new_addr = base_tmp;
7142 }
7143
7144 return change_address (mem, scalar_mode, new_addr);
7145 }
7146
7147 /* Split a variable vec_extract operation into the component instructions. */
7148
7149 void
7150 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7151 rtx tmp_altivec)
7152 {
7153 machine_mode mode = GET_MODE (src);
7154 machine_mode scalar_mode = GET_MODE (dest);
7155 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7156 int byte_shift = exact_log2 (scalar_size);
7157
7158 gcc_assert (byte_shift >= 0);
7159
7160 /* If we are given a memory address, optimize to load just the element. We
7161 don't have to adjust the vector element number on little endian
7162 systems. */
7163 if (MEM_P (src))
7164 {
7165 gcc_assert (REG_P (tmp_gpr));
7166 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7167 tmp_gpr, scalar_mode));
7168 return;
7169 }
7170
7171 else if (REG_P (src) || SUBREG_P (src))
7172 {
7173 int bit_shift = byte_shift + 3;
7174 rtx element2;
7175 unsigned int dest_regno = reg_or_subregno (dest);
7176 unsigned int src_regno = reg_or_subregno (src);
7177 unsigned int element_regno = reg_or_subregno (element);
7178
7179 gcc_assert (REG_P (tmp_gpr));
7180
7181 /* See if we want to generate VEXTU{B,H,W}{L,R}X when the destination is
7182 in a general purpose register. */
7183 if (TARGET_P9_VECTOR
7184 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7185 && INT_REGNO_P (dest_regno)
7186 && ALTIVEC_REGNO_P (src_regno)
7187 && INT_REGNO_P (element_regno))
7188 {
7189 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7190 rtx element_si = gen_rtx_REG (SImode, element_regno);
7191
7192 if (mode == V16QImode)
7193 emit_insn (BYTES_BIG_ENDIAN
7194 ? gen_vextublx (dest_si, element_si, src)
7195 : gen_vextubrx (dest_si, element_si, src));
7196
7197 else if (mode == V8HImode)
7198 {
7199 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7200 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7201 emit_insn (BYTES_BIG_ENDIAN
7202 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7203 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7204 }
7205
7207 else
7208 {
7209 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7210 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7211 emit_insn (BYTES_BIG_ENDIAN
7212 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7213 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7214 }
7215
7216 return;
7217 }
7218
7220 gcc_assert (REG_P (tmp_altivec));
7221
7222 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7223 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7224 will shift the element into the upper position (adding 3 to convert a
7225 byte shift into a bit shift). */
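/* Worked example (illustrative): for V4SI on little-endian, element 1
   is masked to 1, then 3 - 1 = 2 reverses the element ordering, and
   2 << 5 = 64 is the bit offset of that element from the big-endian
   left end; VSLO consumes this as a byte count taken from bits 121:124,
   i.e. the bit count divided by 8.  */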
7226 if (scalar_size == 8)
7227 {
7228 if (!BYTES_BIG_ENDIAN)
7229 {
7230 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7231 element2 = tmp_gpr;
7232 }
7233 else
7234 element2 = element;
7235
7236 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7237 bit. */
7238 emit_insn (gen_rtx_SET (tmp_gpr,
7239 gen_rtx_AND (DImode,
7240 gen_rtx_ASHIFT (DImode,
7241 element2,
7242 GEN_INT (6)),
7243 GEN_INT (64))));
7244 }
7245 else
7246 {
7247 if (!BYTES_BIG_ENDIAN)
7248 {
7249 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7250
7251 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7252 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7253 element2 = tmp_gpr;
7254 }
7255 else
7256 element2 = element;
7257
7258 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7259 }
7260
7261 /* Get the value into the lower byte of the Altivec register where VSLO
7262 expects it. */
7263 if (TARGET_P9_VECTOR)
7264 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7265 else if (can_create_pseudo_p ())
7266 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7267 else
7268 {
7269 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7270 emit_move_insn (tmp_di, tmp_gpr);
7271 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7272 }
7273
7274 /* Do the VSLO to get the value into the final location. */
7275 switch (mode)
7276 {
7277 case E_V2DFmode:
7278 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7279 return;
7280
7281 case E_V2DImode:
7282 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7283 return;
7284
7285 case E_V4SFmode:
7286 {
7287 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7288 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7289 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7290 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7291 tmp_altivec));
7292
7293 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7294 return;
7295 }
7296
7297 case E_V4SImode:
7298 case E_V8HImode:
7299 case E_V16QImode:
7300 {
7301 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7302 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7303 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7304 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7305 tmp_altivec));
7306 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7307 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7308 GEN_INT (64 - (8 * scalar_size))));
7309 return;
7310 }
7311
7312 default:
7313 gcc_unreachable ();
7314 }
7315
7316 return;
7317 }
7318 else
7319 gcc_unreachable ();
7320 }
7321
7322 /* Return the alignment of TYPE. The existing alignment is ALIGN. HOW
7323 selects whether the alignment is ABI-mandated, optional, or
7324 both ABI-mandated and optional. */
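/* For example, a vector type whose computed alignment is 64 bits is
   raised to 128 bits unless only optional alignment was requested, and
   a char array is raised to BITS_PER_WORD unless only ABI alignment
   was requested.  */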
7325
7326 unsigned int
7327 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7328 {
7329 if (how != align_opt)
7330 {
7331 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7332 align = 128;
7333 }
7334
7335 if (how != align_abi)
7336 {
7337 if (TREE_CODE (type) == ARRAY_TYPE
7338 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7339 {
7340 if (align < BITS_PER_WORD)
7341 align = BITS_PER_WORD;
7342 }
7343 }
7344
7345 return align;
7346 }
7347
7348 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7349 instructions simply ignore the low bits; VSX memory instructions
7350 are aligned to 4 or 8 bytes. */
7351
7352 static bool
7353 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7354 {
7355 return (STRICT_ALIGNMENT
7356 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7357 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7358 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7359 && (int) align < VECTOR_ALIGN (mode)))));
7360 }
7361
7362 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7363
7364 bool
7365 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7366 {
7367 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7368 {
7369 if (computed != 128)
7370 {
7371 static bool warned;
7372 if (!warned && warn_psabi)
7373 {
7374 warned = true;
7375 inform (input_location,
7376 "the layout of aggregates containing vectors with"
7377 " %d-byte alignment has changed in GCC 5",
7378 computed / BITS_PER_UNIT);
7379 }
7380 }
7381 /* In current GCC there is no special case. */
7382 return false;
7383 }
7384
7385 return false;
7386 }
7387
7388 /* AIX increases natural record alignment to doubleword if the first
7389 field is an FP double while the FP fields remain word aligned. */
7390
7391 unsigned int
7392 rs6000_special_round_type_align (tree type, unsigned int computed,
7393 unsigned int specified)
7394 {
7395 unsigned int align = MAX (computed, specified);
7396 tree field = TYPE_FIELDS (type);
7397
7398 /* Skip all non-field decls. */
7399 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7400 field = DECL_CHAIN (field);
7401
7402 if (field != NULL && field != type)
7403 {
7404 type = TREE_TYPE (field);
7405 while (TREE_CODE (type) == ARRAY_TYPE)
7406 type = TREE_TYPE (type);
7407
7408 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7409 align = MAX (align, 64);
7410 }
7411
7412 return align;
7413 }
7414
7415 /* Darwin increases record alignment to the natural alignment of
7416 the first field. */
7417
7418 unsigned int
7419 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7420 unsigned int specified)
7421 {
7422 unsigned int align = MAX (computed, specified);
7423
7424 if (TYPE_PACKED (type))
7425 return align;
7426
7427 /* Find the first field, looking down into aggregates. */
7428 do {
7429 tree field = TYPE_FIELDS (type);
7430 /* Skip all non-field decls. */
7431 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7432 field = DECL_CHAIN (field);
7433 if (! field)
7434 break;
7435 /* A packed field does not contribute any extra alignment. */
7436 if (DECL_PACKED (field))
7437 return align;
7438 type = TREE_TYPE (field);
7439 while (TREE_CODE (type) == ARRAY_TYPE)
7440 type = TREE_TYPE (type);
7441 } while (AGGREGATE_TYPE_P (type));
7442
7443 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7444 align = MAX (align, TYPE_ALIGN (type));
7445
7446 return align;
7447 }
7448
7449 /* Return 1 for an operand in small memory on V.4/eabi. */
7450
7451 int
7452 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7453 machine_mode mode ATTRIBUTE_UNUSED)
7454 {
7455 #if TARGET_ELF
7456 rtx sym_ref;
7457
7458 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7459 return 0;
7460
7461 if (DEFAULT_ABI != ABI_V4)
7462 return 0;
7463
7464 if (SYMBOL_REF_P (op))
7465 sym_ref = op;
7466
7467 else if (GET_CODE (op) != CONST
7468 || GET_CODE (XEXP (op, 0)) != PLUS
7469 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7470 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7471 return 0;
7472
7473 else
7474 {
7475 rtx sum = XEXP (op, 0);
7476 HOST_WIDE_INT summand;
7477
7478 /* We have to be careful here, because it is the referenced address
7479 that must be 32k from _SDA_BASE_, not just the symbol. */
7480 summand = INTVAL (XEXP (sum, 1));
7481 if (summand < 0 || summand > g_switch_value)
7482 return 0;
7483
7484 sym_ref = XEXP (sum, 0);
7485 }
7486
7487 return SYMBOL_REF_SMALL_P (sym_ref);
7488 #else
7489 return 0;
7490 #endif
7491 }
7492
7493 /* Return true if either operand is a general purpose register. */
7494
7495 bool
7496 gpr_or_gpr_p (rtx op0, rtx op1)
7497 {
7498 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7499 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7500 }
7501
7502 /* Return true if this is a move direct operation between GPR registers and
7503 floating point/VSX registers. */
7504
7505 bool
7506 direct_move_p (rtx op0, rtx op1)
7507 {
7508 int regno0, regno1;
7509
7510 if (!REG_P (op0) || !REG_P (op1))
7511 return false;
7512
7513 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7514 return false;
7515
7516 regno0 = REGNO (op0);
7517 regno1 = REGNO (op1);
7518 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7519 return false;
7520
7521 if (INT_REGNO_P (regno0))
7522 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7523
7524 else if (INT_REGNO_P (regno1))
7525 {
7526 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7527 return true;
7528
7529 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7530 return true;
7531 }
7532
7533 return false;
7534 }
7535
7536 /* Return true if the OFFSET is valid for the quad address instructions that
7537 use d-form (register + offset) addressing. */
7538
7539 static inline bool
7540 quad_address_offset_p (HOST_WIDE_INT offset)
7541 {
7542 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7543 }
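/* E.g. offsets 0, 16 and -32768 satisfy quad_address_offset_p, while
   8 (not a multiple of 16) and 32768 (out of range) do not.  */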
7544
7545 /* Return true if ADDR is an acceptable address for a quad memory
7546 operation of mode MODE (either LQ/STQ for general purpose registers, or
7547 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
7548 base register in the address must be a strictly valid hard register,
7549 as is required after reload. */
7550
7551 bool
7552 quad_address_p (rtx addr, machine_mode mode, bool strict)
7553 {
7554 rtx op0, op1;
7555
7556 if (GET_MODE_SIZE (mode) != 16)
7557 return false;
7558
7559 if (legitimate_indirect_address_p (addr, strict))
7560 return true;
7561
7562 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7563 return false;
7564
7565 if (GET_CODE (addr) != PLUS)
7566 return false;
7567
7568 op0 = XEXP (addr, 0);
7569 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7570 return false;
7571
7572 op1 = XEXP (addr, 1);
7573 if (!CONST_INT_P (op1))
7574 return false;
7575
7576 return quad_address_offset_p (INTVAL (op1));
7577 }
7578
7579 /* Return true if this is a load or store quad operation. This function does
7580 not handle the atomic quad memory instructions. */
7581
7582 bool
7583 quad_load_store_p (rtx op0, rtx op1)
7584 {
7585 bool ret;
7586
7587 if (!TARGET_QUAD_MEMORY)
7588 ret = false;
7589
7590 else if (REG_P (op0) && MEM_P (op1))
7591 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7592 && quad_memory_operand (op1, GET_MODE (op1))
7593 && !reg_overlap_mentioned_p (op0, op1));
7594
7595 else if (MEM_P (op0) && REG_P (op1))
7596 ret = (quad_memory_operand (op0, GET_MODE (op0))
7597 && quad_int_reg_operand (op1, GET_MODE (op1)));
7598
7599 else
7600 ret = false;
7601
7602 if (TARGET_DEBUG_ADDR)
7603 {
7604 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7605 ret ? "true" : "false");
7606 debug_rtx (gen_rtx_SET (op0, op1));
7607 }
7608
7609 return ret;
7610 }
7611
7612 /* Given an address, return a constant offset term if one exists. */
7613
7614 static rtx
7615 address_offset (rtx op)
7616 {
7617 if (GET_CODE (op) == PRE_INC
7618 || GET_CODE (op) == PRE_DEC)
7619 op = XEXP (op, 0);
7620 else if (GET_CODE (op) == PRE_MODIFY
7621 || GET_CODE (op) == LO_SUM)
7622 op = XEXP (op, 1);
7623
7624 if (GET_CODE (op) == CONST)
7625 op = XEXP (op, 0);
7626
7627 if (GET_CODE (op) == PLUS)
7628 op = XEXP (op, 1);
7629
7630 if (CONST_INT_P (op))
7631 return op;
7632
7633 return NULL_RTX;
7634 }
7635
7636 /* Return true if the MEM operand is a memory operand suitable for use
7637 with a (full width, possibly multiple) gpr load/store. On
7638 powerpc64 this means the offset must be divisible by 4.
7639 Implements 'Y' constraint.
7640
7641 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7642 a constraint function we know the operand has satisfied a suitable
7643 memory predicate. Also accept some odd rtl generated by reload
7644 (see rs6000_legitimize_reload_address for various forms). It is
7645 important that reload rtl be accepted by appropriate constraints
7646 but not by the operand predicate.
7647
7648 Offsetting a lo_sum should not be allowed, except where we know by
7649 alignment that a 32k boundary is not crossed, but see the ???
7650 comment in rs6000_legitimize_reload_address. Note that by
7651 "offsetting" here we mean a further offset to access parts of the
7652 MEM. It's fine to have a lo_sum where the inner address is offset
7653 from a sym, since the same sym+offset will appear in the high part
7654 of the address calculation. */
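/* Illustrative numbers: on powerpc64 a multi-word access at (r1 + 6)
   fails the low-two-bits test below while (r1 + 4) passes, and a
   16-byte access has EXTRA = 8, so its offset must satisfy
   offset + 0x8000 < 0x10000 - 8.  */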
7655
7656 bool
7657 mem_operand_gpr (rtx op, machine_mode mode)
7658 {
7659 unsigned HOST_WIDE_INT offset;
7660 int extra;
7661 rtx addr = XEXP (op, 0);
7662
7663 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7664 if (TARGET_UPDATE
7665 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7666 && mode_supports_pre_incdec_p (mode)
7667 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7668 return true;
7669
7670 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7671 if (!rs6000_offsettable_memref_p (op, mode, false))
7672 return false;
7673
7674 op = address_offset (addr);
7675 if (op == NULL_RTX)
7676 return true;
7677
7678 offset = INTVAL (op);
7679 if (TARGET_POWERPC64 && (offset & 3) != 0)
7680 return false;
7681
7682 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7683 if (extra < 0)
7684 extra = 0;
7685
7686 if (GET_CODE (addr) == LO_SUM)
7687 /* For lo_sum addresses, we must allow any offset except one that
7688 causes a wrap, so test only the low 16 bits. */
7689 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7690
7691 return offset + 0x8000 < 0x10000u - extra;
7692 }
7693
7694 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7695 enforce an offset divisible by 4 even for 32-bit. */
7696
7697 bool
7698 mem_operand_ds_form (rtx op, machine_mode mode)
7699 {
7700 unsigned HOST_WIDE_INT offset;
7701 int extra;
7702 rtx addr = XEXP (op, 0);
7703
7704 if (!offsettable_address_p (false, mode, addr))
7705 return false;
7706
7707 op = address_offset (addr);
7708 if (op == NULL_RTX)
7709 return true;
7710
7711 offset = INTVAL (op);
7712 if ((offset & 3) != 0)
7713 return false;
7714
7715 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7716 if (extra < 0)
7717 extra = 0;
7718
7719 if (GET_CODE (addr) == LO_SUM)
7720 /* For lo_sum addresses, we must allow any offset except one that
7721 causes a wrap, so test only the low 16 bits. */
7722 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7723
7724 return offset + 0x8000 < 0x10000u - extra;
7725 }
7726 \f
7727 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7728
7729 static bool
7730 reg_offset_addressing_ok_p (machine_mode mode)
7731 {
7732 switch (mode)
7733 {
7734 case E_V16QImode:
7735 case E_V8HImode:
7736 case E_V4SFmode:
7737 case E_V4SImode:
7738 case E_V2DFmode:
7739 case E_V2DImode:
7740 case E_V1TImode:
7741 case E_TImode:
7742 case E_TFmode:
7743 case E_KFmode:
7744 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7745 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7746 a vector mode, if we want to use the VSX registers to move it around,
7747 we need to restrict ourselves to reg+reg addressing. Similarly for
7748 IEEE 128-bit floating point that is passed in a single vector
7749 register. */
7750 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7751 return mode_supports_dq_form (mode);
7752 break;
7753
7754 case E_SDmode:
7755 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7756 addressing for the LFIWZX and STFIWX instructions. */
7757 if (TARGET_NO_SDMODE_STACK)
7758 return false;
7759 break;
7760
7761 default:
7762 break;
7763 }
7764
7765 return true;
7766 }
7767
7768 static bool
7769 virtual_stack_registers_memory_p (rtx op)
7770 {
7771 int regnum;
7772
7773 if (REG_P (op))
7774 regnum = REGNO (op);
7775
7776 else if (GET_CODE (op) == PLUS
7777 && REG_P (XEXP (op, 0))
7778 && CONST_INT_P (XEXP (op, 1)))
7779 regnum = REGNO (XEXP (op, 0));
7780
7781 else
7782 return false;
7783
7784 return (regnum >= FIRST_VIRTUAL_REGISTER
7785 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7786 }
7787
7788 /* Return true if a MODE-sized memory access to OP plus OFFSET
7789 is known not to straddle a 32k boundary. This function is used
7790 to determine whether -mcmodel=medium code can use TOC pointer
7791 relative addressing for OP. This means the alignment of the TOC
7792 pointer must also be taken into account, and unfortunately that is
7793 only 8 bytes. */
7794
7795 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7796 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7797 #endif
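/* Worked example (hypothetical): with a decl aligned to 8 bytes and
   OFFSET = 4, the lowest set bit of the offset limits the known
   alignment of the access to 4 bytes, so a 4-byte access is known not
   to cross a 32k boundary but an 8-byte access is not.  */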
7798
7799 static bool
7800 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7801 machine_mode mode)
7802 {
7803 tree decl;
7804 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7805
7806 if (!SYMBOL_REF_P (op))
7807 return false;
7808
7809 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7810 SYMBOL_REF. */
7811 if (mode_supports_dq_form (mode))
7812 return false;
7813
7814 dsize = GET_MODE_SIZE (mode);
7815 decl = SYMBOL_REF_DECL (op);
7816 if (!decl)
7817 {
7818 if (dsize == 0)
7819 return false;
7820
7821 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7822 replacing memory addresses with an anchor plus offset. We
7823 could find the decl by rummaging around in the block->objects
7824 VEC for the given offset but that seems like too much work. */
7825 dalign = BITS_PER_UNIT;
7826 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7827 && SYMBOL_REF_ANCHOR_P (op)
7828 && SYMBOL_REF_BLOCK (op) != NULL)
7829 {
7830 struct object_block *block = SYMBOL_REF_BLOCK (op);
7831
7832 dalign = block->alignment;
7833 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7834 }
7835 else if (CONSTANT_POOL_ADDRESS_P (op))
7836 {
7837 /* It would be nice to have get_pool_align()... */
7838 machine_mode cmode = get_pool_mode (op);
7839
7840 dalign = GET_MODE_ALIGNMENT (cmode);
7841 }
7842 }
7843 else if (DECL_P (decl))
7844 {
7845 dalign = DECL_ALIGN (decl);
7846
7847 if (dsize == 0)
7848 {
7849 /* Allow BLKmode when the entire object is known to not
7850 cross a 32k boundary. */
7851 if (!DECL_SIZE_UNIT (decl))
7852 return false;
7853
7854 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7855 return false;
7856
7857 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7858 if (dsize > 32768)
7859 return false;
7860
7861 dalign /= BITS_PER_UNIT;
7862 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7863 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7864 return dalign >= dsize;
7865 }
7866 }
7867 else
7868 gcc_unreachable ();
7869
7870 /* Find how many bits of the alignment we know for this access. */
7871 dalign /= BITS_PER_UNIT;
7872 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7873 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7874 mask = dalign - 1;
7875 lsb = offset & -offset;
7876 mask &= lsb - 1;
7877 dalign = mask + 1;
7878
7879 return dalign >= dsize;
7880 }
7881
7882 static bool
7883 constant_pool_expr_p (rtx op)
7884 {
7885 rtx base, offset;
7886
7887 split_const (op, &base, &offset);
7888 return (SYMBOL_REF_P (base)
7889 && CONSTANT_POOL_ADDRESS_P (base)
7890 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7891 }
7892
7893 /* These are only used to pass through from print_operand/print_operand_address
7894 to rs6000_output_addr_const_extra over the intervening function
7895 output_addr_const which is not target code. */
7896 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7897
7898 /* Return true if OP is a toc pointer relative address (the output
7899 of create_TOC_reference). If STRICT, do not match non-split
7900 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7901 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7902 TOCREL_OFFSET_RET respectively. */
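/* The shape matched here (as produced by create_TOC_reference) is
     (unspec [(symbol_ref ...) (reg TOC_REGISTER)] UNSPEC_TOCREL)
   optionally wrapped in a PLUS with a constant offset, and for
   -mcmodel=medium/large possibly in a LO_SUM.  */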
7903
7904 bool
7905 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7906 const_rtx *tocrel_offset_ret)
7907 {
7908 if (!TARGET_TOC)
7909 return false;
7910
7911 if (TARGET_CMODEL != CMODEL_SMALL)
7912 {
7913 /* When strict ensure we have everything tidy. */
7914 if (strict
7915 && !(GET_CODE (op) == LO_SUM
7916 && REG_P (XEXP (op, 0))
7917 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7918 return false;
7919
7920 /* When not strict, allow non-split TOC addresses and also allow
7921 (lo_sum (high ..)) TOC addresses created during reload. */
7922 if (GET_CODE (op) == LO_SUM)
7923 op = XEXP (op, 1);
7924 }
7925
7926 const_rtx tocrel_base = op;
7927 const_rtx tocrel_offset = const0_rtx;
7928
7929 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7930 {
7931 tocrel_base = XEXP (op, 0);
7932 tocrel_offset = XEXP (op, 1);
7933 }
7934
7935 if (tocrel_base_ret)
7936 *tocrel_base_ret = tocrel_base;
7937 if (tocrel_offset_ret)
7938 *tocrel_offset_ret = tocrel_offset;
7939
7940 return (GET_CODE (tocrel_base) == UNSPEC
7941 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7942 && REG_P (XVECEXP (tocrel_base, 0, 1))
7943 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7944 }
7945
7946 /* Return true if X is a constant pool address, and also for cmodel=medium
7947 if X is a toc-relative address known to be offsettable within MODE. */
7948
7949 bool
7950 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7951 bool strict)
7952 {
7953 const_rtx tocrel_base, tocrel_offset;
7954 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7955 && (TARGET_CMODEL != CMODEL_MEDIUM
7956 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7957 || mode == QImode
7958 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7959 INTVAL (tocrel_offset), mode)));
7960 }
7961
7962 static bool
7963 legitimate_small_data_p (machine_mode mode, rtx x)
7964 {
7965 return (DEFAULT_ABI == ABI_V4
7966 && !flag_pic && !TARGET_TOC
7967 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7968 && small_data_operand (x, mode));
7969 }
7970
7971 bool
7972 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7973 bool strict, bool worst_case)
7974 {
7975 unsigned HOST_WIDE_INT offset;
7976 unsigned int extra;
7977
7978 if (GET_CODE (x) != PLUS)
7979 return false;
7980 if (!REG_P (XEXP (x, 0)))
7981 return false;
7982 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7983 return false;
7984 if (mode_supports_dq_form (mode))
7985 return quad_address_p (x, mode, strict);
7986 if (!reg_offset_addressing_ok_p (mode))
7987 return virtual_stack_registers_memory_p (x);
7988 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7989 return true;
7990 if (!CONST_INT_P (XEXP (x, 1)))
7991 return false;
7992
7993 offset = INTVAL (XEXP (x, 1));
7994 extra = 0;
7995 switch (mode)
7996 {
7997 case E_DFmode:
7998 case E_DDmode:
7999 case E_DImode:
8000 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8001 addressing. */
8002 if (VECTOR_MEM_VSX_P (mode))
8003 return false;
8004
8005 if (!worst_case)
8006 break;
8007 if (!TARGET_POWERPC64)
8008 extra = 4;
8009 else if (offset & 3)
8010 return false;
8011 break;
8012
8013 case E_TFmode:
8014 case E_IFmode:
8015 case E_KFmode:
8016 case E_TDmode:
8017 case E_TImode:
8018 case E_PTImode:
8019 extra = 8;
8020 if (!worst_case)
8021 break;
8022 if (!TARGET_POWERPC64)
8023 extra = 12;
8024 else if (offset & 3)
8025 return false;
8026 break;
8027
8028 default:
8029 break;
8030 }
8031
8032 offset += 0x8000;
8033 return offset < 0x10000 - extra;
8034 }
8035
8036 bool
8037 legitimate_indexed_address_p (rtx x, int strict)
8038 {
8039 rtx op0, op1;
8040
8041 if (GET_CODE (x) != PLUS)
8042 return false;
8043
8044 op0 = XEXP (x, 0);
8045 op1 = XEXP (x, 1);
8046
8047 return (REG_P (op0) && REG_P (op1)
8048 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8049 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8050 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8051 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8052 }
8053
8054 bool
8055 avoiding_indexed_address_p (machine_mode mode)
8056 {
8057 /* Avoid indexed addressing for modes that have non-indexed
8058 load/store instruction forms. */
8059 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8060 }
8061
8062 bool
8063 legitimate_indirect_address_p (rtx x, int strict)
8064 {
8065 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8066 }
8067
8068 bool
8069 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8070 {
8071 if (!TARGET_MACHO || !flag_pic
8072 || mode != SImode || !MEM_P (x))
8073 return false;
8074 x = XEXP (x, 0);
8075
8076 if (GET_CODE (x) != LO_SUM)
8077 return false;
8078 if (!REG_P (XEXP (x, 0)))
8079 return false;
8080 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8081 return false;
8082 x = XEXP (x, 1);
8083
8084 return CONSTANT_P (x);
8085 }
8086
8087 static bool
8088 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8089 {
8090 if (GET_CODE (x) != LO_SUM)
8091 return false;
8092 if (!REG_P (XEXP (x, 0)))
8093 return false;
8094 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8095 return false;
8096 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8097 if (mode_supports_dq_form (mode))
8098 return false;
8099 x = XEXP (x, 1);
8100
8101 if (TARGET_ELF || TARGET_MACHO)
8102 {
8103 bool large_toc_ok;
8104
8105 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8106 return false;
8107 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8108 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8109 recognizes some LO_SUM addresses as valid although this
8110 function says the opposite. In most cases LRA can generate
8111 correct code for address reloads through its various
8112 transformations; only some LO_SUM cases defeat it. So we need
8113 code here, analogous to that in rs6000_legitimize_reload_address
8114 for LO_SUM, saying that some addresses are still valid. */
8115 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8116 && small_toc_ref (x, VOIDmode));
8117 if (TARGET_TOC && ! large_toc_ok)
8118 return false;
8119 if (GET_MODE_NUNITS (mode) != 1)
8120 return false;
8121 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8122 && !(/* ??? Assume floating point reg based on mode? */
8123 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8124 return false;
8125
8126 return CONSTANT_P (x) || large_toc_ok;
8127 }
8128
8129 return false;
8130 }
8131
8132
8133 /* Try machine-dependent ways of modifying an illegitimate address
8134 to be legitimate. If we find one, return the new, valid address.
8135 This is used from only one place: `memory_address' in explow.c.
8136
8137 OLDX is the address as it was before break_out_memory_refs was
8138 called. In some cases it is useful to look at this to decide what
8139 needs to be done.
8140
8141 It is always safe for this function to do nothing. It exists to
8142 recognize opportunities to optimize the output.
8143
8144 On RS/6000, first check for the sum of a register with a constant
8145 integer that is out of range. If so, generate code to add the
8146 constant with the low-order 16 bits masked to the register and force
8147 this result into another register (this can be done with `cau').
8148 Then generate an address of REG+(CONST&0xffff), allowing for the
8149 possibility of bit 16 being a one.
8150
8151 Then check for the sum of a register and something not constant; try to
8152 load the non-constant part into a register and return the sum. */
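/* For example (a sketch): legitimizing (plus (reg r3) (const_int
   0x12348)) for SImode splits the constant into high_int = 0x10000 and
   low_int = 0x2348, adds the high part into a new register (typically
   an addis), and returns (plus (new reg) (const_int 0x2348)).  */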
8153
8154 static rtx
8155 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8156 machine_mode mode)
8157 {
8158 unsigned int extra;
8159
8160 if (!reg_offset_addressing_ok_p (mode)
8161 || mode_supports_dq_form (mode))
8162 {
8163 if (virtual_stack_registers_memory_p (x))
8164 return x;
8165
8166 /* In theory we should not be seeing addresses of the form reg+0,
8167 but just in case it is generated, optimize it away. */
8168 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8169 return force_reg (Pmode, XEXP (x, 0));
8170
8171 /* For TImode with load/store quad, restrict addresses to just a single
8172 pointer, so it works with both GPRs and VSX registers. */
8173 /* Make sure both operands are registers. */
8174 else if (GET_CODE (x) == PLUS
8175 && (mode != TImode || !TARGET_VSX))
8176 return gen_rtx_PLUS (Pmode,
8177 force_reg (Pmode, XEXP (x, 0)),
8178 force_reg (Pmode, XEXP (x, 1)));
8179 else
8180 return force_reg (Pmode, x);
8181 }
8182 if (SYMBOL_REF_P (x))
8183 {
8184 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8185 if (model != 0)
8186 return rs6000_legitimize_tls_address (x, model);
8187 }
8188
8189 extra = 0;
8190 switch (mode)
8191 {
8192 case E_TFmode:
8193 case E_TDmode:
8194 case E_TImode:
8195 case E_PTImode:
8196 case E_IFmode:
8197 case E_KFmode:
8198 /* As in legitimate_offset_address_p we do not assume
8199 worst-case. The mode here is just a hint as to the registers
8200 used. A TImode is usually in gprs, but may actually be in
8201 fprs. Leave worst-case scenario for reload to handle via
8202 insn constraints. PTImode is only GPRs. */
8203 extra = 8;
8204 break;
8205 default:
8206 break;
8207 }
8208
8209 if (GET_CODE (x) == PLUS
8210 && REG_P (XEXP (x, 0))
8211 && CONST_INT_P (XEXP (x, 1))
8212 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8213 >= 0x10000 - extra))
8214 {
8215 HOST_WIDE_INT high_int, low_int;
8216 rtx sum;
8217 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8218 if (low_int >= 0x8000 - extra)
8219 low_int = 0;
8220 high_int = INTVAL (XEXP (x, 1)) - low_int;
8221 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8222 GEN_INT (high_int)), 0);
8223 return plus_constant (Pmode, sum, low_int);
8224 }
8225 else if (GET_CODE (x) == PLUS
8226 && REG_P (XEXP (x, 0))
8227 && !CONST_INT_P (XEXP (x, 1))
8228 && GET_MODE_NUNITS (mode) == 1
8229 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8230 || (/* ??? Assume floating point reg based on mode? */
8231 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8232 && !avoiding_indexed_address_p (mode))
8233 {
8234 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8235 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8236 }
8237 else if ((TARGET_ELF
8238 #if TARGET_MACHO
8239 || !MACHO_DYNAMIC_NO_PIC_P
8240 #endif
8241 )
8242 && TARGET_32BIT
8243 && TARGET_NO_TOC
8244 && !flag_pic
8245 && !CONST_INT_P (x)
8246 && !CONST_WIDE_INT_P (x)
8247 && !CONST_DOUBLE_P (x)
8248 && CONSTANT_P (x)
8249 && GET_MODE_NUNITS (mode) == 1
8250 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8251 || (/* ??? Assume floating point reg based on mode? */
8252 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8253 {
8254 rtx reg = gen_reg_rtx (Pmode);
8255 if (TARGET_ELF)
8256 emit_insn (gen_elf_high (reg, x));
8257 else
8258 emit_insn (gen_macho_high (reg, x));
8259 return gen_rtx_LO_SUM (Pmode, reg, x);
8260 }
8261 else if (TARGET_TOC
8262 && SYMBOL_REF_P (x)
8263 && constant_pool_expr_p (x)
8264 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8265 return create_TOC_reference (x, NULL_RTX);
8266 else
8267 return x;
8268 }
8269
8270 /* Debug version of rs6000_legitimize_address. */
8271 static rtx
8272 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8273 {
8274 rtx ret;
8275 rtx_insn *insns;
8276
8277 start_sequence ();
8278 ret = rs6000_legitimize_address (x, oldx, mode);
8279 insns = get_insns ();
8280 end_sequence ();
8281
8282 if (ret != x)
8283 {
8284 fprintf (stderr,
8285 "\nrs6000_legitimize_address: mode %s, old code %s, "
8286 "new code %s, modified\n",
8287 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8288 GET_RTX_NAME (GET_CODE (ret)));
8289
8290 fprintf (stderr, "Original address:\n");
8291 debug_rtx (x);
8292
8293 fprintf (stderr, "oldx:\n");
8294 debug_rtx (oldx);
8295
8296 fprintf (stderr, "New address:\n");
8297 debug_rtx (ret);
8298
8299 if (insns)
8300 {
8301 fprintf (stderr, "Insns added:\n");
8302 debug_rtx_list (insns, 20);
8303 }
8304 }
8305 else
8306 {
8307 fprintf (stderr,
8308 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8309 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8310
8311 debug_rtx (x);
8312 }
8313
8314 if (insns)
8315 emit_insn (insns);
8316
8317 return ret;
8318 }
8319
8320 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8321 We need to emit DTP-relative relocations. */
8322
8323 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8324 static void
8325 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8326 {
8327 switch (size)
8328 {
8329 case 4:
8330 fputs ("\t.long\t", file);
8331 break;
8332 case 8:
8333 fputs (DOUBLE_INT_ASM_OP, file);
8334 break;
8335 default:
8336 gcc_unreachable ();
8337 }
8338 output_addr_const (file, x);
8339 if (TARGET_ELF)
8340 fputs ("@dtprel+0x8000", file);
8341 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8342 {
8343 switch (SYMBOL_REF_TLS_MODEL (x))
8344 {
8345 case 0:
8346 break;
8347 case TLS_MODEL_LOCAL_EXEC:
8348 fputs ("@le", file);
8349 break;
8350 case TLS_MODEL_INITIAL_EXEC:
8351 fputs ("@ie", file);
8352 break;
8353 case TLS_MODEL_GLOBAL_DYNAMIC:
8354 case TLS_MODEL_LOCAL_DYNAMIC:
8355 fputs ("@m", file);
8356 break;
8357 default:
8358 gcc_unreachable ();
8359 }
8360 }
8361 }
8362
8363 /* Return true if X is a symbol that refers to real (rather than emulated)
8364 TLS. */
8365
8366 static bool
8367 rs6000_real_tls_symbol_ref_p (rtx x)
8368 {
8369 return (SYMBOL_REF_P (x)
8370 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8371 }
8372
8373 /* In the name of slightly smaller debug output, and to cater to
8374 general assembler lossage, recognize various UNSPEC sequences
8375 and turn them back into a direct symbol reference. */
8376
8377 static rtx
8378 rs6000_delegitimize_address (rtx orig_x)
8379 {
8380 rtx x, y, offset;
8381
8382 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8383 orig_x = XVECEXP (orig_x, 0, 0);
8384
8385 orig_x = delegitimize_mem_from_attrs (orig_x);
8386
8387 x = orig_x;
8388 if (MEM_P (x))
8389 x = XEXP (x, 0);
8390
8391 y = x;
8392 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8393 y = XEXP (y, 1);
8394
8395 offset = NULL_RTX;
8396 if (GET_CODE (y) == PLUS
8397 && GET_MODE (y) == Pmode
8398 && CONST_INT_P (XEXP (y, 1)))
8399 {
8400 offset = XEXP (y, 1);
8401 y = XEXP (y, 0);
8402 }
8403
8404 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8405 {
8406 y = XVECEXP (y, 0, 0);
8407
8408 #ifdef HAVE_AS_TLS
8409 /* Do not associate thread-local symbols with the original
8410 constant pool symbol. */
8411 if (TARGET_XCOFF
8412 && SYMBOL_REF_P (y)
8413 && CONSTANT_POOL_ADDRESS_P (y)
8414 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8415 return orig_x;
8416 #endif
8417
8418 if (offset != NULL_RTX)
8419 y = gen_rtx_PLUS (Pmode, y, offset);
8420 if (!MEM_P (orig_x))
8421 return y;
8422 else
8423 return replace_equiv_address_nv (orig_x, y);
8424 }
8425
8426 if (TARGET_MACHO
8427 && GET_CODE (orig_x) == LO_SUM
8428 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8429 {
8430 y = XEXP (XEXP (orig_x, 1), 0);
8431 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8432 return XVECEXP (y, 0, 0);
8433 }
8434
8435 return orig_x;
8436 }
8437
8438 /* Return true if X shouldn't be emitted into the debug info.
8439 The linker doesn't like .toc section references from
8440 .debug_* sections, so reject .toc section symbols. */
8441
8442 static bool
8443 rs6000_const_not_ok_for_debug_p (rtx x)
8444 {
8445 if (GET_CODE (x) == UNSPEC)
8446 return true;
8447 if (SYMBOL_REF_P (x)
8448 && CONSTANT_POOL_ADDRESS_P (x))
8449 {
8450 rtx c = get_pool_constant (x);
8451 machine_mode cmode = get_pool_mode (x);
8452 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8453 return true;
8454 }
8455
8456 return false;
8457 }
8458
8459 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8460
8461 static bool
8462 rs6000_legitimate_combined_insn (rtx_insn *insn)
8463 {
8464 int icode = INSN_CODE (insn);
8465
8466 /* Reject creating doloop insns. Combine should not be allowed
8467 to create these for a number of reasons:
8468 1) In a nested loop, if combine creates one of these in an
8469 outer loop and the register allocator happens to allocate ctr
8470 to the outer loop insn, then the inner loop can't use ctr.
8471 Inner loops ought to be more highly optimized.
8472 2) Combine often wants to create one of these from what was
8473 originally a three insn sequence, first combining the three
8474 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8475 allocated ctr, the splitter takes us back to the three insn
8476 sequence. It's better to stop combine at the two insn
8477 sequence.
8478 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8479 insns, the register allocator sometimes uses floating point
8480 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8481 jump insn and output reloads are not implemented for jumps,
8482 the ctrsi/ctrdi splitters need to handle all possible cases.
8483 That's a pain, and it gets to be seriously difficult when a
8484 splitter that runs after reload needs memory to transfer from
8485 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8486 for the difficult case. It's better to not create problems
8487 in the first place. */
8488 if (icode != CODE_FOR_nothing
8489 && (icode == CODE_FOR_bdz_si
8490 || icode == CODE_FOR_bdz_di
8491 || icode == CODE_FOR_bdnz_si
8492 || icode == CODE_FOR_bdnz_di
8493 || icode == CODE_FOR_bdztf_si
8494 || icode == CODE_FOR_bdztf_di
8495 || icode == CODE_FOR_bdnztf_si
8496 || icode == CODE_FOR_bdnztf_di))
8497 return false;
8498
8499 return true;
8500 }
8501
8502 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8503
8504 static GTY(()) rtx rs6000_tls_symbol;
8505 static rtx
8506 rs6000_tls_get_addr (void)
8507 {
8508 if (!rs6000_tls_symbol)
8509 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8510
8511 return rs6000_tls_symbol;
8512 }
8513
8514 /* Construct the SYMBOL_REF for TLS GOT references. */
8515
8516 static GTY(()) rtx rs6000_got_symbol;
8517 static rtx
8518 rs6000_got_sym (void)
8519 {
8520 if (!rs6000_got_symbol)
8521 {
8522 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8523 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8524 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8525 }
8526
8527 return rs6000_got_symbol;
8528 }
8529
8530 /* AIX Thread-Local Address support. */
8531
8532 static rtx
8533 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8534 {
8535 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8536 const char *name;
8537 char *tlsname;
8538
8539 name = XSTR (addr, 0);
8540 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8541 or the symbol will be in the TLS private data section. */
8542 if (name[strlen (name) - 1] != ']'
8543 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8544 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8545 {
8546 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8547 strcpy (tlsname, name);
8548 strcat (tlsname,
8549 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8550 tlsaddr = copy_rtx (addr);
8551 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8552 }
8553 else
8554 tlsaddr = addr;
8555
8556 /* Place addr into TOC constant pool. */
8557 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8558
8559 /* Output the TOC entry and create the MEM referencing the value. */
8560 if (constant_pool_expr_p (XEXP (sym, 0))
8561 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8562 {
8563 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8564 mem = gen_const_mem (Pmode, tocref);
8565 set_mem_alias_set (mem, get_TOC_alias_set ());
8566 }
8567 else
8568 return sym;
8569
8570 /* Use global-dynamic for local-dynamic. */
8571 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8572 || model == TLS_MODEL_LOCAL_DYNAMIC)
8573 {
8574 /* Create new TOC reference for @m symbol. */
8575 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8576 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8577 strcpy (tlsname, "*LCM");
8578 strcat (tlsname, name + 3);
8579 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8580 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8581 tocref = create_TOC_reference (modaddr, NULL_RTX);
8582 rtx modmem = gen_const_mem (Pmode, tocref);
8583 set_mem_alias_set (modmem, get_TOC_alias_set ());
8584
8585 rtx modreg = gen_reg_rtx (Pmode);
8586 emit_insn (gen_rtx_SET (modreg, modmem));
8587
8588 tmpreg = gen_reg_rtx (Pmode);
8589 emit_insn (gen_rtx_SET (tmpreg, mem));
8590
8591 dest = gen_reg_rtx (Pmode);
8592 if (TARGET_32BIT)
8593 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8594 else
8595 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8596 return dest;
8597 }
8598 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8599 else if (TARGET_32BIT)
8600 {
8601 tlsreg = gen_reg_rtx (SImode);
8602 emit_insn (gen_tls_get_tpointer (tlsreg));
8603 }
8604 else
8605 tlsreg = gen_rtx_REG (DImode, 13);
8606
8607 /* Load the TOC value into temporary register. */
8608 tmpreg = gen_reg_rtx (Pmode);
8609 emit_insn (gen_rtx_SET (tmpreg, mem));
8610 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8611 gen_rtx_MINUS (Pmode, addr, tlsreg));
8612
8613 /* Add TOC symbol value to TLS pointer. */
8614 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8615
8616 return dest;
8617 }
8618
8619 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8620 __tls_get_addr call. */
8621
8622 void
8623 rs6000_output_tlsargs (rtx *operands)
8624 {
8625 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8626 rtx op[3];
8627
8628 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8629 op[0] = operands[0];
8630 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8631 op[1] = XVECEXP (operands[2], 0, 0);
8632 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8633 {
8634 /* The GOT register. */
8635 op[2] = XVECEXP (operands[2], 0, 1);
8636 if (TARGET_CMODEL != CMODEL_SMALL)
8637 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8638 "addi %0,%0,%1@got@tlsgd@l", op);
8639 else
8640 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8641 }
8642 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8643 {
8644 if (TARGET_CMODEL != CMODEL_SMALL)
8645 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8646 "addi %0,%0,%&@got@tlsld@l", op);
8647 else
8648 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8649 }
8650 else
8651 gcc_unreachable ();
8652 }
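/* So for a global-dynamic access to symbol x without TLS markers, the
   code above produces (e.g. on 64-bit with -mcmodel=medium) roughly
     addis 3,2,x@got@tlsgd@ha
     addi  3,3,x@got@tlsgd@l
   ahead of the call to __tls_get_addr; with -mcmodel=small a single
   addi from the GOT pointer is used instead.  */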
8653
8654 /* Passes the tls arg value for the global-dynamic and local-dynamic
8655 emit_library_call_value calls in rs6000_legitimize_tls_address
8656 through to rs6000_call_aix and rs6000_call_sysv. This is used to
8657 emit the marker relocs put on __tls_get_addr calls. */
8658 static rtx global_tlsarg;
8659
8660 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8661 this (thread-local) address. */
8662
8663 static rtx
8664 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8665 {
8666 rtx dest, insn;
8667
8668 if (TARGET_XCOFF)
8669 return rs6000_legitimize_tls_address_aix (addr, model);
8670
8671 dest = gen_reg_rtx (Pmode);
8672 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8673 {
8674 rtx tlsreg;
8675
8676 if (TARGET_64BIT)
8677 {
8678 tlsreg = gen_rtx_REG (Pmode, 13);
8679 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8680 }
8681 else
8682 {
8683 tlsreg = gen_rtx_REG (Pmode, 2);
8684 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8685 }
8686 emit_insn (insn);
8687 }
8688 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8689 {
8690 rtx tlsreg, tmp;
8691
8692 tmp = gen_reg_rtx (Pmode);
8693 if (TARGET_64BIT)
8694 {
8695 tlsreg = gen_rtx_REG (Pmode, 13);
8696 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8697 }
8698 else
8699 {
8700 tlsreg = gen_rtx_REG (Pmode, 2);
8701 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8702 }
8703 emit_insn (insn);
8704 if (TARGET_64BIT)
8705 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8706 else
8707 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8708 emit_insn (insn);
8709 }
8710 else
8711 {
8712 rtx got, tga, tmp1, tmp2;
8713
8714 /* We currently use relocations like @got@tlsgd for tls, which
8715 means the linker will handle allocation of tls entries, placing
8716 them in the .got section. So use a pointer to the .got section,
8717 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8718 or to secondary GOT sections used by 32-bit -fPIC. */
8719 if (TARGET_64BIT)
8720 got = gen_rtx_REG (Pmode, 2);
8721 else
8722 {
8723 if (flag_pic == 1)
8724 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8725 else
8726 {
8727 rtx gsym = rs6000_got_sym ();
8728 got = gen_reg_rtx (Pmode);
8729 if (flag_pic == 0)
8730 rs6000_emit_move (got, gsym, Pmode);
8731 else
8732 {
8733 rtx mem, lab;
8734
8735 tmp1 = gen_reg_rtx (Pmode);
8736 tmp2 = gen_reg_rtx (Pmode);
8737 mem = gen_const_mem (Pmode, tmp1);
8738 lab = gen_label_rtx ();
8739 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8740 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8741 if (TARGET_LINK_STACK)
8742 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8743 emit_move_insn (tmp2, mem);
8744 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8745 set_unique_reg_note (last, REG_EQUAL, gsym);
8746 }
8747 }
8748 }
8749
8750 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8751 {
8752 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8753 UNSPEC_TLSGD);
8754 tga = rs6000_tls_get_addr ();
8755 global_tlsarg = arg;
8756 if (TARGET_TLS_MARKERS)
8757 {
8758 rtx argreg = gen_rtx_REG (Pmode, 3);
8759 emit_insn (gen_rtx_SET (argreg, arg));
8760 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8761 argreg, Pmode);
8762 }
8763 else
8764 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8765 global_tlsarg = NULL_RTX;
8766 }
8767 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8768 {
8769 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8770 tga = rs6000_tls_get_addr ();
8771 tmp1 = gen_reg_rtx (Pmode);
8772 global_tlsarg = arg;
8773 if (TARGET_TLS_MARKERS)
8774 {
8775 rtx argreg = gen_rtx_REG (Pmode, 3);
8776 emit_insn (gen_rtx_SET (argreg, arg));
8777 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8778 argreg, Pmode);
8779 }
8780 else
8781 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8782 global_tlsarg = NULL_RTX;
8783
8784 if (rs6000_tls_size == 16)
8785 {
8786 if (TARGET_64BIT)
8787 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8788 else
8789 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8790 }
8791 else if (rs6000_tls_size == 32)
8792 {
8793 tmp2 = gen_reg_rtx (Pmode);
8794 if (TARGET_64BIT)
8795 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8796 else
8797 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8798 emit_insn (insn);
8799 if (TARGET_64BIT)
8800 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8801 else
8802 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8803 }
8804 else
8805 {
8806 tmp2 = gen_reg_rtx (Pmode);
8807 if (TARGET_64BIT)
8808 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8809 else
8810 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8811 emit_insn (insn);
8812 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8813 }
8814 emit_insn (insn);
8815 }
8816 else
8817 {
8818 /* IE, or 64-bit offset LE. */
8819 tmp2 = gen_reg_rtx (Pmode);
8820 if (TARGET_64BIT)
8821 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8822 else
8823 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8824 emit_insn (insn);
8825 if (TARGET_64BIT)
8826 insn = gen_tls_tls_64 (dest, tmp2, addr);
8827 else
8828 insn = gen_tls_tls_32 (dest, tmp2, addr);
8829 emit_insn (insn);
8830 }
8831 }
8832
8833 return dest;
8834 }
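/* Worked example (illustrative, assuming a 64-bit target, a local-exec
   variable "x", and the usual machine-description patterns): with
   -mtls-size=16 the first branch above emits the single insn

	addi dest,13,x@tprel

   and with -mtls-size=32 the second branch emits

	addis tmp,13,x@tprel@ha
	addi dest,tmp,x@tprel@l

   where r13 holds the thread pointer. */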
8835
8836 /* Only create the global variable for the stack protect guard if we are using
8837 the global flavor of that guard. */
8838 static tree
8839 rs6000_init_stack_protect_guard (void)
8840 {
8841 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8842 return default_stack_protect_guard ();
8843
8844 return NULL_TREE;
8845 }
8846
8847 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8848
8849 static bool
8850 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8851 {
8852 if (GET_CODE (x) == HIGH
8853 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8854 return true;
8855
8856 /* A TLS symbol in the TOC cannot contain a sum. */
8857 if (GET_CODE (x) == CONST
8858 && GET_CODE (XEXP (x, 0)) == PLUS
8859 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8860 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8861 return true;
8862
8863 /* Do not place an ELF TLS symbol in the constant pool. */
8864 return TARGET_ELF && tls_referenced_p (x);
8865 }
8866
8867 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8868 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8869 can be addressed relative to the toc pointer. */
8870
8871 static bool
8872 use_toc_relative_ref (rtx sym, machine_mode mode)
8873 {
8874 return ((constant_pool_expr_p (sym)
8875 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8876 get_pool_mode (sym)))
8877 || (TARGET_CMODEL == CMODEL_MEDIUM
8878 && SYMBOL_REF_LOCAL_P (sym)
8879 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8880 }
8881
8882 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8883 replace the input X, or the original X if no replacement is called for.
8884 The output parameter *WIN is 1 if the calling macro should goto WIN,
8885 0 if it should not.
8886
8887 For RS/6000, we wish to handle large displacements off a base
8888 register by splitting the addend across an addi/addis and the mem insn.
8889 This cuts the number of extra insns needed from 3 to 1.
8890
8891 On Darwin, we use this to generate code for floating point constants.
8892 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8893 The Darwin code is inside #if TARGET_MACHO because only then are the
8894 machopic_* functions defined. */
8895 static rtx
8896 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8897 int opnum, int type,
8898 int ind_levels ATTRIBUTE_UNUSED, int *win)
8899 {
8900 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8901 bool quad_offset_p = mode_supports_dq_form (mode);
8902
8903 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8904 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8905 if (reg_offset_p
8906 && opnum == 1
8907 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8908 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8909 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8910 && TARGET_P9_VECTOR)
8911 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8912 && TARGET_P9_VECTOR)))
8913 reg_offset_p = false;
8914
8915 /* We must recognize output that we have already generated ourselves. */
8916 if (GET_CODE (x) == PLUS
8917 && GET_CODE (XEXP (x, 0)) == PLUS
8918 && REG_P (XEXP (XEXP (x, 0), 0))
8919 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8920 && CONST_INT_P (XEXP (x, 1)))
8921 {
8922 if (TARGET_DEBUG_ADDR)
8923 {
8924 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8925 debug_rtx (x);
8926 }
8927 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8928 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8929 opnum, (enum reload_type) type);
8930 *win = 1;
8931 return x;
8932 }
8933
8934 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8935 if (GET_CODE (x) == LO_SUM
8936 && GET_CODE (XEXP (x, 0)) == HIGH)
8937 {
8938 if (TARGET_DEBUG_ADDR)
8939 {
8940 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8941 debug_rtx (x);
8942 }
8943 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8944 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8945 opnum, (enum reload_type) type);
8946 *win = 1;
8947 return x;
8948 }
8949
8950 #if TARGET_MACHO
8951 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8952 && GET_CODE (x) == LO_SUM
8953 && GET_CODE (XEXP (x, 0)) == PLUS
8954 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8955 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8956 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8957 && machopic_operand_p (XEXP (x, 1)))
8958 {
8959 /* Result of previous invocation of this function on Darwin
8960 floating point constant. */
8961 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8962 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8963 opnum, (enum reload_type) type);
8964 *win = 1;
8965 return x;
8966 }
8967 #endif
8968
8969 if (TARGET_CMODEL != CMODEL_SMALL
8970 && reg_offset_p
8971 && !quad_offset_p
8972 && small_toc_ref (x, VOIDmode))
8973 {
8974 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8975 x = gen_rtx_LO_SUM (Pmode, hi, x);
8976 if (TARGET_DEBUG_ADDR)
8977 {
8978 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8979 debug_rtx (x);
8980 }
8981 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8982 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8983 opnum, (enum reload_type) type);
8984 *win = 1;
8985 return x;
8986 }
8987
8988 if (GET_CODE (x) == PLUS
8989 && REG_P (XEXP (x, 0))
8990 && HARD_REGISTER_P (XEXP (x, 0))
8991 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8992 && CONST_INT_P (XEXP (x, 1))
8993 && reg_offset_p
8994 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8995 {
8996 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
8997 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
8998 HOST_WIDE_INT high
8999 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9000
9001 /* Check for 32-bit overflow or quad addresses with one of the
9002 four least significant bits set. */
9003 if (high + low != val
9004 || (quad_offset_p && (low & 0xf)))
9005 {
9006 *win = 0;
9007 return x;
9008 }
9009
9010 /* Reload the high part into a base reg; leave the low part
9011 in the mem directly. */
9012
9013 x = gen_rtx_PLUS (GET_MODE (x),
9014 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9015 GEN_INT (high)),
9016 GEN_INT (low));
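/* Worked example (illustrative): val = 0x12345 splits into low = 0x2345
   and high = 0x10000, so the address becomes (reg + 0x10000) + 0x2345;
   the push_reload below moves the inner sum into a base register (one
   addis) while the 16-bit low part stays in the mem insn. */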
9017
9018 if (TARGET_DEBUG_ADDR)
9019 {
9020 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9021 debug_rtx (x);
9022 }
9023 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9024 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9025 opnum, (enum reload_type) type);
9026 *win = 1;
9027 return x;
9028 }
9029
9030 if (SYMBOL_REF_P (x)
9031 && reg_offset_p
9032 && !quad_offset_p
9033 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9034 #if TARGET_MACHO
9035 && DEFAULT_ABI == ABI_DARWIN
9036 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9037 && machopic_symbol_defined_p (x)
9038 #else
9039 && DEFAULT_ABI == ABI_V4
9040 && !flag_pic
9041 #endif
9042 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9043 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9044 without fprs.
9045 ??? Assume floating point reg based on mode? This assumption is
9046 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9047 where reload ends up doing a DFmode load of a constant from
9048 mem using two gprs. Unfortunately, at this point reload
9049 hasn't yet selected regs so poking around in reload data
9050 won't help and even if we could figure out the regs reliably,
9051 we'd still want to allow this transformation when the mem is
9052 naturally aligned. Since we say the address is good here, we
9053 can't disable offsets from LO_SUMs in mem_operand_gpr.
9054 FIXME: Allow offset from lo_sum for other modes too, when
9055 mem is sufficiently aligned.
9056
9057 Also disallow this if the type can go in VMX/Altivec registers, since
9058 those registers do not have d-form (reg+offset) address modes. */
9059 && !reg_addr[mode].scalar_in_vmx_p
9060 && mode != TFmode
9061 && mode != TDmode
9062 && mode != IFmode
9063 && mode != KFmode
9064 && (mode != TImode || !TARGET_VSX)
9065 && mode != PTImode
9066 && (mode != DImode || TARGET_POWERPC64)
9067 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9068 || TARGET_HARD_FLOAT))
9069 {
9070 #if TARGET_MACHO
9071 if (flag_pic)
9072 {
9073 rtx offset = machopic_gen_offset (x);
9074 x = gen_rtx_LO_SUM (GET_MODE (x),
9075 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9076 gen_rtx_HIGH (Pmode, offset)), offset);
9077 }
9078 else
9079 #endif
9080 x = gen_rtx_LO_SUM (GET_MODE (x),
9081 gen_rtx_HIGH (Pmode, x), x);
9082
9083 if (TARGET_DEBUG_ADDR)
9084 {
9085 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9086 debug_rtx (x);
9087 }
9088 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9089 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9090 opnum, (enum reload_type) type);
9091 *win = 1;
9092 return x;
9093 }
9094
9095 /* Reload an offset address wrapped by an AND that represents the
9096 masking of the lower bits. Strip the outer AND and let reload
9097 convert the offset address into an indirect address. For VSX,
9098 force reload to create the address with an AND in a separate
9099 register, because we can't guarantee an altivec register will
9100 be used. */
9101 if (VECTOR_MEM_ALTIVEC_P (mode)
9102 && GET_CODE (x) == AND
9103 && GET_CODE (XEXP (x, 0)) == PLUS
9104 && REG_P (XEXP (XEXP (x, 0), 0))
9105 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9106 && CONST_INT_P (XEXP (x, 1))
9107 && INTVAL (XEXP (x, 1)) == -16)
9108 {
9109 x = XEXP (x, 0);
9110 *win = 1;
9111 return x;
9112 }
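/* Illustrative instance: (and (plus r9 (const_int 16)) (const_int -16))
   is rewritten here to (plus r9 (const_int 16)); reload then converts
   that offset address into the indirect form the Altivec load/store
   patterns accept. */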
9113
9114 if (TARGET_TOC
9115 && reg_offset_p
9116 && !quad_offset_p
9117 && SYMBOL_REF_P (x)
9118 && use_toc_relative_ref (x, mode))
9119 {
9120 x = create_TOC_reference (x, NULL_RTX);
9121 if (TARGET_CMODEL != CMODEL_SMALL)
9122 {
9123 if (TARGET_DEBUG_ADDR)
9124 {
9125 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9126 debug_rtx (x);
9127 }
9128 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9129 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9130 opnum, (enum reload_type) type);
9131 }
9132 *win = 1;
9133 return x;
9134 }
9135 *win = 0;
9136 return x;
9137 }
9138
9139 /* Debug version of rs6000_legitimize_reload_address. */
9140 static rtx
9141 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9142 int opnum, int type,
9143 int ind_levels, int *win)
9144 {
9145 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9146 ind_levels, win);
9147 fprintf (stderr,
9148 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9149 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9150 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9151 debug_rtx (x);
9152
9153 if (x == ret)
9154 fprintf (stderr, "Same address returned\n");
9155 else if (!ret)
9156 fprintf (stderr, "NULL returned\n");
9157 else
9158 {
9159 fprintf (stderr, "New address:\n");
9160 debug_rtx (ret);
9161 }
9162
9163 return ret;
9164 }
9165
9166 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9167 that is a valid memory address for an instruction.
9168 The MODE argument is the machine mode for the MEM expression
9169 that wants to use this address.
9170
9171 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9172 refers to a constant pool entry of an address (or the sum of it
9173 plus a constant), a short (16-bit signed) constant plus a register,
9174 the sum of two registers, or a register indirect, possibly with an
9175 auto-increment. For DFmode, DDmode and DImode with a constant plus
9176 register, we must ensure that both words are addressable or PowerPC64
9177 with offset word aligned.
9178
9179 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9180 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9181 because adjacent memory cells are accessed by adding word-sized offsets
9182 during assembly output. */
9183 static bool
9184 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9185 {
9186 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9187 bool quad_offset_p = mode_supports_dq_form (mode);
9188
9189 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9190 if (VECTOR_MEM_ALTIVEC_P (mode)
9191 && GET_CODE (x) == AND
9192 && CONST_INT_P (XEXP (x, 1))
9193 && INTVAL (XEXP (x, 1)) == -16)
9194 x = XEXP (x, 0);
9195
9196 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9197 return 0;
9198 if (legitimate_indirect_address_p (x, reg_ok_strict))
9199 return 1;
9200 if (TARGET_UPDATE
9201 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9202 && mode_supports_pre_incdec_p (mode)
9203 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9204 return 1;
9205 /* Handle restricted vector d-form offsets in ISA 3.0. */
9206 if (quad_offset_p)
9207 {
9208 if (quad_address_p (x, mode, reg_ok_strict))
9209 return 1;
9210 }
9211 else if (virtual_stack_registers_memory_p (x))
9212 return 1;
9213
9214 else if (reg_offset_p)
9215 {
9216 if (legitimate_small_data_p (mode, x))
9217 return 1;
9218 if (legitimate_constant_pool_address_p (x, mode,
9219 reg_ok_strict || lra_in_progress))
9220 return 1;
9221 }
9222
9223 /* For TImode, if we have TImode in VSX registers, only allow register
9224 indirect addresses. This will allow the values to go in either GPRs
9225 or VSX registers without reloading. The vector types would tend to
9226 go into VSX registers, so we allow REG+REG, while TImode seems
9227 somewhat split, in that some uses are GPR based, and some VSX based. */
9228 /* FIXME: We could loosen this by changing the following to
9229 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9230 but currently we cannot allow REG+REG addressing for TImode. See
9231 PR72827 for complete details on how this ends up hoodwinking DSE. */
9232 if (mode == TImode && TARGET_VSX)
9233 return 0;
9234 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9235 if (! reg_ok_strict
9236 && reg_offset_p
9237 && GET_CODE (x) == PLUS
9238 && REG_P (XEXP (x, 0))
9239 && (XEXP (x, 0) == virtual_stack_vars_rtx
9240 || XEXP (x, 0) == arg_pointer_rtx)
9241 && CONST_INT_P (XEXP (x, 1)))
9242 return 1;
9243 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9244 return 1;
9245 if (!FLOAT128_2REG_P (mode)
9246 && (TARGET_HARD_FLOAT
9247 || TARGET_POWERPC64
9248 || (mode != DFmode && mode != DDmode))
9249 && (TARGET_POWERPC64 || mode != DImode)
9250 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9251 && mode != PTImode
9252 && !avoiding_indexed_address_p (mode)
9253 && legitimate_indexed_address_p (x, reg_ok_strict))
9254 return 1;
9255 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9256 && mode_supports_pre_modify_p (mode)
9257 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9258 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9259 reg_ok_strict, false)
9260 || (!avoiding_indexed_address_p (mode)
9261 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9262 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9263 return 1;
9264 if (reg_offset_p && !quad_offset_p
9265 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9266 return 1;
9267 return 0;
9268 }
9269
9270 /* Debug version of rs6000_legitimate_address_p. */
9271 static bool
9272 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9273 bool reg_ok_strict)
9274 {
9275 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9276 fprintf (stderr,
9277 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9278 "strict = %d, reload = %s, code = %s\n",
9279 ret ? "true" : "false",
9280 GET_MODE_NAME (mode),
9281 reg_ok_strict,
9282 (reload_completed ? "after" : "before"),
9283 GET_RTX_NAME (GET_CODE (x)));
9284 debug_rtx (x);
9285
9286 return ret;
9287 }
9288
9289 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9290
9291 static bool
9292 rs6000_mode_dependent_address_p (const_rtx addr,
9293 addr_space_t as ATTRIBUTE_UNUSED)
9294 {
9295 return rs6000_mode_dependent_address_ptr (addr);
9296 }
9297
9298 /* Go to LABEL if ADDR (a legitimate address expression)
9299 has an effect that depends on the machine mode it is used for.
9300
9301 On the RS/6000 this is true of all integral offsets (since AltiVec
9302 and VSX modes don't allow them) and of any pre-increment or decrement.
9303
9304 ??? Except that due to conceptual problems in offsettable_address_p
9305 we can't really report the problems of integral offsets. So leave
9306 this assuming that the adjustable offset must be valid for the
9307 sub-words of a TFmode operand, which is what we had before. */
9308
9309 static bool
9310 rs6000_mode_dependent_address (const_rtx addr)
9311 {
9312 switch (GET_CODE (addr))
9313 {
9314 case PLUS:
9315 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9316 is considered a legitimate address before reload, so there
9317 are no offset restrictions in that case. Note that this
9318 condition is safe in strict mode because any address involving
9319 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9320 been rejected as illegitimate. */
9321 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9322 && XEXP (addr, 0) != arg_pointer_rtx
9323 && CONST_INT_P (XEXP (addr, 1)))
9324 {
9325 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9326 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9327 }
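/* Illustrative instance of the bound above (not from the original
   comment): on a 64-bit target an offset of 0x7ff8 is flagged as
   mode-dependent, because the last doubleword of a 16-byte access
   at that offset would need displacement 0x8000, which no longer
   fits in a signed 16-bit field. */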
9328 break;
9329
9330 case LO_SUM:
9331 /* Anything in the constant pool is sufficiently aligned that
9332 all bytes have the same high part address. */
9333 return !legitimate_constant_pool_address_p (addr, QImode, false);
9334
9335 /* Auto-increment cases are now treated generically in recog.c. */
9336 case PRE_MODIFY:
9337 return TARGET_UPDATE;
9338
9339 /* AND is only allowed in Altivec loads. */
9340 case AND:
9341 return true;
9342
9343 default:
9344 break;
9345 }
9346
9347 return false;
9348 }
9349
9350 /* Debug version of rs6000_mode_dependent_address. */
9351 static bool
9352 rs6000_debug_mode_dependent_address (const_rtx addr)
9353 {
9354 bool ret = rs6000_mode_dependent_address (addr);
9355
9356 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9357 ret ? "true" : "false");
9358 debug_rtx (addr);
9359
9360 return ret;
9361 }
9362
9363 /* Implement FIND_BASE_TERM. */
9364
9365 rtx
9366 rs6000_find_base_term (rtx op)
9367 {
9368 rtx base;
9369
9370 base = op;
9371 if (GET_CODE (base) == CONST)
9372 base = XEXP (base, 0);
9373 if (GET_CODE (base) == PLUS)
9374 base = XEXP (base, 0);
9375 if (GET_CODE (base) == UNSPEC)
9376 switch (XINT (base, 1))
9377 {
9378 case UNSPEC_TOCREL:
9379 case UNSPEC_MACHOPIC_OFFSET:
9380 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9381 for aliasing purposes. */
9382 return XVECEXP (base, 0, 0);
9383 }
9384
9385 return op;
9386 }
9387
9388 /* More elaborate version of recog's offsettable_memref_p predicate
9389 that works around the ??? note of rs6000_mode_dependent_address.
9390 In particular it accepts
9391
9392 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9393
9394 in 32-bit mode, which the recog predicate rejects. */
9395
9396 static bool
9397 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9398 {
9399 bool worst_case;
9400
9401 if (!MEM_P (op))
9402 return false;
9403
9404 /* First mimic offsettable_memref_p. */
9405 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9406 return true;
9407
9408 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9409 the latter predicate knows nothing about the mode of the memory
9410 reference and, therefore, assumes that it is the largest supported
9411 mode (TFmode). As a consequence, legitimate offsettable memory
9412 references are rejected. rs6000_legitimate_offset_address_p contains
9413 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9414 at least with a little bit of help here given that we know the
9415 actual registers used. */
9416 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9417 || GET_MODE_SIZE (reg_mode) == 4);
9418 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9419 strict, worst_case);
9420 }
9421
9422 /* Determine the reassociation width to be used in reassociate_bb.
9423 This takes into account how many parallel operations we
9424 can actually do of a given type, and also the latency.
9425 P8:
9426 int add/sub 6/cycle
9427 mul 2/cycle
9428 vect add/sub/mul 2/cycle
9429 fp add/sub/mul 2/cycle
9430 dfp 1/cycle
9431 */
9432
9433 static int
9434 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9435 machine_mode mode)
9436 {
9437 switch (rs6000_tune)
9438 {
9439 case PROCESSOR_POWER8:
9440 case PROCESSOR_POWER9:
9441 if (DECIMAL_FLOAT_MODE_P (mode))
9442 return 1;
9443 if (VECTOR_MODE_P (mode))
9444 return 4;
9445 if (INTEGRAL_MODE_P (mode))
9446 return 1;
9447 if (FLOAT_MODE_P (mode))
9448 return 4;
9449 break;
9450 default:
9451 break;
9452 }
9453 return 1;
9454 }
9455
9456 /* Change register usage conditional on target flags. */
9457 static void
9458 rs6000_conditional_register_usage (void)
9459 {
9460 int i;
9461
9462 if (TARGET_DEBUG_TARGET)
9463 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9464
9465 /* Set MQ register fixed (already call_used) so that it will not be
9466 allocated. */
9467 fixed_regs[64] = 1;
9468
9469 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9470 if (TARGET_64BIT)
9471 fixed_regs[13] = call_used_regs[13]
9472 = call_really_used_regs[13] = 1;
9473
9474 /* Conditionally disable FPRs. */
9475 if (TARGET_SOFT_FLOAT)
9476 for (i = 32; i < 64; i++)
9477 fixed_regs[i] = call_used_regs[i]
9478 = call_really_used_regs[i] = 1;
9479
9480 /* The TOC register is not killed across calls in a way that is
9481 visible to the compiler. */
9482 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9483 call_really_used_regs[2] = 0;
9484
9485 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9486 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9487
9488 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9489 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9490 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9491 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9492
9493 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9494 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9495 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9496 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9497
9498 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9499 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9500 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9501
9502 if (!TARGET_ALTIVEC && !TARGET_VSX)
9503 {
9504 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9505 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9506 call_really_used_regs[VRSAVE_REGNO] = 1;
9507 }
9508
9509 if (TARGET_ALTIVEC || TARGET_VSX)
9510 global_regs[VSCR_REGNO] = 1;
9511
9512 if (TARGET_ALTIVEC_ABI)
9513 {
9514 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9515 call_used_regs[i] = call_really_used_regs[i] = 1;
9516
9517 /* AIX reserves VR20:31 in non-extended ABI mode. */
9518 if (TARGET_XCOFF)
9519 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9520 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9521 }
9522 }
9523
9524 \f
9525 /* Output insns to set DEST equal to the constant SOURCE as a series of
9526 lis, ori and shl instructions and return TRUE. */
9527
9528 bool
9529 rs6000_emit_set_const (rtx dest, rtx source)
9530 {
9531 machine_mode mode = GET_MODE (dest);
9532 rtx temp, set;
9533 rtx_insn *insn;
9534 HOST_WIDE_INT c;
9535
9536 gcc_checking_assert (CONST_INT_P (source));
9537 c = INTVAL (source);
9538 switch (mode)
9539 {
9540 case E_QImode:
9541 case E_HImode:
9542 emit_insn (gen_rtx_SET (dest, source));
9543 return true;
9544
9545 case E_SImode:
9546 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9547
9548 emit_insn (gen_rtx_SET (copy_rtx (temp),
9549 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9550 emit_insn (gen_rtx_SET (dest,
9551 gen_rtx_IOR (SImode, copy_rtx (temp),
9552 GEN_INT (c & 0xffff))));
9553 break;
9554
9555 case E_DImode:
9556 if (!TARGET_POWERPC64)
9557 {
9558 rtx hi, lo;
9559
9560 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9561 DImode);
9562 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9563 DImode);
9564 emit_move_insn (hi, GEN_INT (c >> 32));
9565 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9566 emit_move_insn (lo, GEN_INT (c));
9567 }
9568 else
9569 rs6000_emit_set_long_const (dest, c);
9570 break;
9571
9572 default:
9573 gcc_unreachable ();
9574 }
9575
9576 insn = get_last_insn ();
9577 set = single_set (insn);
9578 if (! CONSTANT_P (SET_SRC (set)))
9579 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9580
9581 return true;
9582 }
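/* Worked SImode example (illustrative): c = 0x12345678 is emitted as

	lis tmp,0x1234		# tmp = 0x12340000
	ori dest,tmp,0x5678

   matching the two SETs in the E_SImode case above. */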
9583
9584 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9585 Output insns to set DEST equal to the constant C as a series of
9586 lis, ori and shl instructions. */
9587
9588 static void
9589 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9590 {
9591 rtx temp;
9592 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9593
9594 ud1 = c & 0xffff;
9595 c = c >> 16;
9596 ud2 = c & 0xffff;
9597 c = c >> 16;
9598 ud3 = c & 0xffff;
9599 c = c >> 16;
9600 ud4 = c & 0xffff;
9601
9602 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9603 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9604 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9605
9606 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9607 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9608 {
9609 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9610
9611 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9612 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9613 if (ud1 != 0)
9614 emit_move_insn (dest,
9615 gen_rtx_IOR (DImode, copy_rtx (temp),
9616 GEN_INT (ud1)));
9617 }
9618 else if (ud3 == 0 && ud4 == 0)
9619 {
9620 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9621
9622 gcc_assert (ud2 & 0x8000);
9623 emit_move_insn (copy_rtx (temp),
9624 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9625 if (ud1 != 0)
9626 emit_move_insn (copy_rtx (temp),
9627 gen_rtx_IOR (DImode, copy_rtx (temp),
9628 GEN_INT (ud1)));
9629 emit_move_insn (dest,
9630 gen_rtx_ZERO_EXTEND (DImode,
9631 gen_lowpart (SImode,
9632 copy_rtx (temp))));
9633 }
9634 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9635 || (ud4 == 0 && ! (ud3 & 0x8000)))
9636 {
9637 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9638
9639 emit_move_insn (copy_rtx (temp),
9640 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9641 if (ud2 != 0)
9642 emit_move_insn (copy_rtx (temp),
9643 gen_rtx_IOR (DImode, copy_rtx (temp),
9644 GEN_INT (ud2)));
9645 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9646 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9647 GEN_INT (16)));
9648 if (ud1 != 0)
9649 emit_move_insn (dest,
9650 gen_rtx_IOR (DImode, copy_rtx (temp),
9651 GEN_INT (ud1)));
9652 }
9653 else
9654 {
9655 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9656
9657 emit_move_insn (copy_rtx (temp),
9658 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9659 if (ud3 != 0)
9660 emit_move_insn (copy_rtx (temp),
9661 gen_rtx_IOR (DImode, copy_rtx (temp),
9662 GEN_INT (ud3)));
9663
9664 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9665 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9666 GEN_INT (32)));
9667 if (ud2 != 0)
9668 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9669 gen_rtx_IOR (DImode, copy_rtx (temp),
9670 GEN_INT (ud2 << 16)));
9671 if (ud1 != 0)
9672 emit_move_insn (dest,
9673 gen_rtx_IOR (DImode, copy_rtx (temp),
9674 GEN_INT (ud1)));
9675 }
9676 }
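/* Worked example for the final (general) case above (illustrative):
   c = 0x123456789abcdef0 gives ud4 = 0x1234, ud3 = 0x5678,
   ud2 = 0x9abc and ud1 = 0xdef0, producing

	lis tmp,0x1234
	ori tmp,tmp,0x5678
	sldi tmp,tmp,32
	oris tmp,tmp,0x9abc
	ori dest,tmp,0xdef0  */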
9677
9678 /* Helper for rs6000_emit_move below. Get rid of [r+r] memory refs
9679 in cases where they won't work (TImode, TFmode, TDmode, PTImode). */
9680
9681 static void
9682 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9683 {
9684 if (MEM_P (operands[0])
9685 && !REG_P (XEXP (operands[0], 0))
9686 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9687 GET_MODE (operands[0]), false))
9688 operands[0]
9689 = replace_equiv_address (operands[0],
9690 copy_addr_to_reg (XEXP (operands[0], 0)));
9691
9692 if (MEM_P (operands[1])
9693 && !REG_P (XEXP (operands[1], 0))
9694 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9695 GET_MODE (operands[1]), false))
9696 operands[1]
9697 = replace_equiv_address (operands[1],
9698 copy_addr_to_reg (XEXP (operands[1], 0)));
9699 }
9700
9701 /* Generate a vector of constants to permute MODE for a little-endian
9702 storage operation by swapping the two halves of a vector. */
9703 static rtvec
9704 rs6000_const_vec (machine_mode mode)
9705 {
9706 int i, subparts;
9707 rtvec v;
9708
9709 switch (mode)
9710 {
9711 case E_V1TImode:
9712 subparts = 1;
9713 break;
9714 case E_V2DFmode:
9715 case E_V2DImode:
9716 subparts = 2;
9717 break;
9718 case E_V4SFmode:
9719 case E_V4SImode:
9720 subparts = 4;
9721 break;
9722 case E_V8HImode:
9723 subparts = 8;
9724 break;
9725 case E_V16QImode:
9726 subparts = 16;
9727 break;
9728 default:
9729 gcc_unreachable ();
9730 }
9731
9732 v = rtvec_alloc (subparts);
9733
9734 for (i = 0; i < subparts / 2; ++i)
9735 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9736 for (i = subparts / 2; i < subparts; ++i)
9737 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9738
9739 return v;
9740 }
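/* For example (illustrative): for V4SImode this returns the selector
   { 2, 3, 0, 1 }, i.e. the two 64-bit halves of the vector swapped;
   for V16QImode it returns { 8, ..., 15, 0, ..., 7 }. */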
9741
9742 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9743 store operation. */
9744 void
9745 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9746 {
9747 /* Scalar permutations are easier to express in integer modes rather than
9748 floating-point modes, so cast them here. We use V1TImode instead
9749 of TImode to ensure that the values don't go through GPRs. */
9750 if (FLOAT128_VECTOR_P (mode))
9751 {
9752 dest = gen_lowpart (V1TImode, dest);
9753 source = gen_lowpart (V1TImode, source);
9754 mode = V1TImode;
9755 }
9756
9757 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9758 scalar. */
9759 if (mode == TImode || mode == V1TImode)
9760 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9761 GEN_INT (64))));
9762 else
9763 {
9764 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9765 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9766 }
9767 }
9768
9769 /* Emit a little-endian load from vector memory location SOURCE to VSX
9770 register DEST in mode MODE. The load is done with two permuting
9771 insns that represent an lxvd2x and an xxpermdi.
9772 void
9773 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9774 {
9775 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9776 V1TImode). */
9777 if (mode == TImode || mode == V1TImode)
9778 {
9779 mode = V2DImode;
9780 dest = gen_lowpart (V2DImode, dest);
9781 source = adjust_address (source, V2DImode, 0);
9782 }
9783
9784 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9785 rs6000_emit_le_vsx_permute (tmp, source, mode);
9786 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9787 }
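/* A sketch of the resulting machine code (illustrative): loading a
   V2DI value from the address in r9 into vs0 becomes

	lxvd2x 0,0,9		# doublewords arrive swapped on LE
	xxpermdi 0,0,0,2	# swap them back

   once the two permuting SETs above are matched. */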
9788
9789 /* Emit a little-endian store to vector memory location DEST from VSX
9790 register SOURCE in mode MODE. The store is done with two permuting
9791 insns that represent an xxpermdi and an stxvd2x.
9792 void
9793 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9794 {
9795 /* This should never be called during or after LRA, because it does
9796 not re-permute the source register. It is intended only for use
9797 during expand. */
9798 gcc_assert (!lra_in_progress && !reload_completed);
9799
9800 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9801 V1TImode). */
9802 if (mode == TImode || mode == V1TImode)
9803 {
9804 mode = V2DImode;
9805 dest = adjust_address (dest, V2DImode, 0);
9806 source = gen_lowpart (V2DImode, source);
9807 }
9808
9809 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9810 rs6000_emit_le_vsx_permute (tmp, source, mode);
9811 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9812 }
9813
9814 /* Emit a sequence representing a little-endian VSX load or store,
9815 moving data from SOURCE to DEST in mode MODE. This is done
9816 separately from rs6000_emit_move to ensure it is called only
9817 during expand. LE VSX loads and stores introduced later are
9818 handled with a split. The expand-time RTL generation allows
9819 us to optimize away redundant pairs of register-permutes. */
9820 void
9821 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9822 {
9823 gcc_assert (!BYTES_BIG_ENDIAN
9824 && VECTOR_MEM_VSX_P (mode)
9825 && !TARGET_P9_VECTOR
9826 && !gpr_or_gpr_p (dest, source)
9827 && (MEM_P (source) ^ MEM_P (dest)));
9828
9829 if (MEM_P (source))
9830 {
9831 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9832 rs6000_emit_le_vsx_load (dest, source, mode);
9833 }
9834 else
9835 {
9836 if (!REG_P (source))
9837 source = force_reg (mode, source);
9838 rs6000_emit_le_vsx_store (dest, source, mode);
9839 }
9840 }
9841
9842 /* Return whether an SFmode or SImode move can be done without converting
9843 one mode to another. This arises when we have:
9844
9845 (SUBREG:SF (REG:SI ...))
9846 (SUBREG:SI (REG:SF ...))
9847
9848 and one of the values is in a floating point/vector register, where SFmode
9849 scalars are stored in DFmode format. */
9850
9851 bool
9852 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9853 {
9854 if (TARGET_ALLOW_SF_SUBREG)
9855 return true;
9856
9857 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9858 return true;
9859
9860 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9861 return true;
9862
9863 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9864 if (SUBREG_P (dest))
9865 {
9866 rtx dest_subreg = SUBREG_REG (dest);
9867 rtx src_subreg = SUBREG_REG (src);
9868 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9869 }
9870
9871 return false;
9872 }
9873
9874
9875 /* Helper function to change moves with:
9876
9877 (SUBREG:SF (REG:SI)) and
9878 (SUBREG:SI (REG:SF))
9879
9880 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9881 values are stored as DFmode values in the VSX registers. We need to convert
9882 the bits before we can use a direct move or operate on the bits in the
9883 vector register as an integer type.
9884
9885 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9886
9887 static bool
9888 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9889 {
9890 if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
9891 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9892 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9893 {
9894 rtx inner_source = SUBREG_REG (source);
9895 machine_mode inner_mode = GET_MODE (inner_source);
9896
9897 if (mode == SImode && inner_mode == SFmode)
9898 {
9899 emit_insn (gen_movsi_from_sf (dest, inner_source));
9900 return true;
9901 }
9902
9903 if (mode == SFmode && inner_mode == SImode)
9904 {
9905 emit_insn (gen_movsf_from_si (dest, inner_source));
9906 return true;
9907 }
9908 }
9909
9910 return false;
9911 }
9912
9913 /* Emit a move from SOURCE to DEST in mode MODE. */
9914 void
9915 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9916 {
9917 rtx operands[2];
9918 operands[0] = dest;
9919 operands[1] = source;
9920
9921 if (TARGET_DEBUG_ADDR)
9922 {
9923 fprintf (stderr,
9924 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9925 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9926 GET_MODE_NAME (mode),
9927 lra_in_progress,
9928 reload_completed,
9929 can_create_pseudo_p ());
9930 debug_rtx (dest);
9931 fprintf (stderr, "source:\n");
9932 debug_rtx (source);
9933 }
9934
9935 /* Check that we get CONST_WIDE_INT only when we should. */
9936 if (CONST_WIDE_INT_P (operands[1])
9937 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9938 gcc_unreachable ();
9939
9940 #ifdef HAVE_AS_GNU_ATTRIBUTE
9941 /* If we use a long double type, set the flags in .gnu_attribute that say
9942 what the long double type is. This is to allow the linker's warning
9943 message for the wrong long double to be useful, even if the function does
9944 not do a call (for example, doing a 128-bit add on power9 if the long
9945 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9946 are used and they aren't the default long double type. */
9947 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9948 {
9949 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9950 rs6000_passes_float = rs6000_passes_long_double = true;
9951
9952 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9953 rs6000_passes_float = rs6000_passes_long_double = true;
9954 }
9955 #endif
9956
9957 /* See if we need to special case SImode/SFmode SUBREG moves. */
9958 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9959 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9960 return;
9961
9962 /* Check if GCC is setting up a block move that will end up using FP
9963 registers as temporaries. We must make sure this is acceptable. */
9964 if (MEM_P (operands[0])
9965 && MEM_P (operands[1])
9966 && mode == DImode
9967 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9968 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9969 && ! (rs6000_slow_unaligned_access (SImode,
9970 (MEM_ALIGN (operands[0]) > 32
9971 ? 32 : MEM_ALIGN (operands[0])))
9972 || rs6000_slow_unaligned_access (SImode,
9973 (MEM_ALIGN (operands[1]) > 32
9974 ? 32 : MEM_ALIGN (operands[1]))))
9975 && ! MEM_VOLATILE_P (operands [0])
9976 && ! MEM_VOLATILE_P (operands [1]))
9977 {
9978 emit_move_insn (adjust_address (operands[0], SImode, 0),
9979 adjust_address (operands[1], SImode, 0));
9980 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9981 adjust_address (copy_rtx (operands[1]), SImode, 4));
9982 return;
9983 }
9984
9985 if (can_create_pseudo_p () && MEM_P (operands[0])
9986 && !gpc_reg_operand (operands[1], mode))
9987 operands[1] = force_reg (mode, operands[1]);
9988
9989 /* Recognize the case where operand[1] is a reference to thread-local
9990 data and load its address to a register. */
9991 if (tls_referenced_p (operands[1]))
9992 {
9993 enum tls_model model;
9994 rtx tmp = operands[1];
9995 rtx addend = NULL;
9996
9997 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9998 {
9999 addend = XEXP (XEXP (tmp, 0), 1);
10000 tmp = XEXP (XEXP (tmp, 0), 0);
10001 }
10002
10003 gcc_assert (SYMBOL_REF_P (tmp));
10004 model = SYMBOL_REF_TLS_MODEL (tmp);
10005 gcc_assert (model != 0);
10006
10007 tmp = rs6000_legitimize_tls_address (tmp, model);
10008 if (addend)
10009 {
10010 tmp = gen_rtx_PLUS (mode, tmp, addend);
10011 tmp = force_operand (tmp, operands[0]);
10012 }
10013 operands[1] = tmp;
10014 }
10015
10016 /* 128-bit constant floating-point values on Darwin should really be loaded
10017 as two parts. However, this premature splitting is a problem when DFmode
10018 values can go into Altivec registers. */
10019 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
10020 && !reg_addr[DFmode].scalar_in_vmx_p)
10021 {
10022 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10023 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10024 DFmode);
10025 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10026 GET_MODE_SIZE (DFmode)),
10027 simplify_gen_subreg (DFmode, operands[1], mode,
10028 GET_MODE_SIZE (DFmode)),
10029 DFmode);
10030 return;
10031 }
10032
10033 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10034 p1:SD) if p1 is not of floating point class and p0 is spilled as
10035 we can have no analogous movsd_store for this. */
10036 if (lra_in_progress && mode == DDmode
10037 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10038 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10039 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
10040 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10041 {
10042 enum reg_class cl;
10043 int regno = REGNO (SUBREG_REG (operands[1]));
10044
10045 if (!HARD_REGISTER_NUM_P (regno))
10046 {
10047 cl = reg_preferred_class (regno);
10048 regno = reg_renumber[regno];
10049 if (regno < 0)
10050 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10051 }
10052 if (regno >= 0 && ! FP_REGNO_P (regno))
10053 {
10054 mode = SDmode;
10055 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10056 operands[1] = SUBREG_REG (operands[1]);
10057 }
10058 }
10059 if (lra_in_progress
10060 && mode == SDmode
10061 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10062 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10063 && (REG_P (operands[1])
10064 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
10065 {
10066 int regno = reg_or_subregno (operands[1]);
10067 enum reg_class cl;
10068
10069 if (!HARD_REGISTER_NUM_P (regno))
10070 {
10071 cl = reg_preferred_class (regno);
10072 gcc_assert (cl != NO_REGS);
10073 regno = reg_renumber[regno];
10074 if (regno < 0)
10075 regno = ira_class_hard_regs[cl][0];
10076 }
10077 if (FP_REGNO_P (regno))
10078 {
10079 if (GET_MODE (operands[0]) != DDmode)
10080 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10081 emit_insn (gen_movsd_store (operands[0], operands[1]));
10082 }
10083 else if (INT_REGNO_P (regno))
10084 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10085 else
10086 gcc_unreachable ();
10087 return;
10088 }
10089 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10090 p:DD)) if p0 is not of floating point class and p1 is spilled as
10091 we can have no analogous movsd_load for this. */
10092 if (lra_in_progress && mode == DDmode
10093 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
10094 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10095 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10096 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10097 {
10098 enum reg_class cl;
10099 int regno = REGNO (SUBREG_REG (operands[0]));
10100
10101 if (!HARD_REGISTER_NUM_P (regno))
10102 {
10103 cl = reg_preferred_class (regno);
10104 regno = reg_renumber[regno];
10105 if (regno < 0)
10106 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10107 }
10108 if (regno >= 0 && ! FP_REGNO_P (regno))
10109 {
10110 mode = SDmode;
10111 operands[0] = SUBREG_REG (operands[0]);
10112 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10113 }
10114 }
10115 if (lra_in_progress
10116 && mode == SDmode
10117 && (REG_P (operands[0])
10118 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
10119 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10120 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10121 {
10122 int regno = reg_or_subregno (operands[0]);
10123 enum reg_class cl;
10124
10125 if (!HARD_REGISTER_NUM_P (regno))
10126 {
10127 cl = reg_preferred_class (regno);
10128 gcc_assert (cl != NO_REGS);
10129 regno = reg_renumber[regno];
10130 if (regno < 0)
10131 regno = ira_class_hard_regs[cl][0];
10132 }
10133 if (FP_REGNO_P (regno))
10134 {
10135 if (GET_MODE (operands[1]) != DDmode)
10136 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10137 emit_insn (gen_movsd_load (operands[0], operands[1]));
10138 }
10139 else if (INT_REGNO_P (regno))
10140 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10141 else
10142 gcc_unreachable ();
10143 return;
10144 }
10145
10146 /* FIXME: In the long term, this switch statement should go away
10147 and be replaced by a sequence of tests based on things like
10148 mode == Pmode. */
10149 switch (mode)
10150 {
10151 case E_HImode:
10152 case E_QImode:
10153 if (CONSTANT_P (operands[1])
10154 && !CONST_INT_P (operands[1]))
10155 operands[1] = force_const_mem (mode, operands[1]);
10156 break;
10157
10158 case E_TFmode:
10159 case E_TDmode:
10160 case E_IFmode:
10161 case E_KFmode:
10162 if (FLOAT128_2REG_P (mode))
10163 rs6000_eliminate_indexed_memrefs (operands);
10164 /* fall through */
10165
10166 case E_DFmode:
10167 case E_DDmode:
10168 case E_SFmode:
10169 case E_SDmode:
10170 if (CONSTANT_P (operands[1])
10171 && ! easy_fp_constant (operands[1], mode))
10172 operands[1] = force_const_mem (mode, operands[1]);
10173 break;
10174
10175 case E_V16QImode:
10176 case E_V8HImode:
10177 case E_V4SFmode:
10178 case E_V4SImode:
10179 case E_V2DFmode:
10180 case E_V2DImode:
10181 case E_V1TImode:
10182 if (CONSTANT_P (operands[1])
10183 && !easy_vector_constant (operands[1], mode))
10184 operands[1] = force_const_mem (mode, operands[1]);
10185 break;
10186
10187 case E_SImode:
10188 case E_DImode:
10189 /* Use the default pattern for the address of ELF small data. */
10190 if (TARGET_ELF
10191 && mode == Pmode
10192 && DEFAULT_ABI == ABI_V4
10193 && (SYMBOL_REF_P (operands[1])
10194 || GET_CODE (operands[1]) == CONST)
10195 && small_data_operand (operands[1], mode))
10196 {
10197 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10198 return;
10199 }
10200
10201 if (DEFAULT_ABI == ABI_V4
10202 && mode == Pmode && mode == SImode
10203 && flag_pic == 1 && got_operand (operands[1], mode))
10204 {
10205 emit_insn (gen_movsi_got (operands[0], operands[1]));
10206 return;
10207 }
10208
10209 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10210 && TARGET_NO_TOC
10211 && ! flag_pic
10212 && mode == Pmode
10213 && CONSTANT_P (operands[1])
10214 && GET_CODE (operands[1]) != HIGH
10215 && !CONST_INT_P (operands[1]))
10216 {
10217 rtx target = (!can_create_pseudo_p ()
10218 ? operands[0]
10219 : gen_reg_rtx (mode));
10220
10221 /* If this is a function address on -mcall-aixdesc,
10222 convert it to the address of the descriptor. */
10223 if (DEFAULT_ABI == ABI_AIX
10224 && SYMBOL_REF_P (operands[1])
10225 && XSTR (operands[1], 0)[0] == '.')
10226 {
10227 const char *name = XSTR (operands[1], 0);
10228 rtx new_ref;
10229 while (*name == '.')
10230 name++;
10231 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10232 CONSTANT_POOL_ADDRESS_P (new_ref)
10233 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10234 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10235 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10236 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10237 operands[1] = new_ref;
10238 }
10239
10240 if (DEFAULT_ABI == ABI_DARWIN)
10241 {
10242 #if TARGET_MACHO
10243 if (MACHO_DYNAMIC_NO_PIC_P)
10244 {
10245 /* Take care of any required data indirection. */
10246 operands[1] = rs6000_machopic_legitimize_pic_address (
10247 operands[1], mode, operands[0]);
10248 if (operands[0] != operands[1])
10249 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10250 return;
10251 }
10252 #endif
10253 emit_insn (gen_macho_high (target, operands[1]));
10254 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10255 return;
10256 }
10257
10258 emit_insn (gen_elf_high (target, operands[1]));
10259 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10260 return;
10261 }
10262
10263 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10264 and we have put it in the TOC, we just need to make a TOC-relative
10265 reference to it. */
10266 if (TARGET_TOC
10267 && SYMBOL_REF_P (operands[1])
10268 && use_toc_relative_ref (operands[1], mode))
10269 operands[1] = create_TOC_reference (operands[1], operands[0]);
10270 else if (mode == Pmode
10271 && CONSTANT_P (operands[1])
10272 && GET_CODE (operands[1]) != HIGH
10273 && ((REG_P (operands[0])
10274 && FP_REGNO_P (REGNO (operands[0])))
10275 || !CONST_INT_P (operands[1])
10276 || (num_insns_constant (operands[1], mode)
10277 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10278 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10279 && (TARGET_CMODEL == CMODEL_SMALL
10280 || can_create_pseudo_p ()
10281 || (REG_P (operands[0])
10282 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10283 {
10284
10285 #if TARGET_MACHO
10286 /* Darwin uses a special PIC legitimizer. */
10287 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10288 {
10289 operands[1] =
10290 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10291 operands[0]);
10292 if (operands[0] != operands[1])
10293 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10294 return;
10295 }
10296 #endif
10297
10298 /* If we are to limit the number of things we put in the TOC and
10299 this is a symbol plus a constant we can add in one insn,
10300 just put the symbol in the TOC and add the constant. */
10301 if (GET_CODE (operands[1]) == CONST
10302 && TARGET_NO_SUM_IN_TOC
10303 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10304 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10305 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10306 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
10307 && ! side_effects_p (operands[0]))
10308 {
10309 rtx sym =
10310 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10311 rtx other = XEXP (XEXP (operands[1], 0), 1);
10312
10313 sym = force_reg (mode, sym);
10314 emit_insn (gen_add3_insn (operands[0], sym, other));
10315 return;
10316 }
10317
10318 operands[1] = force_const_mem (mode, operands[1]);
10319
10320 if (TARGET_TOC
10321 && SYMBOL_REF_P (XEXP (operands[1], 0))
10322 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10323 {
10324 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10325 operands[0]);
10326 operands[1] = gen_const_mem (mode, tocref);
10327 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10328 }
10329 }
10330 break;
10331
10332 case E_TImode:
10333 if (!VECTOR_MEM_VSX_P (TImode))
10334 rs6000_eliminate_indexed_memrefs (operands);
10335 break;
10336
10337 case E_PTImode:
10338 rs6000_eliminate_indexed_memrefs (operands);
10339 break;
10340
10341 default:
10342 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10343 }
10344
10345 /* Above, we may have called force_const_mem which may have returned
10346 an invalid address. If we can, fix this up; otherwise, reload will
10347 have to deal with it. */
10348 if (MEM_P (operands[1]))
10349 operands[1] = validize_mem (operands[1]);
10350
10351 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10352 }
10353 \f
10354 /* Nonzero if we can use a floating-point register to pass this arg. */
10355 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10356 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10357 && (CUM)->fregno <= FP_ARG_MAX_REG \
10358 && TARGET_HARD_FLOAT)
10359
10360 /* Nonzero if we can use an AltiVec register to pass this arg. */
10361 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10362 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10363 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10364 && TARGET_ALTIVEC_ABI \
10365 && (NAMED))
10366
10367 /* Walk down the type tree of TYPE counting consecutive base elements.
10368 If *MODEP is VOIDmode, then set it to the first valid floating point
10369 or vector type. If a non-floating point or vector type is found, or
10370 if a floating point or vector type that doesn't match a non-VOIDmode
10371 *MODEP is found, then return -1, otherwise return the count in the
10372 sub-tree. */
10373
10374 static int
10375 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10376 {
10377 machine_mode mode;
10378 HOST_WIDE_INT size;
10379
10380 switch (TREE_CODE (type))
10381 {
10382 case REAL_TYPE:
10383 mode = TYPE_MODE (type);
10384 if (!SCALAR_FLOAT_MODE_P (mode))
10385 return -1;
10386
10387 if (*modep == VOIDmode)
10388 *modep = mode;
10389
10390 if (*modep == mode)
10391 return 1;
10392
10393 break;
10394
10395 case COMPLEX_TYPE:
10396 mode = TYPE_MODE (TREE_TYPE (type));
10397 if (!SCALAR_FLOAT_MODE_P (mode))
10398 return -1;
10399
10400 if (*modep == VOIDmode)
10401 *modep = mode;
10402
10403 if (*modep == mode)
10404 return 2;
10405
10406 break;
10407
10408 case VECTOR_TYPE:
10409 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10410 return -1;
10411
10412 /* Use V4SImode as representative of all 128-bit vector types. */
10413 size = int_size_in_bytes (type);
10414 switch (size)
10415 {
10416 case 16:
10417 mode = V4SImode;
10418 break;
10419 default:
10420 return -1;
10421 }
10422
10423 if (*modep == VOIDmode)
10424 *modep = mode;
10425
10426 /* Vector modes are considered to be opaque: two vectors are
10427 equivalent for the purposes of being homogeneous aggregates
10428 if they are the same size. */
10429 if (*modep == mode)
10430 return 1;
10431
10432 break;
10433
10434 case ARRAY_TYPE:
10435 {
10436 int count;
10437 tree index = TYPE_DOMAIN (type);
10438
10439 /* Can't handle incomplete types or sizes that are not
10440 fixed. */
10441 if (!COMPLETE_TYPE_P (type)
10442 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10443 return -1;
10444
10445 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10446 if (count == -1
10447 || !index
10448 || !TYPE_MAX_VALUE (index)
10449 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10450 || !TYPE_MIN_VALUE (index)
10451 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10452 || count < 0)
10453 return -1;
10454
10455 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10456 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10457
10458 /* There must be no padding. */
10459 if (wi::to_wide (TYPE_SIZE (type))
10460 != count * GET_MODE_BITSIZE (*modep))
10461 return -1;
10462
10463 return count;
10464 }
10465
10466 case RECORD_TYPE:
10467 {
10468 int count = 0;
10469 int sub_count;
10470 tree field;
10471
10472 /* Can't handle incomplete types or sizes that are not
10473 fixed. */
10474 if (!COMPLETE_TYPE_P (type)
10475 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10476 return -1;
10477
10478 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10479 {
10480 if (TREE_CODE (field) != FIELD_DECL)
10481 continue;
10482
10483 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10484 if (sub_count < 0)
10485 return -1;
10486 count += sub_count;
10487 }
10488
10489 /* There must be no padding. */
10490 if (wi::to_wide (TYPE_SIZE (type))
10491 != count * GET_MODE_BITSIZE (*modep))
10492 return -1;
10493
10494 return count;
10495 }
10496
10497 case UNION_TYPE:
10498 case QUAL_UNION_TYPE:
10499 {
10500 /* These aren't very interesting except in a degenerate case. */
10501 int count = 0;
10502 int sub_count;
10503 tree field;
10504
10505 /* Can't handle incomplete types or sizes that are not
10506 fixed. */
10507 if (!COMPLETE_TYPE_P (type)
10508 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10509 return -1;
10510
10511 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10512 {
10513 if (TREE_CODE (field) != FIELD_DECL)
10514 continue;
10515
10516 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10517 if (sub_count < 0)
10518 return -1;
10519 count = count > sub_count ? count : sub_count;
10520 }
10521
10522 /* There must be no padding. */
10523 if (wi::to_wide (TYPE_SIZE (type))
10524 != count * GET_MODE_BITSIZE (*modep))
10525 return -1;
10526
10527 return count;
10528 }
10529
10530 default:
10531 break;
10532 }
10533
10534 return -1;
10535 }
10536
10537 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10538 float or vector aggregate that shall be passed in FP/vector registers
10539 according to the ELFv2 ABI, return the homogeneous element mode in
10540 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10541
10542 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10543
10544 static bool
10545 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10546 machine_mode *elt_mode,
10547 int *n_elts)
10548 {
10549 /* Note that we do not accept complex types at the top level as
10550 homogeneous aggregates; these types are handled via the
10551 targetm.calls.split_complex_arg mechanism. Complex types
10552 can be elements of homogeneous aggregates, however. */
10553 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10554 && AGGREGATE_TYPE_P (type))
10555 {
10556 machine_mode field_mode = VOIDmode;
10557 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10558
10559 if (field_count > 0)
10560 {
10561 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10562 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10563
10564 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10565 up to AGGR_ARG_NUM_REG registers. */
10566 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10567 {
10568 if (elt_mode)
10569 *elt_mode = field_mode;
10570 if (n_elts)
10571 *n_elts = field_count;
10572 return true;
10573 }
10574 }
10575 }
10576
10577 if (elt_mode)
10578 *elt_mode = mode;
10579 if (n_elts)
10580 *n_elts = 1;
10581 return false;
10582 }
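
/* As an illustration (a sketch, not from the original source), under
   ELFv2 with hard float:

     struct hfa { float a, b, c, d; }  -> true, *ELT_MODE == SFmode,
                                          *N_ELTS == 4
     struct big { double d[9]; }       -> false when AGGR_ARG_NUM_REG
                                          is 8, since nine doublewords
                                          exceed the register limit.  */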
10583
10584 /* Return a nonzero value to say to return the function value in
10585 memory, just as large structures are always returned. TYPE will be
10586 the data type of the value, and FNTYPE will be the type of the
10587 function doing the returning, or NULL for libcalls.
10588
10589 The AIX ABI for the RS/6000 specifies that all structures are
10590 returned in memory. The Darwin ABI does the same.
10591
10592 For the Darwin 64 Bit ABI, a function result can be returned in
10593 registers or in memory, depending on the size of the return data
10594 type. If it is returned in registers, the value occupies the same
10595 registers as it would if it were the first and only function
10596 argument. Otherwise, the function places its result in memory at
10597 the location pointed to by GPR3.
10598
10599 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10600 but a draft put them in memory, and GCC used to implement the draft
10601 instead of the final standard. Therefore, aix_struct_return
10602 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10603 compatibility can change DRAFT_V4_STRUCT_RET to override the
10604 default, and -m switches get the final word. See
10605 rs6000_option_override_internal for more details.
10606
10607 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10608 long double support is enabled. These values are returned in memory.
10609
10610 int_size_in_bytes returns -1 for variable size objects, which go in
10611 memory always. The cast to unsigned makes -1 > 8. */
10612
10613 static bool
10614 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10615 {
10616 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10617 if (TARGET_MACHO
10618 && rs6000_darwin64_abi
10619 && TREE_CODE (type) == RECORD_TYPE
10620 && int_size_in_bytes (type) > 0)
10621 {
10622 CUMULATIVE_ARGS valcum;
10623 rtx valret;
10624
10625 valcum.words = 0;
10626 valcum.fregno = FP_ARG_MIN_REG;
10627 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10628 /* Do a trial code generation as if this were going to be passed
10629 as an argument; if any part goes in memory, we return NULL. */
10630 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10631 if (valret)
10632 return false;
10633 /* Otherwise fall through to more conventional ABI rules. */
10634 }
10635
10636 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers.  */
10637 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10638 NULL, NULL))
10639 return false;
10640
10641 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
10642 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10643 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10644 return false;
10645
10646 if (AGGREGATE_TYPE_P (type)
10647 && (aix_struct_return
10648 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10649 return true;
10650
10651 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10652 modes only exist for GCC vector types if -maltivec. */
10653 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10654 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10655 return false;
10656
10657 /* Return synthetic vectors in memory. */
10658 if (TREE_CODE (type) == VECTOR_TYPE
10659 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10660 {
10661 static bool warned_for_return_big_vectors = false;
10662 if (!warned_for_return_big_vectors)
10663 {
10664 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10665 "non-standard ABI extension with no compatibility "
10666 "guarantee");
10667 warned_for_return_big_vectors = true;
10668 }
10669 return true;
10670 }
10671
10672 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10673 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10674 return true;
10675
10676 return false;
10677 }
10678
10679 /* Specify whether values returned in registers should be at the most
10680 significant end of a register. We want aggregates returned by
10681 value to match the way aggregates are passed to functions. */
10682
10683 static bool
10684 rs6000_return_in_msb (const_tree valtype)
10685 {
10686 return (DEFAULT_ABI == ABI_ELFv2
10687 && BYTES_BIG_ENDIAN
10688 && AGGREGATE_TYPE_P (valtype)
10689 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10690 == PAD_UPWARD));
10691 }
10692
10693 #ifdef HAVE_AS_GNU_ATTRIBUTE
10694 /* Return TRUE if a call to function FNDECL may be one that
10695 potentially affects the function calling ABI of the object file. */
10696
10697 static bool
10698 call_ABI_of_interest (tree fndecl)
10699 {
10700 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10701 {
10702 struct cgraph_node *c_node;
10703
10704 /* Libcalls are always interesting. */
10705 if (fndecl == NULL_TREE)
10706 return true;
10707
10708 /* Any call to an external function is interesting. */
10709 if (DECL_EXTERNAL (fndecl))
10710 return true;
10711
10712 /* Interesting functions that we are emitting in this object file. */
10713 c_node = cgraph_node::get (fndecl);
10714 c_node = c_node->ultimate_alias_target ();
10715 return !c_node->only_called_directly_p ();
10716 }
10717 return false;
10718 }
10719 #endif
10720
10721 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10722 for a call to a function whose data type is FNTYPE.
10723 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
10724
10725 For incoming args we set the number of arguments in the prototype large
10726 so we never return a PARALLEL. */
10727
10728 void
10729 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10730 rtx libname ATTRIBUTE_UNUSED, int incoming,
10731 int libcall, int n_named_args,
10732 tree fndecl,
10733 machine_mode return_mode ATTRIBUTE_UNUSED)
10734 {
10735 static CUMULATIVE_ARGS zero_cumulative;
10736
10737 *cum = zero_cumulative;
10738 cum->words = 0;
10739 cum->fregno = FP_ARG_MIN_REG;
10740 cum->vregno = ALTIVEC_ARG_MIN_REG;
10741 cum->prototype = (fntype && prototype_p (fntype));
10742 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10743 ? CALL_LIBCALL : CALL_NORMAL);
10744 cum->sysv_gregno = GP_ARG_MIN_REG;
10745 cum->stdarg = stdarg_p (fntype);
10746 cum->libcall = libcall;
10747
10748 cum->nargs_prototype = 0;
10749 if (incoming || cum->prototype)
10750 cum->nargs_prototype = n_named_args;
10751
10752 /* Check for a longcall attribute. */
10753 if ((!fntype && rs6000_default_long_calls)
10754 || (fntype
10755 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10756 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10757 cum->call_cookie |= CALL_LONG;
10758 else if (DEFAULT_ABI != ABI_DARWIN)
10759 {
10760 bool is_local = (fndecl
10761 && !DECL_EXTERNAL (fndecl)
10762 && !DECL_WEAK (fndecl)
10763 && (*targetm.binds_local_p) (fndecl));
10764 if (is_local)
10765 ;
10766 else if (flag_plt)
10767 {
10768 if (fntype
10769 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10770 cum->call_cookie |= CALL_LONG;
10771 }
10772 else
10773 {
10774 if (!(fntype
10775 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10776 cum->call_cookie |= CALL_LONG;
10777 }
10778 }
10779
10780 if (TARGET_DEBUG_ARG)
10781 {
10782 fprintf (stderr, "\ninit_cumulative_args:");
10783 if (fntype)
10784 {
10785 tree ret_type = TREE_TYPE (fntype);
10786 fprintf (stderr, " ret code = %s,",
10787 get_tree_code_name (TREE_CODE (ret_type)));
10788 }
10789
10790 if (cum->call_cookie & CALL_LONG)
10791 fprintf (stderr, " longcall,");
10792
10793 fprintf (stderr, " proto = %d, nargs = %d\n",
10794 cum->prototype, cum->nargs_prototype);
10795 }
10796
10797 #ifdef HAVE_AS_GNU_ATTRIBUTE
10798 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10799 {
10800 cum->escapes = call_ABI_of_interest (fndecl);
10801 if (cum->escapes)
10802 {
10803 tree return_type;
10804
10805 if (fntype)
10806 {
10807 return_type = TREE_TYPE (fntype);
10808 return_mode = TYPE_MODE (return_type);
10809 }
10810 else
10811 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10812
10813 if (return_type != NULL)
10814 {
10815 if (TREE_CODE (return_type) == RECORD_TYPE
10816 && TYPE_TRANSPARENT_AGGR (return_type))
10817 {
10818 return_type = TREE_TYPE (first_field (return_type));
10819 return_mode = TYPE_MODE (return_type);
10820 }
10821 if (AGGREGATE_TYPE_P (return_type)
10822 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10823 <= 8))
10824 rs6000_returns_struct = true;
10825 }
10826 if (SCALAR_FLOAT_MODE_P (return_mode))
10827 {
10828 rs6000_passes_float = true;
10829 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10830 && (FLOAT128_IBM_P (return_mode)
10831 || FLOAT128_IEEE_P (return_mode)
10832 || (return_type != NULL
10833 && (TYPE_MAIN_VARIANT (return_type)
10834 == long_double_type_node))))
10835 rs6000_passes_long_double = true;
10836
10837 /* Note if we pass or return an IEEE 128-bit type. We changed
10838 the mangling for these types, and we may need to make an alias
10839 with the old mangling. */
10840 if (FLOAT128_IEEE_P (return_mode))
10841 rs6000_passes_ieee128 = true;
10842 }
10843 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10844 rs6000_passes_vector = true;
10845 }
10846 }
10847 #endif
10848
10849 if (fntype
10850 && !TARGET_ALTIVEC
10851 && TARGET_ALTIVEC_ABI
10852 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10853 {
10854 error ("cannot return value in vector register because"
10855 " altivec instructions are disabled, use %qs"
10856 " to enable them", "-maltivec");
10857 }
10858 }
10859 \f
10860 /* The mode the ABI uses for a word. This is not the same as word_mode
10861 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10862
10863 static scalar_int_mode
10864 rs6000_abi_word_mode (void)
10865 {
10866 return TARGET_32BIT ? SImode : DImode;
10867 }
10868
10869 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10870 static char *
10871 rs6000_offload_options (void)
10872 {
10873 if (TARGET_64BIT)
10874 return xstrdup ("-foffload-abi=lp64");
10875 else
10876 return xstrdup ("-foffload-abi=ilp32");
10877 }
10878
10879 /* On rs6000, function arguments are promoted, as are function return
10880 values. */
10881
10882 static machine_mode
10883 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10884 machine_mode mode,
10885 int *punsignedp ATTRIBUTE_UNUSED,
10886 const_tree, int)
10887 {
10888 PROMOTE_MODE (mode, *punsignedp, type);
10889
10890 return mode;
10891 }
10892
10893 /* Return true if TYPE must be passed on the stack and not in registers. */
10894
10895 static bool
10896 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10897 {
10898 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10899 return must_pass_in_stack_var_size (mode, type);
10900 else
10901 return must_pass_in_stack_var_size_or_pad (mode, type);
10902 }
10903
10904 static inline bool
10905 is_complex_IBM_long_double (machine_mode mode)
10906 {
10907 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10908 }
10909
10910 /* Whether ABI_V4 passes MODE args to a function in floating point
10911 registers. */
10912
10913 static bool
10914 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10915 {
10916 if (!TARGET_HARD_FLOAT)
10917 return false;
10918 if (mode == DFmode)
10919 return true;
10920 if (mode == SFmode && named)
10921 return true;
10922 /* ABI_V4 passes complex IBM long double in 8 gprs.
10923 Stupid, but we can't change the ABI now. */
10924 if (is_complex_IBM_long_double (mode))
10925 return false;
10926 if (FLOAT128_2REG_P (mode))
10927 return true;
10928 if (DECIMAL_FLOAT_MODE_P (mode))
10929 return true;
10930 return false;
10931 }
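
/* A summary sketch of the cases above (not from the original source),
   for ABI_V4 with TARGET_HARD_FLOAT:

     DFmode                      -> FPR, whether named or not
     SFmode                      -> FPR only when the argument is named
     complex IBM long double     -> GPRs (a historical ABI quirk)
     FLOAT128_2REG_P modes       -> FPRs
     decimal float modes         -> FPRs  */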
10932
10933 /* Implement TARGET_FUNCTION_ARG_PADDING.
10934
10935 For the AIX ABI structs are always stored left shifted in their
10936 argument slot. */
10937
10938 static pad_direction
10939 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10940 {
10941 #ifndef AGGREGATE_PADDING_FIXED
10942 #define AGGREGATE_PADDING_FIXED 0
10943 #endif
10944 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10945 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10946 #endif
10947
10948 if (!AGGREGATE_PADDING_FIXED)
10949 {
10950 /* GCC used to pass structures of the same size as integer types as
10951 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10952 That is, structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10953 passed padded downward, except that -mstrict-align further
10954 muddied the water in that multi-component structures of 2 and 4
10955 bytes in size were passed padded upward.
10956
10957 The following arranges for best compatibility with previous
10958 versions of gcc, but removes the -mstrict-align dependency. */
10959 if (BYTES_BIG_ENDIAN)
10960 {
10961 HOST_WIDE_INT size = 0;
10962
10963 if (mode == BLKmode)
10964 {
10965 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10966 size = int_size_in_bytes (type);
10967 }
10968 else
10969 size = GET_MODE_SIZE (mode);
10970
10971 if (size == 1 || size == 2 || size == 4)
10972 return PAD_DOWNWARD;
10973 }
10974 return PAD_UPWARD;
10975 }
10976
10977 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10978 {
10979 if (type != 0 && AGGREGATE_TYPE_P (type))
10980 return PAD_UPWARD;
10981 }
10982
10983 /* Fall back to the default. */
10984 return default_function_arg_padding (mode, type);
10985 }
10986
10987 /* If defined, a C expression that gives the alignment boundary, in bits,
10988 of an argument with the specified mode and type. If it is not defined,
10989 PARM_BOUNDARY is used for all arguments.
10990
10991 V.4 wants long longs and doubles to be double word aligned. Just
10992 testing the mode size is a boneheaded way to do this as it means
10993 that other types such as complex int are also double word aligned.
10994 However, we're stuck with this because changing the ABI might break
10995 existing library interfaces.
10996
10997 Quadword align Altivec/VSX vectors.
10998 Quadword align large synthetic vector types. */
10999
11000 static unsigned int
11001 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11002 {
11003 machine_mode elt_mode;
11004 int n_elts;
11005
11006 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11007
11008 if (DEFAULT_ABI == ABI_V4
11009 && (GET_MODE_SIZE (mode) == 8
11010 || (TARGET_HARD_FLOAT
11011 && !is_complex_IBM_long_double (mode)
11012 && FLOAT128_2REG_P (mode))))
11013 return 64;
11014 else if (FLOAT128_VECTOR_P (mode))
11015 return 128;
11016 else if (type && TREE_CODE (type) == VECTOR_TYPE
11017 && int_size_in_bytes (type) >= 8
11018 && int_size_in_bytes (type) < 16)
11019 return 64;
11020 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11021 || (type && TREE_CODE (type) == VECTOR_TYPE
11022 && int_size_in_bytes (type) >= 16))
11023 return 128;
11024
11025 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11026 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11027 -mcompat-align-parm is used. */
11028 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11029 || DEFAULT_ABI == ABI_ELFv2)
11030 && type && TYPE_ALIGN (type) > 64)
11031 {
11032 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11033 or homogeneous float/vector aggregates here. We already handled
11034 vector aggregates above, but still need to check for float here. */
11035 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11036 && !SCALAR_FLOAT_MODE_P (elt_mode));
11037
11038 /* We used to check for BLKmode instead of the above aggregate type
11039 check. Warn when this results in any difference to the ABI. */
11040 if (aggregate_p != (mode == BLKmode))
11041 {
11042 static bool warned;
11043 if (!warned && warn_psabi)
11044 {
11045 warned = true;
11046 inform (input_location,
11047 "the ABI of passing aggregates with %d-byte alignment"
11048 " has changed in GCC 5",
11049 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11050 }
11051 }
11052
11053 if (aggregate_p)
11054 return 128;
11055 }
11056
11057 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11058 implement the "aggregate type" check as a BLKmode check here; this
11059 means certain aggregate types are in fact not aligned. */
11060 if (TARGET_MACHO && rs6000_darwin64_abi
11061 && mode == BLKmode
11062 && type && TYPE_ALIGN (type) > 64)
11063 return 128;
11064
11065 return PARM_BOUNDARY;
11066 }
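
/* Illustrative boundary results (a sketch, not from the original
   source):

     double under ABI_V4                        -> 64
     any Altivec/VSX vector mode                -> 128
     an ELFv2 aggregate with TYPE_ALIGN of 128  -> 128
     plain int                                  -> PARM_BOUNDARY  */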
11067
11068 /* The offset in words to the start of the parameter save area. */
11069
11070 static unsigned int
11071 rs6000_parm_offset (void)
11072 {
11073 return (DEFAULT_ABI == ABI_V4 ? 2
11074 : DEFAULT_ABI == ABI_ELFv2 ? 4
11075 : 6);
11076 }
11077
11078 /* For a function parm of MODE and TYPE, return the starting word in
11079 the parameter area. NWORDS of the parameter area are already used. */
11080
11081 static unsigned int
11082 rs6000_parm_start (machine_mode mode, const_tree type,
11083 unsigned int nwords)
11084 {
11085 unsigned int align;
11086
11087 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11088 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11089 }
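
/* A worked example (a sketch, not from the original source): under
   ELFv2 on a 64-bit target, rs6000_parm_offset () is 4 and
   PARM_BOUNDARY is 64.  For a 16-byte-aligned argument with
   NWORDS == 1:

     align = 128 / 64 - 1 = 1
     start = 1 + (-(4 + 1) & 1) = 1 + 1 = 2

   so the argument starts at word 2, making its doubleword offset from
   the stack pointer (4 + 2) even, i.e. 16-byte aligned.  */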
11090
11091 /* Compute the size (in words) of a function argument. */
11092
11093 static unsigned long
11094 rs6000_arg_size (machine_mode mode, const_tree type)
11095 {
11096 unsigned long size;
11097
11098 if (mode != BLKmode)
11099 size = GET_MODE_SIZE (mode);
11100 else
11101 size = int_size_in_bytes (type);
11102
11103 if (TARGET_32BIT)
11104 return (size + 3) >> 2;
11105 else
11106 return (size + 7) >> 3;
11107 }
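
/* For example (a sketch, not from the original source): a 16-byte
   argument occupies (16 + 3) >> 2 = 4 words on a 32-bit target and
   (16 + 7) >> 3 = 2 words on a 64-bit target; a 1-byte argument
   rounds up to one word either way.  */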
11108 \f
11109 /* Use this to flush pending int fields. */
11110
11111 static void
11112 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11113 HOST_WIDE_INT bitpos, int final)
11114 {
11115 unsigned int startbit, endbit;
11116 int intregs, intoffset;
11117
11118 /* Handle the situations where a float is taking up the first half
11119 of the GPR, and the other half is empty (typically due to
11120 alignment restrictions). We can detect this by an 8-byte-aligned
11121 int field, or by seeing that this is the final flush for this
11122 argument. Count the word and continue on. */
11123 if (cum->floats_in_gpr == 1
11124 && (cum->intoffset % 64 == 0
11125 || (cum->intoffset == -1 && final)))
11126 {
11127 cum->words++;
11128 cum->floats_in_gpr = 0;
11129 }
11130
11131 if (cum->intoffset == -1)
11132 return;
11133
11134 intoffset = cum->intoffset;
11135 cum->intoffset = -1;
11136 cum->floats_in_gpr = 0;
11137
11138 if (intoffset % BITS_PER_WORD != 0)
11139 {
11140 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11141 if (!int_mode_for_size (bits, 0).exists ())
11142 {
11143 /* We couldn't find an appropriate mode, which happens,
11144 e.g., in packed structs when there are 3 bytes to load.
11145 Move intoffset back to the beginning of the word in this
11146 case. */
11147 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11148 }
11149 }
11150
11151 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11152 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11153 intregs = (endbit - startbit) / BITS_PER_WORD;
11154 cum->words += intregs;
11155 /* cum->words should be unsigned. */
11156 if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
11157 {
11158 int pad = (endbit / BITS_PER_WORD) - cum->words;
11159 cum->words += pad;
11160 }
11161 }
11162
11163 /* The darwin64 ABI calls for us to recurse down through structs,
11164 looking for elements passed in registers. Unfortunately, we have
11165 to track int register count here also because of misalignments
11166 in powerpc alignment mode. */
11167
11168 static void
11169 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11170 const_tree type,
11171 HOST_WIDE_INT startbitpos)
11172 {
11173 tree f;
11174
11175 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11176 if (TREE_CODE (f) == FIELD_DECL)
11177 {
11178 HOST_WIDE_INT bitpos = startbitpos;
11179 tree ftype = TREE_TYPE (f);
11180 machine_mode mode;
11181 if (ftype == error_mark_node)
11182 continue;
11183 mode = TYPE_MODE (ftype);
11184
11185 if (DECL_SIZE (f) != 0
11186 && tree_fits_uhwi_p (bit_position (f)))
11187 bitpos += int_bit_position (f);
11188
11189 /* ??? FIXME: else assume zero offset. */
11190
11191 if (TREE_CODE (ftype) == RECORD_TYPE)
11192 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11193 else if (USE_FP_FOR_ARG_P (cum, mode))
11194 {
11195 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11196 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11197 cum->fregno += n_fpregs;
11198 /* Single-precision floats present a special problem for
11199 us, because they are smaller than an 8-byte GPR, and so
11200 the structure-packing rules combined with the standard
11201 varargs behavior mean that we want to pack float/float
11202 and float/int combinations into a single register's
11203 space. This is complicated by the arg advance flushing,
11204 which works on arbitrarily large groups of int-type
11205 fields. */
11206 if (mode == SFmode)
11207 {
11208 if (cum->floats_in_gpr == 1)
11209 {
11210 /* Two floats in a word; count the word and reset
11211 the float count. */
11212 cum->words++;
11213 cum->floats_in_gpr = 0;
11214 }
11215 else if (bitpos % 64 == 0)
11216 {
11217 /* A float at the beginning of an 8-byte word;
11218 count it and put off adjusting cum->words until
11219 we see if an arg advance flush is going to do it
11220 for us. */
11221 cum->floats_in_gpr++;
11222 }
11223 else
11224 {
11225 /* The float is at the end of a word, preceded
11226 by integer fields, so the arg advance flush
11227 just above has already set cum->words and
11228 everything is taken care of. */
11229 }
11230 }
11231 else
11232 cum->words += n_fpregs;
11233 }
11234 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11235 {
11236 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11237 cum->vregno++;
11238 cum->words += 2;
11239 }
11240 else if (cum->intoffset == -1)
11241 cum->intoffset = bitpos;
11242 }
11243 }
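
/* To illustrate the SFmode packing above (a sketch, not from the
   original source): for a Darwin64 struct { float a; float b; }, the
   first float bumps floats_in_gpr to 1 and the second completes the
   word, so both floats share one GPR; for struct { int i; float f; },
   the flush triggered by F has already counted the word that holds
   both fields.  */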
11244
11245 /* Check for an item that needs to be considered specially under the Darwin
11246 64-bit ABI. These are record types where the mode is BLKmode or the
11247 structure is 8 bytes in size. */
11248 static int
11249 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11250 {
11251 return rs6000_darwin64_abi
11252 && ((mode == BLKmode
11253 && TREE_CODE (type) == RECORD_TYPE
11254 && int_size_in_bytes (type) > 0)
11255 || (type && TREE_CODE (type) == RECORD_TYPE
11256 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11257 }
11258
11259 /* Update the data in CUM to advance over an argument
11260 of mode MODE and data type TYPE.
11261 (TYPE is null for libcalls where that information may not be available.)
11262
11263 Note that for args passed by reference, function_arg will be called
11264 with MODE and TYPE set to that of the pointer to the arg, not the arg
11265 itself. */
11266
11267 static void
11268 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11269 const_tree type, bool named, int depth)
11270 {
11271 machine_mode elt_mode;
11272 int n_elts;
11273
11274 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11275
11276 /* Only tick off an argument if we're not recursing. */
11277 if (depth == 0)
11278 cum->nargs_prototype--;
11279
11280 #ifdef HAVE_AS_GNU_ATTRIBUTE
11281 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11282 && cum->escapes)
11283 {
11284 if (SCALAR_FLOAT_MODE_P (mode))
11285 {
11286 rs6000_passes_float = true;
11287 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11288 && (FLOAT128_IBM_P (mode)
11289 || FLOAT128_IEEE_P (mode)
11290 || (type != NULL
11291 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11292 rs6000_passes_long_double = true;
11293
11294 /* Note if we pass or return an IEEE 128-bit type. We changed the
11295 mangling for these types, and we may need to make an alias with
11296 the old mangling. */
11297 if (FLOAT128_IEEE_P (mode))
11298 rs6000_passes_ieee128 = true;
11299 }
11300 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11301 rs6000_passes_vector = true;
11302 }
11303 #endif
11304
11305 if (TARGET_ALTIVEC_ABI
11306 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11307 || (type && TREE_CODE (type) == VECTOR_TYPE
11308 && int_size_in_bytes (type) == 16)))
11309 {
11310 bool stack = false;
11311
11312 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11313 {
11314 cum->vregno += n_elts;
11315
11316 if (!TARGET_ALTIVEC)
11317 error ("cannot pass argument in vector register because"
11318 " altivec instructions are disabled, use %qs"
11319 " to enable them", "-maltivec");
11320
11321 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11322 even if it is going to be passed in a vector register.
11323 Darwin does the same for variable-argument functions. */
11324 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11325 && TARGET_64BIT)
11326 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11327 stack = true;
11328 }
11329 else
11330 stack = true;
11331
11332 if (stack)
11333 {
11334 int align;
11335
11336 /* Vector parameters must be 16-byte aligned. In 32-bit
11337 mode this means we need to take into account the offset
11338 to the parameter save area. In 64-bit mode, they just
11339 have to start on an even word, since the parameter save
11340 area is 16-byte aligned. */
11341 if (TARGET_32BIT)
11342 align = -(rs6000_parm_offset () + cum->words) & 3;
11343 else
11344 align = cum->words & 1;
11345 cum->words += align + rs6000_arg_size (mode, type);
11346
11347 if (TARGET_DEBUG_ARG)
11348 {
11349 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11350 cum->words, align);
11351 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11352 cum->nargs_prototype, cum->prototype,
11353 GET_MODE_NAME (mode));
11354 }
11355 }
11356 }
11357 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11358 {
11359 int size = int_size_in_bytes (type);
11360 /* Variable sized types have size == -1 and are
11361 treated as if consisting entirely of ints.
11362 Pad to 16 byte boundary if needed. */
11363 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11364 && (cum->words % 2) != 0)
11365 cum->words++;
11366 /* For varargs, we can just go up by the size of the struct. */
11367 if (!named)
11368 cum->words += (size + 7) / 8;
11369 else
11370 {
11371 /* It is tempting to say int register count just goes up by
11372 sizeof(type)/8, but this is wrong in a case such as
11373 { int; double; int; } [powerpc alignment]. We have to
11374 grovel through the fields for these too. */
11375 cum->intoffset = 0;
11376 cum->floats_in_gpr = 0;
11377 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11378 rs6000_darwin64_record_arg_advance_flush (cum,
11379 size * BITS_PER_UNIT, 1);
11380 }
11381 if (TARGET_DEBUG_ARG)
11382 {
11383 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d, ",
11384 cum->words, TYPE_ALIGN (type), size);
11385 fprintf (stderr,
11386 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11387 cum->nargs_prototype, cum->prototype,
11388 GET_MODE_NAME (mode));
11389 }
11390 }
11391 else if (DEFAULT_ABI == ABI_V4)
11392 {
11393 if (abi_v4_pass_in_fpr (mode, named))
11394 {
11395 /* _Decimal128 must use an even/odd register pair. This assumes
11396 that the register number is odd when fregno is odd. */
11397 if (mode == TDmode && (cum->fregno % 2) == 1)
11398 cum->fregno++;
11399
11400 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11401 <= FP_ARG_V4_MAX_REG)
11402 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11403 else
11404 {
11405 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11406 if (mode == DFmode || FLOAT128_IBM_P (mode)
11407 || mode == DDmode || mode == TDmode)
11408 cum->words += cum->words & 1;
11409 cum->words += rs6000_arg_size (mode, type);
11410 }
11411 }
11412 else
11413 {
11414 int n_words = rs6000_arg_size (mode, type);
11415 int gregno = cum->sysv_gregno;
11416
11417 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11418 So is any other 2-word item such as complex int, due to a
11419 historical mistake. */
11420 if (n_words == 2)
11421 gregno += (1 - gregno) & 1;
11422
11423 /* Multi-reg args are not split between registers and stack. */
11424 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11425 {
11426 /* Long long is aligned on the stack. So are other 2 word
11427 items such as complex int due to a historical mistake. */
11428 if (n_words == 2)
11429 cum->words += cum->words & 1;
11430 cum->words += n_words;
11431 }
11432
11433 /* Note: we continue to accumulate gregno even after we've started
11434 spilling to the stack, so that expand_builtin_saveregs can tell
11435 that spilling has started. */
11436 cum->sysv_gregno = gregno + n_words;
11437 }
11438
11439 if (TARGET_DEBUG_ARG)
11440 {
11441 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11442 cum->words, cum->fregno);
11443 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11444 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11445 fprintf (stderr, "mode = %4s, named = %d\n",
11446 GET_MODE_NAME (mode), named);
11447 }
11448 }
11449 else
11450 {
11451 int n_words = rs6000_arg_size (mode, type);
11452 int start_words = cum->words;
11453 int align_words = rs6000_parm_start (mode, type, start_words);
11454
11455 cum->words = align_words + n_words;
11456
11457 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11458 {
11459 /* _Decimal128 must be passed in an even/odd float register pair.
11460 This assumes that the register number is odd when fregno is
11461 odd. */
11462 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11463 cum->fregno++;
11464 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11465 }
11466
11467 if (TARGET_DEBUG_ARG)
11468 {
11469 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11470 cum->words, cum->fregno);
11471 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11472 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11473 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11474 named, align_words - start_words, depth);
11475 }
11476 }
11477 }
11478
11479 static void
11480 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11481 const_tree type, bool named)
11482 {
11483 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11484 0);
11485 }
11486
11487 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11488 structure between cum->intoffset and bitpos to integer registers. */
11489
11490 static void
11491 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11492 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11493 {
11494 machine_mode mode;
11495 unsigned int regno;
11496 unsigned int startbit, endbit;
11497 int this_regno, intregs, intoffset;
11498 rtx reg;
11499
11500 if (cum->intoffset == -1)
11501 return;
11502
11503 intoffset = cum->intoffset;
11504 cum->intoffset = -1;
11505
11506 /* If this is the trailing part of a word, try to only load that
11507 much into the register. Otherwise load the whole register. Note
11508 that in the latter case we may pick up unwanted bits. It's not a
11509 problem at the moment, but we may wish to revisit this. */
11510
11511 if (intoffset % BITS_PER_WORD != 0)
11512 {
11513 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11514 if (!int_mode_for_size (bits, 0).exists (&mode))
11515 {
11516 /* We couldn't find an appropriate mode, which happens,
11517 e.g., in packed structs when there are 3 bytes to load.
11518 Move intoffset back to the beginning of the word in this
11519 case. */
11520 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11521 mode = word_mode;
11522 }
11523 }
11524 else
11525 mode = word_mode;
11526
11527 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11528 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11529 intregs = (endbit - startbit) / BITS_PER_WORD;
11530 this_regno = cum->words + intoffset / BITS_PER_WORD;
11531
11532 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11533 cum->use_stack = 1;
11534
11535 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11536 if (intregs <= 0)
11537 return;
11538
11539 intoffset /= BITS_PER_UNIT;
11540 do
11541 {
11542 regno = GP_ARG_MIN_REG + this_regno;
11543 reg = gen_rtx_REG (mode, regno);
11544 rvec[(*k)++] =
11545 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11546
11547 this_regno += 1;
11548 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11549 mode = word_mode;
11550 intregs -= 1;
11551 }
11552 while (intregs > 0);
11553 }
11554
11555 /* Recursive workhorse for rs6000_darwin64_record_arg. */
11556
11557 static void
11558 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11559 HOST_WIDE_INT startbitpos, rtx rvec[],
11560 int *k)
11561 {
11562 tree f;
11563
11564 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11565 if (TREE_CODE (f) == FIELD_DECL)
11566 {
11567 HOST_WIDE_INT bitpos = startbitpos;
11568 tree ftype = TREE_TYPE (f);
11569 machine_mode mode;
11570 if (ftype == error_mark_node)
11571 continue;
11572 mode = TYPE_MODE (ftype);
11573
11574 if (DECL_SIZE (f) != 0
11575 && tree_fits_uhwi_p (bit_position (f)))
11576 bitpos += int_bit_position (f);
11577
11578 /* ??? FIXME: else assume zero offset. */
11579
11580 if (TREE_CODE (ftype) == RECORD_TYPE)
11581 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11582 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11583 {
11584 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11585 #if 0
11586 switch (mode)
11587 {
11588 case E_SCmode: mode = SFmode; break;
11589 case E_DCmode: mode = DFmode; break;
11590 case E_TCmode: mode = TFmode; break;
11591 default: break;
11592 }
11593 #endif
11594 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11595 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11596 {
11597 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11598 && (mode == TFmode || mode == TDmode));
11599 /* Long double or _Decimal128 split over regs and memory. */
11600 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11601 cum->use_stack = 1;
11602 }
11603 rvec[(*k)++]
11604 = gen_rtx_EXPR_LIST (VOIDmode,
11605 gen_rtx_REG (mode, cum->fregno++),
11606 GEN_INT (bitpos / BITS_PER_UNIT));
11607 if (FLOAT128_2REG_P (mode))
11608 cum->fregno++;
11609 }
11610 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11611 {
11612 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11613 rvec[(*k)++]
11614 = gen_rtx_EXPR_LIST (VOIDmode,
11615 gen_rtx_REG (mode, cum->vregno++),
11616 GEN_INT (bitpos / BITS_PER_UNIT));
11617 }
11618 else if (cum->intoffset == -1)
11619 cum->intoffset = bitpos;
11620 }
11621 }
11622
11623 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11624 the register(s) to be used for each field and subfield of a struct
11625 being passed by value, along with the offset of where the
11626 register's value may be found in the block. FP fields go in FP
11627 register, vector fields go in vector registers, and everything
11628 else goes in int registers, packed as in memory.
11629
11630 This code is also used for function return values. RETVAL indicates
11631 whether this is the case.
11632
11633 Much of this is taken from the SPARC V9 port, which has a similar
11634 calling convention. */
11635
11636 static rtx
11637 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11638 bool named, bool retval)
11639 {
11640 rtx rvec[FIRST_PSEUDO_REGISTER];
11641 int k = 1, kbase = 1;
11642 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11643 /* This is a copy; modifications are not visible to our caller. */
11644 CUMULATIVE_ARGS copy_cum = *orig_cum;
11645 CUMULATIVE_ARGS *cum = &copy_cum;
11646
11647 /* Pad to 16 byte boundary if needed. */
11648 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11649 && (cum->words % 2) != 0)
11650 cum->words++;
11651
11652 cum->intoffset = 0;
11653 cum->use_stack = 0;
11654 cum->named = named;
11655
11656 /* Put entries into rvec[] for individual FP and vector fields, and
11657 for the chunks of memory that go in int regs. Note we start at
11658 element 1; 0 is reserved for an indication of using memory, and
11659 may or may not be filled in below. */
11660 rs6000_darwin64_record_arg_recurse (cum, type, /* startbitpos= */ 0, rvec, &k);
11661 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11662
11663 /* If any part of the struct went on the stack put all of it there.
11664 This hack is because the generic code for
11665 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11666 parts of the struct are not at the beginning. */
11667 if (cum->use_stack)
11668 {
11669 if (retval)
11670 return NULL_RTX; /* doesn't go in registers at all */
11671 kbase = 0;
11672 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11673 }
11674 if (k > 1 || cum->use_stack)
11675 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11676 else
11677 return NULL_RTX;
11678 }
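
/* As an illustration (a sketch, not from the original source): for a
   Darwin64 struct { double d; int i; } passed by value at the start of
   the argument list, the code above builds roughly

     (parallel [(expr_list (reg:DF f1) (const_int 0))
                (expr_list (reg:DI r4) (const_int 8))])

   i.e. D goes in an FPR at offset 0 and I in a GPR chunk at offset 8,
   with the exact register numbers depending on CUM.  */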
11679
11680 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11681
11682 static rtx
11683 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11684 int align_words)
11685 {
11686 int n_units;
11687 int i, k;
11688 rtx rvec[GP_ARG_NUM_REG + 1];
11689
11690 if (align_words >= GP_ARG_NUM_REG)
11691 return NULL_RTX;
11692
11693 n_units = rs6000_arg_size (mode, type);
11694
11695 /* Optimize the simple case where the arg fits in one gpr, except in
11696 the case of BLKmode due to assign_parms assuming that registers are
11697 BITS_PER_WORD wide. */
11698 if (n_units == 0
11699 || (n_units == 1 && mode != BLKmode))
11700 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11701
11702 k = 0;
11703 if (align_words + n_units > GP_ARG_NUM_REG)
11704 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11705 using a magic NULL_RTX component.
11706 This is not strictly correct. Only some of the arg belongs in
11707 memory, not all of it. However, the normal scheme using
11708 function_arg_partial_nregs can result in unusual subregs, e.g.
11709 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11710 store the whole arg to memory is often more efficient than code
11711 to store pieces, and we know that space is available in the right
11712 place for the whole arg. */
11713 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11714
11715 i = 0;
11716 do
11717 {
11718 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11719 rtx off = GEN_INT (i++ * 4);
11720 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11721 }
11722 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11723
11724 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11725 }
11726
11727 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11728 but must also be copied into the parameter save area starting at
11729 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11730 to the GPRs and/or memory. Return the number of elements used. */
11731
11732 static int
11733 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11734 int align_words, rtx *rvec)
11735 {
11736 int k = 0;
11737
11738 if (align_words < GP_ARG_NUM_REG)
11739 {
11740 int n_words = rs6000_arg_size (mode, type);
11741
11742 if (align_words + n_words > GP_ARG_NUM_REG
11743 || mode == BLKmode
11744 || (TARGET_32BIT && TARGET_POWERPC64))
11745 {
11746 /* If this is partially on the stack, then we only
11747 include the portion actually in registers here. */
11748 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11749 int i = 0;
11750
11751 if (align_words + n_words > GP_ARG_NUM_REG)
11752 {
11753 /* Not all of the arg fits in gprs. Say that it goes in memory
11754 too, using a magic NULL_RTX component. Also see comment in
11755 rs6000_mixed_function_arg for why the normal
11756 function_arg_partial_nregs scheme doesn't work in this case. */
11757 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11758 }
11759
11760 do
11761 {
11762 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11763 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11764 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11765 }
11766 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11767 }
11768 else
11769 {
11770 /* The whole arg fits in gprs. */
11771 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11772 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11773 }
11774 }
11775 else
11776 {
11777 /* It's entirely in memory. */
11778 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11779 }
11780
11781 return k;
11782 }
11783
11784 /* RVEC is a vector of K components of an argument of mode MODE.
11785 Construct the final function_arg return value from it. */
11786
11787 static rtx
11788 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11789 {
11790 gcc_assert (k >= 1);
11791
11792 /* Avoid returning a PARALLEL in the trivial cases. */
11793 if (k == 1)
11794 {
11795 if (XEXP (rvec[0], 0) == NULL_RTX)
11796 return NULL_RTX;
11797
11798 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11799 return XEXP (rvec[0], 0);
11800 }
11801
11802 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11803 }
11804
11805 /* Determine where to put an argument to a function.
11806 Value is zero to push the argument on the stack,
11807 or a hard register in which to store the argument.
11808
11809 MODE is the argument's machine mode.
11810 TYPE is the data type of the argument (as a tree).
11811 This is null for libcalls where that information may
11812 not be available.
11813 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11814 the preceding args and about the function being called. It is
11815 not modified in this routine.
11816 NAMED is nonzero if this argument is a named parameter
11817 (otherwise it is an extra parameter matching an ellipsis).
11818
11819 On RS/6000 the first eight words of non-FP are normally in registers
11820 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11821 Under V.4, the first 8 FP args are in registers.
11822
11823 If this is floating-point and no prototype is specified, we use
11824 both an FP and integer register (or possibly FP reg and stack). Library
11825 functions (when CALL_LIBCALL is set) always have the proper types for args,
11826 so we can pass the FP value just in one register. emit_library_function
11827 doesn't support PARALLEL anyway.
11828
11829 Note that for args passed by reference, function_arg will be called
11830 with MODE and TYPE set to that of the pointer to the arg, not the arg
11831 itself. */
11832
11833 static rtx
11834 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11835 const_tree type, bool named)
11836 {
11837 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11838 enum rs6000_abi abi = DEFAULT_ABI;
11839 machine_mode elt_mode;
11840 int n_elts;
11841
11842 /* Return a marker to indicate whether the bit in CR1 that V.4 uses
11843 to say fp args were passed in registers needs to be set or cleared.
11844 Assume that we don't need the marker for software floating point,
11845 or compiler generated library calls. */
11846 if (mode == VOIDmode)
11847 {
11848 if (abi == ABI_V4
11849 && (cum->call_cookie & CALL_LIBCALL) == 0
11850 && (cum->stdarg
11851 || (cum->nargs_prototype < 0
11852 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11853 && TARGET_HARD_FLOAT)
11854 return GEN_INT (cum->call_cookie
11855 | ((cum->fregno == FP_ARG_MIN_REG)
11856 ? CALL_V4_SET_FP_ARGS
11857 : CALL_V4_CLEAR_FP_ARGS));
11858
11859 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11860 }
11861
11862 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11863
11864 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11865 {
11866 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11867 if (rslt != NULL_RTX)
11868 return rslt;
11869 /* Else fall through to usual handling. */
11870 }
11871
11872 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11873 {
11874 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11875 rtx r, off;
11876 int i, k = 0;
11877
11878 /* Do we also need to pass this argument in the parameter save area?
11879 Library support functions for IEEE 128-bit are assumed to not need the
11880 value passed both in GPRs and in vector registers. */
11881 if (TARGET_64BIT && !cum->prototype
11882 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11883 {
11884 int align_words = ROUND_UP (cum->words, 2);
11885 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11886 }
11887
11888 /* Describe where this argument goes in the vector registers. */
11889 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11890 {
11891 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11892 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11893 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11894 }
11895
11896 return rs6000_finish_function_arg (mode, rvec, k);
11897 }
11898 else if (TARGET_ALTIVEC_ABI
11899 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11900 || (type && TREE_CODE (type) == VECTOR_TYPE
11901 && int_size_in_bytes (type) == 16)))
11902 {
11903 if (named || abi == ABI_V4)
11904 return NULL_RTX;
11905 else
11906 {
11907 /* Vector parameters to varargs functions under AIX or Darwin
11908 get passed in memory and possibly also in GPRs. */
11909 int align, align_words, n_words;
11910 machine_mode part_mode;
11911
11912 /* Vector parameters must be 16-byte aligned. In 32-bit
11913 mode this means we need to take into account the offset
11914 to the parameter save area. In 64-bit mode, they just
11915 have to start on an even word, since the parameter save
11916 area is 16-byte aligned. */
11917 if (TARGET_32BIT)
11918 align = -(rs6000_parm_offset () + cum->words) & 3;
11919 else
11920 align = cum->words & 1;
11921 align_words = cum->words + align;
11922
11923 /* Out of registers? Memory, then. */
11924 if (align_words >= GP_ARG_NUM_REG)
11925 return NULL_RTX;
11926
11927 if (TARGET_32BIT && TARGET_POWERPC64)
11928 return rs6000_mixed_function_arg (mode, type, align_words);
11929
11930 /* The vector value goes in GPRs. Only the part of the
11931 value in GPRs is reported here. */
11932 part_mode = mode;
11933 n_words = rs6000_arg_size (mode, type);
11934 if (align_words + n_words > GP_ARG_NUM_REG)
11935 /* Fortunately, there are only two possibilities, the value
11936 is either wholly in GPRs or half in GPRs and half not. */
11937 part_mode = DImode;
11938
11939 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11940 }
11941 }
11942
11943 else if (abi == ABI_V4)
11944 {
11945 if (abi_v4_pass_in_fpr (mode, named))
11946 {
11947 /* _Decimal128 must use an even/odd register pair. This assumes
11948 that the register number is odd when fregno is odd. */
11949 if (mode == TDmode && (cum->fregno % 2) == 1)
11950 cum->fregno++;
11951
11952 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11953 <= FP_ARG_V4_MAX_REG)
11954 return gen_rtx_REG (mode, cum->fregno);
11955 else
11956 return NULL_RTX;
11957 }
11958 else
11959 {
11960 int n_words = rs6000_arg_size (mode, type);
11961 int gregno = cum->sysv_gregno;
11962
11963 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11964 So is any other 2-word item such as complex int, due to a
11965 historical mistake. */
11966 if (n_words == 2)
11967 gregno += (1 - gregno) & 1;
11968
11969 /* Multi-reg args are not split between registers and stack. */
11970 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11971 return NULL_RTX;
11972
11973 if (TARGET_32BIT && TARGET_POWERPC64)
11974 return rs6000_mixed_function_arg (mode, type,
11975 gregno - GP_ARG_MIN_REG);
11976 return gen_rtx_REG (mode, gregno);
11977 }
11978 }
11979 else
11980 {
11981 int align_words = rs6000_parm_start (mode, type, cum->words);
11982
11983 /* _Decimal128 must be passed in an even/odd float register pair.
11984 This assumes that the register number is odd when fregno is odd. */
11985 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11986 cum->fregno++;
11987
11988 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11989 && !(TARGET_AIX && !TARGET_ELF
11990 && type != NULL && AGGREGATE_TYPE_P (type)))
11991 {
11992 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11993 rtx r, off;
11994 int i, k = 0;
11995 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11996 int fpr_words;
11997
11998 /* Do we also need to pass this argument in the parameter
11999 save area? */
12000 if (type && (cum->nargs_prototype <= 0
12001 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12002 && TARGET_XL_COMPAT
12003 && align_words >= GP_ARG_NUM_REG)))
12004 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12005
12006 /* Describe where this argument goes in the fprs. */
12007 for (i = 0; i < n_elts
12008 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12009 {
12010 /* Check if the argument is split over registers and memory.
12011 This can only ever happen for long double or _Decimal128;
12012 complex types are handled via split_complex_arg. */
12013 machine_mode fmode = elt_mode;
12014 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12015 {
12016 gcc_assert (FLOAT128_2REG_P (fmode));
12017 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12018 }
12019
12020 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12021 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12022 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12023 }
12024
12025 /* If there were not enough FPRs to hold the argument, the rest
12026 usually goes into memory. However, if the current position
12027 is still within the register parameter area, a portion may
12028 actually have to go into GPRs.
12029
12030 Note that it may happen that the portion of the argument
12031 passed in the first "half" of the first GPR was already
12032 passed in the last FPR as well.
12033
12034 For unnamed arguments, we already set up GPRs to cover the
12035 whole argument in rs6000_psave_function_arg, so there is
12036 nothing further to do at this point. */
12037 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12038 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12039 && cum->nargs_prototype > 0)
12040 {
12041 static bool warned;
12042
12043 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12044 int n_words = rs6000_arg_size (mode, type);
12045
12046 align_words += fpr_words;
12047 n_words -= fpr_words;
12048
12049 do
12050 {
12051 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12052 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12053 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12054 }
12055 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12056
12057 if (!warned && warn_psabi)
12058 {
12059 warned = true;
12060 inform (input_location,
12061 "the ABI of passing homogeneous float aggregates"
12062 " has changed in GCC 5");
12063 }
12064 }
12065
12066 return rs6000_finish_function_arg (mode, rvec, k);
12067 }
12068 else if (align_words < GP_ARG_NUM_REG)
12069 {
12070 if (TARGET_32BIT && TARGET_POWERPC64)
12071 return rs6000_mixed_function_arg (mode, type, align_words);
12072
12073 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12074 }
12075 else
12076 return NULL_RTX;
12077 }
12078 }
12079 \f
12080 /* For an arg passed partly in registers and partly in memory, this is
12081 the number of bytes passed in registers. For args passed entirely in
12082 registers or entirely in memory, zero. When an arg is described by a
12083 PARALLEL, perhaps using more than one register type, this function
12084 returns the number of bytes used by the first element of the PARALLEL. */
12085
12086 static int
12087 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12088 tree type, bool named)
12089 {
12090 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12091 bool passed_in_gprs = true;
12092 int ret = 0;
12093 int align_words;
12094 machine_mode elt_mode;
12095 int n_elts;
12096
12097 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12098
12099 if (DEFAULT_ABI == ABI_V4)
12100 return 0;
12101
12102 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12103 {
12104 /* If we are passing this arg in the fixed parameter save area (gprs or
12105 memory) as well as VRs, we do not use the partial bytes mechanism;
12106 instead, rs6000_function_arg will return a PARALLEL including a memory
12107 element as necessary. Library support functions for IEEE 128-bit are
12108 assumed to not need the value passed both in GPRs and in vector
12109 registers. */
12110 if (TARGET_64BIT && !cum->prototype
12111 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12112 return 0;
12113
12114 /* Otherwise, we pass in VRs only. Check for partial copies. */
12115 passed_in_gprs = false;
12116 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12117 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12118 }
12119
12120 /* In this complicated case we just disable the partial_nregs code. */
12121 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12122 return 0;
12123
12124 align_words = rs6000_parm_start (mode, type, cum->words);
12125
12126 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12127 && !(TARGET_AIX && !TARGET_ELF
12128 && type != NULL && AGGREGATE_TYPE_P (type)))
12129 {
12130 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12131
12132 /* If we are passing this arg in the fixed parameter save area
12133 (gprs or memory) as well as FPRs, we do not use the partial
12134 bytes mechanism; instead, rs6000_function_arg will return a
12135 PARALLEL including a memory element as necessary. */
12136 if (type
12137 && (cum->nargs_prototype <= 0
12138 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12139 && TARGET_XL_COMPAT
12140 && align_words >= GP_ARG_NUM_REG)))
12141 return 0;
12142
12143 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12144 passed_in_gprs = false;
12145 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12146 {
12147 /* Compute number of bytes / words passed in FPRs. If there
12148 is still space available in the register parameter area
12149 *after* that amount, a part of the argument will be passed
12150 in GPRs. In that case, the total amount passed in any
12151 registers is equal to the amount that would have been passed
12152 in GPRs if everything were passed there, so we fall back to
12153 the GPR code below to compute the appropriate value. */
12154 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12155 * MIN (8, GET_MODE_SIZE (elt_mode)));
12156 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12157
12158 if (align_words + fpr_words < GP_ARG_NUM_REG)
12159 passed_in_gprs = true;
12160 else
12161 ret = fpr;
12162 }
12163 }
12164
12165 if (passed_in_gprs
12166 && align_words < GP_ARG_NUM_REG
12167 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12168 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12169
12170 if (ret != 0 && TARGET_DEBUG_ARG)
12171 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12172
12173 return ret;
12174 }
12175 \f
12176 /* A C expression that indicates when an argument must be passed by
12177 reference. If nonzero for an argument, a copy of that argument is
12178 made in memory and a pointer to the argument is passed instead of
12179 the argument itself. The pointer is passed in whatever way is
12180 appropriate for passing a pointer to that type.
12181
12182 Under V.4, aggregates and long double are passed by reference.
12183
12184 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12185 reference unless the AltiVec vector extension ABI is in force.
12186
12187 As an extension to all ABIs, variable sized types are passed by
12188 reference. */
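
/* Hedged examples of the rules above (the types are invented for
   illustration): under V.4 an argument of type "struct { int x, y; }"
   is copied to memory and only its address travels in a register; a
   variable-length array parameter goes by reference under every ABI,
   since int_size_in_bytes returns -1 for it; and a 32-byte GCC vector
   exceeds both the 16- and 8-byte limits below, so it also takes the
   by-reference path, with the one-time -Wpsabi warning.  */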
12189
12190 static bool
12191 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12192 machine_mode mode, const_tree type,
12193 bool named ATTRIBUTE_UNUSED)
12194 {
12195 if (!type)
12196 return 0;
12197
12198 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12199 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12200 {
12201 if (TARGET_DEBUG_ARG)
12202 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12203 return 1;
12204 }
12205
12206 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12207 {
12208 if (TARGET_DEBUG_ARG)
12209 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12210 return 1;
12211 }
12212
12213 if (int_size_in_bytes (type) < 0)
12214 {
12215 if (TARGET_DEBUG_ARG)
12216 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12217 return 1;
12218 }
12219
12220 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12221 modes only exist for GCC vector types if -maltivec. */
12222 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12223 {
12224 if (TARGET_DEBUG_ARG)
12225 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12226 return 1;
12227 }
12228
12229 /* Pass synthetic vectors in memory. */
12230 if (TREE_CODE (type) == VECTOR_TYPE
12231 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12232 {
12233 static bool warned_for_pass_big_vectors = false;
12234 if (TARGET_DEBUG_ARG)
12235 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12236 if (!warned_for_pass_big_vectors)
12237 {
12238 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12239 "non-standard ABI extension with no compatibility "
12240 "guarantee");
12241 warned_for_pass_big_vectors = true;
12242 }
12243 return 1;
12244 }
12245
12246 return 0;
12247 }
12248
12249 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12250 already processed. Return true if the parameter must be passed
12251 (fully or partially) on the stack. */
12252
12253 static bool
12254 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12255 {
12256 machine_mode mode;
12257 int unsignedp;
12258 rtx entry_parm;
12259
12260 /* Catch errors. */
12261 if (type == NULL || type == error_mark_node)
12262 return true;
12263
12264 /* Handle types with no storage requirement. */
12265 if (TYPE_MODE (type) == VOIDmode)
12266 return false;
12267
12268 /* Handle complex types: check each component, advancing ARGS_SO_FAR for each. */
12269 if (TREE_CODE (type) == COMPLEX_TYPE)
12270 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12271 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12272
12273 /* Handle transparent aggregates. */
12274 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12275 && TYPE_TRANSPARENT_AGGR (type))
12276 type = TREE_TYPE (first_field (type));
12277
12278 /* See if this arg was passed by invisible reference. */
12279 if (pass_by_reference (get_cumulative_args (args_so_far),
12280 TYPE_MODE (type), type, true))
12281 type = build_pointer_type (type);
12282
12283 /* Find mode as it is passed by the ABI. */
12284 unsignedp = TYPE_UNSIGNED (type);
12285 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12286
12287 /* If we must pass in stack, we need a stack. */
12288 if (rs6000_must_pass_in_stack (mode, type))
12289 return true;
12290
12291 /* If there is no incoming register, we need a stack. */
12292 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12293 if (entry_parm == NULL)
12294 return true;
12295
12296 /* Likewise if we need to pass both in registers and on the stack. */
12297 if (GET_CODE (entry_parm) == PARALLEL
12298 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12299 return true;
12300
12301 /* Also true if we're partially in registers and partially not. */
12302 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12303 return true;
12304
12305 /* Update info on where next arg arrives in registers. */
12306 rs6000_function_arg_advance (args_so_far, mode, type, true);
12307 return false;
12308 }
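
/* Hedged example (the signature is invented for illustration): for a
   prototyped ELFv2 function "double f (int a, double b)", A arrives in
   r3 and B in f1, rs6000_function_arg returns a plain REG for each,
   no partial bytes are reported, and this predicate is false for both
   parameters -- which is what allows the ELFv2 code below to omit the
   parameter save area.  */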
12309
12310 /* Return true if FUN has no prototype, has a variable argument
12311 list, or passes any parameter in memory. */
12312
12313 static bool
12314 rs6000_function_parms_need_stack (tree fun, bool incoming)
12315 {
12316 tree fntype, result;
12317 CUMULATIVE_ARGS args_so_far_v;
12318 cumulative_args_t args_so_far;
12319
12320 if (!fun)
12321 /* Must be a libcall, all of which only use reg parms. */
12322 return false;
12323
12324 fntype = fun;
12325 if (!TYPE_P (fun))
12326 fntype = TREE_TYPE (fun);
12327
12328 /* Varargs functions need the parameter save area. */
12329 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12330 return true;
12331
12332 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12333 args_so_far = pack_cumulative_args (&args_so_far_v);
12334
12335 /* When incoming, we will have been passed the function decl.
12336 It is necessary to use the decl to handle K&R style functions,
12337 where TYPE_ARG_TYPES may not be available. */
12338 if (incoming)
12339 {
12340 gcc_assert (DECL_P (fun));
12341 result = DECL_RESULT (fun);
12342 }
12343 else
12344 result = TREE_TYPE (fntype);
12345
12346 if (result && aggregate_value_p (result, fntype))
12347 {
12348 if (!TYPE_P (result))
12349 result = TREE_TYPE (result);
12350 result = build_pointer_type (result);
12351 rs6000_parm_needs_stack (args_so_far, result);
12352 }
12353
12354 if (incoming)
12355 {
12356 tree parm;
12357
12358 for (parm = DECL_ARGUMENTS (fun);
12359 parm && parm != void_list_node;
12360 parm = TREE_CHAIN (parm))
12361 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12362 return true;
12363 }
12364 else
12365 {
12366 function_args_iterator args_iter;
12367 tree arg_type;
12368
12369 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12370 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12371 return true;
12372 }
12373
12374 return false;
12375 }
12376
12377 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12378 usually a constant depending on the ABI. However, in the ELFv2 ABI
12379 the register parameter area is optional when calling a function that
12380 has a prototype in scope, has no variable argument list, and passes
12381 all parameters in registers. */
12382
12383 int
12384 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12385 {
12386 int reg_parm_stack_space;
12387
12388 switch (DEFAULT_ABI)
12389 {
12390 default:
12391 reg_parm_stack_space = 0;
12392 break;
12393
12394 case ABI_AIX:
12395 case ABI_DARWIN:
12396 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12397 break;
12398
12399 case ABI_ELFv2:
12400 /* ??? Recomputing this every time is a bit expensive. Is there
12401 a place to cache this information? */
12402 if (rs6000_function_parms_need_stack (fun, incoming))
12403 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12404 else
12405 reg_parm_stack_space = 0;
12406 break;
12407 }
12408
12409 return reg_parm_stack_space;
12410 }
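
/* For example (the sizes follow directly from the cases above): 64-bit
   AIX and ELFv2 reserve 8 GPRs * 8 bytes = 64 bytes, 32-bit AIX and
   Darwin reserve 8 GPRs * 4 bytes = 32 bytes, and a prototyped ELFv2
   call whose arguments all fit in registers reserves nothing.  */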
12411
12412 static void
12413 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12414 {
12415 int i;
12416 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12417
12418 if (nregs == 0)
12419 return;
12420
12421 for (i = 0; i < nregs; i++)
12422 {
12423 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12424 if (reload_completed)
12425 {
12426 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12427 tem = NULL_RTX;
12428 else
12429 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12430 i * GET_MODE_SIZE (reg_mode));
12431 }
12432 else
12433 tem = replace_equiv_address (tem, XEXP (tem, 0));
12434
12435 gcc_assert (tem);
12436
12437 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12438 }
12439 }
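
/* Hedged usage sketch (the register choice is invented for
   illustration): on a 64-bit target,

       rs6000_move_block_from_reg (GP_ARG_MIN_REG + 3, mem, 2);

   stores r6 at MEM+0 and r7 at MEM+8, i.e. it spills two consecutive
   argument GPRs into consecutive doublewords of MEM.  */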
12440 \f
12441 /* Perform any actions needed for a function that is receiving a
12442 variable number of arguments.
12443
12444 CUM is as above.
12445
12446 MODE and TYPE are the mode and type of the current parameter.
12447
12448 PRETEND_SIZE is a variable that should be set to the amount of stack
12449 that must be pushed by the prolog to pretend that our caller pushed
12450 it.
12451
12452 Normally, this macro will push all remaining incoming registers on the
12453 stack and set PRETEND_SIZE to the length of the registers pushed. */
12454
12455 static void
12456 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12457 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12458 int no_rtl)
12459 {
12460 CUMULATIVE_ARGS next_cum;
12461 int reg_size = TARGET_32BIT ? 4 : 8;
12462 rtx save_area = NULL_RTX, mem;
12463 int first_reg_offset;
12464 alias_set_type set;
12465
12466 /* Skip the last named argument. */
12467 next_cum = *get_cumulative_args (cum);
12468 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12469
12470 if (DEFAULT_ABI == ABI_V4)
12471 {
12472 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12473
12474 if (! no_rtl)
12475 {
12476 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12477 HOST_WIDE_INT offset = 0;
12478
12479 /* Try to optimize the size of the varargs save area.
12480 The ABI requires that ap.reg_save_area is doubleword
12481 aligned, but we don't need to allocate space for all
12482 the bytes, only for those to which we will actually save
12483 anything. */
12484 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12485 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12486 if (TARGET_HARD_FLOAT
12487 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12488 && cfun->va_list_fpr_size)
12489 {
12490 if (gpr_reg_num)
12491 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12492 * UNITS_PER_FP_WORD;
12493 if (cfun->va_list_fpr_size
12494 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12495 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12496 else
12497 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12498 * UNITS_PER_FP_WORD;
12499 }
12500 if (gpr_reg_num)
12501 {
12502 offset = -((first_reg_offset * reg_size) & ~7);
12503 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12504 {
12505 gpr_reg_num = cfun->va_list_gpr_size;
12506 if (reg_size == 4 && (first_reg_offset & 1))
12507 gpr_reg_num++;
12508 }
12509 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12510 }
12511 else if (fpr_size)
12512 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12513 * UNITS_PER_FP_WORD
12514 - (int) (GP_ARG_NUM_REG * reg_size);
12515
12516 if (gpr_size + fpr_size)
12517 {
12518 rtx reg_save_area
12519 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12520 gcc_assert (MEM_P (reg_save_area));
12521 reg_save_area = XEXP (reg_save_area, 0);
12522 if (GET_CODE (reg_save_area) == PLUS)
12523 {
12524 gcc_assert (XEXP (reg_save_area, 0)
12525 == virtual_stack_vars_rtx);
12526 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12527 offset += INTVAL (XEXP (reg_save_area, 1));
12528 }
12529 else
12530 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12531 }
12532
12533 cfun->machine->varargs_save_offset = offset;
12534 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12535 }
12536 }
12537 else
12538 {
12539 first_reg_offset = next_cum.words;
12540 save_area = crtl->args.internal_arg_pointer;
12541
12542 if (targetm.calls.must_pass_in_stack (mode, type))
12543 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12544 }
12545
12546 set = get_varargs_alias_set ();
12547 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12548 && cfun->va_list_gpr_size)
12549 {
12550 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12551
12552 if (va_list_gpr_counter_field)
12553 /* V4 va_list_gpr_size counts number of registers needed. */
12554 n_gpr = cfun->va_list_gpr_size;
12555 else
12556 /* char * va_list instead counts number of bytes needed. */
12557 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12558
12559 if (nregs > n_gpr)
12560 nregs = n_gpr;
12561
12562 mem = gen_rtx_MEM (BLKmode,
12563 plus_constant (Pmode, save_area,
12564 first_reg_offset * reg_size));
12565 MEM_NOTRAP_P (mem) = 1;
12566 set_mem_alias_set (mem, set);
12567 set_mem_align (mem, BITS_PER_WORD);
12568
12569 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12570 nregs);
12571 }
12572
12573 /* Save FP registers if needed. */
12574 if (DEFAULT_ABI == ABI_V4
12575 && TARGET_HARD_FLOAT
12576 && ! no_rtl
12577 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12578 && cfun->va_list_fpr_size)
12579 {
12580 int fregno = next_cum.fregno, nregs;
12581 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12582 rtx lab = gen_label_rtx ();
12583 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12584 * UNITS_PER_FP_WORD);
12585
12586 emit_jump_insn
12587 (gen_rtx_SET (pc_rtx,
12588 gen_rtx_IF_THEN_ELSE (VOIDmode,
12589 gen_rtx_NE (VOIDmode, cr1,
12590 const0_rtx),
12591 gen_rtx_LABEL_REF (VOIDmode, lab),
12592 pc_rtx)));
12593
12594 for (nregs = 0;
12595 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12596 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12597 {
12598 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12599 plus_constant (Pmode, save_area, off));
12600 MEM_NOTRAP_P (mem) = 1;
12601 set_mem_alias_set (mem, set);
12602 set_mem_align (mem, GET_MODE_ALIGNMENT (
12603 TARGET_HARD_FLOAT ? DFmode : SFmode));
12604 emit_move_insn (mem, gen_rtx_REG (
12605 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12606 }
12607
12608 emit_label (lab);
12609 }
12610 }
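
/* A hedged picture of the V4 register save area laid out above, before
   the size optimization trims it: 8 GPR words (r3..r10, 32 bytes) at
   the bottom, then 8 FPR doublewords (f1..f8, 64 bytes), so a GPR slot
   lives at save_area + gpr_index * 4 and an FPR slot at
   save_area + 32 + fpr_index * 8.  */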
12611
12612 /* Create the va_list data type. */
12613
12614 static tree
12615 rs6000_build_builtin_va_list (void)
12616 {
12617 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12618
12619 /* For AIX, prefer 'char *' because that's what the system
12620 header files like. */
12621 if (DEFAULT_ABI != ABI_V4)
12622 return build_pointer_type (char_type_node);
12623
12624 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12625 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12626 get_identifier ("__va_list_tag"), record);
12627
12628 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12629 unsigned_char_type_node);
12630 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12631 unsigned_char_type_node);
12632 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12633 every user file. */
12634 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12635 get_identifier ("reserved"), short_unsigned_type_node);
12636 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12637 get_identifier ("overflow_arg_area"),
12638 ptr_type_node);
12639 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12640 get_identifier ("reg_save_area"),
12641 ptr_type_node);
12642
12643 va_list_gpr_counter_field = f_gpr;
12644 va_list_fpr_counter_field = f_fpr;
12645
12646 DECL_FIELD_CONTEXT (f_gpr) = record;
12647 DECL_FIELD_CONTEXT (f_fpr) = record;
12648 DECL_FIELD_CONTEXT (f_res) = record;
12649 DECL_FIELD_CONTEXT (f_ovf) = record;
12650 DECL_FIELD_CONTEXT (f_sav) = record;
12651
12652 TYPE_STUB_DECL (record) = type_decl;
12653 TYPE_NAME (record) = type_decl;
12654 TYPE_FIELDS (record) = f_gpr;
12655 DECL_CHAIN (f_gpr) = f_fpr;
12656 DECL_CHAIN (f_fpr) = f_res;
12657 DECL_CHAIN (f_res) = f_ovf;
12658 DECL_CHAIN (f_ovf) = f_sav;
12659
12660 layout_type (record);
12661
12662 /* The correct type is an array type of one element. */
12663 return build_array_type (record, build_index_type (size_zero_node));
12664 }
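
/* For reference, the record built above is the familiar SVR4 va_list;
   a hedged C sketch (the field names mirror the FIELD_DECLs created
   above, the trailing annotations are ours):

       struct __va_list_tag {
	 unsigned char gpr;		// next GPR to use, 0 .. 8
	 unsigned char fpr;		// next FPR to use, 0 .. 8
	 unsigned short reserved;	// the named padding
	 void *overflow_arg_area;	// arguments passed on the stack
	 void *reg_save_area;		// spilled r3..r10 and f1..f8
       };

   and va_list itself is a one-element array of that record.  */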
12665
12666 /* Implement va_start. */
12667
12668 static void
12669 rs6000_va_start (tree valist, rtx nextarg)
12670 {
12671 HOST_WIDE_INT words, n_gpr, n_fpr;
12672 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12673 tree gpr, fpr, ovf, sav, t;
12674
12675 /* Only SVR4 needs something special. */
12676 if (DEFAULT_ABI != ABI_V4)
12677 {
12678 std_expand_builtin_va_start (valist, nextarg);
12679 return;
12680 }
12681
12682 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12683 f_fpr = DECL_CHAIN (f_gpr);
12684 f_res = DECL_CHAIN (f_fpr);
12685 f_ovf = DECL_CHAIN (f_res);
12686 f_sav = DECL_CHAIN (f_ovf);
12687
12688 valist = build_simple_mem_ref (valist);
12689 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12690 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12691 f_fpr, NULL_TREE);
12692 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12693 f_ovf, NULL_TREE);
12694 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12695 f_sav, NULL_TREE);
12696
12697 /* Count number of gp and fp argument registers used. */
12698 words = crtl->args.info.words;
12699 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12700 GP_ARG_NUM_REG);
12701 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12702 FP_ARG_NUM_REG);
12703
12704 if (TARGET_DEBUG_ARG)
12705 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12706 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12707 words, n_gpr, n_fpr);
12708
12709 if (cfun->va_list_gpr_size)
12710 {
12711 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12712 build_int_cst (NULL_TREE, n_gpr));
12713 TREE_SIDE_EFFECTS (t) = 1;
12714 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12715 }
12716
12717 if (cfun->va_list_fpr_size)
12718 {
12719 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12720 build_int_cst (NULL_TREE, n_fpr));
12721 TREE_SIDE_EFFECTS (t) = 1;
12722 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12723
12724 #ifdef HAVE_AS_GNU_ATTRIBUTE
12725 if (call_ABI_of_interest (cfun->decl))
12726 rs6000_passes_float = true;
12727 #endif
12728 }
12729
12730 /* Find the overflow area. */
12731 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12732 if (words != 0)
12733 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12734 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12735 TREE_SIDE_EFFECTS (t) = 1;
12736 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12737
12738 /* If there were no va_arg invocations, don't set up the register
12739 save area. */
12740 if (!cfun->va_list_gpr_size
12741 && !cfun->va_list_fpr_size
12742 && n_gpr < GP_ARG_NUM_REG
12743 && n_fpr < FP_ARG_V4_MAX_REG)
12744 return;
12745
12746 /* Find the register save area. */
12747 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12748 if (cfun->machine->varargs_save_offset)
12749 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12750 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12751 TREE_SIDE_EFFECTS (t) = 1;
12752 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12753 }
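
/* What the V4 expansion above does, as a hedged C sketch (AP is the
   single __va_list_tag element; WORDS, N_GPR and N_FPR are the counts
   computed above, and MIN_UNITS_PER_WORD is 4 on this 32-bit ABI):

       ap->gpr = n_gpr;
       ap->fpr = n_fpr;
       ap->overflow_arg_area = incoming_args + words * 4;
       ap->reg_save_area = frame_base + varargs_save_offset;

   where incoming_args stands for crtl->args.internal_arg_pointer and
   frame_base for virtual_stack_vars_rtx.  */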
12754
12755 /* Implement va_arg. */
12756
12757 static tree
12758 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12759 gimple_seq *post_p)
12760 {
12761 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12762 tree gpr, fpr, ovf, sav, reg, t, u;
12763 int size, rsize, n_reg, sav_ofs, sav_scale;
12764 tree lab_false, lab_over, addr;
12765 int align;
12766 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12767 int regalign = 0;
12768 gimple *stmt;
12769
12770 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12771 {
12772 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12773 return build_va_arg_indirect_ref (t);
12774 }
12775
12776 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12777 earlier version of gcc, with the property that it always applied alignment
12778 adjustments to the va-args (even for zero-sized types). The cheapest way
12779 to deal with this is to replicate the effect of the part of
12780 std_gimplify_va_arg_expr that carries out the align adjust, for the
12781 case that matters here.
12782 We don't need to check for pass-by-reference because of the test above.
12783 We can return a simplified answer, since we know there's no offset to add. */
12784
12785 if (((TARGET_MACHO
12786 && rs6000_darwin64_abi)
12787 || DEFAULT_ABI == ABI_ELFv2
12788 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12789 && integer_zerop (TYPE_SIZE (type)))
12790 {
12791 unsigned HOST_WIDE_INT align, boundary;
12792 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12793 align = PARM_BOUNDARY / BITS_PER_UNIT;
12794 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12795 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12796 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12797 boundary /= BITS_PER_UNIT;
12798 if (boundary > align)
12799 {
12800 tree t;
12801 /* This updates arg ptr by the amount that would be necessary
12802 to align the zero-sized (but not zero-alignment) item. */
12803 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12804 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12805 gimplify_and_add (t, pre_p);
12806
12807 t = fold_convert (sizetype, valist_tmp);
12808 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12809 fold_convert (TREE_TYPE (valist),
12810 fold_build2 (BIT_AND_EXPR, sizetype, t,
12811 size_int (-boundary))));
12812 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12813 gimplify_and_add (t, pre_p);
12814 }
12815 /* Since it is zero-sized there's no increment for the item itself. */
12816 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12817 return build_va_arg_indirect_ref (valist_tmp);
12818 }
12819
12820 if (DEFAULT_ABI != ABI_V4)
12821 {
12822 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12823 {
12824 tree elem_type = TREE_TYPE (type);
12825 machine_mode elem_mode = TYPE_MODE (elem_type);
12826 int elem_size = GET_MODE_SIZE (elem_mode);
12827
12828 if (elem_size < UNITS_PER_WORD)
12829 {
12830 tree real_part, imag_part;
12831 gimple_seq post = NULL;
12832
12833 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12834 &post);
12835 /* Copy the value into a temporary, lest the formal temporary
12836 be reused out from under us. */
12837 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12838 gimple_seq_add_seq (pre_p, post);
12839
12840 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12841 post_p);
12842
12843 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12844 }
12845 }
12846
12847 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12848 }
12849
12850 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12851 f_fpr = DECL_CHAIN (f_gpr);
12852 f_res = DECL_CHAIN (f_fpr);
12853 f_ovf = DECL_CHAIN (f_res);
12854 f_sav = DECL_CHAIN (f_ovf);
12855
12856 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12857 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12858 f_fpr, NULL_TREE);
12859 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12860 f_ovf, NULL_TREE);
12861 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12862 f_sav, NULL_TREE);
12863
12864 size = int_size_in_bytes (type);
12865 rsize = (size + 3) / 4;
12866 int pad = 4 * rsize - size;
12867 align = 1;
12868
12869 machine_mode mode = TYPE_MODE (type);
12870 if (abi_v4_pass_in_fpr (mode, false))
12871 {
12872 /* FP args go in FP registers, if present. */
12873 reg = fpr;
12874 n_reg = (size + 7) / 8;
12875 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12876 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12877 if (mode != SFmode && mode != SDmode)
12878 align = 8;
12879 }
12880 else
12881 {
12882 /* Otherwise into GP registers. */
12883 reg = gpr;
12884 n_reg = rsize;
12885 sav_ofs = 0;
12886 sav_scale = 4;
12887 if (n_reg == 2)
12888 align = 8;
12889 }
12890
12891 /* Pull the value out of the saved registers.... */
12892
12893 lab_over = NULL;
12894 addr = create_tmp_var (ptr_type_node, "addr");
12895
12896 /* AltiVec vectors never go in registers when -mabi=altivec. */
12897 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12898 align = 16;
12899 else
12900 {
12901 lab_false = create_artificial_label (input_location);
12902 lab_over = create_artificial_label (input_location);
12903
12904 /* Long long is aligned in the registers, as is any other 2-GPR
12905 item such as complex int, due to a historical mistake. */
12906 u = reg;
12907 if (n_reg == 2 && reg == gpr)
12908 {
12909 regalign = 1;
12910 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12911 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12912 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12913 unshare_expr (reg), u);
12914 }
12915 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12916 reg number is 0 for f1, so we want to make it odd. */
12917 else if (reg == fpr && mode == TDmode)
12918 {
12919 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12920 build_int_cst (TREE_TYPE (reg), 1));
12921 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12922 }
12923
12924 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12925 t = build2 (GE_EXPR, boolean_type_node, u, t);
12926 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12927 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12928 gimplify_and_add (t, pre_p);
12929
12930 t = sav;
12931 if (sav_ofs)
12932 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12933
12934 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12935 build_int_cst (TREE_TYPE (reg), n_reg));
12936 u = fold_convert (sizetype, u);
12937 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12938 t = fold_build_pointer_plus (t, u);
12939
12940 /* _Decimal32 varargs are located in the second word of the 64-bit
12941 FP register for 32-bit binaries. */
12942 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12943 t = fold_build_pointer_plus_hwi (t, size);
12944
12945 /* Args are passed right-aligned. */
12946 if (BYTES_BIG_ENDIAN)
12947 t = fold_build_pointer_plus_hwi (t, pad);
12948
12949 gimplify_assign (addr, t, pre_p);
12950
12951 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12952
12953 stmt = gimple_build_label (lab_false);
12954 gimple_seq_add_stmt (pre_p, stmt);
12955
12956 if ((n_reg == 2 && !regalign) || n_reg > 2)
12957 {
12958 /* Ensure that we don't find any more args in regs.
12959 Alignment has already been taken care of for the special cases. */
12960 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12961 }
12962 }
12963
12964 /* ... otherwise out of the overflow area. */
12965
12966 /* Care for on-stack alignment if needed. */
12967 t = ovf;
12968 if (align != 1)
12969 {
12970 t = fold_build_pointer_plus_hwi (t, align - 1);
12971 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12972 build_int_cst (TREE_TYPE (t), -align));
12973 }
12974
12975 /* Args are passed right-aligned. */
12976 if (BYTES_BIG_ENDIAN)
12977 t = fold_build_pointer_plus_hwi (t, pad);
12978
12979 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12980
12981 gimplify_assign (unshare_expr (addr), t, pre_p);
12982
12983 t = fold_build_pointer_plus_hwi (t, size);
12984 gimplify_assign (unshare_expr (ovf), t, pre_p);
12985
12986 if (lab_over)
12987 {
12988 stmt = gimple_build_label (lab_over);
12989 gimple_seq_add_stmt (pre_p, stmt);
12990 }
12991
12992 if (STRICT_ALIGNMENT
12993 && (TYPE_ALIGN (type)
12994 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12995 {
12996 /* The value (of type complex double, for example) may not be
12997 aligned in memory in the saved registers, so copy via a
12998 temporary. (This is the same code as used for SPARC.) */
12999 tree tmp = create_tmp_var (type, "va_arg_tmp");
13000 tree dest_addr = build_fold_addr_expr (tmp);
13001
13002 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13003 3, dest_addr, addr, size_int (rsize * 4));
13004 TREE_ADDRESSABLE (tmp) = 1;
13005
13006 gimplify_and_add (copy, pre_p);
13007 addr = dest_addr;
13008 }
13009
13010 addr = fold_convert (ptrtype, addr);
13011 return build_va_arg_indirect_ref (addr);
13012 }
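
/* In outline, the V4 path above is the classic two-way va_arg (hedged
   C sketch; REG is ap->gpr or ap->fpr as selected above, and the
   even/odd register-pairing adjustments are omitted):

       if (reg <= 8 - n_reg)
	 {
	   // still fits in the register save area
	   addr = ap->reg_save_area + sav_ofs + reg * sav_scale;
	   reg += n_reg;
	 }
       else
	 {
	   // no more register args; use the overflow area
	   reg = 8;
	   addr = align_up (ap->overflow_arg_area, align);
	   ap->overflow_arg_area = addr + size;
	 }

   plus the big-endian right-alignment and SDmode second-word tweaks
   handled above.  */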
13013
13014 /* Builtins. */
13015
13016 static void
13017 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13018 {
13019 tree t;
13020 unsigned classify = rs6000_builtin_info[(int)code].attr;
13021 const char *attr_string = "";
13022
13023 gcc_assert (name != NULL);
13024 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13025
13026 if (rs6000_builtin_decls[(int)code])
13027 fatal_error (input_location,
13028 "internal error: builtin function %qs already processed",
13029 name);
13030
13031 rs6000_builtin_decls[(int)code] = t =
13032 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13033
13034 /* Set any special attributes. */
13035 if ((classify & RS6000_BTC_CONST) != 0)
13036 {
13037 /* const function, function only depends on the inputs. */
13038 TREE_READONLY (t) = 1;
13039 TREE_NOTHROW (t) = 1;
13040 attr_string = ", const";
13041 }
13042 else if ((classify & RS6000_BTC_PURE) != 0)
13043 {
13044 /* pure function, function can read global memory, but does not set any
13045 external state. */
13046 DECL_PURE_P (t) = 1;
13047 TREE_NOTHROW (t) = 1;
13048 attr_string = ", pure";
13049 }
13050 else if ((classify & RS6000_BTC_FP) != 0)
13051 {
13052 /* Function is a math function. If -frounding-math is in effect, treat the
13053 function as not reading global memory, but it can have arbitrary side
13054 effects. If it is off, then assume the function is a const function.
13055 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13056 builtin-attribute.def that is used for the math functions. */
13057 TREE_NOTHROW (t) = 1;
13058 if (flag_rounding_math)
13059 {
13060 DECL_PURE_P (t) = 1;
13061 DECL_IS_NOVOPS (t) = 1;
13062 attr_string = ", fp, pure";
13063 }
13064 else
13065 {
13066 TREE_READONLY (t) = 1;
13067 attr_string = ", fp, const";
13068 }
13069 }
13070 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13071 gcc_unreachable ();
13072
13073 if (TARGET_DEBUG_BUILTIN)
13074 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13075 (int)code, name, attr_string);
13076 }
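
/* Hedged usage sketch (the function type variable is assumed to have
   been built beforehand): the builtin-init code registers entries with
   calls like

       def_builtin ("__builtin_altivec_vmaxsw", v4si_ftype_v4si_v4si,
		    ALTIVEC_BUILTIN_VMAXSW);

   and the RS6000_BTC_* bits recorded for that code in
   rs6000_builtin_info then decide above whether the decl is marked
   const, pure, or rounding-mode sensitive.  */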
13077
13078 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13079
13080 #undef RS6000_BUILTIN_0
13081 #undef RS6000_BUILTIN_1
13082 #undef RS6000_BUILTIN_2
13083 #undef RS6000_BUILTIN_3
13084 #undef RS6000_BUILTIN_A
13085 #undef RS6000_BUILTIN_D
13086 #undef RS6000_BUILTIN_H
13087 #undef RS6000_BUILTIN_P
13088 #undef RS6000_BUILTIN_X
13089
13090 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13091 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13092 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13093 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13094 { MASK, ICODE, NAME, ENUM },
13095
13096 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13097 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13098 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13099 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13100 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13101
13102 static const struct builtin_description bdesc_3arg[] =
13103 {
13104 #include "rs6000-builtin.def"
13105 };
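
/* Hedged illustration of the table scheme above (the entry is
   paraphrased; the real entries come from BU_* wrapper macros in
   rs6000-builtin.def): a line of the form

       RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
			 RS6000_BTM_ALTIVEC, RS6000_BTC_FP, CODE_FOR_fmav4sf4)

   contributes one { MASK, ICODE, NAME, ENUM } row to bdesc_3arg, while
   the other eight RS6000_BUILTIN_* macros expand to nothing, so each
   bdesc_* table below extracts exactly one builtin class from the same
   rs6000-builtin.def include.  */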
13106
13107 /* DST operations: void foo (void *, const int, const char). */
13108
13109 #undef RS6000_BUILTIN_0
13110 #undef RS6000_BUILTIN_1
13111 #undef RS6000_BUILTIN_2
13112 #undef RS6000_BUILTIN_3
13113 #undef RS6000_BUILTIN_A
13114 #undef RS6000_BUILTIN_D
13115 #undef RS6000_BUILTIN_H
13116 #undef RS6000_BUILTIN_P
13117 #undef RS6000_BUILTIN_X
13118
13119 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13120 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13121 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13122 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13123 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13124 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13125 { MASK, ICODE, NAME, ENUM },
13126
13127 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13128 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13129 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13130
13131 static const struct builtin_description bdesc_dst[] =
13132 {
13133 #include "rs6000-builtin.def"
13134 };
13135
13136 /* Simple binary operations: VECc = foo (VECa, VECb). */
13137
13138 #undef RS6000_BUILTIN_0
13139 #undef RS6000_BUILTIN_1
13140 #undef RS6000_BUILTIN_2
13141 #undef RS6000_BUILTIN_3
13142 #undef RS6000_BUILTIN_A
13143 #undef RS6000_BUILTIN_D
13144 #undef RS6000_BUILTIN_H
13145 #undef RS6000_BUILTIN_P
13146 #undef RS6000_BUILTIN_X
13147
13148 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13149 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13150 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13151 { MASK, ICODE, NAME, ENUM },
13152
13153 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13154 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13155 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13156 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13157 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13158 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13159
13160 static const struct builtin_description bdesc_2arg[] =
13161 {
13162 #include "rs6000-builtin.def"
13163 };
13164
13165 #undef RS6000_BUILTIN_0
13166 #undef RS6000_BUILTIN_1
13167 #undef RS6000_BUILTIN_2
13168 #undef RS6000_BUILTIN_3
13169 #undef RS6000_BUILTIN_A
13170 #undef RS6000_BUILTIN_D
13171 #undef RS6000_BUILTIN_H
13172 #undef RS6000_BUILTIN_P
13173 #undef RS6000_BUILTIN_X
13174
13175 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13176 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13177 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13178 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13179 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13180 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13181 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13182 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13183 { MASK, ICODE, NAME, ENUM },
13184
13185 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13186
13187 /* AltiVec predicates. */
13188
13189 static const struct builtin_description bdesc_altivec_preds[] =
13190 {
13191 #include "rs6000-builtin.def"
13192 };
13193
13194 /* ABS* operations. */
13195
13196 #undef RS6000_BUILTIN_0
13197 #undef RS6000_BUILTIN_1
13198 #undef RS6000_BUILTIN_2
13199 #undef RS6000_BUILTIN_3
13200 #undef RS6000_BUILTIN_A
13201 #undef RS6000_BUILTIN_D
13202 #undef RS6000_BUILTIN_H
13203 #undef RS6000_BUILTIN_P
13204 #undef RS6000_BUILTIN_X
13205
13206 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13207 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13208 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13209 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13210 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13211 { MASK, ICODE, NAME, ENUM },
13212
13213 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13214 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13215 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13216 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13217
13218 static const struct builtin_description bdesc_abs[] =
13219 {
13220 #include "rs6000-builtin.def"
13221 };
13222
13223 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13224 foo (VECa). */
13225
13226 #undef RS6000_BUILTIN_0
13227 #undef RS6000_BUILTIN_1
13228 #undef RS6000_BUILTIN_2
13229 #undef RS6000_BUILTIN_3
13230 #undef RS6000_BUILTIN_A
13231 #undef RS6000_BUILTIN_D
13232 #undef RS6000_BUILTIN_H
13233 #undef RS6000_BUILTIN_P
13234 #undef RS6000_BUILTIN_X
13235
13236 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13237 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13238 { MASK, ICODE, NAME, ENUM },
13239
13240 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13241 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13242 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13243 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13244 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13245 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13246 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13247
13248 static const struct builtin_description bdesc_1arg[] =
13249 {
13250 #include "rs6000-builtin.def"
13251 };
13252
13253 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13254
13255 #undef RS6000_BUILTIN_0
13256 #undef RS6000_BUILTIN_1
13257 #undef RS6000_BUILTIN_2
13258 #undef RS6000_BUILTIN_3
13259 #undef RS6000_BUILTIN_A
13260 #undef RS6000_BUILTIN_D
13261 #undef RS6000_BUILTIN_H
13262 #undef RS6000_BUILTIN_P
13263 #undef RS6000_BUILTIN_X
13264
13265 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13266 { MASK, ICODE, NAME, ENUM },
13267
13268 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13269 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13270 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13271 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13272 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13273 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13274 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13275 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13276
13277 static const struct builtin_description bdesc_0arg[] =
13278 {
13279 #include "rs6000-builtin.def"
13280 };
13281
13282 /* HTM builtins. */
13283 #undef RS6000_BUILTIN_0
13284 #undef RS6000_BUILTIN_1
13285 #undef RS6000_BUILTIN_2
13286 #undef RS6000_BUILTIN_3
13287 #undef RS6000_BUILTIN_A
13288 #undef RS6000_BUILTIN_D
13289 #undef RS6000_BUILTIN_H
13290 #undef RS6000_BUILTIN_P
13291 #undef RS6000_BUILTIN_X
13292
13293 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13294 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13295 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13296 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13297 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13298 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13299 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13300 { MASK, ICODE, NAME, ENUM },
13301
13302 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13303 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13304
13305 static const struct builtin_description bdesc_htm[] =
13306 {
13307 #include "rs6000-builtin.def"
13308 };
13309
13310 #undef RS6000_BUILTIN_0
13311 #undef RS6000_BUILTIN_1
13312 #undef RS6000_BUILTIN_2
13313 #undef RS6000_BUILTIN_3
13314 #undef RS6000_BUILTIN_A
13315 #undef RS6000_BUILTIN_D
13316 #undef RS6000_BUILTIN_H
13317 #undef RS6000_BUILTIN_P
13318
13319 /* Return true if a builtin function is overloaded. */
13320 bool
13321 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13322 {
13323 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13324 }
13325
13326 const char *
13327 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13328 {
13329 return rs6000_builtin_info[(int)fncode].name;
13330 }
13331
13332 /* Expand an expression EXP that calls a builtin without arguments. */
13333 static rtx
13334 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13335 {
13336 rtx pat;
13337 machine_mode tmode = insn_data[icode].operand[0].mode;
13338
13339 if (icode == CODE_FOR_nothing)
13340 /* Builtin not supported on this processor. */
13341 return 0;
13342
13343 if (icode == CODE_FOR_rs6000_mffsl
13344 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13345 {
13346 error ("__builtin_mffsl() not supported with -msoft-float");
13347 return const0_rtx;
13348 }
13349
13350 if (target == 0
13351 || GET_MODE (target) != tmode
13352 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13353 target = gen_reg_rtx (tmode);
13354
13355 pat = GEN_FCN (icode) (target);
13356 if (! pat)
13357 return 0;
13358 emit_insn (pat);
13359
13360 return target;
13361 }
13362
13363
13364 static rtx
13365 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13366 {
13367 rtx pat;
13368 tree arg0 = CALL_EXPR_ARG (exp, 0);
13369 tree arg1 = CALL_EXPR_ARG (exp, 1);
13370 rtx op0 = expand_normal (arg0);
13371 rtx op1 = expand_normal (arg1);
13372 machine_mode mode0 = insn_data[icode].operand[0].mode;
13373 machine_mode mode1 = insn_data[icode].operand[1].mode;
13374
13375 if (icode == CODE_FOR_nothing)
13376 /* Builtin not supported on this processor. */
13377 return 0;
13378
13379 /* If we got invalid arguments bail out before generating bad rtl. */
13380 if (arg0 == error_mark_node || arg1 == error_mark_node)
13381 return const0_rtx;
13382
13383 if (!CONST_INT_P (op0)
13384 || INTVAL (op0) > 255
13385 || INTVAL (op0) < 0)
13386 {
13387 error ("argument 1 must be an 8-bit field value");
13388 return const0_rtx;
13389 }
13390
13391 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13392 op0 = copy_to_mode_reg (mode0, op0);
13393
13394 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13395 op1 = copy_to_mode_reg (mode1, op1);
13396
13397 pat = GEN_FCN (icode) (op0, op1);
13398 if (!pat)
13399 return const0_rtx;
13400 emit_insn (pat);
13401
13402 return NULL_RTX;
13403 }
13404
13405 static rtx
13406 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13407 {
13408 rtx pat;
13409 tree arg0 = CALL_EXPR_ARG (exp, 0);
13410 rtx op0 = expand_normal (arg0);
13411
13412 if (icode == CODE_FOR_nothing)
13413 /* Builtin not supported on this processor. */
13414 return 0;
13415
13416 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13417 {
13418 error ("__builtin_mtfsb0 and __builtin_mtfsb1 not supported with -msoft-float");
13419 return const0_rtx;
13420 }
13421
13422 /* If we got invalid arguments bail out before generating bad rtl. */
13423 if (arg0 == error_mark_node)
13424 return const0_rtx;
13425
13426 /* Only allow bit numbers 0 to 31. */
13427 if (!u5bit_cint_operand (op0, VOIDmode))
13428 {
13429 error ("argument must be a constant between 0 and 31");
13430 return const0_rtx;
13431 }
13432
13433 pat = GEN_FCN (icode) (op0);
13434 if (!pat)
13435 return const0_rtx;
13436 emit_insn (pat);
13437
13438 return NULL_RTX;
13439 }
13440
13441 static rtx
13442 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13443 {
13444 rtx pat;
13445 tree arg0 = CALL_EXPR_ARG (exp, 0);
13446 rtx op0 = expand_normal (arg0);
13447 machine_mode mode0 = insn_data[icode].operand[0].mode;
13448
13449 if (icode == CODE_FOR_nothing)
13450 /* Builtin not supported on this processor. */
13451 return 0;
13452
13453 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13454 {
13455 error ("__builtin_set_fpscr_rn not supported with -msoft-float");
13456 return const0_rtx;
13457 }
13458
13459 /* If we got invalid arguments bail out before generating bad rtl. */
13460 if (arg0 == error_mark_node)
13461 return const0_rtx;
13462
13463 /* If the argument is a constant, check the range. The argument can only
13464 be a 2-bit value. Unfortunately, we cannot check the range of the value
13465 at compile time if the argument is a variable. The least significant two
13466 bits of the argument, regardless of type, are used to set the rounding
13467 mode. All other bits are ignored. */
13468 if (CONST_INT_P (op0) && !const_0_to_3_operand(op0, VOIDmode))
13469 {
13470 error ("argument must be a value between 0 and 3");
13471 return const0_rtx;
13472 }
13473
13474 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13475 op0 = copy_to_mode_reg (mode0, op0);
13476
13477 pat = GEN_FCN (icode) (op0);
13478 if (!pat)
13479 return const0_rtx;
13480 emit_insn (pat);
13481
13482 return NULL_RTX;
13483 }

13484 static rtx
13485 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13486 {
13487 rtx pat;
13488 tree arg0 = CALL_EXPR_ARG (exp, 0);
13489 rtx op0 = expand_normal (arg0);
13490 machine_mode mode0 = insn_data[icode].operand[0].mode;
13491
13492 if (TARGET_32BIT)
13493 /* Builtin not supported in 32-bit mode. */
13494 fatal_error (input_location,
13495 "__builtin_set_fpscr_drn is not supported in 32-bit mode.");
13496
13497 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13498 {
13499 error ("__builtin_set_fpscr_drn not supported with -msoft-float");
13500 return const0_rtx;
13501 }
13502
13503 if (icode == CODE_FOR_nothing)
13504 /* Builtin not supported on this processor. */
13505 return 0;
13506
13507 /* If we got invalid arguments bail out before generating bad rtl. */
13508 if (arg0 == error_mark_node)
13509 return const0_rtx;
13510
13511 /* If the argument is a constant, check the range. The argument can only
13512 be a 3-bit value. Unfortunately, we cannot check the range of the value
13513 at compile time if the argument is a variable. The least significant
13514 three bits of the argument, regardless of type, are used to set the
13515 rounding mode. All other bits are ignored. */
13516 if (CONST_INT_P (op0) && !const_0_to_7_operand(op0, VOIDmode))
13517 {
13518 error ("argument must be a value between 0 and 7");
13519 return const0_rtx;
13520 }
13521
13522 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13523 op0 = copy_to_mode_reg (mode0, op0);
13524
13525 pat = GEN_FCN (icode) (op0);
13526 if (! pat)
13527 return const0_rtx;
13528 emit_insn (pat);
13529
13530 return NULL_RTX;
13531 }
13532
13533 static rtx
13534 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13535 {
13536 rtx pat;
13537 tree arg0 = CALL_EXPR_ARG (exp, 0);
13538 rtx op0 = expand_normal (arg0);
13539 machine_mode tmode = insn_data[icode].operand[0].mode;
13540 machine_mode mode0 = insn_data[icode].operand[1].mode;
13541
13542 if (icode == CODE_FOR_nothing)
13543 /* Builtin not supported on this processor. */
13544 return 0;
13545
13546 /* If we got invalid arguments bail out before generating bad rtl. */
13547 if (arg0 == error_mark_node)
13548 return const0_rtx;
13549
13550 if (icode == CODE_FOR_altivec_vspltisb
13551 || icode == CODE_FOR_altivec_vspltish
13552 || icode == CODE_FOR_altivec_vspltisw)
13553 {
13554 /* Only allow 5-bit *signed* literals. */
13555 if (!CONST_INT_P (op0)
13556 || INTVAL (op0) > 15
13557 || INTVAL (op0) < -16)
13558 {
13559 error ("argument 1 must be a 5-bit signed literal");
13560 return CONST0_RTX (tmode);
13561 }
13562 }
13563
13564 if (target == 0
13565 || GET_MODE (target) != tmode
13566 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13567 target = gen_reg_rtx (tmode);
13568
13569 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13570 op0 = copy_to_mode_reg (mode0, op0);
13571
13572 pat = GEN_FCN (icode) (target, op0);
13573 if (! pat)
13574 return 0;
13575 emit_insn (pat);
13576
13577 return target;
13578 }
13579
13580 static rtx
13581 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13582 {
13583 rtx pat, scratch1, scratch2;
13584 tree arg0 = CALL_EXPR_ARG (exp, 0);
13585 rtx op0 = expand_normal (arg0);
13586 machine_mode tmode = insn_data[icode].operand[0].mode;
13587 machine_mode mode0 = insn_data[icode].operand[1].mode;
13588
13589 /* If we have invalid arguments, bail out before generating bad rtl. */
13590 if (arg0 == error_mark_node)
13591 return const0_rtx;
13592
13593 if (target == 0
13594 || GET_MODE (target) != tmode
13595 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13596 target = gen_reg_rtx (tmode);
13597
13598 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13599 op0 = copy_to_mode_reg (mode0, op0);
13600
13601 scratch1 = gen_reg_rtx (mode0);
13602 scratch2 = gen_reg_rtx (mode0);
13603
13604 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13605 if (! pat)
13606 return 0;
13607 emit_insn (pat);
13608
13609 return target;
13610 }
13611
13612 static rtx
13613 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13614 {
13615 rtx pat;
13616 tree arg0 = CALL_EXPR_ARG (exp, 0);
13617 tree arg1 = CALL_EXPR_ARG (exp, 1);
13618 rtx op0 = expand_normal (arg0);
13619 rtx op1 = expand_normal (arg1);
13620 machine_mode tmode = insn_data[icode].operand[0].mode;
13621 machine_mode mode0 = insn_data[icode].operand[1].mode;
13622 machine_mode mode1 = insn_data[icode].operand[2].mode;
13623
13624 if (icode == CODE_FOR_nothing)
13625 /* Builtin not supported on this processor. */
13626 return 0;
13627
13628 /* If we got invalid arguments bail out before generating bad rtl. */
13629 if (arg0 == error_mark_node || arg1 == error_mark_node)
13630 return const0_rtx;
13631
13632 if (icode == CODE_FOR_unpackv1ti
13633 || icode == CODE_FOR_unpackkf
13634 || icode == CODE_FOR_unpacktf
13635 || icode == CODE_FOR_unpackif
13636 || icode == CODE_FOR_unpacktd)
13637 {
13638 /* Only allow 1-bit unsigned literals. */
13639 STRIP_NOPS (arg1);
13640 if (TREE_CODE (arg1) != INTEGER_CST
13641 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13642 {
13643 error ("argument 2 must be a 1-bit unsigned literal");
13644 return CONST0_RTX (tmode);
13645 }
13646 }
13647 else if (icode == CODE_FOR_altivec_vspltw)
13648 {
13649 /* Only allow 2-bit unsigned literals. */
13650 STRIP_NOPS (arg1);
13651 if (TREE_CODE (arg1) != INTEGER_CST
13652 || TREE_INT_CST_LOW (arg1) & ~3)
13653 {
13654 error ("argument 2 must be a 2-bit unsigned literal");
13655 return CONST0_RTX (tmode);
13656 }
13657 }
13658 else if (icode == CODE_FOR_altivec_vsplth)
13659 {
13660 /* Only allow 3-bit unsigned literals. */
13661 STRIP_NOPS (arg1);
13662 if (TREE_CODE (arg1) != INTEGER_CST
13663 || TREE_INT_CST_LOW (arg1) & ~7)
13664 {
13665 error ("argument 2 must be a 3-bit unsigned literal");
13666 return CONST0_RTX (tmode);
13667 }
13668 }
13669 else if (icode == CODE_FOR_altivec_vspltb)
13670 {
13671 /* Only allow 4-bit unsigned literals. */
13672 STRIP_NOPS (arg1);
13673 if (TREE_CODE (arg1) != INTEGER_CST
13674 || TREE_INT_CST_LOW (arg1) & ~15)
13675 {
13676 error ("argument 2 must be a 4-bit unsigned literal");
13677 return CONST0_RTX (tmode);
13678 }
13679 }
13680 else if (icode == CODE_FOR_altivec_vcfux
13681 || icode == CODE_FOR_altivec_vcfsx
13682 || icode == CODE_FOR_altivec_vctsxs
13683 || icode == CODE_FOR_altivec_vctuxs)
13684 {
13685 /* Only allow 5-bit unsigned literals. */
13686 STRIP_NOPS (arg1);
13687 if (TREE_CODE (arg1) != INTEGER_CST
13688 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13689 {
13690 error ("argument 2 must be a 5-bit unsigned literal");
13691 return CONST0_RTX (tmode);
13692 }
13693 }
13694 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13695 || icode == CODE_FOR_dfptstsfi_lt_dd
13696 || icode == CODE_FOR_dfptstsfi_gt_dd
13697 || icode == CODE_FOR_dfptstsfi_unordered_dd
13698 || icode == CODE_FOR_dfptstsfi_eq_td
13699 || icode == CODE_FOR_dfptstsfi_lt_td
13700 || icode == CODE_FOR_dfptstsfi_gt_td
13701 || icode == CODE_FOR_dfptstsfi_unordered_td)
13702 {
13703 /* Only allow 6-bit unsigned literals. */
13704 STRIP_NOPS (arg0);
13705 if (TREE_CODE (arg0) != INTEGER_CST
13706 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13707 {
13708 error ("argument 1 must be a 6-bit unsigned literal");
13709 return CONST0_RTX (tmode);
13710 }
13711 }
13712 else if (icode == CODE_FOR_xststdcqp_kf
13713 || icode == CODE_FOR_xststdcqp_tf
13714 || icode == CODE_FOR_xststdcdp
13715 || icode == CODE_FOR_xststdcsp
13716 || icode == CODE_FOR_xvtstdcdp
13717 || icode == CODE_FOR_xvtstdcsp)
13718 {
13719 /* Only allow 7-bit unsigned literals. */
13720 STRIP_NOPS (arg1);
13721 if (TREE_CODE (arg1) != INTEGER_CST
13722 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13723 {
13724 error ("argument 2 must be a 7-bit unsigned literal");
13725 return CONST0_RTX (tmode);
13726 }
13727 }
13728
13729 if (target == 0
13730 || GET_MODE (target) != tmode
13731 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13732 target = gen_reg_rtx (tmode);
13733
13734 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13735 op0 = copy_to_mode_reg (mode0, op0);
13736 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13737 op1 = copy_to_mode_reg (mode1, op1);
13738
13739 pat = GEN_FCN (icode) (target, op0, op1);
13740 if (! pat)
13741 return 0;
13742 emit_insn (pat);
13743
13744 return target;
13745 }
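
/* Hedged example of the literal checks above: a call such as
   __builtin_altivec_vspltw (v, 5) is rejected at expand time with
   "argument 2 must be a 2-bit unsigned literal", since vspltw can only
   broadcast one of the four 32-bit elements; a variable element number
   has to go through a permute instead.  */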
13746
13747 static rtx
13748 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13749 {
13750 rtx pat, scratch;
13751 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13752 tree arg0 = CALL_EXPR_ARG (exp, 1);
13753 tree arg1 = CALL_EXPR_ARG (exp, 2);
13754 rtx op0 = expand_normal (arg0);
13755 rtx op1 = expand_normal (arg1);
13756 machine_mode tmode = SImode;
13757 machine_mode mode0 = insn_data[icode].operand[1].mode;
13758 machine_mode mode1 = insn_data[icode].operand[2].mode;
13759 int cr6_form_int;
13760
13761 if (TREE_CODE (cr6_form) != INTEGER_CST)
13762 {
13763 error ("argument 1 of %qs must be a constant",
13764 "__builtin_altivec_predicate");
13765 return const0_rtx;
13766 }
13767 else
13768 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13769
13770 gcc_assert (mode0 == mode1);
13771
13772 /* If we have invalid arguments, bail out before generating bad rtl. */
13773 if (arg0 == error_mark_node || arg1 == error_mark_node)
13774 return const0_rtx;
13775
13776 if (target == 0
13777 || GET_MODE (target) != tmode
13778 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13779 target = gen_reg_rtx (tmode);
13780
13781 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13782 op0 = copy_to_mode_reg (mode0, op0);
13783 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13784 op1 = copy_to_mode_reg (mode1, op1);
13785
13786 /* Note that for many of the relevant operations (e.g. cmpne or
13787 cmpeq) with float or double operands, it would make more sense for
13788 the mode of the allocated scratch register to be a vector integer
13789 mode. But the choice to copy the mode of operand 0 was made long
13790 ago and there are no plans to change it. */
13791 scratch = gen_reg_rtx (mode0);
13792
13793 pat = GEN_FCN (icode) (scratch, op0, op1);
13794 if (! pat)
13795 return 0;
13796 emit_insn (pat);
13797
13798 /* The vec_any* and vec_all* predicates use the same opcodes for two
13799 different operations, but the bits in CR6 will be different
13800 depending on what information we want. So we have to play tricks
13801 with CR6 to get the right bits out.
13802
13803 If you think this is disgusting, look at the specs for the
13804 AltiVec predicates. */
13805
13806 switch (cr6_form_int)
13807 {
13808 case 0:
13809 emit_insn (gen_cr6_test_for_zero (target));
13810 break;
13811 case 1:
13812 emit_insn (gen_cr6_test_for_zero_reverse (target));
13813 break;
13814 case 2:
13815 emit_insn (gen_cr6_test_for_lt (target));
13816 break;
13817 case 3:
13818 emit_insn (gen_cr6_test_for_lt_reverse (target));
13819 break;
13820 default:
13821 error ("argument 1 of %qs is out of range",
13822 "__builtin_altivec_predicate");
13823 break;
13824 }
13825
13826 return target;
13827 }
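
/* Editorial sketch (not from the original source): at the source level the
   predicate builtins take the CR6 form as their first argument, matching the
   switch above: 0 tests the "all false" (zero) condition, 1 its reverse, 2
   the "all true" (lt) condition, 3 its reverse.  Assuming the usual
   <altivec.h> __CR6_* spellings (__CR6_EQ 0 .. __CR6_LT_REV 3), e.g.:

     vector int a, b;
     int any_eq = __builtin_altivec_vcmpequw_p (1, a, b);
     int all_eq = __builtin_altivec_vcmpequw_p (2, a, b);

   Both calls use the same vcmpequw. instruction; only the CR6 bit that is
   copied into the result differs.  */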
13828
13829 rtx
13830 swap_endian_selector_for_mode (machine_mode mode)
13831 {
13832 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13833 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13834 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13835 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13836
13837 unsigned int *swaparray, i;
13838 rtx perm[16];
13839
13840 switch (mode)
13841 {
13842 case E_V1TImode:
13843 swaparray = swap1;
13844 break;
13845 case E_V2DFmode:
13846 case E_V2DImode:
13847 swaparray = swap2;
13848 break;
13849 case E_V4SFmode:
13850 case E_V4SImode:
13851 swaparray = swap4;
13852 break;
13853 case E_V8HImode:
13854 swaparray = swap8;
13855 break;
13856 default:
13857 gcc_unreachable ();
13858 }
13859
13860 for (i = 0; i < 16; ++i)
13861 perm[i] = GEN_INT (swaparray[i]);
13862
13863 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13864 gen_rtvec_v (16, perm)));
13865 }
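
/* Editorial note: for example, with MODE == V4SImode the constant built
   above is the vperm selector {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12},
   which reverses the bytes within each 4-byte element and thus byte-swaps
   every lane of the vector with a single permute.  */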
13866
13867 static rtx
13868 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13869 {
13870 rtx pat, addr;
13871 tree arg0 = CALL_EXPR_ARG (exp, 0);
13872 tree arg1 = CALL_EXPR_ARG (exp, 1);
13873 machine_mode tmode = insn_data[icode].operand[0].mode;
13874 machine_mode mode0 = Pmode;
13875 machine_mode mode1 = Pmode;
13876 rtx op0 = expand_normal (arg0);
13877 rtx op1 = expand_normal (arg1);
13878
13879 if (icode == CODE_FOR_nothing)
13880 /* Builtin not supported on this processor. */
13881 return 0;
13882
13883 /* If we got invalid arguments, bail out before generating bad rtl. */
13884 if (arg0 == error_mark_node || arg1 == error_mark_node)
13885 return const0_rtx;
13886
13887 if (target == 0
13888 || GET_MODE (target) != tmode
13889 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13890 target = gen_reg_rtx (tmode);
13891
13892 op1 = copy_to_mode_reg (mode1, op1);
13893
13894 /* For LVX, express the RTL accurately by ANDing the address with -16.
13895 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13896 so the raw address is fine. */
13897 if (icode == CODE_FOR_altivec_lvx_v1ti
13898 || icode == CODE_FOR_altivec_lvx_v2df
13899 || icode == CODE_FOR_altivec_lvx_v2di
13900 || icode == CODE_FOR_altivec_lvx_v4sf
13901 || icode == CODE_FOR_altivec_lvx_v4si
13902 || icode == CODE_FOR_altivec_lvx_v8hi
13903 || icode == CODE_FOR_altivec_lvx_v16qi)
13904 {
13905 rtx rawaddr;
13906 if (op0 == const0_rtx)
13907 rawaddr = op1;
13908 else
13909 {
13910 op0 = copy_to_mode_reg (mode0, op0);
13911 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13912 }
13913 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13914 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13915
13916 emit_insn (gen_rtx_SET (target, addr));
13917 }
13918 else
13919 {
13920 if (op0 == const0_rtx)
13921 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13922 else
13923 {
13924 op0 = copy_to_mode_reg (mode0, op0);
13925 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13926 gen_rtx_PLUS (Pmode, op1, op0));
13927 }
13928
13929 pat = GEN_FCN (icode) (target, addr);
13930 if (! pat)
13931 return 0;
13932 emit_insn (pat);
13933 }
13934
13935 return target;
13936 }
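
/* Editorial usage sketch (based on the documented vec_ld semantics, not on
   code in this file): the AND with -16 above models lvx's architectural
   truncation of the effective address, e.g.

     vector int v = vec_ld (off, ptr);   loads the 16 bytes at
                                         (ptr + off) & ~15

   so a misaligned address silently yields the enclosing aligned quadword
   rather than an unaligned load.  */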
13937
13938 static rtx
13939 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13940 {
13941 rtx pat;
13942 tree arg0 = CALL_EXPR_ARG (exp, 0);
13943 tree arg1 = CALL_EXPR_ARG (exp, 1);
13944 tree arg2 = CALL_EXPR_ARG (exp, 2);
13945 rtx op0 = expand_normal (arg0);
13946 rtx op1 = expand_normal (arg1);
13947 rtx op2 = expand_normal (arg2);
13948 machine_mode mode0 = insn_data[icode].operand[0].mode;
13949 machine_mode mode1 = insn_data[icode].operand[1].mode;
13950 machine_mode mode2 = insn_data[icode].operand[2].mode;
13951
13952 if (icode == CODE_FOR_nothing)
13953 /* Builtin not supported on this processor. */
13954 return NULL_RTX;
13955
13956 /* If we got invalid arguments, bail out before generating bad rtl. */
13957 if (arg0 == error_mark_node
13958 || arg1 == error_mark_node
13959 || arg2 == error_mark_node)
13960 return NULL_RTX;
13961
13962 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13963 op0 = copy_to_mode_reg (mode0, op0);
13964 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13965 op1 = copy_to_mode_reg (mode1, op1);
13966 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13967 op2 = copy_to_mode_reg (mode2, op2);
13968
13969 pat = GEN_FCN (icode) (op0, op1, op2);
13970 if (pat)
13971 emit_insn (pat);
13972
13973 return NULL_RTX;
13974 }
13975
13976 static rtx
13977 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13978 {
13979 tree arg0 = CALL_EXPR_ARG (exp, 0);
13980 tree arg1 = CALL_EXPR_ARG (exp, 1);
13981 tree arg2 = CALL_EXPR_ARG (exp, 2);
13982 rtx op0 = expand_normal (arg0);
13983 rtx op1 = expand_normal (arg1);
13984 rtx op2 = expand_normal (arg2);
13985 rtx pat, addr, rawaddr;
13986 machine_mode tmode = insn_data[icode].operand[0].mode;
13987 machine_mode smode = insn_data[icode].operand[1].mode;
13988 machine_mode mode1 = Pmode;
13989 machine_mode mode2 = Pmode;
13990
13991 /* Invalid arguments; bail out before generating bad rtl. */
13992 if (arg0 == error_mark_node
13993 || arg1 == error_mark_node
13994 || arg2 == error_mark_node)
13995 return const0_rtx;
13996
13997 op2 = copy_to_mode_reg (mode2, op2);
13998
13999 /* For STVX, express the RTL accurately by ANDing the address with -16.
14000 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14001 so the raw address is fine. */
14002 if (icode == CODE_FOR_altivec_stvx_v2df
14003 || icode == CODE_FOR_altivec_stvx_v2di
14004 || icode == CODE_FOR_altivec_stvx_v4sf
14005 || icode == CODE_FOR_altivec_stvx_v4si
14006 || icode == CODE_FOR_altivec_stvx_v8hi
14007 || icode == CODE_FOR_altivec_stvx_v16qi)
14008 {
14009 if (op1 == const0_rtx)
14010 rawaddr = op2;
14011 else
14012 {
14013 op1 = copy_to_mode_reg (mode1, op1);
14014 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14015 }
14016
14017 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14018 addr = gen_rtx_MEM (tmode, addr);
14019
14020 op0 = copy_to_mode_reg (tmode, op0);
14021
14022 emit_insn (gen_rtx_SET (addr, op0));
14023 }
14024 else
14025 {
14026 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14027 op0 = copy_to_mode_reg (smode, op0);
14028
14029 if (op1 == const0_rtx)
14030 addr = gen_rtx_MEM (tmode, op2);
14031 else
14032 {
14033 op1 = copy_to_mode_reg (mode1, op1);
14034 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14035 }
14036
14037 pat = GEN_FCN (icode) (addr, op0);
14038 if (pat)
14039 emit_insn (pat);
14040 }
14041
14042 return NULL_RTX;
14043 }
14044
14045 /* Return the appropriate SPR number associated with the given builtin. */
14046 static inline HOST_WIDE_INT
14047 htm_spr_num (enum rs6000_builtins code)
14048 {
14049 if (code == HTM_BUILTIN_GET_TFHAR
14050 || code == HTM_BUILTIN_SET_TFHAR)
14051 return TFHAR_SPR;
14052 else if (code == HTM_BUILTIN_GET_TFIAR
14053 || code == HTM_BUILTIN_SET_TFIAR)
14054 return TFIAR_SPR;
14055 else if (code == HTM_BUILTIN_GET_TEXASR
14056 || code == HTM_BUILTIN_SET_TEXASR)
14057 return TEXASR_SPR;
14058 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14059 || code == HTM_BUILTIN_SET_TEXASRU);
14060 return TEXASRU_SPR;
14061 }
14062
14063 /* Return the appropriate SPR regno associated with the given builtin. */
14064 static inline HOST_WIDE_INT
14065 htm_spr_regno (enum rs6000_builtins code)
14066 {
14067 if (code == HTM_BUILTIN_GET_TFHAR
14068 || code == HTM_BUILTIN_SET_TFHAR)
14069 return TFHAR_REGNO;
14070 else if (code == HTM_BUILTIN_GET_TFIAR
14071 || code == HTM_BUILTIN_SET_TFIAR)
14072 return TFIAR_REGNO;
14073 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14074 || code == HTM_BUILTIN_SET_TEXASR
14075 || code == HTM_BUILTIN_GET_TEXASRU
14076 || code == HTM_BUILTIN_SET_TEXASRU);
14077 return TEXASR_REGNO;
14078 }
14079
14080 /* Return the correct ICODE value depending on whether we are
14081 setting or reading the HTM SPRs. */
14082 static inline enum insn_code
14083 rs6000_htm_spr_icode (bool nonvoid)
14084 {
14085 if (nonvoid)
14086 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14087 else
14088 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14089 }
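
/* Editorial note: per Power ISA 2.07 the HTM special-purpose registers are
   TFHAR (SPR 128), TFIAR (SPR 129), TEXASR (SPR 130) and TEXASRU (SPR 131);
   the *_SPR and *_REGNO macros used above are defined elsewhere in the port.
   A sketch of what the mfspr path expands from:

     unsigned long failure_addr = __builtin_get_tfhar ();

   which on a 64-bit target goes through CODE_FOR_htm_mfspr_di; the matching
   __builtin_set_* builtins use the mtspr icode instead.  */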
14090
14091 /* Expand the HTM builtin in EXP and store the result in TARGET.
14092 Store true in *EXPANDEDP if we found a builtin to expand. */
14093 static rtx
14094 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14095 {
14096 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14097 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14098 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14099 const struct builtin_description *d;
14100 size_t i;
14101
14102 *expandedp = true;
14103
14104 if (!TARGET_POWERPC64
14105 && (fcode == HTM_BUILTIN_TABORTDC
14106 || fcode == HTM_BUILTIN_TABORTDCI))
14107 {
14108 size_t uns_fcode = (size_t)fcode;
14109 const char *name = rs6000_builtin_info[uns_fcode].name;
14110 error ("builtin %qs is only valid in 64-bit mode", name);
14111 return const0_rtx;
14112 }
14113
14114 /* Expand the HTM builtins. */
14115 d = bdesc_htm;
14116 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14117 if (d->code == fcode)
14118 {
14119 rtx op[MAX_HTM_OPERANDS], pat;
14120 int nopnds = 0;
14121 tree arg;
14122 call_expr_arg_iterator iter;
14123 unsigned attr = rs6000_builtin_info[fcode].attr;
14124 enum insn_code icode = d->icode;
14125 const struct insn_operand_data *insn_op;
14126 bool uses_spr = (attr & RS6000_BTC_SPR);
14127 rtx cr = NULL_RTX;
14128
14129 if (uses_spr)
14130 icode = rs6000_htm_spr_icode (nonvoid);
14131 insn_op = &insn_data[icode].operand[0];
14132
14133 if (nonvoid)
14134 {
14135 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14136 if (!target
14137 || GET_MODE (target) != tmode
14138 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14139 target = gen_reg_rtx (tmode);
14140 if (uses_spr)
14141 op[nopnds++] = target;
14142 }
14143
14144 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14145 {
14146 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14147 return const0_rtx;
14148
14149 insn_op = &insn_data[icode].operand[nopnds];
14150
14151 op[nopnds] = expand_normal (arg);
14152
14153 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14154 {
14155 if (!strcmp (insn_op->constraint, "n"))
14156 {
14157 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14158 if (!CONST_INT_P (op[nopnds]))
14159 error ("argument %d must be an unsigned literal", arg_num);
14160 else
14161 error ("argument %d is an unsigned literal that is "
14162 "out of range", arg_num);
14163 return const0_rtx;
14164 }
14165 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14166 }
14167
14168 nopnds++;
14169 }
14170
14171 /* Handle the builtins for extended mnemonics. These accept
14172 no arguments, but map to builtins that take arguments. */
14173 switch (fcode)
14174 {
14175 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14176 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14177 op[nopnds++] = GEN_INT (1);
14178 if (flag_checking)
14179 attr |= RS6000_BTC_UNARY;
14180 break;
14181 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14182 op[nopnds++] = GEN_INT (0);
14183 if (flag_checking)
14184 attr |= RS6000_BTC_UNARY;
14185 break;
14186 default:
14187 break;
14188 }
14189
14190 /* If this builtin accesses SPRs, then pass in the appropriate
14191 SPR number and SPR regno as the last two operands. */
14192 if (uses_spr)
14193 {
14194 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14195 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14196 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14197 }
14198 /* If this builtin accesses a CR, then pass in a scratch
14199 CR as the last operand. */
14200 else if (attr & RS6000_BTC_CR)
14201 {
14202 cr = gen_reg_rtx (CCmode);
14203 op[nopnds++] = cr;
}
14204
14205 if (flag_checking)
14206 {
14207 int expected_nopnds = 0;
14208 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14209 expected_nopnds = 1;
14210 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14211 expected_nopnds = 2;
14212 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14213 expected_nopnds = 3;
14214 if (!(attr & RS6000_BTC_VOID))
14215 expected_nopnds += 1;
14216 if (uses_spr)
14217 expected_nopnds += 2;
14218
14219 gcc_assert (nopnds == expected_nopnds
14220 && nopnds <= MAX_HTM_OPERANDS);
14221 }
14222
14223 switch (nopnds)
14224 {
14225 case 1:
14226 pat = GEN_FCN (icode) (op[0]);
14227 break;
14228 case 2:
14229 pat = GEN_FCN (icode) (op[0], op[1]);
14230 break;
14231 case 3:
14232 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14233 break;
14234 case 4:
14235 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14236 break;
14237 default:
14238 gcc_unreachable ();
14239 }
14240 if (!pat)
14241 return NULL_RTX;
14242 emit_insn (pat);
14243
14244 if (attr & RS6000_BTC_CR)
14245 {
14246 if (fcode == HTM_BUILTIN_TBEGIN)
14247 {
14248 /* Emit code to set TARGET to true or false depending on
14249 whether the tbegin. instruction succeeded or failed
14250 to start a transaction. We do this by placing the 1's
14251 complement of CR's EQ bit into TARGET. */
14252 rtx scratch = gen_reg_rtx (SImode);
14253 emit_insn (gen_rtx_SET (scratch,
14254 gen_rtx_EQ (SImode, cr,
14255 const0_rtx)));
14256 emit_insn (gen_rtx_SET (target,
14257 gen_rtx_XOR (SImode, scratch,
14258 GEN_INT (1))));
14259 }
14260 else
14261 {
14262 /* Emit code to copy the 4-bit condition register field
14263 CR into the least significant end of register TARGET. */
14264 rtx scratch1 = gen_reg_rtx (SImode);
14265 rtx scratch2 = gen_reg_rtx (SImode);
14266 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14267 emit_insn (gen_movcc (subreg, cr));
14268 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14269 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14270 }
14271 }
14272
14273 if (nonvoid)
14274 return target;
14275 return const0_rtx;
14276 }
14277
14278 *expandedp = false;
14279 return NULL_RTX;
14280 }
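
/* Editorial usage sketch, based on the documented GCC HTM builtins rather
   than on this file: the CR handling above is what makes the canonical
   pattern work at the source level --

     if (__builtin_tbegin (0))
       {
         ... transactional code ...
         __builtin_tend (0);
       }
     else
       {
         ... fallback; __builtin_get_texasr () describes the failure ...
       }

   __builtin_tbegin returns nonzero when the transaction starts, which is
   the one's-complement-of-CR-EQ value computed for HTM_BUILTIN_TBEGIN
   above.  */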
14281
14282 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14283
14284 static rtx
14285 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14286 rtx target)
14287 {
14288 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14289 if (fcode == RS6000_BUILTIN_CPU_INIT)
14290 return const0_rtx;
14291
14292 if (target == 0 || GET_MODE (target) != SImode)
14293 target = gen_reg_rtx (SImode);
14294
14295 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14296 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14297 /* The target clones pass creates an ARRAY_REF instead of a STRING_CST;
14298 convert it back to a STRING_CST. */
14299 if (TREE_CODE (arg) == ARRAY_REF
14300 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14301 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14302 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14303 arg = TREE_OPERAND (arg, 0);
14304
14305 if (TREE_CODE (arg) != STRING_CST)
14306 {
14307 error ("builtin %qs only accepts a string argument",
14308 rs6000_builtin_info[(size_t) fcode].name);
14309 return const0_rtx;
14310 }
14311
14312 if (fcode == RS6000_BUILTIN_CPU_IS)
14313 {
14314 const char *cpu = TREE_STRING_POINTER (arg);
14315 rtx cpuid = NULL_RTX;
14316 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14317 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14318 {
14319 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14320 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14321 break;
14322 }
14323 if (cpuid == NULL_RTX)
14324 {
14325 /* Invalid CPU argument. */
14326 error ("cpu %qs is an invalid argument to builtin %qs",
14327 cpu, rs6000_builtin_info[(size_t) fcode].name);
14328 return const0_rtx;
14329 }
14330
14331 rtx platform = gen_reg_rtx (SImode);
14332 rtx tcbmem = gen_const_mem (SImode,
14333 gen_rtx_PLUS (Pmode,
14334 gen_rtx_REG (Pmode, TLS_REGNUM),
14335 GEN_INT (TCB_PLATFORM_OFFSET)));
14336 emit_move_insn (platform, tcbmem);
14337 emit_insn (gen_eqsi3 (target, platform, cpuid));
14338 }
14339 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14340 {
14341 const char *hwcap = TREE_STRING_POINTER (arg);
14342 rtx mask = NULL_RTX;
14343 int hwcap_offset;
14344 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14345 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14346 {
14347 mask = GEN_INT (cpu_supports_info[i].mask);
14348 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14349 break;
14350 }
14351 if (mask == NULL_RTX)
14352 {
14353 /* Invalid HWCAP argument. */
14354 error ("%s %qs is an invalid argument to builtin %qs",
14355 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14356 return const0_rtx;
14357 }
14358
14359 rtx tcb_hwcap = gen_reg_rtx (SImode);
14360 rtx tcbmem = gen_const_mem (SImode,
14361 gen_rtx_PLUS (Pmode,
14362 gen_rtx_REG (Pmode, TLS_REGNUM),
14363 GEN_INT (hwcap_offset)));
14364 emit_move_insn (tcb_hwcap, tcbmem);
14365 rtx scratch1 = gen_reg_rtx (SImode);
14366 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14367 rtx scratch2 = gen_reg_rtx (SImode);
14368 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14369 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14370 }
14371 else
14372 gcc_unreachable ();
14373
14374 /* Record that we have expanded a CPU builtin, so that we can later
14375 emit a reference to the special symbol exported by LIBC to ensure we
14376 do not link against an old LIBC that doesn't support this feature. */
14377 cpu_builtin_p = true;
14378
14379 #else
14380 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14381 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14382
14383 /* For old LIBCs, always return FALSE. */
14384 emit_move_insn (target, GEN_INT (0));
14385 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14386
14387 return target;
14388 }
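
/* Editorial usage sketch (the platform and hwcap strings are the documented
   GLIBC names; use_power9_path and use_vsx_path are hypothetical):

     __builtin_cpu_init ();             expands to nothing, see above
     if (__builtin_cpu_is ("power9"))
       use_power9_path ();
     else if (__builtin_cpu_supports ("vsx"))
       use_vsx_path ();

   Each test compiles to a load from the thread control block plus a
   compare, exactly as emitted by the two branches above.  */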
14389
14390 static rtx
14391 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14392 {
14393 rtx pat;
14394 tree arg0 = CALL_EXPR_ARG (exp, 0);
14395 tree arg1 = CALL_EXPR_ARG (exp, 1);
14396 tree arg2 = CALL_EXPR_ARG (exp, 2);
14397 rtx op0 = expand_normal (arg0);
14398 rtx op1 = expand_normal (arg1);
14399 rtx op2 = expand_normal (arg2);
14400 machine_mode tmode = insn_data[icode].operand[0].mode;
14401 machine_mode mode0 = insn_data[icode].operand[1].mode;
14402 machine_mode mode1 = insn_data[icode].operand[2].mode;
14403 machine_mode mode2 = insn_data[icode].operand[3].mode;
14404
14405 if (icode == CODE_FOR_nothing)
14406 /* Builtin not supported on this processor. */
14407 return 0;
14408
14409 /* If we got invalid arguments, bail out before generating bad rtl. */
14410 if (arg0 == error_mark_node
14411 || arg1 == error_mark_node
14412 || arg2 == error_mark_node)
14413 return const0_rtx;
14414
14415 /* Check and prepare the argument depending on the instruction code.
14416
14417 Note that a switch statement instead of the sequence of tests
14418 would be incorrect as many of the CODE_FOR values could be
14419 CODE_FOR_nothing and that would yield multiple alternatives
14420 with identical values. We'd never reach here at runtime in
14421 this case. */
14422 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14423 || icode == CODE_FOR_altivec_vsldoi_v2df
14424 || icode == CODE_FOR_altivec_vsldoi_v4si
14425 || icode == CODE_FOR_altivec_vsldoi_v8hi
14426 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14427 {
14428 /* Only allow 4-bit unsigned literals. */
14429 STRIP_NOPS (arg2);
14430 if (TREE_CODE (arg2) != INTEGER_CST
14431 || TREE_INT_CST_LOW (arg2) & ~0xf)
14432 {
14433 error ("argument 3 must be a 4-bit unsigned literal");
14434 return CONST0_RTX (tmode);
14435 }
14436 }
14437 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14438 || icode == CODE_FOR_vsx_xxpermdi_v2di
14439 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14440 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14441 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14442 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14443 || icode == CODE_FOR_vsx_xxpermdi_v4si
14444 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14445 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14446 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14447 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14448 || icode == CODE_FOR_vsx_xxsldwi_v4si
14449 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14450 || icode == CODE_FOR_vsx_xxsldwi_v2di
14451 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14452 {
14453 /* Only allow 2-bit unsigned literals. */
14454 STRIP_NOPS (arg2);
14455 if (TREE_CODE (arg2) != INTEGER_CST
14456 || TREE_INT_CST_LOW (arg2) & ~0x3)
14457 {
14458 error ("argument 3 must be a 2-bit unsigned literal");
14459 return CONST0_RTX (tmode);
14460 }
14461 }
14462 else if (icode == CODE_FOR_vsx_set_v2df
14463 || icode == CODE_FOR_vsx_set_v2di
14464 || icode == CODE_FOR_bcdadd
14465 || icode == CODE_FOR_bcdadd_lt
14466 || icode == CODE_FOR_bcdadd_eq
14467 || icode == CODE_FOR_bcdadd_gt
14468 || icode == CODE_FOR_bcdsub
14469 || icode == CODE_FOR_bcdsub_lt
14470 || icode == CODE_FOR_bcdsub_eq
14471 || icode == CODE_FOR_bcdsub_gt)
14472 {
14473 /* Only allow 1-bit unsigned literals. */
14474 STRIP_NOPS (arg2);
14475 if (TREE_CODE (arg2) != INTEGER_CST
14476 || TREE_INT_CST_LOW (arg2) & ~0x1)
14477 {
14478 error ("argument 3 must be a 1-bit unsigned literal");
14479 return CONST0_RTX (tmode);
14480 }
14481 }
14482 else if (icode == CODE_FOR_dfp_ddedpd_dd
14483 || icode == CODE_FOR_dfp_ddedpd_td)
14484 {
14485 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14486 STRIP_NOPS (arg0);
14487 if (TREE_CODE (arg0) != INTEGER_CST
14488 || TREE_INT_CST_LOW (arg0) & ~0x3)
14489 {
14490 error ("argument 1 must be 0 or 2");
14491 return CONST0_RTX (tmode);
14492 }
14493 }
14494 else if (icode == CODE_FOR_dfp_denbcd_dd
14495 || icode == CODE_FOR_dfp_denbcd_td)
14496 {
14497 /* Only allow 1-bit unsigned literals. */
14498 STRIP_NOPS (arg0);
14499 if (TREE_CODE (arg0) != INTEGER_CST
14500 || TREE_INT_CST_LOW (arg0) & ~0x1)
14501 {
14502 error ("argument 1 must be a 1-bit unsigned literal");
14503 return CONST0_RTX (tmode);
14504 }
14505 }
14506 else if (icode == CODE_FOR_dfp_dscli_dd
14507 || icode == CODE_FOR_dfp_dscli_td
14508 || icode == CODE_FOR_dfp_dscri_dd
14509 || icode == CODE_FOR_dfp_dscri_td)
14510 {
14511 /* Only allow 6-bit unsigned literals. */
14512 STRIP_NOPS (arg1);
14513 if (TREE_CODE (arg1) != INTEGER_CST
14514 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14515 {
14516 error ("argument 2 must be a 6-bit unsigned literal");
14517 return CONST0_RTX (tmode);
14518 }
14519 }
14520 else if (icode == CODE_FOR_crypto_vshasigmaw
14521 || icode == CODE_FOR_crypto_vshasigmad)
14522 {
14523 /* Check whether the 2nd and 3rd arguments are integer constants in the
14524 required ranges, and prepare the arguments. */
14525 STRIP_NOPS (arg1);
14526 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14527 {
14528 error ("argument 2 must be 0 or 1");
14529 return CONST0_RTX (tmode);
14530 }
14531
14532 STRIP_NOPS (arg2);
14533 if (TREE_CODE (arg2) != INTEGER_CST
14534 || wi::geu_p (wi::to_wide (arg2), 16))
14535 {
14536 error ("argument 3 must be in the range 0..15");
14537 return CONST0_RTX (tmode);
14538 }
14539 }
14540
14541 if (target == 0
14542 || GET_MODE (target) != tmode
14543 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14544 target = gen_reg_rtx (tmode);
14545
14546 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14547 op0 = copy_to_mode_reg (mode0, op0);
14548 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14549 op1 = copy_to_mode_reg (mode1, op1);
14550 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14551 op2 = copy_to_mode_reg (mode2, op2);
14552
14553 pat = GEN_FCN (icode) (target, op0, op1, op2);
14554 if (! pat)
14555 return 0;
14556 emit_insn (pat);
14557
14558 return target;
14559 }
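
/* Editorial sketch of how the literal checks above surface at the source
   level (using the documented <altivec.h> interfaces):

     vector int r1 = vec_sld (a, b, 3);    OK, 3 fits the 4-bit vsldoi field
     vector int r2 = vec_sld (a, b, n);    rejected unless n folds to an
                                           integer constant in 0..15

   The non-constant case reaches the TREE_CODE (arg2) != INTEGER_CST test
   and gets the "argument 3 must be a 4-bit unsigned literal" error.  */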
14560
14561
14562 /* Expand the dst builtins. */
14563 static rtx
14564 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14565 bool *expandedp)
14566 {
14567 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14568 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14569 tree arg0, arg1, arg2;
14570 machine_mode mode0, mode1;
14571 rtx pat, op0, op1, op2;
14572 const struct builtin_description *d;
14573 size_t i;
14574
14575 *expandedp = false;
14576
14577 /* Handle DST variants. */
14578 d = bdesc_dst;
14579 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14580 if (d->code == fcode)
14581 {
14582 arg0 = CALL_EXPR_ARG (exp, 0);
14583 arg1 = CALL_EXPR_ARG (exp, 1);
14584 arg2 = CALL_EXPR_ARG (exp, 2);
14585 op0 = expand_normal (arg0);
14586 op1 = expand_normal (arg1);
14587 op2 = expand_normal (arg2);
14588 mode0 = insn_data[d->icode].operand[0].mode;
14589 mode1 = insn_data[d->icode].operand[1].mode;
14590
14591 /* Invalid arguments; bail out before generating bad rtl. */
14592 if (arg0 == error_mark_node
14593 || arg1 == error_mark_node
14594 || arg2 == error_mark_node)
14595 return const0_rtx;
14596
14597 *expandedp = true;
14598 STRIP_NOPS (arg2);
14599 if (TREE_CODE (arg2) != INTEGER_CST
14600 || TREE_INT_CST_LOW (arg2) & ~0x3)
14601 {
14602 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14603 return const0_rtx;
14604 }
14605
14606 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14607 op0 = copy_to_mode_reg (Pmode, op0);
14608 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14609 op1 = copy_to_mode_reg (mode1, op1);
14610
14611 pat = GEN_FCN (d->icode) (op0, op1, op2);
14612 if (pat != 0)
14613 emit_insn (pat);
14614
14615 return NULL_RTX;
14616 }
14617
14618 return NULL_RTX;
14619 }
14620
14621 /* Expand vec_init builtin. */
14622 static rtx
14623 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14624 {
14625 machine_mode tmode = TYPE_MODE (type);
14626 machine_mode inner_mode = GET_MODE_INNER (tmode);
14627 int i, n_elt = GET_MODE_NUNITS (tmode);
14628
14629 gcc_assert (VECTOR_MODE_P (tmode));
14630 gcc_assert (n_elt == call_expr_nargs (exp));
14631
14632 if (!target || !register_operand (target, tmode))
14633 target = gen_reg_rtx (tmode);
14634
14635 /* If we have a vector comprised of a single element, such as V1TImode, do
14636 the initialization directly. */
14637 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14638 {
14639 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14640 emit_move_insn (target, gen_lowpart (tmode, x));
14641 }
14642 else
14643 {
14644 rtvec v = rtvec_alloc (n_elt);
14645
14646 for (i = 0; i < n_elt; ++i)
14647 {
14648 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14649 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14650 }
14651
14652 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14653 }
14654
14655 return target;
14656 }
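
/* Editorial note, hedged: each VEC_INIT builtin carries one argument per
   vector element (see the gcc_assert above), so a V4SI init call
   conceptually corresponds to the vector constructor

     vector int v = (vector int){1, 2, 3, 4};

   and is expanded through rs6000_expand_vector_init unless the vector has a
   single element, as for V1TImode.  */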
14657
14658 /* Return the integer constant in ARG. Constrain it to be in the range
14659 of the subparts of VEC_TYPE; issue an error if not. */
14660
14661 static int
14662 get_element_number (tree vec_type, tree arg)
14663 {
14664 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14665
14666 if (!tree_fits_uhwi_p (arg)
14667 || (elt = tree_to_uhwi (arg), elt > max))
14668 {
14669 error ("selector must be an integer constant in the range 0..%wi", max);
14670 return 0;
14671 }
14672
14673 return elt;
14674 }
14675
14676 /* Expand vec_set builtin. */
14677 static rtx
14678 altivec_expand_vec_set_builtin (tree exp)
14679 {
14680 machine_mode tmode, mode1;
14681 tree arg0, arg1, arg2;
14682 int elt;
14683 rtx op0, op1;
14684
14685 arg0 = CALL_EXPR_ARG (exp, 0);
14686 arg1 = CALL_EXPR_ARG (exp, 1);
14687 arg2 = CALL_EXPR_ARG (exp, 2);
14688
14689 tmode = TYPE_MODE (TREE_TYPE (arg0));
14690 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14691 gcc_assert (VECTOR_MODE_P (tmode));
14692
14693 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14694 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14695 elt = get_element_number (TREE_TYPE (arg0), arg2);
14696
14697 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14698 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14699
14700 op0 = force_reg (tmode, op0);
14701 op1 = force_reg (mode1, op1);
14702
14703 rs6000_expand_vector_set (op0, op1, elt);
14704
14705 return op0;
14706 }
14707
14708 /* Expand vec_ext builtin. */
14709 static rtx
14710 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14711 {
14712 machine_mode tmode, mode0;
14713 tree arg0, arg1;
14714 rtx op0;
14715 rtx op1;
14716
14717 arg0 = CALL_EXPR_ARG (exp, 0);
14718 arg1 = CALL_EXPR_ARG (exp, 1);
14719
14720 op0 = expand_normal (arg0);
14721 op1 = expand_normal (arg1);
14722
14723 /* Call get_element_number to validate arg1 if it is a constant. */
14724 if (TREE_CODE (arg1) == INTEGER_CST)
14725 (void) get_element_number (TREE_TYPE (arg0), arg1);
14726
14727 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14728 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14729 gcc_assert (VECTOR_MODE_P (mode0));
14730
14731 op0 = force_reg (mode0, op0);
14732
14733 if (optimize || !target || !register_operand (target, tmode))
14734 target = gen_reg_rtx (tmode);
14735
14736 rs6000_expand_vector_extract (target, op0, op1);
14737
14738 return target;
14739 }
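
/* Editorial usage sketch for the two expanders above (documented
   <altivec.h> interfaces):

     vector int v;
     v = vec_insert (42, v, 1);        vec_set: element 1 becomes 42
     int e = vec_extract (v, 2);       vec_ext: read element 2

   A constant selector is validated by get_element_number; a variable
   selector is still accepted for extraction and is handled by
   rs6000_expand_vector_extract.  */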
14740
14741 /* Expand the builtin in EXP and store the result in TARGET. Store
14742 true in *EXPANDEDP if we found a builtin to expand. */
14743 static rtx
14744 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14745 {
14746 const struct builtin_description *d;
14747 size_t i;
14748 enum insn_code icode;
14749 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14750 tree arg0, arg1, arg2;
14751 rtx op0, pat;
14752 machine_mode tmode, mode0;
14753 enum rs6000_builtins fcode
14754 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14755
14756 if (rs6000_overloaded_builtin_p (fcode))
14757 {
14758 *expandedp = true;
14759 error ("unresolved overload for Altivec builtin %qF", fndecl);
14760
14761 /* Given it is invalid, just generate a normal call. */
14762 return expand_call (exp, target, false);
14763 }
14764
14765 target = altivec_expand_dst_builtin (exp, target, expandedp);
14766 if (*expandedp)
14767 return target;
14768
14769 *expandedp = true;
14770
14771 switch (fcode)
14772 {
14773 case ALTIVEC_BUILTIN_STVX_V2DF:
14774 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14775 case ALTIVEC_BUILTIN_STVX_V2DI:
14776 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14777 case ALTIVEC_BUILTIN_STVX_V4SF:
14778 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14779 case ALTIVEC_BUILTIN_STVX:
14780 case ALTIVEC_BUILTIN_STVX_V4SI:
14781 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14782 case ALTIVEC_BUILTIN_STVX_V8HI:
14783 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14784 case ALTIVEC_BUILTIN_STVX_V16QI:
14785 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14786 case ALTIVEC_BUILTIN_STVEBX:
14787 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14788 case ALTIVEC_BUILTIN_STVEHX:
14789 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14790 case ALTIVEC_BUILTIN_STVEWX:
14791 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14792 case ALTIVEC_BUILTIN_STVXL_V2DF:
14793 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14794 case ALTIVEC_BUILTIN_STVXL_V2DI:
14795 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14796 case ALTIVEC_BUILTIN_STVXL_V4SF:
14797 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14798 case ALTIVEC_BUILTIN_STVXL:
14799 case ALTIVEC_BUILTIN_STVXL_V4SI:
14800 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14801 case ALTIVEC_BUILTIN_STVXL_V8HI:
14802 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14803 case ALTIVEC_BUILTIN_STVXL_V16QI:
14804 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14805
14806 case ALTIVEC_BUILTIN_STVLX:
14807 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14808 case ALTIVEC_BUILTIN_STVLXL:
14809 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14810 case ALTIVEC_BUILTIN_STVRX:
14811 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14812 case ALTIVEC_BUILTIN_STVRXL:
14813 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14814
14815 case P9V_BUILTIN_STXVL:
14816 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14817
14818 case P9V_BUILTIN_XST_LEN_R:
14819 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14820
14821 case VSX_BUILTIN_STXVD2X_V1TI:
14822 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14823 case VSX_BUILTIN_STXVD2X_V2DF:
14824 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14825 case VSX_BUILTIN_STXVD2X_V2DI:
14826 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14827 case VSX_BUILTIN_STXVW4X_V4SF:
14828 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14829 case VSX_BUILTIN_STXVW4X_V4SI:
14830 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14831 case VSX_BUILTIN_STXVW4X_V8HI:
14832 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14833 case VSX_BUILTIN_STXVW4X_V16QI:
14834 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14835
14836 /* For the following on big-endian, it's ok to use any appropriate
14837 unaligned-supporting store, so use a generic expander. For
14838 little-endian, the exact element-reversing instruction must
14839 be used. */
14840 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14841 {
14842 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14843 : CODE_FOR_vsx_st_elemrev_v1ti);
14844 return altivec_expand_stv_builtin (code, exp);
14845 }
14846 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14847 {
14848 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14849 : CODE_FOR_vsx_st_elemrev_v2df);
14850 return altivec_expand_stv_builtin (code, exp);
14851 }
14852 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14853 {
14854 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14855 : CODE_FOR_vsx_st_elemrev_v2di);
14856 return altivec_expand_stv_builtin (code, exp);
14857 }
14858 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14859 {
14860 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14861 : CODE_FOR_vsx_st_elemrev_v4sf);
14862 return altivec_expand_stv_builtin (code, exp);
14863 }
14864 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14865 {
14866 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14867 : CODE_FOR_vsx_st_elemrev_v4si);
14868 return altivec_expand_stv_builtin (code, exp);
14869 }
14870 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14871 {
14872 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14873 : CODE_FOR_vsx_st_elemrev_v8hi);
14874 return altivec_expand_stv_builtin (code, exp);
14875 }
14876 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14877 {
14878 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14879 : CODE_FOR_vsx_st_elemrev_v16qi);
14880 return altivec_expand_stv_builtin (code, exp);
14881 }
14882
14883 case ALTIVEC_BUILTIN_MFVSCR:
14884 icode = CODE_FOR_altivec_mfvscr;
14885 tmode = insn_data[icode].operand[0].mode;
14886
14887 if (target == 0
14888 || GET_MODE (target) != tmode
14889 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14890 target = gen_reg_rtx (tmode);
14891
14892 pat = GEN_FCN (icode) (target);
14893 if (! pat)
14894 return 0;
14895 emit_insn (pat);
14896 return target;
14897
14898 case ALTIVEC_BUILTIN_MTVSCR:
14899 icode = CODE_FOR_altivec_mtvscr;
14900 arg0 = CALL_EXPR_ARG (exp, 0);
14901 op0 = expand_normal (arg0);
14902 mode0 = insn_data[icode].operand[0].mode;
14903
14904 /* If we got invalid arguments, bail out before generating bad rtl. */
14905 if (arg0 == error_mark_node)
14906 return const0_rtx;
14907
14908 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14909 op0 = copy_to_mode_reg (mode0, op0);
14910
14911 pat = GEN_FCN (icode) (op0);
14912 if (pat)
14913 emit_insn (pat);
14914 return NULL_RTX;
14915
14916 case ALTIVEC_BUILTIN_DSSALL:
14917 emit_insn (gen_altivec_dssall ());
14918 return NULL_RTX;
14919
14920 case ALTIVEC_BUILTIN_DSS:
14921 icode = CODE_FOR_altivec_dss;
14922 arg0 = CALL_EXPR_ARG (exp, 0);
14923 STRIP_NOPS (arg0);
14924 op0 = expand_normal (arg0);
14925 mode0 = insn_data[icode].operand[0].mode;
14926
14927 /* If we got invalid arguments, bail out before generating bad rtl. */
14928 if (arg0 == error_mark_node)
14929 return const0_rtx;
14930
14931 if (TREE_CODE (arg0) != INTEGER_CST
14932 || TREE_INT_CST_LOW (arg0) & ~0x3)
14933 {
14934 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14935 return const0_rtx;
14936 }
14937
14938 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14939 op0 = copy_to_mode_reg (mode0, op0);
14940
14941 emit_insn (gen_altivec_dss (op0));
14942 return NULL_RTX;
14943
14944 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14945 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14946 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14947 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14948 case VSX_BUILTIN_VEC_INIT_V2DF:
14949 case VSX_BUILTIN_VEC_INIT_V2DI:
14950 case VSX_BUILTIN_VEC_INIT_V1TI:
14951 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14952
14953 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14954 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14955 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14956 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14957 case VSX_BUILTIN_VEC_SET_V2DF:
14958 case VSX_BUILTIN_VEC_SET_V2DI:
14959 case VSX_BUILTIN_VEC_SET_V1TI:
14960 return altivec_expand_vec_set_builtin (exp);
14961
14962 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14963 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14964 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14965 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14966 case VSX_BUILTIN_VEC_EXT_V2DF:
14967 case VSX_BUILTIN_VEC_EXT_V2DI:
14968 case VSX_BUILTIN_VEC_EXT_V1TI:
14969 return altivec_expand_vec_ext_builtin (exp, target);
14970
14971 case P9V_BUILTIN_VEC_EXTRACT4B:
14972 arg1 = CALL_EXPR_ARG (exp, 1);
14973 STRIP_NOPS (arg1);
14974
14975 /* Generate a normal call if it is invalid. */
14976 if (arg1 == error_mark_node)
14977 return expand_call (exp, target, false);
14978
14979 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14980 {
14981 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14982 return expand_call (exp, target, false);
14983 }
14984 break;
14985
14986 case P9V_BUILTIN_VEC_INSERT4B:
14987 arg2 = CALL_EXPR_ARG (exp, 2);
14988 STRIP_NOPS (arg2);
14989
14990 /* Generate a normal call if it is invalid. */
14991 if (arg2 == error_mark_node)
14992 return expand_call (exp, target, false);
14993
14994 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14995 {
14996 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
14997 return expand_call (exp, target, false);
14998 }
14999 break;
15000
15001 default:
15002 /* Fall through to the generic handling below. */
15003 break;
15004 }
15005
15006 /* Expand abs* operations. */
15007 d = bdesc_abs;
15008 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15009 if (d->code == fcode)
15010 return altivec_expand_abs_builtin (d->icode, exp, target);
15011
15012 /* Expand the AltiVec predicates. */
15013 d = bdesc_altivec_preds;
15014 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15015 if (d->code == fcode)
15016 return altivec_expand_predicate_builtin (d->icode, exp, target);
15017
15018 /* LV* are funky; we initialize them differently. */
15019 switch (fcode)
15020 {
15021 case ALTIVEC_BUILTIN_LVSL:
15022 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15023 exp, target, false);
15024 case ALTIVEC_BUILTIN_LVSR:
15025 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15026 exp, target, false);
15027 case ALTIVEC_BUILTIN_LVEBX:
15028 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15029 exp, target, false);
15030 case ALTIVEC_BUILTIN_LVEHX:
15031 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15032 exp, target, false);
15033 case ALTIVEC_BUILTIN_LVEWX:
15034 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15035 exp, target, false);
15036 case ALTIVEC_BUILTIN_LVXL_V2DF:
15037 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15038 exp, target, false);
15039 case ALTIVEC_BUILTIN_LVXL_V2DI:
15040 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15041 exp, target, false);
15042 case ALTIVEC_BUILTIN_LVXL_V4SF:
15043 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15044 exp, target, false);
15045 case ALTIVEC_BUILTIN_LVXL:
15046 case ALTIVEC_BUILTIN_LVXL_V4SI:
15047 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15048 exp, target, false);
15049 case ALTIVEC_BUILTIN_LVXL_V8HI:
15050 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15051 exp, target, false);
15052 case ALTIVEC_BUILTIN_LVXL_V16QI:
15053 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15054 exp, target, false);
15055 case ALTIVEC_BUILTIN_LVX_V1TI:
15056 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15057 exp, target, false);
15058 case ALTIVEC_BUILTIN_LVX_V2DF:
15059 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15060 exp, target, false);
15061 case ALTIVEC_BUILTIN_LVX_V2DI:
15062 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15063 exp, target, false);
15064 case ALTIVEC_BUILTIN_LVX_V4SF:
15065 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15066 exp, target, false);
15067 case ALTIVEC_BUILTIN_LVX:
15068 case ALTIVEC_BUILTIN_LVX_V4SI:
15069 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15070 exp, target, false);
15071 case ALTIVEC_BUILTIN_LVX_V8HI:
15072 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15073 exp, target, false);
15074 case ALTIVEC_BUILTIN_LVX_V16QI:
15075 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15076 exp, target, false);
15077 case ALTIVEC_BUILTIN_LVLX:
15078 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15079 exp, target, true);
15080 case ALTIVEC_BUILTIN_LVLXL:
15081 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15082 exp, target, true);
15083 case ALTIVEC_BUILTIN_LVRX:
15084 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15085 exp, target, true);
15086 case ALTIVEC_BUILTIN_LVRXL:
15087 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15088 exp, target, true);
15089 case VSX_BUILTIN_LXVD2X_V1TI:
15090 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15091 exp, target, false);
15092 case VSX_BUILTIN_LXVD2X_V2DF:
15093 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15094 exp, target, false);
15095 case VSX_BUILTIN_LXVD2X_V2DI:
15096 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15097 exp, target, false);
15098 case VSX_BUILTIN_LXVW4X_V4SF:
15099 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15100 exp, target, false);
15101 case VSX_BUILTIN_LXVW4X_V4SI:
15102 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15103 exp, target, false);
15104 case VSX_BUILTIN_LXVW4X_V8HI:
15105 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15106 exp, target, false);
15107 case VSX_BUILTIN_LXVW4X_V16QI:
15108 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15109 exp, target, false);
15110 /* For the following on big-endian, it's ok to use any appropriate
15111 unaligned-supporting load, so use a generic expander. For
15112 little-endian, the exact element-reversing instruction must
15113 be used. */
15114 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15115 {
15116 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15117 : CODE_FOR_vsx_ld_elemrev_v2df);
15118 return altivec_expand_lv_builtin (code, exp, target, false);
15119 }
15120 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15121 {
15122 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15123 : CODE_FOR_vsx_ld_elemrev_v1ti);
15124 return altivec_expand_lv_builtin (code, exp, target, false);
15125 }
15126 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15127 {
15128 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15129 : CODE_FOR_vsx_ld_elemrev_v2di);
15130 return altivec_expand_lv_builtin (code, exp, target, false);
15131 }
15132 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15133 {
15134 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15135 : CODE_FOR_vsx_ld_elemrev_v4sf);
15136 return altivec_expand_lv_builtin (code, exp, target, false);
15137 }
15138 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15139 {
15140 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15141 : CODE_FOR_vsx_ld_elemrev_v4si);
15142 return altivec_expand_lv_builtin (code, exp, target, false);
15143 }
15144 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15145 {
15146 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15147 : CODE_FOR_vsx_ld_elemrev_v8hi);
15148 return altivec_expand_lv_builtin (code, exp, target, false);
15149 }
15150 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15151 {
15152 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15153 : CODE_FOR_vsx_ld_elemrev_v16qi);
15154 return altivec_expand_lv_builtin (code, exp, target, false);
15155 }
15156 break;
15157 default:
15158 /* Fall through to the generic handling below. */
15159 break;
15160 }
15161
15162 *expandedp = false;
15163 return NULL_RTX;
15164 }
15165
15166 /* Check whether a builtin function is supported in this target
15167 configuration. */
15168 bool
15169 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15170 {
15171 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
15172 return (fnmask & rs6000_builtin_mask) == fnmask;
15173 }
15177
15178 /* Issue an error for a builtin function that is called without the
15179 appropriate target options being set. */
15180
15181 static void
15182 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15183 {
15184 size_t uns_fncode = (size_t) fncode;
15185 const char *name = rs6000_builtin_info[uns_fncode].name;
15186 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15187
15188 gcc_assert (name != NULL);
15189 if ((fnmask & RS6000_BTM_CELL) != 0)
15190 error ("builtin function %qs is only valid for the cell processor", name);
15191 else if ((fnmask & RS6000_BTM_VSX) != 0)
15192 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15193 else if ((fnmask & RS6000_BTM_HTM) != 0)
15194 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15195 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15196 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15197 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15198 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15199 error ("builtin function %qs requires the %qs and %qs options",
15200 name, "-mhard-dfp", "-mpower8-vector");
15201 else if ((fnmask & RS6000_BTM_DFP) != 0)
15202 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15203 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15204 error ("builtin function %qs requires the %qs option", name,
15205 "-mpower8-vector");
15206 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15207 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15208 error ("builtin function %qs requires the %qs and %qs options",
15209 name, "-mcpu=power9", "-m64");
15210 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15211 error ("builtin function %qs requires the %qs option", name,
15212 "-mcpu=power9");
15213 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15214 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15215 error ("builtin function %qs requires the %qs and %qs options",
15216 name, "-mcpu=power9", "-m64");
15217 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15218 error ("builtin function %qs requires the %qs option", name,
15219 "-mcpu=power9");
15220 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15221 {
15222 if (!TARGET_HARD_FLOAT)
15223 error ("builtin function %qs requires the %qs option", name,
15224 "-mhard-float");
15225 else
15226 error ("builtin function %qs requires the %qs option", name,
15227 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15228 }
15229 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15230 error ("builtin function %qs requires the %qs option", name,
15231 "-mhard-float");
15232 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15233 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15234 name);
15235 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15236 error ("builtin function %qs requires the %qs option", name, "-mfloat128");
15237 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15238 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15239 error ("builtin function %qs requires the %qs (or newer), and "
15240 "%qs or %qs options",
15241 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15242 else
15243 error ("builtin function %qs is not supported with the current options",
15244 name);
15245 }
15246
15247 /* Target hook for early folding of built-ins, shamelessly stolen
15248 from ia64.c. */
15249
15250 static tree
15251 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15252 int n_args ATTRIBUTE_UNUSED,
15253 tree *args ATTRIBUTE_UNUSED,
15254 bool ignore ATTRIBUTE_UNUSED)
15255 {
15256 #ifdef SUBTARGET_FOLD_BUILTIN
15257 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15258 #else
15259 return NULL_TREE;
15260 #endif
15261 }
15262
15263 /* Helper function to sort out which built-ins may be valid without having
15264 an lhs. */
15265 static bool
15266 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15267 {
15268 switch (fn_code)
15269 {
15270 case ALTIVEC_BUILTIN_STVX_V16QI:
15271 case ALTIVEC_BUILTIN_STVX_V8HI:
15272 case ALTIVEC_BUILTIN_STVX_V4SI:
15273 case ALTIVEC_BUILTIN_STVX_V4SF:
15274 case ALTIVEC_BUILTIN_STVX_V2DI:
15275 case ALTIVEC_BUILTIN_STVX_V2DF:
15276 case VSX_BUILTIN_STXVW4X_V16QI:
15277 case VSX_BUILTIN_STXVW4X_V8HI:
15278 case VSX_BUILTIN_STXVW4X_V4SF:
15279 case VSX_BUILTIN_STXVW4X_V4SI:
15280 case VSX_BUILTIN_STXVD2X_V2DF:
15281 case VSX_BUILTIN_STXVD2X_V2DI:
15282 return true;
15283 default:
15284 return false;
15285 }
15286 }
15287
15288 /* Helper function to handle the gimple folding of a vector compare
15289 operation. This sets up true/false vectors, and uses the
15290 VEC_COND_EXPR operation.
15291 CODE indicates which comparison is to be made. (EQ, GT, ...).
15292 TYPE indicates the type of the result. */
15293 static tree
15294 fold_build_vec_cmp (tree_code code, tree type,
15295 tree arg0, tree arg1)
15296 {
15297 tree cmp_type = build_same_sized_truth_vector_type (type);
15298 tree zero_vec = build_zero_cst (type);
15299 tree minus_one_vec = build_minus_one_cst (type);
15300 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15301 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15302 }
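
/* Editorial note: for V4SI operands the helper above effectively builds

     cmp = fold_build2 (EQ_EXPR, <bool vector>, a, b);
     lhs = VEC_COND_EXPR <cmp, {-1,-1,-1,-1}, {0,0,0,0}>;

   i.e. all-ones lanes where the comparison holds and zero lanes elsewhere,
   matching the AltiVec convention for vector compare results.  */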
15303
15304 /* Helper function to handle the in-between steps for the
15305 vector compare built-ins. */
15306 static void
15307 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15308 {
15309 tree arg0 = gimple_call_arg (stmt, 0);
15310 tree arg1 = gimple_call_arg (stmt, 1);
15311 tree lhs = gimple_call_lhs (stmt);
15312 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15313 gimple *g = gimple_build_assign (lhs, cmp);
15314 gimple_set_location (g, gimple_location (stmt));
15315 gsi_replace (gsi, g, true);
15316 }
15317
15318 /* Helper function to map V2DF and V4SF types to their
15319 integral equivalents (V2DI and V4SI). */
15320 tree map_to_integral_tree_type (tree input_tree_type)
15321 {
15322 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15323 return input_tree_type;
15324 else
15325 {
15326 if (types_compatible_p (TREE_TYPE (input_tree_type),
15327 TREE_TYPE (V2DF_type_node)))
15328 return V2DI_type_node;
15329 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15330 TREE_TYPE (V4SF_type_node)))
15331 return V4SI_type_node;
15332 else
15333 gcc_unreachable ();
15334 }
15335 }
15336
15337 /* Helper function to handle the vector merge[hl] built-ins. The
15338 implementation difference between the h and l versions lies in the
15339 values used when building the permute vector for the high-word versus
15340 low-word merge. The variance is keyed off the use_high parameter. */
15341 static void
15342 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15343 {
15344 tree arg0 = gimple_call_arg (stmt, 0);
15345 tree arg1 = gimple_call_arg (stmt, 1);
15346 tree lhs = gimple_call_lhs (stmt);
15347 tree lhs_type = TREE_TYPE (lhs);
15348 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15349 int midpoint = n_elts / 2;
15350 int offset = 0;
15351
15352 if (use_high == 1)
15353 offset = midpoint;
15354
15355 /* The permute_type will match the lhs for integral types. For double and
15356 float types, the permute type needs to map to the V2 or V4 type that
15357 matches in size. */
15358 tree permute_type = map_to_integral_tree_type (lhs_type);
15360 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15361
15362 for (int i = 0; i < midpoint; i++)
15363 {
15364 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15365 offset + i));
15366 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15367 offset + n_elts + i));
15368 }
15369
15370 tree permute = elts.build ();
15371
15372 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15373 gimple_set_location (g, gimple_location (stmt));
15374 gsi_replace (gsi, g, true);
15375 }
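
/* Editorial note: for a V4SI merge the loop above produces the permute
   selector {0, 4, 1, 5} when use_high is 0 and {2, 6, 3, 7} when use_high
   is 1, interleaving the selected halves of the two input vectors.  */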
15376
15377 /* Helper function to handle the vector merge[eo] built-ins. */
15378 static void
15379 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15380 {
15381 tree arg0 = gimple_call_arg (stmt, 0);
15382 tree arg1 = gimple_call_arg (stmt, 1);
15383 tree lhs = gimple_call_lhs (stmt);
15384 tree lhs_type = TREE_TYPE (lhs);
15385 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15386
15387 /* The permute_type will match the lhs for integral types. For double and
15388 float types, the permute type needs to map to the V2 or V4 type that
15389 matches in size. */
15390 tree permute_type = map_to_integral_tree_type (lhs_type);
15392
15393 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15394
15395 /* Build the permute vector. */
15396 for (int i = 0; i < n_elts / 2; i++)
15397 {
15398 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15399 2*i + use_odd));
15400 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15401 2*i + use_odd + n_elts));
15402 }
15403
15404 tree permute = elts.build ();
15405
15406 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15407 gimple_set_location (g, gimple_location (stmt));
15408 gsi_replace (gsi, g, true);
15409 }
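/* Worked example of the selector built above for a V4SI result
   (n_elts == 4):
     use_odd == 0: permute = { 0, 4, 2, 6 }   (even elements)
     use_odd == 1: permute = { 1, 5, 3, 7 }   (odd elements)  */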
15410
15411 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15412 a constant, use rs6000_fold_builtin.) */
15413
15414 bool
15415 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15416 {
15417 gimple *stmt = gsi_stmt (*gsi);
15418 tree fndecl = gimple_call_fndecl (stmt);
15419 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15420 enum rs6000_builtins fn_code
15421 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15422 tree arg0, arg1, lhs, temp;
15423 enum tree_code bcode;
15424 gimple *g;
15425
15426 size_t uns_fncode = (size_t) fn_code;
15427 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15428 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15429 const char *fn_name2 = (icode != CODE_FOR_nothing)
15430 ? get_insn_name ((int) icode)
15431 : "nothing";
15432
15433 if (TARGET_DEBUG_BUILTIN)
15434 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15435 fn_code, fn_name1, fn_name2);
15436
15437 if (!rs6000_fold_gimple)
15438 return false;
15439
15440 /* Prevent gimple folding for code that does not have an LHS, unless it
15441 is allowed per the rs6000_builtin_valid_without_lhs helper function. */
15442 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15443 return false;
15444
15445 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15446 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15447 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15448 if (!func_valid_p)
15449 return false;
15450
15451 switch (fn_code)
15452 {
15453 /* Flavors of vec_add. We deliberately don't expand
15454 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15455 TImode, resulting in much poorer code generation. */
15456 case ALTIVEC_BUILTIN_VADDUBM:
15457 case ALTIVEC_BUILTIN_VADDUHM:
15458 case ALTIVEC_BUILTIN_VADDUWM:
15459 case P8V_BUILTIN_VADDUDM:
15460 case ALTIVEC_BUILTIN_VADDFP:
15461 case VSX_BUILTIN_XVADDDP:
15462 bcode = PLUS_EXPR;
15463 do_binary:
15464 arg0 = gimple_call_arg (stmt, 0);
15465 arg1 = gimple_call_arg (stmt, 1);
15466 lhs = gimple_call_lhs (stmt);
15467 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15468 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15469 {
15470 /* Ensure the binary operation is performed in a type
15471 that wraps if it is an integral type. */
15472 gimple_seq stmts = NULL;
15473 tree type = unsigned_type_for (TREE_TYPE (lhs));
15474 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15475 type, arg0);
15476 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15477 type, arg1);
15478 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15479 type, uarg0, uarg1);
15480 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15481 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15482 build1 (VIEW_CONVERT_EXPR,
15483 TREE_TYPE (lhs), res));
15484 gsi_replace (gsi, g, true);
15485 return true;
15486 }
15487 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15488 gimple_set_location (g, gimple_location (stmt));
15489 gsi_replace (gsi, g, true);
15490 return true;
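/* As an illustration of the wrapping rewrite above (a sketch; the SSA
   names are invented), a vec_add on vector signed int whose element
   type does not wrap on overflow becomes roughly:
     u0_ = VIEW_CONVERT_EXPR<vector unsigned int>(arg0);
     u1_ = VIEW_CONVERT_EXPR<vector unsigned int>(arg1);
     t_  = u0_ + u1_;
     lhs = VIEW_CONVERT_EXPR<vector signed int>(t_);
   keeping the addition well-defined in GIMPLE.  */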
15491 /* Flavors of vec_sub. We deliberately don't expand
15492 P8V_BUILTIN_VSUBUQM. */
15493 case ALTIVEC_BUILTIN_VSUBUBM:
15494 case ALTIVEC_BUILTIN_VSUBUHM:
15495 case ALTIVEC_BUILTIN_VSUBUWM:
15496 case P8V_BUILTIN_VSUBUDM:
15497 case ALTIVEC_BUILTIN_VSUBFP:
15498 case VSX_BUILTIN_XVSUBDP:
15499 bcode = MINUS_EXPR;
15500 goto do_binary;
15501 case VSX_BUILTIN_XVMULSP:
15502 case VSX_BUILTIN_XVMULDP:
15503 arg0 = gimple_call_arg (stmt, 0);
15504 arg1 = gimple_call_arg (stmt, 1);
15505 lhs = gimple_call_lhs (stmt);
15506 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15507 gimple_set_location (g, gimple_location (stmt));
15508 gsi_replace (gsi, g, true);
15509 return true;
15510 /* Even element flavors of vec_mul (signed). */
15511 case ALTIVEC_BUILTIN_VMULESB:
15512 case ALTIVEC_BUILTIN_VMULESH:
15513 case P8V_BUILTIN_VMULESW:
15514 /* Even element flavors of vec_mul (unsigned). */
15515 case ALTIVEC_BUILTIN_VMULEUB:
15516 case ALTIVEC_BUILTIN_VMULEUH:
15517 case P8V_BUILTIN_VMULEUW:
15518 arg0 = gimple_call_arg (stmt, 0);
15519 arg1 = gimple_call_arg (stmt, 1);
15520 lhs = gimple_call_lhs (stmt);
15521 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15522 gimple_set_location (g, gimple_location (stmt));
15523 gsi_replace (gsi, g, true);
15524 return true;
15525 /* Odd element flavors of vec_mul (signed). */
15526 case ALTIVEC_BUILTIN_VMULOSB:
15527 case ALTIVEC_BUILTIN_VMULOSH:
15528 case P8V_BUILTIN_VMULOSW:
15529 /* Odd element flavors of vec_mul (unsigned). */
15530 case ALTIVEC_BUILTIN_VMULOUB:
15531 case ALTIVEC_BUILTIN_VMULOUH:
15532 case P8V_BUILTIN_VMULOUW:
15533 arg0 = gimple_call_arg (stmt, 0);
15534 arg1 = gimple_call_arg (stmt, 1);
15535 lhs = gimple_call_lhs (stmt);
15536 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15537 gimple_set_location (g, gimple_location (stmt));
15538 gsi_replace (gsi, g, true);
15539 return true;
15540 /* Flavors of vec_div (Integer). */
15541 case VSX_BUILTIN_DIV_V2DI:
15542 case VSX_BUILTIN_UDIV_V2DI:
15543 arg0 = gimple_call_arg (stmt, 0);
15544 arg1 = gimple_call_arg (stmt, 1);
15545 lhs = gimple_call_lhs (stmt);
15546 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15547 gimple_set_location (g, gimple_location (stmt));
15548 gsi_replace (gsi, g, true);
15549 return true;
15550 /* Flavors of vec_div (Float). */
15551 case VSX_BUILTIN_XVDIVSP:
15552 case VSX_BUILTIN_XVDIVDP:
15553 arg0 = gimple_call_arg (stmt, 0);
15554 arg1 = gimple_call_arg (stmt, 1);
15555 lhs = gimple_call_lhs (stmt);
15556 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15557 gimple_set_location (g, gimple_location (stmt));
15558 gsi_replace (gsi, g, true);
15559 return true;
15560 /* Flavors of vec_and. */
15561 case ALTIVEC_BUILTIN_VAND:
15562 arg0 = gimple_call_arg (stmt, 0);
15563 arg1 = gimple_call_arg (stmt, 1);
15564 lhs = gimple_call_lhs (stmt);
15565 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15566 gimple_set_location (g, gimple_location (stmt));
15567 gsi_replace (gsi, g, true);
15568 return true;
15569 /* Flavors of vec_andc. */
15570 case ALTIVEC_BUILTIN_VANDC:
15571 arg0 = gimple_call_arg (stmt, 0);
15572 arg1 = gimple_call_arg (stmt, 1);
15573 lhs = gimple_call_lhs (stmt);
15574 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15575 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15576 gimple_set_location (g, gimple_location (stmt));
15577 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15578 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15579 gimple_set_location (g, gimple_location (stmt));
15580 gsi_replace (gsi, g, true);
15581 return true;
15582 /* Flavors of vec_nand. */
15583 case P8V_BUILTIN_VEC_NAND:
15584 case P8V_BUILTIN_NAND_V16QI:
15585 case P8V_BUILTIN_NAND_V8HI:
15586 case P8V_BUILTIN_NAND_V4SI:
15587 case P8V_BUILTIN_NAND_V4SF:
15588 case P8V_BUILTIN_NAND_V2DF:
15589 case P8V_BUILTIN_NAND_V2DI:
15590 arg0 = gimple_call_arg (stmt, 0);
15591 arg1 = gimple_call_arg (stmt, 1);
15592 lhs = gimple_call_lhs (stmt);
15593 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15594 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15595 gimple_set_location (g, gimple_location (stmt));
15596 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15597 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15598 gimple_set_location (g, gimple_location (stmt));
15599 gsi_replace (gsi, g, true);
15600 return true;
15601 /* Flavors of vec_or. */
15602 case ALTIVEC_BUILTIN_VOR:
15603 arg0 = gimple_call_arg (stmt, 0);
15604 arg1 = gimple_call_arg (stmt, 1);
15605 lhs = gimple_call_lhs (stmt);
15606 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15607 gimple_set_location (g, gimple_location (stmt));
15608 gsi_replace (gsi, g, true);
15609 return true;
15610 /* Flavors of vec_orc. */
15611 case P8V_BUILTIN_ORC_V16QI:
15612 case P8V_BUILTIN_ORC_V8HI:
15613 case P8V_BUILTIN_ORC_V4SI:
15614 case P8V_BUILTIN_ORC_V4SF:
15615 case P8V_BUILTIN_ORC_V2DF:
15616 case P8V_BUILTIN_ORC_V2DI:
15617 arg0 = gimple_call_arg (stmt, 0);
15618 arg1 = gimple_call_arg (stmt, 1);
15619 lhs = gimple_call_lhs (stmt);
15620 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15621 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15622 gimple_set_location (g, gimple_location (stmt));
15623 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15624 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15625 gimple_set_location (g, gimple_location (stmt));
15626 gsi_replace (gsi, g, true);
15627 return true;
15628 /* Flavors of vec_xor. */
15629 case ALTIVEC_BUILTIN_VXOR:
15630 arg0 = gimple_call_arg (stmt, 0);
15631 arg1 = gimple_call_arg (stmt, 1);
15632 lhs = gimple_call_lhs (stmt);
15633 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15634 gimple_set_location (g, gimple_location (stmt));
15635 gsi_replace (gsi, g, true);
15636 return true;
15637 /* Flavors of vec_nor. */
15638 case ALTIVEC_BUILTIN_VNOR:
15639 arg0 = gimple_call_arg (stmt, 0);
15640 arg1 = gimple_call_arg (stmt, 1);
15641 lhs = gimple_call_lhs (stmt);
15642 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15643 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15644 gimple_set_location (g, gimple_location (stmt));
15645 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15646 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15647 gimple_set_location (g, gimple_location (stmt));
15648 gsi_replace (gsi, g, true);
15649 return true;
15650 /* Flavors of vec_abs. */
15651 case ALTIVEC_BUILTIN_ABS_V16QI:
15652 case ALTIVEC_BUILTIN_ABS_V8HI:
15653 case ALTIVEC_BUILTIN_ABS_V4SI:
15654 case ALTIVEC_BUILTIN_ABS_V4SF:
15655 case P8V_BUILTIN_ABS_V2DI:
15656 case VSX_BUILTIN_XVABSDP:
15657 arg0 = gimple_call_arg (stmt, 0);
15658 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15659 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15660 return false;
15661 lhs = gimple_call_lhs (stmt);
15662 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15663 gimple_set_location (g, gimple_location (stmt));
15664 gsi_replace (gsi, g, true);
15665 return true;
15666 /* Flavors of vec_min. */
15667 case VSX_BUILTIN_XVMINDP:
15668 case P8V_BUILTIN_VMINSD:
15669 case P8V_BUILTIN_VMINUD:
15670 case ALTIVEC_BUILTIN_VMINSB:
15671 case ALTIVEC_BUILTIN_VMINSH:
15672 case ALTIVEC_BUILTIN_VMINSW:
15673 case ALTIVEC_BUILTIN_VMINUB:
15674 case ALTIVEC_BUILTIN_VMINUH:
15675 case ALTIVEC_BUILTIN_VMINUW:
15676 case ALTIVEC_BUILTIN_VMINFP:
15677 arg0 = gimple_call_arg (stmt, 0);
15678 arg1 = gimple_call_arg (stmt, 1);
15679 lhs = gimple_call_lhs (stmt);
15680 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15681 gimple_set_location (g, gimple_location (stmt));
15682 gsi_replace (gsi, g, true);
15683 return true;
15684 /* Flavors of vec_max. */
15685 case VSX_BUILTIN_XVMAXDP:
15686 case P8V_BUILTIN_VMAXSD:
15687 case P8V_BUILTIN_VMAXUD:
15688 case ALTIVEC_BUILTIN_VMAXSB:
15689 case ALTIVEC_BUILTIN_VMAXSH:
15690 case ALTIVEC_BUILTIN_VMAXSW:
15691 case ALTIVEC_BUILTIN_VMAXUB:
15692 case ALTIVEC_BUILTIN_VMAXUH:
15693 case ALTIVEC_BUILTIN_VMAXUW:
15694 case ALTIVEC_BUILTIN_VMAXFP:
15695 arg0 = gimple_call_arg (stmt, 0);
15696 arg1 = gimple_call_arg (stmt, 1);
15697 lhs = gimple_call_lhs (stmt);
15698 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15699 gimple_set_location (g, gimple_location (stmt));
15700 gsi_replace (gsi, g, true);
15701 return true;
15702 /* Flavors of vec_eqv. */
15703 case P8V_BUILTIN_EQV_V16QI:
15704 case P8V_BUILTIN_EQV_V8HI:
15705 case P8V_BUILTIN_EQV_V4SI:
15706 case P8V_BUILTIN_EQV_V4SF:
15707 case P8V_BUILTIN_EQV_V2DF:
15708 case P8V_BUILTIN_EQV_V2DI:
15709 arg0 = gimple_call_arg (stmt, 0);
15710 arg1 = gimple_call_arg (stmt, 1);
15711 lhs = gimple_call_lhs (stmt);
15712 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15713 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15714 gimple_set_location (g, gimple_location (stmt));
15715 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15716 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15717 gimple_set_location (g, gimple_location (stmt));
15718 gsi_replace (gsi, g, true);
15719 return true;
15720 /* Flavors of vec_rotate_left. */
15721 case ALTIVEC_BUILTIN_VRLB:
15722 case ALTIVEC_BUILTIN_VRLH:
15723 case ALTIVEC_BUILTIN_VRLW:
15724 case P8V_BUILTIN_VRLD:
15725 arg0 = gimple_call_arg (stmt, 0);
15726 arg1 = gimple_call_arg (stmt, 1);
15727 lhs = gimple_call_lhs (stmt);
15728 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15729 gimple_set_location (g, gimple_location (stmt));
15730 gsi_replace (gsi, g, true);
15731 return true;
15732 /* Flavors of vector shift right algebraic.
15733 vec_sra{b,h,w} -> vsra{b,h,w}. */
15734 case ALTIVEC_BUILTIN_VSRAB:
15735 case ALTIVEC_BUILTIN_VSRAH:
15736 case ALTIVEC_BUILTIN_VSRAW:
15737 case P8V_BUILTIN_VSRAD:
15738 arg0 = gimple_call_arg (stmt, 0);
15739 arg1 = gimple_call_arg (stmt, 1);
15740 lhs = gimple_call_lhs (stmt);
15741 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
15742 gimple_set_location (g, gimple_location (stmt));
15743 gsi_replace (gsi, g, true);
15744 return true;
15745 /* Flavors of vector shift left.
15746 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15747 case ALTIVEC_BUILTIN_VSLB:
15748 case ALTIVEC_BUILTIN_VSLH:
15749 case ALTIVEC_BUILTIN_VSLW:
15750 case P8V_BUILTIN_VSLD:
15751 {
15752 location_t loc;
15753 gimple_seq stmts = NULL;
15754 arg0 = gimple_call_arg (stmt, 0);
15755 tree arg0_type = TREE_TYPE (arg0);
15756 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15757 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15758 return false;
15759 arg1 = gimple_call_arg (stmt, 1);
15760 tree arg1_type = TREE_TYPE (arg1);
15761 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15762 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15763 loc = gimple_location (stmt);
15764 lhs = gimple_call_lhs (stmt);
15765 /* Force arg1 into the valid range matching the arg0 type. */
15766 /* Build a vector consisting of the max valid bit-size values. */
15767 int n_elts = VECTOR_CST_NELTS (arg1);
15768 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15769 * BITS_PER_UNIT;
15770 tree element_size = build_int_cst (unsigned_element_type,
15771 tree_size_in_bits / n_elts);
15772 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15773 for (int i = 0; i < n_elts; i++)
15774 elts.safe_push (element_size);
15775 tree modulo_tree = elts.build ();
15776 /* Modulo the provided shift value against that vector. */
15777 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15778 unsigned_arg1_type, arg1);
15779 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15780 unsigned_arg1_type, unsigned_arg1,
15781 modulo_tree);
15782 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15783 /* And finally, do the shift. */
15784 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15785 gimple_set_location (g, gimple_location (stmt));
15786 gsi_replace (gsi, g, true);
15787 return true;
15788 }
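/* Illustration of the modulo step above, mechanical from the code: for
   a V4SI shift, tree_size_in_bits is 128 and n_elts is 4, so
   modulo_tree is { 32, 32, 32, 32 } and each element of arg1 is
   reduced mod 32 before the LSHIFT_EXPR is emitted.  */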
15789 /* Flavors of vector shift right. */
15790 case ALTIVEC_BUILTIN_VSRB:
15791 case ALTIVEC_BUILTIN_VSRH:
15792 case ALTIVEC_BUILTIN_VSRW:
15793 case P8V_BUILTIN_VSRD:
15794 {
15795 arg0 = gimple_call_arg (stmt, 0);
15796 arg1 = gimple_call_arg (stmt, 1);
15797 lhs = gimple_call_lhs (stmt);
15798 gimple_seq stmts = NULL;
15799 /* Convert arg0 to unsigned. */
15800 tree arg0_unsigned
15801 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15802 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15803 tree res
15804 = gimple_build (&stmts, RSHIFT_EXPR,
15805 TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
15806 /* Convert result back to the lhs type. */
15807 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15808 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15809 update_call_from_tree (gsi, res);
15810 return true;
15811 }
15812 /* Vector loads. */
15813 case ALTIVEC_BUILTIN_LVX_V16QI:
15814 case ALTIVEC_BUILTIN_LVX_V8HI:
15815 case ALTIVEC_BUILTIN_LVX_V4SI:
15816 case ALTIVEC_BUILTIN_LVX_V4SF:
15817 case ALTIVEC_BUILTIN_LVX_V2DI:
15818 case ALTIVEC_BUILTIN_LVX_V2DF:
15819 case ALTIVEC_BUILTIN_LVX_V1TI:
15820 {
15821 arg0 = gimple_call_arg (stmt, 0); // offset
15822 arg1 = gimple_call_arg (stmt, 1); // address
15823 lhs = gimple_call_lhs (stmt);
15824 location_t loc = gimple_location (stmt);
15825 /* Since arg1 may be cast to a different type, just use ptr_type_node
15826 here instead of trying to enforce TBAA on pointer types. */
15827 tree arg1_type = ptr_type_node;
15828 tree lhs_type = TREE_TYPE (lhs);
15829 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15830 the tree using the value from arg0. The resulting type will match
15831 the type of arg1. */
15832 gimple_seq stmts = NULL;
15833 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15834 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15835 arg1_type, arg1, temp_offset);
15836 /* Mask off any lower bits from the address. */
15837 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15838 arg1_type, temp_addr,
15839 build_int_cst (arg1_type, -16));
15840 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15841 if (!is_gimple_mem_ref_addr (aligned_addr))
15842 {
15843 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15844 gimple *g = gimple_build_assign (t, aligned_addr);
15845 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15846 aligned_addr = t;
15847 }
15848 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15849 take an offset, but since we've already incorporated the offset
15850 above, here we just pass in a zero. */
15851 gimple *g
15852 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15853 build_int_cst (arg1_type, 0)));
15854 gimple_set_location (g, loc);
15855 gsi_replace (gsi, g, true);
15856 return true;
15857 }
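/* Sketch of the GIMPLE produced by the lvx fold above (SSA names are
   invented for illustration):
     off_   = (sizetype) arg0;
     addr_  = arg1 + off_;
     align_ = addr_ & -16;               /+ force 16-byte alignment +/
     lhs    = MEM[(vector T *) align_];  where T is the lhs element type.  */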
15858 /* Vector stores. */
15859 case ALTIVEC_BUILTIN_STVX_V16QI:
15860 case ALTIVEC_BUILTIN_STVX_V8HI:
15861 case ALTIVEC_BUILTIN_STVX_V4SI:
15862 case ALTIVEC_BUILTIN_STVX_V4SF:
15863 case ALTIVEC_BUILTIN_STVX_V2DI:
15864 case ALTIVEC_BUILTIN_STVX_V2DF:
15865 {
15866 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15867 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15868 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15869 location_t loc = gimple_location (stmt);
15870 tree arg0_type = TREE_TYPE (arg0);
15871 /* Use ptr_type_node (no TBAA) for the arg2_type.
15872 FIXME: (Richard) "A proper fix would be to transition this type as
15873 seen from the frontend to GIMPLE, for example in a similar way we
15874 do for MEM_REFs by piggy-backing that on an extra argument, a
15875 constant zero pointer of the alias pointer type to use (which would
15876 also serve as a type indicator of the store itself). I'd use a
15877 target specific internal function for this (not sure if we can have
15878 those target specific, but I guess if it's folded away then that's
15879 fine) and get away with the overload set." */
15880 tree arg2_type = ptr_type_node;
15881 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15882 the tree using the value from arg0. The resulting type will match
15883 the type of arg2. */
15884 gimple_seq stmts = NULL;
15885 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15886 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15887 arg2_type, arg2, temp_offset);
15888 /* Mask off any lower bits from the address. */
15889 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15890 arg2_type, temp_addr,
15891 build_int_cst (arg2_type, -16));
15892 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15893 if (!is_gimple_mem_ref_addr (aligned_addr))
15894 {
15895 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15896 gimple *g = gimple_build_assign (t, aligned_addr);
15897 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15898 aligned_addr = t;
15899 }
15900 /* The desired gimple result should be similar to:
15901 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15902 gimple *g
15903 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15904 build_int_cst (arg2_type, 0)), arg0);
15905 gimple_set_location (g, loc);
15906 gsi_replace (gsi, g, true);
15907 return true;
15908 }
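/* The store fold mirrors the load case above: the address is offset,
   masked with -16, and the value is written through an aligned
   MEM_REF, roughly MEM[(vector T *) align_] = arg0 (a sketch; the
   names are invented).  */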
15909
15910 /* Unaligned vector loads. */
15911 case VSX_BUILTIN_LXVW4X_V16QI:
15912 case VSX_BUILTIN_LXVW4X_V8HI:
15913 case VSX_BUILTIN_LXVW4X_V4SF:
15914 case VSX_BUILTIN_LXVW4X_V4SI:
15915 case VSX_BUILTIN_LXVD2X_V2DF:
15916 case VSX_BUILTIN_LXVD2X_V2DI:
15917 {
15918 arg0 = gimple_call_arg (stmt, 0); // offset
15919 arg1 = gimple_call_arg (stmt, 1); // address
15920 lhs = gimple_call_lhs (stmt);
15921 location_t loc = gimple_location (stmt);
15922 /* Since arg1 may be cast to a different type, just use ptr_type_node
15923 here instead of trying to enforce TBAA on pointer types. */
15924 tree arg1_type = ptr_type_node;
15925 tree lhs_type = TREE_TYPE (lhs);
15926 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15927 required alignment (on Power) is 4 bytes regardless of data type. */
15928 tree align_ltype = build_aligned_type (lhs_type, 4);
15929 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15930 the tree using the value from arg0. The resulting type will match
15931 the type of arg1. */
15932 gimple_seq stmts = NULL;
15933 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15934 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15935 arg1_type, arg1, temp_offset);
15936 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15937 if (!is_gimple_mem_ref_addr (temp_addr))
15938 {
15939 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15940 gimple *g = gimple_build_assign (t, temp_addr);
15941 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15942 temp_addr = t;
15943 }
15944 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15945 take an offset, but since we've already incorporated the offset
15946 above, here we just pass in a zero. */
15947 gimple *g;
15948 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15949 build_int_cst (arg1_type, 0)));
15950 gimple_set_location (g, loc);
15951 gsi_replace (gsi, g, true);
15952 return true;
15953 }
15954
15955 /* Unaligned vector stores. */
15956 case VSX_BUILTIN_STXVW4X_V16QI:
15957 case VSX_BUILTIN_STXVW4X_V8HI:
15958 case VSX_BUILTIN_STXVW4X_V4SF:
15959 case VSX_BUILTIN_STXVW4X_V4SI:
15960 case VSX_BUILTIN_STXVD2X_V2DF:
15961 case VSX_BUILTIN_STXVD2X_V2DI:
15962 {
15963 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15964 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15965 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15966 location_t loc = gimple_location (stmt);
15967 tree arg0_type = TREE_TYPE (arg0);
15968 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15969 tree arg2_type = ptr_type_node;
15970 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15971 required alignment (on Power) is 4 bytes regardless of data type. */
15972 tree align_stype = build_aligned_type (arg0_type, 4);
15973 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15974 the tree using the value from arg1. */
15975 gimple_seq stmts = NULL;
15976 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15977 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15978 arg2_type, arg2, temp_offset);
15979 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15980 if (!is_gimple_mem_ref_addr (temp_addr))
15981 {
15982 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15983 gimple *g = gimple_build_assign (t, temp_addr);
15984 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15985 temp_addr = t;
15986 }
15987 gimple *g;
15988 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15989 build_int_cst (arg2_type, 0)), arg0);
15990 gimple_set_location (g, loc);
15991 gsi_replace (gsi, g, true);
15992 return true;
15993 }
15994
15995 /* Vector fused multiply-add (fma). */
15996 case ALTIVEC_BUILTIN_VMADDFP:
15997 case VSX_BUILTIN_XVMADDDP:
15998 case ALTIVEC_BUILTIN_VMLADDUHM:
15999 {
16000 arg0 = gimple_call_arg (stmt, 0);
16001 arg1 = gimple_call_arg (stmt, 1);
16002 tree arg2 = gimple_call_arg (stmt, 2);
16003 lhs = gimple_call_lhs (stmt);
16004 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
16005 gimple_call_set_lhs (g, lhs);
16006 gimple_call_set_nothrow (g, true);
16007 gimple_set_location (g, gimple_location (stmt));
16008 gsi_replace (gsi, g, true);
16009 return true;
16010 }
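/* For example, vec_madd (a, b, c) is folded to the internal-function
   call
     lhs = .FMA (a, b, c);
   which later expands to the target's fused multiply-add insns.  */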
16011
16012 /* Vector compares: EQ, NE, GE, GT, LE. */
16013 case ALTIVEC_BUILTIN_VCMPEQUB:
16014 case ALTIVEC_BUILTIN_VCMPEQUH:
16015 case ALTIVEC_BUILTIN_VCMPEQUW:
16016 case P8V_BUILTIN_VCMPEQUD:
16017 fold_compare_helper (gsi, EQ_EXPR, stmt);
16018 return true;
16019
16020 case P9V_BUILTIN_CMPNEB:
16021 case P9V_BUILTIN_CMPNEH:
16022 case P9V_BUILTIN_CMPNEW:
16023 fold_compare_helper (gsi, NE_EXPR, stmt);
16024 return true;
16025
16026 case VSX_BUILTIN_CMPGE_16QI:
16027 case VSX_BUILTIN_CMPGE_U16QI:
16028 case VSX_BUILTIN_CMPGE_8HI:
16029 case VSX_BUILTIN_CMPGE_U8HI:
16030 case VSX_BUILTIN_CMPGE_4SI:
16031 case VSX_BUILTIN_CMPGE_U4SI:
16032 case VSX_BUILTIN_CMPGE_2DI:
16033 case VSX_BUILTIN_CMPGE_U2DI:
16034 fold_compare_helper (gsi, GE_EXPR, stmt);
16035 return true;
16036
16037 case ALTIVEC_BUILTIN_VCMPGTSB:
16038 case ALTIVEC_BUILTIN_VCMPGTUB:
16039 case ALTIVEC_BUILTIN_VCMPGTSH:
16040 case ALTIVEC_BUILTIN_VCMPGTUH:
16041 case ALTIVEC_BUILTIN_VCMPGTSW:
16042 case ALTIVEC_BUILTIN_VCMPGTUW:
16043 case P8V_BUILTIN_VCMPGTUD:
16044 case P8V_BUILTIN_VCMPGTSD:
16045 fold_compare_helper (gsi, GT_EXPR, stmt);
16046 return true;
16047
16048 case VSX_BUILTIN_CMPLE_16QI:
16049 case VSX_BUILTIN_CMPLE_U16QI:
16050 case VSX_BUILTIN_CMPLE_8HI:
16051 case VSX_BUILTIN_CMPLE_U8HI:
16052 case VSX_BUILTIN_CMPLE_4SI:
16053 case VSX_BUILTIN_CMPLE_U4SI:
16054 case VSX_BUILTIN_CMPLE_2DI:
16055 case VSX_BUILTIN_CMPLE_U2DI:
16056 fold_compare_helper (gsi, LE_EXPR, stmt);
16057 return true;
16058
16059 /* Flavors of vec_splat_[us]{8,16,32}. */
16060 case ALTIVEC_BUILTIN_VSPLTISB:
16061 case ALTIVEC_BUILTIN_VSPLTISH:
16062 case ALTIVEC_BUILTIN_VSPLTISW:
16063 {
16064 int size;
16065 if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
16066 size = 8;
16067 else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
16068 size = 16;
16069 else
16070 size = 32;
16071
16072 arg0 = gimple_call_arg (stmt, 0);
16073 lhs = gimple_call_lhs (stmt);
16074
16075 /* Only fold the vec_splat_*() if the lower bits of arg 0 form a
16076 5-bit signed constant in the range -16 to +15. */
16077 if (TREE_CODE (arg0) != INTEGER_CST
16078 || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
16079 -16, 15))
16080 return false;
16081 gimple_seq stmts = NULL;
16082 location_t loc = gimple_location (stmt);
16083 tree splat_value = gimple_convert (&stmts, loc,
16084 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16085 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16086 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16087 g = gimple_build_assign (lhs, splat_tree);
16088 gimple_set_location (g, gimple_location (stmt));
16089 gsi_replace (gsi, g, true);
16090 return true;
16091 }
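/* For example, vec_splat_s32 (5) is folded to the vector constant
   { 5, 5, 5, 5 }; a non-constant or out-of-range argument is left
   for rs6000_expand_builtin to handle.  */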
16092
16093 /* Flavors of vec_splat. */
16094 /* a = vec_splat (b, 0x3) becomes a = { b[3], b[3], b[3], ... }; */
16095 case ALTIVEC_BUILTIN_VSPLTB:
16096 case ALTIVEC_BUILTIN_VSPLTH:
16097 case ALTIVEC_BUILTIN_VSPLTW:
16098 case VSX_BUILTIN_XXSPLTD_V2DI:
16099 case VSX_BUILTIN_XXSPLTD_V2DF:
16100 {
16101 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
16102 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
16103 /* Only fold the vec_splat () if arg1 is a constant value and a
16104 valid index into the arg0 vector. */
16105 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16106 if (TREE_CODE (arg1) != INTEGER_CST
16107 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16108 return false;
16109 lhs = gimple_call_lhs (stmt);
16110 tree lhs_type = TREE_TYPE (lhs);
16111 tree arg0_type = TREE_TYPE (arg0);
16112 tree splat;
16113 if (TREE_CODE (arg0) == VECTOR_CST)
16114 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16115 else
16116 {
16117 /* Determine (in bits) the length and start location of the
16118 splat value for a call to the tree_vec_extract helper. */
16119 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16120 * BITS_PER_UNIT / n_elts;
16121 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16122 tree len = build_int_cst (bitsizetype, splat_elem_size);
16123 tree start = build_int_cst (bitsizetype, splat_start_bit);
16124 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16125 len, start);
16126 }
16127 /* And finally, build the new vector. */
16128 tree splat_tree = build_vector_from_val (lhs_type, splat);
16129 g = gimple_build_assign (lhs, splat_tree);
16130 gimple_set_location (g, gimple_location (stmt));
16131 gsi_replace (gsi, g, true);
16132 return true;
16133 }
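/* For example (values mechanical from the code), for a V4SI arg0 with
   arg1 == 2, splat_elem_size is 32 and splat_start_bit is 64: element
   2 is pulled out with tree_vec_extract and then broadcast with
   build_vector_from_val.  */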
16134
16135 /* vec_mergel (integrals). */
16136 case ALTIVEC_BUILTIN_VMRGLH:
16137 case ALTIVEC_BUILTIN_VMRGLW:
16138 case VSX_BUILTIN_XXMRGLW_4SI:
16139 case ALTIVEC_BUILTIN_VMRGLB:
16140 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16141 case VSX_BUILTIN_XXMRGLW_4SF:
16142 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16143 fold_mergehl_helper (gsi, stmt, 1);
16144 return true;
16145 /* vec_mergeh (integrals). */
16146 case ALTIVEC_BUILTIN_VMRGHH:
16147 case ALTIVEC_BUILTIN_VMRGHW:
16148 case VSX_BUILTIN_XXMRGHW_4SI:
16149 case ALTIVEC_BUILTIN_VMRGHB:
16150 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16151 case VSX_BUILTIN_XXMRGHW_4SF:
16152 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16153 fold_mergehl_helper (gsi, stmt, 0);
16154 return true;
16155
16156 /* Flavors of vec_mergee. */
16157 case P8V_BUILTIN_VMRGEW_V4SI:
16158 case P8V_BUILTIN_VMRGEW_V2DI:
16159 case P8V_BUILTIN_VMRGEW_V4SF:
16160 case P8V_BUILTIN_VMRGEW_V2DF:
16161 fold_mergeeo_helper (gsi, stmt, 0);
16162 return true;
16163 /* Flavors of vec_mergeo. */
16164 case P8V_BUILTIN_VMRGOW_V4SI:
16165 case P8V_BUILTIN_VMRGOW_V2DI:
16166 case P8V_BUILTIN_VMRGOW_V4SF:
16167 case P8V_BUILTIN_VMRGOW_V2DF:
16168 fold_mergeeo_helper (gsi, stmt, 1);
16169 return true;
16170
16171 /* d = vec_pack (a, b) */
16172 case P8V_BUILTIN_VPKUDUM:
16173 case ALTIVEC_BUILTIN_VPKUHUM:
16174 case ALTIVEC_BUILTIN_VPKUWUM:
16175 {
16176 arg0 = gimple_call_arg (stmt, 0);
16177 arg1 = gimple_call_arg (stmt, 1);
16178 lhs = gimple_call_lhs (stmt);
16179 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16180 gimple_set_location (g, gimple_location (stmt));
16181 gsi_replace (gsi, g, true);
16182 return true;
16183 }
16184
16185 /* d = vec_unpackh (a) */
16186 /* Note that the VEC_UNPACK_{HI,LO}_EXPR used in the gimple_build_assign
16187 call in this code is sensitive to endianness, and needs to be inverted
16188 to handle both LE and BE targets. */
16189 case ALTIVEC_BUILTIN_VUPKHSB:
16190 case ALTIVEC_BUILTIN_VUPKHSH:
16191 case P8V_BUILTIN_VUPKHSW:
16192 {
16193 arg0 = gimple_call_arg (stmt, 0);
16194 lhs = gimple_call_lhs (stmt);
16195 if (BYTES_BIG_ENDIAN)
16196 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16197 else
16198 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16199 gimple_set_location (g, gimple_location (stmt));
16200 gsi_replace (gsi, g, true);
16201 return true;
16202 }
16203 /* d = vec_unpackl (a) */
16204 case ALTIVEC_BUILTIN_VUPKLSB:
16205 case ALTIVEC_BUILTIN_VUPKLSH:
16206 case P8V_BUILTIN_VUPKLSW:
16207 {
16208 arg0 = gimple_call_arg (stmt, 0);
16209 lhs = gimple_call_lhs (stmt);
16210 if (BYTES_BIG_ENDIAN)
16211 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16212 else
16213 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16214 gimple_set_location (g, gimple_location (stmt));
16215 gsi_replace (gsi, g, true);
16216 return true;
16217 }
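/* For example, vector signed short t = vec_unpackh (vc) (hypothetical
   user code, with vc a vector signed char) is folded to
     t = VEC_UNPACK_HI_EXPR <vc>;   on big-endian targets, and
     t = VEC_UNPACK_LO_EXPR <vc>;   on little-endian targets,
   per the inversion described above.  */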
16218 /* There is no gimple type corresponding to pixel, so just return. */
16219 case ALTIVEC_BUILTIN_VUPKHPX:
16220 case ALTIVEC_BUILTIN_VUPKLPX:
16221 return false;
16222
16223 /* vec_perm. */
16224 case ALTIVEC_BUILTIN_VPERM_16QI:
16225 case ALTIVEC_BUILTIN_VPERM_8HI:
16226 case ALTIVEC_BUILTIN_VPERM_4SI:
16227 case ALTIVEC_BUILTIN_VPERM_2DI:
16228 case ALTIVEC_BUILTIN_VPERM_4SF:
16229 case ALTIVEC_BUILTIN_VPERM_2DF:
16230 {
16231 arg0 = gimple_call_arg (stmt, 0);
16232 arg1 = gimple_call_arg (stmt, 1);
16233 tree permute = gimple_call_arg (stmt, 2);
16234 lhs = gimple_call_lhs (stmt);
16235 location_t loc = gimple_location (stmt);
16236 gimple_seq stmts = NULL;
16237 // Convert arg0 and arg1 to match the type of the permute
16238 // for the VEC_PERM_EXPR operation.
16239 tree permute_type = (TREE_TYPE (permute));
16240 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16241 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16242 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16243 permute_type, arg0_ptype, arg1_ptype,
16244 permute);
16245 // Convert the result back to the desired lhs type upon completion.
16246 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16247 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16248 g = gimple_build_assign (lhs, temp);
16249 gimple_set_location (g, loc);
16250 gsi_replace (gsi, g, true);
16251 return true;
16252 }
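/* Sketch of the vec_perm fold above (SSA names invented): both inputs
   are first converted to the permute's type, the permutation is done
   in that type, and the result is converted back:
     p0_ = <convert to permute type> (arg0);
     p1_ = <convert to permute type> (arg1);
     t_  = VEC_PERM_EXPR <p0_, p1_, permute>;
     lhs = <convert to lhs type> (t_);  */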
16253
16254 default:
16255 if (TARGET_DEBUG_BUILTIN)
16256 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
16257 fn_code, fn_name1, fn_name2);
16258 break;
16259 }
16260
16261 return false;
16262 }
16263
16264 /* Expand an expression EXP that calls a built-in function,
16265 with result going to TARGET if that's convenient
16266 (and in mode MODE if that's convenient).
16267 SUBTARGET may be used as the target for computing one of EXP's operands.
16268 IGNORE is nonzero if the value is to be ignored. */
16269
16270 static rtx
16271 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16272 machine_mode mode ATTRIBUTE_UNUSED,
16273 int ignore ATTRIBUTE_UNUSED)
16274 {
16275 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16276 enum rs6000_builtins fcode
16277 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16278 size_t uns_fcode = (size_t)fcode;
16279 const struct builtin_description *d;
16280 size_t i;
16281 rtx ret;
16282 bool success;
16283 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16284 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16285 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16286
16287 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16288 floating point type, depending on whether long double is the IBM extended
16289 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16290 we only define one variant of the built-in function, and switch the code
16291 when defining it, rather than defining two built-ins and using the
16292 overload table in rs6000-c.c to switch between the two. If we don't have
16293 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16294 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16295 if (FLOAT128_IEEE_P (TFmode))
16296 switch (icode)
16297 {
16298 default:
16299 break;
16300
16301 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16302 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16303 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16304 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16305 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16306 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16307 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16308 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16309 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16310 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16311 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16312 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16313 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16314 }
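/* For example, when TFmode is IEEE 128-bit, a built-in whose table
   entry carries CODE_FOR_sqrtkf2_odd is redirected above to
   CODE_FOR_sqrttf2_odd, so a single built-in definition serves both
   the KFmode and TFmode configurations.  */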
16315
16316 if (TARGET_DEBUG_BUILTIN)
16317 {
16318 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16319 const char *name2 = (icode != CODE_FOR_nothing)
16320 ? get_insn_name ((int) icode)
16321 : "nothing";
16322 const char *name3;
16323
16324 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16325 {
16326 default: name3 = "unknown"; break;
16327 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16328 case RS6000_BTC_UNARY: name3 = "unary"; break;
16329 case RS6000_BTC_BINARY: name3 = "binary"; break;
16330 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16331 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16332 case RS6000_BTC_ABS: name3 = "abs"; break;
16333 case RS6000_BTC_DST: name3 = "dst"; break;
16334 }
16335
16336
16337 fprintf (stderr,
16338 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16339 (name1) ? name1 : "---", fcode,
16340 (name2) ? name2 : "---", (int) icode,
16341 name3,
16342 func_valid_p ? "" : ", not valid");
16343 }
16344
16345 if (!func_valid_p)
16346 {
16347 rs6000_invalid_builtin (fcode);
16348
16349 /* Given it is invalid, just generate a normal call. */
16350 return expand_call (exp, target, ignore);
16351 }
16352
16353 switch (fcode)
16354 {
16355 case RS6000_BUILTIN_RECIP:
16356 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16357
16358 case RS6000_BUILTIN_RECIPF:
16359 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16360
16361 case RS6000_BUILTIN_RSQRTF:
16362 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16363
16364 case RS6000_BUILTIN_RSQRT:
16365 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16366
16367 case POWER7_BUILTIN_BPERMD:
16368 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16369 ? CODE_FOR_bpermd_di
16370 : CODE_FOR_bpermd_si), exp, target);
16371
16372 case RS6000_BUILTIN_GET_TB:
16373 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16374 target);
16375
16376 case RS6000_BUILTIN_MFTB:
16377 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16378 ? CODE_FOR_rs6000_mftb_di
16379 : CODE_FOR_rs6000_mftb_si),
16380 target);
16381
16382 case RS6000_BUILTIN_MFFS:
16383 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16384
16385 case RS6000_BUILTIN_MTFSB0:
16386 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16387
16388 case RS6000_BUILTIN_MTFSB1:
16389 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16390
16391 case RS6000_BUILTIN_SET_FPSCR_RN:
16392 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16393 exp);
16394
16395 case RS6000_BUILTIN_SET_FPSCR_DRN:
16396 return
16397 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16398 exp);
16399
16400 case RS6000_BUILTIN_MFFSL:
16401 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16402
16403 case RS6000_BUILTIN_MTFSF:
16404 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16405
16406 case RS6000_BUILTIN_CPU_INIT:
16407 case RS6000_BUILTIN_CPU_IS:
16408 case RS6000_BUILTIN_CPU_SUPPORTS:
16409 return cpu_expand_builtin (fcode, exp, target);
16410
16411 case MISC_BUILTIN_SPEC_BARRIER:
16412 {
16413 emit_insn (gen_speculation_barrier ());
16414 return NULL_RTX;
16415 }
16416
16417 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16418 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16419 {
16420 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16421 : (int) CODE_FOR_altivec_lvsl_direct);
16422 machine_mode tmode = insn_data[icode2].operand[0].mode;
16423 machine_mode mode = insn_data[icode2].operand[1].mode;
16424 tree arg;
16425 rtx op, addr, pat;
16426
16427 gcc_assert (TARGET_ALTIVEC);
16428
16429 arg = CALL_EXPR_ARG (exp, 0);
16430 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16431 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16432 addr = memory_address (mode, op);
16433 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16434 op = addr;
16435 else
16436 {
16437 /* For the load case we need to negate the address. */
16438 op = gen_reg_rtx (GET_MODE (addr));
16439 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16440 }
16441 op = gen_rtx_MEM (mode, op);
16442
16443 if (target == 0
16444 || GET_MODE (target) != tmode
16445 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16446 target = gen_reg_rtx (tmode);
16447
16448 pat = GEN_FCN (icode2) (target, op);
16449 if (!pat)
16450 return 0;
16451 emit_insn (pat);
16452
16453 return target;
16454 }
16455
16456 case ALTIVEC_BUILTIN_VCFUX:
16457 case ALTIVEC_BUILTIN_VCFSX:
16458 case ALTIVEC_BUILTIN_VCTUXS:
16459 case ALTIVEC_BUILTIN_VCTSXS:
16460 /* FIXME: There's got to be a nicer way to handle this case than
16461 constructing a new CALL_EXPR. */
16462 if (call_expr_nargs (exp) == 1)
16463 {
16464 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16465 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16466 }
16467 break;
16468
16469 /* For the pack and unpack int128 routines, fix up the builtin so it
16470 uses the correct IBM128 type. */
16471 case MISC_BUILTIN_PACK_IF:
16472 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16473 {
16474 icode = CODE_FOR_packtf;
16475 fcode = MISC_BUILTIN_PACK_TF;
16476 uns_fcode = (size_t)fcode;
16477 }
16478 break;
16479
16480 case MISC_BUILTIN_UNPACK_IF:
16481 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16482 {
16483 icode = CODE_FOR_unpacktf;
16484 fcode = MISC_BUILTIN_UNPACK_TF;
16485 uns_fcode = (size_t)fcode;
16486 }
16487 break;
16488
16489 default:
16490 break;
16491 }
16492
16493 if (TARGET_ALTIVEC)
16494 {
16495 ret = altivec_expand_builtin (exp, target, &success);
16496
16497 if (success)
16498 return ret;
16499 }
16500 if (TARGET_HTM)
16501 {
16502 ret = htm_expand_builtin (exp, target, &success);
16503
16504 if (success)
16505 return ret;
16506 }
16507
16508 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16509 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16510 gcc_assert (attr == RS6000_BTC_UNARY
16511 || attr == RS6000_BTC_BINARY
16512 || attr == RS6000_BTC_TERNARY
16513 || attr == RS6000_BTC_SPECIAL);
16514
16515 /* Handle simple unary operations. */
16516 d = bdesc_1arg;
16517 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16518 if (d->code == fcode)
16519 return rs6000_expand_unop_builtin (icode, exp, target);
16520
16521 /* Handle simple binary operations. */
16522 d = bdesc_2arg;
16523 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16524 if (d->code == fcode)
16525 return rs6000_expand_binop_builtin (icode, exp, target);
16526
16527 /* Handle simple ternary operations. */
16528 d = bdesc_3arg;
16529 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16530 if (d->code == fcode)
16531 return rs6000_expand_ternop_builtin (icode, exp, target);
16532
16533 /* Handle simple no-argument operations. */
16534 d = bdesc_0arg;
16535 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16536 if (d->code == fcode)
16537 return rs6000_expand_zeroop_builtin (icode, target);
16538
16539 gcc_unreachable ();
16540 }
16541
16542 /* Create a builtin vector type with a name, taking care not to give
16543 the canonical type a name. */
16544
16545 static tree
16546 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16547 {
16548 tree result = build_vector_type (elt_type, num_elts);
16549
16550 /* Copy so we don't give the canonical type a name. */
16551 result = build_variant_type_copy (result);
16552
16553 add_builtin_type (name, result);
16554
16555 return result;
16556 }
16557
16558 static void
16559 rs6000_init_builtins (void)
16560 {
16561 tree tdecl;
16562 tree ftype;
16563 machine_mode mode;
16564
16565 if (TARGET_DEBUG_BUILTIN)
16566 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16567 (TARGET_ALTIVEC) ? ", altivec" : "",
16568 (TARGET_VSX) ? ", vsx" : "");
16569
16570 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16571 : "__vector long long",
16572 intDI_type_node, 2);
16573 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16574 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16575 intSI_type_node, 4);
16576 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16577 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16578 intHI_type_node, 8);
16579 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16580 intQI_type_node, 16);
16581
16582 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16583 unsigned_intQI_type_node, 16);
16584 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16585 unsigned_intHI_type_node, 8);
16586 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16587 unsigned_intSI_type_node, 4);
16588 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16589 ? "__vector unsigned long"
16590 : "__vector unsigned long long",
16591 unsigned_intDI_type_node, 2);
16592
16593 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16594
16595 const_str_type_node
16596 = build_pointer_type (build_qualified_type (char_type_node,
16597 TYPE_QUAL_CONST));
16598
16599 /* We use V1TI mode as a special container to hold __int128_t items that
16600 must live in VSX registers. */
16601 if (intTI_type_node)
16602 {
16603 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16604 intTI_type_node, 1);
16605 unsigned_V1TI_type_node
16606 = rs6000_vector_type ("__vector unsigned __int128",
16607 unsigned_intTI_type_node, 1);
16608 }
16609
16610 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16611 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16612 'vector unsigned short'. */
16613
16614 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16615 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16616 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16617 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16618 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16619
16620 long_integer_type_internal_node = long_integer_type_node;
16621 long_unsigned_type_internal_node = long_unsigned_type_node;
16622 long_long_integer_type_internal_node = long_long_integer_type_node;
16623 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16624 intQI_type_internal_node = intQI_type_node;
16625 uintQI_type_internal_node = unsigned_intQI_type_node;
16626 intHI_type_internal_node = intHI_type_node;
16627 uintHI_type_internal_node = unsigned_intHI_type_node;
16628 intSI_type_internal_node = intSI_type_node;
16629 uintSI_type_internal_node = unsigned_intSI_type_node;
16630 intDI_type_internal_node = intDI_type_node;
16631 uintDI_type_internal_node = unsigned_intDI_type_node;
16632 intTI_type_internal_node = intTI_type_node;
16633 uintTI_type_internal_node = unsigned_intTI_type_node;
16634 float_type_internal_node = float_type_node;
16635 double_type_internal_node = double_type_node;
16636 long_double_type_internal_node = long_double_type_node;
16637 dfloat64_type_internal_node = dfloat64_type_node;
16638 dfloat128_type_internal_node = dfloat128_type_node;
16639 void_type_internal_node = void_type_node;
16640
16641 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16642 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16643 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16644 format that uses a pair of doubles, depending on the switches and
16645 defaults.
16646
16647 If we don't support either 128-bit IBM double double or IEEE 128-bit
16648 floating point, we need to make sure the type is non-zero or else the
16649 self-test fails during bootstrap.
16650
16651 Always create __ibm128 as a separate type, even if the current long double
16652 format is IBM extended double.
16653
16654 For IEEE 128-bit floating point, always create the type __ieee128. If the
16655 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16656 __ieee128. */
16657 if (TARGET_FLOAT128_TYPE)
16658 {
16659 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16660 ibm128_float_type_node = long_double_type_node;
16661 else
16662 {
16663 ibm128_float_type_node = make_node (REAL_TYPE);
16664 TYPE_PRECISION (ibm128_float_type_node) = 128;
16665 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16666 layout_type (ibm128_float_type_node);
16667 }
16668
16669 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16670 "__ibm128");
16671
16672 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16673 ieee128_float_type_node = long_double_type_node;
16674 else
16675 ieee128_float_type_node = float128_type_node;
16676
16677 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16678 "__ieee128");
16679 }
16680
16681 else
16682 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16683
16684 /* Initialize the modes for builtin_function_type, mapping a machine
16685 mode to its tree type node. */
16686 builtin_mode_to_type[QImode][0] = integer_type_node;
16687 builtin_mode_to_type[HImode][0] = integer_type_node;
16688 builtin_mode_to_type[SImode][0] = intSI_type_node;
16689 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16690 builtin_mode_to_type[DImode][0] = intDI_type_node;
16691 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16692 builtin_mode_to_type[TImode][0] = intTI_type_node;
16693 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16694 builtin_mode_to_type[SFmode][0] = float_type_node;
16695 builtin_mode_to_type[DFmode][0] = double_type_node;
16696 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16697 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16698 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16699 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16700 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16701 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16702 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16703 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16704 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16705 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16706 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16707 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16708 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16709 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16710 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16711 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16712 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16713
16714 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16715 TYPE_NAME (bool_char_type_node) = tdecl;
16716
16717 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16718 TYPE_NAME (bool_short_type_node) = tdecl;
16719
16720 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16721 TYPE_NAME (bool_int_type_node) = tdecl;
16722
16723 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16724 TYPE_NAME (pixel_type_node) = tdecl;
16725
16726 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16727 bool_char_type_node, 16);
16728 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16729 bool_short_type_node, 8);
16730 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16731 bool_int_type_node, 4);
16732 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16733 ? "__vector __bool long"
16734 : "__vector __bool long long",
16735 bool_long_long_type_node, 2);
16736 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16737 pixel_type_node, 8);
16738
16739 /* Create Altivec and VSX builtins on machines with at least the
16740 general purpose extensions (970 and newer) to allow the use of
16741 the target attribute. */
16742 if (TARGET_EXTRA_BUILTINS)
16743 altivec_init_builtins ();
16744 if (TARGET_HTM)
16745 htm_init_builtins ();
16746
16747 if (TARGET_EXTRA_BUILTINS)
16748 rs6000_common_init_builtins ();
16749
16750 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16751 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16752 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16753
16754 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16755 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16756 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16757
16758 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16759 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16760 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16761
16762 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16763 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16764 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16765
16766 mode = (TARGET_64BIT) ? DImode : SImode;
16767 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16768 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16769 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16770
16771 ftype = build_function_type_list (unsigned_intDI_type_node,
16772 NULL_TREE);
16773 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16774
16775 if (TARGET_64BIT)
16776 ftype = build_function_type_list (unsigned_intDI_type_node,
16777 NULL_TREE);
16778 else
16779 ftype = build_function_type_list (unsigned_intSI_type_node,
16780 NULL_TREE);
16781 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16782
16783 ftype = build_function_type_list (double_type_node, NULL_TREE);
16784 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16785
16786 ftype = build_function_type_list (double_type_node, NULL_TREE);
16787 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16788
16789 ftype = build_function_type_list (void_type_node,
16790 intSI_type_node,
16791 NULL_TREE);
16792 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16793
16794 ftype = build_function_type_list (void_type_node,
16795 intSI_type_node,
16796 NULL_TREE);
16797 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16798
16799 ftype = build_function_type_list (void_type_node,
16800 intDI_type_node,
16801 NULL_TREE);
16802 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16803
16804 ftype = build_function_type_list (void_type_node,
16805 intDI_type_node,
16806 NULL_TREE);
16807 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16808
16809 ftype = build_function_type_list (void_type_node,
16810 intSI_type_node, double_type_node,
16811 NULL_TREE);
16812 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16813
16814 ftype = build_function_type_list (void_type_node, NULL_TREE);
16815 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16816 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16817 MISC_BUILTIN_SPEC_BARRIER);
16818
16819 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16820 NULL_TREE);
16821 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16822 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16823
16824 /* AIX libm provides clog as __clog. */
16825 if (TARGET_XCOFF
16826 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16827 set_user_assembler_name (tdecl, "__clog");
16828
16829 #ifdef SUBTARGET_INIT_BUILTINS
16830 SUBTARGET_INIT_BUILTINS;
16831 #endif
16832 }
16833
16834 /* Returns the rs6000 builtin decl for CODE. */
16835
16836 static tree
16837 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16838 {
16839 HOST_WIDE_INT fnmask;
16840
16841 if (code >= RS6000_BUILTIN_COUNT)
16842 return error_mark_node;
16843
16844 fnmask = rs6000_builtin_info[code].mask;
16845 if ((fnmask & rs6000_builtin_mask) != fnmask)
16846 {
16847 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16848 return error_mark_node;
16849 }
16850
16851 return rs6000_builtin_decls[code];
16852 }
16853
16854 static void
16855 altivec_init_builtins (void)
16856 {
16857 const struct builtin_description *d;
16858 size_t i;
16859 tree ftype;
16860 tree decl;
16861 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16862
16863 tree pvoid_type_node = build_pointer_type (void_type_node);
16864
16865 tree pcvoid_type_node
16866 = build_pointer_type (build_qualified_type (void_type_node,
16867 TYPE_QUAL_CONST));
16868
16869 tree int_ftype_opaque
16870 = build_function_type_list (integer_type_node,
16871 opaque_V4SI_type_node, NULL_TREE);
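  /* Note (ours): the "opaque" type below deliberately has no useful
     signature; the builtins declared with it (vec_splats, vec_promote)
     are overloaded and are resolved to a specific function during
     parsing, so the declared type is never used to check arguments.  */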
16872 tree opaque_ftype_opaque
16873 = build_function_type_list (integer_type_node, NULL_TREE);
16874 tree opaque_ftype_opaque_int
16875 = build_function_type_list (opaque_V4SI_type_node,
16876 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16877 tree opaque_ftype_opaque_opaque_int
16878 = build_function_type_list (opaque_V4SI_type_node,
16879 opaque_V4SI_type_node, opaque_V4SI_type_node,
16880 integer_type_node, NULL_TREE);
16881 tree opaque_ftype_opaque_opaque_opaque
16882 = build_function_type_list (opaque_V4SI_type_node,
16883 opaque_V4SI_type_node, opaque_V4SI_type_node,
16884 opaque_V4SI_type_node, NULL_TREE);
16885 tree opaque_ftype_opaque_opaque
16886 = build_function_type_list (opaque_V4SI_type_node,
16887 opaque_V4SI_type_node, opaque_V4SI_type_node,
16888 NULL_TREE);
16889 tree int_ftype_int_opaque_opaque
16890 = build_function_type_list (integer_type_node,
16891 integer_type_node, opaque_V4SI_type_node,
16892 opaque_V4SI_type_node, NULL_TREE);
16893 tree int_ftype_int_v4si_v4si
16894 = build_function_type_list (integer_type_node,
16895 integer_type_node, V4SI_type_node,
16896 V4SI_type_node, NULL_TREE);
16897 tree int_ftype_int_v2di_v2di
16898 = build_function_type_list (integer_type_node,
16899 integer_type_node, V2DI_type_node,
16900 V2DI_type_node, NULL_TREE);
16901 tree void_ftype_v4si
16902 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16903 tree v8hi_ftype_void
16904 = build_function_type_list (V8HI_type_node, NULL_TREE);
16905 tree void_ftype_void
16906 = build_function_type_list (void_type_node, NULL_TREE);
16907 tree void_ftype_int
16908 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16909
16910 tree opaque_ftype_long_pcvoid
16911 = build_function_type_list (opaque_V4SI_type_node,
16912 long_integer_type_node, pcvoid_type_node,
16913 NULL_TREE);
16914 tree v16qi_ftype_long_pcvoid
16915 = build_function_type_list (V16QI_type_node,
16916 long_integer_type_node, pcvoid_type_node,
16917 NULL_TREE);
16918 tree v8hi_ftype_long_pcvoid
16919 = build_function_type_list (V8HI_type_node,
16920 long_integer_type_node, pcvoid_type_node,
16921 NULL_TREE);
16922 tree v4si_ftype_long_pcvoid
16923 = build_function_type_list (V4SI_type_node,
16924 long_integer_type_node, pcvoid_type_node,
16925 NULL_TREE);
16926 tree v4sf_ftype_long_pcvoid
16927 = build_function_type_list (V4SF_type_node,
16928 long_integer_type_node, pcvoid_type_node,
16929 NULL_TREE);
16930 tree v2df_ftype_long_pcvoid
16931 = build_function_type_list (V2DF_type_node,
16932 long_integer_type_node, pcvoid_type_node,
16933 NULL_TREE);
16934 tree v2di_ftype_long_pcvoid
16935 = build_function_type_list (V2DI_type_node,
16936 long_integer_type_node, pcvoid_type_node,
16937 NULL_TREE);
16938 tree v1ti_ftype_long_pcvoid
16939 = build_function_type_list (V1TI_type_node,
16940 long_integer_type_node, pcvoid_type_node,
16941 NULL_TREE);
16942
16943 tree void_ftype_opaque_long_pvoid
16944 = build_function_type_list (void_type_node,
16945 opaque_V4SI_type_node, long_integer_type_node,
16946 pvoid_type_node, NULL_TREE);
16947 tree void_ftype_v4si_long_pvoid
16948 = build_function_type_list (void_type_node,
16949 V4SI_type_node, long_integer_type_node,
16950 pvoid_type_node, NULL_TREE);
16951 tree void_ftype_v16qi_long_pvoid
16952 = build_function_type_list (void_type_node,
16953 V16QI_type_node, long_integer_type_node,
16954 pvoid_type_node, NULL_TREE);
16955
16956 tree void_ftype_v16qi_pvoid_long
16957 = build_function_type_list (void_type_node,
16958 V16QI_type_node, pvoid_type_node,
16959 long_integer_type_node, NULL_TREE);
16960
16961 tree void_ftype_v8hi_long_pvoid
16962 = build_function_type_list (void_type_node,
16963 V8HI_type_node, long_integer_type_node,
16964 pvoid_type_node, NULL_TREE);
16965 tree void_ftype_v4sf_long_pvoid
16966 = build_function_type_list (void_type_node,
16967 V4SF_type_node, long_integer_type_node,
16968 pvoid_type_node, NULL_TREE);
16969 tree void_ftype_v2df_long_pvoid
16970 = build_function_type_list (void_type_node,
16971 V2DF_type_node, long_integer_type_node,
16972 pvoid_type_node, NULL_TREE);
16973 tree void_ftype_v1ti_long_pvoid
16974 = build_function_type_list (void_type_node,
16975 V1TI_type_node, long_integer_type_node,
16976 pvoid_type_node, NULL_TREE);
16977 tree void_ftype_v2di_long_pvoid
16978 = build_function_type_list (void_type_node,
16979 V2DI_type_node, long_integer_type_node,
16980 pvoid_type_node, NULL_TREE);
16981 tree int_ftype_int_v8hi_v8hi
16982 = build_function_type_list (integer_type_node,
16983 integer_type_node, V8HI_type_node,
16984 V8HI_type_node, NULL_TREE);
16985 tree int_ftype_int_v16qi_v16qi
16986 = build_function_type_list (integer_type_node,
16987 integer_type_node, V16QI_type_node,
16988 V16QI_type_node, NULL_TREE);
16989 tree int_ftype_int_v4sf_v4sf
16990 = build_function_type_list (integer_type_node,
16991 integer_type_node, V4SF_type_node,
16992 V4SF_type_node, NULL_TREE);
16993 tree int_ftype_int_v2df_v2df
16994 = build_function_type_list (integer_type_node,
16995 integer_type_node, V2DF_type_node,
16996 V2DF_type_node, NULL_TREE);
16997 tree v2di_ftype_v2di
16998 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16999 tree v4si_ftype_v4si
17000 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17001 tree v8hi_ftype_v8hi
17002 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17003 tree v16qi_ftype_v16qi
17004 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17005 tree v4sf_ftype_v4sf
17006 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17007 tree v2df_ftype_v2df
17008 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17009 tree void_ftype_pcvoid_int_int
17010 = build_function_type_list (void_type_node,
17011 pcvoid_type_node, integer_type_node,
17012 integer_type_node, NULL_TREE);
17013
17014 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17015 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17016 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17017 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17018 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17019 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17020 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17021 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17022 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17023 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17024 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17025 ALTIVEC_BUILTIN_LVXL_V2DF);
17026 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17027 ALTIVEC_BUILTIN_LVXL_V2DI);
17028 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17029 ALTIVEC_BUILTIN_LVXL_V4SF);
17030 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17031 ALTIVEC_BUILTIN_LVXL_V4SI);
17032 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17033 ALTIVEC_BUILTIN_LVXL_V8HI);
17034 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17035 ALTIVEC_BUILTIN_LVXL_V16QI);
17036 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17037 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17038 ALTIVEC_BUILTIN_LVX_V1TI);
17039 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17040 ALTIVEC_BUILTIN_LVX_V2DF);
17041 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17042 ALTIVEC_BUILTIN_LVX_V2DI);
17043 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17044 ALTIVEC_BUILTIN_LVX_V4SF);
17045 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17046 ALTIVEC_BUILTIN_LVX_V4SI);
17047 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17048 ALTIVEC_BUILTIN_LVX_V8HI);
17049 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17050 ALTIVEC_BUILTIN_LVX_V16QI);
17051 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17052 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17053 ALTIVEC_BUILTIN_STVX_V2DF);
17054 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17055 ALTIVEC_BUILTIN_STVX_V2DI);
17056 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17057 ALTIVEC_BUILTIN_STVX_V4SF);
17058 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17059 ALTIVEC_BUILTIN_STVX_V4SI);
17060 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17061 ALTIVEC_BUILTIN_STVX_V8HI);
17062 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17063 ALTIVEC_BUILTIN_STVX_V16QI);
17064 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17065 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17066 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17067 ALTIVEC_BUILTIN_STVXL_V2DF);
17068 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17069 ALTIVEC_BUILTIN_STVXL_V2DI);
17070 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17071 ALTIVEC_BUILTIN_STVXL_V4SF);
17072 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17073 ALTIVEC_BUILTIN_STVXL_V4SI);
17074 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17075 ALTIVEC_BUILTIN_STVXL_V8HI);
17076 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17077 ALTIVEC_BUILTIN_STVXL_V16QI);
17078 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17079 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17080 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17081 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17082 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17083 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17084 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17085 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17086 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17087 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17088 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17089 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17090 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17091 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17092 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17093 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17094
17095 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17096 VSX_BUILTIN_LXVD2X_V2DF);
17097 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17098 VSX_BUILTIN_LXVD2X_V2DI);
17099 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17100 VSX_BUILTIN_LXVW4X_V4SF);
17101 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17102 VSX_BUILTIN_LXVW4X_V4SI);
17103 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17104 VSX_BUILTIN_LXVW4X_V8HI);
17105 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17106 VSX_BUILTIN_LXVW4X_V16QI);
17107 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17108 VSX_BUILTIN_STXVD2X_V2DF);
17109 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17110 VSX_BUILTIN_STXVD2X_V2DI);
17111 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17112 VSX_BUILTIN_STXVW4X_V4SF);
17113 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17114 VSX_BUILTIN_STXVW4X_V4SI);
17115 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17116 VSX_BUILTIN_STXVW4X_V8HI);
17117 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17118 VSX_BUILTIN_STXVW4X_V16QI);
17119
17120 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17121 VSX_BUILTIN_LD_ELEMREV_V2DF);
17122 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17123 VSX_BUILTIN_LD_ELEMREV_V2DI);
17124 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17125 VSX_BUILTIN_LD_ELEMREV_V4SF);
17126 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17127 VSX_BUILTIN_LD_ELEMREV_V4SI);
17128 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17129 VSX_BUILTIN_LD_ELEMREV_V8HI);
17130 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17131 VSX_BUILTIN_LD_ELEMREV_V16QI);
17132 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17133 VSX_BUILTIN_ST_ELEMREV_V2DF);
17134 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17135 VSX_BUILTIN_ST_ELEMREV_V1TI);
17136 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17137 VSX_BUILTIN_ST_ELEMREV_V2DI);
17138 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17139 VSX_BUILTIN_ST_ELEMREV_V4SF);
17140 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17141 VSX_BUILTIN_ST_ELEMREV_V4SI);
17142 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17143 VSX_BUILTIN_ST_ELEMREV_V8HI);
17144 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17145 VSX_BUILTIN_ST_ELEMREV_V16QI);
17146
17147 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17148 VSX_BUILTIN_VEC_LD);
17149 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17150 VSX_BUILTIN_VEC_ST);
17151 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17152 VSX_BUILTIN_VEC_XL);
17153 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17154 VSX_BUILTIN_VEC_XL_BE);
17155 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17156 VSX_BUILTIN_VEC_XST);
17157 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17158 VSX_BUILTIN_VEC_XST_BE);
17159
17160 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17161 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17162 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17163
17164 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17165 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17166 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17167 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17168 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17169 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17170 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17171 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17172 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17173 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17174 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17175 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17176
17177 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17178 ALTIVEC_BUILTIN_VEC_ADDE);
17179 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17180 ALTIVEC_BUILTIN_VEC_ADDEC);
17181 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17182 ALTIVEC_BUILTIN_VEC_CMPNE);
17183 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17184 ALTIVEC_BUILTIN_VEC_MUL);
17185 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17186 ALTIVEC_BUILTIN_VEC_SUBE);
17187 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17188 ALTIVEC_BUILTIN_VEC_SUBEC);
17189
17190 /* Cell builtins. */
17191 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17192 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17193 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17194 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17195
17196 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17197 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17198 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17199 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17200
17201 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17202 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17203 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17204 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17205
17206 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17207 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17208 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17209 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17210
17211 if (TARGET_P9_VECTOR)
17212 {
17213 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17214 P9V_BUILTIN_STXVL);
17215 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17216 P9V_BUILTIN_XST_LEN_R);
17217 }
17218
17219 /* Add the DST variants. */
17220 d = bdesc_dst;
17221 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17222 {
17223 HOST_WIDE_INT mask = d->mask;
17224
17225 /* It is expected that these dst built-in functions may have
17226 d->icode equal to CODE_FOR_nothing. */
17227 if ((mask & builtin_mask) != mask)
17228 {
17229 if (TARGET_DEBUG_BUILTIN)
17230 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17231 d->name);
17232 continue;
17233 }
17234 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17235 }
17236
17237 /* Initialize the predicates. */
17238 d = bdesc_altivec_preds;
17239 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17240 {
17241 machine_mode mode1;
17242 tree type;
17243 HOST_WIDE_INT mask = d->mask;
17244
17245 if ((mask & builtin_mask) != mask)
17246 {
17247 if (TARGET_DEBUG_BUILTIN)
17248 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17249 d->name);
17250 continue;
17251 }
17252
17253 if (rs6000_overloaded_builtin_p (d->code))
17254 mode1 = VOIDmode;
17255 else
17256 {
17257 /* Cannot define builtin if the instruction is disabled. */
17258 gcc_assert (d->icode != CODE_FOR_nothing);
17259 mode1 = insn_data[d->icode].operand[1].mode;
17260 }
17261
17262 switch (mode1)
17263 {
17264 case E_VOIDmode:
17265 type = int_ftype_int_opaque_opaque;
17266 break;
17267 case E_V2DImode:
17268 type = int_ftype_int_v2di_v2di;
17269 break;
17270 case E_V4SImode:
17271 type = int_ftype_int_v4si_v4si;
17272 break;
17273 case E_V8HImode:
17274 type = int_ftype_int_v8hi_v8hi;
17275 break;
17276 case E_V16QImode:
17277 type = int_ftype_int_v16qi_v16qi;
17278 break;
17279 case E_V4SFmode:
17280 type = int_ftype_int_v4sf_v4sf;
17281 break;
17282 case E_V2DFmode:
17283 type = int_ftype_int_v2df_v2df;
17284 break;
17285 default:
17286 gcc_unreachable ();
17287 }
17288
17289 def_builtin (d->name, type, d->code);
17290 }
17291
17292 /* Initialize the abs* operators. */
17293 d = bdesc_abs;
17294 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17295 {
17296 machine_mode mode0;
17297 tree type;
17298 HOST_WIDE_INT mask = d->mask;
17299
17300 if ((mask & builtin_mask) != mask)
17301 {
17302 if (TARGET_DEBUG_BUILTIN)
17303 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17304 d->name);
17305 continue;
17306 }
17307
17308 /* Cannot define builtin if the instruction is disabled. */
17309 gcc_assert (d->icode != CODE_FOR_nothing);
17310 mode0 = insn_data[d->icode].operand[0].mode;
17311
17312 switch (mode0)
17313 {
17314 case E_V2DImode:
17315 type = v2di_ftype_v2di;
17316 break;
17317 case E_V4SImode:
17318 type = v4si_ftype_v4si;
17319 break;
17320 case E_V8HImode:
17321 type = v8hi_ftype_v8hi;
17322 break;
17323 case E_V16QImode:
17324 type = v16qi_ftype_v16qi;
17325 break;
17326 case E_V4SFmode:
17327 type = v4sf_ftype_v4sf;
17328 break;
17329 case E_V2DFmode:
17330 type = v2df_ftype_v2df;
17331 break;
17332 default:
17333 gcc_unreachable ();
17334 }
17335
17336 def_builtin (d->name, type, d->code);
17337 }
17338
17339   /* Initialize the target builtin that implements
17340      targetm.vectorize.builtin_mask_for_load.  */
17341
17342 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17343 v16qi_ftype_long_pcvoid,
17344 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17345 BUILT_IN_MD, NULL, NULL_TREE);
17346 TREE_READONLY (decl) = 1;
17347 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17348 altivec_builtin_mask_for_load = decl;
17349
17350 /* Access to the vec_init patterns. */
17351 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17352 integer_type_node, integer_type_node,
17353 integer_type_node, NULL_TREE);
17354 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17355
17356 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17357 short_integer_type_node,
17358 short_integer_type_node,
17359 short_integer_type_node,
17360 short_integer_type_node,
17361 short_integer_type_node,
17362 short_integer_type_node,
17363 short_integer_type_node, NULL_TREE);
17364 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17365
17366 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17367 char_type_node, char_type_node,
17368 char_type_node, char_type_node,
17369 char_type_node, char_type_node,
17370 char_type_node, char_type_node,
17371 char_type_node, char_type_node,
17372 char_type_node, char_type_node,
17373 char_type_node, char_type_node,
17374 char_type_node, NULL_TREE);
17375 def_builtin ("__builtin_vec_init_v16qi", ftype,
17376 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17377
17378 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17379 float_type_node, float_type_node,
17380 float_type_node, NULL_TREE);
17381 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17382
17383 /* VSX builtins. */
17384 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17385 double_type_node, NULL_TREE);
17386 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17387
17388 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17389 intDI_type_node, NULL_TREE);
17390 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17391
17392 /* Access to the vec_set patterns. */
17393 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17394 intSI_type_node,
17395 integer_type_node, NULL_TREE);
17396 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17397
17398 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17399 intHI_type_node,
17400 integer_type_node, NULL_TREE);
17401 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17402
17403 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17404 intQI_type_node,
17405 integer_type_node, NULL_TREE);
17406 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17407
17408 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17409 float_type_node,
17410 integer_type_node, NULL_TREE);
17411 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17412
17413 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17414 double_type_node,
17415 integer_type_node, NULL_TREE);
17416 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17417
17418 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17419 intDI_type_node,
17420 integer_type_node, NULL_TREE);
17421 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17422
17423 /* Access to the vec_extract patterns. */
17424 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17425 integer_type_node, NULL_TREE);
17426 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17427
17428 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17429 integer_type_node, NULL_TREE);
17430 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17431
17432 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17433 integer_type_node, NULL_TREE);
17434 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17435
17436 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17437 integer_type_node, NULL_TREE);
17438 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17439
17440 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17441 integer_type_node, NULL_TREE);
17442 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17443
17444 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17445 integer_type_node, NULL_TREE);
17446 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17447
17449 if (V1TI_type_node)
17450 {
17451 tree v1ti_ftype_long_pcvoid
17452 = build_function_type_list (V1TI_type_node,
17453 long_integer_type_node, pcvoid_type_node,
17454 NULL_TREE);
17455 tree void_ftype_v1ti_long_pvoid
17456 = build_function_type_list (void_type_node,
17457 V1TI_type_node, long_integer_type_node,
17458 pvoid_type_node, NULL_TREE);
17459 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17460 VSX_BUILTIN_LD_ELEMREV_V1TI);
17461 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17462 VSX_BUILTIN_LXVD2X_V1TI);
17463 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17464 VSX_BUILTIN_STXVD2X_V1TI);
17465 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17466 					NULL_TREE);
17467 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17468 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17469 intTI_type_node,
17470 integer_type_node, NULL_TREE);
17471 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17472 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17473 integer_type_node, NULL_TREE);
17474 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17475 }
17477 }
17478
17479 static void
17480 htm_init_builtins (void)
17481 {
17482 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17483 const struct builtin_description *d;
17484 size_t i;
17485
17486 d = bdesc_htm;
17487 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17488 {
17489 tree op[MAX_HTM_OPERANDS], type;
17490 HOST_WIDE_INT mask = d->mask;
17491 unsigned attr = rs6000_builtin_info[d->code].attr;
17492 bool void_func = (attr & RS6000_BTC_VOID);
17493 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17494 int nopnds = 0;
17495 tree gpr_type_node;
17496 tree rettype;
17497 tree argtype;
17498
17499 /* It is expected that these htm built-in functions may have
17500 d->icode equal to CODE_FOR_nothing. */
17501
17502 if (TARGET_32BIT && TARGET_POWERPC64)
17503 gpr_type_node = long_long_unsigned_type_node;
17504 else
17505 gpr_type_node = long_unsigned_type_node;
17506
17507 if (attr & RS6000_BTC_SPR)
17508 {
17509 rettype = gpr_type_node;
17510 argtype = gpr_type_node;
17511 }
17512 else if (d->code == HTM_BUILTIN_TABORTDC
17513 || d->code == HTM_BUILTIN_TABORTDCI)
17514 {
17515 rettype = unsigned_type_node;
17516 argtype = gpr_type_node;
17517 }
17518 else
17519 {
17520 rettype = unsigned_type_node;
17521 argtype = unsigned_type_node;
17522 }
17523
17524 if ((mask & builtin_mask) != mask)
17525 {
17526 if (TARGET_DEBUG_BUILTIN)
17527 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17528 continue;
17529 }
17530
17531 if (d->name == 0)
17532 {
17533 if (TARGET_DEBUG_BUILTIN)
17534 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17535 (long unsigned) i);
17536 continue;
17537 }
17538
17539 op[nopnds++] = (void_func) ? void_type_node : rettype;
17540
17541 if (attr_args == RS6000_BTC_UNARY)
17542 op[nopnds++] = argtype;
17543 else if (attr_args == RS6000_BTC_BINARY)
17544 {
17545 op[nopnds++] = argtype;
17546 op[nopnds++] = argtype;
17547 }
17548 else if (attr_args == RS6000_BTC_TERNARY)
17549 {
17550 op[nopnds++] = argtype;
17551 op[nopnds++] = argtype;
17552 op[nopnds++] = argtype;
17553 }
17554
17555 switch (nopnds)
17556 {
17557 case 1:
17558 type = build_function_type_list (op[0], NULL_TREE);
17559 break;
17560 case 2:
17561 type = build_function_type_list (op[0], op[1], NULL_TREE);
17562 break;
17563 case 3:
17564 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17565 break;
17566 case 4:
17567 type = build_function_type_list (op[0], op[1], op[2], op[3],
17568 NULL_TREE);
17569 break;
17570 default:
17571 gcc_unreachable ();
17572 }
17573
17574 def_builtin (d->name, type, d->code);
17575 }
17576 }
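
/* A sketch of the signatures the loop above produces (our examples,
   not an exhaustive list).  SPR accessors flagged RS6000_BTC_SPR use
   the GPR-width type, so on a 64-bit target

	unsigned long __builtin_get_texasr (void);

   while most other HTM builtins use plain unsigned int, e.g.

	unsigned int __builtin_tbegin (unsigned int);  */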
17577
17578 /* Hash function for builtin functions with up to 3 arguments and a return
17579 type. */
17580 hashval_t
17581 builtin_hasher::hash (builtin_hash_struct *bh)
17582 {
17583 unsigned ret = 0;
17584 int i;
17585
17586 for (i = 0; i < 4; i++)
17587 {
17588 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17589 ret = (ret * 2) + bh->uns_p[i];
17590 }
17591
17592 return ret;
17593 }
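
/* Viewed arithmetically, the loop above encodes the four (mode, uns_p)
   pairs as digits of a mixed-radix number with bases M and 2, where
   M = MAX_MACHINE_MODE:

	hash = ((((((m0*2 + u0)*M + m1)*2 + u1)*M + m2)*2 + u2)*M + m3)*2 + u3

   so two distinct signatures hash identically only if the unsigned
   accumulator wraps around.  */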
17594
17595 /* Compare builtin hash entries P1 and P2 for equivalence.  */
17596 bool
17597 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17598 {
17599 return ((p1->mode[0] == p2->mode[0])
17600 && (p1->mode[1] == p2->mode[1])
17601 && (p1->mode[2] == p2->mode[2])
17602 && (p1->mode[3] == p2->mode[3])
17603 && (p1->uns_p[0] == p2->uns_p[0])
17604 && (p1->uns_p[1] == p2->uns_p[1])
17605 && (p1->uns_p[2] == p2->uns_p[2])
17606 && (p1->uns_p[3] == p2->uns_p[3]));
17607 }
17608
17609 /* Map types for builtin functions with an explicit return type and up to 3
17610    arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
17611    of the unused arguments.  */
17612 static tree
17613 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17614 machine_mode mode_arg1, machine_mode mode_arg2,
17615 enum rs6000_builtins builtin, const char *name)
17616 {
17617 struct builtin_hash_struct h;
17618 struct builtin_hash_struct *h2;
17619 int num_args = 3;
17620 int i;
17621 tree ret_type = NULL_TREE;
17622 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17623
17624 /* Create builtin_hash_table. */
17625 if (builtin_hash_table == NULL)
17626 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17627
17628 h.type = NULL_TREE;
17629 h.mode[0] = mode_ret;
17630 h.mode[1] = mode_arg0;
17631 h.mode[2] = mode_arg1;
17632 h.mode[3] = mode_arg2;
17633 h.uns_p[0] = 0;
17634 h.uns_p[1] = 0;
17635 h.uns_p[2] = 0;
17636 h.uns_p[3] = 0;
17637
17638   /* If the builtin function produces unsigned results or takes unsigned
17639      arguments, and it is returned as a decl for the vectorizer (such as
17640      widening multiplies, permute), make sure the arguments and return value
17641      are typed correctly.  */
17642 switch (builtin)
17643 {
17644 /* unsigned 1 argument functions. */
17645 case CRYPTO_BUILTIN_VSBOX:
17646 case P8V_BUILTIN_VGBBD:
17647 case MISC_BUILTIN_CDTBCD:
17648 case MISC_BUILTIN_CBCDTD:
17649 h.uns_p[0] = 1;
17650 h.uns_p[1] = 1;
17651 break;
17652
17653 /* unsigned 2 argument functions. */
17654 case ALTIVEC_BUILTIN_VMULEUB:
17655 case ALTIVEC_BUILTIN_VMULEUH:
17656 case P8V_BUILTIN_VMULEUW:
17657 case ALTIVEC_BUILTIN_VMULOUB:
17658 case ALTIVEC_BUILTIN_VMULOUH:
17659 case P8V_BUILTIN_VMULOUW:
17660 case CRYPTO_BUILTIN_VCIPHER:
17661 case CRYPTO_BUILTIN_VCIPHERLAST:
17662 case CRYPTO_BUILTIN_VNCIPHER:
17663 case CRYPTO_BUILTIN_VNCIPHERLAST:
17664 case CRYPTO_BUILTIN_VPMSUMB:
17665 case CRYPTO_BUILTIN_VPMSUMH:
17666 case CRYPTO_BUILTIN_VPMSUMW:
17667 case CRYPTO_BUILTIN_VPMSUMD:
17668 case CRYPTO_BUILTIN_VPMSUM:
17669 case MISC_BUILTIN_ADDG6S:
17670 case MISC_BUILTIN_DIVWEU:
17671 case MISC_BUILTIN_DIVDEU:
17672 case VSX_BUILTIN_UDIV_V2DI:
17673 case ALTIVEC_BUILTIN_VMAXUB:
17674 case ALTIVEC_BUILTIN_VMINUB:
17675 case ALTIVEC_BUILTIN_VMAXUH:
17676 case ALTIVEC_BUILTIN_VMINUH:
17677 case ALTIVEC_BUILTIN_VMAXUW:
17678 case ALTIVEC_BUILTIN_VMINUW:
17679 case P8V_BUILTIN_VMAXUD:
17680 case P8V_BUILTIN_VMINUD:
17681 h.uns_p[0] = 1;
17682 h.uns_p[1] = 1;
17683 h.uns_p[2] = 1;
17684 break;
17685
17686 /* unsigned 3 argument functions. */
17687 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17688 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17689 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17690 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17691 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17692 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17693 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17694 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17695 case VSX_BUILTIN_VPERM_16QI_UNS:
17696 case VSX_BUILTIN_VPERM_8HI_UNS:
17697 case VSX_BUILTIN_VPERM_4SI_UNS:
17698 case VSX_BUILTIN_VPERM_2DI_UNS:
17699 case VSX_BUILTIN_XXSEL_16QI_UNS:
17700 case VSX_BUILTIN_XXSEL_8HI_UNS:
17701 case VSX_BUILTIN_XXSEL_4SI_UNS:
17702 case VSX_BUILTIN_XXSEL_2DI_UNS:
17703 case CRYPTO_BUILTIN_VPERMXOR:
17704 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17705 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17706 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17707 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17708 case CRYPTO_BUILTIN_VSHASIGMAW:
17709 case CRYPTO_BUILTIN_VSHASIGMAD:
17710 case CRYPTO_BUILTIN_VSHASIGMA:
17711 h.uns_p[0] = 1;
17712 h.uns_p[1] = 1;
17713 h.uns_p[2] = 1;
17714 h.uns_p[3] = 1;
17715 break;
17716
17717 /* signed permute functions with unsigned char mask. */
17718 case ALTIVEC_BUILTIN_VPERM_16QI:
17719 case ALTIVEC_BUILTIN_VPERM_8HI:
17720 case ALTIVEC_BUILTIN_VPERM_4SI:
17721 case ALTIVEC_BUILTIN_VPERM_4SF:
17722 case ALTIVEC_BUILTIN_VPERM_2DI:
17723 case ALTIVEC_BUILTIN_VPERM_2DF:
17724 case VSX_BUILTIN_VPERM_16QI:
17725 case VSX_BUILTIN_VPERM_8HI:
17726 case VSX_BUILTIN_VPERM_4SI:
17727 case VSX_BUILTIN_VPERM_4SF:
17728 case VSX_BUILTIN_VPERM_2DI:
17729 case VSX_BUILTIN_VPERM_2DF:
17730 h.uns_p[3] = 1;
17731 break;
17732
17733 /* unsigned args, signed return. */
17734 case VSX_BUILTIN_XVCVUXDSP:
17735 case VSX_BUILTIN_XVCVUXDDP_UNS:
17736 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17737 h.uns_p[1] = 1;
17738 break;
17739
17740 /* signed args, unsigned return. */
17741 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17742 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17743 case MISC_BUILTIN_UNPACK_TD:
17744 case MISC_BUILTIN_UNPACK_V1TI:
17745 h.uns_p[0] = 1;
17746 break;
17747
17748 /* unsigned arguments, bool return (compares). */
17749 case ALTIVEC_BUILTIN_VCMPEQUB:
17750 case ALTIVEC_BUILTIN_VCMPEQUH:
17751 case ALTIVEC_BUILTIN_VCMPEQUW:
17752 case P8V_BUILTIN_VCMPEQUD:
17753 case VSX_BUILTIN_CMPGE_U16QI:
17754 case VSX_BUILTIN_CMPGE_U8HI:
17755 case VSX_BUILTIN_CMPGE_U4SI:
17756 case VSX_BUILTIN_CMPGE_U2DI:
17757 case ALTIVEC_BUILTIN_VCMPGTUB:
17758 case ALTIVEC_BUILTIN_VCMPGTUH:
17759 case ALTIVEC_BUILTIN_VCMPGTUW:
17760 case P8V_BUILTIN_VCMPGTUD:
17761 h.uns_p[1] = 1;
17762 h.uns_p[2] = 1;
17763 break;
17764
17765 /* unsigned arguments for 128-bit pack instructions. */
17766 case MISC_BUILTIN_PACK_TD:
17767 case MISC_BUILTIN_PACK_V1TI:
17768 h.uns_p[1] = 1;
17769 h.uns_p[2] = 1;
17770 break;
17771
17772 /* unsigned second arguments (vector shift right). */
17773 case ALTIVEC_BUILTIN_VSRB:
17774 case ALTIVEC_BUILTIN_VSRH:
17775 case ALTIVEC_BUILTIN_VSRW:
17776 case P8V_BUILTIN_VSRD:
17777 h.uns_p[2] = 1;
17778 break;
17779
17780 default:
17781 break;
17782 }
17783
17784 /* Figure out how many args are present. */
17785 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17786 num_args--;
17787
17788 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17789 if (!ret_type && h.uns_p[0])
17790 ret_type = builtin_mode_to_type[h.mode[0]][0];
17791
17792 if (!ret_type)
17793 fatal_error (input_location,
17794 "internal error: builtin function %qs had an unexpected "
17795 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17796
17797 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17798 arg_type[i] = NULL_TREE;
17799
17800 for (i = 0; i < num_args; i++)
17801 {
17802 int m = (int) h.mode[i+1];
17803 int uns_p = h.uns_p[i+1];
17804
17805 arg_type[i] = builtin_mode_to_type[m][uns_p];
17806 if (!arg_type[i] && uns_p)
17807 arg_type[i] = builtin_mode_to_type[m][0];
17808
17809 if (!arg_type[i])
17810 fatal_error (input_location,
17811 "internal error: builtin function %qs, argument %d "
17812 "had unexpected argument type %qs", name, i,
17813 GET_MODE_NAME (m));
17814 }
17815
17816 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17817 if (*found == NULL)
17818 {
17819 h2 = ggc_alloc<builtin_hash_struct> ();
17820 *h2 = h;
17821 *found = h2;
17822
17823 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17824 arg_type[2], NULL_TREE);
17825 }
17826
17827 return (*found)->type;
17828 }
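
/* Worked example (ours): for the even multiply of unsigned bytes, the
   caller passes the insn modes and the builtin code, as in

	builtin_function_type (V8HImode, V16QImode, V16QImode, VOIDmode,
			       ALTIVEC_BUILTIN_VMULEUB,
			       "__builtin_altivec_vmuleub");

   The switch above sets uns_p[0..2], so the resulting type is
   "vector unsigned short (vector unsigned char, vector unsigned char)",
   and the tree is memoized in builtin_hash_table so later identical
   signatures share one node.  */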
17829
17830 static void
17831 rs6000_common_init_builtins (void)
17832 {
17833 const struct builtin_description *d;
17834 size_t i;
17835
17836 tree opaque_ftype_opaque = NULL_TREE;
17837 tree opaque_ftype_opaque_opaque = NULL_TREE;
17838 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17839 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17840
17841 /* Create Altivec and VSX builtins on machines with at least the
17842 general purpose extensions (970 and newer) to allow the use of
17843 the target attribute. */
17844
17845 if (TARGET_EXTRA_BUILTINS)
17846 builtin_mask |= RS6000_BTM_COMMON;
17847
17848 /* Add the ternary operators. */
17849 d = bdesc_3arg;
17850 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17851 {
17852 tree type;
17853 HOST_WIDE_INT mask = d->mask;
17854
17855 if ((mask & builtin_mask) != mask)
17856 {
17857 if (TARGET_DEBUG_BUILTIN)
17858 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17859 continue;
17860 }
17861
17862 if (rs6000_overloaded_builtin_p (d->code))
17863 {
17864 if (! (type = opaque_ftype_opaque_opaque_opaque))
17865 type = opaque_ftype_opaque_opaque_opaque
17866 = build_function_type_list (opaque_V4SI_type_node,
17867 opaque_V4SI_type_node,
17868 opaque_V4SI_type_node,
17869 opaque_V4SI_type_node,
17870 NULL_TREE);
17871 }
17872 else
17873 {
17874 enum insn_code icode = d->icode;
17875 if (d->name == 0)
17876 {
17877 if (TARGET_DEBUG_BUILTIN)
17878 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17879 (long unsigned)i);
17880
17881 continue;
17882 }
17883
17884 if (icode == CODE_FOR_nothing)
17885 {
17886 if (TARGET_DEBUG_BUILTIN)
17887 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17888 d->name);
17889
17890 continue;
17891 }
17892
17893 type = builtin_function_type (insn_data[icode].operand[0].mode,
17894 insn_data[icode].operand[1].mode,
17895 insn_data[icode].operand[2].mode,
17896 insn_data[icode].operand[3].mode,
17897 d->code, d->name);
17898 }
17899
17900 def_builtin (d->name, type, d->code);
17901 }
17902
17903 /* Add the binary operators. */
17904 d = bdesc_2arg;
17905 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17906 {
17907 machine_mode mode0, mode1, mode2;
17908 tree type;
17909 HOST_WIDE_INT mask = d->mask;
17910
17911 if ((mask & builtin_mask) != mask)
17912 {
17913 if (TARGET_DEBUG_BUILTIN)
17914 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17915 continue;
17916 }
17917
17918 if (rs6000_overloaded_builtin_p (d->code))
17919 {
17920 if (! (type = opaque_ftype_opaque_opaque))
17921 type = opaque_ftype_opaque_opaque
17922 = build_function_type_list (opaque_V4SI_type_node,
17923 opaque_V4SI_type_node,
17924 opaque_V4SI_type_node,
17925 NULL_TREE);
17926 }
17927 else
17928 {
17929 enum insn_code icode = d->icode;
17930 if (d->name == 0)
17931 {
17932 if (TARGET_DEBUG_BUILTIN)
17933 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17934 (long unsigned)i);
17935
17936 continue;
17937 }
17938
17939 if (icode == CODE_FOR_nothing)
17940 {
17941 if (TARGET_DEBUG_BUILTIN)
17942 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17943 d->name);
17944
17945 continue;
17946 }
17947
17948 mode0 = insn_data[icode].operand[0].mode;
17949 mode1 = insn_data[icode].operand[1].mode;
17950 mode2 = insn_data[icode].operand[2].mode;
17951
17952 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17953 d->code, d->name);
17954 }
17955
17956 def_builtin (d->name, type, d->code);
17957 }
17958
17959 /* Add the simple unary operators. */
17960 d = bdesc_1arg;
17961 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17962 {
17963 machine_mode mode0, mode1;
17964 tree type;
17965 HOST_WIDE_INT mask = d->mask;
17966
17967 if ((mask & builtin_mask) != mask)
17968 {
17969 if (TARGET_DEBUG_BUILTIN)
17970 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17971 continue;
17972 }
17973
17974 if (rs6000_overloaded_builtin_p (d->code))
17975 {
17976 if (! (type = opaque_ftype_opaque))
17977 type = opaque_ftype_opaque
17978 = build_function_type_list (opaque_V4SI_type_node,
17979 opaque_V4SI_type_node,
17980 NULL_TREE);
17981 }
17982 else
17983 {
17984 enum insn_code icode = d->icode;
17985 if (d->name == 0)
17986 {
17987 if (TARGET_DEBUG_BUILTIN)
17988 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17989 (long unsigned)i);
17990
17991 continue;
17992 }
17993
17994 if (icode == CODE_FOR_nothing)
17995 {
17996 if (TARGET_DEBUG_BUILTIN)
17997 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17998 d->name);
17999
18000 continue;
18001 }
18002
18003 mode0 = insn_data[icode].operand[0].mode;
18004 mode1 = insn_data[icode].operand[1].mode;
18005
18006 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18007 d->code, d->name);
18008 }
18009
18010 def_builtin (d->name, type, d->code);
18011 }
18012
18013 /* Add the simple no-argument operators. */
18014 d = bdesc_0arg;
18015 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18016 {
18017 machine_mode mode0;
18018 tree type;
18019 HOST_WIDE_INT mask = d->mask;
18020
18021 if ((mask & builtin_mask) != mask)
18022 {
18023 if (TARGET_DEBUG_BUILTIN)
18024 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18025 continue;
18026 }
18027 if (rs6000_overloaded_builtin_p (d->code))
18028 {
18029 if (!opaque_ftype_opaque)
18030 opaque_ftype_opaque
18031 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18032 type = opaque_ftype_opaque;
18033 }
18034 else
18035 {
18036 enum insn_code icode = d->icode;
18037 if (d->name == 0)
18038 {
18039 if (TARGET_DEBUG_BUILTIN)
18040 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18041 (long unsigned) i);
18042 continue;
18043 }
18044 if (icode == CODE_FOR_nothing)
18045 {
18046 if (TARGET_DEBUG_BUILTIN)
18047 fprintf (stderr,
18048 "rs6000_builtin, skip no-argument %s (no code)\n",
18049 d->name);
18050 continue;
18051 }
18052 mode0 = insn_data[icode].operand[0].mode;
18053 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18054 d->code, d->name);
18055 }
18056 def_builtin (d->name, type, d->code);
18057 }
18058 }
18059
18060 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18061 static void
18062 init_float128_ibm (machine_mode mode)
18063 {
18064 if (!TARGET_XL_COMPAT)
18065 {
18066 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18067 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18068 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18069 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18070
18071 if (!TARGET_HARD_FLOAT)
18072 {
18073 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18074 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18075 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18076 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18077 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18078 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18079 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18080 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18081
18082 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18083 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18084 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18085 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18086 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18087 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18088 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18089 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18090 }
18091 }
18092 else
18093 {
18094 set_optab_libfunc (add_optab, mode, "_xlqadd");
18095 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18096 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18097 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18098 }
18099
18100 /* Add various conversions for IFmode to use the traditional TFmode
18101 names. */
18102 if (mode == IFmode)
18103 {
18104 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18105 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18106 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18107 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18108 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18109 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18110
18111 if (TARGET_POWERPC64)
18112 {
18113 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18114 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18115 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18116 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18117 }
18118 }
18119 }
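
/* Illustration (a sketch): with IBM extended double and without
   -mxl-compat, an addition such as

	long double c = a + b;

   becomes a call to __gcc_qadd, whose libgcc implementation receives
   the high and low double halves of each operand as four separate
   double arguments.  */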
18120
18121 /* Create a decl for either complex long double multiply or complex long double
18122 divide when long double is IEEE 128-bit floating point. We can't use
18123    __multc3 and __divtc3 because those names were already taken by the
18124    original long double format using IBM extended double.  The complex
18125    multiply/divide functions are builtins with a complex result and 4 scalar inputs.  */
18126
18127 static void
18128 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18129 {
18130 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18131 name, NULL_TREE);
18132
18133 set_builtin_decl (fncode, fndecl, true);
18134
18135 if (TARGET_DEBUG_BUILTIN)
18136 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18139 }
18140
18141 /* Set up IEEE 128-bit floating point routines. Use different names if the
18142 arguments can be passed in a vector register. The historical PowerPC
18143 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18144 continue to use that if we aren't using vector registers to pass IEEE
18145 128-bit floating point. */
18146
18147 static void
18148 init_float128_ieee (machine_mode mode)
18149 {
18150 if (FLOAT128_VECTOR_P (mode))
18151 {
18152 static bool complex_muldiv_init_p = false;
18153
18154 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18155 we have clone or target attributes, this will be called a second
18156 time. We want to create the built-in function only once. */
18157 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18158 {
18159 complex_muldiv_init_p = true;
18160 built_in_function fncode_mul =
18161 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18162 - MIN_MODE_COMPLEX_FLOAT);
18163 built_in_function fncode_div =
18164 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18165 - MIN_MODE_COMPLEX_FLOAT);
18166
18167 tree fntype = build_function_type_list (complex_long_double_type_node,
18168 long_double_type_node,
18169 long_double_type_node,
18170 long_double_type_node,
18171 long_double_type_node,
18172 NULL_TREE);
18173
18174 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18175 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18176 }
18177
18178 set_optab_libfunc (add_optab, mode, "__addkf3");
18179 set_optab_libfunc (sub_optab, mode, "__subkf3");
18180 set_optab_libfunc (neg_optab, mode, "__negkf2");
18181 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18182 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18183 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18184 set_optab_libfunc (abs_optab, mode, "__abskf2");
18185 set_optab_libfunc (powi_optab, mode, "__powikf2");
18186
18187 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18188 set_optab_libfunc (ne_optab, mode, "__nekf2");
18189 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18190 set_optab_libfunc (ge_optab, mode, "__gekf2");
18191 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18192 set_optab_libfunc (le_optab, mode, "__lekf2");
18193 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18194
18195 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18196 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18197 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18198 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18199
18200 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18201 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18202 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18203
18204 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18205 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18206 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18207
18208 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18209 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18210 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18211 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18212 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18213 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18214
18215 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18216 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18217 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18218 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18219
18220 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18221 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18222 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18223 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18224
18225 if (TARGET_POWERPC64)
18226 {
18227 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18228 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18229 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18230 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18231 }
18232 }
18233
18234 else
18235 {
18236 set_optab_libfunc (add_optab, mode, "_q_add");
18237 set_optab_libfunc (sub_optab, mode, "_q_sub");
18238 set_optab_libfunc (neg_optab, mode, "_q_neg");
18239 set_optab_libfunc (smul_optab, mode, "_q_mul");
18240 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18241 if (TARGET_PPC_GPOPT)
18242 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18243
18244 set_optab_libfunc (eq_optab, mode, "_q_feq");
18245 set_optab_libfunc (ne_optab, mode, "_q_fne");
18246 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18247 set_optab_libfunc (ge_optab, mode, "_q_fge");
18248 set_optab_libfunc (lt_optab, mode, "_q_flt");
18249 set_optab_libfunc (le_optab, mode, "_q_fle");
18250
18251 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18252 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18253 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18254 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18255 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18256 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18257 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18258 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18259 }
18260 }
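
/* Sketch of the effect: for KFmode (or TFmode under
   -mabi=ieeelongdouble), a software 128-bit multiply compiles to

	bl __mulkf3

   instead of the historical _q_mul from the else branch.  Targets with
   hardware IEEE 128-bit support (power9 and later) emit instructions
   such as xsmulqp directly and only fall back to these libfuncs when
   no instruction exists.  */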
18261
18262 static void
18263 rs6000_init_libfuncs (void)
18264 {
18265 /* __float128 support. */
18266 if (TARGET_FLOAT128_TYPE)
18267 {
18268 init_float128_ibm (IFmode);
18269 init_float128_ieee (KFmode);
18270 }
18271
18272 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18273 if (TARGET_LONG_DOUBLE_128)
18274 {
18275 if (!TARGET_IEEEQUAD)
18276 init_float128_ibm (TFmode);
18277
18278       /* IEEE 128-bit, including the 32-bit SVR4 quad floating point routines.  */
18279 else
18280 init_float128_ieee (TFmode);
18281 }
18282 }
18283
18284 /* Emit a potentially record-form instruction, setting DST from SRC.
18285 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18286 signed comparison of DST with zero. If DOT is 1, the generated RTL
18287 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18288 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18289 a separate COMPARE. */
18290
18291 void
18292 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18293 {
18294 if (dot == 0)
18295 {
18296 emit_move_insn (dst, src);
18297 return;
18298 }
18299
18300 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18301 {
18302 emit_move_insn (dst, src);
18303 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18304 return;
18305 }
18306
18307 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18308 if (dot == 1)
18309 {
18310 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18311 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18312 }
18313 else
18314 {
18315 rtx set = gen_rtx_SET (dst, src);
18316 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18317 }
18318 }
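
/* Schematically, the cases above emit (our notation):

     dot == 0:          (set DST SRC)
     CCREG is not CR0:  (set DST SRC)
                        (set CCREG (compare DST 0))
     dot == 1, CR0:     (parallel [(set CCREG (compare SRC 0))
                                   (clobber DST)])
     dot == 2, CR0:     (parallel [(set CCREG (compare SRC 0))
                                   (set DST SRC)])  */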
18319
18320 \f
18321 /* A validation routine: say whether CODE, a condition code, and MODE
18322 match. The other alternatives either don't make sense or should
18323 never be generated. */
18324
18325 void
18326 validate_condition_mode (enum rtx_code code, machine_mode mode)
18327 {
18328 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18329 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18330 && GET_MODE_CLASS (mode) == MODE_CC);
18331
18332 /* These don't make sense. */
18333 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18334 || mode != CCUNSmode);
18335
18336 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18337 || mode == CCUNSmode);
18338
18339 gcc_assert (mode == CCFPmode
18340 || (code != ORDERED && code != UNORDERED
18341 && code != UNEQ && code != LTGT
18342 && code != UNGT && code != UNLT
18343 && code != UNGE && code != UNLE));
18344
18345 /* These should never be generated except for
18346 flag_finite_math_only. */
18347 gcc_assert (mode != CCFPmode
18348 || flag_finite_math_only
18349 || (code != LE && code != GE
18350 && code != UNEQ && code != LTGT
18351 && code != UNGT && code != UNLT));
18352
18353 /* These are invalid; the information is not there. */
18354 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18355 }
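
/* Examples (illustrative): (gtu:CCUNS ...) and (eq:CCEQ ...) are valid;
   (gt:CCUNS ...) trips the first assert, since signed comparisons never
   use CCUNSmode, and (lt:CCEQ ...) trips the last, since CCEQmode only
   records equality.  */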
18356
18357 \f
18358 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18359 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
18360 non-null, store there the bit offset (counted from the right) where
18361 the single stretch of 1 bits begins; and similarly for B, the bit
18362 offset where it ends. */
18363
18364 bool
18365 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18366 {
18367 unsigned HOST_WIDE_INT val = INTVAL (mask);
18368 unsigned HOST_WIDE_INT bit;
18369 int nb, ne;
18370 int n = GET_MODE_PRECISION (mode);
18371
18372 if (mode != DImode && mode != SImode)
18373 return false;
18374
18375 if (INTVAL (mask) >= 0)
18376 {
18377 bit = val & -val;
18378 ne = exact_log2 (bit);
18379 nb = exact_log2 (val + bit);
18380 }
18381 else if (val + 1 == 0)
18382 {
18383 nb = n;
18384 ne = 0;
18385 }
18386 else if (val & 1)
18387 {
18388 val = ~val;
18389 bit = val & -val;
18390 nb = exact_log2 (bit);
18391 ne = exact_log2 (val + bit);
18392 }
18393 else
18394 {
18395 bit = val & -val;
18396 ne = exact_log2 (bit);
18397 if (val + bit == 0)
18398 nb = n;
18399 else
18400 nb = 0;
18401 }
18402
18403 nb--;
18404
18405 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18406 return false;
18407
18408 if (b)
18409 *b = nb;
18410 if (e)
18411 *e = ne;
18412
18413 return true;
18414 }
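
/* A self-contained sketch (added for exposition; not used by GCC) of the
   bit trick above for the non-wrapping case: isolate the lowest set bit
   with "val & -val", add it to collapse the run of ones, and check that
   the sum is zero or a single power of two.  */
#if 0
#include <stdbool.h>

static bool
is_contiguous_run (unsigned long long val)
{
  if (val == 0)
    return false;
  unsigned long long low = val & -val;	/* lowest set bit */
  unsigned long long sum = val + low;	/* collapses the run if contiguous */
  return (sum & (sum - 1)) == 0;	/* zero or a power of two */
}
#endif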
18415
18416 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18417 or rldicr instruction, to implement an AND with it in mode MODE. */
18418
18419 bool
18420 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18421 {
18422 int nb, ne;
18423
18424 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18425 return false;
18426
18427 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18428 does not wrap. */
18429 if (mode == DImode)
18430 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18431
18432 /* For SImode, rlwinm can do everything. */
18433 if (mode == SImode)
18434 return (nb < 32 && ne < 32);
18435
18436 return false;
18437 }
18438
18439 /* Return the instruction template for an AND with mask in mode MODE, with
18440 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18441
18442 const char *
18443 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18444 {
18445 int nb, ne;
18446
18447 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18448 gcc_unreachable ();
18449
18450 if (mode == DImode && ne == 0)
18451 {
18452 operands[3] = GEN_INT (63 - nb);
18453 if (dot)
18454 return "rldicl. %0,%1,0,%3";
18455 return "rldicl %0,%1,0,%3";
18456 }
18457
18458 if (mode == DImode && nb == 63)
18459 {
18460 operands[3] = GEN_INT (63 - ne);
18461 if (dot)
18462 return "rldicr. %0,%1,0,%3";
18463 return "rldicr %0,%1,0,%3";
18464 }
18465
18466 if (nb < 32 && ne < 32)
18467 {
18468 operands[3] = GEN_INT (31 - nb);
18469 operands[4] = GEN_INT (31 - ne);
18470 if (dot)
18471 return "rlwinm. %0,%1,0,%3,%4";
18472 return "rlwinm %0,%1,0,%3,%4";
18473 }
18474
18475 gcc_unreachable ();
18476 }
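
/* Worked example (illustrative): an AND with 0xff0 in DImode has ne == 4
   and nb == 11, so neither 64-bit form applies, but the rlwinm arm does:
   operands[3] = 31 - 11 = 20 and operands[4] = 31 - 4 = 27, giving
   "rlwinm %0,%1,0,20,27".  */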
18477
18478 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18479 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18480 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18481
18482 bool
18483 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18484 {
18485 int nb, ne;
18486
18487 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18488 return false;
18489
18490 int n = GET_MODE_PRECISION (mode);
18491 int sh = -1;
18492
18493 if (CONST_INT_P (XEXP (shift, 1)))
18494 {
18495 sh = INTVAL (XEXP (shift, 1));
18496 if (sh < 0 || sh >= n)
18497 return false;
18498 }
18499
18500 rtx_code code = GET_CODE (shift);
18501
18502 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18503 if (sh == 0)
18504 code = ROTATE;
18505
18506 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18507 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18508 code = ASHIFT;
18509 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18510 {
18511 code = LSHIFTRT;
18512 sh = n - sh;
18513 }
18514
18515 /* DImode rotates need rld*. */
18516 if (mode == DImode && code == ROTATE)
18517 return (nb == 63 || ne == 0 || ne == sh);
18518
18519 /* SImode rotates need rlw*. */
18520 if (mode == SImode && code == ROTATE)
18521 return (nb < 32 && ne < 32 && sh < 32);
18522
18523 /* Wrap-around masks are only okay for rotates. */
18524 if (ne > nb)
18525 return false;
18526
18527 /* Variable shifts are only okay for rotates. */
18528 if (sh < 0)
18529 return false;
18530
18531 /* Don't allow ASHIFT if the mask is wrong for that. */
18532 if (code == ASHIFT && ne < sh)
18533 return false;
18534
18535 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18536 if the mask is wrong for that. */
18537 if (nb < 32 && ne < 32 && sh < 32
18538 && !(code == LSHIFTRT && nb >= 32 - sh))
18539 return true;
18540
18541 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18542 if the mask is wrong for that. */
18543 if (code == LSHIFTRT)
18544 sh = 64 - sh;
18545 if (nb == 63 || ne == 0 || ne == sh)
18546 return !(code == LSHIFTRT && nb >= sh);
18547
18548 return false;
18549 }
18550
18551 /* Return the instruction template for a shift with mask in mode MODE, with
18552 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18553
18554 const char *
18555 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18556 {
18557 int nb, ne;
18558
18559 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18560 gcc_unreachable ();
18561
18562 if (mode == DImode && ne == 0)
18563 {
18564 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18565 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18566 operands[3] = GEN_INT (63 - nb);
18567 if (dot)
18568 return "rld%I2cl. %0,%1,%2,%3";
18569 return "rld%I2cl %0,%1,%2,%3";
18570 }
18571
18572 if (mode == DImode && nb == 63)
18573 {
18574 operands[3] = GEN_INT (63 - ne);
18575 if (dot)
18576 return "rld%I2cr. %0,%1,%2,%3";
18577 return "rld%I2cr %0,%1,%2,%3";
18578 }
18579
18580 if (mode == DImode
18581 && GET_CODE (operands[4]) != LSHIFTRT
18582 && CONST_INT_P (operands[2])
18583 && ne == INTVAL (operands[2]))
18584 {
18585 operands[3] = GEN_INT (63 - nb);
18586 if (dot)
18587 return "rld%I2c. %0,%1,%2,%3";
18588 return "rld%I2c %0,%1,%2,%3";
18589 }
18590
18591 if (nb < 32 && ne < 32)
18592 {
18593 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18594 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18595 operands[3] = GEN_INT (31 - nb);
18596 operands[4] = GEN_INT (31 - ne);
18597 /* This insn can also be a 64-bit rotate with mask that really makes
18598 it just a shift right (with mask); the %h below is to adjust for
18599 that situation (shift count is >= 32 in that case). */
18600 if (dot)
18601 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18602 return "rlw%I2nm %0,%1,%h2,%3,%4";
18603 }
18604
18605 gcc_unreachable ();
18606 }
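
/* Worked example (illustrative): (ashift:SI x 8) masked with 0xff00 has
   nb == 15 and ne == 8, so the rlwinm arm applies: operands[3] =
   31 - 15 = 16 and operands[4] = 31 - 8 = 23, giving
   "rlwinm %0,%1,8,16,23".  */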
18607
18608 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18609 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18610 ASHIFT, or LSHIFTRT) in mode MODE. */
18611
18612 bool
18613 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18614 {
18615 int nb, ne;
18616
18617 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18618 return false;
18619
18620 int n = GET_MODE_PRECISION (mode);
18621
18622 int sh = INTVAL (XEXP (shift, 1));
18623 if (sh < 0 || sh >= n)
18624 return false;
18625
18626 rtx_code code = GET_CODE (shift);
18627
18628 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18629 if (sh == 0)
18630 code = ROTATE;
18631
18632 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18633 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18634 code = ASHIFT;
18635 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18636 {
18637 code = LSHIFTRT;
18638 sh = n - sh;
18639 }
18640
18641 /* DImode rotates need rldimi. */
18642 if (mode == DImode && code == ROTATE)
18643 return (ne == sh);
18644
18645 /* SImode rotates need rlwimi. */
18646 if (mode == SImode && code == ROTATE)
18647 return (nb < 32 && ne < 32 && sh < 32);
18648
18649 /* Wrap-around masks are only okay for rotates. */
18650 if (ne > nb)
18651 return false;
18652
18653 /* Don't allow ASHIFT if the mask is wrong for that. */
18654 if (code == ASHIFT && ne < sh)
18655 return false;
18656
18657 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18658 if the mask is wrong for that. */
18659 if (nb < 32 && ne < 32 && sh < 32
18660 && !(code == LSHIFTRT && nb >= 32 - sh))
18661 return true;
18662
18663 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18664 if the mask is wrong for that. */
18665 if (code == LSHIFTRT)
18666 sh = 64 - sh;
18667 if (ne == sh)
18668 return !(code == LSHIFTRT && nb >= sh);
18669
18670 return false;
18671 }
18672
18673 /* Return the instruction template for an insert with mask in mode MODE, with
18674 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18675
18676 const char *
18677 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18678 {
18679 int nb, ne;
18680
18681 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18682 gcc_unreachable ();
18683
18684 /* Prefer rldimi because rlwimi is cracked. */
18685 if (TARGET_POWERPC64
18686 && (!dot || mode == DImode)
18687 && GET_CODE (operands[4]) != LSHIFTRT
18688 && ne == INTVAL (operands[2]))
18689 {
18690 operands[3] = GEN_INT (63 - nb);
18691 if (dot)
18692 return "rldimi. %0,%1,%2,%3";
18693 return "rldimi %0,%1,%2,%3";
18694 }
18695
18696 if (nb < 32 && ne < 32)
18697 {
18698 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18699 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18700 operands[3] = GEN_INT (31 - nb);
18701 operands[4] = GEN_INT (31 - ne);
18702 if (dot)
18703 return "rlwimi. %0,%1,%2,%3,%4";
18704 return "rlwimi %0,%1,%2,%3,%4";
18705 }
18706
18707 gcc_unreachable ();
18708 }
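
/* Worked example (illustrative): an insert under (ashift:SI x 16) with
   mask 0x00ff0000 has ne == 16 and nb == 23.  On a 32-bit target the
   rlwimi arm applies: operands[3] = 31 - 23 = 8 and operands[4] =
   31 - 16 = 15, giving "rlwimi %0,%1,16,8,15".  On a 64-bit target the
   rldimi arm is preferred instead, giving "rldimi %0,%1,16,40".  */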
18709
18710 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18711 using two machine instructions. */
18712
18713 bool
18714 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18715 {
18716 /* There are two kinds of AND we can handle with two insns:
18717 1) those we can do with two rl* insns;
18718 2) ori[s];xori[s].
18719
18720 We do not handle that last case yet. */
18721
18722 /* If there is just one stretch of ones, we can do it. */
18723 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18724 return true;
18725
18726 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18727 one insn, we can do the whole thing with two. */
18728 unsigned HOST_WIDE_INT val = INTVAL (c);
18729 unsigned HOST_WIDE_INT bit1 = val & -val;
18730 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18731 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18732 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18733 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18734 }
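
/* A self-contained sketch (added for exposition; not used by GCC) of the
   hole-filling arithmetic above.  For val = 0x1c7 (0b111000111): bit1 =
   0x001, bit2 = 0x008, val1 = 0x1c0, bit3 = 0x040, and val + bit3 - bit2
   = 0x1ff, i.e. the lowest hole is filled.  */
#if 0
static unsigned long long
fill_lowest_hole (unsigned long long val)
{
  unsigned long long bit1 = val & -val;		 /* lowest set bit */
  unsigned long long bit2 = (val + bit1) & ~val; /* lowest bit of the hole */
  unsigned long long val1 = (val + bit1) & val;	 /* VAL without its low run */
  unsigned long long bit3 = val1 & -val1;	 /* first bit above the hole */
  return val + bit3 - bit2;			 /* VAL with the hole filled */
}
#endif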
18735
18736 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18737 If EXPAND is true, split rotate-and-mask instructions we generate to
18738 their constituent parts as well (this is used during expand); if DOT
18739 is 1, make the last insn a record-form instruction clobbering the
18740 destination GPR and setting the CC reg (from operands[3]); if 2, set
18741 that GPR as well as the CC reg. */
18742
18743 void
18744 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18745 {
18746 gcc_assert (!(expand && dot));
18747
18748 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18749
18750 /* If it is one stretch of ones, it must be DImode (one SImode stretch
18751 takes a single rlwinm); shift left, mask, then shift right. This gives
18752 better code than masking without shifts, or shifting first right then left. */
18753 int nb, ne;
18754 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18755 {
18756 gcc_assert (mode == DImode);
18757
18758 int shift = 63 - nb;
18759 if (expand)
18760 {
18761 rtx tmp1 = gen_reg_rtx (DImode);
18762 rtx tmp2 = gen_reg_rtx (DImode);
18763 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18764 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18765 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18766 }
18767 else
18768 {
18769 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18770 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18771 emit_move_insn (operands[0], tmp);
18772 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18773 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18774 }
18775 return;
18776 }
18777
18778 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18779 that does the rest. */
18780 unsigned HOST_WIDE_INT bit1 = val & -val;
18781 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18782 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18783 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18784
18785 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18786 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18787
18788 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18789
18790 /* Two "no-rotate"-and-mask instructions, for SImode. */
18791 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18792 {
18793 gcc_assert (mode == SImode);
18794
18795 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18796 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18797 emit_move_insn (reg, tmp);
18798 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18799 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18800 return;
18801 }
18802
18803 gcc_assert (mode == DImode);
18804
18805 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18806 insns; we have to do the first in SImode, because it wraps. */
18807 if (mask2 <= 0xffffffff
18808 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18809 {
18810 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18811 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18812 GEN_INT (mask1));
18813 rtx reg_low = gen_lowpart (SImode, reg);
18814 emit_move_insn (reg_low, tmp);
18815 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18816 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18817 return;
18818 }
18819
18820 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18821 at the top end), rotate back and clear the other hole. */
18822 int right = exact_log2 (bit3);
18823 int left = 64 - right;
18824
18825 /* Rotate the mask too. */
18826 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18827
18828 if (expand)
18829 {
18830 rtx tmp1 = gen_reg_rtx (DImode);
18831 rtx tmp2 = gen_reg_rtx (DImode);
18832 rtx tmp3 = gen_reg_rtx (DImode);
18833 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18834 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18835 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18836 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18837 }
18838 else
18839 {
18840 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18841 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18842 emit_move_insn (operands[0], tmp);
18843 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18844 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18845 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18846 }
18847 }
18848 \f
18849 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
18850 for lfq and stfq insns iff the registers are hard registers. */
18851
18852 int
18853 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18854 {
18855 /* We might have been passed a SUBREG. */
18856 if (!REG_P (reg1) || !REG_P (reg2))
18857 return 0;
18858
18859 /* We might have been passed non floating point registers. */
18860 if (!FP_REGNO_P (REGNO (reg1))
18861 || !FP_REGNO_P (REGNO (reg2)))
18862 return 0;
18863
18864 return (REGNO (reg1) == REGNO (reg2) - 1);
18865 }
18866
18867 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18868 addr1 and addr2 must be in consecutive memory locations
18869 (addr2 == addr1 + 8). */
18870
18871 int
18872 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18873 {
18874 rtx addr1, addr2;
18875 unsigned int reg1, reg2;
18876 int offset1, offset2;
18877
18878 /* The mems cannot be volatile. */
18879 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18880 return 0;
18881
18882 addr1 = XEXP (mem1, 0);
18883 addr2 = XEXP (mem2, 0);
18884
18885 /* Extract an offset (if used) from the first addr. */
18886 if (GET_CODE (addr1) == PLUS)
18887 {
18888 /* If not a REG, return zero. */
18889 if (!REG_P (XEXP (addr1, 0)))
18890 return 0;
18891 else
18892 {
18893 reg1 = REGNO (XEXP (addr1, 0));
18894 /* The offset must be constant! */
18895 if (!CONST_INT_P (XEXP (addr1, 1)))
18896 return 0;
18897 offset1 = INTVAL (XEXP (addr1, 1));
18898 }
18899 }
18900 else if (!REG_P (addr1))
18901 return 0;
18902 else
18903 {
18904 reg1 = REGNO (addr1);
18905 /* This was a simple (mem (reg)) expression. Offset is 0. */
18906 offset1 = 0;
18907 }
18908
18909 /* And now for the second addr. */
18910 if (GET_CODE (addr2) == PLUS)
18911 {
18912 /* If not a REG, return zero. */
18913 if (!REG_P (XEXP (addr2, 0)))
18914 return 0;
18915 else
18916 {
18917 reg2 = REGNO (XEXP (addr2, 0));
18918 /* The offset must be constant. */
18919 if (!CONST_INT_P (XEXP (addr2, 1)))
18920 return 0;
18921 offset2 = INTVAL (XEXP (addr2, 1));
18922 }
18923 }
18924 else if (!REG_P (addr2))
18925 return 0;
18926 else
18927 {
18928 reg2 = REGNO (addr2);
18929 /* This was a simple (mem (reg)) expression. Offset is 0. */
18930 offset2 = 0;
18931 }
18932
18933 /* Both of these must have the same base register. */
18934 if (reg1 != reg2)
18935 return 0;
18936
18937 /* The offset for the second addr must be 8 more than the first addr. */
18938 if (offset2 != offset1 + 8)
18939 return 0;
18940
18941 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18942 instructions. */
18943 return 1;
18944 }
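
/* Example (illustrative): (mem:DF (reg 9)) paired with
   (mem:DF (plus (reg 9) (const_int 8))) shares base register 9 with
   offsets 0 and 8, so the pair is suitable for lfq/stfq.  */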
18945 \f
18946 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18947 need to use DDmode; in all other cases we can use the same mode. */
18948 static machine_mode
18949 rs6000_secondary_memory_needed_mode (machine_mode mode)
18950 {
18951 if (lra_in_progress && mode == SDmode)
18952 return DDmode;
18953 return mode;
18954 }
18955
18956 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18957 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18958 only work on the traditional altivec registers, note if an altivec register
18959 was chosen. */
18960
18961 static enum rs6000_reg_type
18962 register_to_reg_type (rtx reg, bool *is_altivec)
18963 {
18964 HOST_WIDE_INT regno;
18965 enum reg_class rclass;
18966
18967 if (SUBREG_P (reg))
18968 reg = SUBREG_REG (reg);
18969
18970 if (!REG_P (reg))
18971 return NO_REG_TYPE;
18972
18973 regno = REGNO (reg);
18974 if (!HARD_REGISTER_NUM_P (regno))
18975 {
18976 if (!lra_in_progress && !reload_completed)
18977 return PSEUDO_REG_TYPE;
18978
18979 regno = true_regnum (reg);
18980 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
18981 return PSEUDO_REG_TYPE;
18982 }
18983
18984 gcc_assert (regno >= 0);
18985
18986 if (is_altivec && ALTIVEC_REGNO_P (regno))
18987 *is_altivec = true;
18988
18989 rclass = rs6000_regno_regclass[regno];
18990 return reg_class_to_reg_type[(int)rclass];
18991 }
18992
18993 /* Helper function to return the cost of adding a TOC entry address. */
18994
18995 static inline int
18996 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18997 {
18998 int ret;
18999
19000 if (TARGET_CMODEL != CMODEL_SMALL)
19001 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19002
19003 else
19004 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19005
19006 return ret;
19007 }
19008
19009 /* Helper function for rs6000_secondary_reload to determine whether the memory
19010 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19011 needs reloading. Return negative if the memory is not handled by the memory
19012 helper functions (so a different reload method should be tried), 0 if no
19013 additional instructions are needed, and positive to give the extra cost of
19014 using the memory. */
19015
19016 static int
19017 rs6000_secondary_reload_memory (rtx addr,
19018 enum reg_class rclass,
19019 machine_mode mode)
19020 {
19021 int extra_cost = 0;
19022 rtx reg, and_arg, plus_arg0, plus_arg1;
19023 addr_mask_type addr_mask;
19024 const char *type = NULL;
19025 const char *fail_msg = NULL;
19026
19027 if (GPR_REG_CLASS_P (rclass))
19028 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19029
19030 else if (rclass == FLOAT_REGS)
19031 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19032
19033 else if (rclass == ALTIVEC_REGS)
19034 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19035
19036 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19037 else if (rclass == VSX_REGS)
19038 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19039 & ~RELOAD_REG_AND_M16);
19040
19041 /* If the register allocator hasn't made up its mind yet on the register
19042 class to use, settle on defaults to use. */
19043 else if (rclass == NO_REGS)
19044 {
19045 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19046 & ~RELOAD_REG_AND_M16);
19047
19048 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19049 addr_mask &= ~(RELOAD_REG_INDEXED
19050 | RELOAD_REG_PRE_INCDEC
19051 | RELOAD_REG_PRE_MODIFY);
19052 }
19053
19054 else
19055 addr_mask = 0;
19056
19057 /* If the register isn't valid in this register class, just return now. */
19058 if ((addr_mask & RELOAD_REG_VALID) == 0)
19059 {
19060 if (TARGET_DEBUG_ADDR)
19061 {
19062 fprintf (stderr,
19063 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19064 "not valid in class\n",
19065 GET_MODE_NAME (mode), reg_class_names[rclass]);
19066 debug_rtx (addr);
19067 }
19068
19069 return -1;
19070 }
19071
19072 switch (GET_CODE (addr))
19073 {
19074 /* Does the register class support auto update forms for this mode? We
19075 don't need a scratch register, since the powerpc only supports
19076 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19077 case PRE_INC:
19078 case PRE_DEC:
19079 reg = XEXP (addr, 0);
19080 if (!base_reg_operand (reg, GET_MODE (reg)))
19081 {
19082 fail_msg = "no base register #1";
19083 extra_cost = -1;
19084 }
19085
19086 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19087 {
19088 extra_cost = 1;
19089 type = "update";
19090 }
19091 break;
19092
19093 case PRE_MODIFY:
19094 reg = XEXP (addr, 0);
19095 plus_arg1 = XEXP (addr, 1);
19096 if (!base_reg_operand (reg, GET_MODE (reg))
19097 || GET_CODE (plus_arg1) != PLUS
19098 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19099 {
19100 fail_msg = "bad PRE_MODIFY";
19101 extra_cost = -1;
19102 }
19103
19104 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19105 {
19106 extra_cost = 1;
19107 type = "update";
19108 }
19109 break;
19110
19111 /* Do we need to simulate AND -16 to clear the bottom address bits used
19112 in VMX load/stores? Only allow the AND for vector sizes. */
19113 case AND:
19114 and_arg = XEXP (addr, 0);
19115 if (GET_MODE_SIZE (mode) != 16
19116 || !CONST_INT_P (XEXP (addr, 1))
19117 || INTVAL (XEXP (addr, 1)) != -16)
19118 {
19119 fail_msg = "bad Altivec AND #1";
19120 extra_cost = -1;
19121 }
19122
19123 if (rclass != ALTIVEC_REGS)
19124 {
19125 if (legitimate_indirect_address_p (and_arg, false))
19126 extra_cost = 1;
19127
19128 else if (legitimate_indexed_address_p (and_arg, false))
19129 extra_cost = 2;
19130
19131 else
19132 {
19133 fail_msg = "bad Altivec AND #2";
19134 extra_cost = -1;
19135 }
19136
19137 type = "and";
19138 }
19139 break;
19140
19141 /* If this is an indirect address, make sure it is a base register. */
19142 case REG:
19143 case SUBREG:
19144 if (!legitimate_indirect_address_p (addr, false))
19145 {
19146 extra_cost = 1;
19147 type = "move";
19148 }
19149 break;
19150
19151 /* If this is an indexed address, make sure the register class can handle
19152 indexed addresses for this mode. */
19153 case PLUS:
19154 plus_arg0 = XEXP (addr, 0);
19155 plus_arg1 = XEXP (addr, 1);
19156
19157 /* (plus (plus (reg) (constant)) (constant)) is generated during
19158 push_reload processing, so handle it now. */
19159 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19160 {
19161 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19162 {
19163 extra_cost = 1;
19164 type = "offset";
19165 }
19166 }
19167
19168 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19169 push_reload processing, so handle it now. */
19170 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19171 {
19172 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19173 {
19174 extra_cost = 1;
19175 type = "indexed #2";
19176 }
19177 }
19178
19179 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19180 {
19181 fail_msg = "no base register #2";
19182 extra_cost = -1;
19183 }
19184
19185 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19186 {
19187 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19188 || !legitimate_indexed_address_p (addr, false))
19189 {
19190 extra_cost = 1;
19191 type = "indexed";
19192 }
19193 }
19194
19195 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19196 && CONST_INT_P (plus_arg1))
19197 {
19198 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19199 {
19200 extra_cost = 1;
19201 type = "vector d-form offset";
19202 }
19203 }
19204
19205 /* Make sure the register class can handle offset addresses. */
19206 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19207 {
19208 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19209 {
19210 extra_cost = 1;
19211 type = "offset #2";
19212 }
19213 }
19214
19215 else
19216 {
19217 fail_msg = "bad PLUS";
19218 extra_cost = -1;
19219 }
19220
19221 break;
19222
19223 case LO_SUM:
19224 /* Quad offsets are restricted and can't handle normal addresses. */
19225 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19226 {
19227 extra_cost = -1;
19228 type = "vector d-form lo_sum";
19229 }
19230
19231 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19232 {
19233 fail_msg = "bad LO_SUM";
19234 extra_cost = -1;
19235 }
19236
19237 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19238 {
19239 extra_cost = 1;
19240 type = "lo_sum";
19241 }
19242 break;
19243
19244 /* Static addresses need to create a TOC entry. */
19245 case CONST:
19246 case SYMBOL_REF:
19247 case LABEL_REF:
19248 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19249 {
19250 extra_cost = -1;
19251 type = "vector d-form lo_sum #2";
19252 }
19253
19254 else
19255 {
19256 type = "address";
19257 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19258 }
19259 break;
19260
19261 /* TOC references look like offsetable memory. */
19262 case UNSPEC:
19263 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19264 {
19265 fail_msg = "bad UNSPEC";
19266 extra_cost = -1;
19267 }
19268
19269 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19270 {
19271 extra_cost = -1;
19272 type = "vector d-form lo_sum #3";
19273 }
19274
19275 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19276 {
19277 extra_cost = 1;
19278 type = "toc reference";
19279 }
19280 break;
19281
19282 default:
19283 {
19284 fail_msg = "bad address";
19285 extra_cost = -1;
19286 }
19287 }
19288
19289 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19290 {
19291 if (extra_cost < 0)
19292 fprintf (stderr,
19293 "rs6000_secondary_reload_memory error: mode = %s, "
19294 "class = %s, addr_mask = '%s', %s\n",
19295 GET_MODE_NAME (mode),
19296 reg_class_names[rclass],
19297 rs6000_debug_addr_mask (addr_mask, false),
19298 (fail_msg != NULL) ? fail_msg : "<bad address>");
19299
19300 else
19301 fprintf (stderr,
19302 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19303 "addr_mask = '%s', extra cost = %d, %s\n",
19304 GET_MODE_NAME (mode),
19305 reg_class_names[rclass],
19306 rs6000_debug_addr_mask (addr_mask, false),
19307 extra_cost,
19308 (type) ? type : "<none>");
19309
19310 debug_rtx (addr);
19311 }
19312
19313 return extra_cost;
19314 }
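
/* Example (illustrative): for a GPR register class, an address of the
   form (plus (plus (reg) (const_int)) (const_int)), as created during
   push_reload processing, costs one extra instruction when the class
   lacks offset addressing, while a plain (reg) base register address
   needs nothing extra and returns 0.  */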
19315
19316 /* Helper function for rs6000_secondary_reload to return true if a move to a
19317 different register class is really a simple move. */
19318
19319 static bool
19320 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19321 enum rs6000_reg_type from_type,
19322 machine_mode mode)
19323 {
19324 int size = GET_MODE_SIZE (mode);
19325
19326 /* Add support for various direct moves available. In this function, we only
19327 look at cases where we don't need any extra registers, and one or more
19328 simple move insns are issued. Originally small integers are not allowed
19329 in FPR/VSX registers. Single precision binary floating is not a simple
19330 move because we need to convert to the single precision memory layout.
19331 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19332 need special direct move handling, which we do not support yet. */
19333 if (TARGET_DIRECT_MOVE
19334 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19335 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19336 {
19337 if (TARGET_POWERPC64)
19338 {
19339 /* ISA 2.07: MTVSRD or MFVSRD. */
19340 if (size == 8)
19341 return true;
19342
19343 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19344 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19345 return true;
19346 }
19347
19348 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19349 if (TARGET_P8_VECTOR)
19350 {
19351 if (mode == SImode)
19352 return true;
19353
19354 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19355 return true;
19356 }
19357
19358 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19359 if (mode == SDmode)
19360 return true;
19361 }
19362
19363 /* Power6+: MFTGPR or MFFGPR. */
19364 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19365 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19366 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19367 return true;
19368
19369 /* Move to/from SPR. */
19370 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19371 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19372 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19373 return true;
19374
19375 return false;
19376 }
19377
19378 /* Direct move helper function for rs6000_secondary_reload. Handle all of
19379 the special direct moves that involve allocating an extra register. Return
19380 true if there is such a helper, recording its insn code and extra cost in
19381 SRI; return false if not. */
19382
19383 static bool
19384 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19385 enum rs6000_reg_type from_type,
19386 machine_mode mode,
19387 secondary_reload_info *sri,
19388 bool altivec_p)
19389 {
19390 bool ret = false;
19391 enum insn_code icode = CODE_FOR_nothing;
19392 int cost = 0;
19393 int size = GET_MODE_SIZE (mode);
19394
19395 if (TARGET_POWERPC64 && size == 16)
19396 {
19397 /* Handle moving 128-bit values from GPRs to VSX registers on
19398 ISA 2.07 (power8, power9) when running in 64-bit mode using
19399 XXPERMDI to glue the two 64-bit values back together. */
19400 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19401 {
19402 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19403 icode = reg_addr[mode].reload_vsx_gpr;
19404 }
19405
19406 /* Handle moving 128-bit values from VSX registers to GPRs on
19407 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19408 bottom 64-bit value. */
19409 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19410 {
19411 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19412 icode = reg_addr[mode].reload_gpr_vsx;
19413 }
19414 }
19415
19416 else if (TARGET_POWERPC64 && mode == SFmode)
19417 {
19418 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19419 {
19420 cost = 3; /* xscvdpspn, mfvsrd, and. */
19421 icode = reg_addr[mode].reload_gpr_vsx;
19422 }
19423
19424 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19425 {
19426 cost = 2; /* mtvsrwz, xscvspdpn. */
19427 icode = reg_addr[mode].reload_vsx_gpr;
19428 }
19429 }
19430
19431 else if (!TARGET_POWERPC64 && size == 8)
19432 {
19433 /* Handle moving 64-bit values from GPRs to floating point registers on
19434 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19435 32-bit values back together. Altivec register classes must be handled
19436 specially since a different instruction is used, and the secondary
19437 reload support requires a single instruction class in the scratch
19438 register constraint. However, right now TFmode is not allowed in
19439 Altivec registers, so the pattern will never match. */
19440 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19441 {
19442 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19443 icode = reg_addr[mode].reload_fpr_gpr;
19444 }
19445 }
19446
19447 if (icode != CODE_FOR_nothing)
19448 {
19449 ret = true;
19450 if (sri)
19451 {
19452 sri->icode = icode;
19453 sri->extra_cost = cost;
19454 }
19455 }
19456
19457 return ret;
19458 }
19459
19460 /* Return whether a move between two register classes can be done either
19461 directly (simple move) or via a pattern that uses a single extra temporary
19462 (using ISA 2.07's direct move in this case). */
19463
19464 static bool
19465 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19466 enum rs6000_reg_type from_type,
19467 machine_mode mode,
19468 secondary_reload_info *sri,
19469 bool altivec_p)
19470 {
19471 /* Fall back to load/store reloads if either type is not a register. */
19472 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19473 return false;
19474
19475 /* If we haven't allocated registers yet, assume the move can be done for the
19476 standard register types. */
19477 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19478 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19479 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19480 return true;
19481
19482 /* A move within the same set of registers is a simple move for
19483 non-specialized registers. */
19484 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19485 return true;
19486
19487 /* Check whether a simple move can be done directly. */
19488 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19489 {
19490 if (sri)
19491 {
19492 sri->icode = CODE_FOR_nothing;
19493 sri->extra_cost = 0;
19494 }
19495 return true;
19496 }
19497
19498 /* Now check if we can do it in a few steps. */
19499 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19500 altivec_p);
19501 }
19502
19503 /* Inform reload about cases where moving X with a mode MODE to a register in
19504 RCLASS requires an extra scratch or immediate register. Return the class
19505 needed for the immediate register.
19506
19507 For VSX and Altivec, we may need a register to convert sp+offset into
19508 reg+sp.
19509
19510 For misaligned 64-bit gpr loads and stores we need a register to
19511 convert an offset address to indirect. */
19512
19513 static reg_class_t
19514 rs6000_secondary_reload (bool in_p,
19515 rtx x,
19516 reg_class_t rclass_i,
19517 machine_mode mode,
19518 secondary_reload_info *sri)
19519 {
19520 enum reg_class rclass = (enum reg_class) rclass_i;
19521 reg_class_t ret = ALL_REGS;
19522 enum insn_code icode;
19523 bool default_p = false;
19524 bool done_p = false;
19525
19526 /* Allow subreg of memory before/during reload. */
19527 bool memory_p = (MEM_P (x)
19528 || (!reload_completed && SUBREG_P (x)
19529 && MEM_P (SUBREG_REG (x))));
19530
19531 sri->icode = CODE_FOR_nothing;
19532 sri->t_icode = CODE_FOR_nothing;
19533 sri->extra_cost = 0;
19534 icode = ((in_p)
19535 ? reg_addr[mode].reload_load
19536 : reg_addr[mode].reload_store);
19537
19538 if (REG_P (x) || register_operand (x, mode))
19539 {
19540 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19541 bool altivec_p = (rclass == ALTIVEC_REGS);
19542 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19543
19544 if (!in_p)
19545 std::swap (to_type, from_type);
19546
19547 /* Can we do a direct move of some sort? */
19548 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19549 altivec_p))
19550 {
19551 icode = (enum insn_code)sri->icode;
19552 default_p = false;
19553 done_p = true;
19554 ret = NO_REGS;
19555 }
19556 }
19557
19558 /* Make sure 0.0 is not reloaded or forced into memory. */
19559 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19560 {
19561 ret = NO_REGS;
19562 default_p = false;
19563 done_p = true;
19564 }
19565
19566 /* If this is a scalar floating point value and we want to load it into the
19567 traditional Altivec registers, do it via a traditional floating
19568 point register, unless we have D-form addressing. Also make sure that
19569 non-zero constants use a FPR. */
19570 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19571 && !mode_supports_vmx_dform (mode)
19572 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19573 && (memory_p || CONST_DOUBLE_P (x)))
19574 {
19575 ret = FLOAT_REGS;
19576 default_p = false;
19577 done_p = true;
19578 }
19579
19580 /* Handle reload of load/stores if we have reload helper functions. */
19581 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19582 {
19583 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19584 mode);
19585
19586 if (extra_cost >= 0)
19587 {
19588 done_p = true;
19589 ret = NO_REGS;
19590 if (extra_cost > 0)
19591 {
19592 sri->extra_cost = extra_cost;
19593 sri->icode = icode;
19594 }
19595 }
19596 }
19597
19598 /* Handle unaligned loads and stores of integer registers. */
19599 if (!done_p && TARGET_POWERPC64
19600 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19601 && memory_p
19602 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19603 {
19604 rtx addr = XEXP (x, 0);
19605 rtx off = address_offset (addr);
19606
19607 if (off != NULL_RTX)
19608 {
19609 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19610 unsigned HOST_WIDE_INT offset = INTVAL (off);
19611
19612 /* We need a secondary reload when our legitimate_address_p
19613 says the address is good (as otherwise the entire address
19614 will be reloaded), and the offset is not a multiple of
19615 four or we have an address wrap. Address wrap will only
19616 occur for LO_SUMs since legitimate_offset_address_p
19617 rejects addresses for 16-byte mems that will wrap. */
19618 if (GET_CODE (addr) == LO_SUM
19619 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19620 && ((offset & 3) != 0
19621 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19622 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19623 && (offset & 3) != 0))
19624 {
19625 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19626 if (in_p)
19627 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19628 : CODE_FOR_reload_di_load);
19629 else
19630 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19631 : CODE_FOR_reload_di_store);
19632 sri->extra_cost = 2;
19633 ret = NO_REGS;
19634 done_p = true;
19635 }
19636 else
19637 default_p = true;
19638 }
19639 else
19640 default_p = true;
19641 }
19642
19643 if (!done_p && !TARGET_POWERPC64
19644 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19645 && memory_p
19646 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19647 {
19648 rtx addr = XEXP (x, 0);
19649 rtx off = address_offset (addr);
19650
19651 if (off != NULL_RTX)
19652 {
19653 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19654 unsigned HOST_WIDE_INT offset = INTVAL (off);
19655
19656 /* We need a secondary reload when our legitimate_address_p
19657 says the address is good (as otherwise the entire address
19658 will be reloaded), and we have a wrap.
19659
19660 legitimate_lo_sum_address_p allows LO_SUM addresses to
19661 have any offset so test for wrap in the low 16 bits.
19662
19663 legitimate_offset_address_p checks for the range
19664 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19665 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19666 [0x7ff4,0x7fff] respectively, so test for the
19667 intersection of these ranges, [0x7ffc,0x7fff] and
19668 [0x7ff4,0x7ff7] respectively.
19669
19670 Note that the address we see here may have been
19671 manipulated by legitimize_reload_address. */
19672 if (GET_CODE (addr) == LO_SUM
19673 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19674 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19675 {
19676 if (in_p)
19677 sri->icode = CODE_FOR_reload_si_load;
19678 else
19679 sri->icode = CODE_FOR_reload_si_store;
19680 sri->extra_cost = 2;
19681 ret = NO_REGS;
19682 done_p = true;
19683 }
19684 else
19685 default_p = true;
19686 }
19687 else
19688 default_p = true;
19689 }
19690
19691 if (!done_p)
19692 default_p = true;
19693
19694 if (default_p)
19695 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19696
19697 gcc_assert (ret != ALL_REGS);
19698
19699 if (TARGET_DEBUG_ADDR)
19700 {
19701 fprintf (stderr,
19702 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19703 "mode = %s",
19704 reg_class_names[ret],
19705 in_p ? "true" : "false",
19706 reg_class_names[rclass],
19707 GET_MODE_NAME (mode));
19708
19709 if (reload_completed)
19710 fputs (", after reload", stderr);
19711
19712 if (!done_p)
19713 fputs (", done_p not set", stderr);
19714
19715 if (default_p)
19716 fputs (", default secondary reload", stderr);
19717
19718 if (sri->icode != CODE_FOR_nothing)
19719 fprintf (stderr, ", reload func = %s, extra cost = %d",
19720 insn_data[sri->icode].name, sri->extra_cost);
19721
19722 else if (sri->extra_cost > 0)
19723 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19724
19725 fputs ("\n", stderr);
19726 debug_rtx (x);
19727 }
19728
19729 return ret;
19730 }
19731
19732 /* Better tracing for rs6000_secondary_reload_inner. */
19733
19734 static void
19735 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19736 bool store_p)
19737 {
19738 rtx set, clobber;
19739
19740 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19741
19742 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19743 store_p ? "store" : "load");
19744
19745 if (store_p)
19746 set = gen_rtx_SET (mem, reg);
19747 else
19748 set = gen_rtx_SET (reg, mem);
19749
19750 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19751 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19752 }
19753
19754 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19755 ATTRIBUTE_NORETURN;
19756
19757 static void
19758 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19759 bool store_p)
19760 {
19761 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19762 gcc_unreachable ();
19763 }
19764
19765 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19766 reload helper functions. These were identified in
19767 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19768 reload, it calls the insns:
19769 reload_<RELOAD:mode>_<P:mptrsize>_store
19770 reload_<RELOAD:mode>_<P:mptrsize>_load
19771
19772 which in turn calls this function, to do whatever is necessary to create
19773 valid addresses. */
19774
19775 void
19776 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19777 {
19778 int regno = true_regnum (reg);
19779 machine_mode mode = GET_MODE (reg);
19780 addr_mask_type addr_mask;
19781 rtx addr;
19782 rtx new_addr;
19783 rtx op_reg, op0, op1;
19784 rtx and_op;
19785 rtx cc_clobber;
19786 rtvec rv;
19787
19788 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19789 || !base_reg_operand (scratch, GET_MODE (scratch)))
19790 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19791
19792 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19793 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19794
19795 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19796 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19797
19798 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19799 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19800
19801 else
19802 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19803
19804 /* Make sure the mode is valid in this register class. */
19805 if ((addr_mask & RELOAD_REG_VALID) == 0)
19806 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19807
19808 if (TARGET_DEBUG_ADDR)
19809 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19810
19811 new_addr = addr = XEXP (mem, 0);
19812 switch (GET_CODE (addr))
19813 {
19814 /* Does the register class support auto update forms for this mode? If
19815 not, do the update now. We don't need a scratch register, since the
19816 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19817 case PRE_INC:
19818 case PRE_DEC:
19819 op_reg = XEXP (addr, 0);
19820 if (!base_reg_operand (op_reg, Pmode))
19821 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19822
19823 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19824 {
19825 int delta = GET_MODE_SIZE (mode);
19826 if (GET_CODE (addr) == PRE_DEC)
19827 delta = -delta;
19828 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19829 new_addr = op_reg;
19830 }
19831 break;
19832
19833 case PRE_MODIFY:
19834 op0 = XEXP (addr, 0);
19835 op1 = XEXP (addr, 1);
19836 if (!base_reg_operand (op0, Pmode)
19837 || GET_CODE (op1) != PLUS
19838 || !rtx_equal_p (op0, XEXP (op1, 0)))
19839 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19840
19841 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19842 {
19843 emit_insn (gen_rtx_SET (op0, op1));
19844 new_addr = op0;
19845 }
19846 break;
19847
19848 /* Do we need to simulate AND -16 to clear the bottom address bits used
19849 in VMX load/stores? */
19850 case AND:
19851 op0 = XEXP (addr, 0);
19852 op1 = XEXP (addr, 1);
19853 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19854 {
19855 if (REG_P (op0) || SUBREG_P (op0))
19856 op_reg = op0;
19857
19858 else if (GET_CODE (op1) == PLUS)
19859 {
19860 emit_insn (gen_rtx_SET (scratch, op1));
19861 op_reg = scratch;
19862 }
19863
19864 else
19865 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19866
19867 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19868 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19869 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19870 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19871 new_addr = scratch;
19872 }
19873 break;
19874
19875 /* If this is an indirect address, make sure it is a base register. */
19876 case REG:
19877 case SUBREG:
19878 if (!base_reg_operand (addr, GET_MODE (addr)))
19879 {
19880 emit_insn (gen_rtx_SET (scratch, addr));
19881 new_addr = scratch;
19882 }
19883 break;
19884
19885 /* If this is an indexed address, make sure the register class can handle
19886 indexed addresses for this mode. */
19887 case PLUS:
19888 op0 = XEXP (addr, 0);
19889 op1 = XEXP (addr, 1);
19890 if (!base_reg_operand (op0, Pmode))
19891 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19892
19893 else if (int_reg_operand (op1, Pmode))
19894 {
19895 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19896 {
19897 emit_insn (gen_rtx_SET (scratch, addr));
19898 new_addr = scratch;
19899 }
19900 }
19901
19902 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19903 {
19904 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19905 || !quad_address_p (addr, mode, false))
19906 {
19907 emit_insn (gen_rtx_SET (scratch, addr));
19908 new_addr = scratch;
19909 }
19910 }
19911
19912 /* Make sure the register class can handle offset addresses. */
19913 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19914 {
19915 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19916 {
19917 emit_insn (gen_rtx_SET (scratch, addr));
19918 new_addr = scratch;
19919 }
19920 }
19921
19922 else
19923 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19924
19925 break;
19926
19927 case LO_SUM:
19928 op0 = XEXP (addr, 0);
19929 op1 = XEXP (addr, 1);
19930 if (!base_reg_operand (op0, Pmode))
19931 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19932
19933 else if (int_reg_operand (op1, Pmode))
19934 {
19935 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19936 {
19937 emit_insn (gen_rtx_SET (scratch, addr));
19938 new_addr = scratch;
19939 }
19940 }
19941
19942 /* Quad offsets are restricted and can't handle normal addresses. */
19943 else if (mode_supports_dq_form (mode))
19944 {
19945 emit_insn (gen_rtx_SET (scratch, addr));
19946 new_addr = scratch;
19947 }
19948
19949 /* Make sure the register class can handle offset addresses. */
19950 else if (legitimate_lo_sum_address_p (mode, addr, false))
19951 {
19952 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19953 {
19954 emit_insn (gen_rtx_SET (scratch, addr));
19955 new_addr = scratch;
19956 }
19957 }
19958
19959 else
19960 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19961
19962 break;
19963
19964 case SYMBOL_REF:
19965 case CONST:
19966 case LABEL_REF:
19967 rs6000_emit_move (scratch, addr, Pmode);
19968 new_addr = scratch;
19969 break;
19970
19971 default:
19972 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19973 }
19974
19975 /* Adjust the address if it changed. */
19976 if (addr != new_addr)
19977 {
19978 mem = replace_equiv_address_nv (mem, new_addr);
19979 if (TARGET_DEBUG_ADDR)
19980 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19981 }
19982
19983 /* Now create the move. */
19984 if (store_p)
19985 emit_insn (gen_rtx_SET (mem, reg));
19986 else
19987 emit_insn (gen_rtx_SET (reg, mem));
19988
19989 return;
19990 }
19991
19992 /* Convert reloads involving 64-bit gprs and misaligned offset
19993 addressing, or multiple 32-bit gprs and offsets that are too large,
19994 to use indirect addressing. */
19995
19996 void
19997 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19998 {
19999 int regno = true_regnum (reg);
20000 enum reg_class rclass;
20001 rtx addr;
20002 rtx scratch_or_premodify = scratch;
20003
20004 if (TARGET_DEBUG_ADDR)
20005 {
20006 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20007 store_p ? "store" : "load");
20008 fprintf (stderr, "reg:\n");
20009 debug_rtx (reg);
20010 fprintf (stderr, "mem:\n");
20011 debug_rtx (mem);
20012 fprintf (stderr, "scratch:\n");
20013 debug_rtx (scratch);
20014 }
20015
20016 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
20017 gcc_assert (MEM_P (mem));
20018 rclass = REGNO_REG_CLASS (regno);
20019 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20020 addr = XEXP (mem, 0);
20021
20022 if (GET_CODE (addr) == PRE_MODIFY)
20023 {
20024 gcc_assert (REG_P (XEXP (addr, 0))
20025 && GET_CODE (XEXP (addr, 1)) == PLUS
20026 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20027 scratch_or_premodify = XEXP (addr, 0);
20028 addr = XEXP (addr, 1);
20029 }
20030 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20031
20032 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20033
20034 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20035
20036 /* Now create the move. */
20037 if (store_p)
20038 emit_insn (gen_rtx_SET (mem, reg));
20039 else
20040 emit_insn (gen_rtx_SET (reg, mem));
20041
20042 return;
20043 }
20044
20045 /* Given an rtx X being reloaded into a reg required to be
20046 in class CLASS, return the class of reg to actually use.
20047 In general this is just CLASS; but on some machines
20048 in some cases it is preferable to use a more restrictive class.
20049
20050 On the RS/6000, we have to return NO_REGS when we want to reload a
20051 floating-point CONST_DOUBLE to force it to be copied to memory.
20052
20053 We also don't want to reload integer values into floating-point
20054 registers if we can at all help it. In fact, this can
20055 cause reload to die, if it tries to generate a reload of CTR
20056 into a FP register and discovers it doesn't have the memory location
20057 required.
20058
20059 ??? Would it be a good idea to have reload do the converse, that is
20060 try to reload floating modes into FP registers if possible?
20061 */
20062
20063 static enum reg_class
20064 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20065 {
20066 machine_mode mode = GET_MODE (x);
20067 bool is_constant = CONSTANT_P (x);
20068
20069 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20070 reload class for it. */
20071 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20072 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20073 return NO_REGS;
20074
20075 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20076 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20077 return NO_REGS;
20078
20079 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20080 the reloading of address expressions using PLUS into floating point
20081 registers. */
20082 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20083 {
20084 if (is_constant)
20085 {
20086 /* Zero is always allowed in all VSX registers. */
20087 if (x == CONST0_RTX (mode))
20088 return rclass;
20089
20090 /* If this is a vector constant that can be formed with a few Altivec
20091 instructions, we want altivec registers. */
20092 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20093 return ALTIVEC_REGS;
20094
20095 /* If this is an integer constant that can easily be loaded into
20096 vector registers, allow it. */
20097 if (CONST_INT_P (x))
20098 {
20099 HOST_WIDE_INT value = INTVAL (x);
20100
20101 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20102 2.06 can generate it in the Altivec registers with
20103 VSPLTI<x>. */
20104 if (value == -1)
20105 {
20106 if (TARGET_P8_VECTOR)
20107 return rclass;
20108 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20109 return ALTIVEC_REGS;
20110 else
20111 return NO_REGS;
20112 }
20113
20114 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20115 a sign extend in the Altivec registers. */
20116 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20117 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20118 return ALTIVEC_REGS;
20119 }
20120
20121 /* Force constant to memory. */
20122 return NO_REGS;
20123 }
20124
20125 /* D-form addressing can easily reload the value. */
20126 if (mode_supports_vmx_dform (mode)
20127 || mode_supports_dq_form (mode))
20128 return rclass;
20129
20130 /* If this is a scalar floating point value and we don't have D-form
20131 addressing, prefer the traditional floating point registers so that we
20132 can use D-form (register+offset) addressing. */
20133 if (rclass == VSX_REGS
20134 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20135 return FLOAT_REGS;
20136
20137 /* Prefer the Altivec registers if Altivec is handling the vector
20138 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20139 loads. */
20140 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20141 || mode == V1TImode)
20142 return ALTIVEC_REGS;
20143
20144 return rclass;
20145 }
20146
20147 if (is_constant || GET_CODE (x) == PLUS)
20148 {
20149 if (reg_class_subset_p (GENERAL_REGS, rclass))
20150 return GENERAL_REGS;
20151 if (reg_class_subset_p (BASE_REGS, rclass))
20152 return BASE_REGS;
20153 return NO_REGS;
20154 }
20155
20156 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20157 return GENERAL_REGS;
20158
20159 return rclass;
20160 }
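
/* Examples (illustrative, assuming TARGET_P9_VECTOR): reloading the
   constant 100 into a VSX_REGS pseudo returns ALTIVEC_REGS, since
   XXSPLTIB plus a sign extend can materialize -128..127 there, while
   the constant 1000 has no cheap sequence, so NO_REGS is returned and
   the constant is forced to memory.  */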
20161
20162 /* Debug version of rs6000_preferred_reload_class. */
20163 static enum reg_class
20164 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20165 {
20166 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20167
20168 fprintf (stderr,
20169 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20170 "mode = %s, x:\n",
20171 reg_class_names[ret], reg_class_names[rclass],
20172 GET_MODE_NAME (GET_MODE (x)));
20173 debug_rtx (x);
20174
20175 return ret;
20176 }
20177
/* If we are copying between FP or AltiVec registers and anything else, we
   need a memory location.  The exceptions are when we are targeting ppc64
   and the direct move instructions between FPRs and GPRs are available,
   and, under VSX, when copying vector registers between the FP register
   set and the Altivec register set.  */
20183
20184 static bool
20185 rs6000_secondary_memory_needed (machine_mode mode,
20186 reg_class_t from_class,
20187 reg_class_t to_class)
20188 {
20189 enum rs6000_reg_type from_type, to_type;
20190 bool altivec_p = ((from_class == ALTIVEC_REGS)
20191 || (to_class == ALTIVEC_REGS));
20192
/* If a simple/direct move is available, we don't need secondary memory.  */
20194 from_type = reg_class_to_reg_type[(int)from_class];
20195 to_type = reg_class_to_reg_type[(int)to_class];
20196
20197 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20198 (secondary_reload_info *)0, altivec_p))
20199 return false;
20200
20201 /* If we have a floating point or vector register class, we need to use
20202 memory to transfer the data. */
20203 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20204 return true;
20205
20206 return false;
20207 }
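
/* For example (a sketch, assuming a 64-bit power8 target so that the direct
   move instructions exist): a DImode copy between a GPR and an FPR can use
   mtvsrd/mfvsrd and so needs no memory, whereas without direct moves the
   same copy must bounce through a stack slot.  Copies that stay within the
   GPRs never need memory.  */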
20208
20209 /* Debug version of rs6000_secondary_memory_needed. */
20210 static bool
20211 rs6000_debug_secondary_memory_needed (machine_mode mode,
20212 reg_class_t from_class,
20213 reg_class_t to_class)
20214 {
20215 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20216
20217 fprintf (stderr,
20218 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20219 "to_class = %s, mode = %s\n",
20220 ret ? "true" : "false",
20221 reg_class_names[from_class],
20222 reg_class_names[to_class],
20223 GET_MODE_NAME (mode));
20224
20225 return ret;
20226 }
20227
20228 /* Return the register class of a scratch register needed to copy IN into
20229 or out of a register in RCLASS in MODE. If it can be done directly,
20230 NO_REGS is returned. */
20231
20232 static enum reg_class
20233 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20234 rtx in)
20235 {
20236 int regno;
20237
20238 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20239 #if TARGET_MACHO
20240 && MACHOPIC_INDIRECT
20241 #endif
20242 ))
20243 {
20244 /* We cannot copy a symbolic operand directly into anything
20245 other than BASE_REGS for TARGET_ELF. So indicate that a
20246 register from BASE_REGS is needed as an intermediate
20247 register.
20248
20249 On Darwin, pic addresses require a load from memory, which
20250 needs a base register. */
20251 if (rclass != BASE_REGS
20252 && (SYMBOL_REF_P (in)
20253 || GET_CODE (in) == HIGH
20254 || GET_CODE (in) == LABEL_REF
20255 || GET_CODE (in) == CONST))
20256 return BASE_REGS;
20257 }
20258
20259 if (REG_P (in))
20260 {
20261 regno = REGNO (in);
20262 if (!HARD_REGISTER_NUM_P (regno))
20263 {
20264 regno = true_regnum (in);
20265 if (!HARD_REGISTER_NUM_P (regno))
20266 regno = -1;
20267 }
20268 }
20269 else if (SUBREG_P (in))
20270 {
20271 regno = true_regnum (in);
20272 if (!HARD_REGISTER_NUM_P (regno))
20273 regno = -1;
20274 }
20275 else
20276 regno = -1;
20277
20278 /* If we have VSX register moves, prefer moving scalar values between
20279 Altivec registers and GPR by going via an FPR (and then via memory)
20280 instead of reloading the secondary memory address for Altivec moves. */
20281 if (TARGET_VSX
20282 && GET_MODE_SIZE (mode) < 16
20283 && !mode_supports_vmx_dform (mode)
20284 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20285 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20286 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20287 && (regno >= 0 && INT_REGNO_P (regno)))))
20288 return FLOAT_REGS;
20289
20290 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20291 into anything. */
20292 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20293 || (regno >= 0 && INT_REGNO_P (regno)))
20294 return NO_REGS;
20295
20296 /* Constants, memory, and VSX registers can go into VSX registers (both the
20297 traditional floating point and the altivec registers). */
20298 if (rclass == VSX_REGS
20299 && (regno == -1 || VSX_REGNO_P (regno)))
20300 return NO_REGS;
20301
20302 /* Constants, memory, and FP registers can go into FP registers. */
20303 if ((regno == -1 || FP_REGNO_P (regno))
20304 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20305 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20306
20307 /* Memory, and AltiVec registers can go into AltiVec registers. */
20308 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20309 && rclass == ALTIVEC_REGS)
20310 return NO_REGS;
20311
20312 /* We can copy among the CR registers. */
20313 if ((rclass == CR_REGS || rclass == CR0_REGS)
20314 && regno >= 0 && CR_REGNO_P (regno))
20315 return NO_REGS;
20316
20317 /* Otherwise, we need GENERAL_REGS. */
20318 return GENERAL_REGS;
20319 }
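
/* As a rough example (assuming TARGET_VSX without VMX D-form addressing),
   copying a DImode value between an Altivec register and a GPR returns
   FLOAT_REGS above, so the value is staged through a traditional FPR and
   memory instead of reloading a secondary memory address for the Altivec
   register.  */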
20320
20321 /* Debug version of rs6000_secondary_reload_class. */
20322 static enum reg_class
20323 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20324 machine_mode mode, rtx in)
20325 {
20326 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20327 fprintf (stderr,
20328 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20329 "mode = %s, input rtx:\n",
20330 reg_class_names[ret], reg_class_names[rclass],
20331 GET_MODE_NAME (mode));
20332 debug_rtx (in);
20333
20334 return ret;
20335 }
20336
20337 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20338
20339 static bool
20340 rs6000_can_change_mode_class (machine_mode from,
20341 machine_mode to,
20342 reg_class_t rclass)
20343 {
20344 unsigned from_size = GET_MODE_SIZE (from);
20345 unsigned to_size = GET_MODE_SIZE (to);
20346
20347 if (from_size != to_size)
20348 {
20349 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20350
20351 if (reg_classes_intersect_p (xclass, rclass))
20352 {
20353 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20354 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20355 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20356 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20357
/* Don't allow 64-bit types to overlap with 128-bit types that take a
   single register under VSX because the scalar part of the register
   is in the upper 64 bits, and not the lower 64 bits.  Types like
   TFmode/TDmode that take two scalar registers can overlap.  128-bit
   IEEE floating point can't overlap, and neither can small
   values.  */
20364
20365 if (to_float128_vector_p && from_float128_vector_p)
20366 return true;
20367
20368 else if (to_float128_vector_p || from_float128_vector_p)
20369 return false;
20370
20371 /* TDmode in floating-mode registers must always go into a register
20372 pair with the most significant word in the even-numbered register
20373 to match ISA requirements. In little-endian mode, this does not
20374 match subreg numbering, so we cannot allow subregs. */
20375 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20376 return false;
20377
20378 if (from_size < 8 || to_size < 8)
20379 return false;
20380
20381 if (from_size == 8 && (8 * to_nregs) != to_size)
20382 return false;
20383
20384 if (to_size == 8 && (8 * from_nregs) != from_size)
20385 return false;
20386
20387 return true;
20388 }
20389 else
20390 return true;
20391 }
20392
20393 /* Since the VSX register set includes traditional floating point registers
20394 and altivec registers, just check for the size being different instead of
20395 trying to check whether the modes are vector modes. Otherwise it won't
20396 allow say DF and DI to change classes. For types like TFmode and TDmode
20397 that take 2 64-bit registers, rather than a single 128-bit register, don't
20398 allow subregs of those types to other 128 bit types. */
20399 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20400 {
20401 unsigned num_regs = (from_size + 15) / 16;
20402 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20403 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20404 return false;
20405
20406 return (from_size == 8 || from_size == 16);
20407 }
20408
20409 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20410 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20411 return false;
20412
20413 return true;
20414 }
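
/* For instance, under VSX a (subreg:DI (reg:KF)) is rejected above: KFmode
   occupies one 128-bit VSX register with the scalar part in the upper 64
   bits, so the 64-bit subreg would not name the bits that subreg semantics
   expect.  An equal-sized (subreg:V2DI (reg:V4SI)) is allowed.  */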
20415
20416 /* Debug version of rs6000_can_change_mode_class. */
20417 static bool
20418 rs6000_debug_can_change_mode_class (machine_mode from,
20419 machine_mode to,
20420 reg_class_t rclass)
20421 {
20422 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20423
20424 fprintf (stderr,
20425 "rs6000_can_change_mode_class, return %s, from = %s, "
20426 "to = %s, rclass = %s\n",
20427 ret ? "true" : "false",
20428 GET_MODE_NAME (from), GET_MODE_NAME (to),
20429 reg_class_names[rclass]);
20430
20431 return ret;
20432 }
20433 \f
20434 /* Return a string to do a move operation of 128 bits of data. */
20435
20436 const char *
20437 rs6000_output_move_128bit (rtx operands[])
20438 {
20439 rtx dest = operands[0];
20440 rtx src = operands[1];
20441 machine_mode mode = GET_MODE (dest);
20442 int dest_regno;
20443 int src_regno;
20444 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20445 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20446
20447 if (REG_P (dest))
20448 {
20449 dest_regno = REGNO (dest);
20450 dest_gpr_p = INT_REGNO_P (dest_regno);
20451 dest_fp_p = FP_REGNO_P (dest_regno);
20452 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20453 dest_vsx_p = dest_fp_p | dest_vmx_p;
20454 }
20455 else
20456 {
20457 dest_regno = -1;
20458 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20459 }
20460
20461 if (REG_P (src))
20462 {
20463 src_regno = REGNO (src);
20464 src_gpr_p = INT_REGNO_P (src_regno);
20465 src_fp_p = FP_REGNO_P (src_regno);
20466 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20467 src_vsx_p = src_fp_p | src_vmx_p;
20468 }
20469 else
20470 {
20471 src_regno = -1;
20472 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20473 }
20474
20475 /* Register moves. */
20476 if (dest_regno >= 0 && src_regno >= 0)
20477 {
20478 if (dest_gpr_p)
20479 {
20480 if (src_gpr_p)
20481 return "#";
20482
20483 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20484 return (WORDS_BIG_ENDIAN
20485 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20486 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20487
20488 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20489 return "#";
20490 }
20491
20492 else if (TARGET_VSX && dest_vsx_p)
20493 {
20494 if (src_vsx_p)
20495 return "xxlor %x0,%x1,%x1";
20496
20497 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20498 return (WORDS_BIG_ENDIAN
20499 ? "mtvsrdd %x0,%1,%L1"
20500 : "mtvsrdd %x0,%L1,%1");
20501
20502 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20503 return "#";
20504 }
20505
20506 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20507 return "vor %0,%1,%1";
20508
20509 else if (dest_fp_p && src_fp_p)
20510 return "#";
20511 }
20512
20513 /* Loads. */
20514 else if (dest_regno >= 0 && MEM_P (src))
20515 {
20516 if (dest_gpr_p)
20517 {
20518 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20519 return "lq %0,%1";
20520 else
20521 return "#";
20522 }
20523
20524 else if (TARGET_ALTIVEC && dest_vmx_p
20525 && altivec_indexed_or_indirect_operand (src, mode))
20526 return "lvx %0,%y1";
20527
20528 else if (TARGET_VSX && dest_vsx_p)
20529 {
20530 if (mode_supports_dq_form (mode)
20531 && quad_address_p (XEXP (src, 0), mode, true))
20532 return "lxv %x0,%1";
20533
20534 else if (TARGET_P9_VECTOR)
20535 return "lxvx %x0,%y1";
20536
20537 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20538 return "lxvw4x %x0,%y1";
20539
20540 else
20541 return "lxvd2x %x0,%y1";
20542 }
20543
20544 else if (TARGET_ALTIVEC && dest_vmx_p)
20545 return "lvx %0,%y1";
20546
20547 else if (dest_fp_p)
20548 return "#";
20549 }
20550
20551 /* Stores. */
20552 else if (src_regno >= 0 && MEM_P (dest))
20553 {
20554 if (src_gpr_p)
20555 {
20556 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20557 return "stq %1,%0";
20558 else
20559 return "#";
20560 }
20561
20562 else if (TARGET_ALTIVEC && src_vmx_p
20563 && altivec_indexed_or_indirect_operand (dest, mode))
20564 return "stvx %1,%y0";
20565
20566 else if (TARGET_VSX && src_vsx_p)
20567 {
20568 if (mode_supports_dq_form (mode)
20569 && quad_address_p (XEXP (dest, 0), mode, true))
20570 return "stxv %x1,%0";
20571
20572 else if (TARGET_P9_VECTOR)
20573 return "stxvx %x1,%y0";
20574
20575 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20576 return "stxvw4x %x1,%y0";
20577
20578 else
20579 return "stxvd2x %x1,%y0";
20580 }
20581
20582 else if (TARGET_ALTIVEC && src_vmx_p)
20583 return "stvx %1,%y0";
20584
20585 else if (src_fp_p)
20586 return "#";
20587 }
20588
20589 /* Constants. */
20590 else if (dest_regno >= 0
20591 && (CONST_INT_P (src)
20592 || CONST_WIDE_INT_P (src)
20593 || CONST_DOUBLE_P (src)
20594 || GET_CODE (src) == CONST_VECTOR))
20595 {
20596 if (dest_gpr_p)
20597 return "#";
20598
20599 else if ((dest_vmx_p && TARGET_ALTIVEC)
20600 || (dest_vsx_p && TARGET_VSX))
20601 return output_vec_const_move (operands);
20602 }
20603
20604 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20605 }
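
/* By way of example, a V2DImode move comes out approximately as follows,
   depending on the enabled ISA:

     VSX reg <- VSX reg               "xxlor %x0,%x1,%x1"
     VSX reg <- indexed mem (power9)  "lxvx %x0,%y1"
     indexed mem <- VSX reg (power7)  "stxvd2x %x1,%y0"
     GPR <- GPR                       "#" (split into word-sized moves)  */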
20606
20607 /* Validate a 128-bit move. */
20608 bool
20609 rs6000_move_128bit_ok_p (rtx operands[])
20610 {
20611 machine_mode mode = GET_MODE (operands[0]);
20612 return (gpc_reg_operand (operands[0], mode)
20613 || gpc_reg_operand (operands[1], mode));
20614 }
20615
20616 /* Return true if a 128-bit move needs to be split. */
20617 bool
20618 rs6000_split_128bit_ok_p (rtx operands[])
20619 {
20620 if (!reload_completed)
20621 return false;
20622
20623 if (!gpr_or_gpr_p (operands[0], operands[1]))
20624 return false;
20625
20626 if (quad_load_store_p (operands[0], operands[1]))
20627 return false;
20628
20629 return true;
20630 }
20631
20632 \f
20633 /* Given a comparison operation, return the bit number in CCR to test. We
20634 know this is a valid comparison.
20635
20636 SCC_P is 1 if this is for an scc. That means that %D will have been
20637 used instead of %C, so the bits will be in different places.
20638
20639 Return -1 if OP isn't a valid comparison for some reason. */
20640
20641 int
20642 ccr_bit (rtx op, int scc_p)
20643 {
20644 enum rtx_code code = GET_CODE (op);
20645 machine_mode cc_mode;
20646 int cc_regnum;
20647 int base_bit;
20648 rtx reg;
20649
20650 if (!COMPARISON_P (op))
20651 return -1;
20652
20653 reg = XEXP (op, 0);
20654
20655 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20656 return -1;
20657
20658 cc_mode = GET_MODE (reg);
20659 cc_regnum = REGNO (reg);
20660 base_bit = 4 * (cc_regnum - CR0_REGNO);
20661
20662 validate_condition_mode (code, cc_mode);
20663
20664 /* When generating a sCOND operation, only positive conditions are
20665 allowed. */
20666 if (scc_p)
20667 switch (code)
20668 {
20669 case EQ:
20670 case GT:
20671 case LT:
20672 case UNORDERED:
20673 case GTU:
20674 case LTU:
20675 break;
20676 default:
20677 return -1;
20678 }
20679
20680 switch (code)
20681 {
20682 case NE:
20683 return scc_p ? base_bit + 3 : base_bit + 2;
20684 case EQ:
20685 return base_bit + 2;
20686 case GT: case GTU: case UNLE:
20687 return base_bit + 1;
20688 case LT: case LTU: case UNGE:
20689 return base_bit;
20690 case ORDERED: case UNORDERED:
20691 return base_bit + 3;
20692
20693 case GE: case GEU:
20694 /* If scc, we will have done a cror to put the bit in the
20695 unordered position. So test that bit. For integer, this is ! LT
20696 unless this is an scc insn. */
20697 return scc_p ? base_bit + 3 : base_bit;
20698
20699 case LE: case LEU:
20700 return scc_p ? base_bit + 3 : base_bit + 1;
20701
20702 default:
20703 return -1;
20704 }
20705 }
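
/* Each CR field holds 4 bits -- LT, GT, EQ, SO/UN -- numbered from the most
   significant bit.  As an example, (gt (reg:CC cr1) (const_int 0)) has a
   base_bit of 4, so ccr_bit returns 5, the GT bit of CR1.  */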
20706 \f
20707 /* Return the GOT register. */
20708
20709 rtx
20710 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20711 {
20712 /* The second flow pass currently (June 1999) can't update
20713 regs_ever_live without disturbing other parts of the compiler, so
20714 update it here to make the prolog/epilogue code happy. */
20715 if (!can_create_pseudo_p ()
20716 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20717 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20718
20719 crtl->uses_pic_offset_table = 1;
20720
20721 return pic_offset_table_rtx;
20722 }
20723 \f
20724 static rs6000_stack_t stack_info;
20725
20726 /* Function to init struct machine_function.
20727 This will be called, via a pointer variable,
20728 from push_function_context. */
20729
20730 static struct machine_function *
20731 rs6000_init_machine_status (void)
20732 {
20733 stack_info.reload_completed = 0;
20734 return ggc_cleared_alloc<machine_function> ();
20735 }
20736 \f
20737 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20738
20739 /* Write out a function code label. */
20740
20741 void
20742 rs6000_output_function_entry (FILE *file, const char *fname)
20743 {
20744 if (fname[0] != '.')
20745 {
20746 switch (DEFAULT_ABI)
20747 {
20748 default:
20749 gcc_unreachable ();
20750
20751 case ABI_AIX:
20752 if (DOT_SYMBOLS)
20753 putc ('.', file);
20754 else
20755 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20756 break;
20757
20758 case ABI_ELFv2:
20759 case ABI_V4:
20760 case ABI_DARWIN:
20761 break;
20762 }
20763 }
20764
20765 RS6000_OUTPUT_BASENAME (file, fname);
20766 }
20767
20768 /* Print an operand. Recognize special options, documented below. */
20769
20770 #if TARGET_ELF
20771 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20772 only introduced by the linker, when applying the sda21
20773 relocation. */
20774 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20775 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20776 #else
20777 #define SMALL_DATA_RELOC "sda21"
20778 #define SMALL_DATA_REG 0
20779 #endif
20780
20781 void
20782 print_operand (FILE *file, rtx x, int code)
20783 {
20784 int i;
20785 unsigned HOST_WIDE_INT uval;
20786
20787 switch (code)
20788 {
20789 /* %a is output_address. */
20790
20791 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20792 output_operand. */
20793
20794 case 'D':
20795 /* Like 'J' but get to the GT bit only. */
20796 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20797 {
20798 output_operand_lossage ("invalid %%D value");
20799 return;
20800 }
20801
20802 /* Bit 1 is GT bit. */
20803 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20804
20805 /* Add one for shift count in rlinm for scc. */
20806 fprintf (file, "%d", i + 1);
20807 return;
20808
20809 case 'e':
20810 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20811 if (! INT_P (x))
20812 {
20813 output_operand_lossage ("invalid %%e value");
20814 return;
20815 }
20816
20817 uval = INTVAL (x);
20818 if ((uval & 0xffff) == 0 && uval != 0)
20819 putc ('s', file);
20820 return;
20821
20822 case 'E':
20823 /* X is a CR register. Print the number of the EQ bit of the CR */
20824 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20825 output_operand_lossage ("invalid %%E value");
20826 else
20827 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20828 return;
20829
20830 case 'f':
20831 /* X is a CR register. Print the shift count needed to move it
20832 to the high-order four bits. */
20833 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20834 output_operand_lossage ("invalid %%f value");
20835 else
20836 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20837 return;
20838
20839 case 'F':
20840 /* Similar, but print the count for the rotate in the opposite
20841 direction. */
20842 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20843 output_operand_lossage ("invalid %%F value");
20844 else
20845 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20846 return;
20847
20848 case 'G':
20849 /* X is a constant integer. If it is negative, print "m",
20850 otherwise print "z". This is to make an aze or ame insn. */
20851 if (!CONST_INT_P (x))
20852 output_operand_lossage ("invalid %%G value");
20853 else if (INTVAL (x) >= 0)
20854 putc ('z', file);
20855 else
20856 putc ('m', file);
20857 return;
20858
20859 case 'h':
20860 /* If constant, output low-order five bits. Otherwise, write
20861 normally. */
20862 if (INT_P (x))
20863 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20864 else
20865 print_operand (file, x, 0);
20866 return;
20867
20868 case 'H':
20869 /* If constant, output low-order six bits. Otherwise, write
20870 normally. */
20871 if (INT_P (x))
20872 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20873 else
20874 print_operand (file, x, 0);
20875 return;
20876
20877 case 'I':
20878 /* Print `i' if this is a constant, else nothing. */
20879 if (INT_P (x))
20880 putc ('i', file);
20881 return;
20882
20883 case 'j':
20884 /* Write the bit number in CCR for jump. */
20885 i = ccr_bit (x, 0);
20886 if (i == -1)
20887 output_operand_lossage ("invalid %%j code");
20888 else
20889 fprintf (file, "%d", i);
20890 return;
20891
20892 case 'J':
20893 /* Similar, but add one for shift count in rlinm for scc and pass
20894 scc flag to `ccr_bit'. */
20895 i = ccr_bit (x, 1);
20896 if (i == -1)
20897 output_operand_lossage ("invalid %%J code");
20898 else
20899 /* If we want bit 31, write a shift count of zero, not 32. */
20900 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20901 return;
20902
20903 case 'k':
20904 /* X must be a constant. Write the 1's complement of the
20905 constant. */
20906 if (! INT_P (x))
20907 output_operand_lossage ("invalid %%k value");
20908 else
20909 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20910 return;
20911
20912 case 'K':
20913 /* X must be a symbolic constant on ELF. Write an
20914 expression suitable for an 'addi' that adds in the low 16
20915 bits of the MEM. */
20916 if (GET_CODE (x) == CONST)
20917 {
20918 if (GET_CODE (XEXP (x, 0)) != PLUS
20919 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20920 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20921 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20922 output_operand_lossage ("invalid %%K value");
20923 }
20924 print_operand_address (file, x);
20925 fputs ("@l", file);
20926 return;
20927
20928 /* %l is output_asm_label. */
20929
20930 case 'L':
20931 /* Write second word of DImode or DFmode reference. Works on register
20932 or non-indexed memory only. */
20933 if (REG_P (x))
20934 fputs (reg_names[REGNO (x) + 1], file);
20935 else if (MEM_P (x))
20936 {
20937 machine_mode mode = GET_MODE (x);
/* Handle possible auto-increment.  Since it is pre-increment and
   we have already done it, we can just use an offset of one word.  */
20940 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20941 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20942 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20943 UNITS_PER_WORD));
20944 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20945 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20946 UNITS_PER_WORD));
20947 else
20948 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20949 UNITS_PER_WORD),
20950 0));
20951
20952 if (small_data_operand (x, GET_MODE (x)))
20953 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20954 reg_names[SMALL_DATA_REG]);
20955 }
20956 return;
20957
20958 case 'N': /* Unused */
20959 /* Write the number of elements in the vector times 4. */
20960 if (GET_CODE (x) != PARALLEL)
20961 output_operand_lossage ("invalid %%N value");
20962 else
20963 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20964 return;
20965
20966 case 'O': /* Unused */
20967 /* Similar, but subtract 1 first. */
20968 if (GET_CODE (x) != PARALLEL)
20969 output_operand_lossage ("invalid %%O value");
20970 else
20971 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20972 return;
20973
20974 case 'p':
20975 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20976 if (! INT_P (x)
20977 || INTVAL (x) < 0
20978 || (i = exact_log2 (INTVAL (x))) < 0)
20979 output_operand_lossage ("invalid %%p value");
20980 else
20981 fprintf (file, "%d", i);
20982 return;
20983
20984 case 'P':
20985 /* The operand must be an indirect memory reference. The result
20986 is the register name. */
20987 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
20988 || REGNO (XEXP (x, 0)) >= 32)
20989 output_operand_lossage ("invalid %%P value");
20990 else
20991 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20992 return;
20993
20994 case 'q':
20995 /* This outputs the logical code corresponding to a boolean
20996 expression. The expression may have one or both operands
20997 negated (if one, only the first one). For condition register
20998 logical operations, it will also treat the negated
20999 CR codes as NOTs, but not handle NOTs of them. */
21000 {
21001 const char *const *t = 0;
21002 const char *s;
21003 enum rtx_code code = GET_CODE (x);
21004 static const char * const tbl[3][3] = {
21005 { "and", "andc", "nor" },
21006 { "or", "orc", "nand" },
21007 { "xor", "eqv", "xor" } };
21008
21009 if (code == AND)
21010 t = tbl[0];
21011 else if (code == IOR)
21012 t = tbl[1];
21013 else if (code == XOR)
21014 t = tbl[2];
21015 else
21016 output_operand_lossage ("invalid %%q value");
21017
21018 if (GET_CODE (XEXP (x, 0)) != NOT)
21019 s = t[0];
21020 else
21021 {
21022 if (GET_CODE (XEXP (x, 1)) == NOT)
21023 s = t[2];
21024 else
21025 s = t[1];
21026 }
21027
21028 fputs (s, file);
21029 }
21030 return;
21031
21032 case 'Q':
21033 if (! TARGET_MFCRF)
21034 return;
21035 fputc (',', file);
21036 /* FALLTHRU */
21037
21038 case 'R':
21039 /* X is a CR register. Print the mask for `mtcrf'. */
21040 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21041 output_operand_lossage ("invalid %%R value");
21042 else
21043 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21044 return;
21045
21046 case 's':
21047 /* Low 5 bits of 32 - value */
21048 if (! INT_P (x))
21049 output_operand_lossage ("invalid %%s value");
21050 else
21051 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21052 return;
21053
21054 case 't':
21055 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21056 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21057 {
21058 output_operand_lossage ("invalid %%t value");
21059 return;
21060 }
21061
21062 /* Bit 3 is OV bit. */
21063 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21064
21065 /* If we want bit 31, write a shift count of zero, not 32. */
21066 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21067 return;
21068
21069 case 'T':
21070 /* Print the symbolic name of a branch target register. */
21071 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21072 x = XVECEXP (x, 0, 0);
21073 if (!REG_P (x) || (REGNO (x) != LR_REGNO
21074 && REGNO (x) != CTR_REGNO))
21075 output_operand_lossage ("invalid %%T value");
21076 else if (REGNO (x) == LR_REGNO)
21077 fputs ("lr", file);
21078 else
21079 fputs ("ctr", file);
21080 return;
21081
21082 case 'u':
21083 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21084 for use in unsigned operand. */
21085 if (! INT_P (x))
21086 {
21087 output_operand_lossage ("invalid %%u value");
21088 return;
21089 }
21090
21091 uval = INTVAL (x);
21092 if ((uval & 0xffff) == 0)
21093 uval >>= 16;
21094
21095 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21096 return;
21097
21098 case 'v':
21099 /* High-order 16 bits of constant for use in signed operand. */
21100 if (! INT_P (x))
21101 output_operand_lossage ("invalid %%v value");
21102 else
21103 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21104 (INTVAL (x) >> 16) & 0xffff);
21105 return;
21106
21107 case 'U':
21108 /* Print `u' if this has an auto-increment or auto-decrement. */
21109 if (MEM_P (x)
21110 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21111 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21112 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21113 putc ('u', file);
21114 return;
21115
21116 case 'V':
21117 /* Print the trap code for this operand. */
21118 switch (GET_CODE (x))
21119 {
21120 case EQ:
21121 fputs ("eq", file); /* 4 */
21122 break;
21123 case NE:
21124 fputs ("ne", file); /* 24 */
21125 break;
21126 case LT:
21127 fputs ("lt", file); /* 16 */
21128 break;
21129 case LE:
21130 fputs ("le", file); /* 20 */
21131 break;
21132 case GT:
21133 fputs ("gt", file); /* 8 */
21134 break;
21135 case GE:
21136 fputs ("ge", file); /* 12 */
21137 break;
21138 case LTU:
21139 fputs ("llt", file); /* 2 */
21140 break;
21141 case LEU:
21142 fputs ("lle", file); /* 6 */
21143 break;
21144 case GTU:
21145 fputs ("lgt", file); /* 1 */
21146 break;
21147 case GEU:
21148 fputs ("lge", file); /* 5 */
21149 break;
21150 default:
21151 output_operand_lossage ("invalid %%V value");
21152 }
21153 break;
21154
21155 case 'w':
21156 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21157 normally. */
21158 if (INT_P (x))
21159 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21160 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21161 else
21162 print_operand (file, x, 0);
21163 return;
21164
21165 case 'x':
21166 /* X is a FPR or Altivec register used in a VSX context. */
21167 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
21168 output_operand_lossage ("invalid %%x value");
21169 else
21170 {
21171 int reg = REGNO (x);
21172 int vsx_reg = (FP_REGNO_P (reg)
21173 ? reg - 32
21174 : reg - FIRST_ALTIVEC_REGNO + 32);
21175
21176 #ifdef TARGET_REGNAMES
21177 if (TARGET_REGNAMES)
21178 fprintf (file, "%%vs%d", vsx_reg);
21179 else
21180 #endif
21181 fprintf (file, "%d", vsx_reg);
21182 }
21183 return;
21184
21185 case 'X':
21186 if (MEM_P (x)
21187 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21188 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21189 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21190 putc ('x', file);
21191 return;
21192
21193 case 'Y':
21194 /* Like 'L', for third word of TImode/PTImode */
21195 if (REG_P (x))
21196 fputs (reg_names[REGNO (x) + 2], file);
21197 else if (MEM_P (x))
21198 {
21199 machine_mode mode = GET_MODE (x);
21200 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21201 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21202 output_address (mode, plus_constant (Pmode,
21203 XEXP (XEXP (x, 0), 0), 8));
21204 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21205 output_address (mode, plus_constant (Pmode,
21206 XEXP (XEXP (x, 0), 0), 8));
21207 else
21208 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21209 if (small_data_operand (x, GET_MODE (x)))
21210 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21211 reg_names[SMALL_DATA_REG]);
21212 }
21213 return;
21214
21215 case 'z':
21216 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21217 x = XVECEXP (x, 0, 1);
21218 /* X is a SYMBOL_REF. Write out the name preceded by a
21219 period and without any trailing data in brackets. Used for function
21220 names. If we are configured for System V (or the embedded ABI) on
21221 the PowerPC, do not emit the period, since those systems do not use
21222 TOCs and the like. */
21223 if (!SYMBOL_REF_P (x))
21224 {
21225 output_operand_lossage ("invalid %%z value");
21226 return;
21227 }
21228
21229 /* For macho, check to see if we need a stub. */
21230 if (TARGET_MACHO)
21231 {
21232 const char *name = XSTR (x, 0);
21233 #if TARGET_MACHO
21234 if (darwin_emit_branch_islands
21235 && MACHOPIC_INDIRECT
21236 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21237 name = machopic_indirection_name (x, /*stub_p=*/true);
21238 #endif
21239 assemble_name (file, name);
21240 }
21241 else if (!DOT_SYMBOLS)
21242 assemble_name (file, XSTR (x, 0));
21243 else
21244 rs6000_output_function_entry (file, XSTR (x, 0));
21245 return;
21246
21247 case 'Z':
21248 /* Like 'L', for last word of TImode/PTImode. */
21249 if (REG_P (x))
21250 fputs (reg_names[REGNO (x) + 3], file);
21251 else if (MEM_P (x))
21252 {
21253 machine_mode mode = GET_MODE (x);
21254 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21255 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21256 output_address (mode, plus_constant (Pmode,
21257 XEXP (XEXP (x, 0), 0), 12));
21258 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21259 output_address (mode, plus_constant (Pmode,
21260 XEXP (XEXP (x, 0), 0), 12));
21261 else
21262 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21263 if (small_data_operand (x, GET_MODE (x)))
21264 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21265 reg_names[SMALL_DATA_REG]);
21266 }
21267 return;
21268
21269 /* Print AltiVec memory operand. */
21270 case 'y':
21271 {
21272 rtx tmp;
21273
21274 gcc_assert (MEM_P (x));
21275
21276 tmp = XEXP (x, 0);
21277
21278 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21279 && GET_CODE (tmp) == AND
21280 && CONST_INT_P (XEXP (tmp, 1))
21281 && INTVAL (XEXP (tmp, 1)) == -16)
21282 tmp = XEXP (tmp, 0);
21283 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21284 && GET_CODE (tmp) == PRE_MODIFY)
21285 tmp = XEXP (tmp, 1);
21286 if (REG_P (tmp))
21287 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21288 else
21289 {
21290 if (GET_CODE (tmp) != PLUS
21291 || !REG_P (XEXP (tmp, 0))
21292 || !REG_P (XEXP (tmp, 1)))
21293 {
21294 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21295 break;
21296 }
21297
21298 if (REGNO (XEXP (tmp, 0)) == 0)
21299 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21300 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21301 else
21302 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21303 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21304 }
21305 break;
21306 }
21307
21308 case 0:
21309 if (REG_P (x))
21310 fprintf (file, "%s", reg_names[REGNO (x)]);
21311 else if (MEM_P (x))
21312 {
21313 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21314 know the width from the mode. */
21315 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21316 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21317 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21318 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21319 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21320 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21321 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21322 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21323 else
21324 output_address (GET_MODE (x), XEXP (x, 0));
21325 }
21326 else if (toc_relative_expr_p (x, false,
21327 &tocrel_base_oac, &tocrel_offset_oac))
21328 /* This hack along with a corresponding hack in
21329 rs6000_output_addr_const_extra arranges to output addends
21330 where the assembler expects to find them. eg.
21331 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21332 without this hack would be output as "x@toc+4". We
21333 want "x+4@toc". */
21334 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21335 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21336 output_addr_const (file, XVECEXP (x, 0, 0));
21337 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21338 output_addr_const (file, XVECEXP (x, 0, 1));
21339 else
21340 output_addr_const (file, x);
21341 return;
21342
21343 case '&':
21344 if (const char *name = get_some_local_dynamic_name ())
21345 assemble_name (file, name);
21346 else
21347 output_operand_lossage ("'%%&' used without any "
21348 "local dynamic TLS references");
21349 return;
21350
21351 default:
21352 output_operand_lossage ("invalid %%xn code");
21353 }
21354 }
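
/* A few of the codes above as they might appear in an insn template
   (illustrative, not quoted from the machine description):

     "%w2"  low 16 bits of a constant, sign extended, e.g. "addi %0,%1,%w2"
     "%x0"  VSX register number of an FPR or Altivec register
     "%y1"  AltiVec/indexed memory operand, printed as "reg,reg"
     "%E0"  number of the EQ bit in a CR field  */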
21355 \f
21356 /* Print the address of an operand. */
21357
21358 void
21359 print_operand_address (FILE *file, rtx x)
21360 {
21361 if (REG_P (x))
21362 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21363 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21364 || GET_CODE (x) == LABEL_REF)
21365 {
21366 output_addr_const (file, x);
21367 if (small_data_operand (x, GET_MODE (x)))
21368 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21369 reg_names[SMALL_DATA_REG]);
21370 else
21371 gcc_assert (!TARGET_TOC);
21372 }
21373 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21374 && REG_P (XEXP (x, 1)))
21375 {
21376 if (REGNO (XEXP (x, 0)) == 0)
21377 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21378 reg_names[ REGNO (XEXP (x, 0)) ]);
21379 else
21380 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21381 reg_names[ REGNO (XEXP (x, 1)) ]);
21382 }
21383 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21384 && CONST_INT_P (XEXP (x, 1)))
21385 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21386 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21387 #if TARGET_MACHO
21388 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21389 && CONSTANT_P (XEXP (x, 1)))
21390 {
21391 fprintf (file, "lo16(");
21392 output_addr_const (file, XEXP (x, 1));
21393 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21394 }
21395 #endif
21396 #if TARGET_ELF
21397 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21398 && CONSTANT_P (XEXP (x, 1)))
21399 {
21400 output_addr_const (file, XEXP (x, 1));
21401 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21402 }
21403 #endif
21404 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21405 {
21406 /* This hack along with a corresponding hack in
21407 rs6000_output_addr_const_extra arranges to output addends
21408 where the assembler expects to find them. eg.
21409 (lo_sum (reg 9)
21410 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21411 without this hack would be output as "x@toc+8@l(9)". We
21412 want "x+8@toc@l(9)". */
21413 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21414 if (GET_CODE (x) == LO_SUM)
21415 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21416 else
21417 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21418 }
21419 else
21420 output_addr_const (file, x);
21421 }
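
/* So an address prints as, for example, "0(9)" for a plain register, "8(9)"
   for register plus offset, "9,10" for an indexed form, or "var@l(9)" for
   an ELF lo_sum (a sketch; the names change under -mregnames).  */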
21422 \f
21423 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21424
21425 static bool
21426 rs6000_output_addr_const_extra (FILE *file, rtx x)
21427 {
21428 if (GET_CODE (x) == UNSPEC)
21429 switch (XINT (x, 1))
21430 {
21431 case UNSPEC_TOCREL:
21432 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21433 && REG_P (XVECEXP (x, 0, 1))
21434 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21435 output_addr_const (file, XVECEXP (x, 0, 0));
21436 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21437 {
21438 if (INTVAL (tocrel_offset_oac) >= 0)
21439 fprintf (file, "+");
21440 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21441 }
21442 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21443 {
21444 putc ('-', file);
21445 assemble_name (file, toc_label_name);
21446 need_toc_init = 1;
21447 }
21448 else if (TARGET_ELF)
21449 fputs ("@toc", file);
21450 return true;
21451
21452 #if TARGET_MACHO
21453 case UNSPEC_MACHOPIC_OFFSET:
21454 output_addr_const (file, XVECEXP (x, 0, 0));
21455 putc ('-', file);
21456 machopic_output_function_base_name (file);
21457 return true;
21458 #endif
21459 }
21460 return false;
21461 }
21462 \f
21463 /* Target hook for assembling integer objects. The PowerPC version has
21464 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21465 is defined. It also needs to handle DI-mode objects on 64-bit
21466 targets. */
21467
21468 static bool
21469 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21470 {
21471 #ifdef RELOCATABLE_NEEDS_FIXUP
21472 /* Special handling for SI values. */
21473 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21474 {
21475 static int recurse = 0;
21476
21477 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21478 the .fixup section. Since the TOC section is already relocated, we
21479 don't need to mark it here. We used to skip the text section, but it
21480 should never be valid for relocated addresses to be placed in the text
21481 section. */
21482 if (DEFAULT_ABI == ABI_V4
21483 && (TARGET_RELOCATABLE || flag_pic > 1)
21484 && in_section != toc_section
21485 && !recurse
21486 && !CONST_SCALAR_INT_P (x)
21487 && CONSTANT_P (x))
21488 {
21489 char buf[256];
21490
21491 recurse = 1;
21492 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21493 fixuplabelno++;
21494 ASM_OUTPUT_LABEL (asm_out_file, buf);
21495 fprintf (asm_out_file, "\t.long\t(");
21496 output_addr_const (asm_out_file, x);
21497 fprintf (asm_out_file, ")@fixup\n");
21498 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21499 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21500 fprintf (asm_out_file, "\t.long\t");
21501 assemble_name (asm_out_file, buf);
21502 fprintf (asm_out_file, "\n\t.previous\n");
21503 recurse = 0;
21504 return true;
21505 }
21506 /* Remove initial .'s to turn a -mcall-aixdesc function
21507 address into the address of the descriptor, not the function
21508 itself. */
21509 else if (SYMBOL_REF_P (x)
21510 && XSTR (x, 0)[0] == '.'
21511 && DEFAULT_ABI == ABI_AIX)
21512 {
21513 const char *name = XSTR (x, 0);
21514 while (*name == '.')
21515 name++;
21516
21517 fprintf (asm_out_file, "\t.long\t%s\n", name);
21518 return true;
21519 }
21520 }
21521 #endif /* RELOCATABLE_NEEDS_FIXUP */
21522 return default_assemble_integer (x, size, aligned_p);
21523 }
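
/* Concretely, under -mrelocatable a constant address emitted into a data
   section comes out along these lines (a sketch):

     .LCP1:
	.long (sym)@fixup
	.section ".fixup","aw"
	.align 2
	.long .LCP1
	.previous  */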
21524
21525 /* Return a template string for assembly to emit when making an
21526 external call. FUNOP is the call mem argument operand number. */
21527
21528 static const char *
21529 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21530 {
21531 /* -Wformat-overflow workaround, without which gcc thinks that %u
21532 might produce 10 digits. */
21533 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21534
21535 char arg[12];
21536 arg[0] = 0;
21537 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21538 {
21539 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21540 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21541 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21542 sprintf (arg, "(%%&@tlsld)");
21543 else
21544 gcc_unreachable ();
21545 }
21546
21547 /* The magic 32768 offset here corresponds to the offset of
21548 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21549 char z[11];
21550 sprintf (z, "%%z%u%s", funop,
21551 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21552 ? "+32768" : ""));
21553
21554 static char str[32]; /* 2 spare */
21555 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21556 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21557 sibcall ? "" : "\n\tnop");
21558 else if (DEFAULT_ABI == ABI_V4)
21559 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21560 flag_pic ? "@plt" : "");
21561 #if TARGET_MACHO
/* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case.  */
21563 else if (DEFAULT_ABI == ABI_DARWIN)
21564 {
21565 /* The cookie is in operand func+2. */
21566 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21567 int cookie = INTVAL (operands[funop + 2]);
21568 if (cookie & CALL_LONG)
21569 {
21570 tree funname = get_identifier (XSTR (operands[funop], 0));
21571 tree labelname = get_prev_label (funname);
21572 gcc_checking_assert (labelname && !sibcall);
21573
21574 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21575 instruction will reach 'foo', otherwise link as 'bl L42'".
21576 "L42" should be a 'branch island', that will do a far jump to
21577 'foo'. Branch islands are generated in
21578 macho_branch_islands(). */
21579 sprintf (str, "jbsr %%z%u,%.10s", funop,
21580 IDENTIFIER_POINTER (labelname));
21581 }
21582 else
21583 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21584 after the call. */
21585 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21586 }
21587 #endif
21588 else
21589 gcc_unreachable ();
21590 return str;
21591 }
21592
21593 const char *
21594 rs6000_call_template (rtx *operands, unsigned int funop)
21595 {
21596 return rs6000_call_template_1 (operands, funop, false);
21597 }
21598
21599 const char *
21600 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21601 {
21602 return rs6000_call_template_1 (operands, funop, true);
21603 }
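
/* To illustrate, a direct call to "foo" comes out roughly as:

     AIX/ELFv2:          "bl foo"  followed by a nop for the TOC restore
     V4 with -fPIC:      "bl foo@plt"
     V4 sibcall, -fPIC:  "b foo@plt"  (no nop)  */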
21604
21605 /* As above, for indirect calls. */
21606
21607 static const char *
21608 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21609 bool sibcall)
21610 {
21611 /* -Wformat-overflow workaround, without which gcc thinks that %u
21612 might produce 10 digits. */
21613 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21614
21615 static char str[144]; /* 1 spare */
21616 char *s = str;
21617 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21618
21619 if (DEFAULT_ABI == ABI_AIX)
21620 s += sprintf (s,
21621 "l%s 2,%%%u\n\t",
21622 ptrload, funop + 2);
21623
21624 /* We don't need the extra code to stop indirect call speculation if
21625 calling via LR. */
21626 bool speculate = (TARGET_MACHO
21627 || rs6000_speculate_indirect_jumps
21628 || (REG_P (operands[funop])
21629 && REGNO (operands[funop]) == LR_REGNO));
21630
21631 if (!TARGET_MACHO && HAVE_AS_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21632 {
21633 const char *rel64 = TARGET_64BIT ? "64" : "";
21634 char tls[29];
21635 tls[0] = 0;
21636 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21637 {
21638 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21639 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21640 rel64, funop + 1);
21641 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21642 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21643 rel64);
21644 else
21645 gcc_unreachable ();
21646 }
21647
21648 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21649 && flag_pic == 2 ? "+32768" : "");
21650 if (!speculate)
21651 {
21652 s += sprintf (s,
21653 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21654 tls, rel64, funop, addend);
21655 s += sprintf (s, "crset 2\n\t");
21656 }
21657 s += sprintf (s,
21658 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21659 tls, rel64, funop, addend);
21660 }
21661 else if (!speculate)
21662 s += sprintf (s, "crset 2\n\t");
21663
21664 if (DEFAULT_ABI == ABI_AIX)
21665 {
21666 if (speculate)
21667 sprintf (s,
21668 "b%%T%ul\n\t"
21669 "l%s 2,%%%u(1)",
21670 funop, ptrload, funop + 3);
21671 else
21672 sprintf (s,
21673 "beq%%T%ul-\n\t"
21674 "l%s 2,%%%u(1)",
21675 funop, ptrload, funop + 3);
21676 }
21677 else if (DEFAULT_ABI == ABI_ELFv2)
21678 {
21679 if (speculate)
21680 sprintf (s,
21681 "b%%T%ul\n\t"
21682 "l%s 2,%%%u(1)",
21683 funop, ptrload, funop + 2);
21684 else
21685 sprintf (s,
21686 "beq%%T%ul-\n\t"
21687 "l%s 2,%%%u(1)",
21688 funop, ptrload, funop + 2);
21689 }
21690 else
21691 {
21692 if (speculate)
21693 sprintf (s,
21694 "b%%T%u%s",
21695 funop, sibcall ? "" : "l");
21696 else
21697 sprintf (s,
21698 "beq%%T%u%s-%s",
21699 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21700 }
21701 return str;
21702 }
21703
21704 const char *
21705 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21706 {
21707 return rs6000_indirect_call_template_1 (operands, funop, false);
21708 }
21709
21710 const char *
21711 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21712 {
21713 return rs6000_indirect_call_template_1 (operands, funop, true);
21714 }
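
/* As an example, an ELFv2 indirect call through CTR with speculation
   allowed comes out roughly as:

     bctrl
     ld 2,24(1)

   while with -mno-speculate-indirect-jumps the "crset 2" / "beqctrl-"
   pair above is used instead of the plain bctrl.  */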
21715
21716 #if HAVE_AS_PLTSEQ
21717 /* Output indirect call insns.
21718 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21719 const char *
21720 rs6000_pltseq_template (rtx *operands, int which)
21721 {
21722 const char *rel64 = TARGET_64BIT ? "64" : "";
21723 char tls[28];
21724 tls[0] = 0;
21725 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21726 {
21727 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21728 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21729 rel64);
21730 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21731 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21732 rel64);
21733 else
21734 gcc_unreachable ();
21735 }
21736
21737 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21738 static char str[96]; /* 15 spare */
21739 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21740 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21741 && flag_pic == 2 ? "+32768" : "");
21742 switch (which)
21743 {
21744 case 0:
21745 sprintf (str,
21746 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21747 "st%s",
21748 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21749 break;
21750 case 1:
21751 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21752 sprintf (str,
21753 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21754 "lis %%0,0",
21755 tls, off, rel64);
21756 else
21757 sprintf (str,
21758 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21759 "addis %%0,%%1,0",
21760 tls, off, rel64, addend);
21761 break;
21762 case 2:
21763 sprintf (str,
21764 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21765 "l%s %%0,0(%%1)",
21766 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21767 TARGET_64BIT ? "d" : "wz");
21768 break;
21769 case 3:
21770 sprintf (str,
21771 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21772 "mtctr %%1",
21773 tls, rel64, addend);
21774 break;
21775 default:
21776 gcc_unreachable ();
21777 }
21778 return str;
21779 }
21780 #endif
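
/* Putting the pieces above together, a 64-bit ELFv2 inline PLT call to
   "foo" expands to something like this, with r9 as the scratch register
   (a sketch; the .reloc directives tell the linker which insn belongs to
   which part of the sequence):

     .reloc .,R_PPC64_PLTSEQ,foo
     std 2,24(1)
     .reloc .,R_PPC64_PLT16_HA,foo
     addis 9,2,0
     .reloc .,R_PPC64_PLT16_LO_DS,foo
     ld 9,0(9)
     .reloc .,R_PPC64_PLTSEQ,foo
     mtctr 9
     .reloc .,R_PPC64_PLTCALL,foo
     bctrl
     ld 2,24(1)  */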
21781
21782 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21783 /* Emit an assembler directive to set symbol visibility for DECL to
21784 VISIBILITY_TYPE. */
21785
21786 static void
21787 rs6000_assemble_visibility (tree decl, int vis)
21788 {
21789 if (TARGET_XCOFF)
21790 return;
21791
21792 /* Functions need to have their entry point symbol visibility set as
21793 well as their descriptor symbol visibility. */
21794 if (DEFAULT_ABI == ABI_AIX
21795 && DOT_SYMBOLS
21796 && TREE_CODE (decl) == FUNCTION_DECL)
21797 {
21798 static const char * const visibility_types[] = {
21799 NULL, "protected", "hidden", "internal"
21800 };
21801
21802 const char *name, *type;
21803
21804 name = ((* targetm.strip_name_encoding)
21805 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21806 type = visibility_types[vis];
21807
21808 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21809 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21810 }
21811 else
21812 default_assemble_visibility (decl, vis);
21813 }
21814 #endif
21815 \f
21816 enum rtx_code
21817 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21818 {
/* Reversal of FP compares needs care -- an ordered compare becomes
   an unordered compare and vice versa.  */
21821 if (mode == CCFPmode
21822 && (!flag_finite_math_only
21823 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21824 || code == UNEQ || code == LTGT))
21825 return reverse_condition_maybe_unordered (code);
21826 else
21827 return reverse_condition (code);
21828 }
21829
21830 /* Generate a compare for CODE. Return a brand-new rtx that
21831 represents the result of the compare. */
21832
21833 static rtx
21834 rs6000_generate_compare (rtx cmp, machine_mode mode)
21835 {
21836 machine_mode comp_mode;
21837 rtx compare_result;
21838 enum rtx_code code = GET_CODE (cmp);
21839 rtx op0 = XEXP (cmp, 0);
21840 rtx op1 = XEXP (cmp, 1);
21841
21842 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21843 comp_mode = CCmode;
21844 else if (FLOAT_MODE_P (mode))
21845 comp_mode = CCFPmode;
21846 else if (code == GTU || code == LTU
21847 || code == GEU || code == LEU)
21848 comp_mode = CCUNSmode;
21849 else if ((code == EQ || code == NE)
21850 && unsigned_reg_p (op0)
21851 && (unsigned_reg_p (op1)
21852 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
/* These are unsigned values; perhaps there will be a later
   ordering compare that can be shared with this one.  */
21855 comp_mode = CCUNSmode;
21856 else
21857 comp_mode = CCmode;
21858
21859 /* If we have an unsigned compare, make sure we don't have a signed value as
21860 an immediate. */
21861 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21862 && INTVAL (op1) < 0)
21863 {
21864 op0 = copy_rtx_if_shared (op0);
21865 op1 = force_reg (GET_MODE (op0), op1);
21866 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21867 }
21868
21869 /* First, the compare. */
21870 compare_result = gen_reg_rtx (comp_mode);
21871
21872 /* IEEE 128-bit support in VSX registers when we do not have hardware
21873 support. */
21874 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21875 {
21876 rtx libfunc = NULL_RTX;
21877 bool check_nan = false;
21878 rtx dest;
21879
21880 switch (code)
21881 {
21882 case EQ:
21883 case NE:
21884 libfunc = optab_libfunc (eq_optab, mode);
21885 break;
21886
21887 case GT:
21888 case GE:
21889 libfunc = optab_libfunc (ge_optab, mode);
21890 break;
21891
21892 case LT:
21893 case LE:
21894 libfunc = optab_libfunc (le_optab, mode);
21895 break;
21896
21897 case UNORDERED:
21898 case ORDERED:
21899 libfunc = optab_libfunc (unord_optab, mode);
21900 code = (code == UNORDERED) ? NE : EQ;
21901 break;
21902
21903 case UNGE:
21904 case UNGT:
21905 check_nan = true;
21906 libfunc = optab_libfunc (ge_optab, mode);
21907 code = (code == UNGE) ? GE : GT;
21908 break;
21909
21910 case UNLE:
21911 case UNLT:
21912 check_nan = true;
21913 libfunc = optab_libfunc (le_optab, mode);
21914 code = (code == UNLE) ? LE : LT;
21915 break;
21916
21917 case UNEQ:
21918 case LTGT:
21919 check_nan = true;
21920 libfunc = optab_libfunc (eq_optab, mode);
code = (code == UNEQ) ? EQ : NE;
21922 break;
21923
21924 default:
21925 gcc_unreachable ();
21926 }
21927
21928 gcc_assert (libfunc);
21929
21930 if (!check_nan)
21931 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21932 SImode, op0, mode, op1, mode);
21933
21934 /* The library signals an exception for signalling NaNs, so we need to
21935 handle isgreater, etc. by first checking isordered. */
21936 else
21937 {
21938 rtx ne_rtx, normal_dest, unord_dest;
21939 rtx unord_func = optab_libfunc (unord_optab, mode);
21940 rtx join_label = gen_label_rtx ();
21941 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21942 rtx unord_cmp = gen_reg_rtx (comp_mode);
21943
21945 /* Test for either value being a NaN. */
21946 gcc_assert (unord_func);
21947 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21948 SImode, op0, mode, op1, mode);
21949
/* Set the result to 1 (true) if either value is a NaN, and jump
   to the join label.  */
21952 dest = gen_reg_rtx (SImode);
21953 emit_move_insn (dest, const1_rtx);
21954 emit_insn (gen_rtx_SET (unord_cmp,
21955 gen_rtx_COMPARE (comp_mode, unord_dest,
21956 const0_rtx)));
21957
21958 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21959 emit_jump_insn (gen_rtx_SET (pc_rtx,
21960 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21961 join_ref,
21962 pc_rtx)));
21963
21964 /* Do the normal comparison, knowing that the values are not
21965 NaNs. */
21966 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21967 SImode, op0, mode, op1, mode);
21968
21969 emit_insn (gen_cstoresi4 (dest,
21970 gen_rtx_fmt_ee (code, SImode, normal_dest,
21971 const0_rtx),
21972 normal_dest, const0_rtx));
21973
/* Join the NaN and non-NaN paths.  Compare dest against 0.  */
21975 emit_label (join_label);
21976 code = NE;
21977 }
21978
21979 emit_insn (gen_rtx_SET (compare_result,
21980 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21981 }
21982
21983 else
21984 {
21985 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21986 CLOBBERs to match cmptf_internal2 pattern. */
21987 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21988 && FLOAT128_IBM_P (GET_MODE (op0))
21989 && TARGET_HARD_FLOAT)
21990 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21991 gen_rtvec (10,
21992 gen_rtx_SET (compare_result,
21993 gen_rtx_COMPARE (comp_mode, op0, op1)),
21994 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21995 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21996 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21997 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21998 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21999 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22000 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22001 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22002 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22003 else if (GET_CODE (op1) == UNSPEC
22004 && XINT (op1, 1) == UNSPEC_SP_TEST)
22005 {
22006 rtx op1b = XVECEXP (op1, 0, 0);
22007 comp_mode = CCEQmode;
22008 compare_result = gen_reg_rtx (CCEQmode);
22009 if (TARGET_64BIT)
22010 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22011 else
22012 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22013 }
22014 else
22015 emit_insn (gen_rtx_SET (compare_result,
22016 gen_rtx_COMPARE (comp_mode, op0, op1)));
22017 }
22018
22019 /* Some kinds of FP comparisons need an OR operation;
22020 under flag_finite_math_only we don't bother. */
22021 if (FLOAT_MODE_P (mode)
22022 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22023 && !flag_finite_math_only
22024 && (code == LE || code == GE
22025 || code == UNEQ || code == LTGT
22026 || code == UNGT || code == UNLT))
22027 {
22028 enum rtx_code or1, or2;
22029 rtx or1_rtx, or2_rtx, compare2_rtx;
22030 rtx or_result = gen_reg_rtx (CCEQmode);
22031
22032 switch (code)
22033 {
22034 case LE: or1 = LT; or2 = EQ; break;
22035 case GE: or1 = GT; or2 = EQ; break;
22036 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22037 case LTGT: or1 = LT; or2 = GT; break;
22038 case UNGT: or1 = UNORDERED; or2 = GT; break;
22039 case UNLT: or1 = UNORDERED; or2 = LT; break;
22040 default: gcc_unreachable ();
22041 }
22042 validate_condition_mode (or1, comp_mode);
22043 validate_condition_mode (or2, comp_mode);
22044 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22045 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22046 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22047 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22048 const_true_rtx);
22049 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22050
22051 compare_result = or_result;
22052 code = EQ;
22053 }
22054
22055 validate_condition_mode (code, GET_MODE (compare_result));
22056
22057 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22058 }
22059
22060 \f
22061 /* Return the diagnostic message string if the binary operation OP is
22062 not permitted on TYPE1 and TYPE2, NULL otherwise. */
22063
22064 static const char*
22065 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22066 const_tree type1,
22067 const_tree type2)
22068 {
22069 machine_mode mode1 = TYPE_MODE (type1);
22070 machine_mode mode2 = TYPE_MODE (type2);
22071
22072 /* For complex modes, use the inner type. */
22073 if (COMPLEX_MODE_P (mode1))
22074 mode1 = GET_MODE_INNER (mode1);
22075
22076 if (COMPLEX_MODE_P (mode2))
22077 mode2 = GET_MODE_INNER (mode2);
22078
22079 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22080 double to intermix unless -mfloat128-convert. */
22081 if (mode1 == mode2)
22082 return NULL;
22083
22084 if (!TARGET_FLOAT128_CVT)
22085 {
22086 if ((mode1 == KFmode && mode2 == IFmode)
22087 || (mode1 == IFmode && mode2 == KFmode))
22088 return N_("__float128 and __ibm128 cannot be used in the same "
22089 "expression");
22090
22091 if (TARGET_IEEEQUAD
22092 && ((mode1 == IFmode && mode2 == TFmode)
22093 || (mode1 == TFmode && mode2 == IFmode)))
22094 return N_("__ibm128 and long double cannot be used in the same "
22095 "expression");
22096
22097 if (!TARGET_IEEEQUAD
22098 && ((mode1 == KFmode && mode2 == TFmode)
22099 || (mode1 == TFmode && mode2 == KFmode)))
22100 return N_("__float128 and long double cannot be used in the same "
22101 "expression");
22102 }
22103
22104 return NULL;
22105 }
22106
22107 \f
22108 /* Expand floating point conversion to/from __float128 and __ibm128. */
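/* Three strategies, tried in order: a plain move/float_extend between
   the 128-bit modes, a hardware conversion instruction from the table
   below when TARGET_FLOAT128_HW, else the corresponding libgcc call
   looked up via the conversion optab.  */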
22109
22110 void
22111 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22112 {
22113 machine_mode dest_mode = GET_MODE (dest);
22114 machine_mode src_mode = GET_MODE (src);
22115 convert_optab cvt = unknown_optab;
22116 bool do_move = false;
22117 rtx libfunc = NULL_RTX;
22118 rtx dest2;
22119 typedef rtx (*rtx_2func_t) (rtx, rtx);
22120 rtx_2func_t hw_convert = (rtx_2func_t)0;
22121 size_t kf_or_tf;
22122
22123 struct hw_conv_t {
22124 rtx_2func_t from_df;
22125 rtx_2func_t from_sf;
22126 rtx_2func_t from_si_sign;
22127 rtx_2func_t from_si_uns;
22128 rtx_2func_t from_di_sign;
22129 rtx_2func_t from_di_uns;
22130 rtx_2func_t to_df;
22131 rtx_2func_t to_sf;
22132 rtx_2func_t to_si_sign;
22133 rtx_2func_t to_si_uns;
22134 rtx_2func_t to_di_sign;
22135 rtx_2func_t to_di_uns;
22136 } hw_conversions[2] = {
22137 /* conversions to/from KFmode */
22138 {
22139 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22140 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22141 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22142 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22143 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22144 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22145 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22146 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22147 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22148 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22149 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22150 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22151 },
22152
22153 /* conversions to/from TFmode */
22154 {
22155 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22156 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22157 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22158 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22159 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22160 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22161 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22162 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22163 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22164 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22165 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22166 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22167 },
22168 };
22169
22170 if (dest_mode == src_mode)
22171 gcc_unreachable ();
22172
22173 /* Eliminate memory operations. */
22174 if (MEM_P (src))
22175 src = force_reg (src_mode, src);
22176
22177 if (MEM_P (dest))
22178 {
22179 rtx tmp = gen_reg_rtx (dest_mode);
22180 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22181 rs6000_emit_move (dest, tmp, dest_mode);
22182 return;
22183 }
22184
22185 /* Convert to IEEE 128-bit floating point. */
22186 if (FLOAT128_IEEE_P (dest_mode))
22187 {
22188 if (dest_mode == KFmode)
22189 kf_or_tf = 0;
22190 else if (dest_mode == TFmode)
22191 kf_or_tf = 1;
22192 else
22193 gcc_unreachable ();
22194
22195 switch (src_mode)
22196 {
22197 case E_DFmode:
22198 cvt = sext_optab;
22199 hw_convert = hw_conversions[kf_or_tf].from_df;
22200 break;
22201
22202 case E_SFmode:
22203 cvt = sext_optab;
22204 hw_convert = hw_conversions[kf_or_tf].from_sf;
22205 break;
22206
22207 case E_KFmode:
22208 case E_IFmode:
22209 case E_TFmode:
22210 if (FLOAT128_IBM_P (src_mode))
22211 cvt = sext_optab;
22212 else
22213 do_move = true;
22214 break;
22215
22216 case E_SImode:
22217 if (unsigned_p)
22218 {
22219 cvt = ufloat_optab;
22220 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22221 }
22222 else
22223 {
22224 cvt = sfloat_optab;
22225 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22226 }
22227 break;
22228
22229 case E_DImode:
22230 if (unsigned_p)
22231 {
22232 cvt = ufloat_optab;
22233 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22234 }
22235 else
22236 {
22237 cvt = sfloat_optab;
22238 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22239 }
22240 break;
22241
22242 default:
22243 gcc_unreachable ();
22244 }
22245 }
22246
22247 /* Convert from IEEE 128-bit floating point. */
22248 else if (FLOAT128_IEEE_P (src_mode))
22249 {
22250 if (src_mode == KFmode)
22251 kf_or_tf = 0;
22252 else if (src_mode == TFmode)
22253 kf_or_tf = 1;
22254 else
22255 gcc_unreachable ();
22256
22257 switch (dest_mode)
22258 {
22259 case E_DFmode:
22260 cvt = trunc_optab;
22261 hw_convert = hw_conversions[kf_or_tf].to_df;
22262 break;
22263
22264 case E_SFmode:
22265 cvt = trunc_optab;
22266 hw_convert = hw_conversions[kf_or_tf].to_sf;
22267 break;
22268
22269 case E_KFmode:
22270 case E_IFmode:
22271 case E_TFmode:
22272 if (FLOAT128_IBM_P (dest_mode))
22273 cvt = trunc_optab;
22274 else
22275 do_move = true;
22276 break;
22277
22278 case E_SImode:
22279 if (unsigned_p)
22280 {
22281 cvt = ufix_optab;
22282 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22283 }
22284 else
22285 {
22286 cvt = sfix_optab;
22287 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22288 }
22289 break;
22290
22291 case E_DImode:
22292 if (unsigned_p)
22293 {
22294 cvt = ufix_optab;
22295 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22296 }
22297 else
22298 {
22299 cvt = sfix_optab;
22300 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22301 }
22302 break;
22303
22304 default:
22305 gcc_unreachable ();
22306 }
22307 }
22308
22309 /* Both IBM format. */
22310 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22311 do_move = true;
22312
22313 else
22314 gcc_unreachable ();
22315
22316 /* Handle conversion between TFmode/KFmode/IFmode. */
22317 if (do_move)
22318 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22319
22320 /* Handle conversion if we have hardware support. */
22321 else if (TARGET_FLOAT128_HW && hw_convert)
22322 emit_insn ((hw_convert) (dest, src));
22323
22324 /* Call an external function to do the conversion. */
22325 else if (cvt != unknown_optab)
22326 {
22327 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22328 gcc_assert (libfunc != NULL_RTX);
22329
22330 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22331 src, src_mode);
22332
22333 gcc_assert (dest2 != NULL_RTX);
22334 if (!rtx_equal_p (dest, dest2))
22335 emit_move_insn (dest, dest2);
22336 }
22337
22338 else
22339 gcc_unreachable ();
22340
22341 return;
22342 }
22343
22344 \f
22345 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22346 can be used as that dest register. Return the dest register. */
22347
22348 rtx
22349 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22350 {
22351 if (op2 == const0_rtx)
22352 return op1;
22353
22354 if (GET_CODE (scratch) == SCRATCH)
22355 scratch = gen_reg_rtx (mode);
22356
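/* XOR and subtraction both yield zero exactly when OP1 == OP2; use XOR
   when OP2 is suitable for the logical instructions (xori/xoris or a
   register), otherwise add the negation of OP2.  */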
22357 if (logical_operand (op2, mode))
22358 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22359 else
22360 emit_insn (gen_rtx_SET (scratch,
22361 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22362
22363 return scratch;
22364 }
22365
22366 void
22367 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22368 {
22369 rtx condition_rtx;
22370 machine_mode op_mode;
22371 enum rtx_code cond_code;
22372 rtx result = operands[0];
22373
22374 condition_rtx = rs6000_generate_compare (operands[1], mode);
22375 cond_code = GET_CODE (condition_rtx);
22376
22377 if (cond_code == NE
22378 || cond_code == GE || cond_code == LE
22379 || cond_code == GEU || cond_code == LEU
22380 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22381 {
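/* There is no single CR bit for these codes; compute the reverse
   condition and compare it against 0 in CCEQmode, which produces the
   complemented bit we want.  */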
22382 rtx not_result = gen_reg_rtx (CCEQmode);
22383 rtx not_op, rev_cond_rtx;
22384 machine_mode cc_mode;
22385
22386 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22387
22388 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22389 SImode, XEXP (condition_rtx, 0), const0_rtx);
22390 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22391 emit_insn (gen_rtx_SET (not_result, not_op));
22392 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22393 }
22394
22395 op_mode = GET_MODE (XEXP (operands[1], 0));
22396 if (op_mode == VOIDmode)
22397 op_mode = GET_MODE (XEXP (operands[1], 1));
22398
22399 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22400 {
22401 PUT_MODE (condition_rtx, DImode);
22402 convert_move (result, condition_rtx, 0);
22403 }
22404 else
22405 {
22406 PUT_MODE (condition_rtx, SImode);
22407 emit_insn (gen_rtx_SET (result, condition_rtx));
22408 }
22409 }
22410
22411 /* Emit a conditional branch testing OPERANDS[0], a comparison in MODE,
jumping to the label in OPERANDS[3].  */
22412
22413 void
22414 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22415 {
22416 rtx condition_rtx, loc_ref;
22417
22418 condition_rtx = rs6000_generate_compare (operands[0], mode);
22419 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22420 emit_jump_insn (gen_rtx_SET (pc_rtx,
22421 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22422 loc_ref, pc_rtx)));
22423 }
22424
22425 /* Return the string to output a conditional branch to LABEL, which is
22426 the operand template of the label, or NULL if the branch is really a
22427 conditional return.
22428
22429 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22430 condition code register and its mode specifies what kind of
22431 comparison we made.
22432
22433 REVERSED is nonzero if we should reverse the sense of the comparison.
22434
22435 INSN is the insn. */
22436
22437 char *
22438 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22439 {
22440 static char string[64];
22441 enum rtx_code code = GET_CODE (op);
22442 rtx cc_reg = XEXP (op, 0);
22443 machine_mode mode = GET_MODE (cc_reg);
22444 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22445 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22446 int really_reversed = reversed ^ need_longbranch;
22447 char *s = string;
22448 const char *ccode;
22449 const char *pred;
22450 rtx note;
22451
22452 validate_condition_mode (code, mode);
22453
22454 /* Work out which way this really branches. We could use
22455 reverse_condition_maybe_unordered here always but this
22456 makes the resulting assembler clearer. */
22457 if (really_reversed)
22458 {
22459 /* Reversal of FP compares takes care -- an ordered compare
22460 becomes an unordered compare and vice versa. */
22461 if (mode == CCFPmode)
22462 code = reverse_condition_maybe_unordered (code);
22463 else
22464 code = reverse_condition (code);
22465 }
22466
22467 switch (code)
22468 {
22469 /* Not all of these are actually distinct opcodes, but
22470 we distinguish them for clarity of the resulting assembler. */
22471 case NE: case LTGT:
22472 ccode = "ne"; break;
22473 case EQ: case UNEQ:
22474 ccode = "eq"; break;
22475 case GE: case GEU:
22476 ccode = "ge"; break;
22477 case GT: case GTU: case UNGT:
22478 ccode = "gt"; break;
22479 case LE: case LEU:
22480 ccode = "le"; break;
22481 case LT: case LTU: case UNLT:
22482 ccode = "lt"; break;
22483 case UNORDERED: ccode = "un"; break;
22484 case ORDERED: ccode = "nu"; break;
22485 case UNGE: ccode = "nl"; break;
22486 case UNLE: ccode = "ng"; break;
22487 default:
22488 gcc_unreachable ();
22489 }
22490
22491 /* Maybe we have a guess as to how likely the branch is. */
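/* A "+" suffix on the mnemonic encodes a static predict-taken hint,
   "-" a predict-not-taken hint.  */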
22492 pred = "";
22493 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22494 if (note != NULL_RTX)
22495 {
22496 /* PROB is the difference from 50%. */
22497 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22498 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22499
22500 /* Only hint for highly probable/improbable branches on newer cpus when
22501 we have real profile data, as static prediction overrides processor
22502 dynamic prediction. For older cpus we may as well always hint, but
22503 assume not taken for branches that are very close to 50% as a
22504 mispredicted taken branch is more expensive than a
22505 mispredicted not-taken branch. */
22506 if (rs6000_always_hint
22507 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22508 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22509 && br_prob_note_reliable_p (note)))
22510 {
22511 if (abs (prob) > REG_BR_PROB_BASE / 20
22512 && ((prob > 0) ^ need_longbranch))
22513 pred = "+";
22514 else
22515 pred = "-";
22516 }
22517 }
22518
22519 if (label == NULL)
22520 s += sprintf (s, "b%slr%s ", ccode, pred);
22521 else
22522 s += sprintf (s, "b%s%s ", ccode, pred);
22523
22524 /* We need to escape any '%' characters in the reg_names string.
22525 Assume they'd only be the first character.... */
22526 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22527 *s++ = '%';
22528 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22529
22530 if (label != NULL)
22531 {
22532 /* If the branch distance was too far, we may have to use an
22533 unconditional branch to go the distance. */
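/* E.g. an out-of-range "beq 0,.L5" is emitted instead as "bne 0,$+8"
   followed by "b .L5" (the condition was already reversed above).  */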
22534 if (need_longbranch)
22535 s += sprintf (s, ",$+8\n\tb %s", label);
22536 else
22537 s += sprintf (s, ",%s", label);
22538 }
22539
22540 return string;
22541 }
22542
22543 /* Return insn for VSX or Altivec comparisons. */
22544
22545 static rtx
22546 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22547 {
22548 rtx mask;
22549 machine_mode mode = GET_MODE (op0);
22550
22551 switch (code)
22552 {
22553 default:
22554 break;
22555
22556 case GE:
22557 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22558 return NULL_RTX;
22559 /* FALLTHRU */
22560
22561 case EQ:
22562 case GT:
22563 case GTU:
22564 case ORDERED:
22565 case UNORDERED:
22566 case UNEQ:
22567 case LTGT:
22568 mask = gen_reg_rtx (mode);
22569 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22570 return mask;
22571 }
22572
22573 return NULL_RTX;
22574 }
22575
22576 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22577 DMODE is expected destination mode. This is a recursive function. */
22578
22579 static rtx
22580 rs6000_emit_vector_compare (enum rtx_code rcode,
22581 rtx op0, rtx op1,
22582 machine_mode dmode)
22583 {
22584 rtx mask;
22585 bool swap_operands = false;
22586 bool try_again = false;
22587
22588 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22589 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22590
22591 /* See if the comparison works as is. */
22592 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22593 if (mask)
22594 return mask;
22595
22596 switch (rcode)
22597 {
22598 case LT:
22599 rcode = GT;
22600 swap_operands = true;
22601 try_again = true;
22602 break;
22603 case LTU:
22604 rcode = GTU;
22605 swap_operands = true;
22606 try_again = true;
22607 break;
22608 case NE:
22609 case UNLE:
22610 case UNLT:
22611 case UNGE:
22612 case UNGT:
22613 /* Invert condition and try again.
22614 e.g., A != B becomes ~(A==B). */
22615 {
22616 enum rtx_code rev_code;
22617 enum insn_code nor_code;
22618 rtx mask2;
22619
22620 rev_code = reverse_condition_maybe_unordered (rcode);
22621 if (rev_code == UNKNOWN)
22622 return NULL_RTX;
22623
22624 nor_code = optab_handler (one_cmpl_optab, dmode);
22625 if (nor_code == CODE_FOR_nothing)
22626 return NULL_RTX;
22627
22628 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22629 if (!mask2)
22630 return NULL_RTX;
22631
22632 mask = gen_reg_rtx (dmode);
22633 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22634 return mask;
22635 }
22636 break;
22637 case GE:
22638 case GEU:
22639 case LE:
22640 case LEU:
22641 /* Try GT/GTU/LT/LTU OR EQ */
22642 {
22643 rtx c_rtx, eq_rtx;
22644 enum insn_code ior_code;
22645 enum rtx_code new_code;
22646
22647 switch (rcode)
22648 {
22649 case GE:
22650 new_code = GT;
22651 break;
22652
22653 case GEU:
22654 new_code = GTU;
22655 break;
22656
22657 case LE:
22658 new_code = LT;
22659 break;
22660
22661 case LEU:
22662 new_code = LTU;
22663 break;
22664
22665 default:
22666 gcc_unreachable ();
22667 }
22668
22669 ior_code = optab_handler (ior_optab, dmode);
22670 if (ior_code == CODE_FOR_nothing)
22671 return NULL_RTX;
22672
22673 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22674 if (!c_rtx)
22675 return NULL_RTX;
22676
22677 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22678 if (!eq_rtx)
22679 return NULL_RTX;
22680
22681 mask = gen_reg_rtx (dmode);
22682 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22683 return mask;
22684 }
22685 break;
22686 default:
22687 return NULL_RTX;
22688 }
22689
22690 if (try_again)
22691 {
22692 if (swap_operands)
22693 std::swap (op0, op1);
22694
22695 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22696 if (mask)
22697 return mask;
22698 }
22699
22700 /* You only get two chances. */
22701 return NULL_RTX;
22702 }
22703
22704 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22705 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22706 operands for the relation operation COND. */
22707
22708 int
22709 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22710 rtx cond, rtx cc_op0, rtx cc_op1)
22711 {
22712 machine_mode dest_mode = GET_MODE (dest);
22713 machine_mode mask_mode = GET_MODE (cc_op0);
22714 enum rtx_code rcode = GET_CODE (cond);
22715 machine_mode cc_mode = CCmode;
22716 rtx mask;
22717 rtx cond2;
22718 bool invert_move = false;
22719
22720 if (VECTOR_UNIT_NONE_P (dest_mode))
22721 return 0;
22722
22723 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22724 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22725
22726 switch (rcode)
22727 {
22728 /* For conditions without a direct vector compare, invert the condition
22729 and swap the true/false arms of the select instead.  */
22730 case NE:
22731 case UNLE:
22732 case UNLT:
22733 case UNGE:
22734 case UNGT:
22735 /* Invert condition and try again.
22736 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22737 invert_move = true;
22738 rcode = reverse_condition_maybe_unordered (rcode);
22739 if (rcode == UNKNOWN)
22740 return 0;
22741 break;
22742
22743 case GE:
22744 case LE:
22745 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22746 {
22747 /* Invert condition to avoid compound test. */
22748 invert_move = true;
22749 rcode = reverse_condition (rcode);
22750 }
22751 break;
22752
22753 case GTU:
22754 case GEU:
22755 case LTU:
22756 case LEU:
22757 /* Mark unsigned tests with CCUNSmode. */
22758 cc_mode = CCUNSmode;
22759
22760 /* Invert condition to avoid compound test if necessary. */
22761 if (rcode == GEU || rcode == LEU)
22762 {
22763 invert_move = true;
22764 rcode = reverse_condition (rcode);
22765 }
22766 break;
22767
22768 default:
22769 break;
22770 }
22771
22772 /* Get the vector mask for the given relational operations. */
22773 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22774
22775 if (!mask)
22776 return 0;
22777
22778 if (invert_move)
22779 std::swap (op_true, op_false);
22780
22781 /* The compare mask lanes are -1/0, so selects whose arms are the
constants -1 and 0 can use the mask (or its complement) directly.  */
22782 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22783 && (GET_CODE (op_true) == CONST_VECTOR
22784 || GET_CODE (op_false) == CONST_VECTOR))
22785 {
22786 rtx constant_0 = CONST0_RTX (dest_mode);
22787 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22788
22789 if (op_true == constant_m1 && op_false == constant_0)
22790 {
22791 emit_move_insn (dest, mask);
22792 return 1;
22793 }
22794
22795 else if (op_true == constant_0 && op_false == constant_m1)
22796 {
22797 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22798 return 1;
22799 }
22800
22801 /* If we can't use the vector comparison directly, perhaps we can use
22802 the mask for the true or false fields, instead of loading up a
22803 constant. */
22804 if (op_true == constant_m1)
22805 op_true = mask;
22806
22807 if (op_false == constant_0)
22808 op_false = mask;
22809 }
22810
22811 if (!REG_P (op_true) && !SUBREG_P (op_true))
22812 op_true = force_reg (dest_mode, op_true);
22813
22814 if (!REG_P (op_false) && !SUBREG_P (op_false))
22815 op_false = force_reg (dest_mode, op_false);
22816
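/* Emit the select as (mask != 0) ? op_true : op_false; the vector
   select patterns (Altivec vsel, VSX xxsel) implement this by picking
   each lane bitwise according to the -1/0 mask.  */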
22817 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22818 CONST0_RTX (dest_mode));
22819 emit_insn (gen_rtx_SET (dest,
22820 gen_rtx_IF_THEN_ELSE (dest_mode,
22821 cond2,
22822 op_true,
22823 op_false)));
22824 return 1;
22825 }
22826
22827 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22828 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22829 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22830 hardware has no such operation. */
22831
22832 static int
22833 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22834 {
22835 enum rtx_code code = GET_CODE (op);
22836 rtx op0 = XEXP (op, 0);
22837 rtx op1 = XEXP (op, 1);
22838 machine_mode compare_mode = GET_MODE (op0);
22839 machine_mode result_mode = GET_MODE (dest);
22840 bool max_p = false;
22841
22842 if (result_mode != compare_mode)
22843 return 0;
22844
22845 if (code == GE || code == GT)
22846 max_p = true;
22847 else if (code == LE || code == LT)
22848 max_p = false;
22849 else
22850 return 0;
22851
22852 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22853 ;
22854
22855 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22856 max_p = !max_p;
22857
22858 else
22859 return 0;
22860
22861 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22862 return 1;
22863 }
22864
22865 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22866 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22867 operands of the last comparison is nonzero/true, FALSE_COND if it is
22868 zero/false. Return 0 if the hardware has no such operation. */
22869
22870 static int
22871 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22872 {
22873 enum rtx_code code = GET_CODE (op);
22874 rtx op0 = XEXP (op, 0);
22875 rtx op1 = XEXP (op, 1);
22876 machine_mode result_mode = GET_MODE (dest);
22877 rtx compare_rtx;
22878 rtx cmove_rtx;
22879 rtx clobber_rtx;
22880
22881 if (!can_create_pseudo_p ())
22882 return 0;
22883
22884 switch (code)
22885 {
22886 case EQ:
22887 case GE:
22888 case GT:
22889 break;
22890
22891 case NE:
22892 case LT:
22893 case LE:
22894 code = swap_condition (code);
22895 std::swap (op0, op1);
22896 break;
22897
22898 default:
22899 return 0;
22900 }
22901
22902 /* Generate: [(parallel [(set (dest)
22903 (if_then_else (op (cmp1) (cmp2))
22904 (true)
22905 (false)))
22906 (clobber (scratch))])]. */
22907
22908 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22909 cmove_rtx = gen_rtx_SET (dest,
22910 gen_rtx_IF_THEN_ELSE (result_mode,
22911 compare_rtx,
22912 true_cond,
22913 false_cond));
22914
22915 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22916 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22917 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22918
22919 return 1;
22920 }
22921
22922 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22923 operands of the last comparison is nonzero/true, FALSE_COND if it
22924 is zero/false. Return 0 if the hardware has no such operation. */
22925
22926 int
22927 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22928 {
22929 enum rtx_code code = GET_CODE (op);
22930 rtx op0 = XEXP (op, 0);
22931 rtx op1 = XEXP (op, 1);
22932 machine_mode compare_mode = GET_MODE (op0);
22933 machine_mode result_mode = GET_MODE (dest);
22934 rtx temp;
22935 bool is_against_zero;
22936
22937 /* These modes should always match. */
22938 if (GET_MODE (op1) != compare_mode
22939 /* In the isel case however, we can use a compare immediate, so
22940 op1 may be a small constant. */
22941 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22942 return 0;
22943 if (GET_MODE (true_cond) != result_mode)
22944 return 0;
22945 if (GET_MODE (false_cond) != result_mode)
22946 return 0;
22947
22948 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22949 if (TARGET_P9_MINMAX
22950 && (compare_mode == SFmode || compare_mode == DFmode)
22951 && (result_mode == SFmode || result_mode == DFmode))
22952 {
22953 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22954 return 1;
22955
22956 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22957 return 1;
22958 }
22959
22960 /* Don't allow using floating point comparisons for integer results for
22961 now. */
22962 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22963 return 0;
22964
22965 /* First, work out if the hardware can do this at all, or
22966 if it's too slow.... */
22967 if (!FLOAT_MODE_P (compare_mode))
22968 {
22969 if (TARGET_ISEL)
22970 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22971 return 0;
22972 }
22973
22974 is_against_zero = op1 == CONST0_RTX (compare_mode);
22975
22976 /* A floating-point subtract might overflow, underflow, or produce
22977 an inexact result, thus changing the floating-point flags, so it
22978 can't be generated if we care about that. It's safe if one side
22979 of the construct is zero, since then no subtract will be
22980 generated. */
22981 if (SCALAR_FLOAT_MODE_P (compare_mode)
22982 && flag_trapping_math && ! is_against_zero)
22983 return 0;
22984
22985 /* Eliminate half of the comparisons by switching operands, this
22986 makes the remaining code simpler. */
22987 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22988 || code == LTGT || code == LT || code == UNLE)
22989 {
22990 code = reverse_condition_maybe_unordered (code);
22991 temp = true_cond;
22992 true_cond = false_cond;
22993 false_cond = temp;
22994 }
22995
22996 /* UNEQ and LTGT take four instructions for a comparison with zero,
22997 so it'll probably be faster to use a branch here too.  */
22998 if (code == UNEQ && HONOR_NANS (compare_mode))
22999 return 0;
23000
23001 /* We're going to try to implement comparisons by performing
23002 a subtract, then comparing against zero. Unfortunately,
23003 Inf - Inf is NaN which is not zero, and so if we don't
23004 know that the operand is finite and the comparison
23005 would treat EQ differently from UNORDERED, we can't do it.  */
23006 if (HONOR_INFINITIES (compare_mode)
23007 && code != GT && code != UNGE
23008 && (!CONST_DOUBLE_P (op1)
23009 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23010 /* Constructs of the form (a OP b ? a : b) are safe. */
23011 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23012 || (! rtx_equal_p (op0, true_cond)
23013 && ! rtx_equal_p (op1, true_cond))))
23014 return 0;
23015
23016 /* At this point we know we can use fsel. */
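/* fsel selects between its two source operands according to whether a
   third operand is >= 0.0, so every case below is rewritten as a GE
   comparison against zero.  */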
23017
23018 /* Reduce the comparison to a comparison against zero. */
23019 if (! is_against_zero)
23020 {
23021 temp = gen_reg_rtx (compare_mode);
23022 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23023 op0 = temp;
23024 op1 = CONST0_RTX (compare_mode);
23025 }
23026
23027 /* If we don't care about NaNs we can reduce some of the comparisons
23028 down to faster ones. */
23029 if (! HONOR_NANS (compare_mode))
23030 switch (code)
23031 {
23032 case GT:
23033 code = LE;
23034 temp = true_cond;
23035 true_cond = false_cond;
23036 false_cond = temp;
23037 break;
23038 case UNGE:
23039 code = GE;
23040 break;
23041 case UNEQ:
23042 code = EQ;
23043 break;
23044 default:
23045 break;
23046 }
23047
23048 /* Now, reduce everything down to a GE. */
23049 switch (code)
23050 {
23051 case GE:
23052 break;
23053
23054 case LE:
23055 temp = gen_reg_rtx (compare_mode);
23056 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23057 op0 = temp;
23058 break;
23059
23060 case ORDERED:
23061 temp = gen_reg_rtx (compare_mode);
23062 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23063 op0 = temp;
23064 break;
23065
23066 case EQ:
23067 temp = gen_reg_rtx (compare_mode);
23068 emit_insn (gen_rtx_SET (temp,
23069 gen_rtx_NEG (compare_mode,
23070 gen_rtx_ABS (compare_mode, op0))));
23071 op0 = temp;
23072 break;
23073
23074 case UNGE:
23075 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23076 temp = gen_reg_rtx (result_mode);
23077 emit_insn (gen_rtx_SET (temp,
23078 gen_rtx_IF_THEN_ELSE (result_mode,
23079 gen_rtx_GE (VOIDmode,
23080 op0, op1),
23081 true_cond, false_cond)));
23082 false_cond = true_cond;
23083 true_cond = temp;
23084
23085 temp = gen_reg_rtx (compare_mode);
23086 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23087 op0 = temp;
23088 break;
23089
23090 case GT:
23091 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23092 temp = gen_reg_rtx (result_mode);
23093 emit_insn (gen_rtx_SET (temp,
23094 gen_rtx_IF_THEN_ELSE (result_mode,
23095 gen_rtx_GE (VOIDmode,
23096 op0, op1),
23097 true_cond, false_cond)));
23098 true_cond = false_cond;
23099 false_cond = temp;
23100
23101 temp = gen_reg_rtx (compare_mode);
23102 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23103 op0 = temp;
23104 break;
23105
23106 default:
23107 gcc_unreachable ();
23108 }
23109
23110 emit_insn (gen_rtx_SET (dest,
23111 gen_rtx_IF_THEN_ELSE (result_mode,
23112 gen_rtx_GE (VOIDmode,
23113 op0, op1),
23114 true_cond, false_cond)));
23115 return 1;
23116 }
23117
23118 /* Same as above, but for ints (isel). */
23119
23120 int
23121 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23122 {
23123 rtx condition_rtx, cr;
23124 machine_mode mode = GET_MODE (dest);
23125 enum rtx_code cond_code;
23126 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23127 bool signedp;
23128
23129 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23130 return 0;
23131
23132 /* We still have to do the compare, because isel doesn't do a
23133 compare, it just looks at the CRx bits set by a previous compare
23134 instruction. */
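/* E.g. "x = (a < b) ? c : d" becomes a compare setting a CR field
   followed by an isel that copies c or d depending on a single CR bit,
   with no branch.  */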
23135 condition_rtx = rs6000_generate_compare (op, mode);
23136 cond_code = GET_CODE (condition_rtx);
23137 cr = XEXP (condition_rtx, 0);
23138 signedp = GET_MODE (cr) == CCmode;
23139
23140 isel_func = (mode == SImode
23141 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23142 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23143
23144 switch (cond_code)
23145 {
23146 case LT: case GT: case LTU: case GTU: case EQ:
23147 /* isel handles these directly. */
23148 break;
23149
23150 default:
23151 /* We need to swap the sense of the comparison. */
23152 {
23153 std::swap (false_cond, true_cond);
23154 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23155 }
23156 break;
23157 }
23158
23159 false_cond = force_reg (mode, false_cond);
23160 if (true_cond != const0_rtx)
23161 true_cond = force_reg (mode, true_cond);
23162
23163 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23164
23165 return 1;
23166 }
23167
23168 void
23169 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23170 {
23171 machine_mode mode = GET_MODE (op0);
23172 enum rtx_code c;
23173 rtx target;
23174
23175 /* VSX/altivec have direct min/max insns. */
23176 if ((code == SMAX || code == SMIN)
23177 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23178 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23179 {
23180 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23181 return;
23182 }
23183
23184 if (code == SMAX || code == SMIN)
23185 c = GE;
23186 else
23187 c = GEU;
23188
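/* max (a, b) is (a >= b) ? a : b and min (a, b) is (a >= b) ? b : a
   (GEU for the unsigned variants), so one conditional move suffices.  */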
23189 if (code == SMAX || code == UMAX)
23190 target = emit_conditional_move (dest, c, op0, op1, mode,
23191 op0, op1, mode, 0);
23192 else
23193 target = emit_conditional_move (dest, c, op0, op1, mode,
23194 op1, op0, mode, 0);
23195 gcc_assert (target);
23196 if (target != dest)
23197 emit_move_insn (dest, target);
23198 }
23199
23200 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23201 COND is true. Mark the jump as unlikely to be taken. */
23202
23203 static void
23204 emit_unlikely_jump (rtx cond, rtx label)
23205 {
23206 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23207 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23208 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23209 }
23210
23211 /* A subroutine of the atomic operation splitters. Emit a load-locked
23212 instruction in MODE.  For QI/HImode, possibly use a pattern that includes
23213 the zero_extend operation. */
23214
23215 static void
23216 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23217 {
23218 rtx (*fn) (rtx, rtx) = NULL;
23219
23220 switch (mode)
23221 {
23222 case E_QImode:
23223 fn = gen_load_lockedqi;
23224 break;
23225 case E_HImode:
23226 fn = gen_load_lockedhi;
23227 break;
23228 case E_SImode:
23229 if (GET_MODE (mem) == QImode)
23230 fn = gen_load_lockedqi_si;
23231 else if (GET_MODE (mem) == HImode)
23232 fn = gen_load_lockedhi_si;
23233 else
23234 fn = gen_load_lockedsi;
23235 break;
23236 case E_DImode:
23237 fn = gen_load_lockeddi;
23238 break;
23239 case E_TImode:
23240 fn = gen_load_lockedti;
23241 break;
23242 default:
23243 gcc_unreachable ();
23244 }
23245 emit_insn (fn (reg, mem));
23246 }
23247
23248 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23249 instruction in MODE. */
23250
23251 static void
23252 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23253 {
23254 rtx (*fn) (rtx, rtx, rtx) = NULL;
23255
23256 switch (mode)
23257 {
23258 case E_QImode:
23259 fn = gen_store_conditionalqi;
23260 break;
23261 case E_HImode:
23262 fn = gen_store_conditionalhi;
23263 break;
23264 case E_SImode:
23265 fn = gen_store_conditionalsi;
23266 break;
23267 case E_DImode:
23268 fn = gen_store_conditionaldi;
23269 break;
23270 case E_TImode:
23271 fn = gen_store_conditionalti;
23272 break;
23273 default:
23274 gcc_unreachable ();
23275 }
23276
23277 /* Emit sync before stwcx. to address PPC405 Erratum. */
23278 if (PPC405_ERRATUM77)
23279 emit_insn (gen_hwsync ());
23280
23281 emit_insn (fn (res, mem, val));
23282 }
23283
23284 /* Expand barriers before and after a load_locked/store_cond sequence. */
23285
23286 static rtx
23287 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23288 {
23289 rtx addr = XEXP (mem, 0);
23290
23291 if (!legitimate_indirect_address_p (addr, reload_completed)
23292 && !legitimate_indexed_address_p (addr, reload_completed))
23293 {
23294 addr = force_reg (Pmode, addr);
23295 mem = replace_equiv_address_nv (mem, addr);
23296 }
23297
23298 switch (model)
23299 {
23300 case MEMMODEL_RELAXED:
23301 case MEMMODEL_CONSUME:
23302 case MEMMODEL_ACQUIRE:
23303 break;
23304 case MEMMODEL_RELEASE:
23305 case MEMMODEL_ACQ_REL:
23306 emit_insn (gen_lwsync ());
23307 break;
23308 case MEMMODEL_SEQ_CST:
23309 emit_insn (gen_hwsync ());
23310 break;
23311 default:
23312 gcc_unreachable ();
23313 }
23314 return mem;
23315 }
23316
23317 static void
23318 rs6000_post_atomic_barrier (enum memmodel model)
23319 {
23320 switch (model)
23321 {
23322 case MEMMODEL_RELAXED:
23323 case MEMMODEL_CONSUME:
23324 case MEMMODEL_RELEASE:
23325 break;
23326 case MEMMODEL_ACQUIRE:
23327 case MEMMODEL_ACQ_REL:
23328 case MEMMODEL_SEQ_CST:
23329 emit_insn (gen_isync ());
23330 break;
23331 default:
23332 gcc_unreachable ();
23333 }
23334 }
23335
23336 /* A subroutine of the various atomic expanders. For sub-word operations,
23337 we must adjust things to operate on SImode. Given the original MEM,
23338 return a new aligned memory. Also build and return the quantities by
23339 which to shift and mask. */
23340
23341 static rtx
23342 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23343 {
23344 rtx addr, align, shift, mask, mem;
23345 HOST_WIDE_INT shift_mask;
23346 machine_mode mode = GET_MODE (orig_mem);
23347
23348 /* For smaller modes, we have to implement this via SImode. */
23349 shift_mask = (mode == QImode ? 0x18 : 0x10);
23350
23351 addr = XEXP (orig_mem, 0);
23352 addr = force_reg (GET_MODE (addr), addr);
23353
23354 /* Aligned memory containing subword. Generate a new memory. We
23355 do not want any of the existing MEM_ATTR data, as we're now
23356 accessing memory outside the original object. */
23357 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23358 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23359 mem = gen_rtx_MEM (SImode, align);
23360 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23361 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23362 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23363
23364 /* Shift amount for subword relative to aligned word. */
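/* E.g. for a QImode access whose address ends in 0b10, (addr << 3)
   & 0x18 gives a shift of 16 on little-endian; big-endian XORs in the
   mask to give 8, the bit offset of that byte from the low end of the
   register.  */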
23365 shift = gen_reg_rtx (SImode);
23366 addr = gen_lowpart (SImode, addr);
23367 rtx tmp = gen_reg_rtx (SImode);
23368 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23369 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23370 if (BYTES_BIG_ENDIAN)
23371 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23372 shift, 1, OPTAB_LIB_WIDEN);
23373 *pshift = shift;
23374
23375 /* Mask for insertion. */
23376 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23377 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23378 *pmask = mask;
23379
23380 return mem;
23381 }
23382
23383 /* A subroutine of the various atomic expanders. For sub-word operands,
23384 combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */
23385
23386 static rtx
23387 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23388 {
23389 rtx x;
23390
23391 x = gen_reg_rtx (SImode);
23392 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23393 gen_rtx_NOT (SImode, mask),
23394 oldval)));
23395
23396 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23397
23398 return x;
23399 }
23400
23401 /* A subroutine of the various atomic expanders. For sub-word operands,
23402 extract WIDE to NARROW via SHIFT. */
23403
23404 static void
23405 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23406 {
23407 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23408 wide, 1, OPTAB_LIB_WIDEN);
23409 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23410 }
23411
23412 /* Expand an atomic compare and swap operation. */
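/* The generated sequence is roughly

      loop: lwarx   ret,0,mem     (lbarx/lharx/ldarx/lqarx by mode)
            cmpw    ret,oldval
            bne-    exit
            stwcx.  newval,0,mem
            bne-    loop          (strong compare-and-swap only)
      exit:

   wrapped in the barriers the memory model requires.  */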
23413
23414 void
23415 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23416 {
23417 rtx boolval, retval, mem, oldval, newval, cond;
23418 rtx label1, label2, x, mask, shift;
23419 machine_mode mode, orig_mode;
23420 enum memmodel mod_s, mod_f;
23421 bool is_weak;
23422
23423 boolval = operands[0];
23424 retval = operands[1];
23425 mem = operands[2];
23426 oldval = operands[3];
23427 newval = operands[4];
23428 is_weak = (INTVAL (operands[5]) != 0);
23429 mod_s = memmodel_base (INTVAL (operands[6]));
23430 mod_f = memmodel_base (INTVAL (operands[7]));
23431 orig_mode = mode = GET_MODE (mem);
23432
23433 mask = shift = NULL_RTX;
23434 if (mode == QImode || mode == HImode)
23435 {
23436 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23437 lwarx plus shift/mask operations.  With power8, we need to do the
23438 comparison in SImode, but the store is still done in QI/HImode. */
23439 oldval = convert_modes (SImode, mode, oldval, 1);
23440
23441 if (!TARGET_SYNC_HI_QI)
23442 {
23443 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23444
23445 /* Shift and mask OLDVAL into position within the word.  */
23446 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23447 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23448
23449 /* Shift and mask NEWVAL into position within the word. */
23450 newval = convert_modes (SImode, mode, newval, 1);
23451 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23452 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23453 }
23454
23455 /* Prepare to adjust the return value. */
23456 retval = gen_reg_rtx (SImode);
23457 mode = SImode;
23458 }
23459 else if (reg_overlap_mentioned_p (retval, oldval))
23460 oldval = copy_to_reg (oldval);
23461
23462 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23463 oldval = copy_to_mode_reg (mode, oldval);
23464
23465 if (reg_overlap_mentioned_p (retval, newval))
23466 newval = copy_to_reg (newval);
23467
23468 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23469
23470 label1 = NULL_RTX;
23471 if (!is_weak)
23472 {
23473 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23474 emit_label (XEXP (label1, 0));
23475 }
23476 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23477
23478 emit_load_locked (mode, retval, mem);
23479
23480 x = retval;
23481 if (mask)
23482 x = expand_simple_binop (SImode, AND, retval, mask,
23483 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23484
23485 cond = gen_reg_rtx (CCmode);
23486 /* If we have TImode, synthesize a comparison. */
23487 if (mode != TImode)
23488 x = gen_rtx_COMPARE (CCmode, x, oldval);
23489 else
23490 {
23491 rtx xor1_result = gen_reg_rtx (DImode);
23492 rtx xor2_result = gen_reg_rtx (DImode);
23493 rtx or_result = gen_reg_rtx (DImode);
23494 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23495 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23496 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23497 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23498
23499 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23500 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23501 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23502 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23503 }
23504
23505 emit_insn (gen_rtx_SET (cond, x));
23506
23507 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23508 emit_unlikely_jump (x, label2);
23509
23510 x = newval;
23511 if (mask)
23512 x = rs6000_mask_atomic_subword (retval, newval, mask);
23513
23514 emit_store_conditional (orig_mode, cond, mem, x);
23515
23516 if (!is_weak)
23517 {
23518 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23519 emit_unlikely_jump (x, label1);
23520 }
23521
23522 if (!is_mm_relaxed (mod_f))
23523 emit_label (XEXP (label2, 0));
23524
23525 rs6000_post_atomic_barrier (mod_s);
23526
23527 if (is_mm_relaxed (mod_f))
23528 emit_label (XEXP (label2, 0));
23529
23530 if (shift)
23531 rs6000_finish_atomic_subword (operands[1], retval, shift);
23532 else if (mode != GET_MODE (operands[1]))
23533 convert_move (operands[1], retval, 1);
23534
23535 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23536 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23537 emit_insn (gen_rtx_SET (boolval, x));
23538 }
23539
23540 /* Expand an atomic exchange operation. */
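/* Roughly:  loop: lwarx ret,0,mem ; stwcx. val,0,mem ; bne- loop,
   plus the barriers the memory model requires.  */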
23541
23542 void
23543 rs6000_expand_atomic_exchange (rtx operands[])
23544 {
23545 rtx retval, mem, val, cond;
23546 machine_mode mode;
23547 enum memmodel model;
23548 rtx label, x, mask, shift;
23549
23550 retval = operands[0];
23551 mem = operands[1];
23552 val = operands[2];
23553 model = memmodel_base (INTVAL (operands[3]));
23554 mode = GET_MODE (mem);
23555
23556 mask = shift = NULL_RTX;
23557 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23558 {
23559 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23560
23561 /* Shift and mask VAL into position within the word.  */
23562 val = convert_modes (SImode, mode, val, 1);
23563 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23564 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23565
23566 /* Prepare to adjust the return value. */
23567 retval = gen_reg_rtx (SImode);
23568 mode = SImode;
23569 }
23570
23571 mem = rs6000_pre_atomic_barrier (mem, model);
23572
23573 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23574 emit_label (XEXP (label, 0));
23575
23576 emit_load_locked (mode, retval, mem);
23577
23578 x = val;
23579 if (mask)
23580 x = rs6000_mask_atomic_subword (retval, val, mask);
23581
23582 cond = gen_reg_rtx (CCmode);
23583 emit_store_conditional (mode, cond, mem, x);
23584
23585 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23586 emit_unlikely_jump (x, label);
23587
23588 rs6000_post_atomic_barrier (model);
23589
23590 if (shift)
23591 rs6000_finish_atomic_subword (operands[0], retval, shift);
23592 }
23593
23594 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23595 to perform. MEM is the memory on which to operate. VAL is the second
23596 operand of the binary operator. BEFORE and AFTER are optional locations to
23597 return the value of MEM either before or after the operation.  MODEL_RTX
23598 is a CONST_INT containing the memory model to use. */
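/* E.g. a fetch-and-add on SImode becomes

      loop: lwarx   before,0,mem
            add     after,before,val
            stwcx.  after,0,mem
            bne-    loop  */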
23599
23600 void
23601 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23602 rtx orig_before, rtx orig_after, rtx model_rtx)
23603 {
23604 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23605 machine_mode mode = GET_MODE (mem);
23606 machine_mode store_mode = mode;
23607 rtx label, x, cond, mask, shift;
23608 rtx before = orig_before, after = orig_after;
23609
23610 mask = shift = NULL_RTX;
23611 /* On power8, use the native lbarx/lharx but do the arithmetic in SImode.
23612 On previous systems, operate on the containing aligned word and use
23613 shift/mask to get at the proper byte or halfword.  */
23614 if (mode == QImode || mode == HImode)
23615 {
23616 if (TARGET_SYNC_HI_QI)
23617 {
23618 val = convert_modes (SImode, mode, val, 1);
23619
23620 /* Prepare to adjust the return value. */
23621 before = gen_reg_rtx (SImode);
23622 if (after)
23623 after = gen_reg_rtx (SImode);
23624 mode = SImode;
23625 }
23626 else
23627 {
23628 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23629
23630 /* Shift and mask VAL into position within the word.  */
23631 val = convert_modes (SImode, mode, val, 1);
23632 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23633 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23634
23635 switch (code)
23636 {
23637 case IOR:
23638 case XOR:
23639 /* We've already zero-extended VAL. That is sufficient to
23640 make certain that it does not affect other bits. */
23641 mask = NULL;
23642 break;
23643
23644 case AND:
23645 /* If we make certain that all of the other bits in VAL are
23646 set, that will be sufficient to not affect other bits. */
23647 x = gen_rtx_NOT (SImode, mask);
23648 x = gen_rtx_IOR (SImode, x, val);
23649 emit_insn (gen_rtx_SET (val, x));
23650 mask = NULL;
23651 break;
23652
23653 case NOT:
23654 case PLUS:
23655 case MINUS:
23656 /* These will all affect bits outside the field and need
23657 adjustment via MASK within the loop. */
23658 break;
23659
23660 default:
23661 gcc_unreachable ();
23662 }
23663
23664 /* Prepare to adjust the return value. */
23665 before = gen_reg_rtx (SImode);
23666 if (after)
23667 after = gen_reg_rtx (SImode);
23668 store_mode = mode = SImode;
23669 }
23670 }
23671
23672 mem = rs6000_pre_atomic_barrier (mem, model);
23673
23674 label = gen_label_rtx ();
23675 emit_label (label);
23676 label = gen_rtx_LABEL_REF (VOIDmode, label);
23677
23678 if (before == NULL_RTX)
23679 before = gen_reg_rtx (mode);
23680
23681 emit_load_locked (mode, before, mem);
23682
23683 if (code == NOT)
23684 {
23685 x = expand_simple_binop (mode, AND, before, val,
23686 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23687 after = expand_simple_unop (mode, NOT, x, after, 1);
23688 }
23689 else
23690 {
23691 after = expand_simple_binop (mode, code, before, val,
23692 after, 1, OPTAB_LIB_WIDEN);
23693 }
23694
23695 x = after;
23696 if (mask)
23697 {
23698 x = expand_simple_binop (SImode, AND, after, mask,
23699 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23700 x = rs6000_mask_atomic_subword (before, x, mask);
23701 }
23702 else if (store_mode != mode)
23703 x = convert_modes (store_mode, mode, x, 1);
23704
23705 cond = gen_reg_rtx (CCmode);
23706 emit_store_conditional (store_mode, cond, mem, x);
23707
23708 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23709 emit_unlikely_jump (x, label);
23710
23711 rs6000_post_atomic_barrier (model);
23712
23713 if (shift)
23714 {
23715 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23716 then do the calculations in a SImode register.  */
23717 if (orig_before)
23718 rs6000_finish_atomic_subword (orig_before, before, shift);
23719 if (orig_after)
23720 rs6000_finish_atomic_subword (orig_after, after, shift);
23721 }
23722 else if (store_mode != mode)
23723 {
23724 /* QImode/HImode on machines with lbarx/lharx where we do the native
23725 operation and then do the calculations in a SImode register.  */
23726 if (orig_before)
23727 convert_move (orig_before, before, 1);
23728 if (orig_after)
23729 convert_move (orig_after, after, 1);
23730 }
23731 else if (orig_after && after != orig_after)
23732 emit_move_insn (orig_after, after);
23733 }
23734
23735 /* Emit instructions to move SRC to DST. Called by splitters for
23736 multi-register moves. It will emit at most one instruction for
23737 each register that is accessed; that is, it won't emit li/lis pairs
23738 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23739 register. */
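/* E.g. a TImode quantity in GPRs on a 64-bit target moves as two
   DImode pieces, ordered so that a register is not overwritten while
   it is still needed as a source or as a memory base register.  */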
23740
23741 void
23742 rs6000_split_multireg_move (rtx dst, rtx src)
23743 {
23744 /* The register number of the first register being moved. */
23745 int reg;
23746 /* The mode that is to be moved. */
23747 machine_mode mode;
23748 /* The mode that the move is being done in, and its size. */
23749 machine_mode reg_mode;
23750 int reg_mode_size;
23751 /* The number of registers that will be moved. */
23752 int nregs;
23753
23754 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23755 mode = GET_MODE (dst);
23756 nregs = hard_regno_nregs (reg, mode);
23757 if (FP_REGNO_P (reg))
23758 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23759 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23760 else if (ALTIVEC_REGNO_P (reg))
23761 reg_mode = V16QImode;
23762 else
23763 reg_mode = word_mode;
23764 reg_mode_size = GET_MODE_SIZE (reg_mode);
23765
23766 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23767
23768 /* TDmode residing in FP registers is special, since the ISA requires that
23769 the lower-numbered word of a register pair is always the most significant
23770 word, even in little-endian mode. This does not match the usual subreg
23771 semantics, so we cannot use simplify_gen_subreg in those cases.  Access
23772 the appropriate constituent registers "by hand" in little-endian mode.
23773
23774 Note we do not need to check for destructive overlap here since TDmode
23775 can only reside in even/odd register pairs. */
23776 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23777 {
23778 rtx p_src, p_dst;
23779 int i;
23780
23781 for (i = 0; i < nregs; i++)
23782 {
23783 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23784 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23785 else
23786 p_src = simplify_gen_subreg (reg_mode, src, mode,
23787 i * reg_mode_size);
23788
23789 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23790 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23791 else
23792 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23793 i * reg_mode_size);
23794
23795 emit_insn (gen_rtx_SET (p_dst, p_src));
23796 }
23797
23798 return;
23799 }
23800
23801 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23802 {
23803 /* Move register range backwards, if we might have destructive
23804 overlap. */
23805 int i;
23806 for (i = nregs - 1; i >= 0; i--)
23807 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23808 i * reg_mode_size),
23809 simplify_gen_subreg (reg_mode, src, mode,
23810 i * reg_mode_size)));
23811 }
23812 else
23813 {
23814 int i;
23815 int j = -1;
23816 bool used_update = false;
23817 rtx restore_basereg = NULL_RTX;
23818
23819 if (MEM_P (src) && INT_REGNO_P (reg))
23820 {
23821 rtx breg;
23822
23823 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23824 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23825 {
23826 rtx delta_rtx;
23827 breg = XEXP (XEXP (src, 0), 0);
23828 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23829 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23830 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23831 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23832 src = replace_equiv_address (src, breg);
23833 }
23834 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23835 {
23836 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23837 {
23838 rtx basereg = XEXP (XEXP (src, 0), 0);
23839 if (TARGET_UPDATE)
23840 {
23841 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23842 emit_insn (gen_rtx_SET (ndst,
23843 gen_rtx_MEM (reg_mode,
23844 XEXP (src, 0))));
23845 used_update = true;
23846 }
23847 else
23848 emit_insn (gen_rtx_SET (basereg,
23849 XEXP (XEXP (src, 0), 1)));
23850 src = replace_equiv_address (src, basereg);
23851 }
23852 else
23853 {
23854 rtx basereg = gen_rtx_REG (Pmode, reg);
23855 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23856 src = replace_equiv_address (src, basereg);
23857 }
23858 }
23859
23860 breg = XEXP (src, 0);
23861 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23862 breg = XEXP (breg, 0);
23863
23864 /* If the base register we are using to address memory is
23865 also a destination reg, then change that register last. */
23866 if (REG_P (breg)
23867 && REGNO (breg) >= REGNO (dst)
23868 && REGNO (breg) < REGNO (dst) + nregs)
23869 j = REGNO (breg) - REGNO (dst);
23870 }
23871 else if (MEM_P (dst) && INT_REGNO_P (reg))
23872 {
23873 rtx breg;
23874
23875 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23876 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23877 {
23878 rtx delta_rtx;
23879 breg = XEXP (XEXP (dst, 0), 0);
23880 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23881 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23882 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23883
23884 /* We have to update the breg before doing the store.
23885 Use store with update, if available. */
23886
23887 if (TARGET_UPDATE)
23888 {
23889 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23890 emit_insn (TARGET_32BIT
23891 ? (TARGET_POWERPC64
23892 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23893 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23894 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23895 used_update = true;
23896 }
23897 else
23898 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23899 dst = replace_equiv_address (dst, breg);
23900 }
23901 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23902 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23903 {
23904 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23905 {
23906 rtx basereg = XEXP (XEXP (dst, 0), 0);
23907 if (TARGET_UPDATE)
23908 {
23909 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23910 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23911 XEXP (dst, 0)),
23912 nsrc));
23913 used_update = true;
23914 }
23915 else
23916 emit_insn (gen_rtx_SET (basereg,
23917 XEXP (XEXP (dst, 0), 1)));
23918 dst = replace_equiv_address (dst, basereg);
23919 }
23920 else
23921 {
23922 rtx basereg = XEXP (XEXP (dst, 0), 0);
23923 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23924 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23925 && REG_P (basereg)
23926 && REG_P (offsetreg)
23927 && REGNO (basereg) != REGNO (offsetreg));
23928 if (REGNO (basereg) == 0)
23929 {
23930 rtx tmp = offsetreg;
23931 offsetreg = basereg;
23932 basereg = tmp;
23933 }
23934 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23935 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23936 dst = replace_equiv_address (dst, basereg);
23937 }
23938 }
23939 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23940 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23941 }
23942
23943 for (i = 0; i < nregs; i++)
23944 {
23945 /* Calculate index to next subword. */
23946 ++j;
23947 if (j == nregs)
23948 j = 0;
23949
23950 /* If the compiler already emitted the move of the first word by
23951 store with update, there is no need to do anything. */
23952 if (j == 0 && used_update)
23953 continue;
23954
23955 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23956 j * reg_mode_size),
23957 simplify_gen_subreg (reg_mode, src, mode,
23958 j * reg_mode_size)));
23959 }
23960 if (restore_basereg != NULL_RTX)
23961 emit_insn (restore_basereg);
23962 }
23963 }
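
/* To see why the overlap direction matters above: when REGNO (src)
   < REGNO (dst), the destination range overlaps the source range from
   above, so the subwords must be copied highest-first, just as memmove
   must copy backwards for that kind of overlap.  A minimal standalone
   sketch (hypothetical 4-entry register file, moving regs 0..2 into
   regs 1..3):

        int r[4] = { 10, 11, 12, 13 };
        for (int i = 2; i >= 0; i--)
          r[i + 1] = r[i];

   leaves r[] == { 10, 10, 11, 12 }, as desired.  A forward loop
   (i = 0, 1, 2) would instead smear r[0] into every element, because
   each source word would be clobbered before being read.  */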
23964
23965 \f
23966 /* This page contains routines that are used to determine what the
23967 function prologue and epilogue code will do and write them out. */
23968
23969 /* Determine whether register REG really needs to be saved. */
23970
23971 static bool
23972 save_reg_p (int reg)
23973 {
23974 /* We need to mark the PIC offset register live for the same conditions
23975 as it is set up, or otherwise it won't be saved before we clobber it. */
23976
23977 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23978 {
23979 /* When calling eh_return, we must return true for all the cases
23980 where conditional_register_usage marks the PIC offset reg
23981 call used. */
23982 if (TARGET_TOC && TARGET_MINIMAL_TOC
23983 && (crtl->calls_eh_return
23984 || df_regs_ever_live_p (reg)
23985 || !constant_pool_empty_p ()))
23986 return true;
23987
23988 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
23989 && flag_pic && crtl->uses_pic_offset_table)
23990 return true;
23991 }
23992
23993 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23994 }
23995
23996 /* Return the first fixed-point register that is required to be
23997 saved. 32 if none. */
23998
23999 int
24000 first_reg_to_save (void)
24001 {
24002 int first_reg;
24003
24004 /* Find lowest numbered live register. */
24005 for (first_reg = 13; first_reg <= 31; first_reg++)
24006 if (save_reg_p (first_reg))
24007 break;
24008
24009 return first_reg;
24010 }
24011
24012 /* Similar, for FP regs. */
24013
24014 int
24015 first_fp_reg_to_save (void)
24016 {
24017 int first_reg;
24018
24019 /* Find lowest numbered live register. */
24020 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24021 if (save_reg_p (first_reg))
24022 break;
24023
24024 return first_reg;
24025 }
24026
24027 /* Similar, for AltiVec regs. */
24028
24029 static int
24030 first_altivec_reg_to_save (void)
24031 {
24032 int i;
24033
24034 /* Stack frame remains as is unless we are using the AltiVec ABI. */
24035 if (! TARGET_ALTIVEC_ABI)
24036 return LAST_ALTIVEC_REGNO + 1;
24037
24038 /* On Darwin, the unwind routines are compiled without
24039 TARGET_ALTIVEC, and use save_world to save/restore the
24040 altivec registers when necessary. */
24041 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24042 && ! TARGET_ALTIVEC)
24043 return FIRST_ALTIVEC_REGNO + 20;
24044
24045 /* Find lowest numbered live register. */
24046 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24047 if (save_reg_p (i))
24048 break;
24049
24050 return i;
24051 }
24052
24053 /* Return a 32-bit mask of the AltiVec registers we need to set in
24054 VRSAVE. Bit n of the return value is 1 if Vn is live; V0
24055 corresponds to the MSB of the 32-bit word. */
24056
24057 static unsigned int
24058 compute_vrsave_mask (void)
24059 {
24060 unsigned int i, mask = 0;
24061
24062 /* On Darwin, the unwind routines are compiled without
24063 TARGET_ALTIVEC, and use save_world to save/restore the
24064 call-saved altivec registers when necessary. */
24065 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24066 && ! TARGET_ALTIVEC)
24067 mask |= 0xFFF;
24068
24069 /* First, find out if we use _any_ altivec registers. */
24070 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24071 if (df_regs_ever_live_p (i))
24072 mask |= ALTIVEC_REG_BIT (i);
24073
24074 if (mask == 0)
24075 return mask;
24076
24077 /* Next, remove the argument registers from the set. These must
24078 be in the VRSAVE mask set by the caller, so we don't need to add
24079 them in again. More importantly, the mask we compute here is
24080 used to generate CLOBBERs in the set_vrsave insn, and we do not
24081 wish the argument registers to die. */
24082 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24083 mask &= ~ALTIVEC_REG_BIT (i);
24084
24085 /* Similarly, remove the return value from the set. */
24086 {
24087 bool yes = false;
24088 diddle_return_value (is_altivec_return_reg, &yes);
24089 if (yes)
24090 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24091 }
24092
24093 return mask;
24094 }
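
/* A standalone sketch of the mask layout, assuming the usual
   definition ALTIVEC_REG_BIT (r) == 0x80000000 >> (r - FIRST_ALTIVEC_REGNO),
   i.e. V0 is the most significant bit:

        #include <stdio.h>

        int
        main (void)
        {
          unsigned int mask = 0;
          int vn;
          for (vn = 20; vn <= 31; vn++)
            mask |= 0x80000000u >> vn;
          printf ("0x%x\n", mask);
          return 0;
        }

   prints 0xfff, matching the 0xFFF used above for the call-saved
   V20..V31 in the Darwin save_world case.  */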
24095
24096 /* For a very restricted set of circumstances, we can cut down the
24097 size of prologues/epilogues by calling our own save/restore-the-world
24098 routines. */
24099
24100 static void
24101 compute_save_world_info (rs6000_stack_t *info)
24102 {
24103 info->world_save_p = 1;
24104 info->world_save_p
24105 = (WORLD_SAVE_P (info)
24106 && DEFAULT_ABI == ABI_DARWIN
24107 && !cfun->has_nonlocal_label
24108 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24109 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24110 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24111 && info->cr_save_p);
24112
24113 /* This will not work in conjunction with sibcalls. Make sure there
24114 are none. (This check is expensive, but seldom executed.) */
24115 if (WORLD_SAVE_P (info))
24116 {
24117 rtx_insn *insn;
24118 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24119 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24120 {
24121 info->world_save_p = 0;
24122 break;
24123 }
24124 }
24125
24126 if (WORLD_SAVE_P (info))
24127 {
24128 /* Even if we're not touching VRsave, make sure there's room on the
24129 stack for it, if it looks like we're calling SAVE_WORLD, which
24130 will attempt to save it. */
24131 info->vrsave_size = 4;
24132
24133 /* If we are going to save the world, we need to save the link register too. */
24134 info->lr_save_p = 1;
24135
24136 /* "Save" the VRsave register too if we're saving the world. */
24137 if (info->vrsave_mask == 0)
24138 info->vrsave_mask = compute_vrsave_mask ();
24139
24140 /* Because the Darwin register save/restore routines only handle
24141 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24142 check. */
24143 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24144 && (info->first_altivec_reg_save
24145 >= FIRST_SAVED_ALTIVEC_REGNO));
24146 }
24147
24148 return;
24149 }
24150
24151
24152 static void
24153 is_altivec_return_reg (rtx reg, void *xyes)
24154 {
24155 bool *yes = (bool *) xyes;
24156 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24157 *yes = true;
24158 }
24159
24160 \f
24161 /* Return whether REG is a global user reg or has been specified by
24162 -ffixed-REG. We should not restore these, and so cannot use
24163 lmw or out-of-line restore functions if there are any. We also
24164 can't save them (well, emit frame notes for them), because frame
24165 unwinding during exception handling will restore saved registers. */
24166
24167 static bool
24168 fixed_reg_p (int reg)
24169 {
24170 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24171 backend sets it, overriding anything the user might have given. */
24172 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24173 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24174 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24175 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24176 return false;
24177
24178 return fixed_regs[reg];
24179 }
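
/* For example, a register can be made fixed from the command line
   (illustrative invocation; the register choice is arbitrary):

        gcc -ffixed-r14 foo.c

   after which r14 hits the fixed_regs[] test above, and if it lies in
   the range of restored gprs the strategy code below must avoid lmw
   and the out-of-line restore routines.  */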
24180
24181 /* Determine the strategy for saving/restoring registers. */
24182
24183 enum {
24184 SAVE_MULTIPLE = 0x1,
24185 SAVE_INLINE_GPRS = 0x2,
24186 SAVE_INLINE_FPRS = 0x4,
24187 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24188 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24189 SAVE_INLINE_VRS = 0x20,
24190 REST_MULTIPLE = 0x100,
24191 REST_INLINE_GPRS = 0x200,
24192 REST_INLINE_FPRS = 0x400,
24193 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24194 REST_INLINE_VRS = 0x1000
24195 };
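
/* The strategy is an ordinary bitmask, so the usual flag idioms apply.
   A minimal sketch using the values defined above:

        int strategy = SAVE_INLINE_GPRS | SAVE_MULTIPLE;
        if (strategy & SAVE_MULTIPLE)
          strategy &= ~REST_MULTIPLE;

   Note the paired tests below of the form

        (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS

   which are true unless REST_INLINE_GPRS is set and REST_MULTIPLE is
   clear, i.e. unless the restore is already committed to inline
   single-register loads.  */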
24196
24197 static int
24198 rs6000_savres_strategy (rs6000_stack_t *info,
24199 bool using_static_chain_p)
24200 {
24201 int strategy = 0;
24202
24203 /* Select between in-line and out-of-line save and restore of regs.
24204 First, all the obvious cases where we don't use out-of-line. */
24205 if (crtl->calls_eh_return
24206 || cfun->machine->ra_need_lr)
24207 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24208 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24209 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24210
24211 if (info->first_gp_reg_save == 32)
24212 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24213
24214 if (info->first_fp_reg_save == 64)
24215 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24216
24217 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24218 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24219
24220 /* Define cutoff for using out-of-line functions to save registers. */
24221 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24222 {
24223 if (!optimize_size)
24224 {
24225 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24226 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24227 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24228 }
24229 else
24230 {
24231 /* Prefer out-of-line restore, whose "exit" variant also handles the return. */
24232 if (info->first_fp_reg_save > 61)
24233 strategy |= SAVE_INLINE_FPRS;
24234 if (info->first_gp_reg_save > 29)
24235 {
24236 if (info->first_fp_reg_save == 64)
24237 strategy |= SAVE_INLINE_GPRS;
24238 else
24239 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24240 }
24241 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24242 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24243 }
24244 }
24245 else if (DEFAULT_ABI == ABI_DARWIN)
24246 {
24247 if (info->first_fp_reg_save > 60)
24248 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24249 if (info->first_gp_reg_save > 29)
24250 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24251 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24252 }
24253 else
24254 {
24255 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24256 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24257 || info->first_fp_reg_save > 61)
24258 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24259 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24260 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24261 }
24262
24263 /* Don't bother to try to save things out-of-line if r11 is occupied
24264 by the static chain. It would require too much fiddling and the
24265 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24266 pointer on Darwin, and AIX uses r1 or r12. */
24267 if (using_static_chain_p
24268 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24269 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24270 | SAVE_INLINE_GPRS
24271 | SAVE_INLINE_VRS);
24272
24273 /* Don't ever restore fixed regs. That means we can't use the
24274 out-of-line register restore functions if a fixed reg is in the
24275 range of regs restored. */
24276 if (!(strategy & REST_INLINE_FPRS))
24277 for (int i = info->first_fp_reg_save; i < 64; i++)
24278 if (fixed_regs[i])
24279 {
24280 strategy |= REST_INLINE_FPRS;
24281 break;
24282 }
24283
24284 /* We can only use the out-of-line routines to restore fprs if we've
24285 saved all the registers from first_fp_reg_save in the prologue.
24286 Otherwise, we risk loading garbage. Of course, if we have saved
24287 out-of-line then we know we haven't skipped any fprs. */
24288 if ((strategy & SAVE_INLINE_FPRS)
24289 && !(strategy & REST_INLINE_FPRS))
24290 for (int i = info->first_fp_reg_save; i < 64; i++)
24291 if (!save_reg_p (i))
24292 {
24293 strategy |= REST_INLINE_FPRS;
24294 break;
24295 }
24296
24297 /* Similarly, for altivec regs. */
24298 if (!(strategy & REST_INLINE_VRS))
24299 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24300 if (fixed_regs[i])
24301 {
24302 strategy |= REST_INLINE_VRS;
24303 break;
24304 }
24305
24306 if ((strategy & SAVE_INLINE_VRS)
24307 && !(strategy & REST_INLINE_VRS))
24308 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24309 if (!save_reg_p (i))
24310 {
24311 strategy |= REST_INLINE_VRS;
24312 break;
24313 }
24314
24315 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24316 saved is an out-of-line save or restore. Set up the value for
24317 the next test (excluding out-of-line gprs). */
24318 bool lr_save_p = (info->lr_save_p
24319 || !(strategy & SAVE_INLINE_FPRS)
24320 || !(strategy & SAVE_INLINE_VRS)
24321 || !(strategy & REST_INLINE_FPRS)
24322 || !(strategy & REST_INLINE_VRS));
24323
24324 if (TARGET_MULTIPLE
24325 && !TARGET_POWERPC64
24326 && info->first_gp_reg_save < 31
24327 && !(flag_shrink_wrap
24328 && flag_shrink_wrap_separate
24329 && optimize_function_for_speed_p (cfun)))
24330 {
24331 int count = 0;
24332 for (int i = info->first_gp_reg_save; i < 32; i++)
24333 if (save_reg_p (i))
24334 count++;
24335
24336 if (count <= 1)
24337 /* Don't use store multiple if only one reg needs to be
24338 saved. This can occur for example when the ABI_V4 pic reg
24339 (r30) needs to be saved to make calls, but r31 is not
24340 used. */
24341 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24342 else
24343 {
24344 /* Prefer store multiple for saves over out-of-line
24345 routines, since the store-multiple instruction will
24346 always be smaller. */
24347 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24348
24349 /* The situation is more complicated with load multiple.
24350 We'd prefer to use the out-of-line routines for restores,
24351 since the "exit" out-of-line routines can handle the
24352 restore of LR and the frame teardown. However it doesn't
24353 make sense to use the out-of-line routine if that is the
24354 only reason we'd need to save LR, and we can't use the
24355 "exit" out-of-line gpr restore if we have saved some
24356 fprs; in those cases it is advantageous to use load
24357 multiple when available. */
24358 if (info->first_fp_reg_save != 64 || !lr_save_p)
24359 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24360 }
24361 }
24362
24363 /* Using the "exit" out-of-line routine does not improve code size
24364 if using it would require lr to be saved and if only saving one
24365 or two gprs. */
24366 else if (!lr_save_p && info->first_gp_reg_save > 29)
24367 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24368
24369 /* Don't ever restore fixed regs. */
24370 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24371 for (int i = info->first_gp_reg_save; i < 32; i++)
24372 if (fixed_reg_p (i))
24373 {
24374 strategy |= REST_INLINE_GPRS;
24375 strategy &= ~REST_MULTIPLE;
24376 break;
24377 }
24378
24379 /* We can only use load multiple or the out-of-line routines to
24380 restore gprs if we've saved all the registers from
24381 first_gp_reg_save. Otherwise, we risk loading garbage.
24382 Of course, if we have saved out-of-line or used stmw then we know
24383 we haven't skipped any gprs. */
24384 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24385 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24386 for (int i = info->first_gp_reg_save; i < 32; i++)
24387 if (!save_reg_p (i))
24388 {
24389 strategy |= REST_INLINE_GPRS;
24390 strategy &= ~REST_MULTIPLE;
24391 break;
24392 }
24393
24394 if (TARGET_ELF && TARGET_64BIT)
24395 {
24396 if (!(strategy & SAVE_INLINE_FPRS))
24397 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24398 else if (!(strategy & SAVE_INLINE_GPRS)
24399 && info->first_fp_reg_save == 64)
24400 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24401 }
24402 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24403 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24404
24405 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24406 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24407
24408 return strategy;
24409 }
24410
24411 /* Calculate the stack information for the current function. This is
24412 complicated by having two separate calling sequences, the AIX calling
24413 sequence and the V.4 calling sequence.
24414
24415 AIX (and Darwin/Mac OS X) stack frames look like:
24416 32-bit 64-bit
24417 SP----> +---------------------------------------+
24418 | back chain to caller | 0 0
24419 +---------------------------------------+
24420 | saved CR | 4 8 (8-11)
24421 +---------------------------------------+
24422 | saved LR | 8 16
24423 +---------------------------------------+
24424 | reserved for compilers | 12 24
24425 +---------------------------------------+
24426 | reserved for binders | 16 32
24427 +---------------------------------------+
24428 | saved TOC pointer | 20 40
24429 +---------------------------------------+
24430 | Parameter save area (+padding*) (P) | 24 48
24431 +---------------------------------------+
24432 | Alloca space (A) | 24+P etc.
24433 +---------------------------------------+
24434 | Local variable space (L) | 24+P+A
24435 +---------------------------------------+
24436 | Float/int conversion temporary (X) | 24+P+A+L
24437 +---------------------------------------+
24438 | Save area for AltiVec registers (W) | 24+P+A+L+X
24439 +---------------------------------------+
24440 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24441 +---------------------------------------+
24442 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24443 +---------------------------------------+
24444 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24445 +---------------------------------------+
24446 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24447 +---------------------------------------+
24448 old SP->| back chain to caller's caller |
24449 +---------------------------------------+
24450
24451 * If the alloca area is present, the parameter save area is
24452 padded so that the former starts 16-byte aligned.
24453
24454 The required alignment for AIX configurations is two words (i.e., 8
24455 or 16 bytes).
24456
24457 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24458
24459 SP----> +---------------------------------------+
24460 | Back chain to caller | 0
24461 +---------------------------------------+
24462 | Save area for CR | 8
24463 +---------------------------------------+
24464 | Saved LR | 16
24465 +---------------------------------------+
24466 | Saved TOC pointer | 24
24467 +---------------------------------------+
24468 | Parameter save area (+padding*) (P) | 32
24469 +---------------------------------------+
24470 | Alloca space (A) | 32+P
24471 +---------------------------------------+
24472 | Local variable space (L) | 32+P+A
24473 +---------------------------------------+
24474 | Save area for AltiVec registers (W) | 32+P+A+L
24475 +---------------------------------------+
24476 | AltiVec alignment padding (Y) | 32+P+A+L+W
24477 +---------------------------------------+
24478 | Save area for GP registers (G) | 32+P+A+L+W+Y
24479 +---------------------------------------+
24480 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24481 +---------------------------------------+
24482 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24483 +---------------------------------------+
24484
24485 * If the alloca area is present, the parameter save area is
24486 padded so that the former starts 16-byte aligned.
24487
24488 V.4 stack frames look like:
24489
24490 SP----> +---------------------------------------+
24491 | back chain to caller | 0
24492 +---------------------------------------+
24493 | caller's saved LR | 4
24494 +---------------------------------------+
24495 | Parameter save area (+padding*) (P) | 8
24496 +---------------------------------------+
24497 | Alloca space (A) | 8+P
24498 +---------------------------------------+
24499 | Varargs save area (V) | 8+P+A
24500 +---------------------------------------+
24501 | Local variable space (L) | 8+P+A+V
24502 +---------------------------------------+
24503 | Float/int conversion temporary (X) | 8+P+A+V+L
24504 +---------------------------------------+
24505 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24506 +---------------------------------------+
24507 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24508 +---------------------------------------+
24509 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24510 +---------------------------------------+
24511 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24512 +---------------------------------------+
24513 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24514 +---------------------------------------+
24515 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24516 +---------------------------------------+
24517 old SP->| back chain to caller's caller |
24518 +---------------------------------------+
24519
24520 * If the alloca area is present and the required alignment is
24521 16 bytes, the parameter save area is padded so that the
24522 alloca area starts 16-byte aligned.
24523
24524 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24525 given. (But note below and in sysv4.h that we require only 8 and
24526 may round up the size of our stack frame anyway. The historical
24527 reason is early versions of powerpc-linux which didn't properly
24528 align the stack at program startup. A happy side-effect is that
24529 -mno-eabi libraries can be used with -meabi programs.)
24530
24531 The EABI configuration defaults to the V.4 layout. However,
24532 the stack alignment requirements may differ. If -mno-eabi is not
24533 given, the required stack alignment is 8 bytes; if -mno-eabi is
24534 given, the required alignment is 16 bytes. (But see V.4 comment
24535 above.) */
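
/* A worked instance of the ELFv2 layout above, with hypothetical
   sizes P = 64, A = 0, L = 32, no vector state (W = Y = 0) and three
   saved gprs (G = 24, F = 0):

        Parameter save area     at 32
        Local variable space    at 32+64+0           = 96
        GP register save area   at 32+64+0+32+0+0    = 128
        old SP / back chain     at 128+24            = 152

   with the final frame size then rounded up to the ABI stack
   boundary.  */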
24536
24537 #ifndef ABI_STACK_BOUNDARY
24538 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24539 #endif
24540
24541 static rs6000_stack_t *
24542 rs6000_stack_info (void)
24543 {
24544 /* We should never be called for thunks, we are not set up for that. */
24545 gcc_assert (!cfun->is_thunk);
24546
24547 rs6000_stack_t *info = &stack_info;
24548 int reg_size = TARGET_32BIT ? 4 : 8;
24549 int ehrd_size;
24550 int ehcr_size;
24551 int save_align;
24552 int first_gp;
24553 HOST_WIDE_INT non_fixed_size;
24554 bool using_static_chain_p;
24555
24556 if (reload_completed && info->reload_completed)
24557 return info;
24558
24559 memset (info, 0, sizeof (*info));
24560 info->reload_completed = reload_completed;
24561
24562 /* Select which calling sequence. */
24563 info->abi = DEFAULT_ABI;
24564
24565 /* Calculate which registers need to be saved & save area size. */
24566 info->first_gp_reg_save = first_reg_to_save ();
24567 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24568 even if it currently looks like we won't. Reload may need it to
24569 get at a constant; if so, it will have already created a constant
24570 pool entry for it. */
24571 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24572 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24573 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24574 && crtl->uses_const_pool
24575 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24576 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24577 else
24578 first_gp = info->first_gp_reg_save;
24579
24580 info->gp_size = reg_size * (32 - first_gp);
24581
24582 info->first_fp_reg_save = first_fp_reg_to_save ();
24583 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24584
24585 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24586 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24587 - info->first_altivec_reg_save);
24588
24589 /* Does this function call anything? */
24590 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24591
24592 /* Determine if we need to save the condition code registers. */
24593 if (save_reg_p (CR2_REGNO)
24594 || save_reg_p (CR3_REGNO)
24595 || save_reg_p (CR4_REGNO))
24596 {
24597 info->cr_save_p = 1;
24598 if (DEFAULT_ABI == ABI_V4)
24599 info->cr_size = reg_size;
24600 }
24601
24602 /* If the current function calls __builtin_eh_return, then we need
24603 to allocate stack space for registers that will hold data for
24604 the exception handler. */
24605 if (crtl->calls_eh_return)
24606 {
24607 unsigned int i;
24608 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24609 continue;
24610
24611 ehrd_size = i * UNITS_PER_WORD;
24612 }
24613 else
24614 ehrd_size = 0;
24615
24616 /* In the ELFv2 ABI, we also need to allocate space for separate
24617 CR field save areas if the function calls __builtin_eh_return. */
24618 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24619 {
24620 /* This hard-codes that we have three call-saved CR fields. */
24621 ehcr_size = 3 * reg_size;
24622 /* We do *not* use the regular CR save mechanism. */
24623 info->cr_save_p = 0;
24624 }
24625 else
24626 ehcr_size = 0;
24627
24628 /* Determine various sizes. */
24629 info->reg_size = reg_size;
24630 info->fixed_size = RS6000_SAVE_AREA;
24631 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24632 if (cfun->calls_alloca)
24633 info->parm_size =
24634 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24635 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24636 else
24637 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24638 TARGET_ALTIVEC ? 16 : 8);
24639 if (FRAME_GROWS_DOWNWARD)
24640 info->vars_size
24641 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24642 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24643 - (info->fixed_size + info->vars_size + info->parm_size);
24644
24645 if (TARGET_ALTIVEC_ABI)
24646 info->vrsave_mask = compute_vrsave_mask ();
24647
24648 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24649 info->vrsave_size = 4;
24650
24651 compute_save_world_info (info);
24652
24653 /* Calculate the offsets. */
24654 switch (DEFAULT_ABI)
24655 {
24656 case ABI_NONE:
24657 default:
24658 gcc_unreachable ();
24659
24660 case ABI_AIX:
24661 case ABI_ELFv2:
24662 case ABI_DARWIN:
24663 info->fp_save_offset = -info->fp_size;
24664 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24665
24666 if (TARGET_ALTIVEC_ABI)
24667 {
24668 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24669
24670 /* Align stack so vector save area is on a quadword boundary.
24671 The padding goes above the vectors. */
24672 if (info->altivec_size != 0)
24673 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24674
24675 info->altivec_save_offset = info->vrsave_save_offset
24676 - info->altivec_padding_size
24677 - info->altivec_size;
24678 gcc_assert (info->altivec_size == 0
24679 || info->altivec_save_offset % 16 == 0);
24680
24681 /* Adjust for AltiVec case. */
24682 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24683 }
24684 else
24685 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24686
24687 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24688 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24689 info->lr_save_offset = 2*reg_size;
24690 break;
24691
24692 case ABI_V4:
24693 info->fp_save_offset = -info->fp_size;
24694 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24695 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24696
24697 if (TARGET_ALTIVEC_ABI)
24698 {
24699 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24700
24701 /* Align stack so vector save area is on a quadword boundary. */
24702 if (info->altivec_size != 0)
24703 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24704
24705 info->altivec_save_offset = info->vrsave_save_offset
24706 - info->altivec_padding_size
24707 - info->altivec_size;
24708
24709 /* Adjust for AltiVec case. */
24710 info->ehrd_offset = info->altivec_save_offset;
24711 }
24712 else
24713 info->ehrd_offset = info->cr_save_offset;
24714
24715 info->ehrd_offset -= ehrd_size;
24716 info->lr_save_offset = reg_size;
24717 }
24718
24719 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24720 info->save_size = RS6000_ALIGN (info->fp_size
24721 + info->gp_size
24722 + info->altivec_size
24723 + info->altivec_padding_size
24724 + ehrd_size
24725 + ehcr_size
24726 + info->cr_size
24727 + info->vrsave_size,
24728 save_align);
24729
24730 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24731
24732 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24733 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24734
24735 /* Determine if we need to save the link register. */
24736 if (info->calls_p
24737 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24738 && crtl->profile
24739 && !TARGET_PROFILE_KERNEL)
24740 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24741 #ifdef TARGET_RELOCATABLE
24742 || (DEFAULT_ABI == ABI_V4
24743 && (TARGET_RELOCATABLE || flag_pic > 1)
24744 && !constant_pool_empty_p ())
24745 #endif
24746 || rs6000_ra_ever_killed ())
24747 info->lr_save_p = 1;
24748
24749 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24750 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24751 && call_used_regs[STATIC_CHAIN_REGNUM]);
24752 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24753
24754 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24755 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24756 || !(info->savres_strategy & SAVE_INLINE_VRS)
24757 || !(info->savres_strategy & REST_INLINE_GPRS)
24758 || !(info->savres_strategy & REST_INLINE_FPRS)
24759 || !(info->savres_strategy & REST_INLINE_VRS))
24760 info->lr_save_p = 1;
24761
24762 if (info->lr_save_p)
24763 df_set_regs_ever_live (LR_REGNO, true);
24764
24765 /* Determine if we need to allocate any stack frame:
24766
24767 For AIX we need to push the stack if a frame pointer is needed
24768 (because the stack might be dynamically adjusted), if we are
24769 debugging, if we make calls, or if the sum of fp_save, gp_save,
24770 and local variables is more than the space needed to save all
24771 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24772 + 18*8 = 288 (GPR13 reserved).
24773
24774 For V.4 we don't have the stack cushion that AIX uses, but assume
24775 that the debugger can handle stackless frames. */
24776
24777 if (info->calls_p)
24778 info->push_p = 1;
24779
24780 else if (DEFAULT_ABI == ABI_V4)
24781 info->push_p = non_fixed_size != 0;
24782
24783 else if (frame_pointer_needed)
24784 info->push_p = 1;
24785
24786 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24787 info->push_p = 1;
24788
24789 else
24790 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24791
24792 return info;
24793 }
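
/* The RS6000_ALIGN calls above are plain round-up-to-a-multiple
   arithmetic.  A standalone sketch, assuming power-of-two alignments
   (which the ABI boundaries are; this is not the macro's actual
   definition):

        #include <assert.h>

        static unsigned int
        align_up (unsigned int n, unsigned int a)
        {
          return (n + a - 1) & ~(a - 1);
        }

        int
        main (void)
        {
          assert (align_up (0, 16) == 0);
          assert (align_up (1, 16) == 16);
          assert (align_up (40, 16) == 48);
          return 0;
        }

   so e.g. a 40-byte save area with a save_align of 16 occupies 48
   bytes.  */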
24794
24795 static void
24796 debug_stack_info (rs6000_stack_t *info)
24797 {
24798 const char *abi_string;
24799
24800 if (! info)
24801 info = rs6000_stack_info ();
24802
24803 fprintf (stderr, "\nStack information for function %s:\n",
24804 ((current_function_decl && DECL_NAME (current_function_decl))
24805 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24806 : "<unknown>"));
24807
24808 switch (info->abi)
24809 {
24810 default: abi_string = "Unknown"; break;
24811 case ABI_NONE: abi_string = "NONE"; break;
24812 case ABI_AIX: abi_string = "AIX"; break;
24813 case ABI_ELFv2: abi_string = "ELFv2"; break;
24814 case ABI_DARWIN: abi_string = "Darwin"; break;
24815 case ABI_V4: abi_string = "V.4"; break;
24816 }
24817
24818 fprintf (stderr, "\tABI = %5s\n", abi_string);
24819
24820 if (TARGET_ALTIVEC_ABI)
24821 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24822
24823 if (info->first_gp_reg_save != 32)
24824 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24825
24826 if (info->first_fp_reg_save != 64)
24827 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24828
24829 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24830 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24831 info->first_altivec_reg_save);
24832
24833 if (info->lr_save_p)
24834 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24835
24836 if (info->cr_save_p)
24837 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24838
24839 if (info->vrsave_mask)
24840 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24841
24842 if (info->push_p)
24843 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24844
24845 if (info->calls_p)
24846 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24847
24848 if (info->gp_size)
24849 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24850
24851 if (info->fp_size)
24852 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24853
24854 if (info->altivec_size)
24855 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24856 info->altivec_save_offset);
24857
24858 if (info->vrsave_size)
24859 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24860 info->vrsave_save_offset);
24861
24862 if (info->lr_save_p)
24863 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24864
24865 if (info->cr_save_p)
24866 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24867
24868 if (info->varargs_save_offset)
24869 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24870
24871 if (info->total_size)
24872 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24873 info->total_size);
24874
24875 if (info->vars_size)
24876 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24877 info->vars_size);
24878
24879 if (info->parm_size)
24880 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24881
24882 if (info->fixed_size)
24883 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24884
24885 if (info->gp_size)
24886 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24887
24888 if (info->fp_size)
24889 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24890
24891 if (info->altivec_size)
24892 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24893
24894 if (info->vrsave_size)
24895 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24896
24897 if (info->altivec_padding_size)
24898 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24899 info->altivec_padding_size);
24900
24901 if (info->cr_size)
24902 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24903
24904 if (info->save_size)
24905 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24906
24907 if (info->reg_size != 4)
24908 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24909
24910 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24911
24912 fprintf (stderr, "\n");
24913 }
24914
24915 rtx
24916 rs6000_return_addr (int count, rtx frame)
24917 {
24918 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24919 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24920 if (count != 0
24921 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24922 {
24923 cfun->machine->ra_needs_full_frame = 1;
24924
24925 if (count == 0)
24926 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24927 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24928 frame = stack_pointer_rtx;
24929 rtx prev_frame_addr = memory_address (Pmode, frame);
24930 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24931 rtx lr_save_off = plus_constant (Pmode,
24932 prev_frame, RETURN_ADDRESS_OFFSET);
24933 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24934 return gen_rtx_MEM (Pmode, lr_save_addr);
24935 }
24936
24937 cfun->machine->ra_need_lr = 1;
24938 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24939 }
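
/* rs6000_return_addr backs __builtin_return_address.  Typical use
   (COUNT must be a constant; a nonzero COUNT walks the back chain as
   emitted above):

        void *caller = __builtin_return_address (0);
        void *callers_caller = __builtin_return_address (1);

   With count != 0 the value is loaded from the saved-LR slot of the
   corresponding frame, so it is only reliable if every intervening
   frame actually saved LR.  */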
24940
24941 /* Say whether a function is a candidate for sibcall handling or not. */
24942
24943 static bool
24944 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24945 {
24946 tree fntype;
24947
24948 /* The sibcall epilogue may clobber the static chain register.
24949 ??? We could work harder and avoid that, but it's probably
24950 not worth the hassle in practice. */
24951 if (CALL_EXPR_STATIC_CHAIN (exp))
24952 return false;
24953
24954 if (decl)
24955 fntype = TREE_TYPE (decl);
24956 else
24957 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24958
24959 /* We can't do it if the called function has more vector parameters
24960 than the current function; there's nowhere to put the VRsave code. */
24961 if (TARGET_ALTIVEC_ABI
24962 && TARGET_ALTIVEC_VRSAVE
24963 && !(decl && decl == current_function_decl))
24964 {
24965 function_args_iterator args_iter;
24966 tree type;
24967 int nvreg = 0;
24968
24969 /* Functions with vector parameters are required to have a
24970 prototype, so the argument type info must be available
24971 here. */
24972 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
24973 if (TREE_CODE (type) == VECTOR_TYPE
24974 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24975 nvreg++;
24976
24977 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
24978 if (TREE_CODE (type) == VECTOR_TYPE
24979 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24980 nvreg--;
24981
24982 if (nvreg > 0)
24983 return false;
24984 }
24985
24986 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24987 functions, because the callee may have a different TOC pointer from
24988 the caller and there's no way to ensure we restore the TOC when
24989 we return. With the secure-plt SYSV ABI we can't make non-local
24990 calls when -fpic/PIC because the plt call stubs use r30. */
24991 if (DEFAULT_ABI == ABI_DARWIN
24992 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24993 && decl
24994 && !DECL_EXTERNAL (decl)
24995 && !DECL_WEAK (decl)
24996 && (*targetm.binds_local_p) (decl))
24997 || (DEFAULT_ABI == ABI_V4
24998 && (!TARGET_SECURE_PLT
24999 || !flag_pic
25000 || (decl
25001 && (*targetm.binds_local_p) (decl)))))
25002 {
25003 tree attr_list = TYPE_ATTRIBUTES (fntype);
25004
25005 if (!lookup_attribute ("longcall", attr_list)
25006 || lookup_attribute ("shortcall", attr_list))
25007 return true;
25008 }
25009
25010 return false;
25011 }
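
/* The attribute test above means sibcalls can be suppressed per-callee
   from source.  An illustrative example:

        void target (void) __attribute__ ((longcall));

        void
        wrapper (void)
        {
          target ();
        }

   Here the call is in tail position, but because TARGET is longcall
   (and not also shortcall) the function above returns false and no
   sibcall is generated.  */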
25012
25013 static int
25014 rs6000_ra_ever_killed (void)
25015 {
25016 rtx_insn *top;
25017 rtx reg;
25018 rtx_insn *insn;
25019
25020 if (cfun->is_thunk)
25021 return 0;
25022
25023 if (cfun->machine->lr_save_state)
25024 return cfun->machine->lr_save_state - 1;
25025
25026 /* regs_ever_live has LR marked as used if any sibcalls are present,
25027 but this should not force saving and restoring in the
25028 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25029 clobbers LR, so that is inappropriate. */
25030
25031 /* Also, the prologue can generate a store into LR that
25032 doesn't really count, like this:
25033
25034 move LR->R0
25035 bcl to set PIC register
25036 move LR->R31
25037 move R0->LR
25038
25039 When we're called from the epilogue, we need to avoid counting
25040 this as a store. */
25041
25042 push_topmost_sequence ();
25043 top = get_insns ();
25044 pop_topmost_sequence ();
25045 reg = gen_rtx_REG (Pmode, LR_REGNO);
25046
25047 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25048 {
25049 if (INSN_P (insn))
25050 {
25051 if (CALL_P (insn))
25052 {
25053 if (!SIBLING_CALL_P (insn))
25054 return 1;
25055 }
25056 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25057 return 1;
25058 else if (set_of (reg, insn) != NULL_RTX
25059 && !prologue_epilogue_contains (insn))
25060 return 1;
25061 }
25062 }
25063 return 0;
25064 }
25065 \f
25066 /* Emit instructions needed to load the TOC register.
25067 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
25068 and there is a constant pool; or for SVR4 -fpic. */
25069
25070 void
25071 rs6000_emit_load_toc_table (int fromprolog)
25072 {
25073 rtx dest;
25074 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25075
25076 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25077 {
25078 char buf[30];
25079 rtx lab, tmp1, tmp2, got;
25080
25081 lab = gen_label_rtx ();
25082 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25083 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25084 if (flag_pic == 2)
25085 {
25086 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25087 need_toc_init = 1;
25088 }
25089 else
25090 got = rs6000_got_sym ();
25091 tmp1 = tmp2 = dest;
25092 if (!fromprolog)
25093 {
25094 tmp1 = gen_reg_rtx (Pmode);
25095 tmp2 = gen_reg_rtx (Pmode);
25096 }
25097 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25098 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25099 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25100 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25101 }
25102 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25103 {
25104 emit_insn (gen_load_toc_v4_pic_si ());
25105 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25106 }
25107 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25108 {
25109 char buf[30];
25110 rtx temp0 = (fromprolog
25111 ? gen_rtx_REG (Pmode, 0)
25112 : gen_reg_rtx (Pmode));
25113
25114 if (fromprolog)
25115 {
25116 rtx symF, symL;
25117
25118 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25119 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25120
25121 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25122 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25123
25124 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25125 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25126 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25127 }
25128 else
25129 {
25130 rtx tocsym, lab;
25131
25132 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25133 need_toc_init = 1;
25134 lab = gen_label_rtx ();
25135 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25136 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25137 if (TARGET_LINK_STACK)
25138 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25139 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25140 }
25141 emit_insn (gen_addsi3 (dest, temp0, dest));
25142 }
25143 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25144 {
25145 /* This is for AIX code running in non-PIC ELF32. */
25146 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25147
25148 need_toc_init = 1;
25149 emit_insn (gen_elf_high (dest, realsym));
25150 emit_insn (gen_elf_low (dest, dest, realsym));
25151 }
25152 else
25153 {
25154 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25155
25156 if (TARGET_32BIT)
25157 emit_insn (gen_load_toc_aix_si (dest));
25158 else
25159 emit_insn (gen_load_toc_aix_di (dest));
25160 }
25161 }
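
/* For the SVR4 PIC cases above the emitted code is essentially the
   classic position-independent GOT-pointer load.  Illustrative
   assembly for the secure-plt -fPIC flavor (labels simplified):

        bcl 20,31,.L1
   .L1: mflr 30
        addis 30,30,_GLOBAL_OFFSET_TABLE_-.L1@ha
        addi 30,30,_GLOBAL_OFFSET_TABLE_-.L1@l

   i.e. the load_toc_v4_PIC_* patterns obtain a known code address via
   the link register and compute the GOT address relative to it.  */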
25162
25163 /* Emit instructions to restore the link register after determining where
25164 its value has been stored. */
25165
25166 void
25167 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25168 {
25169 rs6000_stack_t *info = rs6000_stack_info ();
25170 rtx operands[2];
25171
25172 operands[0] = source;
25173 operands[1] = scratch;
25174
25175 if (info->lr_save_p)
25176 {
25177 rtx frame_rtx = stack_pointer_rtx;
25178 HOST_WIDE_INT sp_offset = 0;
25179 rtx tmp;
25180
25181 if (frame_pointer_needed
25182 || cfun->calls_alloca
25183 || info->total_size > 32767)
25184 {
25185 tmp = gen_frame_mem (Pmode, frame_rtx);
25186 emit_move_insn (operands[1], tmp);
25187 frame_rtx = operands[1];
25188 }
25189 else if (info->push_p)
25190 sp_offset = info->total_size;
25191
25192 tmp = plus_constant (Pmode, frame_rtx,
25193 info->lr_save_offset + sp_offset);
25194 tmp = gen_frame_mem (Pmode, tmp);
25195 emit_move_insn (tmp, operands[0]);
25196 }
25197 else
25198 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25199
25200 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25201 state of lr_save_p so any change from here on would be a bug. In
25202 particular, stop rs6000_ra_ever_killed from considering the SET
25203 of lr we may have added just above. */
25204 cfun->machine->lr_save_state = info->lr_save_p + 1;
25205 }
25206
25207 static GTY(()) alias_set_type set = -1;
25208
25209 alias_set_type
25210 get_TOC_alias_set (void)
25211 {
25212 if (set == -1)
25213 set = new_alias_set ();
25214 return set;
25215 }
25216
25217 /* This returns nonzero if the current function uses the TOC. This is
25218 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25219 is generated by the ABI_V4 load_toc_* patterns.
25220 Return 2 instead of 1 if the load_toc_* pattern is in the function
25221 partition that doesn't start the function. */
25222 #if TARGET_ELF
25223 static int
25224 uses_TOC (void)
25225 {
25226 rtx_insn *insn;
25227 int ret = 1;
25228
25229 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25230 {
25231 if (INSN_P (insn))
25232 {
25233 rtx pat = PATTERN (insn);
25234 int i;
25235
25236 if (GET_CODE (pat) == PARALLEL)
25237 for (i = 0; i < XVECLEN (pat, 0); i++)
25238 {
25239 rtx sub = XVECEXP (pat, 0, i);
25240 if (GET_CODE (sub) == USE)
25241 {
25242 sub = XEXP (sub, 0);
25243 if (GET_CODE (sub) == UNSPEC
25244 && XINT (sub, 1) == UNSPEC_TOC)
25245 return ret;
25246 }
25247 }
25248 }
25249 else if (crtl->has_bb_partition
25250 && NOTE_P (insn)
25251 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25252 ret = 2;
25253 }
25254 return 0;
25255 }
25256 #endif
25257
25258 rtx
25259 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25260 {
25261 rtx tocrel, tocreg, hi;
25262
25263 if (TARGET_DEBUG_ADDR)
25264 {
25265 if (SYMBOL_REF_P (symbol))
25266 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25267 XSTR (symbol, 0));
25268 else
25269 {
25270 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25271 GET_RTX_NAME (GET_CODE (symbol)));
25272 debug_rtx (symbol);
25273 }
25274 }
25275
25276 if (!can_create_pseudo_p ())
25277 df_set_regs_ever_live (TOC_REGISTER, true);
25278
25279 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25280 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25281 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25282 return tocrel;
25283
25284 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25285 if (largetoc_reg != NULL)
25286 {
25287 emit_move_insn (largetoc_reg, hi);
25288 hi = largetoc_reg;
25289 }
25290 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25291 }
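
/* The two shapes returned above correspond to the small and the
   medium/large TOC models.  Roughly, for a load of SYM (illustrative
   RTL and assembly):

        small:  (unspec [SYM, r2] UNSPEC_TOCREL)
                        ld 9,SYM@toc(2)

        larger: (lo_sum (high (unspec ...)) (unspec ...))
                        addis 9,2,SYM@toc@ha
                        ld 9,SYM@toc@l(9)

   where r2 is the TOC pointer.  */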
25292
25293 /* Issue assembly directives that create a reference to the given DWARF
25294 FRAME_TABLE_LABEL from the current function section. */
25295 void
25296 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25297 {
25298 fprintf (asm_out_file, "\t.ref %s\n",
25299 (* targetm.strip_name_encoding) (frame_table_label));
25300 }
25301 \f
25302 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25303 and the change to the stack pointer. */
25304
25305 static void
25306 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25307 {
25308 rtvec p;
25309 int i;
25310 rtx regs[3];
25311
25312 i = 0;
25313 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25314 if (hard_frame_needed)
25315 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25316 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25317 || (hard_frame_needed
25318 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25319 regs[i++] = fp;
25320
25321 p = rtvec_alloc (i);
25322 while (--i >= 0)
25323 {
25324 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25325 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25326 }
25327
25328 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25329 }
25330
25331 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25332 and set the appropriate attributes for the generated insn. Return the
25333 first insn which adjusts the stack pointer or the last insn before
25334 the stack adjustment loop.
25335
25336 SIZE_INT is used to create the CFI note for the allocation.
25337
25338 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25339 since stacks grow to lower addresses, its runtime value is -SIZE_INT.
25340
25341 ORIG_SP contains the backchain value that must be stored at *sp. */
25342
25343 static rtx_insn *
25344 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25345 {
25346 rtx_insn *insn;
25347
25348 rtx size_rtx = GEN_INT (-size_int);
25349 if (size_int > 32767)
25350 {
25351 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25352 /* Need a note here so that try_split doesn't get confused. */
25353 if (get_last_insn () == NULL_RTX)
25354 emit_note (NOTE_INSN_DELETED);
25355 insn = emit_move_insn (tmp_reg, size_rtx);
25356 try_split (PATTERN (insn), insn, 0);
25357 size_rtx = tmp_reg;
25358 }
25359
25360 if (Pmode == SImode)
25361 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25362 stack_pointer_rtx,
25363 size_rtx,
25364 orig_sp));
25365 else
25366 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25367 stack_pointer_rtx,
25368 size_rtx,
25369 orig_sp));
25370 rtx par = PATTERN (insn);
25371 gcc_assert (GET_CODE (par) == PARALLEL);
25372 rtx set = XVECEXP (par, 0, 0);
25373 gcc_assert (GET_CODE (set) == SET);
25374 rtx mem = SET_DEST (set);
25375 gcc_assert (MEM_P (mem));
25376 MEM_NOTRAP_P (mem) = 1;
25377 set_mem_alias_set (mem, get_frame_alias_set ());
25378
25379 RTX_FRAME_RELATED_P (insn) = 1;
25380 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25381 gen_rtx_SET (stack_pointer_rtx,
25382 gen_rtx_PLUS (Pmode,
25383 stack_pointer_rtx,
25384 GEN_INT (-size_int))));
25385
25386 /* Emit a blockage to ensure the allocation/probing insns are
25387 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25388 note for similar reasons. */
25389 if (flag_stack_clash_protection)
25390 {
25391 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25392 emit_insn (gen_blockage ());
25393 }
25394
25395 return insn;
25396 }
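
/* The store-with-update insn emitted above is the single instruction
   that both allocates the stack and stores the backchain, e.g. for an
   illustrative 144-byte allocation on a 64-bit target:

        stdu 1,-144(1)

   which stores the old stack pointer at sp - 144 and decrements sp by
   144 in one step.  For sizes above 32767 the negated size is first
   loaded into r0 and the indexed update form is used instead.  */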
25397
25398 static HOST_WIDE_INT
25399 get_stack_clash_protection_probe_interval (void)
25400 {
25401 return (HOST_WIDE_INT_1U
25402 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25403 }
25404
25405 static HOST_WIDE_INT
25406 get_stack_clash_protection_guard_size (void)
25407 {
25408 return (HOST_WIDE_INT_1U
25409 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25410 }
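
/* Both values are powers of two derived from --param exponents.  A
   standalone sketch, assuming an exponent of 12 for the probe
   interval (the defaults are target and option dependent):

        #include <stdio.h>

        int
        main (void)
        {
          unsigned long long probe_interval = 1ULL << 12;
          printf ("%llu\n", probe_interval);
          return 0;
        }

   prints 4096, i.e. one probe per 4 KiB page.  */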
25411
25412 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25413 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25414
25415 COPY_REG, if non-null, should contain a copy of the original
25416 stack pointer at exit from this function.
25417
25418 This is subtly different from the Ada probing in that it tries hard to
25419 prevent attacks that jump the stack guard. Thus it is never allowed to
25420 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25421 space without a suitable probe. */
25422 static rtx_insn *
25423 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25424 rtx copy_reg)
25425 {
25426 rtx orig_sp = copy_reg;
25427
25428 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25429
25430 /* Round the size down to a multiple of PROBE_INTERVAL. */
25431 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25432
25433 /* If explicitly requested,
25434 or the rounded size is not the same as the original size
25435 or the rounded size is greater than a page,
25436 then we will need a copy of the original stack pointer. */
25437 if (rounded_size != orig_size
25438 || rounded_size > probe_interval
25439 || copy_reg)
25440 {
25441 /* If the caller did not request a copy of the incoming stack
25442 pointer, then we use r0 to hold the copy. */
25443 if (!copy_reg)
25444 orig_sp = gen_rtx_REG (Pmode, 0);
25445 emit_move_insn (orig_sp, stack_pointer_rtx);
25446 }
25447
25448 /* There are three cases here.
25449 
25450 One is a single probe, which is the most common and the most
25451 efficient to implement, since it does not need a copy of the
25452 original stack pointer if there are no residuals.
25453 
25454 Second is unrolled allocation/probes, which we use if there are
25455 just a few of them. It needs to save the original stack pointer
25456 into a temporary for use as a source register in the allocation/probe.
25457 
25458 Last is a loop. This is the most uncommon case and the least efficient. */
25459 rtx_insn *retval = NULL;
25460 if (rounded_size == probe_interval)
25461 {
25462 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25463
25464 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25465 }
25466 else if (rounded_size <= 8 * probe_interval)
25467 {
25468 /* The ABI requires using the store with update insns to allocate
25469 space and store the backchain into the stack.
25470
25471 So we save the current stack pointer into a temporary, then
25472 emit the store-with-update insns to store the saved stack pointer
25473 into the right location in each new page. */
25474 for (int i = 0; i < rounded_size; i += probe_interval)
25475 {
25476 rtx_insn *insn
25477 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25478
25479 /* Save the first stack adjustment in RETVAL. */
25480 if (i == 0)
25481 retval = insn;
25482 }
25483
25484 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25485 }
25486 else
25487 {
25488 /* Compute the ending address. */
25489 rtx end_addr
25490 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25491 rtx rs = GEN_INT (-rounded_size);
25492 rtx_insn *insn;
25493 if (add_operand (rs, Pmode))
25494 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25495 else
25496 {
25497 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25498 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25499 stack_pointer_rtx));
25500 /* Describe the effect of INSN to the CFI engine. */
25501 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25502 gen_rtx_SET (end_addr,
25503 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25504 rs)));
25505 }
25506 RTX_FRAME_RELATED_P (insn) = 1;
25507
25508 /* Emit the loop. */
25509 if (TARGET_64BIT)
25510 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25511 stack_pointer_rtx, orig_sp,
25512 end_addr));
25513 else
25514 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25515 stack_pointer_rtx, orig_sp,
25516 end_addr));
25517 RTX_FRAME_RELATED_P (retval) = 1;
25518 /* Describe the effect of INSN to the CFI engine. */
25519 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25520 gen_rtx_SET (stack_pointer_rtx, end_addr));
25521
25522 /* Emit a blockage to ensure the allocation/probing insns are
25523 not optimized, combined, removed, etc. Other cases handle this
25524 within their call to rs6000_emit_allocate_stack_1. */
25525 emit_insn (gen_blockage ());
25526
25527 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25528 }
25529
25530 if (orig_size != rounded_size)
25531 {
25532 /* Allocate (and implicitly probe) any residual space. */
25533 HOST_WIDE_INT residual = orig_size - rounded_size;
25534
25535 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25536
25537 /* If the residual was the only allocation, then we can return the
25538 allocating insn. */
25539 if (!retval)
25540 retval = insn;
25541 }
25542
25543 return retval;
25544 }
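
/* A worked instance of the three cases, assuming a 4096-byte probe
   interval:

        orig_size = 4096   -> one store-with-update, no SP copy needed
        orig_size = 12288  -> three unrolled stores-with-update
        orig_size = 40960  -> ten pages, emitted as the probe loop
        orig_size = 5000   -> one 4096-byte probe, then a 904-byte
                              residual allocation

   In every case each new page is touched by the store of the
   backchain, so an attacker cannot skip over the guard page.  */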
25545
25546 /* Emit the correct code for allocating stack space, as insns.
25547 If COPY_REG, make sure a copy of the old frame is left there.
25548 The generated code may use hard register 0 as a temporary. */
25549
25550 static rtx_insn *
25551 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25552 {
25553 rtx_insn *insn;
25554 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25555 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25556 rtx todec = gen_int_mode (-size, Pmode);
25557
25558 if (INTVAL (todec) != -size)
25559 {
25560 warning (0, "stack frame too large");
25561 emit_insn (gen_trap ());
25562 return 0;
25563 }
25564
25565 if (crtl->limit_stack)
25566 {
25567 if (REG_P (stack_limit_rtx)
25568 && REGNO (stack_limit_rtx) > 1
25569 && REGNO (stack_limit_rtx) <= 31)
25570 {
25571 rtx_insn *insn
25572 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25573 gcc_assert (insn);
25574 emit_insn (insn);
25575 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25576 }
25577 else if (SYMBOL_REF_P (stack_limit_rtx)
25578 && TARGET_32BIT
25579 && DEFAULT_ABI == ABI_V4
25580 && !flag_pic)
25581 {
25582 rtx toload = gen_rtx_CONST (VOIDmode,
25583 gen_rtx_PLUS (Pmode,
25584 stack_limit_rtx,
25585 GEN_INT (size)));
25586
25587 emit_insn (gen_elf_high (tmp_reg, toload));
25588 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25589 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25590 const0_rtx));
25591 }
25592 else
25593 warning (0, "stack limit expression is not supported");
25594 }
25595
25596 if (flag_stack_clash_protection)
25597 {
25598 if (size < get_stack_clash_protection_guard_size ())
25599 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25600 else
25601 {
25602 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25603 copy_reg);
25604
25605 /* If we asked for a copy with an offset, then we still need to add
25606 in the offset. */
25607 if (copy_reg && copy_off)
25608 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25609 return insn;
25610 }
25611 }
25612
25613 if (copy_reg)
25614 {
25615 if (copy_off != 0)
25616 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25617 else
25618 emit_move_insn (copy_reg, stack_reg);
25619 }
25620
25621 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25622 it now and set the alias set/attributes. The above gen_*_update
25623 calls will generate a PARALLEL with the MEM set being the first
25624 operation. */
25625 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25626 return insn;
25627 }
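
/* For illustration: for frames small enough to reach with a 16-bit
   displacement, rs6000_emit_allocate_stack_1 typically allocates the
   space and stores the backchain with one store-with-update, e.g.
   (64-bit, hypothetical 112-byte frame):

	stdu 1,-112(1)

   with "stwu 1,-112(1)" being the 32-bit equivalent.  */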
25628
25629 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25630
25631 #if PROBE_INTERVAL > 32768
25632 #error Cannot use indexed addressing mode for stack probing
25633 #endif
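
/* A note on the limit above: the probes are d-form stores, whose signed
   16-bit displacement reaches at most 32768 bytes below the base
   register.  With the usual STACK_CHECK_PROBE_INTERVAL_EXP of 12,
   PROBE_INTERVAL is 4096 and comfortably within range.  */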
25634
25635 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25636 inclusive. These are offsets from the current stack pointer. */
25637
25638 static void
25639 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25640 {
25641 /* See if we have a constant small number of probes to generate. If so,
25642 that's the easy case. */
25643 if (first + size <= 32768)
25644 {
25645 HOST_WIDE_INT i;
25646
25647 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25648 it exceeds SIZE. If only one probe is needed, this will not
25649 generate any code. Then probe at FIRST + SIZE. */
25650 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25651 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25652 -(first + i)));
25653
25654 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25655 -(first + size)));
25656 }
25657
25658 /* Otherwise, do the same as above, but in a loop. Note that we must be
25659 extra careful with variables wrapping around because we might be at
25660 the very top (or the very bottom) of the address space and we have
25661 to be able to handle this case properly; in particular, we use an
25662 equality test for the loop condition. */
25663 else
25664 {
25665 HOST_WIDE_INT rounded_size;
25666 rtx r12 = gen_rtx_REG (Pmode, 12);
25667 rtx r0 = gen_rtx_REG (Pmode, 0);
25668
25669 /* Sanity check for the addressing mode we're going to use. */
25670 gcc_assert (first <= 32768);
25671
25672 /* Step 1: round SIZE to the previous multiple of the interval. */
25673
25674 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25675
25676
25677 /* Step 2: compute initial and final value of the loop counter. */
25678
25679 /* TEST_ADDR = SP + FIRST. */
25680 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25681 -first)));
25682
25683 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25684 if (rounded_size > 32768)
25685 {
25686 emit_move_insn (r0, GEN_INT (-rounded_size));
25687 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25688 }
25689 else
25690 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25691 -rounded_size)));
25692
25693
25694 /* Step 3: the loop
25695
25696 do
25697 {
25698 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25699 probe at TEST_ADDR
25700 }
25701 while (TEST_ADDR != LAST_ADDR)
25702
25703 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25704 until it is equal to ROUNDED_SIZE. */
25705
25706 if (TARGET_64BIT)
25707 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25708 else
25709 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25710
25711
25712 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25713 that SIZE is equal to ROUNDED_SIZE. */
25714
25715 if (size != rounded_size)
25716 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25717 }
25718 }
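
/* A worked example of the constant case above, assuming
   PROBE_INTERVAL == 4096: for FIRST == 16384 and SIZE == 10000, the
   loop probes at sp-20480 and sp-24576, and the final probe at
   FIRST + SIZE lands at sp-26384.  */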
25719
25720 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25721 addresses, not offsets. */
25722
25723 static const char *
25724 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25725 {
25726 static int labelno = 0;
25727 char loop_lab[32];
25728 rtx xops[2];
25729
25730 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25731
25732 /* Loop. */
25733 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25734
25735 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25736 xops[0] = reg1;
25737 xops[1] = GEN_INT (-PROBE_INTERVAL);
25738 output_asm_insn ("addi %0,%0,%1", xops);
25739
25740 /* Probe at TEST_ADDR. */
25741 xops[1] = gen_rtx_REG (Pmode, 0);
25742 output_asm_insn ("stw %1,0(%0)", xops);
25743
25744 /* Test if TEST_ADDR == LAST_ADDR. */
25745 xops[1] = reg2;
25746 if (TARGET_64BIT)
25747 output_asm_insn ("cmpd 0,%0,%1", xops);
25748 else
25749 output_asm_insn ("cmpw 0,%0,%1", xops);
25750
25751 /* Branch. */
25752 fputs ("\tbne 0,", asm_out_file);
25753 assemble_name_raw (asm_out_file, loop_lab);
25754 fputc ('\n', asm_out_file);
25755
25756 return "";
25757 }
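
/* For reference, the 32-bit loop emitted above looks roughly like this,
   assuming PROBE_INTERVAL == 4096 with REG1 == r12 and REG2 == r0:

	.LPSRL0:
	addi 12,12,-4096
	stw 0,0(12)
	cmpw 0,12,0
	bne 0,.LPSRL0
*/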
25758
25759 /* This function is called when rs6000_frame_related is processing
25760 SETs within a PARALLEL, and returns whether the REGNO save ought to
25761 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25762 for out-of-line register save functions, store multiple, and the
25763 Darwin world_save. They may contain registers that don't really
25764 need saving. */
25765
25766 static bool
25767 interesting_frame_related_regno (unsigned int regno)
25768 {
25769 /* Apparent saves of r0 are actually saving LR. It doesn't make
25770 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25771 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25772 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25773 as frame related. */
25774 if (regno == 0)
25775 return true;
25776 /* If we see CR2 then we are here on a Darwin world save. Saves of
25777 CR2 signify the whole CR is being saved. This is a long-standing
25778 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25779 that CR needs to be saved. */
25780 if (regno == CR2_REGNO)
25781 return true;
25782 /* Omit frame info for any user-defined global regs. If frame info
25783 is supplied for them, frame unwinding will restore a user reg.
25784 Also omit frame info for any reg we don't need to save, as that
25785 bloats frame info and can cause problems with shrink wrapping.
25786 Since global regs won't be seen as needing to be saved, both of
25787 these conditions are covered by save_reg_p. */
25788 return save_reg_p (regno);
25789 }
25790
25791 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25792 addresses, not offsets.
25793
25794 REG2 contains the backchain that must be stored into *sp at each allocation.
25795
25796 This is subtly different from the Ada probing above in that it tries hard
25797 to prevent attacks that jump the stack guard. Thus, it is never allowed
25798 to allocate more than PROBE_INTERVAL bytes of stack space without a
25799 suitable probe. */
25800
25801 static const char *
25802 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25803 {
25804 static int labelno = 0;
25805 char loop_lab[32];
25806 rtx xops[3];
25807
25808 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25809
25810 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25811
25812 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25813
25814 /* This allocates and probes. */
25815 xops[0] = reg1;
25816 xops[1] = reg2;
25817 xops[2] = GEN_INT (-probe_interval);
25818 if (TARGET_64BIT)
25819 output_asm_insn ("stdu %1,%2(%0)", xops);
25820 else
25821 output_asm_insn ("stwu %1,%2(%0)", xops);
25822
25823 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25824 xops[0] = reg1;
25825 xops[1] = reg3;
25826 if (TARGET_64BIT)
25827 output_asm_insn ("cmpd 0,%0,%1", xops);
25828 else
25829 output_asm_insn ("cmpw 0,%0,%1", xops);
25830
25831 fputs ("\tbne 0,", asm_out_file);
25832 assemble_name_raw (asm_out_file, loop_lab);
25833 fputc ('\n', asm_out_file);
25834
25835 return "";
25836 }
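
/* For reference, assuming a 4096-byte probe interval with REG1 == r1,
   REG2 holding the backchain in r0 and REG3 == r12 as the end address,
   the 64-bit loop emitted above is roughly:

	.LPSRL1:
	stdu 0,-4096(1)
	cmpd 0,1,12
	bne 0,.LPSRL1

   Each stdu allocates exactly one probe interval and stores the
   backchain at the new stack pointer, so no span larger than the
   interval is ever left unprobed.  */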
25837
25838 /* Wrapper around the output_probe_stack_range routines. */
25839 const char *
25840 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25841 {
25842 if (flag_stack_clash_protection)
25843 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25844 else
25845 return output_probe_stack_range_1 (reg1, reg3);
25846 }
25847
25848 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25849 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25850 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25851 deduce these equivalences by itself so it wasn't necessary to hold
25852 its hand so much. Don't be tempted to always supply d2_f_d_e with
25853 the actual cfa register, i.e. r31 when we are using a hard frame
25854 pointer. That fails when saving regs off r1, and sched moves the
25855 r31 setup past the reg saves. */
25856
25857 static rtx_insn *
25858 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25859 rtx reg2, rtx repl2)
25860 {
25861 rtx repl;
25862
25863 if (REGNO (reg) == STACK_POINTER_REGNUM)
25864 {
25865 gcc_checking_assert (val == 0);
25866 repl = NULL_RTX;
25867 }
25868 else
25869 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25870 GEN_INT (val));
25871
25872 rtx pat = PATTERN (insn);
25873 if (!repl && !reg2)
25874 {
25875 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25876 if (GET_CODE (pat) == PARALLEL)
25877 for (int i = 0; i < XVECLEN (pat, 0); i++)
25878 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25879 {
25880 rtx set = XVECEXP (pat, 0, i);
25881
25882 if (!REG_P (SET_SRC (set))
25883 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25884 RTX_FRAME_RELATED_P (set) = 1;
25885 }
25886 RTX_FRAME_RELATED_P (insn) = 1;
25887 return insn;
25888 }
25889
25890 /* We expect that 'pat' is either a SET or a PARALLEL containing
25891 SETs (and possibly other stuff). In a PARALLEL, each SET that
25892 saves an interesting register has to be marked RTX_FRAME_RELATED_P.
25893 Call simplify_replace_rtx on the SETs rather than the whole insn
25894 so as to leave the other stuff alone (for example USE of r12). */
25895
25896 set_used_flags (pat);
25897 if (GET_CODE (pat) == SET)
25898 {
25899 if (repl)
25900 pat = simplify_replace_rtx (pat, reg, repl);
25901 if (reg2)
25902 pat = simplify_replace_rtx (pat, reg2, repl2);
25903 }
25904 else if (GET_CODE (pat) == PARALLEL)
25905 {
25906 pat = shallow_copy_rtx (pat);
25907 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25908
25909 for (int i = 0; i < XVECLEN (pat, 0); i++)
25910 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25911 {
25912 rtx set = XVECEXP (pat, 0, i);
25913
25914 if (repl)
25915 set = simplify_replace_rtx (set, reg, repl);
25916 if (reg2)
25917 set = simplify_replace_rtx (set, reg2, repl2);
25918 XVECEXP (pat, 0, i) = set;
25919
25920 if (!REG_P (SET_SRC (set))
25921 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25922 RTX_FRAME_RELATED_P (set) = 1;
25923 }
25924 }
25925 else
25926 gcc_unreachable ();
25927
25928 RTX_FRAME_RELATED_P (insn) = 1;
25929 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25930
25931 return insn;
25932 }
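
/* An illustrative (not compiler-generated) example: if INSN saves r31
   at offset 8 from a frame register r11 that equals r1 + 112, i.e.
   REG == r11 and VAL == 112,

     (set (mem (plus (reg 11) (const_int 8))) (reg 31))

   then the REG_FRAME_RELATED_EXPR note added above records the save
   relative to the stack pointer, roughly

     (set (mem (plus (reg 1) (const_int 120))) (reg 31))

   after simplify_replace_rtx folds the nested PLUS.  */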
25933
25934 /* Returns an insn that has a vrsave set operation with the
25935 appropriate CLOBBERs. */
25936
25937 static rtx
25938 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25939 {
25940 int nclobs, i;
25941 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25942 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25943
25944 clobs[0]
25945 = gen_rtx_SET (vrsave,
25946 gen_rtx_UNSPEC_VOLATILE (SImode,
25947 gen_rtvec (2, reg, vrsave),
25948 UNSPECV_SET_VRSAVE));
25949
25950 nclobs = 1;
25951
25952 /* We need to clobber the registers in the mask so the scheduler
25953 does not move sets to VRSAVE before sets of AltiVec registers.
25954
25955 However, if the function receives nonlocal gotos, reload will set
25956 all call saved registers live. We will end up with:
25957
25958 (set (reg 999) (mem))
25959 (parallel [ (set (reg vrsave) (unspec blah))
25960 (clobber (reg 999))])
25961
25962 The clobber will cause the store into reg 999 to be dead, and
25963 flow will attempt to delete an epilogue insn. In this case, we
25964 need an unspec use/set of the register. */
25965
25966 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25967 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25968 {
25969 if (!epiloguep || call_used_regs [i])
25970 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
25971 else
25972 {
25973 rtx reg = gen_rtx_REG (V4SImode, i);
25974
25975 clobs[nclobs++]
25976 = gen_rtx_SET (reg,
25977 gen_rtx_UNSPEC (V4SImode,
25978 gen_rtvec (1, reg), 27));
25979 }
25980 }
25981
25982 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25983
25984 for (i = 0; i < nclobs; ++i)
25985 XVECEXP (insn, 0, i) = clobs[i];
25986
25987 return insn;
25988 }
25989
25990 static rtx
25991 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25992 {
25993 rtx addr, mem;
25994
25995 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25996 mem = gen_frame_mem (GET_MODE (reg), addr);
25997 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25998 }
25999
26000 static rtx
26001 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26002 {
26003 return gen_frame_set (reg, frame_reg, offset, false);
26004 }
26005
26006 static rtx
26007 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26008 {
26009 return gen_frame_set (reg, frame_reg, offset, true);
26010 }
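
/* For example, gen_frame_store (r31, r1, 16) on a 64-bit target builds
   roughly

     (set (mem:DI (plus:DI (reg:DI 1) (const_int 16))) (reg:DI 31))

   with the MEM given the frame alias set by gen_frame_mem.  */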
26011
26012 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26013 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26014
26015 static rtx_insn *
26016 emit_frame_save (rtx frame_reg, machine_mode mode,
26017 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26018 {
26019 rtx reg;
26020
26021 /* Some cases that need register indexed addressing. */
26022 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26023 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26024
26025 reg = gen_rtx_REG (mode, regno);
26026 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26027 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26028 NULL_RTX, NULL_RTX);
26029 }
26030
26031 /* Emit an offset memory reference suitable for a frame store, while
26032 converting to a valid addressing mode. */
26033
26034 static rtx
26035 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26036 {
26037 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26038 }
26039
26040 #ifndef TARGET_FIX_AND_CONTINUE
26041 #define TARGET_FIX_AND_CONTINUE 0
26042 #endif
26043
26044 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
26045 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26046 #define LAST_SAVRES_REGISTER 31
26047 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26048
26049 enum {
26050 SAVRES_LR = 0x1,
26051 SAVRES_SAVE = 0x2,
26052 SAVRES_REG = 0x0c,
26053 SAVRES_GPR = 0,
26054 SAVRES_FPR = 4,
26055 SAVRES_VR = 8
26056 };
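
/* A SEL value is the OR of the flags above; for example
   SAVRES_SAVE | SAVRES_FPR | SAVRES_LR == 0x7 selects the FPR save
   routine that also saves the link register, named e.g.
   "_savefpr_14_x" on 32-bit SVR4 by rs6000_savres_routine_name
   below.  */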
26057
26058 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26059
26060 /* Temporary holding space for an out-of-line register save/restore
26061 routine name. */
26062 static char savres_routine_name[30];
26063
26064 /* Return the name for an out-of-line register save/restore routine.
26065 SEL selects save versus restore and the class of registers. */
26066
26067 static char *
26068 rs6000_savres_routine_name (int regno, int sel)
26069 {
26070 const char *prefix = "";
26071 const char *suffix = "";
26072
26073 /* Different targets are supposed to define
26074 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26075 routine name could be defined with:
26076
26077 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26078
26079 This is a nice idea in theory, but in reality, things are
26080 complicated in several ways:
26081
26082 - ELF targets have save/restore routines for GPRs.
26083
26084 - PPC64 ELF targets have routines for save/restore of GPRs that
26085 differ in what they do with the link register, so having a set
26086 prefix doesn't work. (We only use one of the save routines at
26087 the moment, though.)
26088
26089 - PPC32 ELF targets have "exit" versions of the restore routines
26090 that restore the link register and can save some extra space.
26091 These require an extra suffix. (There are also "tail" versions
26092 of the restore routines and "GOT" versions of the save routines,
26093 but we don't generate those at present. Same problems apply,
26094 though.)
26095
26096 We deal with all this by synthesizing our own prefix/suffix and
26097 using that for the simple sprintf call shown above. */
26098 if (DEFAULT_ABI == ABI_V4)
26099 {
26100 if (TARGET_64BIT)
26101 goto aix_names;
26102
26103 if ((sel & SAVRES_REG) == SAVRES_GPR)
26104 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26105 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26106 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26107 else if ((sel & SAVRES_REG) == SAVRES_VR)
26108 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26109 else
26110 abort ();
26111
26112 if ((sel & SAVRES_LR))
26113 suffix = "_x";
26114 }
26115 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26116 {
26117 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26118 /* No out-of-line save/restore routines for GPRs on AIX. */
26119 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26120 #endif
26121
26122 aix_names:
26123 if ((sel & SAVRES_REG) == SAVRES_GPR)
26124 prefix = ((sel & SAVRES_SAVE)
26125 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26126 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26127 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26128 {
26129 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26130 if ((sel & SAVRES_LR))
26131 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26132 else
26133 #endif
26134 {
26135 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26136 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26137 }
26138 }
26139 else if ((sel & SAVRES_REG) == SAVRES_VR)
26140 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26141 else
26142 abort ();
26143 }
26144
26145 if (DEFAULT_ABI == ABI_DARWIN)
26146 {
26147 /* The Darwin approach is (slightly) different, in order to be
26148 compatible with code generated by the system toolchain. There is a
26149 single symbol for the start of the save sequence, and the code here
26150 embeds an offset into that code on the basis of the first register
26151 to be saved. */
26152 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26153 if ((sel & SAVRES_REG) == SAVRES_GPR)
26154 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26155 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26156 (regno - 13) * 4, prefix, regno);
26157 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26158 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26159 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26160 else if ((sel & SAVRES_REG) == SAVRES_VR)
26161 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26162 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26163 else
26164 abort ();
26165 }
26166 else
26167 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26168
26169 return savres_routine_name;
26170 }
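
/* Example results: on 64-bit ELF, saving GPRs from r29 up together with
   LR yields "_savegpr0_29", while on 32-bit SVR4 restoring FPRs from
   f14 up and returning via the restored LR yields "_restfpr_14_x".  */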
26171
26172 /* Return an RTL SYMBOL_REF for the out-of-line register save/restore
26173 routine selected by SEL, using the first-saved-register info in INFO. */
26174
26175 static rtx
26176 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26177 {
26178 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26179 ? info->first_gp_reg_save
26180 : (sel & SAVRES_REG) == SAVRES_FPR
26181 ? info->first_fp_reg_save - 32
26182 : (sel & SAVRES_REG) == SAVRES_VR
26183 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26184 : -1);
26185 rtx sym;
26186 int select = sel;
26187
26188 /* Don't generate bogus routine names. */
26189 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26190 && regno <= LAST_SAVRES_REGISTER
26191 && select >= 0 && select <= 12);
26192
26193 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26194
26195 if (sym == NULL)
26196 {
26197 char *name;
26198
26199 name = rs6000_savres_routine_name (regno, sel);
26200
26201 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26202 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26203 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26204 }
26205
26206 return sym;
26207 }
26208
26209 /* Emit a sequence of insns, including a stack tie if needed, for
26210 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26211 reset the stack pointer, but move the base of the frame into
26212 reg UPDT_REGNO for use by out-of-line register restore routines. */
26213
26214 static rtx
26215 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26216 unsigned updt_regno)
26217 {
26218 /* If there is nothing to do, don't do anything. */
26219 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26220 return NULL_RTX;
26221
26222 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26223
26224 /* This blockage is needed so that sched doesn't decide to move
26225 the sp change before the register restores. */
26226 if (DEFAULT_ABI == ABI_V4)
26227 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26228 GEN_INT (frame_off)));
26229
26230 /* If we are restoring registers out-of-line, we will be using the
26231 "exit" variants of the restore routines, which will reset the
26232 stack for us. But we do need to point updt_reg into the
26233 right place for those routines. */
26234 if (frame_off != 0)
26235 return emit_insn (gen_add3_insn (updt_reg_rtx,
26236 frame_reg_rtx, GEN_INT (frame_off)));
26237 else
26238 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26241 }
26242
26243 /* Return the register number used as a pointer by out-of-line
26244 save/restore functions. */
26245
26246 static inline unsigned
26247 ptr_regno_for_savres (int sel)
26248 {
26249 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26250 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26251 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26252 }
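
/* So, for example, the AIX/ELFv2 GPR routines that do not touch LR
   address the save area from r12, the FPR routines and the LR-saving
   variants use r1, 32-bit SVR4 uses r11 throughout, and the Darwin
   FPR routines use r1.  */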
26253
26254 /* Construct a parallel rtx describing the effect of a call to an
26255 out-of-line register save/restore routine, and emit the insn
26256 or jump_insn as appropriate. */
26257
26258 static rtx_insn *
26259 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26260 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26261 machine_mode reg_mode, int sel)
26262 {
26263 int i;
26264 int offset, start_reg, end_reg, n_regs, use_reg;
26265 int reg_size = GET_MODE_SIZE (reg_mode);
26266 rtx sym;
26267 rtvec p;
26268 rtx par;
26269 rtx_insn *insn;
26270
26271 offset = 0;
26272 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26273 ? info->first_gp_reg_save
26274 : (sel & SAVRES_REG) == SAVRES_FPR
26275 ? info->first_fp_reg_save
26276 : (sel & SAVRES_REG) == SAVRES_VR
26277 ? info->first_altivec_reg_save
26278 : -1);
26279 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26280 ? 32
26281 : (sel & SAVRES_REG) == SAVRES_FPR
26282 ? 64
26283 : (sel & SAVRES_REG) == SAVRES_VR
26284 ? LAST_ALTIVEC_REGNO + 1
26285 : -1);
26286 n_regs = end_reg - start_reg;
26287 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26288 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26289 + n_regs);
26290
26291 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26292 RTVEC_ELT (p, offset++) = ret_rtx;
26293
26294 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26295
26296 sym = rs6000_savres_routine_sym (info, sel);
26297 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26298
26299 use_reg = ptr_regno_for_savres (sel);
26300 if ((sel & SAVRES_REG) == SAVRES_VR)
26301 {
26302 /* Vector regs are saved/restored using [reg+reg] addressing. */
26303 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26304 RTVEC_ELT (p, offset++)
26305 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26306 }
26307 else
26308 RTVEC_ELT (p, offset++)
26309 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26310
26311 for (i = 0; i < end_reg - start_reg; i++)
26312 RTVEC_ELT (p, i + offset)
26313 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26314 frame_reg_rtx, save_area_offset + reg_size * i,
26315 (sel & SAVRES_SAVE) != 0);
26316
26317 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26318 RTVEC_ELT (p, i + offset)
26319 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26320
26321 par = gen_rtx_PARALLEL (VOIDmode, p);
26322
26323 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26324 {
26325 insn = emit_jump_insn (par);
26326 JUMP_LABEL (insn) = ret_rtx;
26327 }
26328 else
26329 insn = emit_insn (par);
26330 return insn;
26331 }
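
/* Sketch of the PARALLEL built above for an out-of-line save of
   r29..r31 without SAVRES_LR (offsets illustrative):

     (parallel [(clobber (reg:DI LR_REGNO))
                (use (symbol_ref "_savegpr1_29"))
                (use (reg:DI 12))
                (set (mem:DI (plus (reg 12) (const_int -24))) (reg:DI 29))
                (set (mem:DI (plus (reg 12) (const_int -16))) (reg:DI 30))
                (set (mem:DI (plus (reg 12) (const_int -8))) (reg:DI 31))])

   The LR-saving save variants append an extra store of r0 (holding
   LR), and restores with SAVRES_LR lead with (return) and are emitted
   as a jump_insn.  */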
26332
26333 /* Emit prologue code to store CR fields that need to be saved into REG. This
26334 function should only be called when moving the non-volatile CRs to REG, it
26335 is not a general purpose routine to move the entire set of CRs to REG.
26336 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26337 volatile CRs. */
26338
26339 static void
26340 rs6000_emit_prologue_move_from_cr (rtx reg)
26341 {
26342 /* Only the ELFv2 ABI allows storing only selected fields. */
26343 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26344 {
26345 int i, cr_reg[8], count = 0;
26346
26347 /* Collect CR fields that must be saved. */
26348 for (i = 0; i < 8; i++)
26349 if (save_reg_p (CR0_REGNO + i))
26350 cr_reg[count++] = i;
26351
26352 /* If it's just a single one, use mfcrf. */
26353 if (count == 1)
26354 {
26355 rtvec p = rtvec_alloc (1);
26356 rtvec r = rtvec_alloc (2);
26357 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26358 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26359 RTVEC_ELT (p, 0)
26360 = gen_rtx_SET (reg,
26361 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26362
26363 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26364 return;
26365 }
26366
26367 /* ??? It might be better to handle the count == 2 or 3 cases here
26368 as well, using logical operations to combine the values. */
26369 }
26370
26371 emit_insn (gen_prologue_movesi_from_cr (reg));
26372 }
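
/* In the single-field case above, the UNSPEC is matched as an mfcrf;
   e.g. if only CR2 needs saving, the field mask is 1 << (7 - 2) == 0x20
   and the emitted instruction is roughly "mfcrf 12,0x20" (assuming REG
   is r12).  Otherwise the whole CR is copied with mfcr.  */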
26373
26374 /* Return whether the split-stack arg pointer (r12) is used. */
26375
26376 static bool
26377 split_stack_arg_pointer_used_p (void)
26378 {
26379 /* If the pseudo holding the arg pointer is no longer a pseudo,
26380 then the arg pointer is used. */
26381 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26382 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26383 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26384 return true;
26385
26386 /* Unfortunately we also need to do some code scanning, since
26387 r12 may have been substituted for the pseudo. */
26388 rtx_insn *insn;
26389 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26390 FOR_BB_INSNS (bb, insn)
26391 if (NONDEBUG_INSN_P (insn))
26392 {
26393 /* A call destroys r12. */
26394 if (CALL_P (insn))
26395 return false;
26396
26397 df_ref use;
26398 FOR_EACH_INSN_USE (use, insn)
26399 {
26400 rtx x = DF_REF_REG (use);
26401 if (REG_P (x) && REGNO (x) == 12)
26402 return true;
26403 }
26404 df_ref def;
26405 FOR_EACH_INSN_DEF (def, insn)
26406 {
26407 rtx x = DF_REF_REG (def);
26408 if (REG_P (x) && REGNO (x) == 12)
26409 return false;
26410 }
26411 }
26412 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26413 }
26414
26415 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26416
26417 static bool
26418 rs6000_global_entry_point_needed_p (void)
26419 {
26420 /* Only needed for the ELFv2 ABI. */
26421 if (DEFAULT_ABI != ABI_ELFv2)
26422 return false;
26423
26424 /* With -msingle-pic-base, we assume the whole program shares the same
26425 TOC, so no global entry point prologues are needed anywhere. */
26426 if (TARGET_SINGLE_PIC_BASE)
26427 return false;
26428
26429 /* Ensure we have a global entry point for thunks. ??? We could
26430 avoid that if the target routine doesn't need a global entry point,
26431 but we do not know whether this is the case at this point. */
26432 if (cfun->is_thunk)
26433 return true;
26434
26435 /* For regular functions, rs6000_emit_prologue sets this flag if the
26436 routine ever uses the TOC pointer. */
26437 return cfun->machine->r2_setup_needed;
26438 }
26439
26440 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26441 static sbitmap
26442 rs6000_get_separate_components (void)
26443 {
26444 rs6000_stack_t *info = rs6000_stack_info ();
26445
26446 if (WORLD_SAVE_P (info))
26447 return NULL;
26448
26449 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26450 && !(info->savres_strategy & REST_MULTIPLE));
26451
26452 /* Component 0 is the save/restore of LR (done via GPR0).
26453 Component 2 is the save of the TOC (GPR2).
26454 Components 13..31 are the save/restore of GPR13..GPR31.
26455 Components 46..63 are the save/restore of FPR14..FPR31. */
26456
26457 cfun->machine->n_components = 64;
26458
26459 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26460 bitmap_clear (components);
26461
26462 int reg_size = TARGET_32BIT ? 4 : 8;
26463 int fp_reg_size = 8;
26464
26465 /* The GPRs we need saved to the frame. */
26466 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26467 && (info->savres_strategy & REST_INLINE_GPRS))
26468 {
26469 int offset = info->gp_save_offset;
26470 if (info->push_p)
26471 offset += info->total_size;
26472
26473 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26474 {
26475 if (IN_RANGE (offset, -0x8000, 0x7fff)
26476 && save_reg_p (regno))
26477 bitmap_set_bit (components, regno);
26478
26479 offset += reg_size;
26480 }
26481 }
26482
26483 /* Don't mess with the hard frame pointer. */
26484 if (frame_pointer_needed)
26485 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26486
26487 /* Don't mess with the fixed TOC register. */
26488 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26489 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26490 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26491 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26492
26493 /* The FPRs we need saved to the frame. */
26494 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26495 && (info->savres_strategy & REST_INLINE_FPRS))
26496 {
26497 int offset = info->fp_save_offset;
26498 if (info->push_p)
26499 offset += info->total_size;
26500
26501 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26502 {
26503 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26504 bitmap_set_bit (components, regno);
26505
26506 offset += fp_reg_size;
26507 }
26508 }
26509
26510 /* Optimize LR save and restore if we can. This is component 0. Any
26511 out-of-line register save/restore routines need LR. */
26512 if (info->lr_save_p
26513 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26514 && (info->savres_strategy & SAVE_INLINE_GPRS)
26515 && (info->savres_strategy & REST_INLINE_GPRS)
26516 && (info->savres_strategy & SAVE_INLINE_FPRS)
26517 && (info->savres_strategy & REST_INLINE_FPRS)
26518 && (info->savres_strategy & SAVE_INLINE_VRS)
26519 && (info->savres_strategy & REST_INLINE_VRS))
26520 {
26521 int offset = info->lr_save_offset;
26522 if (info->push_p)
26523 offset += info->total_size;
26524 if (IN_RANGE (offset, -0x8000, 0x7fff))
26525 bitmap_set_bit (components, 0);
26526 }
26527
26528 /* Optimize saving the TOC. This is component 2. */
26529 if (cfun->machine->save_toc_in_prologue)
26530 bitmap_set_bit (components, 2);
26531
26532 return components;
26533 }
26534
26535 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26536 static sbitmap
26537 rs6000_components_for_bb (basic_block bb)
26538 {
26539 rs6000_stack_t *info = rs6000_stack_info ();
26540
26541 bitmap in = DF_LIVE_IN (bb);
26542 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26543 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26544
26545 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26546 bitmap_clear (components);
26547
26548 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26549
26550 /* GPRs. */
26551 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26552 if (bitmap_bit_p (in, regno)
26553 || bitmap_bit_p (gen, regno)
26554 || bitmap_bit_p (kill, regno))
26555 bitmap_set_bit (components, regno);
26556
26557 /* FPRs. */
26558 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26559 if (bitmap_bit_p (in, regno)
26560 || bitmap_bit_p (gen, regno)
26561 || bitmap_bit_p (kill, regno))
26562 bitmap_set_bit (components, regno);
26563
26564 /* The link register. */
26565 if (bitmap_bit_p (in, LR_REGNO)
26566 || bitmap_bit_p (gen, LR_REGNO)
26567 || bitmap_bit_p (kill, LR_REGNO))
26568 bitmap_set_bit (components, 0);
26569
26570 /* The TOC save. */
26571 if (bitmap_bit_p (in, TOC_REGNUM)
26572 || bitmap_bit_p (gen, TOC_REGNUM)
26573 || bitmap_bit_p (kill, TOC_REGNUM))
26574 bitmap_set_bit (components, 2);
26575
26576 return components;
26577 }
26578
26579 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26580 static void
26581 rs6000_disqualify_components (sbitmap components, edge e,
26582 sbitmap edge_components, bool /*is_prologue*/)
26583 {
26584 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26585 live where we want to place that code. */
26586 if (bitmap_bit_p (edge_components, 0)
26587 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26588 {
26589 if (dump_file)
26590 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26591 "on entry to bb %d\n", e->dest->index);
26592 bitmap_clear_bit (components, 0);
26593 }
26594 }
26595
26596 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26597 static void
26598 rs6000_emit_prologue_components (sbitmap components)
26599 {
26600 rs6000_stack_t *info = rs6000_stack_info ();
26601 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26602 ? HARD_FRAME_POINTER_REGNUM
26603 : STACK_POINTER_REGNUM);
26604
26605 machine_mode reg_mode = Pmode;
26606 int reg_size = TARGET_32BIT ? 4 : 8;
26607 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26608 int fp_reg_size = 8;
26609
26610 /* Prologue for LR. */
26611 if (bitmap_bit_p (components, 0))
26612 {
26613 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26614 rtx reg = gen_rtx_REG (reg_mode, 0);
26615 rtx_insn *insn = emit_move_insn (reg, lr);
26616 RTX_FRAME_RELATED_P (insn) = 1;
26617 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26618
26619 int offset = info->lr_save_offset;
26620 if (info->push_p)
26621 offset += info->total_size;
26622
26623 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26624 RTX_FRAME_RELATED_P (insn) = 1;
26625 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26626 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26627 }
26628
26629 /* Prologue for TOC. */
26630 if (bitmap_bit_p (components, 2))
26631 {
26632 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26633 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26634 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26635 }
26636
26637 /* Prologue for the GPRs. */
26638 int offset = info->gp_save_offset;
26639 if (info->push_p)
26640 offset += info->total_size;
26641
26642 for (int i = info->first_gp_reg_save; i < 32; i++)
26643 {
26644 if (bitmap_bit_p (components, i))
26645 {
26646 rtx reg = gen_rtx_REG (reg_mode, i);
26647 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26648 RTX_FRAME_RELATED_P (insn) = 1;
26649 rtx set = copy_rtx (single_set (insn));
26650 add_reg_note (insn, REG_CFA_OFFSET, set);
26651 }
26652
26653 offset += reg_size;
26654 }
26655
26656 /* Prologue for the FPRs. */
26657 offset = info->fp_save_offset;
26658 if (info->push_p)
26659 offset += info->total_size;
26660
26661 for (int i = info->first_fp_reg_save; i < 64; i++)
26662 {
26663 if (bitmap_bit_p (components, i))
26664 {
26665 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26666 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26667 RTX_FRAME_RELATED_P (insn) = 1;
26668 rtx set = copy_rtx (single_set (insn));
26669 add_reg_note (insn, REG_CFA_OFFSET, set);
26670 }
26671
26672 offset += fp_reg_size;
26673 }
26674 }
26675
26676 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26677 static void
26678 rs6000_emit_epilogue_components (sbitmap components)
26679 {
26680 rs6000_stack_t *info = rs6000_stack_info ();
26681 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26682 ? HARD_FRAME_POINTER_REGNUM
26683 : STACK_POINTER_REGNUM);
26684
26685 machine_mode reg_mode = Pmode;
26686 int reg_size = TARGET_32BIT ? 4 : 8;
26687
26688 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26689 int fp_reg_size = 8;
26690
26691 /* Epilogue for the FPRs. */
26692 int offset = info->fp_save_offset;
26693 if (info->push_p)
26694 offset += info->total_size;
26695
26696 for (int i = info->first_fp_reg_save; i < 64; i++)
26697 {
26698 if (bitmap_bit_p (components, i))
26699 {
26700 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26701 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26702 RTX_FRAME_RELATED_P (insn) = 1;
26703 add_reg_note (insn, REG_CFA_RESTORE, reg);
26704 }
26705
26706 offset += fp_reg_size;
26707 }
26708
26709 /* Epilogue for the GPRs. */
26710 offset = info->gp_save_offset;
26711 if (info->push_p)
26712 offset += info->total_size;
26713
26714 for (int i = info->first_gp_reg_save; i < 32; i++)
26715 {
26716 if (bitmap_bit_p (components, i))
26717 {
26718 rtx reg = gen_rtx_REG (reg_mode, i);
26719 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26720 RTX_FRAME_RELATED_P (insn) = 1;
26721 add_reg_note (insn, REG_CFA_RESTORE, reg);
26722 }
26723
26724 offset += reg_size;
26725 }
26726
26727 /* Epilogue for LR. */
26728 if (bitmap_bit_p (components, 0))
26729 {
26730 int offset = info->lr_save_offset;
26731 if (info->push_p)
26732 offset += info->total_size;
26733
26734 rtx reg = gen_rtx_REG (reg_mode, 0);
26735 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26736
26737 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26738 insn = emit_move_insn (lr, reg);
26739 RTX_FRAME_RELATED_P (insn) = 1;
26740 add_reg_note (insn, REG_CFA_RESTORE, lr);
26741 }
26742 }
26743
26744 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26745 static void
26746 rs6000_set_handled_components (sbitmap components)
26747 {
26748 rs6000_stack_t *info = rs6000_stack_info ();
26749
26750 for (int i = info->first_gp_reg_save; i < 32; i++)
26751 if (bitmap_bit_p (components, i))
26752 cfun->machine->gpr_is_wrapped_separately[i] = true;
26753
26754 for (int i = info->first_fp_reg_save; i < 64; i++)
26755 if (bitmap_bit_p (components, i))
26756 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26757
26758 if (bitmap_bit_p (components, 0))
26759 cfun->machine->lr_is_wrapped_separately = true;
26760
26761 if (bitmap_bit_p (components, 2))
26762 cfun->machine->toc_is_wrapped_separately = true;
26763 }
26764
26765 /* VRSAVE is a bit vector representing which AltiVec registers
26766 are used. The OS uses this to determine which vector
26767 registers to save on a context switch. We need to save
26768 VRSAVE on the stack frame, add whatever AltiVec registers we
26769 used in this function, and do the corresponding magic in the
26770 epilogue. */
26771 static void
26772 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26773 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26774 {
26775 /* Get VRSAVE into a GPR. */
26776 rtx reg = gen_rtx_REG (SImode, save_regno);
26777 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26778 if (TARGET_MACHO)
26779 emit_insn (gen_get_vrsave_internal (reg));
26780 else
26781 emit_insn (gen_rtx_SET (reg, vrsave));
26782
26783 /* Save VRSAVE. */
26784 int offset = info->vrsave_save_offset + frame_off;
26785 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26786
26787 /* Include the registers in the mask. */
26788 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26789
26790 emit_insn (generate_set_vrsave (reg, info, 0));
26791 }
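
/* The sequence emitted above amounts to, roughly (register and offset
   illustrative):

	mfvrsave 11           # copy VRSAVE into r11
	stw 11,off(1)         # save the caller's VRSAVE value
	ori 11,11,mask        # OR in this function's vrsave_mask
	mtvrsave 11           # tell the OS which VRs we use
*/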
26792
26793 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26794 called, it left the arg pointer to the old stack in r29. Otherwise, the
26795 arg pointer is the top of the current frame. */
26796 static void
26797 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26798 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26799 {
26800 cfun->machine->split_stack_argp_used = true;
26801
26802 if (sp_adjust)
26803 {
26804 rtx r12 = gen_rtx_REG (Pmode, 12);
26805 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26806 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26807 emit_insn_before (set_r12, sp_adjust);
26808 }
26809 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26810 {
26811 rtx r12 = gen_rtx_REG (Pmode, 12);
26812 if (frame_off == 0)
26813 emit_move_insn (r12, frame_reg_rtx);
26814 else
26815 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26816 }
26817
26818 if (info->push_p)
26819 {
26820 rtx r12 = gen_rtx_REG (Pmode, 12);
26821 rtx r29 = gen_rtx_REG (Pmode, 29);
26822 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26823 rtx not_more = gen_label_rtx ();
26824 rtx jump;
26825
26826 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26827 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26828 gen_rtx_LABEL_REF (VOIDmode, not_more),
26829 pc_rtx);
26830 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26831 JUMP_LABEL (jump) = not_more;
26832 LABEL_NUSES (not_more) += 1;
26833 emit_move_insn (r12, r29);
26834 emit_label (not_more);
26835 }
26836 }
26837
26838 /* Emit function prologue as insns. */
26839
26840 void
26841 rs6000_emit_prologue (void)
26842 {
26843 rs6000_stack_t *info = rs6000_stack_info ();
26844 machine_mode reg_mode = Pmode;
26845 int reg_size = TARGET_32BIT ? 4 : 8;
26846 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26847 int fp_reg_size = 8;
26848 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26849 rtx frame_reg_rtx = sp_reg_rtx;
26850 unsigned int cr_save_regno;
26851 rtx cr_save_rtx = NULL_RTX;
26852 rtx_insn *insn;
26853 int strategy;
26854 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26855 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26856 && call_used_regs[STATIC_CHAIN_REGNUM]);
26857 int using_split_stack = (flag_split_stack
26858 && (lookup_attribute ("no_split_stack",
26859 DECL_ATTRIBUTES (cfun->decl))
26860 == NULL));
26861
26862 /* Offset to top of frame for frame_reg and sp respectively. */
26863 HOST_WIDE_INT frame_off = 0;
26864 HOST_WIDE_INT sp_off = 0;
26865 /* sp_adjust is the stack adjusting instruction, tracked so that the
26866 insn setting up the split-stack arg pointer can be emitted just
26867 prior to it, when r12 is not used here for other purposes. */
26868 rtx_insn *sp_adjust = 0;
26869
26870 #if CHECKING_P
26871 /* Track and check usage of r0, r11, r12. */
26872 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26873 #define START_USE(R) do \
26874 { \
26875 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26876 reg_inuse |= 1 << (R); \
26877 } while (0)
26878 #define END_USE(R) do \
26879 { \
26880 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26881 reg_inuse &= ~(1 << (R)); \
26882 } while (0)
26883 #define NOT_INUSE(R) do \
26884 { \
26885 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26886 } while (0)
26887 #else
26888 #define START_USE(R) do {} while (0)
26889 #define END_USE(R) do {} while (0)
26890 #define NOT_INUSE(R) do {} while (0)
26891 #endif
26892
26893 if (DEFAULT_ABI == ABI_ELFv2
26894 && !TARGET_SINGLE_PIC_BASE)
26895 {
26896 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26897
26898 /* With -mminimal-toc we may generate an extra use of r2 below. */
26899 if (TARGET_TOC && TARGET_MINIMAL_TOC
26900 && !constant_pool_empty_p ())
26901 cfun->machine->r2_setup_needed = true;
26902 }
26903
26904
26905 if (flag_stack_usage_info)
26906 current_function_static_stack_size = info->total_size;
26907
26908 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26909 {
26910 HOST_WIDE_INT size = info->total_size;
26911
26912 if (crtl->is_leaf && !cfun->calls_alloca)
26913 {
26914 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26915 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26916 size - get_stack_check_protect ());
26917 }
26918 else if (size > 0)
26919 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26920 }
26921
26922 if (TARGET_FIX_AND_CONTINUE)
26923 {
26924 /* gdb on Darwin arranges to forward a function from the old
26925 address by modifying the first 5 instructions of the function
26926 to branch to the overriding function. This is necessary to
26927 permit function pointers that point to the old function to
26928 actually forward to the new function. */
26929 emit_insn (gen_nop ());
26930 emit_insn (gen_nop ());
26931 emit_insn (gen_nop ());
26932 emit_insn (gen_nop ());
26933 emit_insn (gen_nop ());
26934 }
26935
26936 /* Handle world saves specially here. */
26937 if (WORLD_SAVE_P (info))
26938 {
26939 int i, j, sz;
26940 rtx treg;
26941 rtvec p;
26942 rtx reg0;
26943
26944 /* save_world expects lr in r0. */
26945 reg0 = gen_rtx_REG (Pmode, 0);
26946 if (info->lr_save_p)
26947 {
26948 insn = emit_move_insn (reg0,
26949 gen_rtx_REG (Pmode, LR_REGNO));
26950 RTX_FRAME_RELATED_P (insn) = 1;
26951 }
26952
26953 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26954 assumptions about the offsets of various bits of the stack
26955 frame. */
26956 gcc_assert (info->gp_save_offset == -220
26957 && info->fp_save_offset == -144
26958 && info->lr_save_offset == 8
26959 && info->cr_save_offset == 4
26960 && info->push_p
26961 && info->lr_save_p
26962 && (!crtl->calls_eh_return
26963 || info->ehrd_offset == -432)
26964 && info->vrsave_save_offset == -224
26965 && info->altivec_save_offset == -416);
26966
26967 treg = gen_rtx_REG (SImode, 11);
26968 emit_move_insn (treg, GEN_INT (-info->total_size));
26969
26970 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26971 in R11. It also clobbers R12, so beware! */
26972
26973 /* Preserve CR2 for save_world prologues. */
26974 sz = 5;
26975 sz += 32 - info->first_gp_reg_save;
26976 sz += 64 - info->first_fp_reg_save;
26977 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26978 p = rtvec_alloc (sz);
26979 j = 0;
26980 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
26981 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26982 gen_rtx_SYMBOL_REF (Pmode,
26983 "*save_world"));
26984 /* We do floats first so that the instruction pattern matches
26985 properly. */
26986 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26987 RTVEC_ELT (p, j++)
26988 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26989 info->first_fp_reg_save + i),
26990 frame_reg_rtx,
26991 info->fp_save_offset + frame_off + 8 * i);
26992 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26993 RTVEC_ELT (p, j++)
26994 = gen_frame_store (gen_rtx_REG (V4SImode,
26995 info->first_altivec_reg_save + i),
26996 frame_reg_rtx,
26997 info->altivec_save_offset + frame_off + 16 * i);
26998 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26999 RTVEC_ELT (p, j++)
27000 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27001 frame_reg_rtx,
27002 info->gp_save_offset + frame_off + reg_size * i);
27003
27004 /* CR register traditionally saved as CR2. */
27005 RTVEC_ELT (p, j++)
27006 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27007 frame_reg_rtx, info->cr_save_offset + frame_off);
27008 /* Explain the use of R0. */
27009 if (info->lr_save_p)
27010 RTVEC_ELT (p, j++)
27011 = gen_frame_store (reg0,
27012 frame_reg_rtx, info->lr_save_offset + frame_off);
27013 /* Explain what happens to the stack pointer. */
27014 {
27015 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27016 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27017 }
27018
27019 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27020 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27021 treg, GEN_INT (-info->total_size));
27022 sp_off = frame_off = info->total_size;
27023 }
27024
27025 strategy = info->savres_strategy;
27026
27027 /* For V.4, update stack before we do any saving and set back pointer. */
27028 if (! WORLD_SAVE_P (info)
27029 && info->push_p
27030 && (DEFAULT_ABI == ABI_V4
27031 || crtl->calls_eh_return))
27032 {
27033 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27034 || !(strategy & SAVE_INLINE_GPRS)
27035 || !(strategy & SAVE_INLINE_VRS));
27036 int ptr_regno = -1;
27037 rtx ptr_reg = NULL_RTX;
27038 int ptr_off = 0;
27039
27040 if (info->total_size < 32767)
27041 frame_off = info->total_size;
27042 else if (need_r11)
27043 ptr_regno = 11;
27044 else if (info->cr_save_p
27045 || info->lr_save_p
27046 || info->first_fp_reg_save < 64
27047 || info->first_gp_reg_save < 32
27048 || info->altivec_size != 0
27049 || info->vrsave_size != 0
27050 || crtl->calls_eh_return)
27051 ptr_regno = 12;
27052 else
27053 {
27054 /* The prologue won't be saving any regs so there is no need
27055 to set up a frame register to access any frame save area.
27056 We also won't be using frame_off anywhere below, but set
27057 the correct value anyway to protect against future
27058 changes to this function. */
27059 frame_off = info->total_size;
27060 }
27061 if (ptr_regno != -1)
27062 {
27063 /* Set up the frame offset to that needed by the first
27064 out-of-line save function. */
27065 START_USE (ptr_regno);
27066 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27067 frame_reg_rtx = ptr_reg;
27068 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27069 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27070 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27071 ptr_off = info->gp_save_offset + info->gp_size;
27072 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27073 ptr_off = info->altivec_save_offset + info->altivec_size;
27074 frame_off = -ptr_off;
27075 }
27076 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27077 ptr_reg, ptr_off);
27078 if (REGNO (frame_reg_rtx) == 12)
27079 sp_adjust = 0;
27080 sp_off = info->total_size;
27081 if (frame_reg_rtx != sp_reg_rtx)
27082 rs6000_emit_stack_tie (frame_reg_rtx, false);
27083 }
27084
27085 /* If we use the link register, get it into r0. */
27086 if (!WORLD_SAVE_P (info) && info->lr_save_p
27087 && !cfun->machine->lr_is_wrapped_separately)
27088 {
27089 rtx addr, reg, mem;
27090
27091 reg = gen_rtx_REG (Pmode, 0);
27092 START_USE (0);
27093 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27094 RTX_FRAME_RELATED_P (insn) = 1;
27095
27096 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27097 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27098 {
27099 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27100 GEN_INT (info->lr_save_offset + frame_off));
27101 mem = gen_rtx_MEM (Pmode, addr);
27102 /* This should not be of rs6000_sr_alias_set, because of
27103 __builtin_return_address. */
27104
27105 insn = emit_move_insn (mem, reg);
27106 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27107 NULL_RTX, NULL_RTX);
27108 END_USE (0);
27109 }
27110 }
27111
27112 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27113 r12 will be needed by out-of-line gpr save. */
27114 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27115 && !(strategy & (SAVE_INLINE_GPRS
27116 | SAVE_NOINLINE_GPRS_SAVES_LR))
27117 ? 11 : 12);
27118 if (!WORLD_SAVE_P (info)
27119 && info->cr_save_p
27120 && REGNO (frame_reg_rtx) != cr_save_regno
27121 && !(using_static_chain_p && cr_save_regno == 11)
27122 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27123 {
27124 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27125 START_USE (cr_save_regno);
27126 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27127 }
27128
27129 /* Do any required saving of FPRs. If only one or two to save, do
27130 it ourselves. Otherwise, call an out-of-line function. */
27131 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27132 {
27133 int offset = info->fp_save_offset + frame_off;
27134 for (int i = info->first_fp_reg_save; i < 64; i++)
27135 {
27136 if (save_reg_p (i)
27137 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27138 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27139 sp_off - frame_off);
27140
27141 offset += fp_reg_size;
27142 }
27143 }
27144 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27145 {
27146 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27147 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27148 unsigned ptr_regno = ptr_regno_for_savres (sel);
27149 rtx ptr_reg = frame_reg_rtx;
27150
27151 if (REGNO (frame_reg_rtx) == ptr_regno)
27152 gcc_checking_assert (frame_off == 0);
27153 else
27154 {
27155 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27156 NOT_INUSE (ptr_regno);
27157 emit_insn (gen_add3_insn (ptr_reg,
27158 frame_reg_rtx, GEN_INT (frame_off)));
27159 }
27160 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27161 info->fp_save_offset,
27162 info->lr_save_offset,
27163 DFmode, sel);
27164 rs6000_frame_related (insn, ptr_reg, sp_off,
27165 NULL_RTX, NULL_RTX);
27166 if (lr)
27167 END_USE (0);
27168 }
27169
27170 /* Save GPRs. This is done as a PARALLEL if we are using
27171 the store-multiple instructions. */
27172 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27173 {
27174 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27175 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27176 unsigned ptr_regno = ptr_regno_for_savres (sel);
27177 rtx ptr_reg = frame_reg_rtx;
27178 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27179 int end_save = info->gp_save_offset + info->gp_size;
27180 int ptr_off;
27181
27182 if (ptr_regno == 12)
27183 sp_adjust = 0;
27184 if (!ptr_set_up)
27185 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27186
27187 /* Need to adjust r11 (r12) if we saved any FPRs. */
27188 if (end_save + frame_off != 0)
27189 {
27190 rtx offset = GEN_INT (end_save + frame_off);
27191
27192 if (ptr_set_up)
27193 frame_off = -end_save;
27194 else
27195 NOT_INUSE (ptr_regno);
27196 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27197 }
27198 else if (!ptr_set_up)
27199 {
27200 NOT_INUSE (ptr_regno);
27201 emit_move_insn (ptr_reg, frame_reg_rtx);
27202 }
27203 ptr_off = -end_save;
27204 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27205 info->gp_save_offset + ptr_off,
27206 info->lr_save_offset + ptr_off,
27207 reg_mode, sel);
27208 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27209 NULL_RTX, NULL_RTX);
27210 if (lr)
27211 END_USE (0);
27212 }
27213 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27214 {
27215 rtvec p;
27216 int i;
27217 p = rtvec_alloc (32 - info->first_gp_reg_save);
27218 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27219 RTVEC_ELT (p, i)
27220 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27221 frame_reg_rtx,
27222 info->gp_save_offset + frame_off + reg_size * i);
27223 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27224 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27225 NULL_RTX, NULL_RTX);
27226 }
27227 else if (!WORLD_SAVE_P (info))
27228 {
27229 int offset = info->gp_save_offset + frame_off;
27230 for (int i = info->first_gp_reg_save; i < 32; i++)
27231 {
27232 if (save_reg_p (i)
27233 && !cfun->machine->gpr_is_wrapped_separately[i])
27234 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27235 sp_off - frame_off);
27236
27237 offset += reg_size;
27238 }
27239 }
27240
27241 if (crtl->calls_eh_return)
27242 {
27243 unsigned int i;
27244 rtvec p;
27245
27246 for (i = 0; ; ++i)
27247 {
27248 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27249 if (regno == INVALID_REGNUM)
27250 break;
27251 }
27252
27253 p = rtvec_alloc (i);
27254
27255 for (i = 0; ; ++i)
27256 {
27257 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27258 if (regno == INVALID_REGNUM)
27259 break;
27260
27261 rtx set
27262 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27263 sp_reg_rtx,
27264 info->ehrd_offset + sp_off + reg_size * (int) i);
27265 RTVEC_ELT (p, i) = set;
27266 RTX_FRAME_RELATED_P (set) = 1;
27267 }
27268
27269 insn = emit_insn (gen_blockage ());
27270 RTX_FRAME_RELATED_P (insn) = 1;
27271 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27272 }
27273
  /* In the AIX ABI we need to make sure r2 is really saved.  */
27275 if (TARGET_AIX && crtl->calls_eh_return)
27276 {
27277 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27278 rtx join_insn, note;
27279 rtx_insn *save_insn;
27280 long toc_restore_insn;
27281
27282 tmp_reg = gen_rtx_REG (Pmode, 11);
27283 tmp_reg_si = gen_rtx_REG (SImode, 11);
27284 if (using_static_chain_p)
27285 {
27286 START_USE (0);
27287 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27288 }
27289 else
27290 START_USE (11);
27291 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
      /* Peek at the instruction to which this function returns.  If it's
	 restoring r2, then we know we've already saved r2.  We can't
	 unconditionally save r2 because the value we have will already
	 be updated if we arrived at this function via a plt call or
	 toc-adjusting stub.  */
27297 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27298 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27299 + RS6000_TOC_SAVE_SLOT);
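      /* A hedged sketch of the sequence emitted around here (illustrative;
	 exact register numbers and offsets vary).  The constants above are
	 the instruction encodings being looked for: 0x80410000 is
	 "lwz 2,N(1)" and 0xE8410000 is "ld 2,N(1)", with
	 N = RS6000_TOC_SAVE_SLOT in the low 16 bits.
		mflr 11			# return address
		lwz 11,0(11)		# insn we will return to
		xoris 11,11,insn@h
		cmplwi 0,11,insn@l
		beq 0,.Ltoc_save_done	# caller's stub restores r2 already
		std 2,N(1)		# otherwise, save r2 ourselves
	 .Ltoc_save_done:  */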
27300 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27301 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27302 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27303 validate_condition_mode (EQ, CCUNSmode);
27304 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27305 emit_insn (gen_rtx_SET (compare_result,
27306 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27307 toc_save_done = gen_label_rtx ();
27308 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27309 gen_rtx_EQ (VOIDmode, compare_result,
27310 const0_rtx),
27311 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27312 pc_rtx);
27313 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27314 JUMP_LABEL (jump) = toc_save_done;
27315 LABEL_NUSES (toc_save_done) += 1;
27316
27317 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27318 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27319 sp_off - frame_off);
27320
27321 emit_label (toc_save_done);
27322
      /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
	 have a CFG that has different saves along different paths.
	 Move the note to a dummy blockage insn, which describes that
	 R2 is unconditionally saved after the label.  */
      /* ??? An alternate representation might be a special insn pattern
	 containing both the branch and the store.  That might give the
	 code that minimizes the number of DW_CFA_advance opcodes more
	 freedom in placing the annotations.  */
27331 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27332 if (note)
27333 remove_note (save_insn, note);
27334 else
27335 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27336 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27337 RTX_FRAME_RELATED_P (save_insn) = 0;
27338
27339 join_insn = emit_insn (gen_blockage ());
27340 REG_NOTES (join_insn) = note;
27341 RTX_FRAME_RELATED_P (join_insn) = 1;
27342
27343 if (using_static_chain_p)
27344 {
27345 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27346 END_USE (0);
27347 }
27348 else
27349 END_USE (11);
27350 }
27351
27352 /* Save CR if we use any that must be preserved. */
27353 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27354 {
27355 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27356 GEN_INT (info->cr_save_offset + frame_off));
27357 rtx mem = gen_frame_mem (SImode, addr);
27358
27359 /* If we didn't copy cr before, do so now using r0. */
27360 if (cr_save_rtx == NULL_RTX)
27361 {
27362 START_USE (0);
27363 cr_save_rtx = gen_rtx_REG (SImode, 0);
27364 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27365 }
27366
27367 /* Saving CR requires a two-instruction sequence: one instruction
27368 to move the CR to a general-purpose register, and a second
27369 instruction that stores the GPR to memory.
27370
27371 We do not emit any DWARF CFI records for the first of these,
27372 because we cannot properly represent the fact that CR is saved in
27373 a register. One reason is that we cannot express that multiple
27374 CR fields are saved; another reason is that on 64-bit, the size
27375 of the CR register in DWARF (4 bytes) differs from the size of
27376 a general-purpose register.
27377
27378 This means if any intervening instruction were to clobber one of
27379 the call-saved CR fields, we'd have incorrect CFI. To prevent
27380 this from happening, we mark the store to memory as a use of
27381 those CR fields, which prevents any such instruction from being
27382 scheduled in between the two instructions. */
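      /* Illustrative only: together with the move from CR emitted above,
	 the PARALLEL built below typically assembles to a pair like
		mfcr 0
		stw 0,8(1)
	 (assuming r0 and a cr_save_offset of 8); the USEs exist solely to
	 keep instructions that clobber those CR fields from being
	 scheduled between the two.  */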
27383 rtx crsave_v[9];
27384 int n_crsave = 0;
27385 int i;
27386
27387 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27388 for (i = 0; i < 8; i++)
27389 if (save_reg_p (CR0_REGNO + i))
27390 crsave_v[n_crsave++]
27391 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27392
27393 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27394 gen_rtvec_v (n_crsave, crsave_v)));
27395 END_USE (REGNO (cr_save_rtx));
27396
27397 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27398 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27399 so we need to construct a frame expression manually. */
27400 RTX_FRAME_RELATED_P (insn) = 1;
27401
27402 /* Update address to be stack-pointer relative, like
27403 rs6000_frame_related would do. */
27404 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27405 GEN_INT (info->cr_save_offset + sp_off));
27406 mem = gen_frame_mem (SImode, addr);
27407
27408 if (DEFAULT_ABI == ABI_ELFv2)
27409 {
27410 /* In the ELFv2 ABI we generate separate CFI records for each
27411 CR field that was actually saved. They all point to the
27412 same 32-bit stack slot. */
27413 rtx crframe[8];
27414 int n_crframe = 0;
27415
27416 for (i = 0; i < 8; i++)
27417 if (save_reg_p (CR0_REGNO + i))
27418 {
27419 crframe[n_crframe]
27420 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27421
27422 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27423 n_crframe++;
27424 }
27425
27426 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27427 gen_rtx_PARALLEL (VOIDmode,
27428 gen_rtvec_v (n_crframe, crframe)));
27429 }
27430 else
27431 {
27432 /* In other ABIs, by convention, we use a single CR regnum to
27433 represent the fact that all call-saved CR fields are saved.
27434 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27435 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27436 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27437 }
27438 }
27439
27440 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27441 *separate* slots if the routine calls __builtin_eh_return, so
27442 that they can be independently restored by the unwinder. */
27443 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27444 {
27445 int i, cr_off = info->ehcr_offset;
27446 rtx crsave;
27447
27448 /* ??? We might get better performance by using multiple mfocrf
27449 instructions. */
27450 crsave = gen_rtx_REG (SImode, 0);
27451 emit_insn (gen_prologue_movesi_from_cr (crsave));
27452
27453 for (i = 0; i < 8; i++)
27454 if (!call_used_regs[CR0_REGNO + i])
27455 {
27456 rtvec p = rtvec_alloc (2);
27457 RTVEC_ELT (p, 0)
27458 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27459 RTVEC_ELT (p, 1)
27460 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27461
27462 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27463
27464 RTX_FRAME_RELATED_P (insn) = 1;
27465 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27466 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27467 sp_reg_rtx, cr_off + sp_off));
27468
27469 cr_off += reg_size;
27470 }
27471 }
27472
27473 /* If we are emitting stack probes, but allocate no stack, then
27474 just note that in the dump file. */
27475 if (flag_stack_clash_protection
27476 && dump_file
27477 && !info->push_p)
27478 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27479
27480 /* Update stack and set back pointer unless this is V.4,
27481 for which it was done previously. */
27482 if (!WORLD_SAVE_P (info) && info->push_p
27483 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27484 {
27485 rtx ptr_reg = NULL;
27486 int ptr_off = 0;
27487
27488 /* If saving altivec regs we need to be able to address all save
27489 locations using a 16-bit offset. */
27490 if ((strategy & SAVE_INLINE_VRS) == 0
27491 || (info->altivec_size != 0
27492 && (info->altivec_save_offset + info->altivec_size - 16
27493 + info->total_size - frame_off) > 32767)
27494 || (info->vrsave_size != 0
27495 && (info->vrsave_save_offset
27496 + info->total_size - frame_off) > 32767))
27497 {
27498 int sel = SAVRES_SAVE | SAVRES_VR;
27499 unsigned ptr_regno = ptr_regno_for_savres (sel);
27500
27501 if (using_static_chain_p
27502 && ptr_regno == STATIC_CHAIN_REGNUM)
27503 ptr_regno = 12;
27504 if (REGNO (frame_reg_rtx) != ptr_regno)
27505 START_USE (ptr_regno);
27506 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27507 frame_reg_rtx = ptr_reg;
27508 ptr_off = info->altivec_save_offset + info->altivec_size;
27509 frame_off = -ptr_off;
27510 }
27511 else if (REGNO (frame_reg_rtx) == 1)
27512 frame_off = info->total_size;
27513 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27514 ptr_reg, ptr_off);
27515 if (REGNO (frame_reg_rtx) == 12)
27516 sp_adjust = 0;
27517 sp_off = info->total_size;
27518 if (frame_reg_rtx != sp_reg_rtx)
27519 rs6000_emit_stack_tie (frame_reg_rtx, false);
27520 }
27521
27522 /* Set frame pointer, if needed. */
27523 if (frame_pointer_needed)
27524 {
27525 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27526 sp_reg_rtx);
27527 RTX_FRAME_RELATED_P (insn) = 1;
27528 }
27529
27530 /* Save AltiVec registers if needed. Save here because the red zone does
27531 not always include AltiVec registers. */
27532 if (!WORLD_SAVE_P (info)
27533 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27534 {
27535 int end_save = info->altivec_save_offset + info->altivec_size;
27536 int ptr_off;
27537 /* Oddly, the vector save/restore functions point r0 at the end
27538 of the save area, then use r11 or r12 to load offsets for
27539 [reg+reg] addressing. */
27540 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27541 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27542 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27543
27544 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27545 NOT_INUSE (0);
27546 if (scratch_regno == 12)
27547 sp_adjust = 0;
27548 if (end_save + frame_off != 0)
27549 {
27550 rtx offset = GEN_INT (end_save + frame_off);
27551
27552 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27553 }
27554 else
27555 emit_move_insn (ptr_reg, frame_reg_rtx);
27556
27557 ptr_off = -end_save;
27558 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27559 info->altivec_save_offset + ptr_off,
27560 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27561 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27562 NULL_RTX, NULL_RTX);
27563 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27564 {
27565 /* The oddity mentioned above clobbered our frame reg. */
27566 emit_move_insn (frame_reg_rtx, ptr_reg);
27567 frame_off = ptr_off;
27568 }
27569 }
27570 else if (!WORLD_SAVE_P (info)
27571 && info->altivec_size != 0)
27572 {
27573 int i;
27574
27575 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27576 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27577 {
27578 rtx areg, savereg, mem;
27579 HOST_WIDE_INT offset;
27580
27581 offset = (info->altivec_save_offset + frame_off
27582 + 16 * (i - info->first_altivec_reg_save));
27583
27584 savereg = gen_rtx_REG (V4SImode, i);
27585
27586 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27587 {
27588 mem = gen_frame_mem (V4SImode,
27589 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27590 GEN_INT (offset)));
27591 insn = emit_insn (gen_rtx_SET (mem, savereg));
27592 areg = NULL_RTX;
27593 }
27594 else
27595 {
27596 NOT_INUSE (0);
27597 areg = gen_rtx_REG (Pmode, 0);
27598 emit_move_insn (areg, GEN_INT (offset));
27599
27600 /* AltiVec addressing mode is [reg+reg]. */
27601 mem = gen_frame_mem (V4SImode,
27602 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27603
27604 /* Rather than emitting a generic move, force use of the stvx
27605 instruction, which we always want on ISA 2.07 (power8) systems.
27606 In particular we don't want xxpermdi/stxvd2x for little
27607 endian. */
27608 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27609 }
27610
27611 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27612 areg, GEN_INT (offset));
27613 }
27614 }
27615
27616 /* VRSAVE is a bit vector representing which AltiVec registers
27617 are used. The OS uses this to determine which vector
27618 registers to save on a context switch. We need to save
27619 VRSAVE on the stack frame, add whatever AltiVec registers we
27620 used in this function, and do the corresponding magic in the
27621 epilogue. */
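  /* A rough sketch of what emit_vrsave_prologue produces (illustrative
     only; VRSAVE is SPR 256, and the mask depends on which AltiVec
     registers this function uses):
	mfspr 11,256			# old VRSAVE
	stw 11,<vrsave_save_offset>(1)	# save it in the frame
	oris 11,11,<mask>@h		# add our AltiVec registers
	mtspr 256,11  */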
27622
27623 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27624 {
27625 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27626 be using r12 as frame_reg_rtx and r11 as the static chain
27627 pointer for nested functions. */
27628 int save_regno = 12;
27629 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27630 && !using_static_chain_p)
27631 save_regno = 11;
27632 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27633 {
27634 save_regno = 11;
27635 if (using_static_chain_p)
27636 save_regno = 0;
27637 }
27638 NOT_INUSE (save_regno);
27639
27640 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27641 }
27642
27643 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27644 if (!TARGET_SINGLE_PIC_BASE
27645 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27646 && !constant_pool_empty_p ())
27647 || (DEFAULT_ABI == ABI_V4
27648 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27649 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27650 {
27651 /* If emit_load_toc_table will use the link register, we need to save
27652 it. We use R12 for this purpose because emit_load_toc_table
27653 can use register 0. This allows us to use a plain 'blr' to return
27654 from the procedure more often. */
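      /* Hedged sketch of the save-around sequence emitted below
	 (illustrative):
		mflr 12
		...TOC setup, which may clobber LR via a bl...
		mtlr 12
	 so no LR save slot in the frame is required.  */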
27655 int save_LR_around_toc_setup = (TARGET_ELF
27656 && DEFAULT_ABI == ABI_V4
27657 && flag_pic
27658 && ! info->lr_save_p
27659 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27660 if (save_LR_around_toc_setup)
27661 {
27662 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27663 rtx tmp = gen_rtx_REG (Pmode, 12);
27664
27665 sp_adjust = 0;
27666 insn = emit_move_insn (tmp, lr);
27667 RTX_FRAME_RELATED_P (insn) = 1;
27668
27669 rs6000_emit_load_toc_table (TRUE);
27670
27671 insn = emit_move_insn (lr, tmp);
27672 add_reg_note (insn, REG_CFA_RESTORE, lr);
27673 RTX_FRAME_RELATED_P (insn) = 1;
27674 }
27675 else
27676 rs6000_emit_load_toc_table (TRUE);
27677 }
27678
27679 #if TARGET_MACHO
27680 if (!TARGET_SINGLE_PIC_BASE
27681 && DEFAULT_ABI == ABI_DARWIN
27682 && flag_pic && crtl->uses_pic_offset_table)
27683 {
27684 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27685 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27686
27687 /* Save and restore LR locally around this call (in R0). */
27688 if (!info->lr_save_p)
27689 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27690
27691 emit_insn (gen_load_macho_picbase (src));
27692
27693 emit_move_insn (gen_rtx_REG (Pmode,
27694 RS6000_PIC_OFFSET_TABLE_REGNUM),
27695 lr);
27696
27697 if (!info->lr_save_p)
27698 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27699 }
27700 #endif
27701
27702 /* If we need to, save the TOC register after doing the stack setup.
27703 Do not emit eh frame info for this save. The unwinder wants info,
27704 conceptually attached to instructions in this function, about
27705 register values in the caller of this function. This R2 may have
27706 already been changed from the value in the caller.
27707 We don't attempt to write accurate DWARF EH frame info for R2
27708 because code emitted by gcc for a (non-pointer) function call
27709 doesn't save and restore R2. Instead, R2 is managed out-of-line
27710 by a linker generated plt call stub when the function resides in
27711 a shared library. This behavior is costly to describe in DWARF,
27712 both in terms of the size of DWARF info and the time taken in the
27713 unwinder to interpret it. R2 changes, apart from the
27714 calls_eh_return case earlier in this function, are handled by
27715 linux-unwind.h frob_update_context. */
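  /* Concretely (illustrative, 64-bit): the store below amounts to a bare
	std 2,N(1)
     with N = RS6000_TOC_SAVE_SLOT and no CFI attached, for the reasons
     given above.  */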
27716 if (rs6000_save_toc_in_prologue_p ()
27717 && !cfun->machine->toc_is_wrapped_separately)
27718 {
27719 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27720 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27721 }
27722
27723 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27724 if (using_split_stack && split_stack_arg_pointer_used_p ())
27725 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27726 }
27727
27728 /* Output .extern statements for the save/restore routines we use. */
27729
27730 static void
27731 rs6000_output_savres_externs (FILE *file)
27732 {
27733 rs6000_stack_t *info = rs6000_stack_info ();
27734
27735 if (TARGET_DEBUG_STACK)
27736 debug_stack_info (info);
27737
27738 /* Write .extern for any function we will call to save and restore
27739 fp values. */
27740 if (info->first_fp_reg_save < 64
27741 && !TARGET_MACHO
27742 && !TARGET_ELF)
27743 {
27744 char *name;
27745 int regno = info->first_fp_reg_save - 32;
27746
27747 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27748 {
27749 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27750 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27751 name = rs6000_savres_routine_name (regno, sel);
27752 fprintf (file, "\t.extern %s\n", name);
27753 }
27754 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27755 {
27756 bool lr = (info->savres_strategy
27757 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27758 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27759 name = rs6000_savres_routine_name (regno, sel);
27760 fprintf (file, "\t.extern %s\n", name);
27761 }
27762 }
27763 }
27764
27765 /* Write function prologue. */
27766
27767 static void
27768 rs6000_output_function_prologue (FILE *file)
27769 {
27770 if (!cfun->is_thunk)
27771 rs6000_output_savres_externs (file);
27772
27773 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27774 immediately after the global entry point label. */
27775 if (rs6000_global_entry_point_needed_p ())
27776 {
27777 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27778
27779 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27780
27781 if (TARGET_CMODEL != CMODEL_LARGE)
27782 {
	  /* In the small and medium code models, we assume the TOC is less
	     than 2 GB away from the text section, so it can be computed via
	     the following two-instruction sequence.  */
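	  /* Illustrative output, matching the fprintf calls below (label
	     numbers vary):
		0:	addis 2,12,.TOC.-.LCF0@ha
			addi 2,2,.TOC.-.LCF0@l  */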
27786 char buf[256];
27787
27788 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27789 fprintf (file, "0:\taddis 2,12,.TOC.-");
27790 assemble_name (file, buf);
27791 fprintf (file, "@ha\n");
27792 fprintf (file, "\taddi 2,2,.TOC.-");
27793 assemble_name (file, buf);
27794 fprintf (file, "@l\n");
27795 }
27796 else
27797 {
27798 /* In the large code model, we allow arbitrary offsets between the
27799 TOC and the text section, so we have to load the offset from
27800 memory. The data field is emitted directly before the global
27801 entry point in rs6000_elf_declare_function_name. */
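	  /* Illustrative output, matching the fprintf calls below (label
	     numbers vary):
		ld 2,.LCL0-.LCF0(12)
		add 2,2,12  */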
27802 char buf[256];
27803
27804 #ifdef HAVE_AS_ENTRY_MARKERS
27805 /* If supported by the linker, emit a marker relocation. If the
27806 total code size of the final executable or shared library
27807 happens to fit into 2 GB after all, the linker will replace
27808 this code sequence with the sequence for the small or medium
27809 code model. */
27810 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27811 #endif
27812 fprintf (file, "\tld 2,");
27813 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27814 assemble_name (file, buf);
27815 fprintf (file, "-");
27816 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27817 assemble_name (file, buf);
27818 fprintf (file, "(12)\n");
27819 fprintf (file, "\tadd 2,2,12\n");
27820 }
27821
27822 fputs ("\t.localentry\t", file);
27823 assemble_name (file, name);
27824 fputs (",.-", file);
27825 assemble_name (file, name);
27826 fputs ("\n", file);
27827 }
27828
27829 /* Output -mprofile-kernel code. This needs to be done here instead of
27830 in output_function_profile since it must go after the ELFv2 ABI
27831 local entry point. */
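  /* Illustrative output (64-bit; RS6000_MCOUNT is the target's mcount
     name).  With a static chain on a pre-ELFv2 ABI:
	mflr 0
	std 11,24(1)
	bl _mcount
	ld 11,24(1)
     and otherwise just the mflr followed by the bl.  */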
27832 if (TARGET_PROFILE_KERNEL && crtl->profile)
27833 {
27834 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27835 gcc_assert (!TARGET_32BIT);
27836
27837 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27838
      /* In the ELFv2 ABI we have no compiler stack word.  It must be
	 the responsibility of _mcount to preserve the static chain
	 register if required.  */
27842 if (DEFAULT_ABI != ABI_ELFv2
27843 && cfun->static_chain_decl != NULL)
27844 {
27845 asm_fprintf (file, "\tstd %s,24(%s)\n",
27846 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27847 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27848 asm_fprintf (file, "\tld %s,24(%s)\n",
27849 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27850 }
27851 else
27852 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27853 }
27854
27855 rs6000_pic_labelno++;
27856 }
27857
/* -mprofile-kernel code calls mcount before the function prologue,
   so a profiled leaf function should stay a leaf function.  */
27860 static bool
27861 rs6000_keep_leaf_when_profiled ()
27862 {
27863 return TARGET_PROFILE_KERNEL;
27864 }
27865
27866 /* Non-zero if vmx regs are restored before the frame pop, zero if
27867 we restore after the pop when possible. */
27868 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27869
/* Restoring cr is a two-step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */
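/* Illustrative only: together the two steps typically assemble to
	lwz 12,8(1)
	mtcrf 0xff,12
   (register, offset and field mask vary); the notes added below pin
   down where cr lives at each point.  */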
27878
27879 static rtx
27880 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27881 {
27882 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27883 rtx reg = gen_rtx_REG (SImode, regno);
27884 rtx_insn *insn = emit_move_insn (reg, mem);
27885
27886 if (!exit_func && DEFAULT_ABI == ABI_V4)
27887 {
27888 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27889 rtx set = gen_rtx_SET (reg, cr);
27890
27891 add_reg_note (insn, REG_CFA_REGISTER, set);
27892 RTX_FRAME_RELATED_P (insn) = 1;
27893 }
27894 return reg;
27895 }
27896
27897 /* Reload CR from REG. */
27898
27899 static void
27900 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27901 {
27902 int count = 0;
27903 int i;
27904
27905 if (using_mfcr_multiple)
27906 {
27907 for (i = 0; i < 8; i++)
27908 if (save_reg_p (CR0_REGNO + i))
27909 count++;
27910 gcc_assert (count);
27911 }
27912
27913 if (using_mfcr_multiple && count > 1)
27914 {
27915 rtx_insn *insn;
27916 rtvec p;
27917 int ndx;
27918
27919 p = rtvec_alloc (count);
27920
27921 ndx = 0;
27922 for (i = 0; i < 8; i++)
27923 if (save_reg_p (CR0_REGNO + i))
27924 {
27925 rtvec r = rtvec_alloc (2);
27926 RTVEC_ELT (r, 0) = reg;
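	  /* The immediate below is an FXM-style field mask: bit 7-i
	     (counting from the least significant bit) selects CR field i,
	     so CR0 corresponds to the most significant of the 8 mask
	     bits.  */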
27927 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27928 RTVEC_ELT (p, ndx) =
27929 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27930 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27931 ndx++;
27932 }
27933 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27934 gcc_assert (ndx == count);
27935
27936 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27937 CR field separately. */
27938 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27939 {
27940 for (i = 0; i < 8; i++)
27941 if (save_reg_p (CR0_REGNO + i))
27942 add_reg_note (insn, REG_CFA_RESTORE,
27943 gen_rtx_REG (SImode, CR0_REGNO + i));
27944
27945 RTX_FRAME_RELATED_P (insn) = 1;
27946 }
27947 }
27948 else
27949 for (i = 0; i < 8; i++)
27950 if (save_reg_p (CR0_REGNO + i))
27951 {
27952 rtx insn = emit_insn (gen_movsi_to_cr_one
27953 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27954
27955 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27956 CR field separately, attached to the insn that in fact
27957 restores this particular CR field. */
27958 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27959 {
27960 add_reg_note (insn, REG_CFA_RESTORE,
27961 gen_rtx_REG (SImode, CR0_REGNO + i));
27962
27963 RTX_FRAME_RELATED_P (insn) = 1;
27964 }
27965 }
27966
27967 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27968 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27969 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27970 {
27971 rtx_insn *insn = get_last_insn ();
27972 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27973
27974 add_reg_note (insn, REG_CFA_RESTORE, cr);
27975 RTX_FRAME_RELATED_P (insn) = 1;
27976 }
27977 }
27978
27979 /* Like cr, the move to lr instruction can be scheduled after the
27980 stack deallocation, but unlike cr, its stack frame save is still
27981 valid. So we only need to emit the cfa_restore on the correct
27982 instruction. */
27983
27984 static void
27985 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27986 {
27987 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27988 rtx reg = gen_rtx_REG (Pmode, regno);
27989
27990 emit_move_insn (reg, mem);
27991 }
27992
27993 static void
27994 restore_saved_lr (int regno, bool exit_func)
27995 {
27996 rtx reg = gen_rtx_REG (Pmode, regno);
27997 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27998 rtx_insn *insn = emit_move_insn (lr, reg);
27999
28000 if (!exit_func && flag_shrink_wrap)
28001 {
28002 add_reg_note (insn, REG_CFA_RESTORE, lr);
28003 RTX_FRAME_RELATED_P (insn) = 1;
28004 }
28005 }
28006
28007 static rtx
28008 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28009 {
28010 if (DEFAULT_ABI == ABI_ELFv2)
28011 {
28012 int i;
28013 for (i = 0; i < 8; i++)
28014 if (save_reg_p (CR0_REGNO + i))
28015 {
28016 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28017 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28018 cfa_restores);
28019 }
28020 }
28021 else if (info->cr_save_p)
28022 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28023 gen_rtx_REG (SImode, CR2_REGNO),
28024 cfa_restores);
28025
28026 if (info->lr_save_p)
28027 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28028 gen_rtx_REG (Pmode, LR_REGNO),
28029 cfa_restores);
28030 return cfa_restores;
28031 }
28032
/* Return true if OFFSET from the stack pointer can be clobbered by
   signals.  V.4 doesn't have any stack cushion; the AIX ABIs have 220
   or 288 bytes below the stack pointer that are not clobbered by
   signals.  */
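/* For example (64-bit AIX-style ABI, where the cushion is 288 bytes):
     offset_below_red_zone_p (-288)  => false, still protected
     offset_below_red_zone_p (-296)  => true, may be clobbered
   while under V.4 any offset below the stack pointer may be
   clobbered.  */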
28036
28037 static inline bool
28038 offset_below_red_zone_p (HOST_WIDE_INT offset)
28039 {
28040 return offset < (DEFAULT_ABI == ABI_V4
28041 ? 0
28042 : TARGET_32BIT ? -220 : -288);
28043 }
28044
28045 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28046
28047 static void
28048 emit_cfa_restores (rtx cfa_restores)
28049 {
28050 rtx_insn *insn = get_last_insn ();
28051 rtx *loc = &REG_NOTES (insn);
28052
28053 while (*loc)
28054 loc = &XEXP (*loc, 1);
28055 *loc = cfa_restores;
28056 RTX_FRAME_RELATED_P (insn) = 1;
28057 }
28058
28059 /* Emit function epilogue as insns. */
28060
28061 void
28062 rs6000_emit_epilogue (int sibcall)
28063 {
28064 rs6000_stack_t *info;
28065 int restoring_GPRs_inline;
28066 int restoring_FPRs_inline;
28067 int using_load_multiple;
28068 int using_mtcr_multiple;
28069 int use_backchain_to_restore_sp;
28070 int restore_lr;
28071 int strategy;
28072 HOST_WIDE_INT frame_off = 0;
28073 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28074 rtx frame_reg_rtx = sp_reg_rtx;
28075 rtx cfa_restores = NULL_RTX;
28076 rtx insn;
28077 rtx cr_save_reg = NULL_RTX;
28078 machine_mode reg_mode = Pmode;
28079 int reg_size = TARGET_32BIT ? 4 : 8;
28080 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
28081 int fp_reg_size = 8;
28082 int i;
28083 bool exit_func;
28084 unsigned ptr_regno;
28085
28086 info = rs6000_stack_info ();
28087
28088 strategy = info->savres_strategy;
28089 using_load_multiple = strategy & REST_MULTIPLE;
28090 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28091 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28092 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28093 || rs6000_tune == PROCESSOR_PPC603
28094 || rs6000_tune == PROCESSOR_PPC750
28095 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
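  /* For example (64-bit, illustrative), the backchain restore is a
     single load of the caller's sp saved at 0(r1):
	ld 1,0(1)
     whereas adding a large frame size back to r1 would need an
     addis/addi pair to build the constant.  */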
28101 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28102 ? info->lr_save_offset
28103 : 0) > 32767
28104 || (cfun->calls_alloca
28105 && !frame_pointer_needed));
28106 restore_lr = (info->lr_save_p
28107 && (restoring_FPRs_inline
28108 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28109 && (restoring_GPRs_inline
28110 || info->first_fp_reg_save < 64)
28111 && !cfun->machine->lr_is_wrapped_separately);
28112
28113
28114 if (WORLD_SAVE_P (info))
28115 {
28116 int i, j;
28117 char rname[30];
28118 const char *alloc_rname;
28119 rtvec p;
28120
      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller).
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */
28127
28128 p = rtvec_alloc (9
28129 + 32 - info->first_gp_reg_save
28130 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28131 + 63 + 1 - info->first_fp_reg_save);
28132
28133 strcpy (rname, ((crtl->calls_eh_return) ?
28134 "*eh_rest_world_r10" : "*rest_world"));
28135 alloc_rname = ggc_strdup (rname);
28136
28137 j = 0;
28138 RTVEC_ELT (p, j++) = ret_rtx;
28139 RTVEC_ELT (p, j++)
28140 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28141 /* The instruction pattern requires a clobber here;
28142 it is shared with the restVEC helper. */
28143 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
28144
28145 {
28146 /* CR register traditionally saved as CR2. */
28147 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28148 RTVEC_ELT (p, j++)
28149 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28150 if (flag_shrink_wrap)
28151 {
28152 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28153 gen_rtx_REG (Pmode, LR_REGNO),
28154 cfa_restores);
28155 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28156 }
28157 }
28158
28159 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28160 {
28161 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28162 RTVEC_ELT (p, j++)
28163 = gen_frame_load (reg,
28164 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28165 if (flag_shrink_wrap
28166 && save_reg_p (info->first_gp_reg_save + i))
28167 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28168 }
28169 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28170 {
28171 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28172 RTVEC_ELT (p, j++)
28173 = gen_frame_load (reg,
28174 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28175 if (flag_shrink_wrap
28176 && save_reg_p (info->first_altivec_reg_save + i))
28177 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28178 }
28179 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28180 {
28181 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28182 info->first_fp_reg_save + i);
28183 RTVEC_ELT (p, j++)
28184 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28185 if (flag_shrink_wrap
28186 && save_reg_p (info->first_fp_reg_save + i))
28187 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28188 }
28189 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
28190 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
28191 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
28192 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
28193 RTVEC_ELT (p, j++)
28194 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28195 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28196
28197 if (flag_shrink_wrap)
28198 {
28199 REG_NOTES (insn) = cfa_restores;
28200 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28201 RTX_FRAME_RELATED_P (insn) = 1;
28202 }
28203 return;
28204 }
28205
28206 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28207 if (info->push_p)
28208 frame_off = info->total_size;
28209
28210 /* Restore AltiVec registers if we must do so before adjusting the
28211 stack. */
28212 if (info->altivec_size != 0
28213 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28214 || (DEFAULT_ABI != ABI_V4
28215 && offset_below_red_zone_p (info->altivec_save_offset))))
28216 {
28217 int i;
28218 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28219
28220 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28221 if (use_backchain_to_restore_sp)
28222 {
28223 int frame_regno = 11;
28224
28225 if ((strategy & REST_INLINE_VRS) == 0)
28226 {
28227 /* Of r11 and r12, select the one not clobbered by an
28228 out-of-line restore function for the frame register. */
28229 frame_regno = 11 + 12 - scratch_regno;
28230 }
28231 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28232 emit_move_insn (frame_reg_rtx,
28233 gen_rtx_MEM (Pmode, sp_reg_rtx));
28234 frame_off = 0;
28235 }
28236 else if (frame_pointer_needed)
28237 frame_reg_rtx = hard_frame_pointer_rtx;
28238
28239 if ((strategy & REST_INLINE_VRS) == 0)
28240 {
28241 int end_save = info->altivec_save_offset + info->altivec_size;
28242 int ptr_off;
28243 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28244 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28245
28246 if (end_save + frame_off != 0)
28247 {
28248 rtx offset = GEN_INT (end_save + frame_off);
28249
28250 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28251 }
28252 else
28253 emit_move_insn (ptr_reg, frame_reg_rtx);
28254
28255 ptr_off = -end_save;
28256 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28257 info->altivec_save_offset + ptr_off,
28258 0, V4SImode, SAVRES_VR);
28259 }
28260 else
28261 {
28262 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28263 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28264 {
28265 rtx addr, areg, mem, insn;
28266 rtx reg = gen_rtx_REG (V4SImode, i);
28267 HOST_WIDE_INT offset
28268 = (info->altivec_save_offset + frame_off
28269 + 16 * (i - info->first_altivec_reg_save));
28270
28271 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28272 {
28273 mem = gen_frame_mem (V4SImode,
28274 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28275 GEN_INT (offset)));
28276 insn = gen_rtx_SET (reg, mem);
28277 }
28278 else
28279 {
28280 areg = gen_rtx_REG (Pmode, 0);
28281 emit_move_insn (areg, GEN_INT (offset));
28282
28283 /* AltiVec addressing mode is [reg+reg]. */
28284 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28285 mem = gen_frame_mem (V4SImode, addr);
28286
28287 /* Rather than emitting a generic move, force use of the
28288 lvx instruction, which we always want. In particular we
28289 don't want lxvd2x/xxpermdi for little endian. */
28290 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28291 }
28292
28293 (void) emit_insn (insn);
28294 }
28295 }
28296
28297 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28298 if (((strategy & REST_INLINE_VRS) == 0
28299 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28300 && (flag_shrink_wrap
28301 || (offset_below_red_zone_p
28302 (info->altivec_save_offset
28303 + 16 * (i - info->first_altivec_reg_save))))
28304 && save_reg_p (i))
28305 {
28306 rtx reg = gen_rtx_REG (V4SImode, i);
28307 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28308 }
28309 }
28310
28311 /* Restore VRSAVE if we must do so before adjusting the stack. */
28312 if (info->vrsave_size != 0
28313 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28314 || (DEFAULT_ABI != ABI_V4
28315 && offset_below_red_zone_p (info->vrsave_save_offset))))
28316 {
28317 rtx reg;
28318
28319 if (frame_reg_rtx == sp_reg_rtx)
28320 {
28321 if (use_backchain_to_restore_sp)
28322 {
28323 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28324 emit_move_insn (frame_reg_rtx,
28325 gen_rtx_MEM (Pmode, sp_reg_rtx));
28326 frame_off = 0;
28327 }
28328 else if (frame_pointer_needed)
28329 frame_reg_rtx = hard_frame_pointer_rtx;
28330 }
28331
28332 reg = gen_rtx_REG (SImode, 12);
28333 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28334 info->vrsave_save_offset + frame_off));
28335
28336 emit_insn (generate_set_vrsave (reg, info, 1));
28337 }
28338
28339 insn = NULL_RTX;
28340 /* If we have a large stack frame, restore the old stack pointer
28341 using the backchain. */
28342 if (use_backchain_to_restore_sp)
28343 {
28344 if (frame_reg_rtx == sp_reg_rtx)
28345 {
28346 /* Under V.4, don't reset the stack pointer until after we're done
28347 loading the saved registers. */
28348 if (DEFAULT_ABI == ABI_V4)
28349 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28350
28351 insn = emit_move_insn (frame_reg_rtx,
28352 gen_rtx_MEM (Pmode, sp_reg_rtx));
28353 frame_off = 0;
28354 }
28355 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28356 && DEFAULT_ABI == ABI_V4)
28357 /* frame_reg_rtx has been set up by the altivec restore. */
28358 ;
28359 else
28360 {
28361 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28362 frame_reg_rtx = sp_reg_rtx;
28363 }
28364 }
28365 /* If we have a frame pointer, we can restore the old stack pointer
28366 from it. */
28367 else if (frame_pointer_needed)
28368 {
28369 frame_reg_rtx = sp_reg_rtx;
28370 if (DEFAULT_ABI == ABI_V4)
28371 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28372 /* Prevent reordering memory accesses against stack pointer restore. */
28373 else if (cfun->calls_alloca
28374 || offset_below_red_zone_p (-info->total_size))
28375 rs6000_emit_stack_tie (frame_reg_rtx, true);
28376
28377 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28378 GEN_INT (info->total_size)));
28379 frame_off = 0;
28380 }
28381 else if (info->push_p
28382 && DEFAULT_ABI != ABI_V4
28383 && !crtl->calls_eh_return)
28384 {
28385 /* Prevent reordering memory accesses against stack pointer restore. */
28386 if (cfun->calls_alloca
28387 || offset_below_red_zone_p (-info->total_size))
28388 rs6000_emit_stack_tie (frame_reg_rtx, false);
28389 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28390 GEN_INT (info->total_size)));
28391 frame_off = 0;
28392 }
28393 if (insn && frame_reg_rtx == sp_reg_rtx)
28394 {
28395 if (cfa_restores)
28396 {
28397 REG_NOTES (insn) = cfa_restores;
28398 cfa_restores = NULL_RTX;
28399 }
28400 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28401 RTX_FRAME_RELATED_P (insn) = 1;
28402 }
28403
28404 /* Restore AltiVec registers if we have not done so already. */
28405 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28406 && info->altivec_size != 0
28407 && (DEFAULT_ABI == ABI_V4
28408 || !offset_below_red_zone_p (info->altivec_save_offset)))
28409 {
28410 int i;
28411
28412 if ((strategy & REST_INLINE_VRS) == 0)
28413 {
28414 int end_save = info->altivec_save_offset + info->altivec_size;
28415 int ptr_off;
28416 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28417 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28418 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28419
28420 if (end_save + frame_off != 0)
28421 {
28422 rtx offset = GEN_INT (end_save + frame_off);
28423
28424 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28425 }
28426 else
28427 emit_move_insn (ptr_reg, frame_reg_rtx);
28428
28429 ptr_off = -end_save;
28430 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28431 info->altivec_save_offset + ptr_off,
28432 0, V4SImode, SAVRES_VR);
28433 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28434 {
28435 /* Frame reg was clobbered by out-of-line save. Restore it
28436 from ptr_reg, and if we are calling out-of-line gpr or
28437 fpr restore set up the correct pointer and offset. */
28438 unsigned newptr_regno = 1;
28439 if (!restoring_GPRs_inline)
28440 {
28441 bool lr = info->gp_save_offset + info->gp_size == 0;
28442 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28443 newptr_regno = ptr_regno_for_savres (sel);
28444 end_save = info->gp_save_offset + info->gp_size;
28445 }
28446 else if (!restoring_FPRs_inline)
28447 {
28448 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28449 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28450 newptr_regno = ptr_regno_for_savres (sel);
28451 end_save = info->fp_save_offset + info->fp_size;
28452 }
28453
28454 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28455 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28456
28457 if (end_save + ptr_off != 0)
28458 {
28459 rtx offset = GEN_INT (end_save + ptr_off);
28460
28461 frame_off = -end_save;
28462 if (TARGET_32BIT)
28463 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28464 ptr_reg, offset));
28465 else
28466 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28467 ptr_reg, offset));
28468 }
28469 else
28470 {
28471 frame_off = ptr_off;
28472 emit_move_insn (frame_reg_rtx, ptr_reg);
28473 }
28474 }
28475 }
28476 else
28477 {
28478 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28479 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28480 {
28481 rtx addr, areg, mem, insn;
28482 rtx reg = gen_rtx_REG (V4SImode, i);
28483 HOST_WIDE_INT offset
28484 = (info->altivec_save_offset + frame_off
28485 + 16 * (i - info->first_altivec_reg_save));
28486
28487 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28488 {
28489 mem = gen_frame_mem (V4SImode,
28490 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28491 GEN_INT (offset)));
28492 insn = gen_rtx_SET (reg, mem);
28493 }
28494 else
28495 {
28496 areg = gen_rtx_REG (Pmode, 0);
28497 emit_move_insn (areg, GEN_INT (offset));
28498
28499 /* AltiVec addressing mode is [reg+reg]. */
28500 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28501 mem = gen_frame_mem (V4SImode, addr);
28502
28503 /* Rather than emitting a generic move, force use of the
28504 lvx instruction, which we always want. In particular we
28505 don't want lxvd2x/xxpermdi for little endian. */
28506 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28507 }
28508
28509 (void) emit_insn (insn);
28510 }
28511 }
28512
28513 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28514 if (((strategy & REST_INLINE_VRS) == 0
28515 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28516 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28517 && save_reg_p (i))
28518 {
28519 rtx reg = gen_rtx_REG (V4SImode, i);
28520 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28521 }
28522 }
28523
28524 /* Restore VRSAVE if we have not done so already. */
28525 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28526 && info->vrsave_size != 0
28527 && (DEFAULT_ABI == ABI_V4
28528 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28529 {
28530 rtx reg;
28531
28532 reg = gen_rtx_REG (SImode, 12);
28533 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28534 info->vrsave_save_offset + frame_off));
28535
28536 emit_insn (generate_set_vrsave (reg, info, 1));
28537 }
28538
28539 /* If we exit by an out-of-line restore function on ABI_V4 then that
28540 function will deallocate the stack, so we don't need to worry
28541 about the unwinder restoring cr from an invalid stack frame
28542 location. */
28543 exit_func = (!restoring_FPRs_inline
28544 || (!restoring_GPRs_inline
28545 && info->first_fp_reg_save == 64));
28546
28547 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28548 *separate* slots if the routine calls __builtin_eh_return, so
28549 that they can be independently restored by the unwinder. */
28550 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28551 {
28552 int i, cr_off = info->ehcr_offset;
28553
28554 for (i = 0; i < 8; i++)
28555 if (!call_used_regs[CR0_REGNO + i])
28556 {
28557 rtx reg = gen_rtx_REG (SImode, 0);
28558 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28559 cr_off + frame_off));
28560
28561 insn = emit_insn (gen_movsi_to_cr_one
28562 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28563
28564 if (!exit_func && flag_shrink_wrap)
28565 {
28566 add_reg_note (insn, REG_CFA_RESTORE,
28567 gen_rtx_REG (SImode, CR0_REGNO + i));
28568
28569 RTX_FRAME_RELATED_P (insn) = 1;
28570 }
28571
28572 cr_off += reg_size;
28573 }
28574 }
28575
28576 /* Get the old lr if we saved it. If we are restoring registers
28577 out-of-line, then the out-of-line routines can do this for us. */
28578 if (restore_lr && restoring_GPRs_inline)
28579 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28580
28581 /* Get the old cr if we saved it. */
28582 if (info->cr_save_p)
28583 {
28584 unsigned cr_save_regno = 12;
28585
28586 if (!restoring_GPRs_inline)
28587 {
28588 /* Ensure we don't use the register used by the out-of-line
28589 gpr register restore below. */
28590 bool lr = info->gp_save_offset + info->gp_size == 0;
28591 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28592 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28593
28594 if (gpr_ptr_regno == 12)
28595 cr_save_regno = 11;
28596 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28597 }
28598 else if (REGNO (frame_reg_rtx) == 12)
28599 cr_save_regno = 11;
28600
28601 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28602 info->cr_save_offset + frame_off,
28603 exit_func);
28604 }
28605
28606 /* Set LR here to try to overlap restores below. */
28607 if (restore_lr && restoring_GPRs_inline)
28608 restore_saved_lr (0, exit_func);
28609
28610 /* Load exception handler data registers, if needed. */
28611 if (crtl->calls_eh_return)
28612 {
28613 unsigned int i, regno;
28614
28615 if (TARGET_AIX)
28616 {
28617 rtx reg = gen_rtx_REG (reg_mode, 2);
28618 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28619 frame_off + RS6000_TOC_SAVE_SLOT));
28620 }
28621
28622 for (i = 0; ; ++i)
28623 {
28624 rtx mem;
28625
28626 regno = EH_RETURN_DATA_REGNO (i);
28627 if (regno == INVALID_REGNUM)
28628 break;
28629
28630 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28631 info->ehrd_offset + frame_off
28632 + reg_size * (int) i);
28633
28634 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28635 }
28636 }
28637
28638 /* Restore GPRs. This is done as a PARALLEL if we are using
28639 the load-multiple instructions. */
28640 if (!restoring_GPRs_inline)
28641 {
28642 /* We are jumping to an out-of-line function. */
28643 rtx ptr_reg;
28644 int end_save = info->gp_save_offset + info->gp_size;
28645 bool can_use_exit = end_save == 0;
28646 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28647 int ptr_off;
28648
28649 /* Emit stack reset code if we need it. */
28650 ptr_regno = ptr_regno_for_savres (sel);
28651 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28652 if (can_use_exit)
28653 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28654 else if (end_save + frame_off != 0)
28655 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28656 GEN_INT (end_save + frame_off)));
28657 else if (REGNO (frame_reg_rtx) != ptr_regno)
28658 emit_move_insn (ptr_reg, frame_reg_rtx);
28659 if (REGNO (frame_reg_rtx) == ptr_regno)
28660 frame_off = -end_save;
28661
28662 if (can_use_exit && info->cr_save_p)
28663 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28664
28665 ptr_off = -end_save;
28666 rs6000_emit_savres_rtx (info, ptr_reg,
28667 info->gp_save_offset + ptr_off,
28668 info->lr_save_offset + ptr_off,
28669 reg_mode, sel);
28670 }
28671 else if (using_load_multiple)
28672 {
28673 rtvec p;
28674 p = rtvec_alloc (32 - info->first_gp_reg_save);
28675 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28676 RTVEC_ELT (p, i)
28677 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28678 frame_reg_rtx,
28679 info->gp_save_offset + frame_off + reg_size * i);
28680 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28681 }
28682 else
28683 {
28684 int offset = info->gp_save_offset + frame_off;
28685 for (i = info->first_gp_reg_save; i < 32; i++)
28686 {
28687 if (save_reg_p (i)
28688 && !cfun->machine->gpr_is_wrapped_separately[i])
28689 {
28690 rtx reg = gen_rtx_REG (reg_mode, i);
28691 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28692 }
28693
28694 offset += reg_size;
28695 }
28696 }
28697
28698 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28699 {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
28706 if (frame_pointer_needed)
28707 {
28708 insn = get_last_insn ();
28709 add_reg_note (insn, REG_CFA_DEF_CFA,
28710 plus_constant (Pmode, frame_reg_rtx, frame_off));
28711 RTX_FRAME_RELATED_P (insn) = 1;
28712 }
28713
28714 /* Set up cfa_restores. We always need these when
28715 shrink-wrapping. If not shrink-wrapping then we only need
28716 the cfa_restore when the stack location is no longer valid.
28717 The cfa_restores must be emitted on or before the insn that
28718 invalidates the stack, and of course must not be emitted
28719 before the insn that actually does the restore. The latter
28720 is why it is a bad idea to emit the cfa_restores as a group
28721 on the last instruction here that actually does a restore:
28722 That insn may be reordered with respect to others doing
28723 restores. */
28724 if (flag_shrink_wrap
28725 && !restoring_GPRs_inline
28726 && info->first_fp_reg_save == 64)
28727 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28728
28729 for (i = info->first_gp_reg_save; i < 32; i++)
28730 if (save_reg_p (i)
28731 && !cfun->machine->gpr_is_wrapped_separately[i])
28732 {
28733 rtx reg = gen_rtx_REG (reg_mode, i);
28734 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28735 }
28736 }
28737
28738 if (!restoring_GPRs_inline
28739 && info->first_fp_reg_save == 64)
28740 {
28741 /* We are jumping to an out-of-line function. */
28742 if (cfa_restores)
28743 emit_cfa_restores (cfa_restores);
28744 return;
28745 }
28746
28747 if (restore_lr && !restoring_GPRs_inline)
28748 {
28749 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28750 restore_saved_lr (0, exit_func);
28751 }
28752
  /* Restore FPRs if we need to do it without calling a function.  */
28754 if (restoring_FPRs_inline)
28755 {
28756 int offset = info->fp_save_offset + frame_off;
28757 for (i = info->first_fp_reg_save; i < 64; i++)
28758 {
28759 if (save_reg_p (i)
28760 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28761 {
28762 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28763 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28764 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28765 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28766 cfa_restores);
28767 }
28768
28769 offset += fp_reg_size;
28770 }
28771 }
28772
28773 /* If we saved cr, restore it here. Just those that were used. */
28774 if (info->cr_save_p)
28775 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28776
28777 /* If this is V.4, unwind the stack pointer after all of the loads
28778 have been done, or set up r11 if we are restoring fp out of line. */
28779 ptr_regno = 1;
28780 if (!restoring_FPRs_inline)
28781 {
28782 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28783 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28784 ptr_regno = ptr_regno_for_savres (sel);
28785 }
28786
28787 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28788 if (REGNO (frame_reg_rtx) == ptr_regno)
28789 frame_off = 0;
28790
28791 if (insn && restoring_FPRs_inline)
28792 {
28793 if (cfa_restores)
28794 {
28795 REG_NOTES (insn) = cfa_restores;
28796 cfa_restores = NULL_RTX;
28797 }
28798 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28799 RTX_FRAME_RELATED_P (insn) = 1;
28800 }
28801
28802 if (crtl->calls_eh_return)
28803 {
28804 rtx sa = EH_RETURN_STACKADJ_RTX;
28805 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28806 }
28807
28808 if (!sibcall && restoring_FPRs_inline)
28809 {
28810 if (cfa_restores)
28811 {
28812 /* We can't hang the cfa_restores off a simple return,
28813 since the shrink-wrap code sometimes uses an existing
28814 return. This means there might be a path from
28815 pre-prologue code to this return, and dwarf2cfi code
28816 wants the eh_frame unwinder state to be the same on
28817 all paths to any point. So we need to emit the
	     cfa_restores before the return.  For -m64 we really
	     don't need epilogue cfa_restores at all, except for
	     this irritating dwarf2cfi-with-shrink-wrap
	     requirement; the stack red-zone means eh_frame info
28822 from the prologue telling the unwinder to restore
28823 from the stack is perfectly good right to the end of
28824 the function. */
28825 emit_insn (gen_blockage ());
28826 emit_cfa_restores (cfa_restores);
28827 cfa_restores = NULL_RTX;
28828 }
28829
28830 emit_jump_insn (targetm.gen_simple_return ());
28831 }
28832
28833 if (!sibcall && !restoring_FPRs_inline)
28834 {
28835 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28836 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28837 int elt = 0;
28838 RTVEC_ELT (p, elt++) = ret_rtx;
28839 if (lr)
28840 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28841
28842 /* We have to restore more than two FP registers, so branch to the
28843 restore function. It will return to our caller. */
28844 int i;
28845 int reg;
28846 rtx sym;
28847
28848 if (flag_shrink_wrap)
28849 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28850
28851 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28852 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28853 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28854 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28855
28856 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28857 {
28858 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28859
28860 RTVEC_ELT (p, elt++)
28861 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28862 if (flag_shrink_wrap
28863 && save_reg_p (info->first_fp_reg_save + i))
28864 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28865 }
28866
28867 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28868 }
28869
28870 if (cfa_restores)
28871 {
28872 if (sibcall)
28873 /* Ensure the cfa_restores are hung off an insn that won't
28874 be reordered above other restores. */
28875 emit_insn (gen_blockage ());
28876
28877 emit_cfa_restores (cfa_restores);
28878 }
28879 }
28880
28881 /* Write function epilogue. */
28882
28883 static void
28884 rs6000_output_function_epilogue (FILE *file)
28885 {
28886 #if TARGET_MACHO
28887 macho_branch_islands ();
28888
28889 {
28890 rtx_insn *insn = get_last_insn ();
28891 rtx_insn *deleted_debug_label = NULL;
28892
28893 /* Mach-O doesn't support labels at the end of objects, so if
28894 it looks like we might want one, take special action.
28895
28896 First, collect any sequence of deleted debug labels. */
28897 while (insn
28898 && NOTE_P (insn)
28899 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28900 {
28901 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28902 notes; instead set their CODE_LABEL_NUMBER to -1,
28903 otherwise there would be code generation differences
28904 between -g and -g0.  */
28905 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28906 deleted_debug_label = insn;
28907 insn = PREV_INSN (insn);
28908 }
28909
28910 /* Second, if we have:
28911 label:
28912 barrier
28913 then this needs to be detected, so skip past the barrier. */
28914
28915 if (insn && BARRIER_P (insn))
28916 insn = PREV_INSN (insn);
28917
28918 /* Up to now we've only seen notes or barriers. */
28919 if (insn)
28920 {
28921 if (LABEL_P (insn)
28922 || (NOTE_P (insn)
28923 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28924 /* Trailing label: <barrier>. */
28925 fputs ("\tnop\n", file);
28926 else
28927 {
28928 /* Lastly, see if we have a completely empty function body. */
28929 while (insn && ! INSN_P (insn))
28930 insn = PREV_INSN (insn);
28931 /* If we don't find any insns, we've got an empty function body;
28932 i.e. completely empty, without a return or branch.  This is
28933 taken as the case where a function body has been removed
28934 because it contains an inline __builtin_unreachable(). GCC
28935 treats reaching __builtin_unreachable() as undefined behavior, so we're
28936 not obliged to do anything special; however, we want
28937 non-zero-sized function bodies. To meet this, and help the
28938 user out, let's trap the case. */
28939 if (insn == NULL)
28940 fputs ("\ttrap\n", file);
28941 }
28942 }
28943 else if (deleted_debug_label)
28944 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28945 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28946 CODE_LABEL_NUMBER (insn) = -1;
28947 }
28948 #endif
28949
28950 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28951 on its format.
28952
28953 We don't output a traceback table if -finhibit-size-directive was
28954 used. The documentation for -finhibit-size-directive reads
28955 ``don't output a @code{.size} assembler directive, or anything
28956 else that would cause trouble if the function is split in the
28957 middle, and the two halves are placed at locations far apart in
28958 memory.'' The traceback table has this property, since it
28959 includes the offset from the start of the function to the
28960 traceback table itself.
28961
28962 System V.4 PowerPC targets (and the embedded ABI derived from them)
28963 use a different traceback table.  */
28964 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28965 && ! flag_inhibit_size_directive
28966 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28967 {
28968 const char *fname = NULL;
28969 const char *language_string = lang_hooks.name;
28970 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28971 int i;
28972 int optional_tbtab;
28973 rs6000_stack_t *info = rs6000_stack_info ();
28974
28975 if (rs6000_traceback == traceback_full)
28976 optional_tbtab = 1;
28977 else if (rs6000_traceback == traceback_part)
28978 optional_tbtab = 0;
28979 else
28980 optional_tbtab = !optimize_size && !TARGET_ELF;
28981
28982 if (optional_tbtab)
28983 {
28984 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28985 while (*fname == '.') /* V.4 encodes . in the name */
28986 fname++;
28987
28988 /* Need label immediately before tbtab, so we can compute
28989 its offset from the function start. */
28990 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28991 ASM_OUTPUT_LABEL (file, fname);
28992 }
28993
28994 /* The .tbtab pseudo-op can only be used for the first eight
28995 expressions, since it can't handle the possibly variable
28996 length fields that follow. However, if you omit the optional
28997 fields, the assembler outputs zeros for all optional fields
28998 anyway, giving each variable-length field its minimum length
28999 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29000 pseudo-op at all. */
29001
29002 /* An all-zero word flags the start of the tbtab, for debuggers
29003 that have to find it by searching forward from the entry
29004 point or from the current pc. */
29005 fputs ("\t.long 0\n", file);
29006
29007 /* Tbtab format type. Use format type 0. */
29008 fputs ("\t.byte 0,", file);
29009
29010 /* Language type. Unfortunately, there does not seem to be any
29011 official way to discover the language being compiled, so we
29012 use language_string.
29013 C is 0. Fortran is 1. Ada is 3. C++ is 9.
29014 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29015 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
29016 numbers either, so for now use 0. */
29017 if (lang_GNU_C ()
29018 || ! strcmp (language_string, "GNU GIMPLE")
29019 || ! strcmp (language_string, "GNU Go")
29020 || ! strcmp (language_string, "GNU D")
29021 || ! strcmp (language_string, "libgccjit"))
29022 i = 0;
29023 else if (! strcmp (language_string, "GNU F77")
29024 || lang_GNU_Fortran ())
29025 i = 1;
29026 else if (! strcmp (language_string, "GNU Ada"))
29027 i = 3;
29028 else if (lang_GNU_CXX ()
29029 || ! strcmp (language_string, "GNU Objective-C++"))
29030 i = 9;
29031 else if (! strcmp (language_string, "GNU Java"))
29032 i = 13;
29033 else if (! strcmp (language_string, "GNU Objective-C"))
29034 i = 14;
29035 else
29036 gcc_unreachable ();
29037 fprintf (file, "%d,", i);
29038
29039 /* 8 single bit fields: global linkage (not set for C extern linkage,
29040 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29041 from start of procedure stored in tbtab, internal function, function
29042 has controlled storage, function has no toc, function uses fp,
29043 function logs/aborts fp operations. */
29044 /* Assume that fp operations are used if any fp reg must be saved. */
29045 fprintf (file, "%d,",
29046 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
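/* A purely illustrative example (not from the original source): with
   optional_tbtab == 1 and at least one FPR saved
   (first_fp_reg_save != 64), this emits (1 << 5) | (1 << 1) == 34,
   i.e. only the "offset stored in tbtab" and "function uses fp" bits
   are set.  */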
29047
29048 /* 6 bitfields: function is interrupt handler, name present in
29049 proc table, function calls alloca, on condition directives
29050 (controls stack walks, 3 bits), saves condition reg, saves
29051 link reg. */
29052 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29053 set up as a frame pointer, even when there is no alloca call. */
29054 fprintf (file, "%d,",
29055 ((optional_tbtab << 6)
29056 | ((optional_tbtab & frame_pointer_needed) << 5)
29057 | (info->cr_save_p << 1)
29058 | (info->lr_save_p)));
29059
29060 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29061 (6 bits). */
29062 fprintf (file, "%d,",
29063 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29064
29065 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29066 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29067
29068 if (optional_tbtab)
29069 {
29070 /* Compute the parameter info from the function decl argument
29071 list. */
29072 tree decl;
29073 int next_parm_info_bit = 31;
29074
29075 for (decl = DECL_ARGUMENTS (current_function_decl);
29076 decl; decl = DECL_CHAIN (decl))
29077 {
29078 rtx parameter = DECL_INCOMING_RTL (decl);
29079 machine_mode mode = GET_MODE (parameter);
29080
29081 if (REG_P (parameter))
29082 {
29083 if (SCALAR_FLOAT_MODE_P (mode))
29084 {
29085 int bits;
29086
29087 float_parms++;
29088
29089 switch (mode)
29090 {
29091 case E_SFmode:
29092 case E_SDmode:
29093 bits = 0x2;
29094 break;
29095
29096 case E_DFmode:
29097 case E_DDmode:
29098 case E_TFmode:
29099 case E_TDmode:
29100 case E_IFmode:
29101 case E_KFmode:
29102 bits = 0x3;
29103 break;
29104
29105 default:
29106 gcc_unreachable ();
29107 }
29108
29109 /* If only one bit will fit, don't or in this entry. */
29110 if (next_parm_info_bit > 0)
29111 parm_info |= (bits << (next_parm_info_bit - 1));
29112 next_parm_info_bit -= 2;
29113 }
29114 else
29115 {
29116 fixed_parms += ((GET_MODE_SIZE (mode)
29117 + (UNITS_PER_WORD - 1))
29118 / UNITS_PER_WORD);
29119 next_parm_info_bit -= 1;
29120 }
29121 }
29122 }
29123 }
29124
29125 /* Number of fixed point parameters. */
29126 /* This is actually the number of words of fixed point parameters; thus
29127 an 8-byte struct counts as 2, and the maximum value is 8.  */
29128 fprintf (file, "%d,", fixed_parms);
29129
29130 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29131 all on stack. */
29132 /* This is actually the number of fp registers that hold parameters;
29133 and thus the maximum value is 13. */
29134 /* Set parameters on stack bit if parameters are not in their original
29135 registers, regardless of whether they are on the stack? Xlc
29136 seems to set the bit when not optimizing. */
29137 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29138
29139 if (optional_tbtab)
29140 {
29141 /* Optional fields follow. Some are variable length. */
29142
29143 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29144 float, 11 double float. */
29145 /* There is an entry for each parameter in a register, in the order
29146 that they occur in the parameter list. Any intervening arguments
29147 on the stack are ignored. If the list overflows a long (max
29148 possible length 34 bits) then completely leave off all elements
29149 that don't fit. */
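/* A hypothetical example: for a function whose register parameters
   are (int, double, float), the parameter scan above leaves bit 31
   == 0 (fixed), bits 30-29 == 11 (double float) and bits 28-27 == 10
   (single float), so parm_info == 0x70000000.  */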
29150 /* Only emit this long if there was at least one parameter. */
29151 if (fixed_parms || float_parms)
29152 fprintf (file, "\t.long %d\n", parm_info);
29153
29154 /* Offset from start of code to tb table. */
29155 fputs ("\t.long ", file);
29156 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29157 RS6000_OUTPUT_BASENAME (file, fname);
29158 putc ('-', file);
29159 rs6000_output_function_entry (file, fname);
29160 putc ('\n', file);
29161
29162 /* Interrupt handler mask. */
29163 /* Omit this long, since we never set the interrupt handler bit
29164 above. */
29165
29166 /* Number of CTL (controlled storage) anchors. */
29167 /* Omit this long, since the has_ctl bit is never set above. */
29168
29169 /* Displacement into stack of each CTL anchor. */
29170 /* Omit this list of longs, because there are no CTL anchors. */
29171
29172 /* Length of function name. */
29173 if (*fname == '*')
29174 ++fname;
29175 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29176
29177 /* Function name. */
29178 assemble_string (fname, strlen (fname));
29179
29180 /* Register for alloca automatic storage; this is always reg 31.
29181 Only emit this if the alloca bit was set above. */
29182 if (frame_pointer_needed)
29183 fputs ("\t.byte 31\n", file);
29184
29185 fputs ("\t.align 2\n", file);
29186 }
29187 }
29188
29189 /* Arrange to define .LCTOC1 label, if not already done. */
29190 if (need_toc_init)
29191 {
29192 need_toc_init = 0;
29193 if (!toc_initialized)
29194 {
29195 switch_to_section (toc_section);
29196 switch_to_section (current_function_section ());
29197 }
29198 }
29199 }
29200
29201 /* -fsplit-stack support. */
29202
29203 /* A SYMBOL_REF for __morestack. */
29204 static GTY(()) rtx morestack_ref;
29205
29206 static rtx
29207 gen_add3_const (rtx rt, rtx ra, long c)
29208 {
29209 if (TARGET_64BIT)
29210 return gen_adddi3 (rt, ra, GEN_INT (c));
29211 else
29212 return gen_addsi3 (rt, ra, GEN_INT (c));
29213 }
29214
29215 /* Emit -fsplit-stack prologue, which goes before the regular function
29216 prologue (at local entry point in the case of ELFv2). */
29217
29218 void
29219 rs6000_expand_split_stack_prologue (void)
29220 {
29221 rs6000_stack_t *info = rs6000_stack_info ();
29222 unsigned HOST_WIDE_INT allocate;
29223 long alloc_hi, alloc_lo;
29224 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29225 rtx_insn *insn;
29226
29227 gcc_assert (flag_split_stack && reload_completed);
29228
29229 if (!info->push_p)
29230 return;
29231
29232 if (global_regs[29])
29233 {
29234 error ("%qs uses register r29", "-fsplit-stack");
29235 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29236 "conflicts with %qD", global_regs_decl[29]);
29237 }
29238
29239 allocate = info->total_size;
29240 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29241 {
29242 sorry ("stack frame larger than 2G is not supported for -fsplit-stack");
29243 return;
29244 }
29245 if (morestack_ref == NULL_RTX)
29246 {
29247 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29248 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29249 | SYMBOL_FLAG_FUNCTION);
29250 }
29251
29252 r0 = gen_rtx_REG (Pmode, 0);
29253 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29254 r12 = gen_rtx_REG (Pmode, 12);
29255 emit_insn (gen_load_split_stack_limit (r0));
29256 /* Always emit two insns here to calculate the requested stack,
29257 so that the linker can edit them when adjusting size for calling
29258 non-split-stack code. */
29259 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29260 alloc_lo = -allocate - alloc_hi;
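/* Worked example (illustrative only): for allocate == 0x12340,
   -allocate == -0x12340, giving alloc_hi == -0x10000 and
   alloc_lo == -0x2340; r12 ends up as r1 - 0x10000 - 0x2340, and
   alloc_lo always fits in a signed 16-bit immediate.  */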
29261 if (alloc_hi != 0)
29262 {
29263 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29264 if (alloc_lo != 0)
29265 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29266 else
29267 emit_insn (gen_nop ());
29268 }
29269 else
29270 {
29271 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29272 emit_insn (gen_nop ());
29273 }
29274
29275 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29276 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29277 ok_label = gen_label_rtx ();
29278 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29279 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29280 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29281 pc_rtx);
29282 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29283 JUMP_LABEL (insn) = ok_label;
29284 /* Mark the jump as very likely to be taken. */
29285 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29286
29287 lr = gen_rtx_REG (Pmode, LR_REGNO);
29288 insn = emit_move_insn (r0, lr);
29289 RTX_FRAME_RELATED_P (insn) = 1;
29290 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29291 RTX_FRAME_RELATED_P (insn) = 1;
29292
29293 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29294 const0_rtx, const0_rtx));
29295 call_fusage = NULL_RTX;
29296 use_reg (&call_fusage, r12);
29297 /* Say the call uses r0, even though it doesn't, to stop regrename
29298 from twiddling with the insns saving lr, trashing args for cfun.
29299 The insns restoring lr are similarly protected by making
29300 split_stack_return use r0. */
29301 use_reg (&call_fusage, r0);
29302 add_function_usage_to (insn, call_fusage);
29303 /* Indicate that this function can't jump to non-local gotos. */
29304 make_reg_eh_region_note_nothrow_nononlocal (insn);
29305 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29306 insn = emit_move_insn (lr, r0);
29307 add_reg_note (insn, REG_CFA_RESTORE, lr);
29308 RTX_FRAME_RELATED_P (insn) = 1;
29309 emit_insn (gen_split_stack_return ());
29310
29311 emit_label (ok_label);
29312 LABEL_NUSES (ok_label) = 1;
29313 }
29314
29315 /* Return the internal arg pointer used for function incoming
29316 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29317 to copy it to a pseudo in order for it to be preserved over calls
29318 and suchlike. We'd really like to use a pseudo here for the
29319 internal arg pointer but data-flow analysis is not prepared to
29320 accept pseudos as live at the beginning of a function. */
29321
29322 static rtx
29323 rs6000_internal_arg_pointer (void)
29324 {
29325 if (flag_split_stack
29326 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29327 == NULL))
29329 {
29330 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29331 {
29332 rtx pat;
29333
29334 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29335 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29336
29337 /* Put the pseudo initialization right after the note at the
29338 beginning of the function. */
29339 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29340 gen_rtx_REG (Pmode, 12));
29341 push_topmost_sequence ();
29342 emit_insn_after (pat, get_insns ());
29343 pop_topmost_sequence ();
29344 }
29345 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29346 FIRST_PARM_OFFSET (current_function_decl));
29347 return copy_to_reg (ret);
29348 }
29349 return virtual_incoming_args_rtx;
29350 }
29351
29352 /* We may have to tell the dataflow pass that the split stack prologue
29353 is initializing a register. */
29354
29355 static void
29356 rs6000_live_on_entry (bitmap regs)
29357 {
29358 if (flag_split_stack)
29359 bitmap_set_bit (regs, 12);
29360 }
29361
29362 /* Emit -fsplit-stack dynamic stack allocation space check. */
29363
29364 void
29365 rs6000_split_stack_space_check (rtx size, rtx label)
29366 {
29367 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29368 rtx limit = gen_reg_rtx (Pmode);
29369 rtx requested = gen_reg_rtx (Pmode);
29370 rtx cmp = gen_reg_rtx (CCUNSmode);
29371 rtx jump;
29372
29373 emit_insn (gen_load_split_stack_limit (limit));
29374 if (CONST_INT_P (size))
29375 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29376 else
29377 {
29378 size = force_reg (Pmode, size);
29379 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29380 }
29381 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29382 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29383 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29384 gen_rtx_LABEL_REF (VOIDmode, label),
29385 pc_rtx);
29386 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29387 JUMP_LABEL (jump) = label;
29388 }
29389 \f
29390 /* A C compound statement that outputs the assembler code for a thunk
29391 function, used to implement C++ virtual function calls with
29392 multiple inheritance. The thunk acts as a wrapper around a virtual
29393 function, adjusting the implicit object parameter before handing
29394 control off to the real function.
29395
29396 First, emit code to add the integer DELTA to the location that
29397 contains the incoming first argument. Assume that this argument
29398 contains a pointer, and is the one used to pass the `this' pointer
29399 in C++. This is the incoming argument *before* the function
29400 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29401 values of all other incoming arguments.
29402
29403 After the addition, emit code to jump to FUNCTION, which is a
29404 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29405 not touch the return address. Hence returning from FUNCTION will
29406 return to whoever called the current `thunk'.
29407
29408 The effect must be as if FUNCTION had been called directly with the
29409 adjusted first argument. This macro is responsible for emitting
29410 all of the code for a thunk function; output_function_prologue()
29411 and output_function_epilogue() are not invoked.
29412
29413 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29414 been extracted from it.) It might possibly be useful on some
29415 targets, but probably not.
29416
29417 If you do not define this macro, the target-independent code in the
29418 C++ frontend will generate a less efficient heavyweight thunk that
29419 calls FUNCTION instead of jumping to it. The generic approach does
29420 not support varargs. */
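/* A rough C rendering of the emitted thunk, for illustration only
   (DELTA, VCALL_OFFSET and FUNCTION stand for the parameters below):

	this += DELTA;
	if (VCALL_OFFSET)
	  this += *(ptrdiff_t *) (*(char **) this + VCALL_OFFSET);
	goto FUNCTION;

   i.e. the second adjustment is fetched VCALL_OFFSET bytes into the
   object's vtable.  */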
29421
29422 static void
29423 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29424 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29425 tree function)
29426 {
29427 rtx this_rtx, funexp;
29428 rtx_insn *insn;
29429
29430 reload_completed = 1;
29431 epilogue_completed = 1;
29432
29433 /* Mark the end of the (empty) prologue. */
29434 emit_note (NOTE_INSN_PROLOGUE_END);
29435
29436 /* Find the "this" pointer. If the function returns a structure,
29437 the structure return pointer is in r3. */
29438 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29439 this_rtx = gen_rtx_REG (Pmode, 4);
29440 else
29441 this_rtx = gen_rtx_REG (Pmode, 3);
29442
29443 /* Apply the constant offset, if required. */
29444 if (delta)
29445 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29446
29447 /* Apply the offset from the vtable, if required. */
29448 if (vcall_offset)
29449 {
29450 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29451 rtx tmp = gen_rtx_REG (Pmode, 12);
29452
29453 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29454 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29455 {
29456 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29457 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29458 }
29459 else
29460 {
29461 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29462
29463 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29464 }
29465 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29466 }
29467
29468 /* Generate a tail call to the target function. */
29469 if (!TREE_USED (function))
29470 {
29471 assemble_external (function);
29472 TREE_USED (function) = 1;
29473 }
29474 funexp = XEXP (DECL_RTL (function), 0);
29475 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29476
29477 #if TARGET_MACHO
29478 if (MACHOPIC_INDIRECT)
29479 funexp = machopic_indirect_call_target (funexp);
29480 #endif
29481
29482 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29483 generate sibcall RTL explicitly. */
29484 insn = emit_call_insn (
29485 gen_rtx_PARALLEL (VOIDmode,
29486 gen_rtvec (3,
29487 gen_rtx_CALL (VOIDmode,
29488 funexp, const0_rtx),
29489 gen_rtx_USE (VOIDmode, const0_rtx),
29490 simple_return_rtx)));
29491 SIBLING_CALL_P (insn) = 1;
29492 emit_barrier ();
29493
29494 /* Run just enough of rest_of_compilation to get the insns emitted.
29495 There's not really enough bulk here to make other passes such as
29496 instruction scheduling worthwhile.  Note that use_thunk calls
29497 assemble_start_function and assemble_end_function. */
29498 insn = get_insns ();
29499 shorten_branches (insn);
29500 final_start_function (insn, file, 1);
29501 final (insn, file, 1);
29502 final_end_function ();
29503
29504 reload_completed = 0;
29505 epilogue_completed = 0;
29506 }
29507 \f
29508 /* A quick summary of the various types of 'constant-pool tables'
29509 under PowerPC:
29510
29511 Target Flags Name One table per
29512 AIX (none) AIX TOC object file
29513 AIX -mfull-toc AIX TOC object file
29514 AIX -mminimal-toc AIX minimal TOC translation unit
29515 SVR4/EABI (none) SVR4 SDATA object file
29516 SVR4/EABI -fpic SVR4 pic object file
29517 SVR4/EABI -fPIC SVR4 PIC translation unit
29518 SVR4/EABI -mrelocatable EABI TOC function
29519 SVR4/EABI -maix AIX TOC object file
29520 SVR4/EABI -maix -mminimal-toc
29521 AIX minimal TOC translation unit
29522
29523 Name Reg. Set by entries contains:
29524 made by addrs? fp? sum?
29525
29526 AIX TOC 2 crt0 as Y option option
29527 AIX minimal TOC 30 prolog gcc Y Y option
29528 SVR4 SDATA 13 crt0 gcc N Y N
29529 SVR4 pic 30 prolog ld Y not yet N
29530 SVR4 PIC 30 prolog gcc Y option option
29531 EABI TOC 30 prolog gcc Y option option
29532
29533 */
29534
29535 /* Hash functions for the hash table. */
29536
29537 static unsigned
29538 rs6000_hash_constant (rtx k)
29539 {
29540 enum rtx_code code = GET_CODE (k);
29541 machine_mode mode = GET_MODE (k);
29542 unsigned result = (code << 3) ^ mode;
29543 const char *format;
29544 int flen, fidx;
29545
29546 format = GET_RTX_FORMAT (code);
29547 flen = strlen (format);
29548 fidx = 0;
29549
29550 switch (code)
29551 {
29552 case LABEL_REF:
29553 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29554
29555 case CONST_WIDE_INT:
29556 {
29557 int i;
29558 flen = CONST_WIDE_INT_NUNITS (k);
29559 for (i = 0; i < flen; i++)
29560 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29561 return result;
29562 }
29563
29564 case CONST_DOUBLE:
29565 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29566
29567 case CODE_LABEL:
29568 fidx = 3;
29569 break;
29570
29571 default:
29572 break;
29573 }
29574
29575 for (; fidx < flen; fidx++)
29576 switch (format[fidx])
29577 {
29578 case 's':
29579 {
29580 unsigned i, len;
29581 const char *str = XSTR (k, fidx);
29582 len = strlen (str);
29583 result = result * 613 + len;
29584 for (i = 0; i < len; i++)
29585 result = result * 613 + (unsigned) str[i];
29586 break;
29587 }
29588 case 'u':
29589 case 'e':
29590 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29591 break;
29592 case 'i':
29593 case 'n':
29594 result = result * 613 + (unsigned) XINT (k, fidx);
29595 break;
29596 case 'w':
29597 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29598 result = result * 613 + (unsigned) XWINT (k, fidx);
29599 else
29600 {
29601 size_t i;
29602 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29603 result = result * 613 + (unsigned) (XWINT (k, fidx)
29604 >> CHAR_BIT * i);
29605 }
29606 break;
29607 case '0':
29608 break;
29609 default:
29610 gcc_unreachable ();
29611 }
29612
29613 return result;
29614 }
29615
29616 hashval_t
29617 toc_hasher::hash (toc_hash_struct *thc)
29618 {
29619 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29620 }
29621
29622 /* Compare H1 and H2 for equivalence. */
29623
29624 bool
29625 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29626 {
29627 rtx r1 = h1->key;
29628 rtx r2 = h2->key;
29629
29630 if (h1->key_mode != h2->key_mode)
29631 return 0;
29632
29633 return rtx_equal_p (r1, r2);
29634 }
29635
29636 /* These are the names given by the C++ front-end to vtables, and
29637 vtable-like objects. Ideally, this logic should not be here;
29638 instead, there should be some programmatic way of inquiring as
29639 to whether or not an object is a vtable. */
29640
29641 #define VTABLE_NAME_P(NAME) \
29642 (strncmp ("_vt.", NAME, strlen ("_vt.")) == 0 \
29643 || strncmp ("_ZTV", NAME, strlen ("_ZTV")) == 0 \
29644 || strncmp ("_ZTT", NAME, strlen ("_ZTT")) == 0 \
29645 || strncmp ("_ZTI", NAME, strlen ("_ZTI")) == 0 \
29646 || strncmp ("_ZTC", NAME, strlen ("_ZTC")) == 0)
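/* For example, under the Itanium C++ ABI the mangled name "_ZTV3Foo"
   names the vtable for class Foo and "_ZTI3Foo" its type_info
   object.  */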
29647
29648 #ifdef NO_DOLLAR_IN_LABEL
29649 /* Return a GGC-allocated character string translating dollar signs in
29650 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
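/* For instance (illustrative): "foo$bar$baz" becomes "foo_bar_baz";
   a name with no '$', or whose first character is '$', is returned
   unchanged.  */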
29651
29652 const char *
29653 rs6000_xcoff_strip_dollar (const char *name)
29654 {
29655 char *strip, *p;
29656 const char *q;
29657 size_t len;
29658
29659 q = (const char *) strchr (name, '$');
29660
29661 if (q == 0 || q == name)
29662 return name;
29663
29664 len = strlen (name);
29665 strip = XALLOCAVEC (char, len + 1);
29666 strcpy (strip, name);
29667 p = strip + (q - name);
29668 while (p)
29669 {
29670 *p = '_';
29671 p = strchr (p + 1, '$');
29672 }
29673
29674 return ggc_alloc_string (strip, len);
29675 }
29676 #endif
29677
29678 void
29679 rs6000_output_symbol_ref (FILE *file, rtx x)
29680 {
29681 const char *name = XSTR (x, 0);
29682
29683 /* Currently C++ TOC references to vtables can be emitted before it
29684 is decided whether the vtable is public or private.  If this is
29685 the case, then the linker will eventually complain that there is
29686 a reference to an unknown section.  Thus, for vtables only, we
29687 make the TOC reference name the identifier rather than the
29688 symbol.  */
29689 if (VTABLE_NAME_P (name))
29690 {
29691 RS6000_OUTPUT_BASENAME (file, name);
29692 }
29693 else
29694 assemble_name (file, name);
29695 }
29696
29697 /* Output a TOC entry. We derive the entry name from what is being
29698 written. */
29699
29700 void
29701 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29702 {
29703 char buf[256];
29704 const char *name = buf;
29705 rtx base = x;
29706 HOST_WIDE_INT offset = 0;
29707
29708 gcc_assert (!TARGET_NO_TOC);
29709
29710 /* When the linker won't eliminate them, don't output duplicate
29711 TOC entries (this happens on AIX if there is any kind of TOC,
29712 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29713 CODE_LABELs. */
29714 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29715 {
29716 struct toc_hash_struct *h;
29717
29718 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29719 time because GGC is not initialized at that point. */
29720 if (toc_hash_table == NULL)
29721 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29722
29723 h = ggc_alloc<toc_hash_struct> ();
29724 h->key = x;
29725 h->key_mode = mode;
29726 h->labelno = labelno;
29727
29728 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29729 if (*found == NULL)
29730 *found = h;
29731 else /* This is indeed a duplicate.
29732 Set this label equal to that label. */
29733 {
29734 fputs ("\t.set ", file);
29735 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29736 fprintf (file, "%d,", labelno);
29737 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29738 fprintf (file, "%d\n", ((*found)->labelno));
29739
29740 #ifdef HAVE_AS_TLS
29741 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29742 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29743 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29744 {
29745 fputs ("\t.set ", file);
29746 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29747 fprintf (file, "%d,", labelno);
29748 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29749 fprintf (file, "%d\n", ((*found)->labelno));
29750 }
29751 #endif
29752 return;
29753 }
29754 }
29755
29756 /* If we're going to put a double constant in the TOC, make sure it's
29757 aligned properly when strict alignment is on. */
29758 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29759 && STRICT_ALIGNMENT
29760 && GET_MODE_BITSIZE (mode) >= 64
29761 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29762 ASM_OUTPUT_ALIGN (file, 3);
29764
29765 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29766
29767 /* Handle FP constants specially. Note that if we have a minimal
29768 TOC, things we put here aren't actually in the TOC, so we can allow
29769 FP constants. */
29770 if (CONST_DOUBLE_P (x)
29771 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29772 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29773 {
29774 long k[4];
29775
29776 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29777 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29778 else
29779 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29780
29781 if (TARGET_64BIT)
29782 {
29783 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29784 fputs (DOUBLE_INT_ASM_OP, file);
29785 else
29786 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29787 k[0] & 0xffffffff, k[1] & 0xffffffff,
29788 k[2] & 0xffffffff, k[3] & 0xffffffff);
29789 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29790 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29791 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29792 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29793 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29794 return;
29795 }
29796 else
29797 {
29798 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29799 fputs ("\t.long ", file);
29800 else
29801 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29802 k[0] & 0xffffffff, k[1] & 0xffffffff,
29803 k[2] & 0xffffffff, k[3] & 0xffffffff);
29804 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29805 k[0] & 0xffffffff, k[1] & 0xffffffff,
29806 k[2] & 0xffffffff, k[3] & 0xffffffff);
29807 return;
29808 }
29809 }
29810 else if (CONST_DOUBLE_P (x)
29811 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29812 {
29813 long k[2];
29814
29815 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29816 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29817 else
29818 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29819
29820 if (TARGET_64BIT)
29821 {
29822 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29823 fputs (DOUBLE_INT_ASM_OP, file);
29824 else
29825 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29826 k[0] & 0xffffffff, k[1] & 0xffffffff);
29827 fprintf (file, "0x%lx%08lx\n",
29828 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29829 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29830 return;
29831 }
29832 else
29833 {
29834 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29835 fputs ("\t.long ", file);
29836 else
29837 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29838 k[0] & 0xffffffff, k[1] & 0xffffffff);
29839 fprintf (file, "0x%lx,0x%lx\n",
29840 k[0] & 0xffffffff, k[1] & 0xffffffff);
29841 return;
29842 }
29843 }
29844 else if (CONST_DOUBLE_P (x)
29845 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29846 {
29847 long l;
29848
29849 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29850 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29851 else
29852 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29853
29854 if (TARGET_64BIT)
29855 {
29856 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29857 fputs (DOUBLE_INT_ASM_OP, file);
29858 else
29859 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29860 if (WORDS_BIG_ENDIAN)
29861 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29862 else
29863 fprintf (file, "0x%lx\n", l & 0xffffffff);
29864 return;
29865 }
29866 else
29867 {
29868 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29869 fputs ("\t.long ", file);
29870 else
29871 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29872 fprintf (file, "0x%lx\n", l & 0xffffffff);
29873 return;
29874 }
29875 }
29876 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
29877 {
29878 unsigned HOST_WIDE_INT low;
29879 HOST_WIDE_INT high;
29880
29881 low = INTVAL (x) & 0xffffffff;
29882 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29883
29884 /* TOC entries are always Pmode-sized, so when big-endian
29885 smaller integer constants in the TOC need to be padded.
29886 (This is still a win over putting the constants in
29887 a separate constant pool, because then we'd have
29888 to have both a TOC entry _and_ the actual constant.)
29889
29890 For a 32-bit target, CONST_INT values are loaded and shifted
29891 entirely within `low' and can be stored in one TOC entry. */
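/* Illustrative example: on a 64-bit big-endian target, a 32-bit
   (SImode) integer constant 0x12345678 is shifted into the most
   significant word below, so the TOC entry is emitted as
   0x1234567800000000.  */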
29892
29893 /* It would be easy to make this work, but it doesn't now. */
29894 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29895
29896 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29897 {
29898 low |= high << 32;
29899 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29900 high = (HOST_WIDE_INT) low >> 32;
29901 low &= 0xffffffff;
29902 }
29903
29904 if (TARGET_64BIT)
29905 {
29906 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29907 fputs (DOUBLE_INT_ASM_OP, file);
29908 else
29909 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29910 (long) high & 0xffffffff, (long) low & 0xffffffff);
29911 fprintf (file, "0x%lx%08lx\n",
29912 (long) high & 0xffffffff, (long) low & 0xffffffff);
29913 return;
29914 }
29915 else
29916 {
29917 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29918 {
29919 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29920 fputs ("\t.long ", file);
29921 else
29922 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29923 (long) high & 0xffffffff, (long) low & 0xffffffff);
29924 fprintf (file, "0x%lx,0x%lx\n",
29925 (long) high & 0xffffffff, (long) low & 0xffffffff);
29926 }
29927 else
29928 {
29929 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29930 fputs ("\t.long ", file);
29931 else
29932 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29933 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29934 }
29935 return;
29936 }
29937 }
29938
29939 if (GET_CODE (x) == CONST)
29940 {
29941 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29942 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
29943
29944 base = XEXP (XEXP (x, 0), 0);
29945 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29946 }
29947
29948 switch (GET_CODE (base))
29949 {
29950 case SYMBOL_REF:
29951 name = XSTR (base, 0);
29952 break;
29953
29954 case LABEL_REF:
29955 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29956 CODE_LABEL_NUMBER (XEXP (base, 0)));
29957 break;
29958
29959 case CODE_LABEL:
29960 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29961 break;
29962
29963 default:
29964 gcc_unreachable ();
29965 }
29966
29967 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29968 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29969 else
29970 {
29971 fputs ("\t.tc ", file);
29972 RS6000_OUTPUT_BASENAME (file, name);
29973
29974 if (offset < 0)
29975 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29976 else if (offset)
29977 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29978
29979 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29980 after other TOC symbols, reducing overflow of small TOC access
29981 to [TC] symbols. */
29982 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29983 ? "[TE]," : "[TC],", file);
29984 }
29985
29986 /* Currently C++ TOC references to vtables can be emitted before it
29987 is decided whether the vtable is public or private.  If this is
29988 the case, then the linker will eventually complain that there is
29989 a TOC reference to an unknown section.  Thus, for vtables only,
29990 we make the TOC reference name the symbol rather than the
29991 section.  */
29992 if (VTABLE_NAME_P (name))
29993 {
29994 RS6000_OUTPUT_BASENAME (file, name);
29995 if (offset < 0)
29996 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29997 else if (offset > 0)
29998 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29999 }
30000 else
30001 output_addr_const (file, x);
30002
30003 #if HAVE_AS_TLS
30004 if (TARGET_XCOFF && SYMBOL_REF_P (base))
30005 {
30006 switch (SYMBOL_REF_TLS_MODEL (base))
30007 {
30008 case 0:
30009 break;
30010 case TLS_MODEL_LOCAL_EXEC:
30011 fputs ("@le", file);
30012 break;
30013 case TLS_MODEL_INITIAL_EXEC:
30014 fputs ("@ie", file);
30015 break;
30016 /* Use global-dynamic for local-dynamic. */
30017 case TLS_MODEL_GLOBAL_DYNAMIC:
30018 case TLS_MODEL_LOCAL_DYNAMIC:
30019 putc ('\n', file);
30020 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30021 fputs ("\t.tc .", file);
30022 RS6000_OUTPUT_BASENAME (file, name);
30023 fputs ("[TC],", file);
30024 output_addr_const (file, x);
30025 fputs ("@m", file);
30026 break;
30027 default:
30028 gcc_unreachable ();
30029 }
30030 }
30031 #endif
30032
30033 putc ('\n', file);
30034 }
30035 \f
30036 /* Output an assembler pseudo-op to write an ASCII string of N characters
30037 starting at P to FILE.
30038
30039 On the RS/6000, we have to do this using the .byte operation and
30040 write out special characters outside the quoted string.
30041 Also, the assembler is broken; very long strings are truncated,
30042 so we must artificially break them up early. */
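/* For example (illustrative input only), the three bytes "Hi\n" are
   emitted as

	.byte "Hi"
	.byte 10

   printable runs share a single quoted .byte string, while all other
   characters are written out as decimal values.  */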
30043
30044 void
30045 output_ascii (FILE *file, const char *p, int n)
30046 {
30047 char c;
30048 int i, count_string;
30049 const char *for_string = "\t.byte \"";
30050 const char *for_decimal = "\t.byte ";
30051 const char *to_close = NULL;
30052
30053 count_string = 0;
30054 for (i = 0; i < n; i++)
30055 {
30056 c = *p++;
30057 if (c >= ' ' && c < 0177)
30058 {
30059 if (for_string)
30060 fputs (for_string, file);
30061 putc (c, file);
30062
30063 /* Write two quotes to get one. */
30064 if (c == '"')
30065 {
30066 putc (c, file);
30067 ++count_string;
30068 }
30069
30070 for_string = NULL;
30071 for_decimal = "\"\n\t.byte ";
30072 to_close = "\"\n";
30073 ++count_string;
30074
30075 if (count_string >= 512)
30076 {
30077 fputs (to_close, file);
30078
30079 for_string = "\t.byte \"";
30080 for_decimal = "\t.byte ";
30081 to_close = NULL;
30082 count_string = 0;
30083 }
30084 }
30085 else
30086 {
30087 if (for_decimal)
30088 fputs (for_decimal, file);
30089 fprintf (file, "%d", c);
30090
30091 for_string = "\n\t.byte \"";
30092 for_decimal = ", ";
30093 to_close = "\n";
30094 count_string = 0;
30095 }
30096 }
30097
30098 /* Now close the string if we have written one. Then end the line. */
30099 if (to_close)
30100 fputs (to_close, file);
30101 }
30102 \f
30103 /* Generate a unique section name for FILENAME for a section type
30104 represented by SECTION_DESC. Output goes into BUF.
30105
30106 SECTION_DESC can be any string, as long as it is different for each
30107 possible section type.
30108
30109 We name the section in the same manner as xlc. The name begins with an
30110 underscore followed by the filename (after stripping any leading directory
30111 names) with the last period replaced by the string SECTION_DESC. If
30112 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30113 the name. */
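/* Worked example (illustrative): FILENAME "src/my-file.c" with
   SECTION_DESC "data" yields "_myfiledata"; the directory prefix and
   the non-alphanumeric '-' are dropped, and the final period is
   replaced by the descriptor.  */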
30114
30115 void
30116 rs6000_gen_section_name (char **buf, const char *filename,
30117 const char *section_desc)
30118 {
30119 const char *q, *after_last_slash, *last_period = 0;
30120 char *p;
30121 int len;
30122
30123 after_last_slash = filename;
30124 for (q = filename; *q; q++)
30125 {
30126 if (*q == '/')
30127 after_last_slash = q + 1;
30128 else if (*q == '.')
30129 last_period = q;
30130 }
30131
30132 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30133 *buf = (char *) xmalloc (len);
30134
30135 p = *buf;
30136 *p++ = '_';
30137
30138 for (q = after_last_slash; *q; q++)
30139 {
30140 if (q == last_period)
30141 {
30142 strcpy (p, section_desc);
30143 p += strlen (section_desc);
30144 break;
30145 }
30146
30147 else if (ISALNUM (*q))
30148 *p++ = *q;
30149 }
30150
30151 if (last_period == 0)
30152 strcpy (p, section_desc);
30153 else
30154 *p = '\0';
30155 }
30156 \f
30157 /* Emit profile function. */
30158
30159 void
30160 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30161 {
30162 /* Non-standard profiling for kernels, which just saves LR then calls
30163 _mcount without worrying about arg saves. The idea is to change
30164 the function prologue as little as possible as it isn't easy to
30165 account for arg save/restore code added just for _mcount. */
30166 if (TARGET_PROFILE_KERNEL)
30167 return;
30168
30169 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30170 {
30171 #ifndef NO_PROFILE_COUNTERS
30172 # define NO_PROFILE_COUNTERS 0
30173 #endif
30174 if (NO_PROFILE_COUNTERS)
30175 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30176 LCT_NORMAL, VOIDmode);
30177 else
30178 {
30179 char buf[30];
30180 const char *label_name;
30181 rtx fun;
30182
30183 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30184 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30185 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30186
30187 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30188 LCT_NORMAL, VOIDmode, fun, Pmode);
30189 }
30190 }
30191 else if (DEFAULT_ABI == ABI_DARWIN)
30192 {
30193 const char *mcount_name = RS6000_MCOUNT;
30194 int caller_addr_regno = LR_REGNO;
30195
30196 /* Be conservative and always set this, at least for now. */
30197 crtl->uses_pic_offset_table = 1;
30198
30199 #if TARGET_MACHO
30200 /* For PIC code, set up a stub and collect the caller's address
30201 from r0, which is where the prologue puts it. */
30202 if (MACHOPIC_INDIRECT
30203 && crtl->uses_pic_offset_table)
30204 caller_addr_regno = 0;
30205 #endif
30206 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30207 LCT_NORMAL, VOIDmode,
30208 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30209 }
30210 }
30211
30212 /* Write function profiler code. */
30213
30214 void
30215 output_function_profiler (FILE *file, int labelno)
30216 {
30217 char buf[100];
30218
30219 switch (DEFAULT_ABI)
30220 {
30221 default:
30222 gcc_unreachable ();
30223
30224 case ABI_V4:
30225 if (!TARGET_32BIT)
30226 {
30227 warning (0, "no profiling of 64-bit code for this ABI");
30228 return;
30229 }
30230 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30231 fprintf (file, "\tmflr %s\n", reg_names[0]);
30232 if (NO_PROFILE_COUNTERS)
30233 {
30234 asm_fprintf (file, "\tstw %s,4(%s)\n",
30235 reg_names[0], reg_names[1]);
30236 }
30237 else if (TARGET_SECURE_PLT && flag_pic)
30238 {
30239 if (TARGET_LINK_STACK)
30240 {
30241 char name[32];
30242 get_ppc476_thunk_name (name);
30243 asm_fprintf (file, "\tbl %s\n", name);
30244 }
30245 else
30246 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30247 asm_fprintf (file, "\tstw %s,4(%s)\n",
30248 reg_names[0], reg_names[1]);
30249 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30250 asm_fprintf (file, "\taddis %s,%s,",
30251 reg_names[12], reg_names[12]);
30252 assemble_name (file, buf);
30253 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30254 assemble_name (file, buf);
30255 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30256 }
30257 else if (flag_pic == 1)
30258 {
30259 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30260 asm_fprintf (file, "\tstw %s,4(%s)\n",
30261 reg_names[0], reg_names[1]);
30262 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30263 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30264 assemble_name (file, buf);
30265 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30266 }
30267 else if (flag_pic > 1)
30268 {
30269 asm_fprintf (file, "\tstw %s,4(%s)\n",
30270 reg_names[0], reg_names[1]);
30271 /* Now, we need to get the address of the label. */
30272 if (TARGET_LINK_STACK)
30273 {
30274 char name[32];
30275 get_ppc476_thunk_name (name);
30276 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30277 assemble_name (file, buf);
30278 fputs ("-.\n1:", file);
30279 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30280 asm_fprintf (file, "\taddi %s,%s,4\n",
30281 reg_names[11], reg_names[11]);
30282 }
30283 else
30284 {
30285 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30286 assemble_name (file, buf);
30287 fputs ("-.\n1:", file);
30288 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30289 }
30290 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30291 reg_names[0], reg_names[11]);
30292 asm_fprintf (file, "\tadd %s,%s,%s\n",
30293 reg_names[0], reg_names[0], reg_names[11]);
30294 }
30295 else
30296 {
30297 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30298 assemble_name (file, buf);
30299 fputs ("@ha\n", file);
30300 asm_fprintf (file, "\tstw %s,4(%s)\n",
30301 reg_names[0], reg_names[1]);
30302 asm_fprintf (file, "\tla %s,", reg_names[0]);
30303 assemble_name (file, buf);
30304 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30305 }
30306
30307 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30308 fprintf (file, "\tbl %s%s\n",
30309 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30310 break;
30311
30312 case ABI_AIX:
30313 case ABI_ELFv2:
30314 case ABI_DARWIN:
30315 /* Don't do anything, done in output_profile_hook (). */
30316 break;
30317 }
30318 }
30319
30320 \f
30321
30322 /* The following variable value is the last issued insn. */
30323
30324 static rtx_insn *last_scheduled_insn;
30325
30326 /* The following variable helps to balance issuing of load and
30327 store instructions.  */
30328
30329 static int load_store_pendulum;
30330
30331 /* The following variable helps pair divide insns during scheduling. */
30332 static int divide_cnt;
30333 /* The following variable helps pair and alternate vector and vector load
30334 insns during scheduling. */
30335 static int vec_pairing;
30336
30337
30338 /* Power4 load update and store update instructions are cracked into a
30339 load or store and an integer insn which are executed in the same cycle.
30340 Branches have their own dispatch slot which does not count against the
30341 GCC issue rate, but it changes the program flow so there are no other
30342 instructions to issue in this cycle. */
30343
30344 static int
30345 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30346 {
30347 last_scheduled_insn = insn;
30348 if (GET_CODE (PATTERN (insn)) == USE
30349 || GET_CODE (PATTERN (insn)) == CLOBBER)
30350 {
30351 cached_can_issue_more = more;
30352 return cached_can_issue_more;
30353 }
30354
30355 if (insn_terminates_group_p (insn, current_group))
30356 {
30357 cached_can_issue_more = 0;
30358 return cached_can_issue_more;
30359 }
30360
30361 /* If the insn has no reservation but we reach here anyway, don't adjust the issue count.  */
30362 if (recog_memoized (insn) < 0)
30363 return more;
30364
30365 if (rs6000_sched_groups)
30366 {
30367 if (is_microcoded_insn (insn))
30368 cached_can_issue_more = 0;
30369 else if (is_cracked_insn (insn))
30370 cached_can_issue_more = more > 2 ? more - 2 : 0;
30371 else
30372 cached_can_issue_more = more - 1;
30373
30374 return cached_can_issue_more;
30375 }
30376
30377 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30378 return 0;
30379
30380 cached_can_issue_more = more - 1;
30381 return cached_can_issue_more;
30382 }
30383
30384 static int
30385 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30386 {
30387 int r = rs6000_variable_issue_1 (insn, more);
30388 if (verbose)
30389 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30390 return r;
30391 }
30392
30393 /* Adjust the cost of a scheduling dependency.  Return the new cost of a
30394 dependency of type DEP_TYPE of INSN on DEP_INSN.  COST is the current cost.  */
30395
30396 static int
30397 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30398 unsigned int)
30399 {
30400 enum attr_type attr_type;
30401
30402 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30403 return cost;
30404
30405 switch (dep_type)
30406 {
30407 case REG_DEP_TRUE:
30408 {
30409 /* Data dependency; DEP_INSN writes a register that INSN reads
30410 some cycles later. */
30411
30412 /* Separate a load from a narrower, dependent store. */
30413 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30414 && GET_CODE (PATTERN (insn)) == SET
30415 && GET_CODE (PATTERN (dep_insn)) == SET
30416 && MEM_P (XEXP (PATTERN (insn), 1))
30417 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30418 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30419 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30420 return cost + 14;
30421
30422 attr_type = get_attr_type (insn);
30423
30424 switch (attr_type)
30425 {
30426 case TYPE_JMPREG:
30427 /* Tell the first scheduling pass about the latency between
30428 a mtctr and bctr (and mtlr and br/blr). The first
30429 scheduling pass will not know about this latency since
30430 the mtctr instruction, which has the latency associated
30431 to it, will be generated by reload. */
30432 return 4;
30433 case TYPE_BRANCH:
30434 /* Leave some extra cycles between a compare and its
30435 dependent branch, to inhibit expensive mispredicts. */
30436 if ((rs6000_tune == PROCESSOR_PPC603
30437 || rs6000_tune == PROCESSOR_PPC604
30438 || rs6000_tune == PROCESSOR_PPC604e
30439 || rs6000_tune == PROCESSOR_PPC620
30440 || rs6000_tune == PROCESSOR_PPC630
30441 || rs6000_tune == PROCESSOR_PPC750
30442 || rs6000_tune == PROCESSOR_PPC7400
30443 || rs6000_tune == PROCESSOR_PPC7450
30444 || rs6000_tune == PROCESSOR_PPCE5500
30445 || rs6000_tune == PROCESSOR_PPCE6500
30446 || rs6000_tune == PROCESSOR_POWER4
30447 || rs6000_tune == PROCESSOR_POWER5
30448 || rs6000_tune == PROCESSOR_POWER7
30449 || rs6000_tune == PROCESSOR_POWER8
30450 || rs6000_tune == PROCESSOR_POWER9
30451 || rs6000_tune == PROCESSOR_CELL)
30452 && recog_memoized (dep_insn)
30453 && (INSN_CODE (dep_insn) >= 0))
30455 switch (get_attr_type (dep_insn))
30456 {
30457 case TYPE_CMP:
30458 case TYPE_FPCOMPARE:
30459 case TYPE_CR_LOGICAL:
30460 return cost + 2;
30461 case TYPE_EXTS:
30462 case TYPE_MUL:
30463 if (get_attr_dot (dep_insn) == DOT_YES)
30464 return cost + 2;
30465 else
30466 break;
30467 case TYPE_SHIFT:
30468 if (get_attr_dot (dep_insn) == DOT_YES
30469 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30470 return cost + 2;
30471 else
30472 break;
30473 default:
30474 break;
30475 }
30476 break;
30477
30478 case TYPE_STORE:
30479 case TYPE_FPSTORE:
30480 if ((rs6000_tune == PROCESSOR_POWER6)
30481 && recog_memoized (dep_insn)
30482 && (INSN_CODE (dep_insn) >= 0))
30483 {
30485 if (GET_CODE (PATTERN (insn)) != SET)
30486 /* If this happens, we have to extend this to schedule
30487 optimally. Return default for now. */
30488 return cost;
30489
30490 /* Adjust the cost for the case where the value written
30491 by a fixed point operation is used as the address
30492 gen value on a store. */
30493 switch (get_attr_type (dep_insn))
30494 {
30495 case TYPE_LOAD:
30496 case TYPE_CNTLZ:
30497 {
30498 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30499 return get_attr_sign_extend (dep_insn)
30500 == SIGN_EXTEND_YES ? 6 : 4;
30501 break;
30502 }
30503 case TYPE_SHIFT:
30504 {
30505 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30506 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30507 6 : 3;
30508 break;
30509 }
30510 case TYPE_INTEGER:
30511 case TYPE_ADD:
30512 case TYPE_LOGICAL:
30513 case TYPE_EXTS:
30514 case TYPE_INSERT:
30515 {
30516 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30517 return 3;
30518 break;
30519 }
30520 case TYPE_STORE:
30521 case TYPE_FPLOAD:
30522 case TYPE_FPSTORE:
30523 {
30524 if (get_attr_update (dep_insn) == UPDATE_YES
30525 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30526 return 3;
30527 break;
30528 }
30529 case TYPE_MUL:
30530 {
30531 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30532 return 17;
30533 break;
30534 }
30535 case TYPE_DIV:
30536 {
30537 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30538 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30539 break;
30540 }
30541 default:
30542 break;
30543 }
30544 }
30545 break;
30546
30547 case TYPE_LOAD:
30548 if ((rs6000_tune == PROCESSOR_POWER6)
30549 && recog_memoized (dep_insn)
30550 && (INSN_CODE (dep_insn) >= 0))
30551 {
30553 /* Adjust the cost for the case where the value written
30554 by a fixed point instruction is used within the address
30555 gen portion of a subsequent load(u)(x).  */
30556 switch (get_attr_type (dep_insn))
30557 {
30558 case TYPE_LOAD:
30559 case TYPE_CNTLZ:
30560 {
30561 if (set_to_load_agen (dep_insn, insn))
30562 return get_attr_sign_extend (dep_insn)
30563 == SIGN_EXTEND_YES ? 6 : 4;
30564 break;
30565 }
30566 case TYPE_SHIFT:
30567 {
30568 if (set_to_load_agen (dep_insn, insn))
30569 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30570 6 : 3;
30571 break;
30572 }
30573 case TYPE_INTEGER:
30574 case TYPE_ADD:
30575 case TYPE_LOGICAL:
30576 case TYPE_EXTS:
30577 case TYPE_INSERT:
30578 {
30579 if (set_to_load_agen (dep_insn, insn))
30580 return 3;
30581 break;
30582 }
30583 case TYPE_STORE:
30584 case TYPE_FPLOAD:
30585 case TYPE_FPSTORE:
30586 {
30587 if (get_attr_update (dep_insn) == UPDATE_YES
30588 && set_to_load_agen (dep_insn, insn))
30589 return 3;
30590 break;
30591 }
30592 case TYPE_MUL:
30593 {
30594 if (set_to_load_agen (dep_insn, insn))
30595 return 17;
30596 break;
30597 }
30598 case TYPE_DIV:
30599 {
30600 if (set_to_load_agen (dep_insn, insn))
30601 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30602 break;
30603 }
30604 default:
30605 break;
30606 }
30607 }
30608 break;
30609
30610 case TYPE_FPLOAD:
30611 if ((rs6000_tune == PROCESSOR_POWER6)
30612 && get_attr_update (insn) == UPDATE_NO
30613 && recog_memoized (dep_insn)
30614 && (INSN_CODE (dep_insn) >= 0)
30615 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30616 return 2;
30617
30618 default:
30619 break;
30620 }
30621
30622 /* Fall out to return default cost. */
30623 }
30624 break;
30625
30626 case REG_DEP_OUTPUT:
30627 /* Output dependency; DEP_INSN writes a register that INSN writes some
30628 cycles later. */
30629 if ((rs6000_tune == PROCESSOR_POWER6)
30630 && recog_memoized (dep_insn)
30631 && (INSN_CODE (dep_insn) >= 0))
30632 {
30633 attr_type = get_attr_type (insn);
30634
30635 switch (attr_type)
30636 {
30637 case TYPE_FP:
30638 case TYPE_FPSIMPLE:
30639 if (get_attr_type (dep_insn) == TYPE_FP
30640 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30641 return 1;
30642 break;
30643 case TYPE_FPLOAD:
30644 if (get_attr_update (insn) == UPDATE_NO
30645 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30646 return 2;
30647 break;
30648 default:
30649 break;
30650 }
30651 }
30652 /* Fall through, no cost for output dependency. */
30653 /* FALLTHRU */
30654
30655 case REG_DEP_ANTI:
30656 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30657 cycles later. */
30658 return 0;
30659
30660 default:
30661 gcc_unreachable ();
30662 }
30663
30664 return cost;
30665 }
30666
30667 /* Debug version of rs6000_adjust_cost. */
30668
30669 static int
30670 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30671 int cost, unsigned int dw)
30672 {
30673 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30674
30675 if (ret != cost)
30676 {
30677 const char *dep;
30678
30679 switch (dep_type)
30680 {
30681 default: dep = "unknown dependency"; break;
30682 case REG_DEP_TRUE: dep = "data dependency"; break;
30683 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30684 case REG_DEP_ANTI: dep = "anti dependency"; break;
30685 }
30686
30687 fprintf (stderr,
30688 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30689 "%s, insn:\n", ret, cost, dep);
30690
30691 debug_rtx (insn);
30692 }
30693
30694 return ret;
30695 }
30696
30697 /* The function returns true if INSN is microcoded.
30698 Return false otherwise. */
30699
30700 static bool
30701 is_microcoded_insn (rtx_insn *insn)
30702 {
30703 if (!insn || !NONDEBUG_INSN_P (insn)
30704 || GET_CODE (PATTERN (insn)) == USE
30705 || GET_CODE (PATTERN (insn)) == CLOBBER)
30706 return false;
30707
30708 if (rs6000_tune == PROCESSOR_CELL)
30709 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30710
30711 if (rs6000_sched_groups
30712 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30713 {
30714 enum attr_type type = get_attr_type (insn);
30715 if ((type == TYPE_LOAD
30716 && get_attr_update (insn) == UPDATE_YES
30717 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30718 || ((type == TYPE_LOAD || type == TYPE_STORE)
30719 && get_attr_update (insn) == UPDATE_YES
30720 && get_attr_indexed (insn) == INDEXED_YES)
30721 || type == TYPE_MFCR)
30722 return true;
30723 }
30724
30725 return false;
30726 }
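
/* For instance, a load with update and indexed addressing such as
   lwzux writes two GPRs; on Power4/Power5 it is split into microcode,
   and the dispatch-group logic below then forces it into a group of
   its own (it must be both first and last in its group).  */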
30727
30728 /* The function returns true if INSN is cracked into 2 instructions
30729 by the processor (and therefore occupies 2 issue slots). */
30730
30731 static bool
30732 is_cracked_insn (rtx_insn *insn)
30733 {
30734 if (!insn || !NONDEBUG_INSN_P (insn)
30735 || GET_CODE (PATTERN (insn)) == USE
30736 || GET_CODE (PATTERN (insn)) == CLOBBER)
30737 return false;
30738
30739 if (rs6000_sched_groups
30740 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30741 {
30742 enum attr_type type = get_attr_type (insn);
30743 if ((type == TYPE_LOAD
30744 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30745 && get_attr_update (insn) == UPDATE_NO)
30746 || (type == TYPE_LOAD
30747 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30748 && get_attr_update (insn) == UPDATE_YES
30749 && get_attr_indexed (insn) == INDEXED_NO)
30750 || (type == TYPE_STORE
30751 && get_attr_update (insn) == UPDATE_YES
30752 && get_attr_indexed (insn) == INDEXED_NO)
30753 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30754 && get_attr_update (insn) == UPDATE_YES)
30755 || (type == TYPE_CR_LOGICAL
30756 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30757 || (type == TYPE_EXTS
30758 && get_attr_dot (insn) == DOT_YES)
30759 || (type == TYPE_SHIFT
30760 && get_attr_dot (insn) == DOT_YES
30761 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30762 || (type == TYPE_MUL
30763 && get_attr_dot (insn) == DOT_YES)
30764 || type == TYPE_DIV
30765 || (type == TYPE_INSERT
30766 && get_attr_size (insn) == SIZE_32))
30767 return true;
30768 }
30769
30770 return false;
30771 }
30772
30773 /* The function returns true if INSN can be issued only from
30774 the branch slot. */
30775
30776 static bool
30777 is_branch_slot_insn (rtx_insn *insn)
30778 {
30779 if (!insn || !NONDEBUG_INSN_P (insn)
30780 || GET_CODE (PATTERN (insn)) == USE
30781 || GET_CODE (PATTERN (insn)) == CLOBBER)
30782 return false;
30783
30784 if (rs6000_sched_groups)
30785 {
30786 enum attr_type type = get_attr_type (insn);
30787 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30788 return true;
30789 return false;
30790 }
30791
30792 return false;
30793 }
30794
30795 /* Return true if OUT_INSN sets a value that is used in the
30796    address generation computation of IN_INSN.  */
30797 static bool
30798 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30799 {
30800 rtx out_set, in_set;
30801
30802   /* For performance reasons, only handle the simple case where
30803      both insns are a single_set.  */
30804 out_set = single_set (out_insn);
30805 if (out_set)
30806 {
30807 in_set = single_set (in_insn);
30808 if (in_set)
30809 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30810 }
30811
30812 return false;
30813 }
30814
30815 /* Try to determine the base/offset/size parts of the given MEM.
30816    Return true if successful, false if the values cannot all be
30817    determined.
30818 
30819    This function only looks for REG or REG+CONST address forms;
30820    a REG+REG address form makes it return false.  */
30821
30822 static bool
30823 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30824 HOST_WIDE_INT *size)
30825 {
30826 rtx addr_rtx;
30827   if (MEM_SIZE_KNOWN_P (mem))
30828 *size = MEM_SIZE (mem);
30829 else
30830 return false;
30831
30832 addr_rtx = (XEXP (mem, 0));
30833 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30834 addr_rtx = XEXP (addr_rtx, 1);
30835
30836 *offset = 0;
30837 while (GET_CODE (addr_rtx) == PLUS
30838 && CONST_INT_P (XEXP (addr_rtx, 1)))
30839 {
30840 *offset += INTVAL (XEXP (addr_rtx, 1));
30841 addr_rtx = XEXP (addr_rtx, 0);
30842 }
30843 if (!REG_P (addr_rtx))
30844 return false;
30845
30846 *base = addr_rtx;
30847 return true;
30848 }
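
/* For example, a MEM like (mem:DI (plus (reg 9) (const_int 8))) with a
   known 8-byte size decomposes into *base = (reg 9), *offset = 8 and
   *size = 8, while an indexed address such as (plus (reg 9) (reg 10))
   makes get_memref_parts return false.  */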
30849
30850 /* Return true if the target storage location of MEM1 is adjacent
30851    to the target storage location of MEM2.  */
30853
30854 static bool
30855 adjacent_mem_locations (rtx mem1, rtx mem2)
30856 {
30857 rtx reg1, reg2;
30858 HOST_WIDE_INT off1, size1, off2, size2;
30859
30860 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30861 && get_memref_parts (mem2, &reg2, &off2, &size2))
30862 return ((REGNO (reg1) == REGNO (reg2))
30863 && ((off1 + size1 == off2)
30864 || (off2 + size2 == off1)));
30865
30866 return false;
30867 }
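
/* E.g. two 8-byte accesses off the same base register at offsets 0 and 8
   are adjacent (0 + 8 == 8); the test is symmetric, so the order of MEM1
   and MEM2 does not matter.  */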
30868
30869 /* This function returns true if it can be determined that the two MEM
30870 locations overlap by at least 1 byte based on base reg/offset/size. */
30871
30872 static bool
30873 mem_locations_overlap (rtx mem1, rtx mem2)
30874 {
30875 rtx reg1, reg2;
30876 HOST_WIDE_INT off1, size1, off2, size2;
30877
30878 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30879 && get_memref_parts (mem2, &reg2, &off2, &size2))
30880 return ((REGNO (reg1) == REGNO (reg2))
30881 && (((off1 <= off2) && (off1 + size1 > off2))
30882 || ((off2 <= off1) && (off2 + size2 > off1))));
30883
30884 return false;
30885 }
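
/* In interval terms this checks whether the half-open ranges
   [OFF1, OFF1 + SIZE1) and [OFF2, OFF2 + SIZE2) intersect; e.g. an
   8-byte access at offset 4 overlaps a 4-byte access at offset 8 off
   the same base register.  */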
30886
30887 /* A C statement (sans semicolon) to update the integer scheduling
30888 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30889 INSN earlier, reduce the priority to execute INSN later. Do not
30890 define this macro if you do not need to adjust the scheduling
30891 priorities of insns. */
30892
30893 static int
30894 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30895 {
30896 rtx load_mem, str_mem;
30897 /* On machines (like the 750) which have asymmetric integer units,
30898 where one integer unit can do multiply and divides and the other
30899 can't, reduce the priority of multiply/divide so it is scheduled
30900 before other integer operations. */
30901
30902 #if 0
30903 if (! INSN_P (insn))
30904 return priority;
30905
30906 if (GET_CODE (PATTERN (insn)) == USE)
30907 return priority;
30908
30909 switch (rs6000_tune) {
30910 case PROCESSOR_PPC750:
30911 switch (get_attr_type (insn))
30912 {
30913 default:
30914 break;
30915
30916 case TYPE_MUL:
30917 case TYPE_DIV:
30918 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30919 priority, priority);
30920 if (priority >= 0 && priority < 0x01000000)
30921 priority >>= 3;
30922 break;
30923 }
30924 }
30925 #endif
30926
30927 if (insn_must_be_first_in_group (insn)
30928 && reload_completed
30929 && current_sched_info->sched_max_insns_priority
30930 && rs6000_sched_restricted_insns_priority)
30931     {
30933 /* Prioritize insns that can be dispatched only in the first
30934 dispatch slot. */
30935 if (rs6000_sched_restricted_insns_priority == 1)
30936 /* Attach highest priority to insn. This means that in
30937 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30938 precede 'priority' (critical path) considerations. */
30939 return current_sched_info->sched_max_insns_priority;
30940 else if (rs6000_sched_restricted_insns_priority == 2)
30941 /* Increase priority of insn by a minimal amount. This means that in
30942 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30943 considerations precede dispatch-slot restriction considerations. */
30944 return (priority + 1);
30945 }
30946
30947 if (rs6000_tune == PROCESSOR_POWER6
30948 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30949 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30950 /* Attach highest priority to insn if the scheduler has just issued two
30951 stores and this instruction is a load, or two loads and this instruction
30952 is a store. Power6 wants loads and stores scheduled alternately
30953      when possible.  */
30954 return current_sched_info->sched_max_insns_priority;
30955
30956 return priority;
30957 }
30958
30959 /* Return true if the instruction is nonpipelined on the Cell. */
30960 static bool
30961 is_nonpipeline_insn (rtx_insn *insn)
30962 {
30963 enum attr_type type;
30964 if (!insn || !NONDEBUG_INSN_P (insn)
30965 || GET_CODE (PATTERN (insn)) == USE
30966 || GET_CODE (PATTERN (insn)) == CLOBBER)
30967 return false;
30968
30969 type = get_attr_type (insn);
30970 if (type == TYPE_MUL
30971 || type == TYPE_DIV
30972 || type == TYPE_SDIV
30973 || type == TYPE_DDIV
30974 || type == TYPE_SSQRT
30975 || type == TYPE_DSQRT
30976 || type == TYPE_MFCR
30977 || type == TYPE_MFCRF
30978 || type == TYPE_MFJMPR)
30979 {
30980 return true;
30981 }
30982 return false;
30983 }
30984
30985
30986 /* Return how many instructions the machine can issue per cycle. */
30987
30988 static int
30989 rs6000_issue_rate (void)
30990 {
30991 /* Unless scheduling for register pressure, use issue rate of 1 for
30992 first scheduling pass to decrease degradation. */
30993 if (!reload_completed && !flag_sched_pressure)
30994 return 1;
30995
30996 switch (rs6000_tune) {
30997 case PROCESSOR_RS64A:
30998 case PROCESSOR_PPC601: /* ? */
30999 case PROCESSOR_PPC7450:
31000 return 3;
31001 case PROCESSOR_PPC440:
31002 case PROCESSOR_PPC603:
31003 case PROCESSOR_PPC750:
31004 case PROCESSOR_PPC7400:
31005 case PROCESSOR_PPC8540:
31006 case PROCESSOR_PPC8548:
31007 case PROCESSOR_CELL:
31008 case PROCESSOR_PPCE300C2:
31009 case PROCESSOR_PPCE300C3:
31010 case PROCESSOR_PPCE500MC:
31011 case PROCESSOR_PPCE500MC64:
31012 case PROCESSOR_PPCE5500:
31013 case PROCESSOR_PPCE6500:
31014 case PROCESSOR_TITAN:
31015 return 2;
31016 case PROCESSOR_PPC476:
31017 case PROCESSOR_PPC604:
31018 case PROCESSOR_PPC604e:
31019 case PROCESSOR_PPC620:
31020 case PROCESSOR_PPC630:
31021 return 4;
31022 case PROCESSOR_POWER4:
31023 case PROCESSOR_POWER5:
31024 case PROCESSOR_POWER6:
31025 case PROCESSOR_POWER7:
31026 return 5;
31027 case PROCESSOR_POWER8:
31028 return 7;
31029 case PROCESSOR_POWER9:
31030 return 6;
31031 default:
31032 return 1;
31033 }
31034 }
31035
31036 /* Return how many instructions to look ahead for better insn
31037 scheduling. */
31038
31039 static int
31040 rs6000_use_sched_lookahead (void)
31041 {
31042 switch (rs6000_tune)
31043 {
31044 case PROCESSOR_PPC8540:
31045 case PROCESSOR_PPC8548:
31046 return 4;
31047
31048 case PROCESSOR_CELL:
31049 return (reload_completed ? 8 : 0);
31050
31051 default:
31052 return 0;
31053 }
31054 }
31055
31056 /* We are choosing an insn from the ready queue.  Return zero if INSN
31057    can be chosen.  */
31058 static int
31059 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31060 {
31061 if (ready_index == 0)
31062 return 0;
31063
31064 if (rs6000_tune != PROCESSOR_CELL)
31065 return 0;
31066
31067 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31068
31069 if (!reload_completed
31070 || is_nonpipeline_insn (insn)
31071 || is_microcoded_insn (insn))
31072 return 1;
31073
31074 return 0;
31075 }
31076
31077 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31078 and return true. */
31079
31080 static bool
31081 find_mem_ref (rtx pat, rtx *mem_ref)
31082 {
31083 const char * fmt;
31084 int i, j;
31085
31086 /* stack_tie does not produce any real memory traffic. */
31087 if (tie_operand (pat, VOIDmode))
31088 return false;
31089
31090 if (MEM_P (pat))
31091 {
31092 *mem_ref = pat;
31093 return true;
31094 }
31095
31096 /* Recursively process the pattern. */
31097 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31098
31099 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31100 {
31101 if (fmt[i] == 'e')
31102 {
31103 if (find_mem_ref (XEXP (pat, i), mem_ref))
31104 return true;
31105 }
31106 else if (fmt[i] == 'E')
31107 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31108 {
31109 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31110 return true;
31111 }
31112 }
31113
31114 return false;
31115 }
31116
31117 /* Determine if PAT is a PATTERN of a load insn. */
31118
31119 static bool
31120 is_load_insn1 (rtx pat, rtx *load_mem)
31121 {
31122   if (!pat)
31123 return false;
31124
31125 if (GET_CODE (pat) == SET)
31126 return find_mem_ref (SET_SRC (pat), load_mem);
31127
31128 if (GET_CODE (pat) == PARALLEL)
31129 {
31130 int i;
31131
31132 for (i = 0; i < XVECLEN (pat, 0); i++)
31133 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31134 return true;
31135 }
31136
31137 return false;
31138 }
31139
31140 /* Determine if INSN loads from memory. */
31141
31142 static bool
31143 is_load_insn (rtx insn, rtx *load_mem)
31144 {
31145 if (!insn || !INSN_P (insn))
31146 return false;
31147
31148 if (CALL_P (insn))
31149 return false;
31150
31151 return is_load_insn1 (PATTERN (insn), load_mem);
31152 }
31153
31154 /* Determine if PAT is a PATTERN of a store insn. */
31155
31156 static bool
31157 is_store_insn1 (rtx pat, rtx *str_mem)
31158 {
31159   if (!pat)
31160 return false;
31161
31162 if (GET_CODE (pat) == SET)
31163 return find_mem_ref (SET_DEST (pat), str_mem);
31164
31165 if (GET_CODE (pat) == PARALLEL)
31166 {
31167 int i;
31168
31169 for (i = 0; i < XVECLEN (pat, 0); i++)
31170 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31171 return true;
31172 }
31173
31174 return false;
31175 }
31176
31177 /* Determine if INSN stores to memory. */
31178
31179 static bool
31180 is_store_insn (rtx insn, rtx *str_mem)
31181 {
31182 if (!insn || !INSN_P (insn))
31183 return false;
31184
31185 return is_store_insn1 (PATTERN (insn), str_mem);
31186 }
31187
31188 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31189
31190 static bool
31191 is_power9_pairable_vec_type (enum attr_type type)
31192 {
31193 switch (type)
31194 {
31195 case TYPE_VECSIMPLE:
31196 case TYPE_VECCOMPLEX:
31197 case TYPE_VECDIV:
31198 case TYPE_VECCMP:
31199 case TYPE_VECPERM:
31200 case TYPE_VECFLOAT:
31201 case TYPE_VECFDIV:
31202 case TYPE_VECDOUBLE:
31203 return true;
31204 default:
31205 break;
31206 }
31207 return false;
31208 }
31209
31210 /* Returns whether the dependence between INSN and NEXT is considered
31211 costly by the given target. */
31212
31213 static bool
31214 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31215 {
31216 rtx insn;
31217 rtx next;
31218 rtx load_mem, str_mem;
31219
31220 /* If the flag is not enabled - no dependence is considered costly;
31221 allow all dependent insns in the same group.
31222 This is the most aggressive option. */
31223 if (rs6000_sched_costly_dep == no_dep_costly)
31224 return false;
31225
31226 /* If the flag is set to 1 - a dependence is always considered costly;
31227 do not allow dependent instructions in the same group.
31228 This is the most conservative option. */
31229 if (rs6000_sched_costly_dep == all_deps_costly)
31230 return true;
31231
31232 insn = DEP_PRO (dep);
31233 next = DEP_CON (dep);
31234
31235 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31236 && is_load_insn (next, &load_mem)
31237 && is_store_insn (insn, &str_mem))
31238 /* Prevent load after store in the same group. */
31239 return true;
31240
31241 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31242 && is_load_insn (next, &load_mem)
31243 && is_store_insn (insn, &str_mem)
31244 && DEP_TYPE (dep) == REG_DEP_TRUE
31245       && mem_locations_overlap (str_mem, load_mem))
31246 /* Prevent load after store in the same group if it is a true
31247 dependence. */
31248 return true;
31249
31250 /* The flag is set to X; dependences with latency >= X are considered costly,
31251 and will not be scheduled in the same group. */
31252 if (rs6000_sched_costly_dep <= max_dep_latency
31253 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31254 return true;
31255
31256 return false;
31257 }
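
/* For example, under -msched-costly-dep=store_to_load any dependent
   store/load pair is kept in separate dispatch groups, while a numeric
   setting such as 2 only separates dependences whose remaining latency
   (cost - distance) is at least 2.  */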
31258
31259 /* Return the next insn after INSN that is found before TAIL is reached,
31260 skipping any "non-active" insns - insns that will not actually occupy
31261 an issue slot. Return NULL_RTX if such an insn is not found. */
31262
31263 static rtx_insn *
31264 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31265 {
31266 if (insn == NULL_RTX || insn == tail)
31267 return NULL;
31268
31269 while (1)
31270 {
31271 insn = NEXT_INSN (insn);
31272 if (insn == NULL_RTX || insn == tail)
31273 return NULL;
31274
31275 if (CALL_P (insn)
31276 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31277 || (NONJUMP_INSN_P (insn)
31278 && GET_CODE (PATTERN (insn)) != USE
31279 && GET_CODE (PATTERN (insn)) != CLOBBER
31280 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31281 break;
31282 }
31283 return insn;
31284 }
31285
31286 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31287
31288 static int
31289 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31290 {
31291 int pos;
31292 int i;
31293 rtx_insn *tmp;
31294 enum attr_type type, type2;
31295
31296 type = get_attr_type (last_scheduled_insn);
31297
31298 /* Try to issue fixed point divides back-to-back in pairs so they will be
31299 routed to separate execution units and execute in parallel. */
31300 if (type == TYPE_DIV && divide_cnt == 0)
31301 {
31302 /* First divide has been scheduled. */
31303 divide_cnt = 1;
31304
31305 /* Scan the ready list looking for another divide, if found move it
31306 to the end of the list so it is chosen next. */
31307 pos = lastpos;
31308 while (pos >= 0)
31309 {
31310 if (recog_memoized (ready[pos]) >= 0
31311 && get_attr_type (ready[pos]) == TYPE_DIV)
31312 {
31313 tmp = ready[pos];
31314 for (i = pos; i < lastpos; i++)
31315 ready[i] = ready[i + 1];
31316 ready[lastpos] = tmp;
31317 break;
31318 }
31319 pos--;
31320 }
31321 }
31322 else
31323 {
31324 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31325 divide_cnt = 0;
31326
31327 /* The best dispatch throughput for vector and vector load insns can be
31328 achieved by interleaving a vector and vector load such that they'll
31329 dispatch to the same superslice. If this pairing cannot be achieved
31330 then it is best to pair vector insns together and vector load insns
31331 together.
31332
31333 To aid in this pairing, vec_pairing maintains the current state with
31334 the following values:
31335
31336 0 : Initial state, no vecload/vector pairing has been started.
31337
31338 1 : A vecload or vector insn has been issued and a candidate for
31339 pairing has been found and moved to the end of the ready
31340 list. */
31341 if (type == TYPE_VECLOAD)
31342 {
31343 /* Issued a vecload. */
31344 if (vec_pairing == 0)
31345 {
31346 int vecload_pos = -1;
31347 /* We issued a single vecload, look for a vector insn to pair it
31348 with. If one isn't found, try to pair another vecload. */
31349 pos = lastpos;
31350 while (pos >= 0)
31351 {
31352 if (recog_memoized (ready[pos]) >= 0)
31353 {
31354 type2 = get_attr_type (ready[pos]);
31355 if (is_power9_pairable_vec_type (type2))
31356 {
31357 /* Found a vector insn to pair with, move it to the
31358 end of the ready list so it is scheduled next. */
31359 tmp = ready[pos];
31360 for (i = pos; i < lastpos; i++)
31361 ready[i] = ready[i + 1];
31362 ready[lastpos] = tmp;
31363 vec_pairing = 1;
31364 return cached_can_issue_more;
31365 }
31366 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31367 /* Remember position of first vecload seen. */
31368 vecload_pos = pos;
31369 }
31370 pos--;
31371 }
31372 if (vecload_pos >= 0)
31373 {
31374 /* Didn't find a vector to pair with but did find a vecload,
31375 move it to the end of the ready list. */
31376 tmp = ready[vecload_pos];
31377 for (i = vecload_pos; i < lastpos; i++)
31378 ready[i] = ready[i + 1];
31379 ready[lastpos] = tmp;
31380 vec_pairing = 1;
31381 return cached_can_issue_more;
31382 }
31383 }
31384 }
31385 else if (is_power9_pairable_vec_type (type))
31386 {
31387 /* Issued a vector operation. */
31388 if (vec_pairing == 0)
31389 {
31390 int vec_pos = -1;
31391 /* We issued a single vector insn, look for a vecload to pair it
31392 with. If one isn't found, try to pair another vector. */
31393 pos = lastpos;
31394 while (pos >= 0)
31395 {
31396 if (recog_memoized (ready[pos]) >= 0)
31397 {
31398 type2 = get_attr_type (ready[pos]);
31399 if (type2 == TYPE_VECLOAD)
31400 {
31401 /* Found a vecload insn to pair with, move it to the
31402 end of the ready list so it is scheduled next. */
31403 tmp = ready[pos];
31404 for (i = pos; i < lastpos; i++)
31405 ready[i] = ready[i + 1];
31406 ready[lastpos] = tmp;
31407 vec_pairing = 1;
31408 return cached_can_issue_more;
31409 }
31410 else if (is_power9_pairable_vec_type (type2)
31411 && vec_pos == -1)
31412 /* Remember position of first vector insn seen. */
31413 vec_pos = pos;
31414 }
31415 pos--;
31416 }
31417 if (vec_pos >= 0)
31418 {
31419 /* Didn't find a vecload to pair with but did find a vector
31420 insn, move it to the end of the ready list. */
31421 tmp = ready[vec_pos];
31422 for (i = vec_pos; i < lastpos; i++)
31423 ready[i] = ready[i + 1];
31424 ready[lastpos] = tmp;
31425 vec_pairing = 1;
31426 return cached_can_issue_more;
31427 }
31428 }
31429 }
31430
31431 /* We've either finished a vec/vecload pair, couldn't find an insn to
31432 	     continue the current pair, or the last insn had nothing to do
31433 	     with pairing.  In any case, reset the state.  */
31434 vec_pairing = 0;
31435 }
31436
31437 return cached_can_issue_more;
31438 }
31439
31440 /* We are about to begin issuing insns for this clock cycle. */
31441
31442 static int
31443 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31444 rtx_insn **ready ATTRIBUTE_UNUSED,
31445 int *pn_ready ATTRIBUTE_UNUSED,
31446 int clock_var ATTRIBUTE_UNUSED)
31447 {
31448 int n_ready = *pn_ready;
31449
31450 if (sched_verbose)
31451 fprintf (dump, "// rs6000_sched_reorder :\n");
31452
31453   /* Reorder the ready list if the second-to-last ready insn
31454      is a nonpipeline insn.  */
31455 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31456 {
31457 if (is_nonpipeline_insn (ready[n_ready - 1])
31458 && (recog_memoized (ready[n_ready - 2]) > 0))
31459 /* Simply swap first two insns. */
31460 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31461 }
31462
31463 if (rs6000_tune == PROCESSOR_POWER6)
31464 load_store_pendulum = 0;
31465
31466 return rs6000_issue_rate ();
31467 }
31468
31469 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31470
31471 static int
31472 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31473 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31474 {
31475 if (sched_verbose)
31476 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31477
31478   /* For Power6, we need to handle some special cases to try to keep the
31479 store queue from overflowing and triggering expensive flushes.
31480
31481 This code monitors how load and store instructions are being issued
31482 and skews the ready list one way or the other to increase the likelihood
31483 that a desired instruction is issued at the proper time.
31484
31485 A couple of things are done. First, we maintain a "load_store_pendulum"
31486 to track the current state of load/store issue.
31487
31488 - If the pendulum is at zero, then no loads or stores have been
31489 issued in the current cycle so we do nothing.
31490
31491 - If the pendulum is 1, then a single load has been issued in this
31492 cycle and we attempt to locate another load in the ready list to
31493 issue with it.
31494
31495 - If the pendulum is -2, then two stores have already been
31496 issued in this cycle, so we increase the priority of the first load
31497        in the ready list to increase its likelihood of being chosen first
31498 in the next cycle.
31499
31500 - If the pendulum is -1, then a single store has been issued in this
31501 cycle and we attempt to locate another store in the ready list to
31502 issue with it, preferring a store to an adjacent memory location to
31503 facilitate store pairing in the store queue.
31504
31505 - If the pendulum is 2, then two loads have already been
31506 issued in this cycle, so we increase the priority of the first store
31507        in the ready list to increase its likelihood of being chosen first
31508 in the next cycle.
31509
31510 - If the pendulum < -2 or > 2, then do nothing.
31511
31512      Note: This code covers the most common scenarios.  There exist
31513            non-load/store instructions which make use of the LSU and which
31514            would need to be accounted for to strictly model the behavior
31515            of the machine.  Those instructions are currently unaccounted
31516            for, to help minimize the compile-time overhead of this code.
31517            (A short illustrative trace follows this comment.)  */
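  /* Illustrative trace (a sketch of the rules above): issuing two stores
     in one cycle moves the pendulum 0 -> -1 -> -2.  At -1 we scan the
     ready list for a second store, preferring one adjacent to the first;
     at -2 the first load found on the ready list gets a priority bump so
     the next cycle leans back toward loads.  */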
31518 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31519 {
31520 int pos;
31521 int i;
31522 rtx_insn *tmp;
31523 rtx load_mem, str_mem;
31524
31525 if (is_store_insn (last_scheduled_insn, &str_mem))
31526 /* Issuing a store, swing the load_store_pendulum to the left */
31527 load_store_pendulum--;
31528 else if (is_load_insn (last_scheduled_insn, &load_mem))
31529 /* Issuing a load, swing the load_store_pendulum to the right */
31530 load_store_pendulum++;
31531 else
31532 return cached_can_issue_more;
31533
31534 /* If the pendulum is balanced, or there is only one instruction on
31535 the ready list, then all is well, so return. */
31536 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31537 return cached_can_issue_more;
31538
31539 if (load_store_pendulum == 1)
31540 {
31541 /* A load has been issued in this cycle. Scan the ready list
31542 for another load to issue with it */
31543 pos = *pn_ready-1;
31544
31545 while (pos >= 0)
31546 {
31547 if (is_load_insn (ready[pos], &load_mem))
31548 {
31549 /* Found a load. Move it to the head of the ready list,
31550 		 and adjust its priority so that it is more likely to
31551 stay there */
31552 tmp = ready[pos];
31553 for (i=pos; i<*pn_ready-1; i++)
31554 ready[i] = ready[i + 1];
31555 ready[*pn_ready-1] = tmp;
31556
31557 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31558 INSN_PRIORITY (tmp)++;
31559 break;
31560 }
31561 pos--;
31562 }
31563 }
31564 else if (load_store_pendulum == -2)
31565 {
31566 /* Two stores have been issued in this cycle. Increase the
31567 priority of the first load in the ready list to favor it for
31568 issuing in the next cycle. */
31569 pos = *pn_ready-1;
31570
31571 while (pos >= 0)
31572 {
31573 if (is_load_insn (ready[pos], &load_mem)
31574 && !sel_sched_p ()
31575 && INSN_PRIORITY_KNOWN (ready[pos]))
31576 {
31577 INSN_PRIORITY (ready[pos])++;
31578
31579 /* Adjust the pendulum to account for the fact that a load
31580 was found and increased in priority. This is to prevent
31581 increasing the priority of multiple loads */
31582 load_store_pendulum--;
31583
31584 break;
31585 }
31586 pos--;
31587 }
31588 }
31589 else if (load_store_pendulum == -1)
31590 {
31591 /* A store has been issued in this cycle. Scan the ready list for
31592 another store to issue with it, preferring a store to an adjacent
31593 memory location */
31594 int first_store_pos = -1;
31595
31596 pos = *pn_ready-1;
31597
31598 while (pos >= 0)
31599 {
31600 if (is_store_insn (ready[pos], &str_mem))
31601 {
31602 rtx str_mem2;
31603 /* Maintain the index of the first store found on the
31604 list */
31605 if (first_store_pos == -1)
31606 first_store_pos = pos;
31607
31608 if (is_store_insn (last_scheduled_insn, &str_mem2)
31609 && adjacent_mem_locations (str_mem, str_mem2))
31610 {
31611 /* Found an adjacent store. Move it to the head of the
31612 		     ready list, and adjust its priority so that it is
31613 more likely to stay there */
31614 tmp = ready[pos];
31615 for (i=pos; i<*pn_ready-1; i++)
31616 ready[i] = ready[i + 1];
31617 ready[*pn_ready-1] = tmp;
31618
31619 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31620 INSN_PRIORITY (tmp)++;
31621
31622 first_store_pos = -1;
31623
31624 break;
31625 		}
31626 }
31627 pos--;
31628 }
31629
31630 if (first_store_pos >= 0)
31631 {
31632 /* An adjacent store wasn't found, but a non-adjacent store was,
31633 so move the non-adjacent store to the front of the ready
31634 list, and adjust its priority so that it is more likely to
31635 stay there. */
31636 tmp = ready[first_store_pos];
31637 for (i=first_store_pos; i<*pn_ready-1; i++)
31638 ready[i] = ready[i + 1];
31639 ready[*pn_ready-1] = tmp;
31640 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31641 INSN_PRIORITY (tmp)++;
31642 }
31643 }
31644 else if (load_store_pendulum == 2)
31645 {
31646 /* Two loads have been issued in this cycle. Increase the priority
31647 of the first store in the ready list to favor it for issuing in
31648 the next cycle. */
31649 pos = *pn_ready-1;
31650
31651 while (pos >= 0)
31652 {
31653 if (is_store_insn (ready[pos], &str_mem)
31654 && !sel_sched_p ()
31655 && INSN_PRIORITY_KNOWN (ready[pos]))
31656 {
31657 INSN_PRIORITY (ready[pos])++;
31658
31659 /* Adjust the pendulum to account for the fact that a store
31660 was found and increased in priority. This is to prevent
31661 increasing the priority of multiple stores */
31662 load_store_pendulum++;
31663
31664 break;
31665 }
31666 pos--;
31667 }
31668 }
31669 }
31670
31671 /* Do Power9 dependent reordering if necessary. */
31672 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31673 && recog_memoized (last_scheduled_insn) >= 0)
31674 return power9_sched_reorder2 (ready, *pn_ready - 1);
31675
31676 return cached_can_issue_more;
31677 }
31678
31679 /* Return whether the presence of INSN causes a dispatch group termination
31680 of group WHICH_GROUP.
31681
31682    If WHICH_GROUP == current_group, this function will return true if INSN
31683    causes the termination of the current group (i.e., the dispatch group to
31684    which INSN belongs).  This means that INSN will be the last insn in the
31685    group it belongs to.
31686 
31687    If WHICH_GROUP == previous_group, this function will return true if INSN
31688    causes the termination of the previous group (i.e., the dispatch group that
31689    precedes the group to which INSN belongs).  This means that INSN will be
31690    the first insn in the group it belongs to.  */
31691
31692 static bool
31693 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31694 {
31695 bool first, last;
31696
31697 if (! insn)
31698 return false;
31699
31700 first = insn_must_be_first_in_group (insn);
31701 last = insn_must_be_last_in_group (insn);
31702
31703 if (first && last)
31704 return true;
31705
31706 if (which_group == current_group)
31707 return last;
31708 else if (which_group == previous_group)
31709 return first;
31710
31711 return false;
31712 }
31713
31714
31715 static bool
31716 insn_must_be_first_in_group (rtx_insn *insn)
31717 {
31718 enum attr_type type;
31719
31720 if (!insn
31721 || NOTE_P (insn)
31722 || DEBUG_INSN_P (insn)
31723 || GET_CODE (PATTERN (insn)) == USE
31724 || GET_CODE (PATTERN (insn)) == CLOBBER)
31725 return false;
31726
31727 switch (rs6000_tune)
31728 {
31729 case PROCESSOR_POWER5:
31730 if (is_cracked_insn (insn))
31731 return true;
31732 /* FALLTHRU */
31733 case PROCESSOR_POWER4:
31734 if (is_microcoded_insn (insn))
31735 return true;
31736
31737 if (!rs6000_sched_groups)
31738 return false;
31739
31740 type = get_attr_type (insn);
31741
31742 switch (type)
31743 {
31744 case TYPE_MFCR:
31745 case TYPE_MFCRF:
31746 case TYPE_MTCR:
31747 case TYPE_CR_LOGICAL:
31748 case TYPE_MTJMPR:
31749 case TYPE_MFJMPR:
31750 case TYPE_DIV:
31751 case TYPE_LOAD_L:
31752 case TYPE_STORE_C:
31753 case TYPE_ISYNC:
31754 case TYPE_SYNC:
31755 return true;
31756 default:
31757 break;
31758 }
31759 break;
31760 case PROCESSOR_POWER6:
31761 type = get_attr_type (insn);
31762
31763 switch (type)
31764 {
31765 case TYPE_EXTS:
31766 case TYPE_CNTLZ:
31767 case TYPE_TRAP:
31768 case TYPE_MUL:
31769 case TYPE_INSERT:
31770 case TYPE_FPCOMPARE:
31771 case TYPE_MFCR:
31772 case TYPE_MTCR:
31773 case TYPE_MFJMPR:
31774 case TYPE_MTJMPR:
31775 case TYPE_ISYNC:
31776 case TYPE_SYNC:
31777 case TYPE_LOAD_L:
31778 case TYPE_STORE_C:
31779 return true;
31780 case TYPE_SHIFT:
31781 if (get_attr_dot (insn) == DOT_NO
31782 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31783 return true;
31784 else
31785 break;
31786 case TYPE_DIV:
31787 if (get_attr_size (insn) == SIZE_32)
31788 return true;
31789 else
31790 break;
31791 case TYPE_LOAD:
31792 case TYPE_STORE:
31793 case TYPE_FPLOAD:
31794 case TYPE_FPSTORE:
31795 if (get_attr_update (insn) == UPDATE_YES)
31796 return true;
31797 else
31798 break;
31799 default:
31800 break;
31801 }
31802 break;
31803 case PROCESSOR_POWER7:
31804 type = get_attr_type (insn);
31805
31806 switch (type)
31807 {
31808 case TYPE_CR_LOGICAL:
31809 case TYPE_MFCR:
31810 case TYPE_MFCRF:
31811 case TYPE_MTCR:
31812 case TYPE_DIV:
31813 case TYPE_ISYNC:
31814 case TYPE_LOAD_L:
31815 case TYPE_STORE_C:
31816 case TYPE_MFJMPR:
31817 case TYPE_MTJMPR:
31818 return true;
31819 case TYPE_MUL:
31820 case TYPE_SHIFT:
31821 case TYPE_EXTS:
31822 if (get_attr_dot (insn) == DOT_YES)
31823 return true;
31824 else
31825 break;
31826 case TYPE_LOAD:
31827 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31828 || get_attr_update (insn) == UPDATE_YES)
31829 return true;
31830 else
31831 break;
31832 case TYPE_STORE:
31833 case TYPE_FPLOAD:
31834 case TYPE_FPSTORE:
31835 if (get_attr_update (insn) == UPDATE_YES)
31836 return true;
31837 else
31838 break;
31839 default:
31840 break;
31841 }
31842 break;
31843 case PROCESSOR_POWER8:
31844 type = get_attr_type (insn);
31845
31846 switch (type)
31847 {
31848 case TYPE_CR_LOGICAL:
31849 case TYPE_MFCR:
31850 case TYPE_MFCRF:
31851 case TYPE_MTCR:
31852 case TYPE_SYNC:
31853 case TYPE_ISYNC:
31854 case TYPE_LOAD_L:
31855 case TYPE_STORE_C:
31856 case TYPE_VECSTORE:
31857 case TYPE_MFJMPR:
31858 case TYPE_MTJMPR:
31859 return true;
31860 case TYPE_SHIFT:
31861 case TYPE_EXTS:
31862 case TYPE_MUL:
31863 if (get_attr_dot (insn) == DOT_YES)
31864 return true;
31865 else
31866 break;
31867 case TYPE_LOAD:
31868 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31869 || get_attr_update (insn) == UPDATE_YES)
31870 return true;
31871 else
31872 break;
31873 case TYPE_STORE:
31874 if (get_attr_update (insn) == UPDATE_YES
31875 && get_attr_indexed (insn) == INDEXED_YES)
31876 return true;
31877 else
31878 break;
31879 default:
31880 break;
31881 }
31882 break;
31883 default:
31884 break;
31885 }
31886
31887 return false;
31888 }
31889
31890 static bool
31891 insn_must_be_last_in_group (rtx_insn *insn)
31892 {
31893 enum attr_type type;
31894
31895 if (!insn
31896 || NOTE_P (insn)
31897 || DEBUG_INSN_P (insn)
31898 || GET_CODE (PATTERN (insn)) == USE
31899 || GET_CODE (PATTERN (insn)) == CLOBBER)
31900 return false;
31901
31902 switch (rs6000_tune) {
31903 case PROCESSOR_POWER4:
31904 case PROCESSOR_POWER5:
31905 if (is_microcoded_insn (insn))
31906 return true;
31907
31908 if (is_branch_slot_insn (insn))
31909 return true;
31910
31911 break;
31912 case PROCESSOR_POWER6:
31913 type = get_attr_type (insn);
31914
31915 switch (type)
31916 {
31917 case TYPE_EXTS:
31918 case TYPE_CNTLZ:
31919 case TYPE_TRAP:
31920 case TYPE_MUL:
31921 case TYPE_FPCOMPARE:
31922 case TYPE_MFCR:
31923 case TYPE_MTCR:
31924 case TYPE_MFJMPR:
31925 case TYPE_MTJMPR:
31926 case TYPE_ISYNC:
31927 case TYPE_SYNC:
31928 case TYPE_LOAD_L:
31929 case TYPE_STORE_C:
31930 return true;
31931 case TYPE_SHIFT:
31932 if (get_attr_dot (insn) == DOT_NO
31933 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31934 return true;
31935 else
31936 break;
31937 case TYPE_DIV:
31938 if (get_attr_size (insn) == SIZE_32)
31939 return true;
31940 else
31941 break;
31942 default:
31943 break;
31944 }
31945 break;
31946 case PROCESSOR_POWER7:
31947 type = get_attr_type (insn);
31948
31949 switch (type)
31950 {
31951 case TYPE_ISYNC:
31952 case TYPE_SYNC:
31953 case TYPE_LOAD_L:
31954 case TYPE_STORE_C:
31955 return true;
31956 case TYPE_LOAD:
31957 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31958 && get_attr_update (insn) == UPDATE_YES)
31959 return true;
31960 else
31961 break;
31962 case TYPE_STORE:
31963 if (get_attr_update (insn) == UPDATE_YES
31964 && get_attr_indexed (insn) == INDEXED_YES)
31965 return true;
31966 else
31967 break;
31968 default:
31969 break;
31970 }
31971 break;
31972 case PROCESSOR_POWER8:
31973 type = get_attr_type (insn);
31974
31975 switch (type)
31976 {
31977 case TYPE_MFCR:
31978 case TYPE_MTCR:
31979 case TYPE_ISYNC:
31980 case TYPE_SYNC:
31981 case TYPE_LOAD_L:
31982 case TYPE_STORE_C:
31983 return true;
31984 case TYPE_LOAD:
31985 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31986 && get_attr_update (insn) == UPDATE_YES)
31987 return true;
31988 else
31989 break;
31990 case TYPE_STORE:
31991 if (get_attr_update (insn) == UPDATE_YES
31992 && get_attr_indexed (insn) == INDEXED_YES)
31993 return true;
31994 else
31995 break;
31996 default:
31997 break;
31998 }
31999 break;
32000 default:
32001 break;
32002 }
32003
32004 return false;
32005 }
32006
32007 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32008 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32009
32010 static bool
32011 is_costly_group (rtx *group_insns, rtx next_insn)
32012 {
32013 int i;
32014 int issue_rate = rs6000_issue_rate ();
32015
32016 for (i = 0; i < issue_rate; i++)
32017 {
32018 sd_iterator_def sd_it;
32019 dep_t dep;
32020 rtx insn = group_insns[i];
32021
32022 if (!insn)
32023 continue;
32024
32025 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32026 {
32027 rtx next = DEP_CON (dep);
32028
32029 if (next == next_insn
32030 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32031 return true;
32032 }
32033 }
32034
32035 return false;
32036 }
32037
32038 /* Helper for the function redefine_groups.
32039 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32040 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32041 to keep it "far" (in a separate group) from GROUP_INSNS, following
32042 one of the following schemes, depending on the value of the flag
32043 -minsert_sched_nops = X:
32044 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32045 in order to force NEXT_INSN into a separate group.
32046 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32047 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32048 insertion (has a group just ended, how many vacant issue slots remain in the
32049    last group, and how many dispatch groups were encountered so far).
         An example of scheme (1) follows this comment.  */
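/* For instance (a sketch of scheme (1)): with issue_rate == 5 and three
   vacant slots, two nops are inserted when NEXT_INSN is not a branch
   (the branch slot itself never receives a nop); on Power6/7/8 a single
   group-ending nop closes the group instead.  */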
32050
32051 static int
32052 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32053 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32054 int *group_count)
32055 {
32056 rtx nop;
32057 bool force;
32058 int issue_rate = rs6000_issue_rate ();
32059 bool end = *group_end;
32060 int i;
32061
32062 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32063 return can_issue_more;
32064
32065 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32066 return can_issue_more;
32067
32068 force = is_costly_group (group_insns, next_insn);
32069 if (!force)
32070 return can_issue_more;
32071
32072 if (sched_verbose > 6)
32073 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
32074 *group_count ,can_issue_more);
32075
32076 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32077 {
32078 if (*group_end)
32079 can_issue_more = 0;
32080
32081 /* Since only a branch can be issued in the last issue_slot, it is
32082 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32083 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32084 in this case the last nop will start a new group and the branch
32085 will be forced to the new group. */
32086 if (can_issue_more && !is_branch_slot_insn (next_insn))
32087 can_issue_more--;
32088
32089 /* Do we have a special group ending nop? */
32090 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32091 || rs6000_tune == PROCESSOR_POWER8)
32092 {
32093 nop = gen_group_ending_nop ();
32094 emit_insn_before (nop, next_insn);
32095 can_issue_more = 0;
32096 }
32097 else
32098 while (can_issue_more > 0)
32099 {
32100 nop = gen_nop ();
32101 emit_insn_before (nop, next_insn);
32102 can_issue_more--;
32103 }
32104
32105 *group_end = true;
32106 return 0;
32107 }
32108
32109 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32110 {
32111 int n_nops = rs6000_sched_insert_nops;
32112
32113 /* Nops can't be issued from the branch slot, so the effective
32114 issue_rate for nops is 'issue_rate - 1'. */
32115 if (can_issue_more == 0)
32116 can_issue_more = issue_rate;
32117 can_issue_more--;
32118 if (can_issue_more == 0)
32119 {
32120 can_issue_more = issue_rate - 1;
32121 (*group_count)++;
32122 end = true;
32123 for (i = 0; i < issue_rate; i++)
32124 {
32125 group_insns[i] = 0;
32126 }
32127 }
32128
32129 while (n_nops > 0)
32130 {
32131 nop = gen_nop ();
32132 emit_insn_before (nop, next_insn);
32133 if (can_issue_more == issue_rate - 1) /* new group begins */
32134 end = false;
32135 can_issue_more--;
32136 if (can_issue_more == 0)
32137 {
32138 can_issue_more = issue_rate - 1;
32139 (*group_count)++;
32140 end = true;
32141 for (i = 0; i < issue_rate; i++)
32142 {
32143 group_insns[i] = 0;
32144 }
32145 }
32146 n_nops--;
32147 }
32148
32149 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32150 can_issue_more++;
32151
32152 /* Is next_insn going to start a new group? */
32153 *group_end
32154 = (end
32155 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32156 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32157 || (can_issue_more < issue_rate &&
32158 insn_terminates_group_p (next_insn, previous_group)));
32159 if (*group_end && end)
32160 (*group_count)--;
32161
32162 if (sched_verbose > 6)
32163 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32164 *group_count, can_issue_more);
32165 return can_issue_more;
32166 }
32167
32168 return can_issue_more;
32169 }
32170
32171 /* This function tries to sync the dispatch groups that the compiler "sees"
32172    with the dispatch groups that the processor dispatcher is expected to
32173    form in practice.  It tries to achieve this synchronization by forcing the
32174    estimated processor grouping on the compiler (as opposed to the function
32175    'pad_groups', which tries to force the scheduler's grouping on the processor).
32176
32177 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32178 examines the (estimated) dispatch groups that will be formed by the processor
32179 dispatcher. It marks these group boundaries to reflect the estimated
32180 processor grouping, overriding the grouping that the scheduler had marked.
32181 Depending on the value of the flag '-minsert-sched-nops' this function can
32182 force certain insns into separate groups or force a certain distance between
32183 them by inserting nops, for example, if there exists a "costly dependence"
32184 between the insns.
32185
32186 The function estimates the group boundaries that the processor will form as
32187 follows: It keeps track of how many vacant issue slots are available after
32188 each insn. A subsequent insn will start a new group if one of the following
32189 4 cases applies:
32190 - no more vacant issue slots remain in the current dispatch group.
32191 - only the last issue slot, which is the branch slot, is vacant, but the next
32192 insn is not a branch.
32193    - only the last 2 or fewer issue slots, including the branch slot, are
32194      vacant, which means that a cracked insn (which occupies two issue slots)
32195      can't be issued in this group.
32196    - fewer than 'issue_rate' slots are vacant, and the next insn always needs
32197      to start a new group (an illustration follows this comment).  */
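/* As an illustration of the last two cases: with issue_rate == 5, once
   four slots are filled only the branch slot remains, so a cracked insn
   cannot fit and starts a new group; likewise an insn that must be first
   in its group (an isync, for example) starts a new group whenever the
   current one is already partly filled.  */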
32198
32199 static int
32200 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32201 rtx_insn *tail)
32202 {
32203 rtx_insn *insn, *next_insn;
32204 int issue_rate;
32205 int can_issue_more;
32206 int slot, i;
32207 bool group_end;
32208 int group_count = 0;
32209 rtx *group_insns;
32210
32211 /* Initialize. */
32212 issue_rate = rs6000_issue_rate ();
32213 group_insns = XALLOCAVEC (rtx, issue_rate);
32214 for (i = 0; i < issue_rate; i++)
32215 {
32216 group_insns[i] = 0;
32217 }
32218 can_issue_more = issue_rate;
32219 slot = 0;
32220 insn = get_next_active_insn (prev_head_insn, tail);
32221 group_end = false;
32222
32223 while (insn != NULL_RTX)
32224 {
32225 slot = (issue_rate - can_issue_more);
32226 group_insns[slot] = insn;
32227 can_issue_more =
32228 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32229 if (insn_terminates_group_p (insn, current_group))
32230 can_issue_more = 0;
32231
32232 next_insn = get_next_active_insn (insn, tail);
32233 if (next_insn == NULL_RTX)
32234 return group_count + 1;
32235
32236 /* Is next_insn going to start a new group? */
32237 group_end
32238 = (can_issue_more == 0
32239 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32240 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32241 || (can_issue_more < issue_rate &&
32242 insn_terminates_group_p (next_insn, previous_group)));
32243
32244 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32245 next_insn, &group_end, can_issue_more,
32246 &group_count);
32247
32248 if (group_end)
32249 {
32250 group_count++;
32251 can_issue_more = 0;
32252 for (i = 0; i < issue_rate; i++)
32253 {
32254 group_insns[i] = 0;
32255 }
32256 }
32257
32258 if (GET_MODE (next_insn) == TImode && can_issue_more)
32259 PUT_MODE (next_insn, VOIDmode);
32260 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32261 PUT_MODE (next_insn, TImode);
32262
32263 insn = next_insn;
32264 if (can_issue_more == 0)
32265 can_issue_more = issue_rate;
32266 } /* while */
32267
32268 return group_count;
32269 }
32270
32271 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32272 dispatch group boundaries that the scheduler had marked. Pad with nops
32273 any dispatch groups which have vacant issue slots, in order to force the
32274 scheduler's grouping on the processor dispatcher. The function
32275 returns the number of dispatch groups found. */
32276
32277 static int
32278 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32279 rtx_insn *tail)
32280 {
32281 rtx_insn *insn, *next_insn;
32282 rtx nop;
32283 int issue_rate;
32284 int can_issue_more;
32285 int group_end;
32286 int group_count = 0;
32287
32288 /* Initialize issue_rate. */
32289 issue_rate = rs6000_issue_rate ();
32290 can_issue_more = issue_rate;
32291
32292 insn = get_next_active_insn (prev_head_insn, tail);
32293 next_insn = get_next_active_insn (insn, tail);
32294
32295 while (insn != NULL_RTX)
32296 {
32297 can_issue_more =
32298 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32299
32300 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32301
32302 if (next_insn == NULL_RTX)
32303 break;
32304
32305 if (group_end)
32306 {
32307 /* If the scheduler had marked group termination at this location
32308 (between insn and next_insn), and neither insn nor next_insn will
32309 force group termination, pad the group with nops to force group
32310 termination. */
32311 if (can_issue_more
32312 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32313 && !insn_terminates_group_p (insn, current_group)
32314 && !insn_terminates_group_p (next_insn, previous_group))
32315 {
32316 if (!is_branch_slot_insn (next_insn))
32317 can_issue_more--;
32318
32319 while (can_issue_more)
32320 {
32321 nop = gen_nop ();
32322 emit_insn_before (nop, next_insn);
32323 can_issue_more--;
32324 }
32325 }
32326
32327 can_issue_more = issue_rate;
32328 group_count++;
32329 }
32330
32331 insn = next_insn;
32332 next_insn = get_next_active_insn (insn, tail);
32333 }
32334
32335 return group_count;
32336 }
32337
32338 /* We're beginning a new block. Initialize data structures as necessary. */
32339
32340 static void
32341 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32342 int sched_verbose ATTRIBUTE_UNUSED,
32343 int max_ready ATTRIBUTE_UNUSED)
32344 {
32345 last_scheduled_insn = NULL;
32346 load_store_pendulum = 0;
32347 divide_cnt = 0;
32348 vec_pairing = 0;
32349 }
32350
32351 /* The following function is called at the end of scheduling BB.
32352    After reload, it inserts nops to enforce insn group bundling.  */
32353
32354 static void
32355 rs6000_sched_finish (FILE *dump, int sched_verbose)
32356 {
32357 int n_groups;
32358
32359 if (sched_verbose)
32360 fprintf (dump, "=== Finishing schedule.\n");
32361
32362 if (reload_completed && rs6000_sched_groups)
32363 {
32364 /* Do not run sched_finish hook when selective scheduling enabled. */
32365 if (sel_sched_p ())
32366 return;
32367
32368 if (rs6000_sched_insert_nops == sched_finish_none)
32369 return;
32370
32371 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32372 n_groups = pad_groups (dump, sched_verbose,
32373 current_sched_info->prev_head,
32374 current_sched_info->next_tail);
32375 else
32376 n_groups = redefine_groups (dump, sched_verbose,
32377 current_sched_info->prev_head,
32378 current_sched_info->next_tail);
32379
32380 if (sched_verbose >= 6)
32381 {
32382 fprintf (dump, "ngroups = %d\n", n_groups);
32383 print_rtl (dump, current_sched_info->prev_head);
32384 fprintf (dump, "Done finish_sched\n");
32385 }
32386 }
32387 }
32388
32389 struct rs6000_sched_context
32390 {
32391 short cached_can_issue_more;
32392 rtx_insn *last_scheduled_insn;
32393 int load_store_pendulum;
32394 int divide_cnt;
32395 int vec_pairing;
32396 };
32397
32398 typedef struct rs6000_sched_context rs6000_sched_context_def;
32399 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32400
32401 /* Allocate store for new scheduling context. */
32402 static void *
32403 rs6000_alloc_sched_context (void)
32404 {
32405 return xmalloc (sizeof (rs6000_sched_context_def));
32406 }
32407
32408 /* If CLEAN_P is true, initialize _SC with clean data;
32409    otherwise initialize it from the global context.  */
32410 static void
32411 rs6000_init_sched_context (void *_sc, bool clean_p)
32412 {
32413 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32414
32415 if (clean_p)
32416 {
32417 sc->cached_can_issue_more = 0;
32418 sc->last_scheduled_insn = NULL;
32419 sc->load_store_pendulum = 0;
32420 sc->divide_cnt = 0;
32421 sc->vec_pairing = 0;
32422 }
32423 else
32424 {
32425 sc->cached_can_issue_more = cached_can_issue_more;
32426 sc->last_scheduled_insn = last_scheduled_insn;
32427 sc->load_store_pendulum = load_store_pendulum;
32428 sc->divide_cnt = divide_cnt;
32429 sc->vec_pairing = vec_pairing;
32430 }
32431 }
32432
32433 /* Sets the global scheduling context to the one pointed to by _SC. */
32434 static void
32435 rs6000_set_sched_context (void *_sc)
32436 {
32437 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32438
32439 gcc_assert (sc != NULL);
32440
32441 cached_can_issue_more = sc->cached_can_issue_more;
32442 last_scheduled_insn = sc->last_scheduled_insn;
32443 load_store_pendulum = sc->load_store_pendulum;
32444 divide_cnt = sc->divide_cnt;
32445 vec_pairing = sc->vec_pairing;
32446 }
32447
32448 /* Free _SC. */
32449 static void
32450 rs6000_free_sched_context (void *_sc)
32451 {
32452 gcc_assert (_sc != NULL);
32453
32454 free (_sc);
32455 }
32456
32457 static bool
32458 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32459 {
32460 switch (get_attr_type (insn))
32461 {
32462 case TYPE_DIV:
32463 case TYPE_SDIV:
32464 case TYPE_DDIV:
32465 case TYPE_VECDIV:
32466 case TYPE_SSQRT:
32467 case TYPE_DSQRT:
32468 return false;
32469
32470 default:
32471 return true;
32472 }
32473 }
32474 \f
32475 /* Length in units of the trampoline for entering a nested function. */
32476
32477 int
32478 rs6000_trampoline_size (void)
32479 {
32480 int ret = 0;
32481
32482 switch (DEFAULT_ABI)
32483 {
32484 default:
32485 gcc_unreachable ();
32486
32487 case ABI_AIX:
32488 ret = (TARGET_32BIT) ? 12 : 24;
32489 break;
32490
32491 case ABI_ELFv2:
32492 gcc_assert (!TARGET_32BIT);
32493 ret = 32;
32494 break;
32495
32496 case ABI_DARWIN:
32497 case ABI_V4:
32498 ret = (TARGET_32BIT) ? 40 : 48;
32499 break;
32500 }
32501
32502 return ret;
32503 }
32504
32505 /* Emit RTL insns to initialize the variable parts of a trampoline.
32506 FNADDR is an RTX for the address of the function's pure code.
32507 CXT is an RTX for the static chain value for the function. */
32508
32509 static void
32510 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32511 {
32512 int regsize = (TARGET_32BIT) ? 4 : 8;
32513 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32514 rtx ctx_reg = force_reg (Pmode, cxt);
32515 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32516
32517 switch (DEFAULT_ABI)
32518 {
32519 default:
32520 gcc_unreachable ();
32521
32522       /* Under AIX, just build the 3-word function descriptor.  */
32523 case ABI_AIX:
32524 {
32525 rtx fnmem, fn_reg, toc_reg;
32526
32527 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32528 error ("you cannot take the address of a nested function if you use "
32529 "the %qs option", "-mno-pointers-to-nested-functions");
32530
32531 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32532 fn_reg = gen_reg_rtx (Pmode);
32533 toc_reg = gen_reg_rtx (Pmode);
32534
32535 /* Macro to shorten the code expansions below. */
32536 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32537
32538 m_tramp = replace_equiv_address (m_tramp, addr);
32539
32540 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32541 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32542 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32543 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32544 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32545
32546 # undef MEM_PLUS
32547 }
32548 break;
32549
32550 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32551 case ABI_ELFv2:
32552 case ABI_DARWIN:
32553 case ABI_V4:
32554 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32555 LCT_NORMAL, VOIDmode,
32556 addr, Pmode,
32557 GEN_INT (rs6000_trampoline_size ()), SImode,
32558 fnaddr, Pmode,
32559 ctx_reg, Pmode);
32560 break;
32561 }
32562 }
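
/* For the ABI_AIX case above, the trampoline is itself a 3-word function
   descriptor: word 0 receives FNADDR's code address, word 1 its TOC
   pointer, and word 2 is overwritten with the static chain CXT, so a
   call through the trampoline enters the nested function with its
   static chain in place.  */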
32563
32564 \f
32565 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32566 identifier as an argument, so the front end shouldn't look it up. */
32567
32568 static bool
32569 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32570 {
32571 return is_attribute_p ("altivec", attr_id);
32572 }
32573
32574 /* Handle the "altivec" attribute. The attribute may have
32575 arguments as follows:
32576
32577 __attribute__((altivec(vector__)))
32578 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32579 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32580
32581 and may appear more than once (e.g., 'vector bool char') in a
32582 given declaration. */
32583
32584 static tree
32585 rs6000_handle_altivec_attribute (tree *node,
32586 tree name ATTRIBUTE_UNUSED,
32587 tree args,
32588 int flags ATTRIBUTE_UNUSED,
32589 bool *no_add_attrs)
32590 {
32591 tree type = *node, result = NULL_TREE;
32592 machine_mode mode;
32593 int unsigned_p;
32594 char altivec_type
32595 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32596 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32597 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32598 : '?');
32599
32600 while (POINTER_TYPE_P (type)
32601 || TREE_CODE (type) == FUNCTION_TYPE
32602 || TREE_CODE (type) == METHOD_TYPE
32603 || TREE_CODE (type) == ARRAY_TYPE)
32604 type = TREE_TYPE (type);
32605
32606 mode = TYPE_MODE (type);
32607
32608 /* Check for invalid AltiVec type qualifiers. */
32609 if (type == long_double_type_node)
32610 error ("use of %<long double%> in AltiVec types is invalid");
32611 else if (type == boolean_type_node)
32612 error ("use of boolean types in AltiVec types is invalid");
32613 else if (TREE_CODE (type) == COMPLEX_TYPE)
32614 error ("use of %<complex%> in AltiVec types is invalid");
32615 else if (DECIMAL_FLOAT_MODE_P (mode))
32616 error ("use of decimal floating point types in AltiVec types is invalid");
32617 else if (!TARGET_VSX)
32618 {
32619 if (type == long_unsigned_type_node || type == long_integer_type_node)
32620 {
32621 if (TARGET_64BIT)
32622 error ("use of %<long%> in AltiVec types is invalid for "
32623 "64-bit code without %qs", "-mvsx");
32624 else if (rs6000_warn_altivec_long)
32625 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32626 "use %<int%>");
32627 }
32628 else if (type == long_long_unsigned_type_node
32629 || type == long_long_integer_type_node)
32630 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32631 "-mvsx");
32632 else if (type == double_type_node)
32633 error ("use of %<double%> in AltiVec types is invalid without %qs",
32634 "-mvsx");
32635 }
32636
32637 switch (altivec_type)
32638 {
32639 case 'v':
32640 unsigned_p = TYPE_UNSIGNED (type);
32641 switch (mode)
32642 {
32643 case E_TImode:
32644 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32645 break;
32646 case E_DImode:
32647 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32648 break;
32649 case E_SImode:
32650 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32651 break;
32652 case E_HImode:
32653 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32654 break;
32655 case E_QImode:
32656 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32657 break;
32658 case E_SFmode: result = V4SF_type_node; break;
32659 case E_DFmode: result = V2DF_type_node; break;
32660 /* If the user says 'vector int bool', we may be handed the 'bool'
32661 attribute _before_ the 'vector' attribute, and so select the
32662 proper type in the 'b' case below. */
32663 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32664 case E_V2DImode: case E_V2DFmode:
32665 result = type;
32666 default: break;
32667 }
32668 break;
32669 case 'b':
32670 switch (mode)
32671 {
32672 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32673 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32674 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32675 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32676 default: break;
32677 }
32678 break;
32679 case 'p':
32680 switch (mode)
32681 {
32682 case E_V8HImode: result = pixel_V8HI_type_node;
32683 default: break;
32684 }
32685 default: break;
32686 }
32687
32688 /* Propagate qualifiers attached to the element type
32689 onto the vector type. */
32690 if (result && result != type && TYPE_QUALS (type))
32691 result = build_qualified_type (result, TYPE_QUALS (type));
32692
32693 *no_add_attrs = true; /* No need to hang on to the attribute. */
32694
32695 if (result)
32696 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32697
32698 return NULL_TREE;
32699 }
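/* Illustrative use (a sketch; the front ends implement the AltiVec
   keywords by applying this attribute, so the two declarations below
   should be equivalent):

	__attribute__((altivec(vector__))) unsigned int v1;
	vector unsigned int v2;

   Both reach the 'v'/E_SImode arm above and select
   unsigned_V4SI_type_node, with qualifiers propagated from the
   element type.  */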
32700
32701 /* AltiVec defines five built-in scalar types that serve as vector
32702 elements; we must teach the compiler how to mangle them. The 128-bit
32703 floating point mangling is target-specific as well. */
32704
32705 static const char *
32706 rs6000_mangle_type (const_tree type)
32707 {
32708 type = TYPE_MAIN_VARIANT (type);
32709
32710 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32711 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32712 return NULL;
32713
32714 if (type == bool_char_type_node) return "U6__boolc";
32715 if (type == bool_short_type_node) return "U6__bools";
32716 if (type == pixel_type_node) return "u7__pixel";
32717 if (type == bool_int_type_node) return "U6__booli";
32718 if (type == bool_long_long_type_node) return "U6__boolx";
32719
32720 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32721 return "g";
32722 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32723 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32724
32725 /* For all other types, use the default mangling. */
32726 return NULL;
32727 }
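/* Examples of the mapping above (hedged; the strings themselves are
   dictated by the AltiVec extensions to the Itanium C++ ABI): an
   argument of type __bool int mangles as "U6__booli", __pixel as
   "u7__pixel", IBM double-double long double as "g", and __ieee128 as
   "u9__ieee128" unless the GCC 8.1 compatibility mangling is in
   force.  */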
32728
32729 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32730 struct attribute_spec.handler. */
32731
32732 static tree
32733 rs6000_handle_longcall_attribute (tree *node, tree name,
32734 tree args ATTRIBUTE_UNUSED,
32735 int flags ATTRIBUTE_UNUSED,
32736 bool *no_add_attrs)
32737 {
32738 if (TREE_CODE (*node) != FUNCTION_TYPE
32739 && TREE_CODE (*node) != FIELD_DECL
32740 && TREE_CODE (*node) != TYPE_DECL)
32741 {
32742 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32743 name);
32744 *no_add_attrs = true;
32745 }
32746
32747 return NULL_TREE;
32748 }
32749
32750 /* Set longcall attributes on all functions declared when
32751 rs6000_default_long_calls is true. */
32752 static void
32753 rs6000_set_default_type_attributes (tree type)
32754 {
32755 if (rs6000_default_long_calls
32756 && (TREE_CODE (type) == FUNCTION_TYPE
32757 || TREE_CODE (type) == METHOD_TYPE))
32758 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32759 NULL_TREE,
32760 TYPE_ATTRIBUTES (type));
32761
32762 #if TARGET_MACHO
32763 darwin_set_default_type_attributes (type);
32764 #endif
32765 }
32766
32767 /* Return a reference suitable for calling a function with the
32768 longcall attribute. */
32769
32770 static rtx
32771 rs6000_longcall_ref (rtx call_ref, rtx arg)
32772 {
32773 /* System V adds '.' to the internal name, so skip any leading dots. */
32774 const char *call_name = XSTR (call_ref, 0);
32775 if (*call_name == '.')
32776 {
32777 while (*call_name == '.')
32778 call_name++;
32779
32780 tree node = get_identifier (call_name);
32781 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32782 }
32783
32784 if (HAVE_AS_PLTSEQ
32785 && (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4))
32786 {
32787 rtx base = const0_rtx;
32788 int regno;
32789 if (DEFAULT_ABI == ABI_ELFv2)
32790 {
32791 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32792 regno = 12;
32793 }
32794 else
32795 {
32796 if (flag_pic)
32797 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32798 regno = 11;
32799 }
32800 /* The register must match the one used by the linker's PLT stubs.
32801 For ELFv2, r12 may be used by the function's global entry point.
32802 For SysV4, r11 is used by the __glink_PLTresolve lazy resolver entry. */
32803 rtx reg = gen_rtx_REG (Pmode, regno);
32804 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32805 UNSPEC_PLT16_HA);
32806 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32807 UNSPEC_PLT16_LO);
32808 emit_insn (gen_rtx_SET (reg, hi));
32809 emit_insn (gen_rtx_SET (reg, lo));
32810 return reg;
32811 }
32812
32813 return force_reg (Pmode, call_ref);
32814 }
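/* A rough sketch of what the two PLT16 UNSPECs above become for a
   64-bit ELFv2 longcall to "func", assuming HAVE_AS_PLTSEQ (the exact
   relocations and the final indirect branch are emitted by the call
   patterns, not here):

	addis 12,2,func@plt@ha		# UNSPEC_PLT16_HA, base is r2
	ld 12,func@plt@l(12)		# UNSPEC_PLT16_LO
	mtctr 12
	bctrl

   SysV4 uses the same shape with r11, taking the GOT pointer as the
   base when PIC.  */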
32815 \f
32816 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32817 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32818 #endif
32819
32820 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32821 struct attribute_spec.handler. */
32822 static tree
32823 rs6000_handle_struct_attribute (tree *node, tree name,
32824 tree args ATTRIBUTE_UNUSED,
32825 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32826 {
32827 tree *type = NULL;
32828 if (DECL_P (*node))
32829 {
32830 if (TREE_CODE (*node) == TYPE_DECL)
32831 type = &TREE_TYPE (*node);
32832 }
32833 else
32834 type = node;
32835
32836 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32837 || TREE_CODE (*type) == UNION_TYPE)))
32838 {
32839 warning (OPT_Wattributes, "%qE attribute ignored", name);
32840 *no_add_attrs = true;
32841 }
32842
32843 else if ((is_attribute_p ("ms_struct", name)
32844 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32845 || ((is_attribute_p ("gcc_struct", name)
32846 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32847 {
32848 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32849 name);
32850 *no_add_attrs = true;
32851 }
32852
32853 return NULL_TREE;
32854 }
32855
32856 static bool
32857 rs6000_ms_bitfield_layout_p (const_tree record_type)
32858 {
32859 return ((TARGET_USE_MS_BITFIELD_LAYOUT
32860 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32861 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32862 }
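/* Usage sketch:

	struct __attribute__((ms_struct)) A { int i : 4; char c; };
	struct __attribute__((gcc_struct)) B { int i : 4; char c; };

   A is laid out with the MS bitfield rules even when
   TARGET_USE_MS_BITFIELD_LAYOUT is off, B always gets the GCC rules,
   and combining both attributes on one type is diagnosed by the
   handler above.  */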
32863 \f
32864 #ifdef USING_ELFOS_H
32865
32866 /* A get_unnamed_section callback, used for switching to toc_section. */
32867
32868 static void
32869 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32870 {
32871 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32872 && TARGET_MINIMAL_TOC)
32873 {
32874 if (!toc_initialized)
32875 {
32876 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32877 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32878 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32879 fprintf (asm_out_file, "\t.tc ");
32880 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32881 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32882 fprintf (asm_out_file, "\n");
32883
32884 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32885 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32886 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32887 fprintf (asm_out_file, " = .+32768\n");
32888 toc_initialized = 1;
32889 }
32890 else
32891 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32892 }
32893 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32894 {
32895 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32896 if (!toc_initialized)
32897 {
32898 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32899 toc_initialized = 1;
32900 }
32901 }
32902 else
32903 {
32904 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32905 if (!toc_initialized)
32906 {
32907 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32908 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32909 fprintf (asm_out_file, " = .+32768\n");
32910 toc_initialized = 1;
32911 }
32912 }
32913 }
32914
32915 /* Implement TARGET_ASM_INIT_SECTIONS. */
32916
32917 static void
32918 rs6000_elf_asm_init_sections (void)
32919 {
32920 toc_section
32921 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32922
32923 sdata2_section
32924 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32925 SDATA2_SECTION_ASM_OP);
32926 }
32927
32928 /* Implement TARGET_SELECT_RTX_SECTION. */
32929
32930 static section *
32931 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32932 unsigned HOST_WIDE_INT align)
32933 {
32934 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32935 return toc_section;
32936 else
32937 return default_elf_select_rtx_section (mode, x, align);
32938 }
32939 \f
32940 /* For a SYMBOL_REF, set generic flags and then perform some
32941 target-specific processing.
32942
32943 When the AIX ABI is requested on a non-AIX system, replace the
32944 function name with the real name (with a leading .) rather than the
32945 function descriptor name. This saves a lot of overriding code to
32946 read the prefixes. */
32947
32948 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32949 static void
32950 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32951 {
32952 default_encode_section_info (decl, rtl, first);
32953
32954 if (first
32955 && TREE_CODE (decl) == FUNCTION_DECL
32956 && !TARGET_AIX
32957 && DEFAULT_ABI == ABI_AIX)
32958 {
32959 rtx sym_ref = XEXP (rtl, 0);
32960 size_t len = strlen (XSTR (sym_ref, 0));
32961 char *str = XALLOCAVEC (char, len + 2);
32962 str[0] = '.';
32963 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32964 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32965 }
32966 }
32967
32968 static inline bool
32969 compare_section_name (const char *section, const char *templ)
32970 {
32971 int len;
32972
32973 len = strlen (templ);
32974 return (strncmp (section, templ, len) == 0
32975 && (section[len] == 0 || section[len] == '.'));
32976 }
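/* For example, compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are true, while
   compare_section_name (".sdata2", ".sdata") is false, which is why
   ".sdata2" is tested separately below.  */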
32977
32978 bool
32979 rs6000_elf_in_small_data_p (const_tree decl)
32980 {
32981 if (rs6000_sdata == SDATA_NONE)
32982 return false;
32983
32984 /* We want to merge strings, so we never consider them small data. */
32985 if (TREE_CODE (decl) == STRING_CST)
32986 return false;
32987
32988 /* Functions are never in the small data area. */
32989 if (TREE_CODE (decl) == FUNCTION_DECL)
32990 return false;
32991
32992 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32993 {
32994 const char *section = DECL_SECTION_NAME (decl);
32995 if (compare_section_name (section, ".sdata")
32996 || compare_section_name (section, ".sdata2")
32997 || compare_section_name (section, ".gnu.linkonce.s")
32998 || compare_section_name (section, ".sbss")
32999 || compare_section_name (section, ".sbss2")
33000 || compare_section_name (section, ".gnu.linkonce.sb")
33001 || strcmp (section, ".PPC.EMB.sdata0") == 0
33002 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33003 return true;
33004 }
33005 else
33006 {
33007 /* If we are told not to put readonly data in sdata, then don't. */
33008 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
33009 && !rs6000_readonly_in_sdata)
33010 return false;
33011
33012 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33013
33014 if (size > 0
33015 && size <= g_switch_value
33016 /* If it's not public, and we're not going to reference it through the
33017 small data area, there's no need to put it in the small data section. */
33018 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33019 return true;
33020 }
33021
33022 return false;
33023 }
33024
33025 #endif /* USING_ELFOS_H */
33026 \f
33027 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33028
33029 static bool
33030 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33031 {
33032 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33033 }
33034
33035 /* Do not place thread-local symbols refs in the object blocks. */
33036
33037 static bool
33038 rs6000_use_blocks_for_decl_p (const_tree decl)
33039 {
33040 return !DECL_THREAD_LOCAL_P (decl);
33041 }
33042 \f
33043 /* Return a REG that occurs in ADDR with coefficient 1.
33044 ADDR can be effectively incremented by incrementing REG.
33045
33046 r0 is special and we must not select it as an address
33047 register by this routine since our caller will try to
33048 increment the returned register via an "la" instruction. */
33049
33050 rtx
33051 find_addr_reg (rtx addr)
33052 {
33053 while (GET_CODE (addr) == PLUS)
33054 {
33055 if (REG_P (XEXP (addr, 0))
33056 && REGNO (XEXP (addr, 0)) != 0)
33057 addr = XEXP (addr, 0);
33058 else if (REG_P (XEXP (addr, 1))
33059 && REGNO (XEXP (addr, 1)) != 0)
33060 addr = XEXP (addr, 1);
33061 else if (CONSTANT_P (XEXP (addr, 0)))
33062 addr = XEXP (addr, 1);
33063 else if (CONSTANT_P (XEXP (addr, 1)))
33064 addr = XEXP (addr, 0);
33065 else
33066 gcc_unreachable ();
33067 }
33068 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
33069 return addr;
33070 }
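/* Worked example: for ADDR = (plus (reg 9) (const_int 8)) the
   constant arm is dropped and (reg 9) is returned; for
   ADDR = (plus (plus (reg 0) (reg 9)) (const_int 8)) the inner PLUS
   is walked and (reg 9) is again returned, since r0 must be
   avoided.  */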
33071
33072 void
33073 rs6000_fatal_bad_address (rtx op)
33074 {
33075 fatal_insn ("bad address", op);
33076 }
33077
33078 #if TARGET_MACHO
33079
33080 typedef struct branch_island_d {
33081 tree function_name;
33082 tree label_name;
33083 int line_number;
33084 } branch_island;
33085
33086
33087 static vec<branch_island, va_gc> *branch_islands;
33088
33089 /* Remember to generate a branch island for far calls to the given
33090 function. */
33091
33092 static void
33093 add_compiler_branch_island (tree label_name, tree function_name,
33094 int line_number)
33095 {
33096 branch_island bi = {function_name, label_name, line_number};
33097 vec_safe_push (branch_islands, bi);
33098 }
33099
33100 /* Generate far-jump branch islands for everything recorded in
33101 branch_islands. Invoked immediately after the last instruction of
33102 the epilogue has been emitted; the branch islands must be appended
33103 to, and contiguous with, the function body. Mach-O stubs are
33104 generated in machopic_output_stub(). */
33105
33106 static void
33107 macho_branch_islands (void)
33108 {
33109 char tmp_buf[512];
33110
33111 while (!vec_safe_is_empty (branch_islands))
33112 {
33113 branch_island *bi = &branch_islands->last ();
33114 const char *label = IDENTIFIER_POINTER (bi->label_name);
33115 const char *name = IDENTIFIER_POINTER (bi->function_name);
33116 char name_buf[512];
33117 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33118 if (name[0] == '*' || name[0] == '&')
33119 strcpy (name_buf, name+1);
33120 else
33121 {
33122 name_buf[0] = '_';
33123 strcpy (name_buf+1, name);
33124 }
33125 strcpy (tmp_buf, "\n");
33126 strcat (tmp_buf, label);
33127 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33128 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33129 dbxout_stabd (N_SLINE, bi->line_number);
33130 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33131 if (flag_pic)
33132 {
33133 if (TARGET_LINK_STACK)
33134 {
33135 char name[32];
33136 get_ppc476_thunk_name (name);
33137 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33138 strcat (tmp_buf, name);
33139 strcat (tmp_buf, "\n");
33140 strcat (tmp_buf, label);
33141 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33142 }
33143 else
33144 {
33145 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33146 strcat (tmp_buf, label);
33147 strcat (tmp_buf, "_pic\n");
33148 strcat (tmp_buf, label);
33149 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33150 }
33151
33152 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33153 strcat (tmp_buf, name_buf);
33154 strcat (tmp_buf, " - ");
33155 strcat (tmp_buf, label);
33156 strcat (tmp_buf, "_pic)\n");
33157
33158 strcat (tmp_buf, "\tmtlr r0\n");
33159
33160 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33161 strcat (tmp_buf, name_buf);
33162 strcat (tmp_buf, " - ");
33163 strcat (tmp_buf, label);
33164 strcat (tmp_buf, "_pic)\n");
33165
33166 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33167 }
33168 else
33169 {
33170 strcat (tmp_buf, ":\nlis r12,hi16(");
33171 strcat (tmp_buf, name_buf);
33172 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33173 strcat (tmp_buf, name_buf);
33174 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33175 }
33176 output_asm_insn (tmp_buf, 0);
33177 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33178 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33179 dbxout_stabd (N_SLINE, bi->line_number);
33180 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33181 branch_islands->pop ();
33182 }
33183 }
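/* A rendered sketch of one island, for a far call to "foo" recorded
   with label "L42" in non-PIC code (the PIC variants first
   materialize the island's own address with an mflr/bcl/mflr
   sequence):

	L42:
	lis r12,hi16(_foo)
	ori r12,r12,lo16(_foo)
	mtctr r12
	bctr
   */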
33184
33185 /* NO_PREVIOUS_DEF checks whether the function name already has a
33186 branch island recorded in the list. */
33187
33188 static int
33189 no_previous_def (tree function_name)
33190 {
33191 branch_island *bi;
33192 unsigned ix;
33193
33194 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33195 if (function_name == bi->function_name)
33196 return 0;
33197 return 1;
33198 }
33199
33200 /* GET_PREV_LABEL gets the label name from the previous definition of
33201 the function. */
33202
33203 static tree
33204 get_prev_label (tree function_name)
33205 {
33206 branch_island *bi;
33207 unsigned ix;
33208
33209 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33210 if (function_name == bi->function_name)
33211 return bi->label_name;
33212 return NULL_TREE;
33213 }
33214
33215 /* Generate PIC and indirect symbol stubs. */
33216
33217 void
33218 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33219 {
33220 unsigned int length;
33221 char *symbol_name, *lazy_ptr_name;
33222 char *local_label_0;
33223 static int label = 0;
33224
33225 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33226 symb = (*targetm.strip_name_encoding) (symb);
33227
33228
33229 length = strlen (symb);
33230 symbol_name = XALLOCAVEC (char, length + 32);
33231 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33232
33233 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33234 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33235
33236 if (flag_pic == 2)
33237 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33238 else
33239 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33240
33241 if (flag_pic == 2)
33242 {
33243 fprintf (file, "\t.align 5\n");
33244
33245 fprintf (file, "%s:\n", stub);
33246 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33247
33248 label++;
33249 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33250 sprintf (local_label_0, "\"L%011d$spb\"", label);
33251
33252 fprintf (file, "\tmflr r0\n");
33253 if (TARGET_LINK_STACK)
33254 {
33255 char name[32];
33256 get_ppc476_thunk_name (name);
33257 fprintf (file, "\tbl %s\n", name);
33258 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33259 }
33260 else
33261 {
33262 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33263 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33264 }
33265 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33266 lazy_ptr_name, local_label_0);
33267 fprintf (file, "\tmtlr r0\n");
33268 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33269 (TARGET_64BIT ? "ldu" : "lwzu"),
33270 lazy_ptr_name, local_label_0);
33271 fprintf (file, "\tmtctr r12\n");
33272 fprintf (file, "\tbctr\n");
33273 }
33274 else
33275 {
33276 fprintf (file, "\t.align 4\n");
33277
33278 fprintf (file, "%s:\n", stub);
33279 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33280
33281 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33282 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33283 (TARGET_64BIT ? "ldu" : "lwzu"),
33284 lazy_ptr_name);
33285 fprintf (file, "\tmtctr r12\n");
33286 fprintf (file, "\tbctr\n");
33287 }
33288
33289 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33290 fprintf (file, "%s:\n", lazy_ptr_name);
33291 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33292 fprintf (file, "%sdyld_stub_binding_helper\n",
33293 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33294 }
33295
33296 /* Legitimize PIC addresses. If the address is already
33297 position-independent, we return ORIG. Newly generated
33298 position-independent addresses go into a reg. This is REG if
33299 nonzero, otherwise we allocate register(s) as necessary. */
33300
33301 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
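/* SMALL_INT accepts exactly the signed 16-bit immediates: adding
   0x8000 maps -0x8000..0x7fff onto 0..0xffff, and the unsigned
   compare rejects everything outside that window.  */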
33302
33303 rtx
33304 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33305 rtx reg)
33306 {
33307 rtx base, offset;
33308
33309 if (reg == NULL && !reload_completed)
33310 reg = gen_reg_rtx (Pmode);
33311
33312 if (GET_CODE (orig) == CONST)
33313 {
33314 rtx reg_temp;
33315
33316 if (GET_CODE (XEXP (orig, 0)) == PLUS
33317 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33318 return orig;
33319
33320 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33321
33322 /* Use a different reg for the intermediate value, as
33323 it will be marked UNCHANGING. */
33324 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33325 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33326 Pmode, reg_temp);
33327 offset =
33328 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33329 Pmode, reg);
33330
33331 if (CONST_INT_P (offset))
33332 {
33333 if (SMALL_INT (offset))
33334 return plus_constant (Pmode, base, INTVAL (offset));
33335 else if (!reload_completed)
33336 offset = force_reg (Pmode, offset);
33337 else
33338 {
33339 rtx mem = force_const_mem (Pmode, orig);
33340 return machopic_legitimize_pic_address (mem, Pmode, reg);
33341 }
33342 }
33343 return gen_rtx_PLUS (Pmode, base, offset);
33344 }
33345
33346 /* Fall back on generic machopic code. */
33347 return machopic_legitimize_pic_address (orig, mode, reg);
33348 }
33349
33350 /* Output a .machine directive for the Darwin assembler, and call
33351 the generic start_file routine. */
33352
33353 static void
33354 rs6000_darwin_file_start (void)
33355 {
33356 static const struct
33357 {
33358 const char *arg;
33359 const char *name;
33360 HOST_WIDE_INT if_set;
33361 } mapping[] = {
33362 { "ppc64", "ppc64", MASK_64BIT },
33363 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33364 { "power4", "ppc970", 0 },
33365 { "G5", "ppc970", 0 },
33366 { "7450", "ppc7450", 0 },
33367 { "7400", "ppc7400", MASK_ALTIVEC },
33368 { "G4", "ppc7400", 0 },
33369 { "750", "ppc750", 0 },
33370 { "740", "ppc750", 0 },
33371 { "G3", "ppc750", 0 },
33372 { "604e", "ppc604e", 0 },
33373 { "604", "ppc604", 0 },
33374 { "603e", "ppc603", 0 },
33375 { "603", "ppc603", 0 },
33376 { "601", "ppc601", 0 },
33377 { NULL, "ppc", 0 } };
33378 const char *cpu_id = "";
33379 size_t i;
33380
33381 rs6000_file_start ();
33382 darwin_file_start ();
33383
33384 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33385
33386 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33387 cpu_id = rs6000_default_cpu;
33388
33389 if (global_options_set.x_rs6000_cpu_index)
33390 cpu_id = processor_target_table[rs6000_cpu_index].name;
33391
33392 /* Look through the mapping array. Pick the first name that either
33393 matches the argument, has a bit set in IF_SET that is also set
33394 in the target flags, or has a NULL name. */
33395
33396 i = 0;
33397 while (mapping[i].arg != NULL
33398 && strcmp (mapping[i].arg, cpu_id) != 0
33399 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33400 i++;
33401
33402 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33403 }
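/* Example walk of the table (hedged): -mcpu=G4 sets MASK_ALTIVEC, so
   the loop stops at the "7400" row via the IF_SET test and emits
   "\t.machine ppc7400"; the explicit "G4" row would give the same
   answer.  Anything unmatched falls through to the final "ppc"
   row.  */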
33404
33405 #endif /* TARGET_MACHO */
33406
33407 #if TARGET_ELF
33408 static int
33409 rs6000_elf_reloc_rw_mask (void)
33410 {
33411 if (flag_pic)
33412 return 3;
33413 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33414 return 2;
33415 else
33416 return 0;
33417 }
33418
33419 /* Record an element in the table of global constructors. SYMBOL is
33420 a SYMBOL_REF of the function to be called; PRIORITY is a number
33421 between 0 and MAX_INIT_PRIORITY.
33422
33423 This differs from default_named_section_asm_out_constructor in
33424 that we have special handling for -mrelocatable. */
33425
33426 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33427 static void
33428 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33429 {
33430 const char *section = ".ctors";
33431 char buf[18];
33432
33433 if (priority != DEFAULT_INIT_PRIORITY)
33434 {
33435 sprintf (buf, ".ctors.%.5u",
33436 /* Invert the numbering so the linker puts us in the proper
33437 order; constructors are run from right to left, and the
33438 linker sorts in increasing order. */
33439 MAX_INIT_PRIORITY - priority);
33440 section = buf;
33441 }
33442
33443 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33444 assemble_align (POINTER_SIZE);
33445
33446 if (DEFAULT_ABI == ABI_V4
33447 && (TARGET_RELOCATABLE || flag_pic > 1))
33448 {
33449 fputs ("\t.long (", asm_out_file);
33450 output_addr_const (asm_out_file, symbol);
33451 fputs (")@fixup\n", asm_out_file);
33452 }
33453 else
33454 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33455 }
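/* Worked example of the inversion: MAX_INIT_PRIORITY is 65535, so a
   constructor with priority 100 lands in ".ctors.65435" and one with
   priority 200 in ".ctors.65335"; the linker's increasing sort puts
   the latter first, and right-to-left execution then runs the
   priority 100 constructor earlier, as required.  */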
33456
33457 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33458 static void
33459 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33460 {
33461 const char *section = ".dtors";
33462 char buf[18];
33463
33464 if (priority != DEFAULT_INIT_PRIORITY)
33465 {
33466 sprintf (buf, ".dtors.%.5u",
33467 /* Invert the numbering so the linker puts us in the proper
33468 order; constructors are run from right to left, and the
33469 linker sorts in increasing order. */
33470 MAX_INIT_PRIORITY - priority);
33471 section = buf;
33472 }
33473
33474 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33475 assemble_align (POINTER_SIZE);
33476
33477 if (DEFAULT_ABI == ABI_V4
33478 && (TARGET_RELOCATABLE || flag_pic > 1))
33479 {
33480 fputs ("\t.long (", asm_out_file);
33481 output_addr_const (asm_out_file, symbol);
33482 fputs (")@fixup\n", asm_out_file);
33483 }
33484 else
33485 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33486 }
33487
33488 void
33489 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33490 {
33491 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33492 {
33493 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33494 ASM_OUTPUT_LABEL (file, name);
33495 fputs (DOUBLE_INT_ASM_OP, file);
33496 rs6000_output_function_entry (file, name);
33497 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33498 if (DOT_SYMBOLS)
33499 {
33500 fputs ("\t.size\t", file);
33501 assemble_name (file, name);
33502 fputs (",24\n\t.type\t.", file);
33503 assemble_name (file, name);
33504 fputs (",@function\n", file);
33505 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33506 {
33507 fputs ("\t.globl\t.", file);
33508 assemble_name (file, name);
33509 putc ('\n', file);
33510 }
33511 }
33512 else
33513 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33514 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33515 rs6000_output_function_entry (file, name);
33516 fputs (":\n", file);
33517 return;
33518 }
33519
33520 int uses_toc;
33521 if (DEFAULT_ABI == ABI_V4
33522 && (TARGET_RELOCATABLE || flag_pic > 1)
33523 && !TARGET_SECURE_PLT
33524 && (!constant_pool_empty_p () || crtl->profile)
33525 && (uses_toc = uses_TOC ()))
33526 {
33527 char buf[256];
33528
33529 if (uses_toc == 2)
33530 switch_to_other_text_partition ();
33531 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33532
33533 fprintf (file, "\t.long ");
33534 assemble_name (file, toc_label_name);
33535 need_toc_init = 1;
33536 putc ('-', file);
33537 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33538 assemble_name (file, buf);
33539 putc ('\n', file);
33540 if (uses_toc == 2)
33541 switch_to_other_text_partition ();
33542 }
33543
33544 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33545 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33546
33547 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33548 {
33549 char buf[256];
33550
33551 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33552
33553 fprintf (file, "\t.quad .TOC.-");
33554 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33555 assemble_name (file, buf);
33556 putc ('\n', file);
33557 }
33558
33559 if (DEFAULT_ABI == ABI_AIX)
33560 {
33561 const char *desc_name, *orig_name;
33562
33563 orig_name = (*targetm.strip_name_encoding) (name);
33564 desc_name = orig_name;
33565 while (*desc_name == '.')
33566 desc_name++;
33567
33568 if (TREE_PUBLIC (decl))
33569 fprintf (file, "\t.globl %s\n", desc_name);
33570
33571 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33572 fprintf (file, "%s:\n", desc_name);
33573 fprintf (file, "\t.long %s\n", orig_name);
33574 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33575 fputs ("\t.long 0\n", file);
33576 fprintf (file, "\t.previous\n");
33577 }
33578 ASM_OUTPUT_LABEL (file, name);
33579 }
33580
33581 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33582 static void
33583 rs6000_elf_file_end (void)
33584 {
33585 #ifdef HAVE_AS_GNU_ATTRIBUTE
33586 /* ??? The value emitted depends on options active at file end.
33587 Assume anyone using #pragma or attributes that might change
33588 options knows what they are doing. */
33589 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33590 && rs6000_passes_float)
33591 {
33592 int fp;
33593
33594 if (TARGET_HARD_FLOAT)
33595 fp = 1;
33596 else
33597 fp = 2;
33598 if (rs6000_passes_long_double)
33599 {
33600 if (!TARGET_LONG_DOUBLE_128)
33601 fp |= 2 * 4;
33602 else if (TARGET_IEEEQUAD)
33603 fp |= 3 * 4;
33604 else
33605 fp |= 1 * 4;
33606 }
33607 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33608 }
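/* Worked example of the FP value: hard float with 128-bit IBM long
   double gives fp = 1 | 1*4 = 5, emitting ".gnu_attribute 4, 5";
   soft float with IEEE 128-bit long double gives fp = 2 | 3*4 = 14.  */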
33609 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33610 {
33611 if (rs6000_passes_vector)
33612 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33613 (TARGET_ALTIVEC_ABI ? 2 : 1));
33614 if (rs6000_returns_struct)
33615 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33616 aix_struct_return ? 2 : 1);
33617 }
33618 #endif
33619 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33620 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33621 file_end_indicate_exec_stack ();
33622 #endif
33623
33624 if (flag_split_stack)
33625 file_end_indicate_split_stack ();
33626
33627 if (cpu_builtin_p)
33628 {
33629 /* We have expanded a CPU builtin, so we need to emit a reference to
33630 the special symbol that LIBC uses to declare it supports the
33631 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33632 switch_to_section (data_section);
33633 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33634 fprintf (asm_out_file, "\t%s %s\n",
33635 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33636 }
33637 }
33638 #endif
33639
33640 #if TARGET_XCOFF
33641
33642 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33643 #define HAVE_XCOFF_DWARF_EXTRAS 0
33644 #endif
33645
33646 static enum unwind_info_type
33647 rs6000_xcoff_debug_unwind_info (void)
33648 {
33649 return UI_NONE;
33650 }
33651
33652 static void
33653 rs6000_xcoff_asm_output_anchor (rtx symbol)
33654 {
33655 char buffer[100];
33656
33657 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33658 SYMBOL_REF_BLOCK_OFFSET (symbol));
33659 fprintf (asm_out_file, "%s", SET_ASM_OP);
33660 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33661 fprintf (asm_out_file, ",");
33662 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33663 fprintf (asm_out_file, "\n");
33664 }
33665
33666 static void
33667 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33668 {
33669 fputs (GLOBAL_ASM_OP, stream);
33670 RS6000_OUTPUT_BASENAME (stream, name);
33671 putc ('\n', stream);
33672 }
33673
33674 /* A get_unnamed_section callback, used for read-only sections.
33675 DIRECTIVE points to the section name string variable. */
33676
33677 static void
33678 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33679 {
33680 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33681 *(const char *const *) directive,
33682 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33683 }
33684
33685 /* Likewise for read-write sections. */
33686
33687 static void
33688 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33689 {
33690 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33691 *(const char *const *) directive,
33692 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33693 }
33694
33695 static void
33696 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33697 {
33698 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33699 *(const char *const *) directive,
33700 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33701 }
33702
33703 /* A get_unnamed_section callback, used for switching to toc_section. */
33704
33705 static void
33706 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33707 {
33708 if (TARGET_MINIMAL_TOC)
33709 {
33710 /* toc_section is always selected at least once from
33711 rs6000_xcoff_file_start, so this is guaranteed to
33712 always be defined once and only once in each file. */
33713 if (!toc_initialized)
33714 {
33715 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33716 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33717 toc_initialized = 1;
33718 }
33719 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33720 (TARGET_32BIT ? "" : ",3"));
33721 }
33722 else
33723 fputs ("\t.toc\n", asm_out_file);
33724 }
33725
33726 /* Implement TARGET_ASM_INIT_SECTIONS. */
33727
33728 static void
33729 rs6000_xcoff_asm_init_sections (void)
33730 {
33731 read_only_data_section
33732 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33733 &xcoff_read_only_section_name);
33734
33735 private_data_section
33736 = get_unnamed_section (SECTION_WRITE,
33737 rs6000_xcoff_output_readwrite_section_asm_op,
33738 &xcoff_private_data_section_name);
33739
33740 tls_data_section
33741 = get_unnamed_section (SECTION_TLS,
33742 rs6000_xcoff_output_tls_section_asm_op,
33743 &xcoff_tls_data_section_name);
33744
33745 tls_private_data_section
33746 = get_unnamed_section (SECTION_TLS,
33747 rs6000_xcoff_output_tls_section_asm_op,
33748 &xcoff_private_data_section_name);
33749
33750 read_only_private_data_section
33751 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33752 &xcoff_private_data_section_name);
33753
33754 toc_section
33755 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33756
33757 readonly_data_section = read_only_data_section;
33758 }
33759
33760 static int
33761 rs6000_xcoff_reloc_rw_mask (void)
33762 {
33763 return 3;
33764 }
33765
33766 static void
33767 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33768 tree decl ATTRIBUTE_UNUSED)
33769 {
33770 int smclass;
33771 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33772
33773 if (flags & SECTION_EXCLUDE)
33774 smclass = 4;
33775 else if (flags & SECTION_DEBUG)
33776 {
33777 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33778 return;
33779 }
33780 else if (flags & SECTION_CODE)
33781 smclass = 0;
33782 else if (flags & SECTION_TLS)
33783 smclass = 3;
33784 else if (flags & SECTION_WRITE)
33785 smclass = 2;
33786 else
33787 smclass = 1;
33788
33789 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33790 (flags & SECTION_CODE) ? "." : "",
33791 name, suffix[smclass], flags & SECTION_ENTSIZE);
33792 }
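/* Example outputs (hedged): a code section "foo" whose entsize bits
   encode an alignment of 2 comes out as "\t.csect .foo[PR],2", a
   writable "foo" as "\t.csect foo[RW],2", and SECTION_DEBUG sections
   bypass .csect entirely via the .dwsect directive above.  */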
33793
33794 #define IN_NAMED_SECTION(DECL) \
33795 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33796 && DECL_SECTION_NAME (DECL) != NULL)
33797
33798 static section *
33799 rs6000_xcoff_select_section (tree decl, int reloc,
33800 unsigned HOST_WIDE_INT align)
33801 {
33802 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33803 named section. */
33804 if (align > BIGGEST_ALIGNMENT)
33805 {
33806 resolve_unique_section (decl, reloc, true);
33807 if (IN_NAMED_SECTION (decl))
33808 return get_named_section (decl, NULL, reloc);
33809 }
33810
33811 if (decl_readonly_section (decl, reloc))
33812 {
33813 if (TREE_PUBLIC (decl))
33814 return read_only_data_section;
33815 else
33816 return read_only_private_data_section;
33817 }
33818 else
33819 {
33820 #if HAVE_AS_TLS
33821 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33822 {
33823 if (TREE_PUBLIC (decl))
33824 return tls_data_section;
33825 else if (bss_initializer_p (decl))
33826 {
33827 /* Convert to COMMON to emit in BSS. */
33828 DECL_COMMON (decl) = 1;
33829 return tls_comm_section;
33830 }
33831 else
33832 return tls_private_data_section;
33833 }
33834 else
33835 #endif
33836 if (TREE_PUBLIC (decl))
33837 return data_section;
33838 else
33839 return private_data_section;
33840 }
33841 }
33842
33843 static void
33844 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33845 {
33846 const char *name;
33847
33848 /* Use select_section for private data and uninitialized data with
33849 alignment <= BIGGEST_ALIGNMENT. */
33850 if (!TREE_PUBLIC (decl)
33851 || DECL_COMMON (decl)
33852 || (DECL_INITIAL (decl) == NULL_TREE
33853 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33854 || DECL_INITIAL (decl) == error_mark_node
33855 || (flag_zero_initialized_in_bss
33856 && initializer_zerop (DECL_INITIAL (decl))))
33857 return;
33858
33859 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33860 name = (*targetm.strip_name_encoding) (name);
33861 set_decl_section_name (decl, name);
33862 }
33863
33864 /* Select section for constant in constant pool.
33865
33866 On RS/6000, all constants are in the private read-only data area.
33867 However, if this is being placed in the TOC it must be output as a
33868 toc entry. */
33869
33870 static section *
33871 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33872 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33873 {
33874 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33875 return toc_section;
33876 else
33877 return read_only_private_data_section;
33878 }
33879
33880 /* Remove any trailing [DS] or the like from the symbol name. */
33881
33882 static const char *
33883 rs6000_xcoff_strip_name_encoding (const char *name)
33884 {
33885 size_t len;
33886 if (*name == '*')
33887 name++;
33888 len = strlen (name);
33889 if (name[len - 1] == ']')
33890 return ggc_alloc_string (name, len - 4);
33891 else
33892 return name;
33893 }
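/* E.g. "foo[DS]" and "*foo[DS]" both strip to "foo"; the fixed
   "len - 4" assumes the bracketed two-letter mapping classes this
   file appends, such as [DS] and [UA].  */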
33894
33895 /* Section attributes. AIX is always PIC. */
33896
33897 static unsigned int
33898 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33899 {
33900 unsigned int align;
33901 unsigned int flags = default_section_type_flags (decl, name, reloc);
33902
33903 /* Align to at least the minimum word size. */
33904 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33905 align = MIN_UNITS_PER_WORD;
33906 else
33907 /* Increase alignment of large objects if not already stricter. */
33908 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33909 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33910 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33911
33912 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33913 }
33914
33915 /* Output at beginning of assembler file.
33916
33917 Initialize the section names for the RS/6000 at this point.
33918
33919 Specify filename, including full path, to assembler.
33920
33921 We want to go into the TOC section so at least one .toc will be emitted.
33922 Also, in order to output proper .bs/.es pairs, we need at least one static
33923 [RW] section emitted.
33924
33925 Finally, declare mcount when profiling to make the assembler happy. */
33926
33927 static void
33928 rs6000_xcoff_file_start (void)
33929 {
33930 rs6000_gen_section_name (&xcoff_bss_section_name,
33931 main_input_filename, ".bss_");
33932 rs6000_gen_section_name (&xcoff_private_data_section_name,
33933 main_input_filename, ".rw_");
33934 rs6000_gen_section_name (&xcoff_read_only_section_name,
33935 main_input_filename, ".ro_");
33936 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33937 main_input_filename, ".tls_");
33938 rs6000_gen_section_name (&xcoff_tbss_section_name,
33939 main_input_filename, ".tbss_[UL]");
33940
33941 fputs ("\t.file\t", asm_out_file);
33942 output_quoted_string (asm_out_file, main_input_filename);
33943 fputc ('\n', asm_out_file);
33944 if (write_symbols != NO_DEBUG)
33945 switch_to_section (private_data_section);
33946 switch_to_section (toc_section);
33947 switch_to_section (text_section);
33948 if (profile_flag)
33949 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33950 rs6000_file_start ();
33951 }
33952
33953 /* Output at end of assembler file.
33954 On the RS/6000, referencing data should automatically pull in text. */
33955
33956 static void
33957 rs6000_xcoff_file_end (void)
33958 {
33959 switch_to_section (text_section);
33960 fputs ("_section_.text:\n", asm_out_file);
33961 switch_to_section (data_section);
33962 fputs (TARGET_32BIT
33963 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33964 asm_out_file);
33965 }
33966
33967 struct declare_alias_data
33968 {
33969 FILE *file;
33970 bool function_descriptor;
33971 };
33972
33973 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33974
33975 static bool
33976 rs6000_declare_alias (struct symtab_node *n, void *d)
33977 {
33978 struct declare_alias_data *data = (struct declare_alias_data *)d;
33979 /* The main symbol is output specially, because the varasm machinery does
33980 part of the job for us; we do not need to declare .globl/.lglobl and such. */
33981 if (!n->alias || n->weakref)
33982 return false;
33983
33984 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33985 return false;
33986
33987 /* Prevent assemble_alias from trying to use .set pseudo operation
33988 that does not behave as expected by the middle-end. */
33989 TREE_ASM_WRITTEN (n->decl) = true;
33990
33991 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33992 char *buffer = (char *) alloca (strlen (name) + 2);
33993 char *p;
33994 int dollar_inside = 0;
33995
33996 strcpy (buffer, name);
33997 p = strchr (buffer, '$');
33998 while (p) {
33999 *p = '_';
34000 dollar_inside++;
34001 p = strchr (p + 1, '$');
34002 }
34003 if (TREE_PUBLIC (n->decl))
34004 {
34005 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34006 {
34007 if (dollar_inside) {
34008 if (data->function_descriptor)
34009 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34010 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34011 }
34012 if (data->function_descriptor)
34013 {
34014 fputs ("\t.globl .", data->file);
34015 RS6000_OUTPUT_BASENAME (data->file, buffer);
34016 putc ('\n', data->file);
34017 }
34018 fputs ("\t.globl ", data->file);
34019 RS6000_OUTPUT_BASENAME (data->file, buffer);
34020 putc ('\n', data->file);
34021 }
34022 #ifdef ASM_WEAKEN_DECL
34023 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34024 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34025 #endif
34026 }
34027 else
34028 {
34029 if (dollar_inside)
34030 {
34031 if (data->function_descriptor)
34032 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34033 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34034 }
34035 if (data->function_descriptor)
34036 {
34037 fputs ("\t.lglobl .", data->file);
34038 RS6000_OUTPUT_BASENAME (data->file, buffer);
34039 putc ('\n', data->file);
34040 }
34041 fputs ("\t.lglobl ", data->file);
34042 RS6000_OUTPUT_BASENAME (data->file, buffer);
34043 putc ('\n', data->file);
34044 }
34045 if (data->function_descriptor)
34046 fputs (".", data->file);
34047 RS6000_OUTPUT_BASENAME (data->file, buffer);
34048 fputs (":\n", data->file);
34049 return false;
34050 }
34051
34052
34053 #ifdef HAVE_GAS_HIDDEN
34054 /* Helper function to calculate visibility of a DECL
34055 and return the value as a const string. */
34056
34057 static const char *
34058 rs6000_xcoff_visibility (tree decl)
34059 {
34060 static const char * const visibility_types[] = {
34061 "", ",protected", ",hidden", ",internal"
34062 };
34063
34064 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34065 return visibility_types[vis];
34066 }
34067 #endif
34068
34069
34070 /* This macro produces the initial definition of a function name.
34071 On the RS/6000, we need to place an extra '.' in the function name and
34072 output the function descriptor.
34073 Dollar signs are converted to underscores.
34074
34075 The csect for the function will have already been created when
34076 text_section was selected. We do have to go back to that csect, however.
34077
34078 The third and fourth parameters to the .function pseudo-op are
34079 placeholders which no longer have any use.
34080
34081 Because AIX assembler's .set command has unexpected semantics, we output
34082 all aliases as alternative labels in front of the definition. */
34083
34084 void
34085 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34086 {
34087 char *buffer = (char *) alloca (strlen (name) + 1);
34088 char *p;
34089 int dollar_inside = 0;
34090 struct declare_alias_data data = {file, false};
34091
34092 strcpy (buffer, name);
34093 p = strchr (buffer, '$');
34094 while (p) {
34095 *p = '_';
34096 dollar_inside++;
34097 p = strchr (p + 1, '$');
34098 }
34099 if (TREE_PUBLIC (decl))
34100 {
34101 if (!RS6000_WEAK || !DECL_WEAK (decl))
34102 {
34103 if (dollar_inside) {
34104 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34105 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34106 }
34107 fputs ("\t.globl .", file);
34108 RS6000_OUTPUT_BASENAME (file, buffer);
34109 #ifdef HAVE_GAS_HIDDEN
34110 fputs (rs6000_xcoff_visibility (decl), file);
34111 #endif
34112 putc ('\n', file);
34113 }
34114 }
34115 else
34116 {
34117 if (dollar_inside) {
34118 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
34119 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
34120 }
34121 fputs ("\t.lglobl .", file);
34122 RS6000_OUTPUT_BASENAME (file, buffer);
34123 putc ('\n', file);
34124 }
34125 fputs ("\t.csect ", file);
34126 RS6000_OUTPUT_BASENAME (file, buffer);
34127 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34128 RS6000_OUTPUT_BASENAME (file, buffer);
34129 fputs (":\n", file);
34130 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34131 &data, true);
34132 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34133 RS6000_OUTPUT_BASENAME (file, buffer);
34134 fputs (", TOC[tc0], 0\n", file);
34135 in_section = NULL;
34136 switch_to_section (function_section (decl));
34137 putc ('.', file);
34138 RS6000_OUTPUT_BASENAME (file, buffer);
34139 fputs (":\n", file);
34140 data.function_descriptor = true;
34141 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34142 &data, true);
34143 if (!DECL_IGNORED_P (decl))
34144 {
34145 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34146 xcoffout_declare_function (file, decl, buffer);
34147 else if (write_symbols == DWARF2_DEBUG)
34148 {
34149 name = (*targetm.strip_name_encoding) (name);
34150 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34151 }
34152 }
34153 return;
34154 }
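/* A hedged sketch of the 64-bit output for a public function "bar"
   (the text csect line comes from function_section, and debug
   directives may follow):

	.globl .bar
	.csect bar[DS],3
	bar:
	.llong .bar, TOC[tc0], 0
	.csect .text[PR]
	.bar:
   */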
34155
34156
34157 /* Output assembly language to globalize a symbol from a DECL,
34158 possibly with visibility. */
34159
34160 void
34161 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34162 {
34163 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34164 fputs (GLOBAL_ASM_OP, stream);
34165 RS6000_OUTPUT_BASENAME (stream, name);
34166 #ifdef HAVE_GAS_HIDDEN
34167 fputs (rs6000_xcoff_visibility (decl), stream);
34168 #endif
34169 putc ('\n', stream);
34170 }
34171
34172 /* Output assembly language to define a symbol as COMMON from a DECL,
34173 possibly with visibility. */
34174
34175 void
34176 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34177 tree decl ATTRIBUTE_UNUSED,
34178 const char *name,
34179 unsigned HOST_WIDE_INT size,
34180 unsigned HOST_WIDE_INT align)
34181 {
34182 unsigned HOST_WIDE_INT align2 = 2;
34183
34184 if (align > 32)
34185 align2 = floor_log2 (align / BITS_PER_UNIT);
34186 else if (size > 4)
34187 align2 = 3;
34188
34189 fputs (COMMON_ASM_OP, stream);
34190 RS6000_OUTPUT_BASENAME (stream, name);
34191
34192 fprintf (stream,
34193 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34194 size, align2);
34195
34196 #ifdef HAVE_GAS_HIDDEN
34197 if (decl != NULL)
34198 fputs (rs6000_xcoff_visibility (decl), stream);
34199 #endif
34200 putc ('\n', stream);
34201 }
34202
34203 /* This macro produces the initial definition of an object (variable) name.
34204 Because AIX assembler's .set command has unexpected semantics, we output
34205 all aliases as alternative labels in front of the definition. */
34206
34207 void
34208 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34209 {
34210 struct declare_alias_data data = {file, false};
34211 RS6000_OUTPUT_BASENAME (file, name);
34212 fputs (":\n", file);
34213 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34214 &data, true);
34215 }
34216
34217 /* Override the default 'SYMBOL-.' syntax with the AIX compatible 'SYMBOL-$'. */
34218
34219 void
34220 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34221 {
34222 fputs (integer_asm_op (size, FALSE), file);
34223 assemble_name (file, label);
34224 fputs ("-$", file);
34225 }
34226
34227 /* Output a symbol offset relative to the dbase for the current object.
34228 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34229 signed offsets.
34230
34231 __gcc_unwind_dbase is embedded in all executables/libraries through
34232 libgcc/config/rs6000/crtdbase.S. */
34233
34234 void
34235 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34236 {
34237 fputs (integer_asm_op (size, FALSE), file);
34238 assemble_name (file, label);
34239 fputs("-__gcc_unwind_dbase", file);
34240 }
34241
34242 #ifdef HAVE_AS_TLS
34243 static void
34244 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34245 {
34246 rtx symbol;
34247 int flags;
34248 const char *symname;
34249
34250 default_encode_section_info (decl, rtl, first);
34251
34252 /* Careful not to prod global register variables. */
34253 if (!MEM_P (rtl))
34254 return;
34255 symbol = XEXP (rtl, 0);
34256 if (!SYMBOL_REF_P (symbol))
34257 return;
34258
34259 flags = SYMBOL_REF_FLAGS (symbol);
34260
34261 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34262 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34263
34264 SYMBOL_REF_FLAGS (symbol) = flags;
34265
34266 /* Append mapping class to extern decls. */
34267 symname = XSTR (symbol, 0);
34268 if (decl /* sync condition with assemble_external () */
34269 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34270 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34271 || TREE_CODE (decl) == FUNCTION_DECL)
34272 && symname[strlen (symname) - 1] != ']')
34273 {
34274 char *newname = (char *) alloca (strlen (symname) + 5);
34275 strcpy (newname, symname);
34276 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34277 ? "[DS]" : "[UA]"));
34278 XSTR (symbol, 0) = ggc_strdup (newname);
34279 }
34280 }
34281 #endif /* HAVE_AS_TLS */
34282 #endif /* TARGET_XCOFF */
34283
34284 void
34285 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34286 const char *name, const char *val)
34287 {
34288 fputs ("\t.weak\t", stream);
34289 RS6000_OUTPUT_BASENAME (stream, name);
34290 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34291 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34292 {
34293 if (TARGET_XCOFF)
34294 fputs ("[DS]", stream);
34295 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34296 if (TARGET_XCOFF)
34297 fputs (rs6000_xcoff_visibility (decl), stream);
34298 #endif
34299 fputs ("\n\t.weak\t.", stream);
34300 RS6000_OUTPUT_BASENAME (stream, name);
34301 }
34302 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34303 if (TARGET_XCOFF)
34304 fputs (rs6000_xcoff_visibility (decl), stream);
34305 #endif
34306 fputc ('\n', stream);
34307 if (val)
34308 {
34309 #ifdef ASM_OUTPUT_DEF
34310 ASM_OUTPUT_DEF (stream, name, val);
34311 #endif
34312 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34313 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34314 {
34315 fputs ("\t.set\t.", stream);
34316 RS6000_OUTPUT_BASENAME (stream, name);
34317 fputs (",.", stream);
34318 RS6000_OUTPUT_BASENAME (stream, val);
34319 fputc ('\n', stream);
34320 }
34321 }
34322 }
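/* Sketch of the AIX output for a weak function "foo" aliased to "val",
   with DOT_SYMBOLS (assuming ASM_OUTPUT_DEF uses .set here; the exact
   descriptor-level definition comes from that macro):

	.weak foo[DS]
	.weak .foo
	.set foo,val
	.set .foo,.val
   */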
34323
34324
34325 /* Return true if INSN should not be copied. */
34326
34327 static bool
34328 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34329 {
34330 return recog_memoized (insn) >= 0
34331 && get_attr_cannot_copy (insn);
34332 }
34333
34334 /* Compute a (partial) cost for rtx X. Return true if the complete
34335 cost has been computed, and false if subexpressions should be
34336 scanned. In either case, *TOTAL contains the cost result. */
34337
34338 static bool
34339 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34340 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34341 {
34342 int code = GET_CODE (x);
34343
34344 switch (code)
34345 {
34346 /* On the RS/6000, if it is valid in the insn, it is free. */
34347 case CONST_INT:
34348 if (((outer_code == SET
34349 || outer_code == PLUS
34350 || outer_code == MINUS)
34351 && (satisfies_constraint_I (x)
34352 || satisfies_constraint_L (x)))
34353 || (outer_code == AND
34354 && (satisfies_constraint_K (x)
34355 || (mode == SImode
34356 ? satisfies_constraint_L (x)
34357 : satisfies_constraint_J (x))))
34358 || ((outer_code == IOR || outer_code == XOR)
34359 && (satisfies_constraint_K (x)
34360 || (mode == SImode
34361 ? satisfies_constraint_L (x)
34362 : satisfies_constraint_J (x))))
34363 || outer_code == ASHIFT
34364 || outer_code == ASHIFTRT
34365 || outer_code == LSHIFTRT
34366 || outer_code == ROTATE
34367 || outer_code == ROTATERT
34368 || outer_code == ZERO_EXTRACT
34369 || (outer_code == MULT
34370 && satisfies_constraint_I (x))
34371 || ((outer_code == DIV || outer_code == UDIV
34372 || outer_code == MOD || outer_code == UMOD)
34373 && exact_log2 (INTVAL (x)) >= 0)
34374 || (outer_code == COMPARE
34375 && (satisfies_constraint_I (x)
34376 || satisfies_constraint_K (x)))
34377 || ((outer_code == EQ || outer_code == NE)
34378 && (satisfies_constraint_I (x)
34379 || satisfies_constraint_K (x)
34380 || (mode == SImode
34381 ? satisfies_constraint_L (x)
34382 : satisfies_constraint_J (x))))
34383 || (outer_code == GTU
34384 && satisfies_constraint_I (x))
34385 || (outer_code == LTU
34386 && satisfies_constraint_P (x)))
34387 {
34388 *total = 0;
34389 return true;
34390 }
34391 else if ((outer_code == PLUS
34392 && reg_or_add_cint_operand (x, VOIDmode))
34393 || (outer_code == MINUS
34394 && reg_or_sub_cint_operand (x, VOIDmode))
34395 || ((outer_code == SET
34396 || outer_code == IOR
34397 || outer_code == XOR)
34398 && (INTVAL (x)
34399 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34400 {
34401 *total = COSTS_N_INSNS (1);
34402 return true;
34403 }
34404 /* FALLTHRU */
34405
34406 case CONST_DOUBLE:
34407 case CONST_WIDE_INT:
34408 case CONST:
34409 case HIGH:
34410 case SYMBOL_REF:
34411 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34412 return true;
34413
34414 case MEM:
34415 /* When optimizing for size, MEM should be slightly more expensive
34416 than generating the address, e.g., (plus (reg) (const)).
34417 L1 cache latency is about two instructions. */
34418 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34419 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34420 *total += COSTS_N_INSNS (100);
34421 return true;
34422
34423 case LABEL_REF:
34424 *total = 0;
34425 return true;
34426
34427 case PLUS:
34428 case MINUS:
34429 if (FLOAT_MODE_P (mode))
34430 *total = rs6000_cost->fp;
34431 else
34432 *total = COSTS_N_INSNS (1);
34433 return false;
34434
34435 case MULT:
34436 if (CONST_INT_P (XEXP (x, 1))
34437 && satisfies_constraint_I (XEXP (x, 1)))
34438 {
34439 if (INTVAL (XEXP (x, 1)) >= -256
34440 && INTVAL (XEXP (x, 1)) <= 255)
34441 *total = rs6000_cost->mulsi_const9;
34442 else
34443 *total = rs6000_cost->mulsi_const;
34444 }
34445 else if (mode == SFmode)
34446 *total = rs6000_cost->fp;
34447 else if (FLOAT_MODE_P (mode))
34448 *total = rs6000_cost->dmul;
34449 else if (mode == DImode)
34450 *total = rs6000_cost->muldi;
34451 else
34452 *total = rs6000_cost->mulsi;
34453 return false;
34454
34455 case FMA:
34456 if (mode == SFmode)
34457 *total = rs6000_cost->fp;
34458 else
34459 *total = rs6000_cost->dmul;
34460 break;
34461
34462 case DIV:
34463 case MOD:
34464 if (FLOAT_MODE_P (mode))
34465 {
34466 *total = mode == DFmode ? rs6000_cost->ddiv
34467 : rs6000_cost->sdiv;
34468 return false;
34469 }
34470 /* FALLTHRU */
34471
34472 case UDIV:
34473 case UMOD:
34474 if (CONST_INT_P (XEXP (x, 1))
34475 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34476 {
34477 if (code == DIV || code == MOD)
34478 /* Shift, addze */
34479 *total = COSTS_N_INSNS (2);
34480 else
34481 /* Shift */
34482 *total = COSTS_N_INSNS (1);
34483 }
34484 else
34485 {
34486 if (GET_MODE (XEXP (x, 1)) == DImode)
34487 *total = rs6000_cost->divdi;
34488 else
34489 *total = rs6000_cost->divsi;
34490 }
34491 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34492 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34493 *total += COSTS_N_INSNS (2);
34494 return false;
34495
34496 case CTZ:
34497 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34498 return false;
34499
34500 case FFS:
34501 *total = COSTS_N_INSNS (4);
34502 return false;
34503
34504 case POPCOUNT:
34505 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34506 return false;
34507
34508 case PARITY:
34509 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34510 return false;
34511
34512 case NOT:
34513 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34514 *total = 0;
34515 else
34516 *total = COSTS_N_INSNS (1);
34517 return false;
34518
34519 case AND:
34520 if (CONST_INT_P (XEXP (x, 1)))
34521 {
34522 rtx left = XEXP (x, 0);
34523 rtx_code left_code = GET_CODE (left);
34524
34525 /* rotate-and-mask: 1 insn. */
34526 if ((left_code == ROTATE
34527 || left_code == ASHIFT
34528 || left_code == LSHIFTRT)
34529 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34530 {
34531 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34532 if (!CONST_INT_P (XEXP (left, 1)))
34533 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34534 *total += COSTS_N_INSNS (1);
34535 return true;
34536 }
34537
34538 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34539 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34540 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34541 || (val & 0xffff) == val
34542 || (val & 0xffff0000) == val
34543 || ((val & 0xffff) == 0 && mode == SImode))
34544 {
34545 *total = rtx_cost (left, mode, AND, 0, speed);
34546 *total += COSTS_N_INSNS (1);
34547 return true;
34548 }
34549
34550 /* 2 insns. */
34551 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34552 {
34553 *total = rtx_cost (left, mode, AND, 0, speed);
34554 *total += COSTS_N_INSNS (2);
34555 return true;
34556 }
34557 }
34558
34559 *total = COSTS_N_INSNS (1);
34560 return false;
34561
34562 case IOR:
34563 /* FIXME */
34564 *total = COSTS_N_INSNS (1);
34565 return true;
34566
34567 case CLZ:
34568 case XOR:
34569 case ZERO_EXTRACT:
34570 *total = COSTS_N_INSNS (1);
34571 return false;
34572
34573 case ASHIFT:
34574 /* EXTSWSLI combines a sign extend and a shift in a single instruction,
34575 so don't count the two operations separately within the insn. */
34576 if (TARGET_EXTSWSLI && mode == DImode
34577 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34578 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34579 {
34580 *total = 0;
34581 return false;
34582 }
34583 /* fall through */
34584
34585 case ASHIFTRT:
34586 case LSHIFTRT:
34587 case ROTATE:
34588 case ROTATERT:
34589 /* Handle mul_highpart. */
34590 if (outer_code == TRUNCATE
34591 && GET_CODE (XEXP (x, 0)) == MULT)
34592 {
34593 if (mode == DImode)
34594 *total = rs6000_cost->muldi;
34595 else
34596 *total = rs6000_cost->mulsi;
34597 return true;
34598 }
34599 else if (outer_code == AND)
34600 *total = 0;
34601 else
34602 *total = COSTS_N_INSNS (1);
34603 return false;
34604
34605 case SIGN_EXTEND:
34606 case ZERO_EXTEND:
34607 if (MEM_P (XEXP (x, 0)))
34608 *total = 0;
34609 else
34610 *total = COSTS_N_INSNS (1);
34611 return false;
34612
34613 case COMPARE:
34614 case NEG:
34615 case ABS:
34616 if (!FLOAT_MODE_P (mode))
34617 {
34618 *total = COSTS_N_INSNS (1);
34619 return false;
34620 }
34621 /* FALLTHRU */
34622
34623 case FLOAT:
34624 case UNSIGNED_FLOAT:
34625 case FIX:
34626 case UNSIGNED_FIX:
34627 case FLOAT_TRUNCATE:
34628 *total = rs6000_cost->fp;
34629 return false;
34630
34631 case FLOAT_EXTEND:
34632 if (mode == DFmode)
34633 *total = rs6000_cost->sfdf_convert;
34634 else
34635 *total = rs6000_cost->fp;
34636 return false;
34637
34638 case UNSPEC:
34639 switch (XINT (x, 1))
34640 {
34641 case UNSPEC_FRSP:
34642 *total = rs6000_cost->fp;
34643 return true;
34644
34645 default:
34646 break;
34647 }
34648 break;
34649
34650 case CALL:
34651 case IF_THEN_ELSE:
34652 if (!speed)
34653 {
34654 *total = COSTS_N_INSNS (1);
34655 return true;
34656 }
34657 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34658 {
34659 *total = rs6000_cost->fp;
34660 return false;
34661 }
34662 break;
34663
34664 case NE:
34665 case EQ:
34666 case GTU:
34667 case LTU:
34668 /* Carry bit requires mode == Pmode.
34669 The NEG or PLUS is already counted, so only add one. */
34670 if (mode == Pmode
34671 && (outer_code == NEG || outer_code == PLUS))
34672 {
34673 *total = COSTS_N_INSNS (1);
34674 return true;
34675 }
34676 /* FALLTHRU */
34677
34678 case GT:
34679 case LT:
34680 case UNORDERED:
34681 if (outer_code == SET)
34682 {
34683 if (XEXP (x, 1) == const0_rtx)
34684 {
34685 *total = COSTS_N_INSNS (2);
34686 return true;
34687 }
34688 else
34689 {
34690 *total = COSTS_N_INSNS (3);
34691 return false;
34692 }
34693 }
34694 /* CC COMPARE. */
34695 if (outer_code == COMPARE)
34696 {
34697 *total = 0;
34698 return true;
34699 }
34700 break;
34701
34702 default:
34703 break;
34704 }
34705
34706 return false;
34707 }
34708
34709 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34710
34711 static bool
34712 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34713 int opno, int *total, bool speed)
34714 {
34715 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34716
34717 fprintf (stderr,
34718 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34719 "opno = %d, total = %d, speed = %s, x:\n",
34720 ret ? "complete" : "scan inner",
34721 GET_MODE_NAME (mode),
34722 GET_RTX_NAME (outer_code),
34723 opno,
34724 *total,
34725 speed ? "true" : "false");
34726
34727 debug_rtx (x);
34728
34729 return ret;
34730 }
34731
34732 static int
34733 rs6000_insn_cost (rtx_insn *insn, bool speed)
34734 {
34735 if (recog_memoized (insn) < 0)
34736 return 0;
34737
34738 if (!speed)
34739 return get_attr_length (insn);
34740
34741 int cost = get_attr_cost (insn);
34742 if (cost > 0)
34743 return cost;
34744
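/* get_attr_length returns bytes, and PowerPC instructions are 4 bytes
each, so N below approximates the number of machine instructions this
insn expands to. */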
34745 int n = get_attr_length (insn) / 4;
34746 enum attr_type type = get_attr_type (insn);
34747
34748 switch (type)
34749 {
34750 case TYPE_LOAD:
34751 case TYPE_FPLOAD:
34752 case TYPE_VECLOAD:
34753 cost = COSTS_N_INSNS (n + 1);
34754 break;
34755
34756 case TYPE_MUL:
34757 switch (get_attr_size (insn))
34758 {
34759 case SIZE_8:
34760 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34761 break;
34762 case SIZE_16:
34763 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34764 break;
34765 case SIZE_32:
34766 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34767 break;
34768 case SIZE_64:
34769 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34770 break;
34771 default:
34772 gcc_unreachable ();
34773 }
34774 break;
34775 case TYPE_DIV:
34776 switch (get_attr_size (insn))
34777 {
34778 case SIZE_32:
34779 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34780 break;
34781 case SIZE_64:
34782 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34783 break;
34784 default:
34785 gcc_unreachable ();
34786 }
34787 break;
34788
34789 case TYPE_FP:
34790 cost = n * rs6000_cost->fp;
34791 break;
34792 case TYPE_DMUL:
34793 cost = n * rs6000_cost->dmul;
34794 break;
34795 case TYPE_SDIV:
34796 cost = n * rs6000_cost->sdiv;
34797 break;
34798 case TYPE_DDIV:
34799 cost = n * rs6000_cost->ddiv;
34800 break;
34801
34802 case TYPE_SYNC:
34803 case TYPE_LOAD_L:
34804 case TYPE_MFCR:
34805 case TYPE_MFCRF:
34806 cost = COSTS_N_INSNS (n + 2);
34807 break;
34808
34809 default:
34810 cost = COSTS_N_INSNS (n);
34811 }
34812
34813 return cost;
34814 }
34815
34816 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34817
34818 static int
34819 rs6000_debug_address_cost (rtx x, machine_mode mode,
34820 addr_space_t as, bool speed)
34821 {
34822 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34823
34824 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34825 ret, speed ? "true" : "false");
34826 debug_rtx (x);
34827
34828 return ret;
34829 }
34830
34831
34832 /* A C expression returning the cost of moving data from a register of class
34833 FROM to one of class TO. */
34834
34835 static int
34836 rs6000_register_move_cost (machine_mode mode,
34837 reg_class_t from, reg_class_t to)
34838 {
34839 int ret;
34840
34841 if (TARGET_DEBUG_COST)
34842 dbg_cost_ctrl++;
34843
34844 /* Moves from/to GENERAL_REGS. */
34845 if (reg_classes_intersect_p (to, GENERAL_REGS)
34846 || reg_classes_intersect_p (from, GENERAL_REGS))
34847 {
34848 reg_class_t rclass = from;
34849
34850 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34851 rclass = to;
34852
34853 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34854 ret = (rs6000_memory_move_cost (mode, rclass, false)
34855 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34856
34857 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34858 shift. */
34859 else if (rclass == CR_REGS)
34860 ret = 4;
34861
34862 /* For those processors that have slow LR/CTR moves, make them more
34863 expensive than memory in order to bias spills to memory. */
34864 else if ((rs6000_tune == PROCESSOR_POWER6
34865 || rs6000_tune == PROCESSOR_POWER7
34866 || rs6000_tune == PROCESSOR_POWER8
34867 || rs6000_tune == PROCESSOR_POWER9)
34868 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34869 ret = 6 * hard_regno_nregs (0, mode);
34870
34871 else
34872 /* A move will cost one instruction per GPR moved. */
34873 ret = 2 * hard_regno_nregs (0, mode);
34874 }
34875
34876 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34877 else if (VECTOR_MEM_VSX_P (mode)
34878 && reg_classes_intersect_p (to, VSX_REGS)
34879 && reg_classes_intersect_p (from, VSX_REGS))
34880 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34881
34882 /* Moving between two similar registers is just one instruction. */
34883 else if (reg_classes_intersect_p (to, from))
34884 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34885
34886 /* Everything else has to go through GENERAL_REGS. */
34887 else
34888 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34889 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34890
34891 if (TARGET_DEBUG_COST)
34892 {
34893 if (dbg_cost_ctrl == 1)
34894 fprintf (stderr,
34895 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34896 ret, GET_MODE_NAME (mode), reg_class_names[from],
34897 reg_class_names[to]);
34898 dbg_cost_ctrl--;
34899 }
34900
34901 return ret;
34902 }
34903
34904 /* A C expression returning the cost of moving data of MODE between a
34905 register and memory. */
34906
34907 static int
34908 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34909 bool in ATTRIBUTE_UNUSED)
34910 {
34911 int ret;
34912
34913 if (TARGET_DEBUG_COST)
34914 dbg_cost_ctrl++;
34915
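/* In the calls below, hard register 0 stands for the GPRs, 32 for the
first FPR, and FIRST_ALTIVEC_REGNO for the Altivec registers;
hard_regno_nregs simply counts how many such registers MODE occupies. */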
34916 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34917 ret = 4 * hard_regno_nregs (0, mode);
34918 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34919 || reg_classes_intersect_p (rclass, VSX_REGS)))
34920 ret = 4 * hard_regno_nregs (32, mode);
34921 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34922 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34923 else
34924 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34925
34926 if (TARGET_DEBUG_COST)
34927 {
34928 if (dbg_cost_ctrl == 1)
34929 fprintf (stderr,
34930 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34931 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34932 dbg_cost_ctrl--;
34933 }
34934
34935 return ret;
34936 }
34937
34938 /* Returns a code for a target-specific builtin that implements
34939 reciprocal of the function, or NULL_TREE if not available. */
34940
34941 static tree
34942 rs6000_builtin_reciprocal (tree fndecl)
34943 {
34944 switch (DECL_FUNCTION_CODE (fndecl))
34945 {
34946 case VSX_BUILTIN_XVSQRTDP:
34947 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34948 return NULL_TREE;
34949
34950 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34951
34952 case VSX_BUILTIN_XVSQRTSP:
34953 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34954 return NULL_TREE;
34955
34956 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34957
34958 default:
34959 return NULL_TREE;
34960 }
34961 }
34962
34963 /* Load up a constant. If the mode is a vector mode, splat the value across
34964 all of the vector elements. */
34965
34966 static rtx
34967 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34968 {
34969 rtx reg;
34970
34971 if (mode == SFmode || mode == DFmode)
34972 {
34973 rtx d = const_double_from_real_value (dconst, mode);
34974 reg = force_reg (mode, d);
34975 }
34976 else if (mode == V4SFmode)
34977 {
34978 rtx d = const_double_from_real_value (dconst, SFmode);
34979 rtvec v = gen_rtvec (4, d, d, d, d);
34980 reg = gen_reg_rtx (mode);
34981 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34982 }
34983 else if (mode == V2DFmode)
34984 {
34985 rtx d = const_double_from_real_value (dconst, DFmode);
34986 rtvec v = gen_rtvec (2, d, d);
34987 reg = gen_reg_rtx (mode);
34988 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34989 }
34990 else
34991 gcc_unreachable ();
34992
34993 return reg;
34994 }
34995
34996 /* Generate an FMA instruction. */
34997
34998 static void
34999 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35000 {
35001 machine_mode mode = GET_MODE (target);
35002 rtx dst;
35003
35004 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35005 gcc_assert (dst != NULL);
35006
35007 if (dst != target)
35008 emit_move_insn (target, dst);
35009 }
35010
35011 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35012
35013 static void
35014 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35015 {
35016 machine_mode mode = GET_MODE (dst);
35017 rtx r;
35018
35019 /* This is a tad more complicated, since the fnma_optab is for
35020 a different expression: fma(-m1, m2, a), which is the same
35021 thing except in the case of signed zeros.
35022
35023 Fortunately we know that if FMA is supported, FNMSUB is
35024 also supported in the ISA. Just expand it directly. */
35025
35026 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35027
35028 r = gen_rtx_NEG (mode, a);
35029 r = gen_rtx_FMA (mode, m1, m2, r);
35030 r = gen_rtx_NEG (mode, r);
35031 emit_insn (gen_rtx_SET (dst, r));
35032 }
35033
35034 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35035 add a reg_note saying that this was a division. Support both scalar and
35036 vector divide. Assumes no trapping math and finite arguments. */
35037
35038 void
35039 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35040 {
35041 machine_mode mode = GET_MODE (dst);
35042 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35043 int i;
35044
35045 /* Low precision estimates guarantee 5 bits of accuracy. High
35046 precision estimates guarantee 14 bits of accuracy. SFmode
35047 requires 23 bits of accuracy. DFmode requires 52 bits of
35048 accuracy. Each pass at least doubles the accuracy, leading
35049 to the following. */
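/* Concretely: a 5-bit estimate reaches 10, 20 and then 40 bits after the
three passes used below, covering SFmode's 23 bits, and the extra DFmode
pass reaches 80 bits for its 52; a 14-bit estimate needs only one pass
(28 >= 23 bits) for SFmode, or two (56 >= 52 bits) for DFmode. */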
35050 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35051 if (mode == DFmode || mode == V2DFmode)
35052 passes++;
35053
35054 enum insn_code code = optab_handler (smul_optab, mode);
35055 insn_gen_fn gen_mul = GEN_FCN (code);
35056
35057 gcc_assert (code != CODE_FOR_nothing);
35058
35059 one = rs6000_load_constant_and_splat (mode, dconst1);
35060
35061 /* x0 = 1./d estimate */
35062 x0 = gen_reg_rtx (mode);
35063 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35064 UNSPEC_FRES)));
35065
35066 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35067 if (passes > 1) {
35068
35069 /* e0 = 1. - d * x0 */
35070 e0 = gen_reg_rtx (mode);
35071 rs6000_emit_nmsub (e0, d, x0, one);
35072
35073 /* x1 = x0 + e0 * x0 */
35074 x1 = gen_reg_rtx (mode);
35075 rs6000_emit_madd (x1, e0, x0, x0);
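/* That is, x1 = x0 + (1 - d*x0)*x0 = x0*(2 - d*x0), one
Newton-Raphson step for 1/d. */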
35076
35077 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35078 ++i, xprev = xnext, eprev = enext) {
35079
35080 /* enext = eprev * eprev */
35081 enext = gen_reg_rtx (mode);
35082 emit_insn (gen_mul (enext, eprev, eprev));
35083
35084 /* xnext = xprev + enext * xprev */
35085 xnext = gen_reg_rtx (mode);
35086 rs6000_emit_madd (xnext, enext, xprev, xprev);
35087 }
35088
35089 } else
35090 xprev = x0;
35091
35092 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35093
35094 /* u = n * xprev */
35095 u = gen_reg_rtx (mode);
35096 emit_insn (gen_mul (u, n, xprev));
35097
35098 /* v = n - (d * u) */
35099 v = gen_reg_rtx (mode);
35100 rs6000_emit_nmsub (v, d, u, n);
35101
35102 /* dst = (v * xprev) + u */
35103 rs6000_emit_madd (dst, v, xprev, u);
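/* Expanding the two steps above: dst = v*xprev + u with u = n*xprev and
v = n - d*u, i.e. dst = n*xprev*(2 - d*xprev), the final Newton-Raphson
step with the multiplication by N folded in. */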
35104
35105 if (note_p)
35106 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35107 }
35108
35109 /* Goldschmidt's Algorithm for single/double-precision floating point
35110 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35111
35112 void
35113 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35114 {
35115 machine_mode mode = GET_MODE (src);
35116 rtx e = gen_reg_rtx (mode);
35117 rtx g = gen_reg_rtx (mode);
35118 rtx h = gen_reg_rtx (mode);
35119
35120 /* Low precision estimates guarantee 5 bits of accuracy. High
35121 precision estimates guarantee 14 bits of accuracy. SFmode
35122 requires 23 bits of accuracy. DFmode requires 52 bits of
35123 accuracy. Each pass at least doubles the accuracy, leading
35124 to the following. */
35125 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35126 if (mode == DFmode || mode == V2DFmode)
35127 passes++;
35128
35129 int i;
35130 rtx mhalf;
35131 enum insn_code code = optab_handler (smul_optab, mode);
35132 insn_gen_fn gen_mul = GEN_FCN (code);
35133
35134 gcc_assert (code != CODE_FOR_nothing);
35135
35136 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35137
35138 /* e = rsqrt estimate */
35139 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35140 UNSPEC_RSQRT)));
35141
35142 /* If SRC == 0.0, the rsqrt estimate is infinite; filter it to zero to prevent NaN for sqrt(0.0). */
35143 if (!recip)
35144 {
35145 rtx zero = force_reg (mode, CONST0_RTX (mode));
35146
35147 if (mode == SFmode)
35148 {
35149 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35150 e, zero, mode, 0);
35151 if (target != e)
35152 emit_move_insn (e, target);
35153 }
35154 else
35155 {
35156 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35157 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35158 }
35159 }
35160
35161 /* g = sqrt estimate. */
35162 emit_insn (gen_mul (g, e, src));
35163 /* h = 1/(2*sqrt) estimate. */
35164 emit_insn (gen_mul (h, e, mhalf));
35165
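/* A sketch of the Goldschmidt step used below: each pass computes
t = 1/2 - g*h and then updates g <- g + g*t, h <- h + h*t, driving g
toward sqrt(src) and h toward 1/(2*sqrt(src)). If g and h carry a
common relative error e, then g*h = (1+e)**2 / 2, so t is roughly -e
and the updated error is of order e*e: each pass roughly doubles the
number of accurate bits, matching the pass counts above. */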
35166 if (recip)
35167 {
35168 if (passes == 1)
35169 {
35170 rtx t = gen_reg_rtx (mode);
35171 rs6000_emit_nmsub (t, g, h, mhalf);
35172 /* Apply the correction directly to the rsqrt estimate E. */
35173 rs6000_emit_madd (dst, e, t, e);
35174 }
35175 else
35176 {
35177 for (i = 0; i < passes; i++)
35178 {
35179 rtx t1 = gen_reg_rtx (mode);
35180 rtx g1 = gen_reg_rtx (mode);
35181 rtx h1 = gen_reg_rtx (mode);
35182
35183 rs6000_emit_nmsub (t1, g, h, mhalf);
35184 rs6000_emit_madd (g1, g, t1, g);
35185 rs6000_emit_madd (h1, h, t1, h);
35186
35187 g = g1;
35188 h = h1;
35189 }
35190 /* H converges to 1/(2*sqrt(src)); multiply by 2 to obtain rsqrt. */
35191 emit_insn (gen_add3_insn (dst, h, h));
35192 }
35193 }
35194 else
35195 {
35196 rtx t = gen_reg_rtx (mode);
35197 rs6000_emit_nmsub (t, g, h, mhalf);
35198 rs6000_emit_madd (dst, g, t, g);
35199 }
35200
35201 return;
35202 }
35203
35204 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35205 (Power7) targets. DST is the target, and SRC is the argument operand. */
35206
35207 void
35208 rs6000_emit_popcount (rtx dst, rtx src)
35209 {
35210 machine_mode mode = GET_MODE (dst);
35211 rtx tmp1, tmp2;
35212
35213 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35214 if (TARGET_POPCNTD)
35215 {
35216 if (mode == SImode)
35217 emit_insn (gen_popcntdsi2 (dst, src));
35218 else
35219 emit_insn (gen_popcntddi2 (dst, src));
35220 return;
35221 }
35222
35223 tmp1 = gen_reg_rtx (mode);
35224
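/* The fallback below relies on popcntb leaving the population count of
each byte in that byte. Multiplying by 0x01010101 adds the value
shifted left by 0, 8, 16 and 24 bits, so the most significant byte
accumulates the sum of all four byte counts (at most 32, so no carry
between bytes); the final shift right by 24 extracts it. The DImode
path is analogous with eight bytes and a shift of 56. */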
35225 if (mode == SImode)
35226 {
35227 emit_insn (gen_popcntbsi2 (tmp1, src));
35228 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35229 NULL_RTX, 0);
35230 tmp2 = force_reg (SImode, tmp2);
35231 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35232 }
35233 else
35234 {
35235 emit_insn (gen_popcntbdi2 (tmp1, src));
35236 tmp2 = expand_mult (DImode, tmp1,
35237 GEN_INT ((HOST_WIDE_INT)
35238 0x01010101 << 32 | 0x01010101),
35239 NULL_RTX, 0);
35240 tmp2 = force_reg (DImode, tmp2);
35241 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35242 }
35243 }
35244
35245
35246 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35247 target, and SRC is the argument operand. */
35248
35249 void
35250 rs6000_emit_parity (rtx dst, rtx src)
35251 {
35252 machine_mode mode = GET_MODE (dst);
35253 rtx tmp;
35254
35255 tmp = gen_reg_rtx (mode);
35256
35257 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35258 if (TARGET_CMPB)
35259 {
35260 if (mode == SImode)
35261 {
35262 emit_insn (gen_popcntbsi2 (tmp, src));
35263 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35264 }
35265 else
35266 {
35267 emit_insn (gen_popcntbdi2 (tmp, src));
35268 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35269 }
35270 return;
35271 }
35272
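/* Fallback strategy: when the multiply is cheap we compute the full
popcount below and keep only its low bit; otherwise we fold the
per-byte counts from popcntb together with xor, which preserves the
low (parity) bit, until bit 0 of the low byte holds the parity of the
whole value. */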
35273 if (mode == SImode)
35274 {
35275 /* Is mult+shift >= shift+xor+shift+xor? */
35276 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35277 {
35278 rtx tmp1, tmp2, tmp3, tmp4;
35279
35280 tmp1 = gen_reg_rtx (SImode);
35281 emit_insn (gen_popcntbsi2 (tmp1, src));
35282
35283 tmp2 = gen_reg_rtx (SImode);
35284 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35285 tmp3 = gen_reg_rtx (SImode);
35286 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35287
35288 tmp4 = gen_reg_rtx (SImode);
35289 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35290 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35291 }
35292 else
35293 rs6000_emit_popcount (tmp, src);
35294 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35295 }
35296 else
35297 {
35298 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35299 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35300 {
35301 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35302
35303 tmp1 = gen_reg_rtx (DImode);
35304 emit_insn (gen_popcntbdi2 (tmp1, src));
35305
35306 tmp2 = gen_reg_rtx (DImode);
35307 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35308 tmp3 = gen_reg_rtx (DImode);
35309 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35310
35311 tmp4 = gen_reg_rtx (DImode);
35312 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35313 tmp5 = gen_reg_rtx (DImode);
35314 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35315
35316 tmp6 = gen_reg_rtx (DImode);
35317 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35318 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35319 }
35320 else
35321 rs6000_emit_popcount (tmp, src);
35322 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35323 }
35324 }
35325
35326 /* Expand an Altivec constant permutation for little endian mode.
35327 OP0 and OP1 are the input vectors and TARGET is the output vector.
35328 SEL specifies the constant permutation vector.
35329
35330 There are two issues: First, the two input operands must be
35331 swapped so that together they form a double-wide array in LE
35332 order. Second, the vperm instruction has surprising behavior
35333 in LE mode: it interprets the elements of the source vectors
35334 in BE mode ("left to right") and interprets the elements of
35335 the destination vector in LE mode ("right to left"). To
35336 correct for this, we must subtract each element of the permute
35337 control vector from 31.
35338
35339 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35340 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35341 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35342 serve as the permute control vector. Then, in BE mode,
35343
35344 vperm 9,10,11,12
35345
35346 places the desired result in vr9. However, in LE mode the
35347 vector contents will be
35348
35349 vr10 = 00000003 00000002 00000001 00000000
35350 vr11 = 00000007 00000006 00000005 00000004
35351
35352 The result of the vperm using the same permute control vector is
35353
35354 vr9 = 05000000 07000000 01000000 03000000
35355
35356 That is, the leftmost 4 bytes of vr10 are interpreted as the
35357 source for the rightmost 4 bytes of vr9, and so on.
35358
35359 If we change the permute control vector to
35360
35361 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35362
35363 and issue
35364
35365 vperm 9,11,10,12
35366
35367 we get the desired
35368
35369 vr9 = 00000006 00000004 00000002 00000000. */
35370
35371 static void
35372 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35373 const vec_perm_indices &sel)
35374 {
35375 unsigned int i;
35376 rtx perm[16];
35377 rtx constv, unspec;
35378
35379 /* Unpack and adjust the constant selector. */
35380 for (i = 0; i < 16; ++i)
35381 {
35382 unsigned int elt = 31 - (sel[i] & 31);
35383 perm[i] = GEN_INT (elt);
35384 }
35385
35386 /* Expand to a permute, swapping the inputs and using the
35387 adjusted selector. */
35388 if (!REG_P (op0))
35389 op0 = force_reg (V16QImode, op0);
35390 if (!REG_P (op1))
35391 op1 = force_reg (V16QImode, op1);
35392
35393 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35394 constv = force_reg (V16QImode, constv);
35395 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35396 UNSPEC_VPERM);
35397 if (!REG_P (target))
35398 {
35399 rtx tmp = gen_reg_rtx (V16QImode);
35400 emit_move_insn (tmp, unspec);
35401 unspec = tmp;
35402 }
35403
35404 emit_move_insn (target, unspec);
35405 }
35406
35407 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35408 permute control vector. But here it's not a constant, so we must
35409 generate a vector NAND or NOR to do the adjustment. */
35410
35411 void
35412 altivec_expand_vec_perm_le (rtx operands[4])
35413 {
35414 rtx notx, iorx, unspec;
35415 rtx target = operands[0];
35416 rtx op0 = operands[1];
35417 rtx op1 = operands[2];
35418 rtx sel = operands[3];
35419 rtx tmp = target;
35420 rtx norreg = gen_reg_rtx (V16QImode);
35421 machine_mode mode = GET_MODE (target);
35422
35423 /* Get everything in regs so the pattern matches. */
35424 if (!REG_P (op0))
35425 op0 = force_reg (mode, op0);
35426 if (!REG_P (op1))
35427 op1 = force_reg (mode, op1);
35428 if (!REG_P (sel))
35429 sel = force_reg (V16QImode, sel);
35430 if (!REG_P (target))
35431 tmp = gen_reg_rtx (mode);
35432
35433 if (TARGET_P9_VECTOR)
35434 {
35435 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35436 UNSPEC_VPERMR);
35437 }
35438 else
35439 {
35440 /* Invert the selector with a VNAND if available, else a VNOR.
35441 The VNAND is preferred for future fusion opportunities. */
35442 notx = gen_rtx_NOT (V16QImode, sel);
35443 iorx = (TARGET_P8_VECTOR
35444 ? gen_rtx_IOR (V16QImode, notx, notx)
35445 : gen_rtx_AND (V16QImode, notx, notx));
35446 emit_insn (gen_rtx_SET (norreg, iorx));
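/* Either form computes ~SEL. vperm reads only the low five bits of
each selector byte, and on those bits ~e equals 31 - e, which is
exactly the little-endian adjustment performed on constant selectors
in altivec_expand_vec_perm_const_le. */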
35447
35448 /* Permute with operands reversed and adjusted selector. */
35449 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35450 UNSPEC_VPERM);
35451 }
35452
35453 /* Copy into target, possibly by way of a register. */
35454 if (!REG_P (target))
35455 {
35456 emit_move_insn (tmp, unspec);
35457 unspec = tmp;
35458 }
35459
35460 emit_move_insn (target, unspec);
35461 }
35462
35463 /* Expand an Altivec constant permutation. Return true if we match
35464 an efficient implementation; false to fall back to VPERM.
35465
35466 OP0 and OP1 are the input vectors and TARGET is the output vector.
35467 SEL specifies the constant permutation vector. */
35468
35469 static bool
35470 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35471 const vec_perm_indices &sel)
35472 {
35473 struct altivec_perm_insn {
35474 HOST_WIDE_INT mask;
35475 enum insn_code impl;
35476 unsigned char perm[16];
35477 };
35478 static const struct altivec_perm_insn patterns[] = {
35479 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35480 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35481 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35482 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35483 { OPTION_MASK_ALTIVEC,
35484 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35485 : CODE_FOR_altivec_vmrglb_direct),
35486 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35487 { OPTION_MASK_ALTIVEC,
35488 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35489 : CODE_FOR_altivec_vmrglh_direct),
35490 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35491 { OPTION_MASK_ALTIVEC,
35492 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35493 : CODE_FOR_altivec_vmrglw_direct),
35494 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35495 { OPTION_MASK_ALTIVEC,
35496 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35497 : CODE_FOR_altivec_vmrghb_direct),
35498 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35499 { OPTION_MASK_ALTIVEC,
35500 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35501 : CODE_FOR_altivec_vmrghh_direct),
35502 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35503 { OPTION_MASK_ALTIVEC,
35504 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35505 : CODE_FOR_altivec_vmrghw_direct),
35506 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35507 { OPTION_MASK_P8_VECTOR,
35508 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35509 : CODE_FOR_p8_vmrgow_v4sf_direct),
35510 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35511 { OPTION_MASK_P8_VECTOR,
35512 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35513 : CODE_FOR_p8_vmrgew_v4sf_direct),
35514 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35515 };
35516
35517 unsigned int i, j, elt, which;
35518 unsigned char perm[16];
35519 rtx x;
35520 bool one_vec;
35521
35522 /* Unpack the constant selector. */
35523 for (i = which = 0; i < 16; ++i)
35524 {
35525 elt = sel[i] & 31;
35526 which |= (elt < 16 ? 1 : 2);
35527 perm[i] = elt;
35528 }
35529
35530 /* Simplify the constant selector based on operands. */
35531 switch (which)
35532 {
35533 default:
35534 gcc_unreachable ();
35535
35536 case 3:
35537 one_vec = false;
35538 if (!rtx_equal_p (op0, op1))
35539 break;
35540 /* FALLTHRU */
35541
35542 case 2:
35543 for (i = 0; i < 16; ++i)
35544 perm[i] &= 15;
35545 op0 = op1;
35546 one_vec = true;
35547 break;
35548
35549 case 1:
35550 op1 = op0;
35551 one_vec = true;
35552 break;
35553 }
35554
35555 /* Look for splat patterns. */
35556 if (one_vec)
35557 {
35558 elt = perm[0];
35559
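/* For example, sixteen copies of one byte index, {5,5,...,5}, become a
single vspltb; an even-aligned pair {e,e+1} repeated eight times maps
to vsplth, and a 4-aligned run {e,...,e+3} repeated four times maps
to vspltw. */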
35560 for (i = 0; i < 16; ++i)
35561 if (perm[i] != elt)
35562 break;
35563 if (i == 16)
35564 {
35565 if (!BYTES_BIG_ENDIAN)
35566 elt = 15 - elt;
35567 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35568 return true;
35569 }
35570
35571 if (elt % 2 == 0)
35572 {
35573 for (i = 0; i < 16; i += 2)
35574 if (perm[i] != elt || perm[i + 1] != elt + 1)
35575 break;
35576 if (i == 16)
35577 {
35578 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35579 x = gen_reg_rtx (V8HImode);
35580 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35581 GEN_INT (field)));
35582 emit_move_insn (target, gen_lowpart (V16QImode, x));
35583 return true;
35584 }
35585 }
35586
35587 if (elt % 4 == 0)
35588 {
35589 for (i = 0; i < 16; i += 4)
35590 if (perm[i] != elt
35591 || perm[i + 1] != elt + 1
35592 || perm[i + 2] != elt + 2
35593 || perm[i + 3] != elt + 3)
35594 break;
35595 if (i == 16)
35596 {
35597 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35598 x = gen_reg_rtx (V4SImode);
35599 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35600 GEN_INT (field)));
35601 emit_move_insn (target, gen_lowpart (V16QImode, x));
35602 return true;
35603 }
35604 }
35605 }
35606
35607 /* Look for merge and pack patterns. */
35608 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35609 {
35610 bool swapped;
35611
35612 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35613 continue;
35614
35615 elt = patterns[j].perm[0];
35616 if (perm[0] == elt)
35617 swapped = false;
35618 else if (perm[0] == elt + 16)
35619 swapped = true;
35620 else
35621 continue;
35622 for (i = 1; i < 16; ++i)
35623 {
35624 elt = patterns[j].perm[i];
35625 if (swapped)
35626 elt = (elt >= 16 ? elt - 16 : elt + 16);
35627 else if (one_vec && elt >= 16)
35628 elt -= 16;
35629 if (perm[i] != elt)
35630 break;
35631 }
35632 if (i == 16)
35633 {
35634 enum insn_code icode = patterns[j].impl;
35635 machine_mode omode = insn_data[icode].operand[0].mode;
35636 machine_mode imode = insn_data[icode].operand[1].mode;
35637
35638 /* For little-endian, don't use vpkuwum and vpkuhum if the
35639 underlying vector type is not V4SI and V8HI, respectively.
35640 For example, using vpkuwum with a V8HI picks up the even
35641 halfwords (BE numbering) when the even halfwords (LE
35642 numbering) are what we need. */
35643 if (!BYTES_BIG_ENDIAN
35644 && icode == CODE_FOR_altivec_vpkuwum_direct
35645 && ((REG_P (op0)
35646 && GET_MODE (op0) != V4SImode)
35647 || (SUBREG_P (op0)
35648 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35649 continue;
35650 if (!BYTES_BIG_ENDIAN
35651 && icode == CODE_FOR_altivec_vpkuhum_direct
35652 && ((REG_P (op0)
35653 && GET_MODE (op0) != V8HImode)
35654 || (SUBREG_P (op0)
35655 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35656 continue;
35657
35658 /* For little-endian, the two input operands must be swapped
35659 (or swapped back) to ensure proper right-to-left numbering
35660 from 0 to 2N-1. */
35661 if (swapped ^ !BYTES_BIG_ENDIAN)
35662 std::swap (op0, op1);
35663 if (imode != V16QImode)
35664 {
35665 op0 = gen_lowpart (imode, op0);
35666 op1 = gen_lowpart (imode, op1);
35667 }
35668 if (omode == V16QImode)
35669 x = target;
35670 else
35671 x = gen_reg_rtx (omode);
35672 emit_insn (GEN_FCN (icode) (x, op0, op1));
35673 if (omode != V16QImode)
35674 emit_move_insn (target, gen_lowpart (V16QImode, x));
35675 return true;
35676 }
35677 }
35678
35679 if (!BYTES_BIG_ENDIAN)
35680 {
35681 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35682 return true;
35683 }
35684
35685 return false;
35686 }
35687
35688 /* Expand a VSX Permute Doubleword constant permutation.
35689 Return true if we match an efficient implementation. */
35690
35691 static bool
35692 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35693 unsigned char perm0, unsigned char perm1)
35694 {
35695 rtx x;
35696
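/* PERM0 and PERM1 each index one of the four doublewords of op0 and op1
concatenated: bit 1 selects the operand and bit 0 the doubleword within
it. The cases below canonicalize the pair into the form accepted by
the xxpermdi-style VEC_SELECT built at the end. */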
35697 /* If both selectors come from the same operand, fold to single op. */
35698 if ((perm0 & 2) == (perm1 & 2))
35699 {
35700 if (perm0 & 2)
35701 op0 = op1;
35702 else
35703 op1 = op0;
35704 }
35705 /* If both operands are equal, fold to simpler permutation. */
35706 if (rtx_equal_p (op0, op1))
35707 {
35708 perm0 = perm0 & 1;
35709 perm1 = (perm1 & 1) + 2;
35710 }
35711 /* If the first selector comes from the second operand, swap. */
35712 else if (perm0 & 2)
35713 {
35714 if (perm1 & 2)
35715 return false;
35716 perm0 -= 2;
35717 perm1 += 2;
35718 std::swap (op0, op1);
35719 }
35720 /* If the second selector does not come from the second operand, fail. */
35721 else if ((perm1 & 2) == 0)
35722 return false;
35723
35724 /* Success! */
35725 if (target != NULL)
35726 {
35727 machine_mode vmode, dmode;
35728 rtvec v;
35729
35730 vmode = GET_MODE (target);
35731 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35732 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35733 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35734 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35735 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35736 emit_insn (gen_rtx_SET (target, x));
35737 }
35738 return true;
35739 }
35740
35741 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35742
35743 static bool
35744 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35745 rtx op1, const vec_perm_indices &sel)
35746 {
35747 bool testing_p = !target;
35748
35749 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35750 if (TARGET_ALTIVEC && testing_p)
35751 return true;
35752
35753 /* Check for ps_merge* or xxpermdi insns. */
35754 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35755 {
35756 if (testing_p)
35757 {
35758 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35759 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35760 }
35761 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35762 return true;
35763 }
35764
35765 if (TARGET_ALTIVEC)
35766 {
35767 /* Force the target-independent code to lower to V16QImode. */
35768 if (vmode != V16QImode)
35769 return false;
35770 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35771 return true;
35772 }
35773
35774 return false;
35775 }
35776
35777 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35778 OP0 and OP1 are the input vectors and TARGET is the output vector.
35779 PERM specifies the constant permutation vector. */
35780
35781 static void
35782 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35783 machine_mode vmode, const vec_perm_builder &perm)
35784 {
35785 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35786 if (x != target)
35787 emit_move_insn (target, x);
35788 }
35789
35790 /* Expand an extract even operation. */
35791
35792 void
35793 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35794 {
35795 machine_mode vmode = GET_MODE (target);
35796 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35797 vec_perm_builder perm (nelt, nelt, 1);
35798
35799 for (i = 0; i < nelt; i++)
35800 perm.quick_push (i * 2);
35801
35802 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35803 }
35804
35805 /* Expand a vector interleave operation. */
35806
35807 void
35808 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35809 {
35810 machine_mode vmode = GET_MODE (target);
35811 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35812 vec_perm_builder perm (nelt, nelt, 1);
35813
35814 high = (highp ? 0 : nelt / 2);
35815 for (i = 0; i < nelt / 2; i++)
35816 {
35817 perm.quick_push (i + high);
35818 perm.quick_push (i + nelt + high);
35819 }
35820
35821 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35822 }
35823
35824 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35825 void
35826 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35827 {
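/* For instance, SCALE == 3 splats 2.0**3 == 8.0 into both lanes of
scale_vec and multiplies SRC by that vector. */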
35828 HOST_WIDE_INT hwi_scale (scale);
35829 REAL_VALUE_TYPE r_pow;
35830 rtvec v = rtvec_alloc (2);
35831 rtx elt;
35832 rtx scale_vec = gen_reg_rtx (V2DFmode);
35833 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35834 elt = const_double_from_real_value (r_pow, DFmode);
35835 RTVEC_ELT (v, 0) = elt;
35836 RTVEC_ELT (v, 1) = elt;
35837 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35838 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35839 }
35840
35841 /* Return an RTX representing where to find the function value of a
35842 function returning MODE. */
35843 static rtx
35844 rs6000_complex_function_value (machine_mode mode)
35845 {
35846 unsigned int regno;
35847 rtx r1, r2;
35848 machine_mode inner = GET_MODE_INNER (mode);
35849 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35850
35851 if (TARGET_FLOAT128_TYPE
35852 && (mode == KCmode
35853 || (mode == TCmode && TARGET_IEEEQUAD)))
35854 regno = ALTIVEC_ARG_RETURN;
35855
35856 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35857 regno = FP_ARG_RETURN;
35858
35859 else
35860 {
35861 regno = GP_ARG_RETURN;
35862
35863 /* 32-bit is OK since it'll go in r3/r4. */
35864 if (TARGET_32BIT && inner_bytes >= 4)
35865 return gen_rtx_REG (mode, regno);
35866 }
35867
35868 if (inner_bytes >= 8)
35869 return gen_rtx_REG (mode, regno);
35870
35871 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35872 const0_rtx);
35873 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35874 GEN_INT (inner_bytes));
35875 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35876 }
35877
35878 /* Return an rtx describing a return value of MODE as a PARALLEL
35879 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35880 stride REG_STRIDE. */
35881
35882 static rtx
35883 rs6000_parallel_return (machine_mode mode,
35884 int n_elts, machine_mode elt_mode,
35885 unsigned int regno, unsigned int reg_stride)
35886 {
35887 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35888
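/* For example, a DImode value returned in r3/r4 under -m32 -mpowerpc64
becomes a PARALLEL of (reg:SI 3) at byte offset 0 and (reg:SI 4) at
byte offset 4. */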
35889 int i;
35890 for (i = 0; i < n_elts; i++)
35891 {
35892 rtx r = gen_rtx_REG (elt_mode, regno);
35893 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35894 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35895 regno += reg_stride;
35896 }
35897
35898 return par;
35899 }
35900
35901 /* Target hook for TARGET_FUNCTION_VALUE.
35902
35903 An integer value is in r3 and a floating-point value is in fp1,
35904 unless -msoft-float. */
35905
35906 static rtx
35907 rs6000_function_value (const_tree valtype,
35908 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35909 bool outgoing ATTRIBUTE_UNUSED)
35910 {
35911 machine_mode mode;
35912 unsigned int regno;
35913 machine_mode elt_mode;
35914 int n_elts;
35915
35916 /* Special handling for structs in darwin64. */
35917 if (TARGET_MACHO
35918 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35919 {
35920 CUMULATIVE_ARGS valcum;
35921 rtx valret;
35922
35923 valcum.words = 0;
35924 valcum.fregno = FP_ARG_MIN_REG;
35925 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35926 /* Do a trial code generation as if this were going to be passed as
35927 an argument; if any part goes in memory, we return NULL. */
35928 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35929 if (valret)
35930 return valret;
35931 /* Otherwise fall through to standard ABI rules. */
35932 }
35933
35934 mode = TYPE_MODE (valtype);
35935
35936 /* The ELFv2 ABI returns homogeneous floating-point and vector aggregates in registers. */
35937 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35938 {
35939 int first_reg, n_regs;
35940
35941 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35942 {
35943 /* _Decimal128 must use even/odd register pairs. */
35944 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35945 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35946 }
35947 else
35948 {
35949 first_reg = ALTIVEC_ARG_RETURN;
35950 n_regs = 1;
35951 }
35952
35953 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35954 }
35955
35956 /* Some return value types need to be split in the 32-bit ABI with -mpowerpc64. */
35957 if (TARGET_32BIT && TARGET_POWERPC64)
35958 switch (mode)
35959 {
35960 default:
35961 break;
35962 case E_DImode:
35963 case E_SCmode:
35964 case E_DCmode:
35965 case E_TCmode:
35966 int count = GET_MODE_SIZE (mode) / 4;
35967 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35968 }
35969
35970 if ((INTEGRAL_TYPE_P (valtype)
35971 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35972 || POINTER_TYPE_P (valtype))
35973 mode = TARGET_32BIT ? SImode : DImode;
35974
35975 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35976 /* _Decimal128 must use an even/odd register pair. */
35977 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35978 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35979 && !FLOAT128_VECTOR_P (mode))
35980 regno = FP_ARG_RETURN;
35981 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35982 && targetm.calls.split_complex_arg)
35983 return rs6000_complex_function_value (mode);
35984 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35985 return register is used in both cases, and we won't see V2DImode/V2DFmode
35986 for pure altivec, combine the two cases. */
35987 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35988 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35989 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35990 regno = ALTIVEC_ARG_RETURN;
35991 else
35992 regno = GP_ARG_RETURN;
35993
35994 return gen_rtx_REG (mode, regno);
35995 }
35996
35997 /* Define how to find the value returned by a library function
35998 assuming the value has mode MODE. */
35999 rtx
36000 rs6000_libcall_value (machine_mode mode)
36001 {
36002 unsigned int regno;
36003
36004 /* A long long return value needs to be split in the 32-bit ABI with -mpowerpc64. */
36005 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36006 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36007
36008 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36009 /* _Decimal128 must use an even/odd register pair. */
36010 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36011 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
36012 regno = FP_ARG_RETURN;
36013 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36014 return register is used in both cases, and we won't see V2DImode/V2DFmode
36015 for pure altivec, combine the two cases. */
36016 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36017 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36018 regno = ALTIVEC_ARG_RETURN;
36019 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36020 return rs6000_complex_function_value (mode);
36021 else
36022 regno = GP_ARG_RETURN;
36023
36024 return gen_rtx_REG (mode, regno);
36025 }
36026
36027 /* Compute register pressure classes. We implement the target hook to avoid
36028 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36029 lead to incorrect estimates of the number of available registers and
36030 therefore increased register pressure and spilling. */
36031 static int
36032 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36033 {
36034 int n;
36035
36036 n = 0;
36037 pressure_classes[n++] = GENERAL_REGS;
36038 if (TARGET_VSX)
36039 pressure_classes[n++] = VSX_REGS;
36040 else
36041 {
36042 if (TARGET_ALTIVEC)
36043 pressure_classes[n++] = ALTIVEC_REGS;
36044 if (TARGET_HARD_FLOAT)
36045 pressure_classes[n++] = FLOAT_REGS;
36046 }
36047 pressure_classes[n++] = CR_REGS;
36048 pressure_classes[n++] = SPECIAL_REGS;
36049
36050 return n;
36051 }
36052
36053 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36054 Frame pointer elimination is automatically handled.
36055
36056 For the RS/6000, if frame pointer elimination is being done, we would like
36057 to convert ap into fp, not sp.
36058
36059 We need r30 if -mminimal-toc was specified, and there are constant pool
36060 references. */
36061
36062 static bool
36063 rs6000_can_eliminate (const int from, const int to)
36064 {
36065 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36066 ? ! frame_pointer_needed
36067 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36068 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36069 || constant_pool_empty_p ()
36070 : true);
36071 }
36072
36073 /* Define the offset between two registers, FROM to be eliminated and its
36074 replacement TO, at the start of a routine. */
36075 HOST_WIDE_INT
36076 rs6000_initial_elimination_offset (int from, int to)
36077 {
36078 rs6000_stack_t *info = rs6000_stack_info ();
36079 HOST_WIDE_INT offset;
36080
36081 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36082 offset = info->push_p ? 0 : -info->total_size;
36083 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36084 {
36085 offset = info->push_p ? 0 : -info->total_size;
36086 if (FRAME_GROWS_DOWNWARD)
36087 offset += info->fixed_size + info->vars_size + info->parm_size;
36088 }
36089 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36090 offset = FRAME_GROWS_DOWNWARD
36091 ? info->fixed_size + info->vars_size + info->parm_size
36092 : 0;
36093 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36094 offset = info->total_size;
36095 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36096 offset = info->push_p ? info->total_size : 0;
36097 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36098 offset = 0;
36099 else
36100 gcc_unreachable ();
36101
36102 return offset;
36103 }
36104
36105 /* Fill in sizes of registers used by unwinder. */
36106
36107 static void
36108 rs6000_init_dwarf_reg_sizes_extra (tree address)
36109 {
36110 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36111 {
36112 int i;
36113 machine_mode mode = TYPE_MODE (char_type_node);
36114 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36115 rtx mem = gen_rtx_MEM (BLKmode, addr);
36116 rtx value = gen_int_mode (16, mode);
36117
36118 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36119 The unwinder still needs to know the size of Altivec registers. */
36120
36121 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36122 {
36123 int column = DWARF_REG_TO_UNWIND_COLUMN
36124 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36125 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36126
36127 emit_move_insn (adjust_address (mem, mode, offset), value);
36128 }
36129 }
36130 }
36131
36132 /* Map internal gcc register numbers to debug format register numbers.
36133 FORMAT specifies the type of debug register number to use:
36134 0 -- debug information, except for frame-related sections
36135 1 -- DWARF .debug_frame section
36136 2 -- DWARF .eh_frame section */
36137
36138 unsigned int
36139 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36140 {
36141 /* Except for the above, we use the internal number for non-DWARF
36142 debug information, and also for .eh_frame. */
36143 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36144 return regno;
36145
36146 /* On some platforms, we use the standard DWARF register
36147 numbering for .debug_info and .debug_frame. */
36148 #ifdef RS6000_USE_DWARF_NUMBERING
36149 if (regno <= 63)
36150 return regno;
36151 if (regno == LR_REGNO)
36152 return 108;
36153 if (regno == CTR_REGNO)
36154 return 109;
36155 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36156 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36157 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36158 to the DWARF reg for CR. */
36159 if (format == 1 && regno == CR2_REGNO)
36160 return 64;
36161 if (CR_REGNO_P (regno))
36162 return regno - CR0_REGNO + 86;
36163 if (regno == CA_REGNO)
36164 return 101; /* XER */
36165 if (ALTIVEC_REGNO_P (regno))
36166 return regno - FIRST_ALTIVEC_REGNO + 1124;
36167 if (regno == VRSAVE_REGNO)
36168 return 356;
36169 if (regno == VSCR_REGNO)
36170 return 67;
36171 #endif
36172 return regno;
36173 }
36174
36175 /* Target hook for eh_return_filter_mode. */
36176 static scalar_int_mode
36177 rs6000_eh_return_filter_mode (void)
36178 {
36179 return TARGET_32BIT ? SImode : word_mode;
36180 }
36181
36182 /* Target hook for translate_mode_attribute. */
36183 static machine_mode
36184 rs6000_translate_mode_attribute (machine_mode mode)
36185 {
36186 if ((FLOAT128_IEEE_P (mode)
36187 && ieee128_float_type_node == long_double_type_node)
36188 || (FLOAT128_IBM_P (mode)
36189 && ibm128_float_type_node == long_double_type_node))
36190 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36191 return mode;
36192 }
36193
36194 /* Target hook for scalar_mode_supported_p. */
36195 static bool
36196 rs6000_scalar_mode_supported_p (scalar_mode mode)
36197 {
36198 /* -m32 does not support TImode. This is the default, from
36199 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36200 same ABI as for -m32. But default_scalar_mode_supported_p allows
36201 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36202 for -mpowerpc64. */
36203 if (TARGET_32BIT && mode == TImode)
36204 return false;
36205
36206 if (DECIMAL_FLOAT_MODE_P (mode))
36207 return default_decimal_float_supported_p ();
36208 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36209 return true;
36210 else
36211 return default_scalar_mode_supported_p (mode);
36212 }
36213
36214 /* Target hook for vector_mode_supported_p. */
36215 static bool
36216 rs6000_vector_mode_supported_p (machine_mode mode)
36217 {
36218 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36219 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36220 double-double. */
36221 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36222 return true;
36223
36224 else
36225 return false;
36226 }
36227
36228 /* Target hook for floatn_mode. */
36229 static opt_scalar_float_mode
36230 rs6000_floatn_mode (int n, bool extended)
36231 {
36232 if (extended)
36233 {
36234 switch (n)
36235 {
36236 case 32:
36237 return DFmode;
36238
36239 case 64:
36240 if (TARGET_FLOAT128_TYPE)
36241 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36242 else
36243 return opt_scalar_float_mode ();
36244
36245 case 128:
36246 return opt_scalar_float_mode ();
36247
36248 default:
36249 /* Those are the only valid _FloatNx types. */
36250 gcc_unreachable ();
36251 }
36252 }
36253 else
36254 {
36255 switch (n)
36256 {
36257 case 32:
36258 return SFmode;
36259
36260 case 64:
36261 return DFmode;
36262
36263 case 128:
36264 if (TARGET_FLOAT128_TYPE)
36265 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36266 else
36267 return opt_scalar_float_mode ();
36268
36269 default:
36270 return opt_scalar_float_mode ();
36271 }
36272 }
36273
36274 }
36275
36276 /* Target hook for c_mode_for_suffix. */
36277 static machine_mode
36278 rs6000_c_mode_for_suffix (char suffix)
36279 {
36280 if (TARGET_FLOAT128_TYPE)
36281 {
36282 if (suffix == 'q' || suffix == 'Q')
36283 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36284
36285 /* At the moment, we are not defining a suffix for IBM extended double.
36286 If/when the default for -mabi=ieeelongdouble is changed, and we want
36287 to support __ibm128 constants in legacy library code, we may need to
36288 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36289 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36290 __float80 constants. */
36291 }
36292
36293 return VOIDmode;
36294 }
36295
36296 /* Target hook for invalid_arg_for_unprototyped_fn. */
36297 static const char *
36298 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36299 {
36300 return (!rs6000_darwin64_abi
36301 && typelist == 0
36302 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36303 && (funcdecl == NULL_TREE
36304 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36305 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36306 ? N_("AltiVec argument passed to unprototyped function")
36307 : NULL;
36308 }
36309
36310 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36311 setup by using the hidden function __stack_chk_fail_local instead of
36312 calling __stack_chk_fail directly. Otherwise it is better to call
36313 __stack_chk_fail directly. */
36314
36315 static tree ATTRIBUTE_UNUSED
36316 rs6000_stack_protect_fail (void)
36317 {
36318 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36319 ? default_hidden_stack_protect_fail ()
36320 : default_external_stack_protect_fail ();
36321 }
36322
36323 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36324
36325 #if TARGET_ELF
36326 static unsigned HOST_WIDE_INT
36327 rs6000_asan_shadow_offset (void)
36328 {
36329 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36330 }
36331 #endif
36332 \f
36333 /* Mask options that we want to support inside of attribute((target)) and
36334 #pragma GCC target operations. Note, we do not include things like
36335 64/32-bit, endianness, hard/soft floating point, etc. that would have
36336 different calling sequences. */
36337
36338 struct rs6000_opt_mask {
36339 const char *name; /* option name */
36340 HOST_WIDE_INT mask; /* mask to set */
36341 bool invert; /* invert sense of mask */
36342 bool valid_target; /* option is a target option */
36343 };
36344
36345 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36346 {
36347 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36348 { "cmpb", OPTION_MASK_CMPB, false, true },
36349 { "crypto", OPTION_MASK_CRYPTO, false, true },
36350 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36351 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36352 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36353 false, true },
36354 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36355 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36356 { "fprnd", OPTION_MASK_FPRND, false, true },
36357 { "hard-dfp", OPTION_MASK_DFP, false, true },
36358 { "htm", OPTION_MASK_HTM, false, true },
36359 { "isel", OPTION_MASK_ISEL, false, true },
36360 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36361 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36362 { "modulo", OPTION_MASK_MODULO, false, true },
36363 { "mulhw", OPTION_MASK_MULHW, false, true },
36364 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36365 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36366 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36367 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36368 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36369 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36370 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36371 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36372 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36373 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36374 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36375 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36376 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36377 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36378 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36379 { "string", 0, false, true },
36380   { "update",			OPTION_MASK_NO_UPDATE,		true, true },
36381 { "vsx", OPTION_MASK_VSX, false, true },
36382 #ifdef OPTION_MASK_64BIT
36383 #if TARGET_AIX_OS
36384 { "aix64", OPTION_MASK_64BIT, false, false },
36385 { "aix32", OPTION_MASK_64BIT, true, false },
36386 #else
36387 { "64", OPTION_MASK_64BIT, false, false },
36388 { "32", OPTION_MASK_64BIT, true, false },
36389 #endif
36390 #endif
36391 #ifdef OPTION_MASK_EABI
36392 { "eabi", OPTION_MASK_EABI, false, false },
36393 #endif
36394 #ifdef OPTION_MASK_LITTLE_ENDIAN
36395 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36396 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36397 #endif
36398 #ifdef OPTION_MASK_RELOCATABLE
36399 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36400 #endif
36401 #ifdef OPTION_MASK_STRICT_ALIGN
36402 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36403 #endif
36404 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36405 { "string", 0, false, false },
36406 };
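/* Editorial illustration (not part of the original source): entries
   with valid_target set may appear, comma separated and optionally
   "no-" prefixed, in the attribute and pragma forms, e.g.

       __attribute__((__target__("vsx,no-multiple"))) void f (void);
       #pragma GCC target ("popcntd")

   whereas entries such as "64"/"32" exist only so -mdebug=target can
   print them.  */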
36407
36408 /* Builtin mask mapping for printing the flags. */
36409 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36410 {
36411 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36412 { "vsx", RS6000_BTM_VSX, false, false },
36413 { "fre", RS6000_BTM_FRE, false, false },
36414 { "fres", RS6000_BTM_FRES, false, false },
36415 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36416 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36417 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36418 { "cell", RS6000_BTM_CELL, false, false },
36419 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36420 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36421 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36422 { "crypto", RS6000_BTM_CRYPTO, false, false },
36423 { "htm", RS6000_BTM_HTM, false, false },
36424 { "hard-dfp", RS6000_BTM_DFP, false, false },
36425 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36426 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36427 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36428 { "float128", RS6000_BTM_FLOAT128, false, false },
36429 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36430 };
36431
36432 /* Option variables that we want to support inside attribute((target)) and
36433 #pragma GCC target operations. */
36434
36435 struct rs6000_opt_var {
36436 const char *name; /* option name */
36437 size_t global_offset; /* offset of the option in global_options. */
36438 size_t target_offset; /* offset of the option in target options. */
36439 };
36440
36441 static struct rs6000_opt_var const rs6000_opt_vars[] =
36442 {
36443 { "friz",
36444 offsetof (struct gcc_options, x_TARGET_FRIZ),
36445 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36446 { "avoid-indexed-addresses",
36447 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36448 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36449 { "longcall",
36450 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36451 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36452 { "optimize-swaps",
36453 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36454 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36455 { "allow-movmisalign",
36456 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36457 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36458 { "sched-groups",
36459 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36460 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36461 { "always-hint",
36462 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36463 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36464 { "align-branch-targets",
36465 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36466 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36467 { "tls-markers",
36468 offsetof (struct gcc_options, x_tls_markers),
36469 offsetof (struct cl_target_option, x_tls_markers), },
36470 { "sched-prolog",
36471 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36472 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36473 { "sched-epilog",
36474 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36475 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36476 { "speculate-indirect-jumps",
36477 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36478 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36479 };
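/* Editorial illustration (not part of the original source): these
   variable options accept the same "no-" prefix, so for example

       __attribute__((__target__("no-longcall"))) void g (void);

   clears x_rs6000_default_long_calls for that function via the
   global_offset store in rs6000_inner_target_options below.  */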
36480
36481 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36482 parsing. Return true if there were no errors. */
36483
36484 static bool
36485 rs6000_inner_target_options (tree args, bool attr_p)
36486 {
36487 bool ret = true;
36488
36489 if (args == NULL_TREE)
36490 ;
36491
36492 else if (TREE_CODE (args) == STRING_CST)
36493 {
36494 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36495 char *q;
36496
36497 while ((q = strtok (p, ",")) != NULL)
36498 {
36499 bool error_p = false;
36500 bool not_valid_p = false;
36501 const char *cpu_opt = NULL;
36502
36503 p = NULL;
36504 if (strncmp (q, "cpu=", 4) == 0)
36505 {
36506 int cpu_index = rs6000_cpu_name_lookup (q+4);
36507 if (cpu_index >= 0)
36508 rs6000_cpu_index = cpu_index;
36509 else
36510 {
36511 error_p = true;
36512 cpu_opt = q+4;
36513 }
36514 }
36515 else if (strncmp (q, "tune=", 5) == 0)
36516 {
36517 int tune_index = rs6000_cpu_name_lookup (q+5);
36518 if (tune_index >= 0)
36519 rs6000_tune_index = tune_index;
36520 else
36521 {
36522 error_p = true;
36523 cpu_opt = q+5;
36524 }
36525 }
36526 else
36527 {
36528 size_t i;
36529 bool invert = false;
36530 char *r = q;
36531
36532 error_p = true;
36533 if (strncmp (r, "no-", 3) == 0)
36534 {
36535 invert = true;
36536 r += 3;
36537 }
36538
36539 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36540 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36541 {
36542 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36543
36544 if (!rs6000_opt_masks[i].valid_target)
36545 not_valid_p = true;
36546 else
36547 {
36548 error_p = false;
36549 rs6000_isa_flags_explicit |= mask;
36550
36551 /* VSX needs altivec, so -mvsx automagically sets
36552 altivec and disables -mavoid-indexed-addresses. */
36553 if (!invert)
36554 {
36555 if (mask == OPTION_MASK_VSX)
36556 {
36557 mask |= OPTION_MASK_ALTIVEC;
36558 TARGET_AVOID_XFORM = 0;
36559 }
36560 }
36561
36562 if (rs6000_opt_masks[i].invert)
36563 invert = !invert;
36564
36565 if (invert)
36566 rs6000_isa_flags &= ~mask;
36567 else
36568 rs6000_isa_flags |= mask;
36569 }
36570 break;
36571 }
36572
36573 if (error_p && !not_valid_p)
36574 {
36575 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36576 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36577 {
36578 size_t j = rs6000_opt_vars[i].global_offset;
36579 *((int *) ((char *)&global_options + j)) = !invert;
36580 error_p = false;
36581 not_valid_p = false;
36582 break;
36583 }
36584 }
36585 }
36586
36587 if (error_p)
36588 {
36589 const char *eprefix, *esuffix;
36590
36591 ret = false;
36592 if (attr_p)
36593 {
36594 eprefix = "__attribute__((__target__(";
36595 esuffix = ")))";
36596 }
36597 else
36598 {
36599 eprefix = "#pragma GCC target ";
36600 esuffix = "";
36601 }
36602
36603 if (cpu_opt)
36604 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36605 q, esuffix);
36606 else if (not_valid_p)
36607 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36608 else
36609 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36610 }
36611 }
36612 }
36613
36614 else if (TREE_CODE (args) == TREE_LIST)
36615 {
36616 do
36617 {
36618 tree value = TREE_VALUE (args);
36619 if (value)
36620 {
36621 bool ret2 = rs6000_inner_target_options (value, attr_p);
36622 if (!ret2)
36623 ret = false;
36624 }
36625 args = TREE_CHAIN (args);
36626 }
36627 while (args != NULL_TREE);
36628 }
36629
36630 else
36631 {
36632 error ("attribute %<target%> argument not a string");
36633 return false;
36634 }
36635
36636 return ret;
36637 }
36638
36639 /* Print out the target options as a list for -mdebug=target. */
36640
36641 static void
36642 rs6000_debug_target_options (tree args, const char *prefix)
36643 {
36644 if (args == NULL_TREE)
36645 fprintf (stderr, "%s<NULL>", prefix);
36646
36647 else if (TREE_CODE (args) == STRING_CST)
36648 {
36649 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36650 char *q;
36651
36652 while ((q = strtok (p, ",")) != NULL)
36653 {
36654 p = NULL;
36655 fprintf (stderr, "%s\"%s\"", prefix, q);
36656 prefix = ", ";
36657 }
36658 }
36659
36660 else if (TREE_CODE (args) == TREE_LIST)
36661 {
36662 do
36663 {
36664 tree value = TREE_VALUE (args);
36665 if (value)
36666 {
36667 rs6000_debug_target_options (value, prefix);
36668 prefix = ", ";
36669 }
36670 args = TREE_CHAIN (args);
36671 }
36672 while (args != NULL_TREE);
36673 }
36674
36675 else
36676 gcc_unreachable ();
36677
36678 return;
36679 }
36680
36681 \f
36682 /* Hook to validate attribute((target("..."))). */
36683
36684 static bool
36685 rs6000_valid_attribute_p (tree fndecl,
36686 tree ARG_UNUSED (name),
36687 tree args,
36688 int flags)
36689 {
36690 struct cl_target_option cur_target;
36691 bool ret;
36692 tree old_optimize;
36693 tree new_target, new_optimize;
36694 tree func_optimize;
36695
36696 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36697
36698 if (TARGET_DEBUG_TARGET)
36699 {
36700 tree tname = DECL_NAME (fndecl);
36701 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36702 if (tname)
36703 fprintf (stderr, "function: %.*s\n",
36704 (int) IDENTIFIER_LENGTH (tname),
36705 IDENTIFIER_POINTER (tname));
36706 else
36707 fprintf (stderr, "function: unknown\n");
36708
36709 fprintf (stderr, "args:");
36710 rs6000_debug_target_options (args, " ");
36711 fprintf (stderr, "\n");
36712
36713 if (flags)
36714 fprintf (stderr, "flags: 0x%x\n", flags);
36715
36716 fprintf (stderr, "--------------------\n");
36717 }
36718
36719 /* attribute((target("default"))) does nothing, beyond
36720 affecting multi-versioning. */
36721 if (TREE_VALUE (args)
36722 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36723 && TREE_CHAIN (args) == NULL_TREE
36724 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36725 return true;
36726
36727 old_optimize = build_optimization_node (&global_options);
36728 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36729
36730 /* If the function changed the optimization levels as well as setting target
36731 options, start with the optimizations specified. */
36732 if (func_optimize && func_optimize != old_optimize)
36733 cl_optimization_restore (&global_options,
36734 TREE_OPTIMIZATION (func_optimize));
36735
36736 /* The target attributes may also change some optimization flags, so update
36737 the optimization options if necessary. */
36738 cl_target_option_save (&cur_target, &global_options);
36739 rs6000_cpu_index = rs6000_tune_index = -1;
36740 ret = rs6000_inner_target_options (args, true);
36741
36742 /* Set up any additional state. */
36743 if (ret)
36744 {
36745 ret = rs6000_option_override_internal (false);
36746 new_target = build_target_option_node (&global_options);
36747 }
36748 else
36749 new_target = NULL;
36750
36751 new_optimize = build_optimization_node (&global_options);
36752
36753 if (!new_target)
36754 ret = false;
36755
36756 else if (fndecl)
36757 {
36758 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36759
36760 if (old_optimize != new_optimize)
36761 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36762 }
36763
36764 cl_target_option_restore (&global_options, &cur_target);
36765
36766 if (old_optimize != new_optimize)
36767 cl_optimization_restore (&global_options,
36768 TREE_OPTIMIZATION (old_optimize));
36769
36770 return ret;
36771 }
36772
36773 \f
36774 /* Hook to validate the current #pragma GCC target and set the state, and
36775 update the macros based on what was changed. If ARGS is NULL, then
36776 POP_TARGET is used to reset the options. */
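/* Editorial illustration (not part of the original source): the usual
   pattern this hook services is

       #pragma GCC push_options
       #pragma GCC target ("htm")
       // ... HTM-specific functions ...
       #pragma GCC pop_options

   where the pop arrives below with ARGS == NULL and POP_TARGET set to
   the saved options node.  */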
36777
36778 bool
36779 rs6000_pragma_target_parse (tree args, tree pop_target)
36780 {
36781 tree prev_tree = build_target_option_node (&global_options);
36782 tree cur_tree;
36783 struct cl_target_option *prev_opt, *cur_opt;
36784 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36785 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36786
36787 if (TARGET_DEBUG_TARGET)
36788 {
36789 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36790 fprintf (stderr, "args:");
36791 rs6000_debug_target_options (args, " ");
36792 fprintf (stderr, "\n");
36793
36794 if (pop_target)
36795 {
36796 fprintf (stderr, "pop_target:\n");
36797 debug_tree (pop_target);
36798 }
36799 else
36800 fprintf (stderr, "pop_target: <NULL>\n");
36801
36802 fprintf (stderr, "--------------------\n");
36803 }
36804
36805 if (! args)
36806 {
36807 cur_tree = ((pop_target)
36808 ? pop_target
36809 : target_option_default_node);
36810 cl_target_option_restore (&global_options,
36811 TREE_TARGET_OPTION (cur_tree));
36812 }
36813 else
36814 {
36815 rs6000_cpu_index = rs6000_tune_index = -1;
36816 if (!rs6000_inner_target_options (args, false)
36817 || !rs6000_option_override_internal (false)
36818 || (cur_tree = build_target_option_node (&global_options))
36819 == NULL_TREE)
36820 {
36821 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36822 fprintf (stderr, "invalid pragma\n");
36823
36824 return false;
36825 }
36826 }
36827
36828 target_option_current_node = cur_tree;
36829 rs6000_activate_target_options (target_option_current_node);
36830
36831 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36832 change the macros that are defined. */
36833 if (rs6000_target_modify_macros_ptr)
36834 {
36835 prev_opt = TREE_TARGET_OPTION (prev_tree);
36836 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36837 prev_flags = prev_opt->x_rs6000_isa_flags;
36838
36839 cur_opt = TREE_TARGET_OPTION (cur_tree);
36840 cur_flags = cur_opt->x_rs6000_isa_flags;
36841 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36842
36843 diff_bumask = (prev_bumask ^ cur_bumask);
36844 diff_flags = (prev_flags ^ cur_flags);
36845
36846 if ((diff_flags != 0) || (diff_bumask != 0))
36847 {
36848 /* Delete old macros. */
36849 rs6000_target_modify_macros_ptr (false,
36850 prev_flags & diff_flags,
36851 prev_bumask & diff_bumask);
36852
36853 /* Define new macros. */
36854 rs6000_target_modify_macros_ptr (true,
36855 cur_flags & diff_flags,
36856 cur_bumask & diff_bumask);
36857 }
36858 }
36859
36860 return true;
36861 }
36862
36863 \f
36864 /* Remember the last target of rs6000_set_current_function. */
36865 static GTY(()) tree rs6000_previous_fndecl;
36866
36867 /* Restore target's globals from NEW_TREE and invalidate the
36868 rs6000_previous_fndecl cache. */
36869
36870 void
36871 rs6000_activate_target_options (tree new_tree)
36872 {
36873 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36874 if (TREE_TARGET_GLOBALS (new_tree))
36875 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36876 else if (new_tree == target_option_default_node)
36877 restore_target_globals (&default_target_globals);
36878 else
36879 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36880 rs6000_previous_fndecl = NULL_TREE;
36881 }
36882
36883 /* Establish appropriate back-end context for processing the function
36884 FNDECL. The argument might be NULL to indicate processing at top
36885 level, outside of any function scope. */
36886 static void
36887 rs6000_set_current_function (tree fndecl)
36888 {
36889 if (TARGET_DEBUG_TARGET)
36890 {
36891 fprintf (stderr, "\n==================== rs6000_set_current_function");
36892
36893 if (fndecl)
36894 fprintf (stderr, ", fndecl %s (%p)",
36895 (DECL_NAME (fndecl)
36896 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36897 : "<unknown>"), (void *)fndecl);
36898
36899 if (rs6000_previous_fndecl)
36900 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36901
36902 fprintf (stderr, "\n");
36903 }
36904
36905 /* Only change the context if the function changes. This hook is called
36906 several times in the course of compiling a function, and we don't want to
36907 slow things down too much or call target_reinit when it isn't safe. */
36908 if (fndecl == rs6000_previous_fndecl)
36909 return;
36910
36911 tree old_tree;
36912 if (rs6000_previous_fndecl == NULL_TREE)
36913 old_tree = target_option_current_node;
36914 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36915 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36916 else
36917 old_tree = target_option_default_node;
36918
36919 tree new_tree;
36920 if (fndecl == NULL_TREE)
36921 {
36922 if (old_tree != target_option_current_node)
36923 new_tree = target_option_current_node;
36924 else
36925 new_tree = NULL_TREE;
36926 }
36927 else
36928 {
36929 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36930 if (new_tree == NULL_TREE)
36931 new_tree = target_option_default_node;
36932 }
36933
36934 if (TARGET_DEBUG_TARGET)
36935 {
36936 if (new_tree)
36937 {
36938 fprintf (stderr, "\nnew fndecl target specific options:\n");
36939 debug_tree (new_tree);
36940 }
36941
36942 if (old_tree)
36943 {
36944 fprintf (stderr, "\nold fndecl target specific options:\n");
36945 debug_tree (old_tree);
36946 }
36947
36948 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36949 fprintf (stderr, "--------------------\n");
36950 }
36951
36952 if (new_tree && old_tree != new_tree)
36953 rs6000_activate_target_options (new_tree);
36954
36955 if (fndecl)
36956 rs6000_previous_fndecl = fndecl;
36957 }
36958
36959 \f
36960 /* Save the current options */
36961
36962 static void
36963 rs6000_function_specific_save (struct cl_target_option *ptr,
36964 struct gcc_options *opts)
36965 {
36966 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36967 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36968 }
36969
36970 /* Restore the current options */
36971
36972 static void
36973 rs6000_function_specific_restore (struct gcc_options *opts,
36974 struct cl_target_option *ptr)
36975
36976 {
36977 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36978 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36979 (void) rs6000_option_override_internal (false);
36980 }
36981
36982 /* Print the current options */
36983
36984 static void
36985 rs6000_function_specific_print (FILE *file, int indent,
36986 struct cl_target_option *ptr)
36987 {
36988 rs6000_print_isa_options (file, indent, "Isa options set",
36989 ptr->x_rs6000_isa_flags);
36990
36991 rs6000_print_isa_options (file, indent, "Isa options explicit",
36992 ptr->x_rs6000_isa_flags_explicit);
36993 }
36994
36995 /* Helper function to print the current isa or misc options on a line. */
36996
36997 static void
36998 rs6000_print_options_internal (FILE *file,
36999 int indent,
37000 const char *string,
37001 HOST_WIDE_INT flags,
37002 const char *prefix,
37003 const struct rs6000_opt_mask *opts,
37004 size_t num_elements)
37005 {
37006 size_t i;
37007 size_t start_column = 0;
37008 size_t cur_column;
37009 size_t max_column = 120;
37010 size_t prefix_len = strlen (prefix);
37011 size_t comma_len = 0;
37012 const char *comma = "";
37013
37014 if (indent)
37015 start_column += fprintf (file, "%*s", indent, "");
37016
37017 if (!flags)
37018 {
37019       fprintf (file, DEBUG_FMT_S, string, "<none>");
37020 return;
37021 }
37022
37023   start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37024
37025 /* Print the various mask options. */
37026 cur_column = start_column;
37027 for (i = 0; i < num_elements; i++)
37028 {
37029 bool invert = opts[i].invert;
37030 const char *name = opts[i].name;
37031 const char *no_str = "";
37032 HOST_WIDE_INT mask = opts[i].mask;
37033 size_t len = comma_len + prefix_len + strlen (name);
37034
37035 if (!invert)
37036 {
37037 if ((flags & mask) == 0)
37038 {
37039 no_str = "no-";
37040 len += sizeof ("no-") - 1;
37041 }
37042
37043 flags &= ~mask;
37044 }
37045
37046 else
37047 {
37048 if ((flags & mask) != 0)
37049 {
37050 no_str = "no-";
37051 len += sizeof ("no-") - 1;
37052 }
37053
37054 flags |= mask;
37055 }
37056
37057 cur_column += len;
37058 if (cur_column > max_column)
37059 {
37060 	  fprintf (file, ", \\\n%*s", (int)start_column, "");
37061 cur_column = start_column + len;
37062 comma = "";
37063 }
37064
37065 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37066 comma = ", ";
37067 comma_len = sizeof (", ") - 1;
37068 }
37069
37070 fputs ("\n", file);
37071 }
37072
37073 /* Helper function to print the current isa options on a line. */
37074
37075 static void
37076 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37077 HOST_WIDE_INT flags)
37078 {
37079 rs6000_print_options_internal (file, indent, string, flags, "-m",
37080 &rs6000_opt_masks[0],
37081 ARRAY_SIZE (rs6000_opt_masks));
37082 }
37083
37084 static void
37085 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37086 HOST_WIDE_INT flags)
37087 {
37088 rs6000_print_options_internal (file, indent, string, flags, "",
37089 &rs6000_builtin_mask_names[0],
37090 ARRAY_SIZE (rs6000_builtin_mask_names));
37091 }
37092
37093 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37094 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37095 -mupper-regs-df, etc.).
37096
37097 If the user used -mno-power8-vector, we need to turn off all of the implicit
37098 ISA 2.07 and 3.0 options that relate to the vector unit.
37099
37100 If the user used -mno-power9-vector, we need to turn off all of the implicit
37101 ISA 3.0 options that relate to the vector unit.
37102
37103 This function does not handle explicit options such as the user specifying
37104 -mdirect-move. These are handled in rs6000_option_override_internal, and
37105 the appropriate error is given if needed.
37106
37107 We return a mask of all of the implicit options that should not be enabled
37108 by default. */
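/* Editorial illustration (not part of the original source): for example,
   "-mno-vsx -mpower8-vector" reaches the loop below with VSX explicitly
   off and a dependent mask explicitly on, producing something like

       error: '-mno-vsx' turns off '-mpower8-vector'

   while a merely implicit -mpower8-vector is silently cleared instead.  */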
37109
37110 static HOST_WIDE_INT
37111 rs6000_disable_incompatible_switches (void)
37112 {
37113 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37114 size_t i, j;
37115
37116 static const struct {
37117 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37118 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37119 const char *const name; /* name of the switch. */
37120 } flags[] = {
37121 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37122 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37123 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37124 };
37125
37126 for (i = 0; i < ARRAY_SIZE (flags); i++)
37127 {
37128 HOST_WIDE_INT no_flag = flags[i].no_flag;
37129
37130 if ((rs6000_isa_flags & no_flag) == 0
37131 && (rs6000_isa_flags_explicit & no_flag) != 0)
37132 {
37133 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37134 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37135 & rs6000_isa_flags
37136 & dep_flags);
37137
37138 if (set_flags)
37139 {
37140 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37141 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37142 {
37143 set_flags &= ~rs6000_opt_masks[j].mask;
37144 error ("%<-mno-%s%> turns off %<-m%s%>",
37145 flags[i].name,
37146 rs6000_opt_masks[j].name);
37147 }
37148
37149 gcc_assert (!set_flags);
37150 }
37151
37152 rs6000_isa_flags &= ~dep_flags;
37153 ignore_masks |= no_flag | dep_flags;
37154 }
37155 }
37156
37157 return ignore_masks;
37158 }
37159
37160 \f
37161 /* Helper function for printing the function name when debugging. */
37162
37163 static const char *
37164 get_decl_name (tree fn)
37165 {
37166 tree name;
37167
37168 if (!fn)
37169 return "<null>";
37170
37171 name = DECL_NAME (fn);
37172 if (!name)
37173 return "<no-name>";
37174
37175 return IDENTIFIER_POINTER (name);
37176 }
37177
37178 /* Return the clone id of the target we are compiling code for in a target
37179 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37180 the priority list for the target clones (ordered from lowest to
37181 highest). */
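/* Editorial illustration (not part of the original source): a typical
   use that exercises this priority scheme is

       __attribute__((target_clones ("cpu=power9,default")))
       long mod (long a, long b) { return a % b; }

   where the power9 clone is assigned a priority above CLONE_DEFAULT and
   so is preferred by the dispatcher when the hardware supports it.  */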
37182
37183 static int
37184 rs6000_clone_priority (tree fndecl)
37185 {
37186 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37187 HOST_WIDE_INT isa_masks;
37188 int ret = CLONE_DEFAULT;
37189 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37190 const char *attrs_str = NULL;
37191
37192 attrs = TREE_VALUE (TREE_VALUE (attrs));
37193 attrs_str = TREE_STRING_POINTER (attrs);
37194
37195 /* Return priority zero for default function. Return the ISA needed for the
37196 function if it is not the default. */
37197 if (strcmp (attrs_str, "default") != 0)
37198 {
37199 if (fn_opts == NULL_TREE)
37200 fn_opts = target_option_default_node;
37201
37202 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37203 isa_masks = rs6000_isa_flags;
37204 else
37205 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37206
37207 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37208 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37209 break;
37210 }
37211
37212 if (TARGET_DEBUG_TARGET)
37213     fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
37214 get_decl_name (fndecl), ret);
37215
37216 return ret;
37217 }
37218
37219 /* This compares the priority of target features in function DECL1 and DECL2.
37220 It returns positive value if DECL1 is higher priority, negative value if
37221 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37222 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37223
37224 static int
37225 rs6000_compare_version_priority (tree decl1, tree decl2)
37226 {
37227 int priority1 = rs6000_clone_priority (decl1);
37228 int priority2 = rs6000_clone_priority (decl2);
37229 int ret = priority1 - priority2;
37230
37231 if (TARGET_DEBUG_TARGET)
37232 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37233 get_decl_name (decl1), get_decl_name (decl2), ret);
37234
37235 return ret;
37236 }
37237
37238 /* Make a dispatcher declaration for the multi-versioned function DECL.
37239 Calls to DECL function will be replaced with calls to the dispatcher
37240 by the front-end. Returns the decl of the dispatcher function. */
37241
37242 static tree
37243 rs6000_get_function_versions_dispatcher (void *decl)
37244 {
37245 tree fn = (tree) decl;
37246 struct cgraph_node *node = NULL;
37247 struct cgraph_node *default_node = NULL;
37248 struct cgraph_function_version_info *node_v = NULL;
37249 struct cgraph_function_version_info *first_v = NULL;
37250
37251 tree dispatch_decl = NULL;
37252
37253 struct cgraph_function_version_info *default_version_info = NULL;
37254 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37255
37256 if (TARGET_DEBUG_TARGET)
37257 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37258 get_decl_name (fn));
37259
37260 node = cgraph_node::get (fn);
37261 gcc_assert (node != NULL);
37262
37263 node_v = node->function_version ();
37264 gcc_assert (node_v != NULL);
37265
37266 if (node_v->dispatcher_resolver != NULL)
37267 return node_v->dispatcher_resolver;
37268
37269 /* Find the default version and make it the first node. */
37270 first_v = node_v;
37271 /* Go to the beginning of the chain. */
37272 while (first_v->prev != NULL)
37273 first_v = first_v->prev;
37274
37275 default_version_info = first_v;
37276 while (default_version_info != NULL)
37277 {
37278 const tree decl2 = default_version_info->this_node->decl;
37279 if (is_function_default_version (decl2))
37280 break;
37281 default_version_info = default_version_info->next;
37282 }
37283
37284 /* If there is no default node, just return NULL. */
37285 if (default_version_info == NULL)
37286 return NULL;
37287
37288 /* Make default info the first node. */
37289 if (first_v != default_version_info)
37290 {
37291 default_version_info->prev->next = default_version_info->next;
37292 if (default_version_info->next)
37293 default_version_info->next->prev = default_version_info->prev;
37294 first_v->prev = default_version_info;
37295 default_version_info->next = first_v;
37296 default_version_info->prev = NULL;
37297 }
37298
37299 default_node = default_version_info->this_node;
37300
37301 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37302 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37303 "target_clones attribute needs GLIBC (2.23 and newer) that "
37304 "exports hardware capability bits");
37305 #else
37306
37307 if (targetm.has_ifunc_p ())
37308 {
37309 struct cgraph_function_version_info *it_v = NULL;
37310 struct cgraph_node *dispatcher_node = NULL;
37311 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37312
37313 /* Right now, the dispatching is done via ifunc. */
37314 dispatch_decl = make_dispatcher_decl (default_node->decl);
37315
37316 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37317 gcc_assert (dispatcher_node != NULL);
37318 dispatcher_node->dispatcher_function = 1;
37319 dispatcher_version_info
37320 = dispatcher_node->insert_new_function_version ();
37321 dispatcher_version_info->next = default_version_info;
37322 dispatcher_node->definition = 1;
37323
37324 /* Set the dispatcher for all the versions. */
37325 it_v = default_version_info;
37326 while (it_v != NULL)
37327 {
37328 it_v->dispatcher_resolver = dispatch_decl;
37329 it_v = it_v->next;
37330 }
37331 }
37332 else
37333 {
37334 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37335 "multiversioning needs ifunc which is not supported "
37336 "on this target");
37337 }
37338 #endif
37339
37340 return dispatch_decl;
37341 }
37342
37343 /* Make the resolver function decl to dispatch the versions of a multi-
37344 versioned function, DEFAULT_DECL. Create an empty basic block in the
37345 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37346 function. */
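/* Editorial illustration (not part of the original source): the net
   effect is roughly what one would get from writing an ifunc by hand
   (names hypothetical):

       static void *foo_resolver (void);          // body built below
       void foo (void) __attribute__ ((ifunc ("foo_resolver")));

   except that the resolver name comes from clone_function_name and the
   alias is created through the cgraph machinery.  */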
37347
37348 static tree
37349 make_resolver_func (const tree default_decl,
37350 const tree dispatch_decl,
37351 basic_block *empty_bb)
37352 {
37353 /* Make the resolver function static. The resolver function returns
37354 void *. */
37355 tree decl_name = clone_function_name (default_decl, "resolver");
37356 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37357 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37358 tree decl = build_fn_decl (resolver_name, type);
37359 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37360
37361 DECL_NAME (decl) = decl_name;
37362 TREE_USED (decl) = 1;
37363 DECL_ARTIFICIAL (decl) = 1;
37364 DECL_IGNORED_P (decl) = 0;
37365 TREE_PUBLIC (decl) = 0;
37366 DECL_UNINLINABLE (decl) = 1;
37367
37368 /* Resolver is not external, body is generated. */
37369 DECL_EXTERNAL (decl) = 0;
37370 DECL_EXTERNAL (dispatch_decl) = 0;
37371
37372 DECL_CONTEXT (decl) = NULL_TREE;
37373 DECL_INITIAL (decl) = make_node (BLOCK);
37374 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37375
37376 /* Build result decl and add to function_decl. */
37377 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37378 DECL_ARTIFICIAL (t) = 1;
37379 DECL_IGNORED_P (t) = 1;
37380 DECL_RESULT (decl) = t;
37381
37382 gimplify_function_tree (decl);
37383 push_cfun (DECL_STRUCT_FUNCTION (decl));
37384 *empty_bb = init_lowered_empty_function (decl, false,
37385 profile_count::uninitialized ());
37386
37387 cgraph_node::add_new_function (decl, true);
37388 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37389
37390 pop_cfun ();
37391
37392 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37393 DECL_ATTRIBUTES (dispatch_decl)
37394 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37395
37396 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37397
37398 return decl;
37399 }
37400
37401 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37402 return a pointer to VERSION_DECL if we are running on a machine that
37403 supports the index CLONE_ISA hardware architecture bits. This function will
37404 be called during version dispatch to decide which function version to
37405 execute. It returns the basic block at the end, to which more conditions
37406 can be added. */
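/* Editorial illustration (not part of the original source): for a
   non-default clone this emits the GIMPLE equivalent of

       if (__builtin_cpu_supports ("arch_3_00"))   // string is a sketch
         return (void *) foo_power9_clone;

   with the false edge falling through to the next condition or to the
   default return.  */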
37407
37408 static basic_block
37409 add_condition_to_bb (tree function_decl, tree version_decl,
37410 int clone_isa, basic_block new_bb)
37411 {
37412 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37413
37414 gcc_assert (new_bb != NULL);
37415 gimple_seq gseq = bb_seq (new_bb);
37416
37417
37418 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37419 build_fold_addr_expr (version_decl));
37420 tree result_var = create_tmp_var (ptr_type_node);
37421 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37422 gimple *return_stmt = gimple_build_return (result_var);
37423
37424 if (clone_isa == CLONE_DEFAULT)
37425 {
37426 gimple_seq_add_stmt (&gseq, convert_stmt);
37427 gimple_seq_add_stmt (&gseq, return_stmt);
37428 set_bb_seq (new_bb, gseq);
37429 gimple_set_bb (convert_stmt, new_bb);
37430 gimple_set_bb (return_stmt, new_bb);
37431 pop_cfun ();
37432 return new_bb;
37433 }
37434
37435 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37436 tree cond_var = create_tmp_var (bool_int_type_node);
37437 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37438 const char *arg_str = rs6000_clone_map[clone_isa].name;
37439 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37440 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37441 gimple_call_set_lhs (call_cond_stmt, cond_var);
37442
37443 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37444 gimple_set_bb (call_cond_stmt, new_bb);
37445 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37446
37447 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37448 NULL_TREE, NULL_TREE);
37449 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37450 gimple_set_bb (if_else_stmt, new_bb);
37451 gimple_seq_add_stmt (&gseq, if_else_stmt);
37452
37453 gimple_seq_add_stmt (&gseq, convert_stmt);
37454 gimple_seq_add_stmt (&gseq, return_stmt);
37455 set_bb_seq (new_bb, gseq);
37456
37457 basic_block bb1 = new_bb;
37458 edge e12 = split_block (bb1, if_else_stmt);
37459 basic_block bb2 = e12->dest;
37460 e12->flags &= ~EDGE_FALLTHRU;
37461 e12->flags |= EDGE_TRUE_VALUE;
37462
37463 edge e23 = split_block (bb2, return_stmt);
37464 gimple_set_bb (convert_stmt, bb2);
37465 gimple_set_bb (return_stmt, bb2);
37466
37467 basic_block bb3 = e23->dest;
37468 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37469
37470 remove_edge (e23);
37471 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37472
37473 pop_cfun ();
37474 return bb3;
37475 }
37476
37477 /* This function generates the dispatch function for multi-versioned functions.
37478 DISPATCH_DECL is the function which will contain the dispatch logic.
37479 FNDECLS are the function choices for dispatch, and is a tree chain.
37480 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37481 code is generated. */
37482
37483 static int
37484 dispatch_function_versions (tree dispatch_decl,
37485 void *fndecls_p,
37486 basic_block *empty_bb)
37487 {
37488 int ix;
37489 tree ele;
37490 vec<tree> *fndecls;
37491 tree clones[CLONE_MAX];
37492
37493 if (TARGET_DEBUG_TARGET)
37494 fputs ("dispatch_function_versions, top\n", stderr);
37495
37496 gcc_assert (dispatch_decl != NULL
37497 && fndecls_p != NULL
37498 && empty_bb != NULL);
37499
37500 /* fndecls_p is actually a vector. */
37501 fndecls = static_cast<vec<tree> *> (fndecls_p);
37502
37503 /* At least one more version other than the default. */
37504 gcc_assert (fndecls->length () >= 2);
37505
37506 /* The first version in the vector is the default decl. */
37507 memset ((void *) clones, '\0', sizeof (clones));
37508 clones[CLONE_DEFAULT] = (*fndecls)[0];
37509
37510 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37511 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37512      __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37513 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37514 to insert the code here to do the call. */
37515
37516 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37517 {
37518 int priority = rs6000_clone_priority (ele);
37519 if (!clones[priority])
37520 clones[priority] = ele;
37521 }
37522
37523 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37524 if (clones[ix])
37525 {
37526 if (TARGET_DEBUG_TARGET)
37527 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37528 ix, get_decl_name (clones[ix]));
37529
37530 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37531 *empty_bb);
37532 }
37533
37534 return 0;
37535 }
37536
37537 /* Generate the dispatching code body to dispatch multi-versioned function
37538 DECL. The target hook is called to process the "target" attributes and
37539 provide the code to dispatch the right function at run-time. NODE points
37540 to the dispatcher decl whose body will be created. */
37541
37542 static tree
37543 rs6000_generate_version_dispatcher_body (void *node_p)
37544 {
37545 tree resolver;
37546 basic_block empty_bb;
37547 struct cgraph_node *node = (cgraph_node *) node_p;
37548 struct cgraph_function_version_info *ninfo = node->function_version ();
37549
37550 if (ninfo->dispatcher_resolver)
37551 return ninfo->dispatcher_resolver;
37552
37553 /* node is going to be an alias, so remove the finalized bit. */
37554 node->definition = false;
37555
37556 /* The first version in the chain corresponds to the default version. */
37557 ninfo->dispatcher_resolver = resolver
37558 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37559
37560 if (TARGET_DEBUG_TARGET)
37561     fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37562 get_decl_name (resolver));
37563
37564 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37565 auto_vec<tree, 2> fn_ver_vec;
37566
37567 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37568 vinfo;
37569 vinfo = vinfo->next)
37570 {
37571 struct cgraph_node *version = vinfo->this_node;
37572 /* Check for virtual functions here again, as by this time it should
37573 have been determined if this function needs a vtable index or
37574 not. This happens for methods in derived classes that override
37575 virtual methods in base classes but are not explicitly marked as
37576 virtual. */
37577 if (DECL_VINDEX (version->decl))
37578 	sorry ("virtual function multiversioning not supported");
37579
37580 fn_ver_vec.safe_push (version->decl);
37581 }
37582
37583 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37584 cgraph_edge::rebuild_edges ();
37585 pop_cfun ();
37586 return resolver;
37587 }
37588
37589 \f
37590 /* Hook to determine if one function can safely inline another. */
37591
37592 static bool
37593 rs6000_can_inline_p (tree caller, tree callee)
37594 {
37595 bool ret = false;
37596 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37597 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37598
37599 /* If callee has no option attributes, then it is ok to inline. */
37600 if (!callee_tree)
37601 ret = true;
37602
37603 /* If caller has no option attributes, but callee does then it is not ok to
37604 inline. */
37605 else if (!caller_tree)
37606 ret = false;
37607
37608 else
37609 {
37610 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37611 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37612
37613       /* Callee's options should be a subset of the caller's, i.e. a vsx function
37614 can inline an altivec function but a non-vsx function can't inline a
37615 vsx function. */
37616 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37617 == callee_opts->x_rs6000_isa_flags)
37618 ret = true;
37619 }
37620
37621 if (TARGET_DEBUG_TARGET)
37622 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37623 get_decl_name (caller), get_decl_name (callee),
37624 (ret ? "can" : "cannot"));
37625
37626 return ret;
37627 }
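/* Editorial illustration (not part of the original source): under the
   subset test above, a caller marked target("vsx,altivec") may inline a
   callee marked target("altivec"), but a caller with no target
   attribute built with plain options may not inline that callee, since
   the callee's flags would not be a subset of the caller's.  */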
37628 \f
37629 /* Allocate a stack temp and fix up the address so it meets the particular
37630    memory requirements (either offsettable or REG+REG addressing).  */
37631
37632 rtx
37633 rs6000_allocate_stack_temp (machine_mode mode,
37634 bool offsettable_p,
37635 bool reg_reg_p)
37636 {
37637 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37638 rtx addr = XEXP (stack, 0);
37639 int strict_p = reload_completed;
37640
37641 if (!legitimate_indirect_address_p (addr, strict_p))
37642 {
37643 if (offsettable_p
37644 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37645 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37646
37647 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37648 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37649 }
37650
37651 return stack;
37652 }
37653
37654 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37655 convert to such a form to deal with memory reference instructions
37656 like STFIWX and LDBRX that only take reg+reg addressing. */
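/* Editorial illustration (not part of the original source): an operand
   such as (mem (plus (reg Rbase) (const_int 16))) headed for stfiwx is
   rewritten here so the address is first forced into a register, giving
   the reg-indirect form those X-form instructions require.  */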
37657
37658 rtx
37659 rs6000_force_indexed_or_indirect_mem (rtx x)
37660 {
37661 machine_mode mode = GET_MODE (x);
37662
37663 gcc_assert (MEM_P (x));
37664 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37665 {
37666 rtx addr = XEXP (x, 0);
37667 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37668 {
37669 rtx reg = XEXP (addr, 0);
37670 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37671 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37672 gcc_assert (REG_P (reg));
37673 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37674 addr = reg;
37675 }
37676 else if (GET_CODE (addr) == PRE_MODIFY)
37677 {
37678 rtx reg = XEXP (addr, 0);
37679 rtx expr = XEXP (addr, 1);
37680 gcc_assert (REG_P (reg));
37681 gcc_assert (GET_CODE (expr) == PLUS);
37682 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37683 addr = reg;
37684 }
37685
37686 x = replace_equiv_address (x, force_reg (Pmode, addr));
37687 }
37688
37689 return x;
37690 }
37691
37692 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37693
37694    On the RS/6000, all integer constants are acceptable, though most won't be
37695    valid for particular insns.  Only easy FP constants are acceptable.  */
37696
37697 static bool
37698 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37699 {
37700 if (TARGET_ELF && tls_referenced_p (x))
37701 return false;
37702
37703 if (CONST_DOUBLE_P (x))
37704 return easy_fp_constant (x, mode);
37705
37706 if (GET_CODE (x) == CONST_VECTOR)
37707 return easy_vector_constant (x, mode);
37708
37709 return true;
37710 }
37711
37712 \f
37713 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37714
37715 static bool
37716 chain_already_loaded (rtx_insn *last)
37717 {
37718 for (; last != NULL; last = PREV_INSN (last))
37719 {
37720 if (NONJUMP_INSN_P (last))
37721 {
37722 rtx patt = PATTERN (last);
37723
37724 if (GET_CODE (patt) == SET)
37725 {
37726 rtx lhs = XEXP (patt, 0);
37727
37728 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37729 return true;
37730 }
37731 }
37732 }
37733 return false;
37734 }
37735
37736 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37737
37738 void
37739 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37740 {
37741 rtx func = func_desc;
37742 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37743 rtx toc_load = NULL_RTX;
37744 rtx toc_restore = NULL_RTX;
37745 rtx func_addr;
37746 rtx abi_reg = NULL_RTX;
37747 rtx call[4];
37748 int n_call;
37749 rtx insn;
37750
37751 if (global_tlsarg)
37752 tlsarg = global_tlsarg;
37753
37754 /* Handle longcall attributes. */
37755 if ((INTVAL (cookie) & CALL_LONG) != 0
37756 && GET_CODE (func_desc) == SYMBOL_REF)
37757 func = rs6000_longcall_ref (func_desc, tlsarg);
37758
37759 /* Handle indirect calls. */
37760 if (!SYMBOL_REF_P (func)
37761 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37762 {
37763 /* Save the TOC into its reserved slot before the call,
37764 and prepare to restore it after the call. */
37765 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37766 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37767 gen_rtvec (1, stack_toc_offset),
37768 UNSPEC_TOCSLOT);
37769 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37770
37771 /* Can we optimize saving the TOC in the prologue or
37772 do we need to do it at every call? */
37773 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37774 cfun->machine->save_toc_in_prologue = true;
37775 else
37776 {
37777 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37778 rtx stack_toc_mem = gen_frame_mem (Pmode,
37779 gen_rtx_PLUS (Pmode, stack_ptr,
37780 stack_toc_offset));
37781 MEM_VOLATILE_P (stack_toc_mem) = 1;
37782 if (HAVE_AS_PLTSEQ
37783 && DEFAULT_ABI == ABI_ELFv2
37784 && GET_CODE (func_desc) == SYMBOL_REF)
37785 {
37786 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37787 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37788 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37789 }
37790 else
37791 emit_move_insn (stack_toc_mem, toc_reg);
37792 }
37793
37794 if (DEFAULT_ABI == ABI_ELFv2)
37795 {
37796 /* A function pointer in the ELFv2 ABI is just a plain address, but
37797 the ABI requires it to be loaded into r12 before the call. */
37798 func_addr = gen_rtx_REG (Pmode, 12);
37799 if (!rtx_equal_p (func_addr, func))
37800 emit_move_insn (func_addr, func);
37801 abi_reg = func_addr;
37802 /* Indirect calls via CTR are strongly preferred over indirect
37803 calls via LR, so move the address there. Needed to mark
37804 this insn for linker plt sequence editing too. */
37805 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37806 if (HAVE_AS_PLTSEQ
37807 && GET_CODE (func_desc) == SYMBOL_REF)
37808 {
37809 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37810 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37811 emit_insn (gen_rtx_SET (func_addr, mark_func));
37812 v = gen_rtvec (2, func_addr, func_desc);
37813 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37814 }
37815 else
37816 emit_move_insn (func_addr, abi_reg);
37817 }
37818 else
37819 {
37820 /* A function pointer under AIX is a pointer to a data area whose
37821 first word contains the actual address of the function, whose
37822 second word contains a pointer to its TOC, and whose third word
37823 contains a value to place in the static chain register (r11).
37824 Note that if we load the static chain, our "trampoline" need
37825 not have any executable code. */
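      /* Editorial illustration (not part of the original source): the
	 descriptor read below corresponds to this hypothetical C view:

	     struct aix_fn_desc { void *entry; void *toc; void *chain; };

	 the entry word is loaded here, the TOC word at GET_MODE_SIZE
	 (Pmode) just before the call, and the static-chain word at
	 twice that offset.  */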
37826
37827 /* Load up address of the actual function. */
37828 func = force_reg (Pmode, func);
37829 func_addr = gen_reg_rtx (Pmode);
37830 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37831
37832 /* Indirect calls via CTR are strongly preferred over indirect
37833 calls via LR, so move the address there. */
37834 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37835 emit_move_insn (ctr_reg, func_addr);
37836 func_addr = ctr_reg;
37837
37838 /* Prepare to load the TOC of the called function. Note that the
37839 TOC load must happen immediately before the actual call so
37840 that unwinding the TOC registers works correctly. See the
37841 comment in frob_update_context. */
37842 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37843 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37844 gen_rtx_PLUS (Pmode, func,
37845 func_toc_offset));
37846 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37847
37848 /* If we have a static chain, load it up. But, if the call was
37849 originally direct, the 3rd word has not been written since no
37850 trampoline has been built, so we ought not to load it, lest we
37851 override a static chain value. */
37852 if (!(GET_CODE (func_desc) == SYMBOL_REF
37853 && SYMBOL_REF_FUNCTION_P (func_desc))
37854 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37855 && !chain_already_loaded (get_current_sequence ()->next->last))
37856 {
37857 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37858 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37859 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37860 gen_rtx_PLUS (Pmode, func,
37861 func_sc_offset));
37862 emit_move_insn (sc_reg, func_sc_mem);
37863 abi_reg = sc_reg;
37864 }
37865 }
37866 }
37867 else
37868 {
37869 /* Direct calls use the TOC: for local calls, the callee will
37870 assume the TOC register is set; for non-local calls, the
37871 PLT stub needs the TOC register. */
37872 abi_reg = toc_reg;
37873 func_addr = func;
37874 }
37875
37876 /* Create the call. */
37877 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37878 if (value != NULL_RTX)
37879 call[0] = gen_rtx_SET (value, call[0]);
37880 n_call = 1;
37881
37882 if (toc_load)
37883 call[n_call++] = toc_load;
37884 if (toc_restore)
37885 call[n_call++] = toc_restore;
37886
37887 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37888
37889 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37890 insn = emit_call_insn (insn);
37891
37892 /* Mention all registers defined by the ABI to hold information
37893 as uses in CALL_INSN_FUNCTION_USAGE. */
37894 if (abi_reg)
37895 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37896 }
37897
37898 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37899
37900 void
37901 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37902 {
37903 rtx call[2];
37904 rtx insn;
37905
37906 gcc_assert (INTVAL (cookie) == 0);
37907
37908 if (global_tlsarg)
37909 tlsarg = global_tlsarg;
37910
37911 /* Create the call. */
37912 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37913 if (value != NULL_RTX)
37914 call[0] = gen_rtx_SET (value, call[0]);
37915
37916 call[1] = simple_return_rtx;
37917
37918 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37919 insn = emit_call_insn (insn);
37920
37921 /* Note use of the TOC register. */
37922 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37923 }
37924
37925 /* Expand code to perform a call under the SYSV4 ABI. */
37926
37927 void
37928 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37929 {
37930 rtx func = func_desc;
37931 rtx func_addr;
37932 rtx call[4];
37933 rtx insn;
37934 rtx abi_reg = NULL_RTX;
37935 int n;
37936
37937 if (global_tlsarg)
37938 tlsarg = global_tlsarg;
37939
37940 /* Handle longcall attributes. */
37941 if ((INTVAL (cookie) & CALL_LONG) != 0
37942 && GET_CODE (func_desc) == SYMBOL_REF)
37943 {
37944 func = rs6000_longcall_ref (func_desc, tlsarg);
37945 /* If the longcall was implemented using PLT16 relocs, then r11
37946 needs to be valid at the call for lazy linking. */
37947 if (HAVE_AS_PLTSEQ)
37948 abi_reg = func;
37949 }
37950
37951 /* Handle indirect calls. */
37952 if (GET_CODE (func) != SYMBOL_REF)
37953 {
37954 func = force_reg (Pmode, func);
37955
37956 /* Indirect calls via CTR are strongly preferred over indirect
37957 calls via LR, so move the address there. Needed to mark
37958 this insn for linker plt sequence editing too. */
37959 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37960 if (HAVE_AS_PLTSEQ
37961 && GET_CODE (func_desc) == SYMBOL_REF)
37962 {
37963 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37964 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37965 emit_insn (gen_rtx_SET (func_addr, mark_func));
37966 v = gen_rtvec (2, func_addr, func_desc);
37967 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37968 }
37969 else
37970 emit_move_insn (func_addr, func);
37971 }
37972 else
37973 func_addr = func;
37974
37975 /* Create the call. */
37976 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37977 if (value != NULL_RTX)
37978 call[0] = gen_rtx_SET (value, call[0]);
37979
37980 call[1] = gen_rtx_USE (VOIDmode, cookie);
37981 n = 2;
37982 if (TARGET_SECURE_PLT
37983 && flag_pic
37984 && GET_CODE (func_addr) == SYMBOL_REF
37985 && !SYMBOL_REF_LOCAL_P (func_addr))
37986 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
37987
37988 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37989
37990 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
37991 insn = emit_call_insn (insn);
37992 if (abi_reg)
37993 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37994 }
37995
37996 /* Expand code to perform a sibling call under the SysV4 ABI. */
37997
37998 void
37999 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38000 {
38001 rtx func = func_desc;
38002 rtx func_addr;
38003 rtx call[3];
38004 rtx insn;
38005 rtx abi_reg = NULL_RTX;
38006
38007 if (global_tlsarg)
38008 tlsarg = global_tlsarg;
38009
38010 /* Handle longcall attributes. */
38011 if ((INTVAL (cookie) & CALL_LONG) != 0
38012 && GET_CODE (func_desc) == SYMBOL_REF)
38013 {
38014 func = rs6000_longcall_ref (func_desc, tlsarg);
38015 /* If the longcall was implemented using PLT16 relocs, then r11
38016 needs to be valid at the call for lazy linking. */
38017 if (HAVE_AS_PLTSEQ)
38018 abi_reg = func;
38019 }
38020
38021 /* Handle indirect calls. */
38022 if (GET_CODE (func) != SYMBOL_REF)
38023 {
38024 func = force_reg (Pmode, func);
38025
38026 /* Indirect sibcalls must go via CTR. Needed to mark
38027 this insn for linker plt sequence editing too. */
38028 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38029 if (HAVE_AS_PLTSEQ
38030 && GET_CODE (func_desc) == SYMBOL_REF)
38031 {
38032 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38033 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38034 emit_insn (gen_rtx_SET (func_addr, mark_func));
38035 v = gen_rtvec (2, func_addr, func_desc);
38036 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38037 }
38038 else
38039 emit_move_insn (func_addr, func);
38040 }
38041 else
38042 func_addr = func;
38043
38044 /* Create the call. */
38045 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38046 if (value != NULL_RTX)
38047 call[0] = gen_rtx_SET (value, call[0]);
38048
38049 call[1] = gen_rtx_USE (VOIDmode, cookie);
38050 call[2] = simple_return_rtx;
38051
38052 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38053 insn = emit_call_insn (insn);
38054 if (abi_reg)
38055 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38056 }
38057
38058 #if TARGET_MACHO
38059
38060 /* Expand code to perform a call under the Darwin ABI.
38061 Modulo handling of mlongcall, this is much the same as sysv.
38062 If/when the longcall optimisation is removed, we could drop this
38063 code and use the sysv case (taking care to avoid the tls stuff).
38064
38065 We can use this for sibcalls too, if needed. */
38066
38067 void
38068 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38069 rtx cookie, bool sibcall)
38070 {
38071 rtx func = func_desc;
38072 rtx func_addr;
38073 rtx call[3];
38074 rtx insn;
38075 int cookie_val = INTVAL (cookie);
38076 bool make_island = false;
38077
38078 /* Handle longcall attributes, there are two cases for Darwin:
38079 1) Newer linkers are capable of synthesising any branch islands needed.
38080 2) We need a helper branch island synthesised by the compiler.
38081 The second case has mostly been retired and we don't use it for m64.
38082 In fact, it is an optimisation; we could just indirect as sysv does
38083 ... however, we keep it for backwards compatibility for now.
38084 If we're going to use this, then we need to keep the CALL_LONG bit set,
38085 so that we can pick up the special insn form later. */
38086 if ((cookie_val & CALL_LONG) != 0
38087 && GET_CODE (func_desc) == SYMBOL_REF)
38088 {
38089 if (darwin_emit_branch_islands && TARGET_32BIT)
38090 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38091 else
38092 {
38093 /* The linker is capable of doing this, but the user explicitly
38094 asked for -mlongcall, so we'll do the 'normal' version. */
38095 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38096 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38097 }
38098 }
38099
38100 /* Handle indirect calls. */
38101 if (GET_CODE (func) != SYMBOL_REF)
38102 {
38103 func = force_reg (Pmode, func);
38104
38105 /* Indirect calls via CTR are strongly preferred over indirect
38106 calls via LR, and are required for indirect sibcalls, so move
38107 the address there. */
38108 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38109 emit_move_insn (func_addr, func);
38110 }
38111 else
38112 func_addr = func;
38113
38114 /* Create the call. */
38115 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38116 if (value != NULL_RTX)
38117 call[0] = gen_rtx_SET (value, call[0]);
38118
38119 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38120
38121 if (sibcall)
38122 call[2] = simple_return_rtx;
38123 else
38124 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38125
38126 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38127 insn = emit_call_insn (insn);
38128 /* Now that we have the debug info in the insn, we can set up the branch
38129 island if we're using one. */
38130 if (make_island)
38131 {
38132 tree funname = get_identifier (XSTR (func_desc, 0));
38133
38134 if (no_previous_def (funname))
38135 {
38136 rtx label_rtx = gen_label_rtx ();
38137 char *label_buf, temp_buf[256];
38138 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38139 CODE_LABEL_NUMBER (label_rtx));
38140 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38141 tree labelname = get_identifier (label_buf);
38142 add_compiler_branch_island (labelname, funname,
38143 insn_line ((const rtx_insn*)insn));
38144 }
38145 }
38146 }
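
/* A hedged sketch of case 2 above: the near "bl" is redirected to a local
   island label, and the island (emitted later by the Mach-O support, not
   here) materialises the full 32-bit address and branches via CTR,
   conceptually:

	bl L_foo$island
	...
   L_foo$island:
	lis r12,hi16(_foo)
	ori r12,r12,lo16(_foo)
	mtctr r12
	bctr

   The label and register choice are illustrative only; the PIC variant
   differs.  */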
38147 #endif
38148
38149 void
38150 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38151 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38152 {
38153 #if TARGET_MACHO
38154 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38155 #else
38156 gcc_unreachable ();
38157 #endif
38158 }
38159
38160
38161 void
38162 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38163 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38164 {
38165 #if TARGET_MACHO
38166 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38167 #else
38168 gcc_unreachable ();
38169 #endif
38170 }
38171
38172
38173 /* Return whether we need to always update the saved TOC pointer when we update
38174 the stack pointer. */
38175
38176 static bool
38177 rs6000_save_toc_in_prologue_p (void)
38178 {
38179 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38180 }
38181
38182 #ifdef HAVE_GAS_HIDDEN
38183 # define USE_HIDDEN_LINKONCE 1
38184 #else
38185 # define USE_HIDDEN_LINKONCE 0
38186 #endif
38187
38188 /* Fills in the label name that should be used for a 476 link stack thunk. */
38189
38190 void
38191 get_ppc476_thunk_name (char name[32])
38192 {
38193 gcc_assert (TARGET_LINK_STACK);
38194
38195 if (USE_HIDDEN_LINKONCE)
38196 sprintf (name, "__ppc476.get_thunk");
38197 else
38198 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38199 }
38200
38201 /* This function emits the simple thunk routine that is used to preserve
38202 the link stack on the 476 cpu. */
38203
38204 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38205 static void
38206 rs6000_code_end (void)
38207 {
38208 char name[32];
38209 tree decl;
38210
38211 if (!TARGET_LINK_STACK)
38212 return;
38213
38214 get_ppc476_thunk_name (name);
38215
38216 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38217 build_function_type_list (void_type_node, NULL_TREE));
38218 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38219 NULL_TREE, void_type_node);
38220 TREE_PUBLIC (decl) = 1;
38221 TREE_STATIC (decl) = 1;
38222
38223 #if RS6000_WEAK
38224 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38225 {
38226 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38227 targetm.asm_out.unique_section (decl, 0);
38228 switch_to_section (get_named_section (decl, NULL, 0));
38229 DECL_WEAK (decl) = 1;
38230 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38231 targetm.asm_out.globalize_label (asm_out_file, name);
38232 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38233 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38234 }
38235 else
38236 #endif
38237 {
38238 switch_to_section (text_section);
38239 ASM_OUTPUT_LABEL (asm_out_file, name);
38240 }
38241
38242 DECL_INITIAL (decl) = make_node (BLOCK);
38243 current_function_decl = decl;
38244 allocate_struct_function (decl, false);
38245 init_function_start (decl);
38246 first_function_block_is_cold = false;
38247 /* Make sure unwind info is emitted for the thunk if needed. */
38248 final_start_function (emit_barrier (), asm_out_file, 1);
38249
38250 fputs ("\tblr\n", asm_out_file);
38251
38252 final_end_function ();
38253 init_insn_lengths ();
38254 free_after_compilation (cfun);
38255 set_cfun (NULL);
38256 current_function_decl = NULL;
38257 }
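
/* A sketch of why the thunk helps (assumption: the usual PIC-base idiom).
   Reading the current address with

	bcl 20,31,$+4
	mflr r30

   pushes an entry onto the 476's hardware link stack that no blr ever
   pops, desynchronising the return-address predictor.  Calling the thunk
   instead,

	bl __ppc476.get_thunk
	mflr r30

   keeps the link stack balanced: the thunk's single blr pops what the bl
   pushed, and LR still holds the address of the insn after the bl.  */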
38258
38259 /* Add r30 to hard reg set if the prologue sets it up and it is not
38260 pic_offset_table_rtx. */
38261
38262 static void
38263 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38264 {
38265 if (!TARGET_SINGLE_PIC_BASE
38266 && TARGET_TOC
38267 && TARGET_MINIMAL_TOC
38268 && !constant_pool_empty_p ())
38269 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38270 if (cfun->machine->split_stack_argp_used)
38271 add_to_hard_reg_set (&set->set, Pmode, 12);
38272
38273 /* Make sure the hard reg set doesn't include r2, which was possibly added
38274 via PIC_OFFSET_TABLE_REGNUM. */
38275 if (TARGET_TOC)
38276 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38277 }
38278
38279 \f
38280 /* Helper function for rs6000_split_logical to emit a logical instruction after
38281 splitting the operation into individual GPR registers.
38282
38283 DEST is the destination register.
38284 OP1 and OP2 are the input source registers.
38285 CODE is the base operation (AND, IOR, XOR, NOT).
38286 MODE is the machine mode.
38287 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38288 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38289 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38290
38291 static void
38292 rs6000_split_logical_inner (rtx dest,
38293 rtx op1,
38294 rtx op2,
38295 enum rtx_code code,
38296 machine_mode mode,
38297 bool complement_final_p,
38298 bool complement_op1_p,
38299 bool complement_op2_p)
38300 {
38301 rtx bool_rtx;
38302
38303 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38304 if (op2 && CONST_INT_P (op2)
38305 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38306 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38307 {
38308 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38309 HOST_WIDE_INT value = INTVAL (op2) & mask;
38310
38311 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38312 if (code == AND)
38313 {
38314 if (value == 0)
38315 {
38316 emit_insn (gen_rtx_SET (dest, const0_rtx));
38317 return;
38318 }
38319
38320 else if (value == mask)
38321 {
38322 if (!rtx_equal_p (dest, op1))
38323 emit_insn (gen_rtx_SET (dest, op1));
38324 return;
38325 }
38326 }
38327
38328 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38329 into separate ORI/ORIS or XORI/XORIS instructions. */
38330 else if (code == IOR || code == XOR)
38331 {
38332 if (value == 0)
38333 {
38334 if (!rtx_equal_p (dest, op1))
38335 emit_insn (gen_rtx_SET (dest, op1));
38336 return;
38337 }
38338 }
38339 }
38340
38341 if (code == AND && mode == SImode
38342 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38343 {
38344 emit_insn (gen_andsi3 (dest, op1, op2));
38345 return;
38346 }
38347
38348 if (complement_op1_p)
38349 op1 = gen_rtx_NOT (mode, op1);
38350
38351 if (complement_op2_p)
38352 op2 = gen_rtx_NOT (mode, op2);
38353
38354 /* For canonical RTL, if only one arm is inverted it is the first. */
38355 if (!complement_op1_p && complement_op2_p)
38356 std::swap (op1, op2);
38357
38358 bool_rtx = ((code == NOT)
38359 ? gen_rtx_NOT (mode, op1)
38360 : gen_rtx_fmt_ee (code, mode, op1, op2));
38361
38362 if (complement_final_p)
38363 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38364
38365 emit_insn (gen_rtx_SET (dest, bool_rtx));
38366 }
38367
38368 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38369 operations are split immediately during RTL generation to allow for more
38370 optimizations of the AND/IOR/XOR.
38371
38372 OPERANDS is an array containing the destination and two input operands.
38373 CODE is the base operation (AND, IOR, XOR, NOT).
38375 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38376 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38377 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38380
38381 static void
38382 rs6000_split_logical_di (rtx operands[3],
38383 enum rtx_code code,
38384 bool complement_final_p,
38385 bool complement_op1_p,
38386 bool complement_op2_p)
38387 {
38388 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C (0xffffffff);
38389 const HOST_WIDE_INT upper_32bits = ~lower_32bits;
38390 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C (0x80000000);
38391 enum hi_lo { hi = 0, lo = 1 };
38392 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38393 size_t i;
38394
38395 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38396 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38397 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38398 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38399
38400 if (code == NOT)
38401 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38402 else
38403 {
38404 if (!CONST_INT_P (operands[2]))
38405 {
38406 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38407 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38408 }
38409 else
38410 {
38411 HOST_WIDE_INT value = INTVAL (operands[2]);
38412 HOST_WIDE_INT value_hi_lo[2];
38413
38414 gcc_assert (!complement_final_p);
38415 gcc_assert (!complement_op1_p);
38416 gcc_assert (!complement_op2_p);
38417
38418 value_hi_lo[hi] = value >> 32;
38419 value_hi_lo[lo] = value & lower_32bits;
38420
38421 for (i = 0; i < 2; i++)
38422 {
38423 HOST_WIDE_INT sub_value = value_hi_lo[i];
38424
38425 if (sub_value & sign_bit)
38426 sub_value |= upper_32bits;
38427
38428 op2_hi_lo[i] = GEN_INT (sub_value);
38429
38430 /* If this is an AND instruction, check to see if we need to load
38431 the value in a register. */
38432 if (code == AND && sub_value != -1 && sub_value != 0
38433 && !and_operand (op2_hi_lo[i], SImode))
38434 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38435 }
38436 }
38437 }
38438
38439 for (i = 0; i < 2; i++)
38440 {
38441 /* Split large IOR/XOR operations. */
38442 if ((code == IOR || code == XOR)
38443 && CONST_INT_P (op2_hi_lo[i])
38444 && !complement_final_p
38445 && !complement_op1_p
38446 && !complement_op2_p
38447 && !logical_const_operand (op2_hi_lo[i], SImode))
38448 {
38449 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38450 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C (0xffff0000);
38451 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C (0x0000ffff);
38452 rtx tmp = gen_reg_rtx (SImode);
38453
38454 /* Make sure the constant is sign extended. */
38455 if ((hi_16bits & sign_bit) != 0)
38456 hi_16bits |= upper_32bits;
38457
38458 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38459 code, SImode, false, false, false);
38460
38461 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38462 code, SImode, false, false, false);
38463 }
38464 else
38465 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38466 code, SImode, complement_final_p,
38467 complement_op1_p, complement_op2_p);
38468 }
38469
38470 return;
38471 }
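
/* Example (sketch) of the IOR/XOR splitting above on a 32-bit target: a
   DImode XOR with 0x12345678 has a low word that is not a single 16-bit
   logical constant, so the low half goes through a temporary in two
   steps,

	xoris rT,rLO,0x1234	; hi_16bits
	xori  rD,rT,0x5678	; lo_16bits

   while the high half, XORed with 0, reduces to a plain move (or to
   nothing when the registers already match).  Register names are
   illustrative.  */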
38472
38473 /* Split the insns that make up boolean operations operating on multiple GPR
38474 registers. The boolean MD patterns ensure that the inputs either are
38475 exactly the same as the output registers, or there is no overlap.
38476
38477 OPERANDS is an array containing the destination and two input operands.
38478 CODE is the base operation (AND, IOR, XOR, NOT).
38479 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38480 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38481 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38482
38483 void
38484 rs6000_split_logical (rtx operands[3],
38485 enum rtx_code code,
38486 bool complement_final_p,
38487 bool complement_op1_p,
38488 bool complement_op2_p)
38489 {
38490 machine_mode mode = GET_MODE (operands[0]);
38491 machine_mode sub_mode;
38492 rtx op0, op1, op2;
38493 int sub_size, regno0, regno1, nregs, i;
38494
38495 /* If this is DImode, use the specialized version that can run before
38496 register allocation. */
38497 if (mode == DImode && !TARGET_POWERPC64)
38498 {
38499 rs6000_split_logical_di (operands, code, complement_final_p,
38500 complement_op1_p, complement_op2_p);
38501 return;
38502 }
38503
38504 op0 = operands[0];
38505 op1 = operands[1];
38506 op2 = (code == NOT) ? NULL_RTX : operands[2];
38507 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38508 sub_size = GET_MODE_SIZE (sub_mode);
38509 regno0 = REGNO (op0);
38510 regno1 = REGNO (op1);
38511
38512 gcc_assert (reload_completed);
38513 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38514 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38515
38516 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38517 gcc_assert (nregs > 1);
38518
38519 if (op2 && REG_P (op2))
38520 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38521
38522 for (i = 0; i < nregs; i++)
38523 {
38524 int offset = i * sub_size;
38525 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38526 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38527 rtx sub_op2 = ((code == NOT)
38528 ? NULL_RTX
38529 : simplify_subreg (sub_mode, op2, mode, offset));
38530
38531 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38532 complement_final_p, complement_op1_p,
38533 complement_op2_p);
38534 }
38535
38536 return;
38537 }
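
/* For instance (sketch), after reload on a 64-bit target a TImode AND held
   in the register pairs r10:r11 and r4:r5 with destination r10:r11 is
   split by the loop above into two independent DImode operations:

	and r10,r10,r4
	and r11,r11,r5

   This is safe because the MD patterns guarantee that the inputs either
   match the outputs exactly or do not overlap them.  */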
38538
38539 \f
38540 /* Return true if the peephole2 pass can combine an addis instruction and a
38541 load with an offset into a pair that can be fused together on a
38542 power8. */
38543
38544 bool
38545 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38546 rtx addis_value, /* addis value. */
38547 rtx target, /* target register that is loaded. */
38548 rtx mem) /* bottom part of the memory addr. */
38549 {
38550 rtx addr;
38551 rtx base_reg;
38552
38553 /* Validate arguments. */
38554 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38555 return false;
38556
38557 if (!base_reg_operand (target, GET_MODE (target)))
38558 return false;
38559
38560 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38561 return false;
38562
38563 /* Allow sign/zero extension. */
38564 if (GET_CODE (mem) == ZERO_EXTEND
38565 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38566 mem = XEXP (mem, 0);
38567
38568 if (!MEM_P (mem))
38569 return false;
38570
38571 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38572 return false;
38573
38574 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38575 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38576 return false;
38577
38578 /* Validate that the register used to load the high value is either the
38579 register being loaded, or we can safely replace its use.
38580
38581 This function is only called from the peephole2 pass, and we assume that
38582 there are 2 instructions in the peephole (addis and load), so we
38583 check that the target register is not used in the memory address and that
38584 the register holding the addis result is dead after the peephole. */
38585 if (REGNO (addis_reg) != REGNO (target))
38586 {
38587 if (reg_mentioned_p (target, mem))
38588 return false;
38589
38590 if (!peep2_reg_dead_p (2, addis_reg))
38591 return false;
38592
38593 /* If the target register being loaded is the stack pointer, we must
38594 avoid loading any other value into it, even temporarily. */
38595 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38596 return false;
38597 }
38598
38599 base_reg = XEXP (addr, 0);
38600 return REGNO (addis_reg) == REGNO (base_reg);
38601 }
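
/* Sketch of the transformation this predicate guards (symbol and register
   names illustrative): the peephole2 matches a pair such as

	addis r10,r2,sym@toc@ha		; addis_reg = r10
	lwz   r9,sym@toc@l(r10)		; target = r9, r10 dead afterwards

   and rewrites it to use the target register throughout,

	addis r9,r2,sym@toc@ha
	lwz   r9,sym@toc@l(r9)

   which is the back-to-back form the power8 decoder can fuse.  */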
38602
38603 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38604 sequence. We adjust the addis register to use the target register. If the
38605 load sign extends, we rewrite it as a zero-extending load followed by an
38606 explicit sign extension, since the fusion only covers zero-extending
38607 loads.
38608
38609 The operands are:
38610 operands[0] register set with addis (to be replaced with target)
38611 operands[1] value set via addis
38612 operands[2] target register being loaded
38613 operands[3] D-form memory reference using operands[0]. */
38614
38615 void
38616 expand_fusion_gpr_load (rtx *operands)
38617 {
38618 rtx addis_value = operands[1];
38619 rtx target = operands[2];
38620 rtx orig_mem = operands[3];
38621 rtx new_addr, new_mem, orig_addr, offset;
38622 enum rtx_code plus_or_lo_sum;
38623 machine_mode target_mode = GET_MODE (target);
38624 machine_mode extend_mode = target_mode;
38625 machine_mode ptr_mode = Pmode;
38626 enum rtx_code extend = UNKNOWN;
38627
38628 if (GET_CODE (orig_mem) == ZERO_EXTEND
38629 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38630 {
38631 extend = GET_CODE (orig_mem);
38632 orig_mem = XEXP (orig_mem, 0);
38633 target_mode = GET_MODE (orig_mem);
38634 }
38635
38636 gcc_assert (MEM_P (orig_mem));
38637
38638 orig_addr = XEXP (orig_mem, 0);
38639 plus_or_lo_sum = GET_CODE (orig_addr);
38640 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38641
38642 offset = XEXP (orig_addr, 1);
38643 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38644 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38645
38646 if (extend != UNKNOWN)
38647 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38648
38649 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38650 UNSPEC_FUSION_GPR);
38651 emit_insn (gen_rtx_SET (target, new_mem));
38652
38653 if (extend == SIGN_EXTEND)
38654 {
38655 int sub_off = ((BYTES_BIG_ENDIAN)
38656 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38657 : 0);
38658 rtx sign_reg
38659 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38660
38661 emit_insn (gen_rtx_SET (target,
38662 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38663 }
38664
38665 return;
38666 }
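
/* For example (sketch, names illustrative), a fused sign-extending HImode
   load is emitted as the zero-extending form wrapped in UNSPEC_FUSION_GPR
   plus an explicit sign_extend, assembling to roughly:

	addis r9,r2,sym@toc@ha
	lhz   r9,sym@toc@l(r9)
	extsh r9,r9

   since the power8 fusion hardware only recognises the addis /
   zero-extending-load pair.  */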
38667
38668 /* Emit the addis instruction that will be part of a fused instruction
38669 sequence. */
38670
38671 void
38672 emit_fusion_addis (rtx target, rtx addis_value)
38673 {
38674 rtx fuse_ops[10];
38675 const char *addis_str = NULL;
38676
38677 /* Emit the addis instruction. */
38678 fuse_ops[0] = target;
38679 if (satisfies_constraint_L (addis_value))
38680 {
38681 fuse_ops[1] = addis_value;
38682 addis_str = "lis %0,%v1";
38683 }
38684
38685 else if (GET_CODE (addis_value) == PLUS)
38686 {
38687 rtx op0 = XEXP (addis_value, 0);
38688 rtx op1 = XEXP (addis_value, 1);
38689
38690 if (REG_P (op0) && CONST_INT_P (op1)
38691 && satisfies_constraint_L (op1))
38692 {
38693 fuse_ops[1] = op0;
38694 fuse_ops[2] = op1;
38695 addis_str = "addis %0,%1,%v2";
38696 }
38697 }
38698
38699 else if (GET_CODE (addis_value) == HIGH)
38700 {
38701 rtx value = XEXP (addis_value, 0);
38702 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38703 {
38704 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38705 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38706 if (TARGET_ELF)
38707 addis_str = "addis %0,%2,%1@toc@ha";
38708
38709 else if (TARGET_XCOFF)
38710 addis_str = "addis %0,%1@u(%2)";
38711
38712 else
38713 gcc_unreachable ();
38714 }
38715
38716 else if (GET_CODE (value) == PLUS)
38717 {
38718 rtx op0 = XEXP (value, 0);
38719 rtx op1 = XEXP (value, 1);
38720
38721 if (GET_CODE (op0) == UNSPEC
38722 && XINT (op0, 1) == UNSPEC_TOCREL
38723 && CONST_INT_P (op1))
38724 {
38725 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38726 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38727 fuse_ops[3] = op1;
38728 if (TARGET_ELF)
38729 addis_str = "addis %0,%2,%1+%3@toc@ha";
38730
38731 else if (TARGET_XCOFF)
38732 addis_str = "addis %0,%1+%3@u(%2)";
38733
38734 else
38735 gcc_unreachable ();
38736 }
38737 }
38738
38739 else if (satisfies_constraint_L (value))
38740 {
38741 fuse_ops[1] = value;
38742 addis_str = "lis %0,%v1";
38743 }
38744
38745 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38746 {
38747 fuse_ops[1] = value;
38748 addis_str = "lis %0,%1@ha";
38749 }
38750 }
38751
38752 if (!addis_str)
38753 fatal_insn ("Could not generate addis value for fusion", addis_value);
38754
38755 output_asm_insn (addis_str, fuse_ops);
38756 }
38757
38758 /* Emit a D-form load or store instruction that is the second instruction
38759 of a fusion sequence. */
38760
38761 static void
38762 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38763 {
38764 rtx fuse_ops[10];
38765 char insn_template[80];
38766
38767 fuse_ops[0] = load_reg;
38768 fuse_ops[1] = addis_reg;
38769
38770 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38771 {
38772 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38773 fuse_ops[2] = offset;
38774 output_asm_insn (insn_template, fuse_ops);
38775 }
38776
38777 else if (GET_CODE (offset) == UNSPEC
38778 && XINT (offset, 1) == UNSPEC_TOCREL)
38779 {
38780 if (TARGET_ELF)
38781 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38782
38783 else if (TARGET_XCOFF)
38784 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38785
38786 else
38787 gcc_unreachable ();
38788
38789 fuse_ops[2] = XVECEXP (offset, 0, 0);
38790 output_asm_insn (insn_template, fuse_ops);
38791 }
38792
38793 else if (GET_CODE (offset) == PLUS
38794 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38795 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38796 && CONST_INT_P (XEXP (offset, 1)))
38797 {
38798 rtx tocrel_unspec = XEXP (offset, 0);
38799 if (TARGET_ELF)
38800 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38801
38802 else if (TARGET_XCOFF)
38803 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38804
38805 else
38806 gcc_unreachable ();
38807
38808 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38809 fuse_ops[3] = XEXP (offset, 1);
38810 output_asm_insn (insn_template, fuse_ops);
38811 }
38812
38813 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38814 {
38815 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38816
38817 fuse_ops[2] = offset;
38818 output_asm_insn (insn_template, fuse_ops);
38819 }
38820
38821 else
38822 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38823
38824 return;
38825 }
38826
38827 /* Given an address, convert it into the addis and load offset parts. Addresses
38828 created during the peephole2 process look like:
38829 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38830 (unspec [(...)] UNSPEC_TOCREL)) */
38831
38832 static void
38833 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38834 {
38835 rtx hi, lo;
38836
38837 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38838 {
38839 hi = XEXP (addr, 0);
38840 lo = XEXP (addr, 1);
38841 }
38842 else
38843 gcc_unreachable ();
38844
38845 *p_hi = hi;
38846 *p_lo = lo;
38847 }
38848
38849 /* Return a string to fuse an addis instruction with a GPR load into the same
38850 register that the addis instruction set up. The address that is used
38851 is the logical address that was formed during peephole2:
38852 (lo_sum (high) (low-part))
38853
38854 The code is complicated, so we call output_asm_insn directly, and just
38855 return "". */
38856
38857 const char *
38858 emit_fusion_gpr_load (rtx target, rtx mem)
38859 {
38860 rtx addis_value;
38861 rtx addr;
38862 rtx load_offset;
38863 const char *load_str = NULL;
38864 machine_mode mode;
38865
38866 if (GET_CODE (mem) == ZERO_EXTEND)
38867 mem = XEXP (mem, 0);
38868
38869 gcc_assert (REG_P (target) && MEM_P (mem));
38870
38871 addr = XEXP (mem, 0);
38872 fusion_split_address (addr, &addis_value, &load_offset);
38873
38874 /* Now emit the load instruction to the same register. */
38875 mode = GET_MODE (mem);
38876 switch (mode)
38877 {
38878 case E_QImode:
38879 load_str = "lbz";
38880 break;
38881
38882 case E_HImode:
38883 load_str = "lhz";
38884 break;
38885
38886 case E_SImode:
38887 case E_SFmode:
38888 load_str = "lwz";
38889 break;
38890
38891 case E_DImode:
38892 case E_DFmode:
38893 gcc_assert (TARGET_POWERPC64);
38894 load_str = "ld";
38895 break;
38896
38897 default:
38898 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38899 }
38900
38901 /* Emit the addis instruction. */
38902 emit_fusion_addis (target, addis_value);
38903
38904 /* Emit the D-form load instruction. */
38905 emit_fusion_load (target, target, load_offset, load_str);
38906
38907 return "";
38908 }
38909 \f
38910
38911 #ifdef RS6000_GLIBC_ATOMIC_FENV
38912 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38913 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38914 #endif
38915
38916 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38917
38918 static void
38919 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38920 {
38921 if (!TARGET_HARD_FLOAT)
38922 {
38923 #ifdef RS6000_GLIBC_ATOMIC_FENV
38924 if (atomic_hold_decl == NULL_TREE)
38925 {
38926 atomic_hold_decl
38927 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38928 get_identifier ("__atomic_feholdexcept"),
38929 build_function_type_list (void_type_node,
38930 double_ptr_type_node,
38931 NULL_TREE));
38932 TREE_PUBLIC (atomic_hold_decl) = 1;
38933 DECL_EXTERNAL (atomic_hold_decl) = 1;
38934 }
38935
38936 if (atomic_clear_decl == NULL_TREE)
38937 {
38938 atomic_clear_decl
38939 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38940 get_identifier ("__atomic_feclearexcept"),
38941 build_function_type_list (void_type_node,
38942 NULL_TREE));
38943 TREE_PUBLIC (atomic_clear_decl) = 1;
38944 DECL_EXTERNAL (atomic_clear_decl) = 1;
38945 }
38946
38947 tree const_double = build_qualified_type (double_type_node,
38948 TYPE_QUAL_CONST);
38949 tree const_double_ptr = build_pointer_type (const_double);
38950 if (atomic_update_decl == NULL_TREE)
38951 {
38952 atomic_update_decl
38953 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38954 get_identifier ("__atomic_feupdateenv"),
38955 build_function_type_list (void_type_node,
38956 const_double_ptr,
38957 NULL_TREE));
38958 TREE_PUBLIC (atomic_update_decl) = 1;
38959 DECL_EXTERNAL (atomic_update_decl) = 1;
38960 }
38961
38962 tree fenv_var = create_tmp_var_raw (double_type_node);
38963 TREE_ADDRESSABLE (fenv_var) = 1;
38964 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38965
38966 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38967 *clear = build_call_expr (atomic_clear_decl, 0);
38968 *update = build_call_expr (atomic_update_decl, 1,
38969 fold_convert (const_double_ptr, fenv_addr));
38970 #endif
38971 return;
38972 }
38973
38974 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38975 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38976 tree call_mffs = build_call_expr (mffs, 0);
38977
38978 /* Generates the equivalent of feholdexcept (&fenv_var)
38979
38980 *fenv_var = __builtin_mffs ();
38981 double fenv_hold;
38982 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38983 __builtin_mtfsf (0xff, fenv_hold); */
38984
38985 /* Mask to clear everything except for the rounding modes and non-IEEE
38986 arithmetic flag. */
38987 const unsigned HOST_WIDE_INT hold_exception_mask =
38988 HOST_WIDE_INT_C (0xffffffff00000007);
38989
38990 tree fenv_var = create_tmp_var_raw (double_type_node);
38991
38992 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38993
38994 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38995 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38996 build_int_cst (uint64_type_node,
38997 hold_exception_mask));
38998
38999 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39000 fenv_llu_and);
39001
39002 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39003 build_int_cst (unsigned_type_node, 0xff),
39004 fenv_hold_mtfsf);
39005
39006 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39007
39008 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39009
39010 double fenv_clear = __builtin_mffs ();
39011 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39012 __builtin_mtfsf (0xff, fenv_clear); */
39013
39014 /* Mask that zeroes the whole FPSCR image in the low word of the mffs
39015 result, including the exception flags, enables and rounding modes. */
39016 const unsigned HOST_WIDE_INT clear_exception_mask =
39017 HOST_WIDE_INT_C (0xffffffff00000000);
39018
39019 tree fenv_clear = create_tmp_var_raw (double_type_node);
39020
39021 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39022
39023 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39024 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39025 fenv_clean_llu,
39026 build_int_cst (uint64_type_node,
39027 clear_exception_mask));
39028
39029 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39030 fenv_clear_llu_and);
39031
39032 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39033 build_int_cst (unsigned_type_node, 0xff),
39034 fenv_clear_mtfsf);
39035
39036 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39037
39038 /* Generates the equivalent of feupdateenv (&fenv_var)
39039
39040 double old_fenv = __builtin_mffs ();
39041 double fenv_update;
39042 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39043 (*(uint64_t*)fenv_var & 0x1ff80fff);
39044 __builtin_mtfsf (0xff, fenv_update); */
39045
39046 const unsigned HOST_WIDE_INT update_exception_mask =
39047 HOST_WIDE_INT_C (0xffffffff1fffff00);
39048 const unsigned HOST_WIDE_INT new_exception_mask =
39049 HOST_WIDE_INT_C (0x1ff80fff);
39050
39051 tree old_fenv = create_tmp_var_raw (double_type_node);
39052 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39053
39054 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39055 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39056 build_int_cst (uint64_type_node,
39057 update_exception_mask));
39058
39059 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39060 build_int_cst (uint64_type_node,
39061 new_exception_mask));
39062
39063 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39064 old_llu_and, new_llu_and);
39065
39066 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39067 new_llu_mask);
39068
39069 tree update_mtfsf = build_call_expr (mtfsf, 2,
39070 build_int_cst (unsigned_type_node, 0xff),
39071 fenv_update_mtfsf);
39072
39073 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39074 }
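
/* The hook above is used when the middle end expands a compound assignment
   to an atomic floating-point object, e.g. (user-code sketch):

	_Atomic double d;
	void f (double x) { d += x; }

   The compare-and-swap retry loop implementing "+=" is bracketed by the
   HOLD/CLEAR/UPDATE sequences built here, so that abandoned iterations do
   not leave spurious exception flags set in the FPSCR.  */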
39075
39076 void
39077 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39078 {
39079 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39080
39081 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39082 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39083
39084 /* The destination of the vmrgew instruction layout is:
39085 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39086 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39087 vmrgew instruction will be correct. */
39088 if (BYTES_BIG_ENDIAN)
39089 {
39090 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39091 GEN_INT (0)));
39092 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39093 GEN_INT (3)));
39094 }
39095 else
39096 {
39097 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39098 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39099 }
39100
39101 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39102 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39103
39104 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39105 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39106
39107 if (BYTES_BIG_ENDIAN)
39108 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39109 else
39110 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39111 }
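
/* Element flow for the routine above (sketch, big-endian numbering), with
   src1 = { a0, a1 } and src2 = { b0, b1 } in V2DFmode:

	xxpermdi:  tmp0 = { a0, b0 }        tmp1 = { a1, b1 }
	xvcvdpsp:  tmp2 = { A0, ?, B0, ? }  tmp3 = { A1, ?, B1, ? }
	vmrgew:    dst  = { A0, A1, B0, B1 }    (V4SFmode)

   where A and B denote the single-precision conversions of a and b, and
   "?" marks the undefined odd-word slots the conversion leaves.  */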
39112
39113 void
39114 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39115 {
39116 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39117
39118 rtx_tmp0 = gen_reg_rtx (V2DImode);
39119 rtx_tmp1 = gen_reg_rtx (V2DImode);
39120
39121 /* The destination of the vmrgew instruction layout is:
39122 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39123 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39124 vmrgew instruction will be correct. */
39125 if (BYTES_BIG_ENDIAN)
39126 {
39127 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39128 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39129 }
39130 else
39131 {
39132 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39133 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39134 }
39135
39136 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39137 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39138
39139 if (signed_convert)
39140 {
39141 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39142 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39143 }
39144 else
39145 {
39146 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39147 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39148 }
39149
39150 if (BYTES_BIG_ENDIAN)
39151 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39152 else
39153 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39154 }
39155
39156 void
39157 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39158 rtx src2)
39159 {
39160 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39161
39162 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39163 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39164
39165 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39166 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39167
39168 rtx_tmp2 = gen_reg_rtx (V4SImode);
39169 rtx_tmp3 = gen_reg_rtx (V4SImode);
39170
39171 if (signed_convert)
39172 {
39173 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39174 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39175 }
39176 else
39177 {
39178 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39179 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39180 }
39181
39182 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39183 }
39184
39185 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39186
39187 static bool
39188 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39189 optimization_type opt_type)
39190 {
39191 switch (op)
39192 {
39193 case rsqrt_optab:
39194 return (opt_type == OPTIMIZE_FOR_SPEED
39195 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39196
39197 default:
39198 return true;
39199 }
39200 }
39201
39202 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39203
39204 static HOST_WIDE_INT
39205 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39206 {
39207 if (TREE_CODE (exp) == STRING_CST
39208 && (STRICT_ALIGNMENT || !optimize_size))
39209 return MAX (align, BITS_PER_WORD);
39210 return align;
39211 }
39212
39213 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39214
39215 static HOST_WIDE_INT
39216 rs6000_starting_frame_offset (void)
39217 {
39218 if (FRAME_GROWS_DOWNWARD)
39219 return 0;
39220 return RS6000_STARTING_FRAME_OFFSET;
39221 }
39222 \f
39223
39224 /* Create an alias for a mangled name where we have changed the mangling (in
39225 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39226 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39227
39228 #if TARGET_ELF && RS6000_WEAK
39229 static void
39230 rs6000_globalize_decl_name (FILE * stream, tree decl)
39231 {
39232 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39233
39234 targetm.asm_out.globalize_label (stream, name);
39235
39236 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39237 {
39238 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39239 const char *old_name;
39240
39241 ieee128_mangling_gcc_8_1 = true;
39242 lang_hooks.set_decl_assembler_name (decl);
39243 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39244 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39245 ieee128_mangling_gcc_8_1 = false;
39246
39247 if (strcmp (name, old_name) != 0)
39248 {
39249 fprintf (stream, "\t.weak %s\n", old_name);
39250 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39251 }
39252 }
39253 }
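
/* Sketch of the effect (assuming rs6000_passes_ieee128, example mangled
   names): for a C++ function "void f (__float128)" the current mangling
   gives _Z1fu9__ieee128, and the code above additionally emits

	.weak _Z1fU10__float128
	.set _Z1fU10__float128,_Z1fu9__ieee128

   so that objects built with the GCC 8.1 mangling still link against the
   new symbol.  */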
39254 #endif
39255
39256 \f
39257 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39258 function names from <foo>l to <foo>f128 if the default long double type is
39259 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39260 include file switches the names on systems that support long double as IEEE
39261 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39262 In the future, glibc will export names like __ieee128_sinf128 and we can
39263 switch to using those instead of using sinf128, which pollutes the user's
39264 namespace.
39265
39266 This will switch the names for Fortran math functions as well (which doesn't
39267 use math.h). However, Fortran needs other changes to the compiler and
39268 library before you can switch the real*16 type at compile time.
39269
39270 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39271 only do this if the default is that long double is IBM extended double, and
39272 the user asked for IEEE 128-bit. */
39273
39274 static tree
39275 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39276 {
39277 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39278 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39279 {
39280 size_t len = IDENTIFIER_LENGTH (id);
39281 const char *name = IDENTIFIER_POINTER (id);
39282
39283 if (name[len - 1] == 'l')
39284 {
39285 bool uses_ieee128_p = false;
39286 tree type = TREE_TYPE (decl);
39287 machine_mode ret_mode = TYPE_MODE (type);
39288
39289 /* See if the function returns an IEEE 128-bit floating point type or
39290 complex type. */
39291 if (ret_mode == TFmode || ret_mode == TCmode)
39292 uses_ieee128_p = true;
39293 else
39294 {
39295 function_args_iterator args_iter;
39296 tree arg;
39297
39298 /* See if the function passes an IEEE 128-bit floating point type
39299 or complex type. */
39300 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39301 {
39302 machine_mode arg_mode = TYPE_MODE (arg);
39303 if (arg_mode == TFmode || arg_mode == TCmode)
39304 {
39305 uses_ieee128_p = true;
39306 break;
39307 }
39308 }
39309 }
39310
39311 /* If we passed or returned an IEEE 128-bit floating point type,
39312 change the name. */
39313 if (uses_ieee128_p)
39314 {
39315 char *name2 = (char *) alloca (len + 4);
39316 memcpy (name2, name, len - 1);
39317 strcpy (name2 + len - 1, "f128");
39318 id = get_identifier (name2);
39319 }
39320 }
39321 }
39322
39323 return id;
39324 }
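
/* Example of the renaming above (assuming -mabi=ieeelongdouble on a target
   whose default long double is IBM extended double): a call to
   __builtin_sinl has a TFmode result, so its assembler name "sinl" is
   rewritten to "sinf128" and resolves to the IEEE 128-bit
   implementation.  */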
39325
39326 \f
39327 struct gcc_target targetm = TARGET_INITIALIZER;
39328
39329 #include "gt-rs6000.h"