/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;  /* stack info won't change from here on */
  int first_gp_reg_save;  /* first callee saved GP register used */
  int first_fp_reg_save;  /* first callee saved FP register used */
  int first_altivec_reg_save;  /* first callee saved AltiVec register used */
  int lr_save_p;  /* true if the link reg needs to be saved */
  int cr_save_p;  /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;  /* mask of vec registers to save */
  int push_p;  /* true if we need to allocate stack space */
  int calls_p;  /* true if the function makes any calls */
  int world_save_p;  /* true if we're saving *everything*:
			r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;  /* which ABI to use */
  int gp_save_offset;  /* offset to save GP regs from initial SP */
  int fp_save_offset;  /* offset to save FP regs from initial SP */
  int altivec_save_offset;  /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;  /* offset to save LR from initial SP */
  int cr_save_offset;  /* offset to save CR from initial SP */
  int vrsave_save_offset;  /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;  /* offset to save the varargs registers */
  int ehrd_offset;  /* offset to EH return data */
  int ehcr_offset;  /* offset to EH CR field data */
  int reg_size;  /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;  /* variable save area size */
  int parm_size;  /* outgoing parameter size */
  int save_size;  /* save area size */
  int fixed_size;  /* fixed size of stack frame */
  int gp_size;  /* size of saved GP registers */
  int fp_size;  /* size of saved FP registers */
  int altivec_size;  /* size of saved AltiVec registers */
  int cr_size;  /* size to hold CR if not in fixed area */
  int vrsave_size;  /* size to hold VRSAVE */
  int altivec_padding_size;  /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;  /* total bytes allocated for stack */
  int savres_strategy;  /* mask of register save/restore strategy bits
			   (computed by rs6000_savres_strategy).  */
} rs6000_stack_t;
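
/* The layout above is computed per function by rs6000_stack_info,
   declared below.  */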

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
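
/* One of these structures is allocated per function by
   rs6000_init_machine_status, declared below.  */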

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable; we call it so that
   we can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

/* Counter used by the -mdebug=cost rtx-cost routines to suppress nested
   debug output.  */
static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV = 0x001,  /* Use divide estimate */
  RECIP_DF_DIV = 0x002,
  RECIP_V4SF_DIV = 0x004,
  RECIP_V2DF_DIV = 0x008,

  RECIP_SF_RSQRT = 0x010,  /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE = 0,
  RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	       | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
	       | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;  /* option name */
  unsigned int mask;  /* mask bits to set */
} recip_options[] = {
  { "all", RECIP_ALL },
  { "none", RECIP_NONE },
  { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
	    | RECIP_V2DF_DIV) },
  { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
	      | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
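
/* Thus, for example, -mrecip=divf,rsqrtd ORs together the "divf" and
   "rsqrtd" masks above, enabling single precision divide estimates
   (scalar and V4SF) plus double precision reciprocal square root
   estimates.  (Illustrative note; the exact parsing of the option
   string lives in the option-override code.)  */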

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9", PPC_PLATFORM_POWER9 },
  { "power8", PPC_PLATFORM_POWER8 },
  { "power7", PPC_PLATFORM_POWER7 },
  { "power6x", PPC_PLATFORM_POWER6X },
  { "power6", PPC_PLATFORM_POWER6 },
  { "power5+", PPC_PLATFORM_POWER5_PLUS },
  { "power5", PPC_PLATFORM_POWER5 },
  { "ppc970", PPC_PLATFORM_PPC970 },
  { "power4", PPC_PLATFORM_POWER4 },
  { "ppca2", PPC_PLATFORM_PPCA2 },
  { "ppc476", PPC_PLATFORM_PPC476 },
  { "ppc464", PPC_PLATFORM_PPC464 },
  { "ppc440", PPC_PLATFORM_PPC440 },
  { "ppc405", PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
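
/* As an illustration of how this table is consumed: user code such as

     if (__builtin_cpu_is ("power9"))
       ...

   compiles into a comparison of the AT_PLATFORM value that LIBC caches
   in the TCB against PPC_PLATFORM_POWER9 from the entry above.  */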

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
  { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
  { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
  { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
  { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
  { "booke", PPC_FEATURE_BOOKE, 0 },
  { "cellbe", PPC_FEATURE_CELL_BE, 0 },
  { "dfp", PPC_FEATURE_HAS_DFP, 0 },
  { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
  { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
  { "fpu", PPC_FEATURE_HAS_FPU, 0 },
  { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
  { "mmu", PPC_FEATURE_HAS_MMU, 0 },
  { "notb", PPC_FEATURE_NO_TB, 0 },
  { "pa6t", PPC_FEATURE_PA6T, 0 },
  { "power4", PPC_FEATURE_POWER4, 0 },
  { "power5", PPC_FEATURE_POWER5, 0 },
  { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
  { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
  { "ppc32", PPC_FEATURE_32, 0 },
  { "ppc601", PPC_FEATURE_601_INSTR, 0 },
  { "ppc64", PPC_FEATURE_64, 0 },
  { "ppcle", PPC_FEATURE_PPC_LE, 0 },
  { "smt", PPC_FEATURE_SMT, 0 },
  { "spe", PPC_FEATURE_HAS_SPE, 0 },
  { "true_le", PPC_FEATURE_TRUE_LE, 0 },
  { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
  { "vsx", PPC_FEATURE_HAS_VSX, 0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
  { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
  { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
  { "htm", PPC_FEATURE2_HAS_HTM, 1 },
  { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
  { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
  { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
  { "tar", PPC_FEATURE2_HAS_TAR, 1 },
  { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
  { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
  { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
  { "darn", PPC_FEATURE2_DARN, 1 },
  { "scv", PPC_FEATURE2_SCV, 1 }
};
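
/* Likewise, __builtin_cpu_supports ("vsx") becomes a test of
   PPC_FEATURE_HAS_VSX against the cached AT_HWCAP word, with the id
   field selecting between the AT_HWCAP and AT_HWCAP2 words.  An
   illustrative use (do_vsx_loop is a hypothetical function):

     if (__builtin_cpu_supports ("vsx"))
       do_vsx_loop ();  */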

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,  /* default clone.  */
  CLONE_ISA_2_05,  /* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,  /* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,  /* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,  /* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;  /* rs6000_isa mask */
  const char *name;  /* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0, "" },  /* Default options.  */
  { OPTION_MASK_CMPB, "arch_2_05" },  /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD, "arch_2_06" },  /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR, "arch_2_07" },  /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR, "arch_3_00" },  /* ISA 3.00 (power9).  */
};
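
/* These entries back the target_clones function-multiversioning support.
   An illustrative use, assuming a LIBC that provides the TCB fields
   described below:

     __attribute__ ((target_clones ("cpu=power9,default")))
     double sum (double *a, int n);

   Per the map above, the "cpu=power9" clone is selected when
   __builtin_cpu_supports ("arch_3_00") is true.  */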


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when determining
   whether an address is legitimate.  We only need to worry about GPR, FPR,
   and Altivec registers here, along with an ANY field that is the OR of the
   3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,  /* General purpose registers.  */
  RELOAD_REG_FPR,  /* Traditional floating point regs.  */
  RELOAD_REG_VMX,  /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,  /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;  /* Register class name.  */
  int reg;  /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr", FIRST_GPR_REGNO },  /* RELOAD_REG_GPR.  */
  { "Fpr", FIRST_FPR_REGNO },  /* RELOAD_REG_FPR.  */
  { "VMX", FIRST_ALTIVEC_REGNO },  /* RELOAD_REG_VMX.  */
  { "Any", -1 },  /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;
#define RELOAD_REG_VALID 0x01  /* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE 0x02  /* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED 0x04  /* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET 0x08  /* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC 0x10  /* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY 0x20  /* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16 0x40  /* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET 0x80  /* quad offset is limited.  */

/* Per-mode table of the valid addressing-mode masks for each register type,
   along with the reload insns needed for the mode.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;  /* INSN to reload for loading.  */
  enum insn_code reload_store;  /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG];  /* Valid address masks.  */
  bool scalar_in_vmx_p;  /* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
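
/* As an illustration of the addr_mask machinery: once reg_addr has been
   filled in, a query such as

     (reg_addr[V16QImode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_INDEXED) != 0

   answers whether V16QImode supports reg+reg addressing in Altivec
   registers; the helpers above package the most common of these tests.  */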

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}

\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),  /* mulsi */
  COSTS_N_INSNS (1),  /* mulsi_const */
  COSTS_N_INSNS (1),  /* mulsi_const9 */
  COSTS_N_INSNS (1),  /* muldi */
  COSTS_N_INSNS (1),  /* divsi */
  COSTS_N_INSNS (1),  /* divdi */
  COSTS_N_INSNS (1),  /* fp */
  COSTS_N_INSNS (1),  /* dmul */
  COSTS_N_INSNS (1),  /* sdiv */
  COSTS_N_INSNS (1),  /* ddiv */
  32,  /* cache line size */
  0,  /* l1 cache */
  0,  /* l2 cache */
  0,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),  /* mulsi */
  COSTS_N_INSNS (1),  /* mulsi_const */
  COSTS_N_INSNS (1),  /* mulsi_const9 */
  COSTS_N_INSNS (1),  /* muldi */
  COSTS_N_INSNS (1),  /* divsi */
  COSTS_N_INSNS (1),  /* divdi */
  COSTS_N_INSNS (1),  /* fp */
  COSTS_N_INSNS (1),  /* dmul */
  COSTS_N_INSNS (1),  /* sdiv */
  COSTS_N_INSNS (1),  /* ddiv */
  128,  /* cache line size */
  0,  /* l1 cache */
  0,  /* l2 cache */
  0,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),  /* mulsi */
  COSTS_N_INSNS (12),  /* mulsi_const */
  COSTS_N_INSNS (8),  /* mulsi_const9 */
  COSTS_N_INSNS (34),  /* muldi */
  COSTS_N_INSNS (65),  /* divsi */
  COSTS_N_INSNS (67),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (31),  /* sdiv */
  COSTS_N_INSNS (31),  /* ddiv */
  128,  /* cache line size */
  128,  /* l1 cache */
  2048,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (6),  /* divsi */
  COSTS_N_INSNS (6),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (10),  /* sdiv */
  COSTS_N_INSNS (17),  /* ddiv */
  32,  /* cache line size */
  4,  /* l1 cache */
  16,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (33),  /* divsi */
  COSTS_N_INSNS (33),  /* divdi */
  COSTS_N_INSNS (11),  /* fp */
  COSTS_N_INSNS (11),  /* dmul */
  COSTS_N_INSNS (11),  /* sdiv */
  COSTS_N_INSNS (11),  /* ddiv */
  32,  /* cache line size */
  4,  /* l1 cache */
  16,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (35),  /* divsi */
  COSTS_N_INSNS (35),  /* divdi */
  COSTS_N_INSNS (11),  /* fp */
  COSTS_N_INSNS (11),  /* dmul */
  COSTS_N_INSNS (11),  /* sdiv */
  COSTS_N_INSNS (11),  /* ddiv */
  32,  /* cache line size */
  16,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (34),  /* divsi */
  COSTS_N_INSNS (34),  /* divdi */
  COSTS_N_INSNS (5),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (19),  /* sdiv */
  COSTS_N_INSNS (33),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (11),  /* divsi */
  COSTS_N_INSNS (11),  /* divdi */
  COSTS_N_INSNS (6),  /* fp */
  COSTS_N_INSNS (6),  /* dmul */
  COSTS_N_INSNS (19),  /* sdiv */
  COSTS_N_INSNS (33),  /* ddiv */
  32,  /* l1 cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (5),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (36),  /* divsi */
  COSTS_N_INSNS (36),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (17),  /* sdiv */
  COSTS_N_INSNS (31),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (37),  /* divsi */
  COSTS_N_INSNS (37),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (33),  /* ddiv */
  32,  /* cache line size */
  8,  /* l1 cache */
  64,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (20),  /* divsi */
  COSTS_N_INSNS (20),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (32),  /* ddiv */
  32,  /* cache line size */
  16,  /* l1 cache */
  512,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (20),  /* divsi */
  COSTS_N_INSNS (20),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (32),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  1024,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (7),  /* muldi */
  COSTS_N_INSNS (21),  /* divsi */
  COSTS_N_INSNS (37),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (32),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  1024,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (7),  /* muldi */
  COSTS_N_INSNS (21),  /* divsi */
  COSTS_N_INSNS (37),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17),  /* sdiv */
  COSTS_N_INSNS (21),  /* ddiv */
  128,  /* cache line size */
  64,  /* l1 cache */
  1024,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,  /* mulsi */
  COSTS_N_INSNS (6/2),  /* mulsi_const */
  COSTS_N_INSNS (6/2),  /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,  /* muldi */
  COSTS_N_INSNS (38/2),  /* divsi */
  COSTS_N_INSNS (70/2),  /* divdi */
  COSTS_N_INSNS (10/2),  /* fp */
  COSTS_N_INSNS (10/2),  /* dmul */
  COSTS_N_INSNS (74/2),  /* sdiv */
  COSTS_N_INSNS (74/2),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  6,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (17),  /* divsi */
  COSTS_N_INSNS (17),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17),  /* sdiv */
  COSTS_N_INSNS (31),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (23),  /* divsi */
  COSTS_N_INSNS (23),  /* divdi */
  COSTS_N_INSNS (5),  /* fp */
  COSTS_N_INSNS (5),  /* dmul */
  COSTS_N_INSNS (21),  /* sdiv */
  COSTS_N_INSNS (35),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  1024,  /* l2 cache */
  1,  /* streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (19),  /* divsi */
  COSTS_N_INSNS (19),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (29),  /* sdiv */
  COSTS_N_INSNS (29),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (19),  /* divsi */
  COSTS_N_INSNS (19),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (4),  /* dmul */
  COSTS_N_INSNS (18),  /* sdiv */
  COSTS_N_INSNS (33),  /* ddiv */
  32,  /* cache line size */
  16,  /* l1 cache */
  16,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (14),  /* divsi */
  COSTS_N_INSNS (14),  /* divdi */
  COSTS_N_INSNS (8),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (36),  /* sdiv */
  COSTS_N_INSNS (66),  /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),  /* mulsi */
  COSTS_N_INSNS (4),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (14),  /* divsi */
  COSTS_N_INSNS (14),  /* divdi */
  COSTS_N_INSNS (4),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (36),  /* sdiv */
  COSTS_N_INSNS (66),  /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (14),  /* divsi */
  COSTS_N_INSNS (14),  /* divdi */
  COSTS_N_INSNS (7),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (36),  /* sdiv */
  COSTS_N_INSNS (66),  /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (4),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (14),  /* divsi */
  COSTS_N_INSNS (14),  /* divdi */
  COSTS_N_INSNS (7),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (36),  /* sdiv */
  COSTS_N_INSNS (66),  /* ddiv */
  64,  /* cache line size */
  32,  /* l1 cache */
  128,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),  /* mulsi */
  COSTS_N_INSNS (5),  /* mulsi_const */
  COSTS_N_INSNS (5),  /* mulsi_const9 */
  COSTS_N_INSNS (5),  /* muldi */
  COSTS_N_INSNS (18),  /* divsi */
  COSTS_N_INSNS (18),  /* divdi */
  COSTS_N_INSNS (10),  /* fp */
  COSTS_N_INSNS (10),  /* dmul */
  COSTS_N_INSNS (46),  /* sdiv */
  COSTS_N_INSNS (72),  /* ddiv */
  32,  /* cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  1,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (4),  /* muldi */
  COSTS_N_INSNS (18),  /* divsi */
  COSTS_N_INSNS (34),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (17),  /* sdiv */
  COSTS_N_INSNS (17),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  1024,  /* l2 cache */
  8,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),  /* mulsi */
  COSTS_N_INSNS (8),  /* mulsi_const */
  COSTS_N_INSNS (8),  /* mulsi_const9 */
  COSTS_N_INSNS (8),  /* muldi */
  COSTS_N_INSNS (22),  /* divsi */
  COSTS_N_INSNS (28),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13),  /* sdiv */
  COSTS_N_INSNS (16),  /* ddiv */
  128,  /* cache line size */
  64,  /* l1 cache */
  2048,  /* l2 cache */
  16,  /* prefetch streams */
  0,  /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),  /* mulsi */
  COSTS_N_INSNS (2),  /* mulsi_const */
  COSTS_N_INSNS (2),  /* mulsi_const9 */
  COSTS_N_INSNS (2),  /* muldi */
  COSTS_N_INSNS (18),  /* divsi */
  COSTS_N_INSNS (34),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13),  /* sdiv */
  COSTS_N_INSNS (16),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  12,  /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (19),  /* divsi */
  COSTS_N_INSNS (35),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (14),  /* sdiv */
  COSTS_N_INSNS (17),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  256,  /* l2 cache */
  12,  /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),  /* mulsi */
  COSTS_N_INSNS (3),  /* mulsi_const */
  COSTS_N_INSNS (3),  /* mulsi_const9 */
  COSTS_N_INSNS (3),  /* muldi */
  COSTS_N_INSNS (8),  /* divsi */
  COSTS_N_INSNS (12),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (13),  /* sdiv */
  COSTS_N_INSNS (18),  /* ddiv */
  128,  /* cache line size */
  32,  /* l1 cache */
  512,  /* l2 cache */
  8,  /* prefetch streams */
  COSTS_N_INSNS (3),  /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),  /* mulsi */
  COSTS_N_INSNS (16),  /* mulsi_const */
  COSTS_N_INSNS (16),  /* mulsi_const9 */
  COSTS_N_INSNS (16),  /* muldi */
  COSTS_N_INSNS (22),  /* divsi */
  COSTS_N_INSNS (28),  /* divdi */
  COSTS_N_INSNS (3),  /* fp */
  COSTS_N_INSNS (3),  /* dmul */
  COSTS_N_INSNS (59),  /* sdiv */
  COSTS_N_INSNS (72),  /* ddiv */
  64,  /* cache line size */
  16,  /* l1 cache */
  2048,  /* l2 cache */
  16,  /* prefetch streams */
  0,  /* SF->DF convert */
};
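
/* The option-override code points rs6000_cost at one of the tables above;
   the rtx cost hooks then read the relevant field, e.g. (a sketch of a
   typical use, not a definitive quote from the hook):

     *total = rs6000_cost->mulsi;

   so an SImode multiply on a power9 is costed at power9_cost.mulsi.  */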

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
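
/* The table above is built by X-macro expansion: each RS6000_BUILTIN_n
   invocation reached via rs6000-builtin.def, e.g. a hypothetical

     RS6000_BUILTIN_2 (MY_BUILTIN, "__builtin_my_op", RS6000_BTM_ALTIVEC,
		       RS6000_BTC_CONST, CODE_FOR_nothing)

   expands to the initializer

     { "__builtin_my_op", CODE_FOR_nothing, RS6000_BTM_ALTIVEC,
       RS6000_BTC_CONST },

   in rs6000_builtin_info.  */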

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];  /* return value + 3 arguments.  */
  unsigned char uns_p[4];  /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec", 1, 1, false, true, false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true, true, false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
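
/* For instance, the "altivec" entry above implements the AltiVec type
   attribute, so that a declaration such as

     __attribute__ ((altivec (vector__))) signed int vsi;

   is routed through rs6000_handle_altivec_attribute, which rewrites the
   type into the corresponding vector mode (V4SImode here).  This is an
   illustrative sketch; see the handler for the exact semantics.  */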
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
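
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 20) is
   0x80000000 >> 20 == 0x00000800, the VRSAVE bit for register %v20.  */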
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1674
1675 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1676 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1677
1678 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1679 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1680 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1681 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1682 rs6000_builtin_support_vector_misalignment
1683 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1684 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1685 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1686 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1687 rs6000_builtin_vectorization_cost
1688 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1689 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1690 rs6000_preferred_simd_mode
1691 #undef TARGET_VECTORIZE_INIT_COST
1692 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1693 #undef TARGET_VECTORIZE_ADD_STMT_COST
1694 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1695 #undef TARGET_VECTORIZE_FINISH_COST
1696 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1697 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1698 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1699
1700 #undef TARGET_INIT_BUILTINS
1701 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1702 #undef TARGET_BUILTIN_DECL
1703 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1704
1705 #undef TARGET_FOLD_BUILTIN
1706 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1707 #undef TARGET_GIMPLE_FOLD_BUILTIN
1708 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1709
1710 #undef TARGET_EXPAND_BUILTIN
1711 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1712
1713 #undef TARGET_MANGLE_TYPE
1714 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1715
1716 #undef TARGET_INIT_LIBFUNCS
1717 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1718
1719 #if TARGET_MACHO
1720 #undef TARGET_BINDS_LOCAL_P
1721 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1722 #endif
1723
1724 #undef TARGET_MS_BITFIELD_LAYOUT_P
1725 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1726
1727 #undef TARGET_ASM_OUTPUT_MI_THUNK
1728 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1729
1730 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1731 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1732
1733 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1734 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1735
1736 #undef TARGET_REGISTER_MOVE_COST
1737 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1738 #undef TARGET_MEMORY_MOVE_COST
1739 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1740 #undef TARGET_CANNOT_COPY_INSN_P
1741 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1742 #undef TARGET_RTX_COSTS
1743 #define TARGET_RTX_COSTS rs6000_rtx_costs
1744 #undef TARGET_ADDRESS_COST
1745 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1746 #undef TARGET_INSN_COST
1747 #define TARGET_INSN_COST rs6000_insn_cost
1748
1749 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1750 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1751
1752 #undef TARGET_PROMOTE_FUNCTION_MODE
1753 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1754
1755 #undef TARGET_RETURN_IN_MEMORY
1756 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1757
1758 #undef TARGET_RETURN_IN_MSB
1759 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1760
1761 #undef TARGET_SETUP_INCOMING_VARARGS
1762 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1763
1764 /* Always strict argument naming on rs6000. */
1765 #undef TARGET_STRICT_ARGUMENT_NAMING
1766 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1767 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1768 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1769 #undef TARGET_SPLIT_COMPLEX_ARG
1770 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1771 #undef TARGET_MUST_PASS_IN_STACK
1772 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1773 #undef TARGET_PASS_BY_REFERENCE
1774 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1775 #undef TARGET_ARG_PARTIAL_BYTES
1776 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1777 #undef TARGET_FUNCTION_ARG_ADVANCE
1778 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1779 #undef TARGET_FUNCTION_ARG
1780 #define TARGET_FUNCTION_ARG rs6000_function_arg
1781 #undef TARGET_FUNCTION_ARG_PADDING
1782 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1783 #undef TARGET_FUNCTION_ARG_BOUNDARY
1784 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1785
1786 #undef TARGET_BUILD_BUILTIN_VA_LIST
1787 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1788
1789 #undef TARGET_EXPAND_BUILTIN_VA_START
1790 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1791
1792 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1793 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1794
1795 #undef TARGET_EH_RETURN_FILTER_MODE
1796 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1797
1798 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1799 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1800
1801 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1802 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1803
1804 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1805 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1806
1807 #undef TARGET_FLOATN_MODE
1808 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1809
1810 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1811 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1812
1813 #undef TARGET_MD_ASM_ADJUST
1814 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1815
1816 #undef TARGET_OPTION_OVERRIDE
1817 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1818
1819 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1820 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1821 rs6000_builtin_vectorized_function
1822
1823 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1824 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1825 rs6000_builtin_md_vectorized_function
1826
1827 #undef TARGET_STACK_PROTECT_GUARD
1828 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1829
1830 #if !TARGET_MACHO
1831 #undef TARGET_STACK_PROTECT_FAIL
1832 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1833 #endif
1834
1835 #ifdef HAVE_AS_TLS
1836 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1837 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1838 #endif
1839
1840 /* Use a 32-bit anchor range. This leads to sequences like:
1841
1842 addis tmp,anchor,high
1843 add dest,tmp,low
1844
1845 where tmp itself acts as an anchor, and can be shared between
1846 accesses to the same 64k page. */
1847 #undef TARGET_MIN_ANCHOR_OFFSET
1848 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1849 #undef TARGET_MAX_ANCHOR_OFFSET
1850 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
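/* As a sketch of the arithmetic behind the addis/add sequence above
   (assumed for illustration, not taken from this file): a 32-bit anchor
   offset is split so the low part fits a signed 16-bit immediate:

     high = (offset + 0x8000) >> 16;
     low  = offset - (high << 16);

   addis supplies HIGH shifted left 16, and the following add supplies
   LOW, reconstructing the full offset.  */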
1851 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1852 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1853 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1854 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1855
1856 #undef TARGET_BUILTIN_RECIPROCAL
1857 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1858
1859 #undef TARGET_SECONDARY_RELOAD
1860 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1861 #undef TARGET_SECONDARY_MEMORY_NEEDED
1862 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1863 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1864 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1865
1866 #undef TARGET_LEGITIMATE_ADDRESS_P
1867 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1868
1869 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1870 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1871
1872 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1873 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1874
1875 #undef TARGET_CAN_ELIMINATE
1876 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1877
1878 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1879 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1880
1881 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1882 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1883
1884 #undef TARGET_TRAMPOLINE_INIT
1885 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1886
1887 #undef TARGET_FUNCTION_VALUE
1888 #define TARGET_FUNCTION_VALUE rs6000_function_value
1889
1890 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1891 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1892
1893 #undef TARGET_OPTION_SAVE
1894 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1895
1896 #undef TARGET_OPTION_RESTORE
1897 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1898
1899 #undef TARGET_OPTION_PRINT
1900 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1901
1902 #undef TARGET_CAN_INLINE_P
1903 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1904
1905 #undef TARGET_SET_CURRENT_FUNCTION
1906 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1907
1908 #undef TARGET_LEGITIMATE_CONSTANT_P
1909 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1910
1911 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1912 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1913
1914 #undef TARGET_CAN_USE_DOLOOP_P
1915 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1916
1917 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1918 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1919
1920 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1921 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1922 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1923 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1924 #undef TARGET_UNWIND_WORD_MODE
1925 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1926
1927 #undef TARGET_OFFLOAD_OPTIONS
1928 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1929
1930 #undef TARGET_C_MODE_FOR_SUFFIX
1931 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1932
1933 #undef TARGET_INVALID_BINARY_OP
1934 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1935
1936 #undef TARGET_OPTAB_SUPPORTED_P
1937 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1938
1939 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1940 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1941
1942 #undef TARGET_COMPARE_VERSION_PRIORITY
1943 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1944
1945 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1946 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1947 rs6000_generate_version_dispatcher_body
1948
1949 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1950 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1951 rs6000_get_function_versions_dispatcher
1952
1953 #undef TARGET_OPTION_FUNCTION_VERSIONS
1954 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1955
1956 #undef TARGET_HARD_REGNO_NREGS
1957 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1958 #undef TARGET_HARD_REGNO_MODE_OK
1959 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1960
1961 #undef TARGET_MODES_TIEABLE_P
1962 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1963
1964 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1965 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1966 rs6000_hard_regno_call_part_clobbered
1967
1968 #undef TARGET_SLOW_UNALIGNED_ACCESS
1969 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1970
1971 #undef TARGET_CAN_CHANGE_MODE_CLASS
1972 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1973
1974 #undef TARGET_CONSTANT_ALIGNMENT
1975 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1976
1977 #undef TARGET_STARTING_FRAME_OFFSET
1978 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1979
1980 #if TARGET_ELF && RS6000_WEAK
1981 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1982 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1983 #endif
1984
1985 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1986 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1987
1988 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1989 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1990 \f
1991
1992 /* Processor table. */
1993 struct rs6000_ptt
1994 {
1995 const char *const name; /* Canonical processor name. */
1996 const enum processor_type processor; /* Processor type enum value. */
1997 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1998 };
1999
2000 static struct rs6000_ptt const processor_target_table[] =
2001 {
2002 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
2003 #include "rs6000-cpus.def"
2004 #undef RS6000_CPU
2005 };
2006
2007 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2008 name is invalid. */
2009
2010 static int
2011 rs6000_cpu_name_lookup (const char *name)
2012 {
2013 size_t i;
2014
2015 if (name != NULL)
2016 {
2017 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2018 if (! strcmp (name, processor_target_table[i].name))
2019 return (int)i;
2020 }
2021
2022 return -1;
2023 }
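/* Illustrative use (the caller below is hypothetical): a non-negative
   result indexes processor_target_table directly.

     int idx = rs6000_cpu_name_lookup ("power9");
     HOST_WIDE_INT flags
       = (idx >= 0) ? processor_target_table[idx].target_enable : 0;

   A NULL or unrecognized name yields -1.  */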
2024
2025 \f
2026 /* Return number of consecutive hard regs needed starting at reg REGNO
2027 to hold something of mode MODE.
2028 This is ordinarily the length in words of a value of mode MODE
2029 but can be less for certain modes in special long registers.
2030
2031 POWER and PowerPC GPRs hold 32 bits worth;
2032 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2033
2034 static int
2035 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2036 {
2037 unsigned HOST_WIDE_INT reg_size;
2038
2039 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2040 128-bit floating point that can go in vector registers, which has VSX
2041 memory addressing. */
2042 if (FP_REGNO_P (regno))
2043 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2044 ? UNITS_PER_VSX_WORD
2045 : UNITS_PER_FP_WORD);
2046
2047 else if (ALTIVEC_REGNO_P (regno))
2048 reg_size = UNITS_PER_ALTIVEC_WORD;
2049
2050 else
2051 reg_size = UNITS_PER_WORD;
2052
2053 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2054 }
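/* Worked examples of the round-up division above (illustrative): a
   16-byte V4SImode value needs (16 + 8 - 1) / 8 = 2 GPRs on a 64-bit
   target, (16 + 4 - 1) / 4 = 4 GPRs on a 32-bit target, and
   (16 + 16 - 1) / 16 = 1 Altivec register.  */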
2055
2056 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2057 MODE. */
2058 static int
2059 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2060 {
2061 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2062
2063 if (COMPLEX_MODE_P (mode))
2064 mode = GET_MODE_INNER (mode);
2065
2066 /* PTImode can only go in GPRs. Quad word memory operations require
2067 even/odd register combinations, and we use PTImode for them. Don't
2068 allow quad words in the argument or frame pointer registers, just
2069 registers 0..31. */
2070 if (mode == PTImode)
2071 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2072 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2073 && ((regno & 1) == 0));
2074
2075 /* The VSX registers that overlap the FPRs are larger (128 bits) than
2076 the FPRs are on non-VSX implementations. Don't allow an item to be
2077 split between an FP register and an Altivec register. Allow TImode
2078 in all VSX registers if the user asked for it. */
2079 if (TARGET_VSX && VSX_REGNO_P (regno)
2080 && (VECTOR_MEM_VSX_P (mode)
2081 || FLOAT128_VECTOR_P (mode)
2082 || reg_addr[mode].scalar_in_vmx_p
2083 || mode == TImode
2084 || (TARGET_VADDUQM && mode == V1TImode)))
2085 {
2086 if (FP_REGNO_P (regno))
2087 return FP_REGNO_P (last_regno);
2088
2089 if (ALTIVEC_REGNO_P (regno))
2090 {
2091 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2092 return 0;
2093
2094 return ALTIVEC_REGNO_P (last_regno);
2095 }
2096 }
2097
2098 /* The GPRs can hold any mode, but values bigger than one register
2099 cannot go past R31. */
2100 if (INT_REGNO_P (regno))
2101 return INT_REGNO_P (last_regno);
2102
2103 /* The float registers (except for VSX vector modes) can only hold floating
2104 modes and DImode. */
2105 if (FP_REGNO_P (regno))
2106 {
2107 if (FLOAT128_VECTOR_P (mode))
2108 return false;
2109
2110 if (SCALAR_FLOAT_MODE_P (mode)
2111 && (mode != TDmode || (regno % 2) == 0)
2112 && FP_REGNO_P (last_regno))
2113 return 1;
2114
2115 if (GET_MODE_CLASS (mode) == MODE_INT)
2116 {
2117 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2118 return 1;
2119
2120 if (TARGET_P8_VECTOR && (mode == SImode))
2121 return 1;
2122
2123 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2124 return 1;
2125 }
2126
2127 return 0;
2128 }
2129
2130 /* The CR register can only hold CC modes. */
2131 if (CR_REGNO_P (regno))
2132 return GET_MODE_CLASS (mode) == MODE_CC;
2133
2134 if (CA_REGNO_P (regno))
2135 return mode == Pmode || mode == SImode;
2136
2137 /* AltiVec modes can only go in AltiVec registers. */
2138 if (ALTIVEC_REGNO_P (regno))
2139 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2140 || mode == V1TImode);
2141
2142 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2143 registers, and the value must fit within the register set. */
2144
2145 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2146 }
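/* A few consequences of the checks above (illustrative, not
   exhaustive): TDmode is accepted only in an FPR pair starting at an
   even register; PTImode is accepted only in an even/odd GPR pair; CC
   modes are accepted only in the CR registers.  */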
2147
2148 /* Implement TARGET_HARD_REGNO_NREGS. */
2149
2150 static unsigned int
2151 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2152 {
2153 return rs6000_hard_regno_nregs[mode][regno];
2154 }
2155
2156 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2157
2158 static bool
2159 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2160 {
2161 return rs6000_hard_regno_mode_ok_p[mode][regno];
2162 }
2163
2164 /* Implement TARGET_MODES_TIEABLE_P.
2165
2166 PTImode cannot tie with other modes because PTImode is restricted to even
2167 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2168 57744).
2169
2170 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2171 128-bit floating point on VSX systems ties with other vectors. */
2172
2173 static bool
2174 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2175 {
2176 if (mode1 == PTImode)
2177 return mode2 == PTImode;
2178 if (mode2 == PTImode)
2179 return false;
2180
2181 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2182 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2183 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2184 return false;
2185
2186 if (SCALAR_FLOAT_MODE_P (mode1))
2187 return SCALAR_FLOAT_MODE_P (mode2);
2188 if (SCALAR_FLOAT_MODE_P (mode2))
2189 return false;
2190
2191 if (GET_MODE_CLASS (mode1) == MODE_CC)
2192 return GET_MODE_CLASS (mode2) == MODE_CC;
2193 if (GET_MODE_CLASS (mode2) == MODE_CC)
2194 return false;
2195
2196 return true;
2197 }
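/* Sample results under the rules above (illustrative): DFmode ties with
   SFmode (both scalar float); V2DFmode ties with V16QImode (both
   Altivec/VSX vectors); TImode does not tie with PTImode (PR 57744);
   CCmode ties with CCUNSmode but not with SImode.  */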
2198
2199 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2200
2201 static bool
2202 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2203 unsigned int regno, machine_mode mode)
2204 {
2205 if (TARGET_32BIT
2206 && TARGET_POWERPC64
2207 && GET_MODE_SIZE (mode) > 4
2208 && INT_REGNO_P (regno))
2209 return true;
2210
2211 if (TARGET_VSX
2212 && FP_REGNO_P (regno)
2213 && GET_MODE_SIZE (mode) > 8
2214 && !FLOAT128_2REG_P (mode))
2215 return true;
2216
2217 return false;
2218 }
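/* Example (illustrative): with -m32 -mpowerpc64, a DImode value lives
   in one 64-bit GPR, but the 32-bit ABI only preserves the low 32 bits
   across calls, so the register is partially clobbered.  Likewise with
   VSX, only the low 64 bits of a value wider than 8 bytes in an FPR
   survive a call, except for IBM long double (FLOAT128_2REG_P), which
   occupies two full FPRs.  */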
2219
2220 /* Print interesting facts about registers. */
2221 static void
2222 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2223 {
2224 int r, m;
2225
2226 for (r = first_regno; r <= last_regno; ++r)
2227 {
2228 const char *comma = "";
2229 int len;
2230
2231 if (first_regno == last_regno)
2232 fprintf (stderr, "%s:\t", reg_name);
2233 else
2234 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2235
2236 len = 8;
2237 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2238 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2239 {
2240 if (len > 70)
2241 {
2242 fprintf (stderr, ",\n\t");
2243 len = 8;
2244 comma = "";
2245 }
2246
2247 if (rs6000_hard_regno_nregs[m][r] > 1)
2248 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2249 rs6000_hard_regno_nregs[m][r]);
2250 else
2251 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2252
2253 comma = ", ";
2254 }
2255
2256 if (call_used_regs[r])
2257 {
2258 if (len > 70)
2259 {
2260 fprintf (stderr, ",\n\t");
2261 len = 8;
2262 comma = "";
2263 }
2264
2265 len += fprintf (stderr, "%s%s", comma, "call-used");
2266 comma = ", ";
2267 }
2268
2269 if (fixed_regs[r])
2270 {
2271 if (len > 70)
2272 {
2273 fprintf (stderr, ",\n\t");
2274 len = 8;
2275 comma = "";
2276 }
2277
2278 len += fprintf (stderr, "%s%s", comma, "fixed");
2279 comma = ", ";
2280 }
2281
2282 if (len > 70)
2283 {
2284 fprintf (stderr, ",\n\t");
2285 comma = "";
2286 }
2287
2288 len += fprintf (stderr, "%sreg-class = %s", comma,
2289 reg_class_names[(int)rs6000_regno_regclass[r]]);
2290 comma = ", ";
2291
2292 if (len > 70)
2293 {
2294 fprintf (stderr, ",\n\t");
2295 comma = "";
2296 }
2297
2298 fprintf (stderr, "%sregno = %d\n", comma, r);
2299 }
2300 }
2301
2302 static const char *
2303 rs6000_debug_vector_unit (enum rs6000_vector v)
2304 {
2305 const char *ret;
2306
2307 switch (v)
2308 {
2309 case VECTOR_NONE: ret = "none"; break;
2310 case VECTOR_ALTIVEC: ret = "altivec"; break;
2311 case VECTOR_VSX: ret = "vsx"; break;
2312 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2313 default: ret = "unknown"; break;
2314 }
2315
2316 return ret;
2317 }
2318
2319 /* Inner function printing just the address mask for a particular reload
2320 register class. */
2321 DEBUG_FUNCTION char *
2322 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2323 {
2324 static char ret[8];
2325 char *p = ret;
2326
2327 if ((mask & RELOAD_REG_VALID) != 0)
2328 *p++ = 'v';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2333 *p++ = 'm';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_INDEXED) != 0)
2338 *p++ = 'i';
2339 else if (keep_spaces)
2340 *p++ = ' ';
2341
2342 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2343 *p++ = 'O';
2344 else if ((mask & RELOAD_REG_OFFSET) != 0)
2345 *p++ = 'o';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2355 *p++ = '+';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 if ((mask & RELOAD_REG_AND_M16) != 0)
2360 *p++ = '&';
2361 else if (keep_spaces)
2362 *p++ = ' ';
2363
2364 *p = '\0';
2365
2366 return ret;
2367 }
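/* For example (illustrative): a mask with RELOAD_REG_VALID,
   RELOAD_REG_INDEXED, RELOAD_REG_OFFSET and RELOAD_REG_PRE_INCDEC set
   prints as "v io+  " with KEEP_SPACES, or as "vio+" without.  */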
2368
2369 /* Print the address masks in a human readable fashion. */
2370 DEBUG_FUNCTION void
2371 rs6000_debug_print_mode (ssize_t m)
2372 {
2373 ssize_t rc;
2374 int spaces = 0;
2375
2376 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2377 for (rc = 0; rc < N_RELOAD_REG; rc++)
2378 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2379 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2380
2381 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2382 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2383 {
2384 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2385 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2386 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2387 spaces = 0;
2388 }
2389 else
2390 spaces += sizeof (" Reload=sl") - 1;
2391
2392 if (reg_addr[m].scalar_in_vmx_p)
2393 {
2394 fprintf (stderr, "%*s Upper=y", spaces, "");
2395 spaces = 0;
2396 }
2397 else
2398 spaces += sizeof (" Upper=y") - 1;
2399
2400 if (rs6000_vector_unit[m] != VECTOR_NONE
2401 || rs6000_vector_mem[m] != VECTOR_NONE)
2402 {
2403 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2404 spaces, "",
2405 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2406 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2407 }
2408
2409 fputs ("\n", stderr);
2410 }
2411
2412 #define DEBUG_FMT_ID "%-32s= "
2413 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2414 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2415 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2416
2417 /* Print various interesting information with -mdebug=reg. */
2418 static void
2419 rs6000_debug_reg_global (void)
2420 {
2421 static const char *const tf[2] = { "false", "true" };
2422 const char *nl = (const char *)0;
2423 int m;
2424 size_t m1, m2, v;
2425 char costly_num[20];
2426 char nop_num[20];
2427 char flags_buffer[40];
2428 const char *costly_str;
2429 const char *nop_str;
2430 const char *trace_str;
2431 const char *abi_str;
2432 const char *cmodel_str;
2433 struct cl_target_option cl_opts;
2434
2435 /* Modes we want tieable information on. */
2436 static const machine_mode print_tieable_modes[] = {
2437 QImode,
2438 HImode,
2439 SImode,
2440 DImode,
2441 TImode,
2442 PTImode,
2443 SFmode,
2444 DFmode,
2445 TFmode,
2446 IFmode,
2447 KFmode,
2448 SDmode,
2449 DDmode,
2450 TDmode,
2451 V16QImode,
2452 V8HImode,
2453 V4SImode,
2454 V2DImode,
2455 V1TImode,
2456 V32QImode,
2457 V16HImode,
2458 V8SImode,
2459 V4DImode,
2460 V2TImode,
2461 V4SFmode,
2462 V2DFmode,
2463 V8SFmode,
2464 V4DFmode,
2465 CCmode,
2466 CCUNSmode,
2467 CCEQmode,
2468 };
2469
2470 /* Virtual regs we are interested in. */
2471 static const struct {
2472 int regno; /* register number. */
2473 const char *name; /* register name. */
2474 } virtual_regs[] = {
2475 { STACK_POINTER_REGNUM, "stack pointer:" },
2476 { TOC_REGNUM, "toc: " },
2477 { STATIC_CHAIN_REGNUM, "static chain: " },
2478 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2479 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2480 { ARG_POINTER_REGNUM, "arg pointer: " },
2481 { FRAME_POINTER_REGNUM, "frame pointer:" },
2482 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2483 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2484 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2485 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2486 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2487 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2488 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2489 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2490 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2491 };
2492
2493 fputs ("\nHard register information:\n", stderr);
2494 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2495 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2496 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2497 LAST_ALTIVEC_REGNO,
2498 "vs");
2499 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2500 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2501 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2502 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2503 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2504 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2505
2506 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2507 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2508 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2509
2510 fprintf (stderr,
2511 "\n"
2512 "d reg_class = %s\n"
2513 "f reg_class = %s\n"
2514 "v reg_class = %s\n"
2515 "wa reg_class = %s\n"
2516 "wb reg_class = %s\n"
2517 "wd reg_class = %s\n"
2518 "we reg_class = %s\n"
2519 "wf reg_class = %s\n"
2520 "wg reg_class = %s\n"
2521 "wh reg_class = %s\n"
2522 "wi reg_class = %s\n"
2523 "wj reg_class = %s\n"
2524 "wk reg_class = %s\n"
2525 "wl reg_class = %s\n"
2526 "wm reg_class = %s\n"
2527 "wo reg_class = %s\n"
2528 "wp reg_class = %s\n"
2529 "wq reg_class = %s\n"
2530 "wr reg_class = %s\n"
2531 "ws reg_class = %s\n"
2532 "wt reg_class = %s\n"
2533 "wu reg_class = %s\n"
2534 "wv reg_class = %s\n"
2535 "ww reg_class = %s\n"
2536 "wx reg_class = %s\n"
2537 "wy reg_class = %s\n"
2538 "wz reg_class = %s\n"
2539 "wA reg_class = %s\n"
2540 "wH reg_class = %s\n"
2541 "wI reg_class = %s\n"
2542 "wJ reg_class = %s\n"
2543 "wK reg_class = %s\n"
2544 "\n",
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2572 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2573 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2574 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2575 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2576 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2577
2578 nl = "\n";
2579 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2580 rs6000_debug_print_mode (m);
2581
2582 fputs ("\n", stderr);
2583
2584 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2585 {
2586 machine_mode mode1 = print_tieable_modes[m1];
2587 bool first_time = true;
2588
2589 nl = (const char *)0;
2590 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2591 {
2592 machine_mode mode2 = print_tieable_modes[m2];
2593 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2594 {
2595 if (first_time)
2596 {
2597 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2598 nl = "\n";
2599 first_time = false;
2600 }
2601
2602 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2603 }
2604 }
2605
2606 if (!first_time)
2607 fputs ("\n", stderr);
2608 }
2609
2610 if (nl)
2611 fputs (nl, stderr);
2612
2613 if (rs6000_recip_control)
2614 {
2615 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2616
2617 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2618 if (rs6000_recip_bits[m])
2619 {
2620 fprintf (stderr,
2621 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2622 GET_MODE_NAME (m),
2623 (RS6000_RECIP_AUTO_RE_P (m)
2624 ? "auto"
2625 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2626 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2627 ? "auto"
2628 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2629 }
2630
2631 fputs ("\n", stderr);
2632 }
2633
2634 if (rs6000_cpu_index >= 0)
2635 {
2636 const char *name = processor_target_table[rs6000_cpu_index].name;
2637 HOST_WIDE_INT flags
2638 = processor_target_table[rs6000_cpu_index].target_enable;
2639
2640 sprintf (flags_buffer, "-mcpu=%s flags", name);
2641 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2642 }
2643 else
2644 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2645
2646 if (rs6000_tune_index >= 0)
2647 {
2648 const char *name = processor_target_table[rs6000_tune_index].name;
2649 HOST_WIDE_INT flags
2650 = processor_target_table[rs6000_tune_index].target_enable;
2651
2652 sprintf (flags_buffer, "-mtune=%s flags", name);
2653 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2654 }
2655 else
2656 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2657
2658 cl_target_option_save (&cl_opts, &global_options);
2659 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2660 rs6000_isa_flags);
2661
2662 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2663 rs6000_isa_flags_explicit);
2664
2665 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2666 rs6000_builtin_mask);
2667
2668 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2669
2670 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2671 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2672
2673 switch (rs6000_sched_costly_dep)
2674 {
2675 case max_dep_latency:
2676 costly_str = "max_dep_latency";
2677 break;
2678
2679 case no_dep_costly:
2680 costly_str = "no_dep_costly";
2681 break;
2682
2683 case all_deps_costly:
2684 costly_str = "all_deps_costly";
2685 break;
2686
2687 case true_store_to_load_dep_costly:
2688 costly_str = "true_store_to_load_dep_costly";
2689 break;
2690
2691 case store_to_load_dep_costly:
2692 costly_str = "store_to_load_dep_costly";
2693 break;
2694
2695 default:
2696 costly_str = costly_num;
2697 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2698 break;
2699 }
2700
2701 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2702
2703 switch (rs6000_sched_insert_nops)
2704 {
2705 case sched_finish_regroup_exact:
2706 nop_str = "sched_finish_regroup_exact";
2707 break;
2708
2709 case sched_finish_pad_groups:
2710 nop_str = "sched_finish_pad_groups";
2711 break;
2712
2713 case sched_finish_none:
2714 nop_str = "sched_finish_none";
2715 break;
2716
2717 default:
2718 nop_str = nop_num;
2719 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2720 break;
2721 }
2722
2723 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2724
2725 switch (rs6000_sdata)
2726 {
2727 default:
2728 case SDATA_NONE:
2729 break;
2730
2731 case SDATA_DATA:
2732 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2733 break;
2734
2735 case SDATA_SYSV:
2736 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2737 break;
2738
2739 case SDATA_EABI:
2740 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2741 break;
2742
2743 }
2744
2745 switch (rs6000_traceback)
2746 {
2747 case traceback_default: trace_str = "default"; break;
2748 case traceback_none: trace_str = "none"; break;
2749 case traceback_part: trace_str = "part"; break;
2750 case traceback_full: trace_str = "full"; break;
2751 default: trace_str = "unknown"; break;
2752 }
2753
2754 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2755
2756 switch (rs6000_current_cmodel)
2757 {
2758 case CMODEL_SMALL: cmodel_str = "small"; break;
2759 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2760 case CMODEL_LARGE: cmodel_str = "large"; break;
2761 default: cmodel_str = "unknown"; break;
2762 }
2763
2764 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2765
2766 switch (rs6000_current_abi)
2767 {
2768 case ABI_NONE: abi_str = "none"; break;
2769 case ABI_AIX: abi_str = "aix"; break;
2770 case ABI_ELFv2: abi_str = "ELFv2"; break;
2771 case ABI_V4: abi_str = "V4"; break;
2772 case ABI_DARWIN: abi_str = "darwin"; break;
2773 default: abi_str = "unknown"; break;
2774 }
2775
2776 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2777
2778 if (rs6000_altivec_abi)
2779 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2780
2781 if (rs6000_darwin64_abi)
2782 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2783
2784 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2785 (TARGET_SOFT_FLOAT ? "true" : "false"));
2786
2787 if (TARGET_LINK_STACK)
2788 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2789
2790 if (TARGET_P8_FUSION)
2791 {
2792 char options[80];
2793
2794 strcpy (options, "power8");
2795 if (TARGET_P8_FUSION_SIGN)
2796 strcat (options, ", sign");
2797
2798 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2799 }
2800
2801 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2802 TARGET_SECURE_PLT ? "secure" : "bss");
2803 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2804 aix_struct_return ? "aix" : "sysv");
2805 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2806 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2807 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2808 tf[!!rs6000_align_branch_targets]);
2809 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2810 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2811 rs6000_long_double_type_size);
2812 if (rs6000_long_double_type_size > 64)
2813 {
2814 fprintf (stderr, DEBUG_FMT_S, "long double type",
2815 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2816 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2817 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2818 }
2819 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2820 (int)rs6000_sched_restricted_insns_priority);
2821 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2822 (int)END_BUILTINS);
2823 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2824 (int)RS6000_BUILTIN_COUNT);
2825
2826 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2827 (int)TARGET_FLOAT128_ENABLE_TYPE);
2828
2829 if (TARGET_VSX)
2830 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2831 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2832
2833 if (TARGET_DIRECT_MOVE_128)
2834 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2835 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2836 }
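/* The dump above is requested with -mdebug=reg, e.g. (illustrative
   command line):

     gcc -S -mcpu=power9 -mdebug=reg foo.c

   which writes these tables to stderr while options are processed.  */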
2837
2838 \f
2839 /* Update the addr mask bits in reg_addr to help secondary reload and the
2840 legitimate address support figure out the appropriate addressing to
2841 use. */
2842
2843 static void
2844 rs6000_setup_reg_addr_masks (void)
2845 {
2846 ssize_t rc, reg, m, nregs;
2847 addr_mask_type any_addr_mask, addr_mask;
2848
2849 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2850 {
2851 machine_mode m2 = (machine_mode) m;
2852 bool complex_p = false;
2853 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2854 size_t msize;
2855
2856 if (COMPLEX_MODE_P (m2))
2857 {
2858 complex_p = true;
2859 m2 = GET_MODE_INNER (m2);
2860 }
2861
2862 msize = GET_MODE_SIZE (m2);
2863
2864 /* SDmode is special in that we access it only via REG+REG addressing
2865 on power7 and above, so that we can use the LFIWZX and STFIWX
2866 instructions to load and store it. */
2867 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2868
2869 any_addr_mask = 0;
2870 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2871 {
2872 addr_mask = 0;
2873 reg = reload_reg_map[rc].reg;
2874
2875 /* Can mode values go in the GPR/FPR/Altivec registers? */
2876 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2877 {
2878 bool small_int_vsx_p = (small_int_p
2879 && (rc == RELOAD_REG_FPR
2880 || rc == RELOAD_REG_VMX));
2881
2882 nregs = rs6000_hard_regno_nregs[m][reg];
2883 addr_mask |= RELOAD_REG_VALID;
2884
2885 /* Indicate if the mode takes more than 1 physical register. If
2886 it takes a single register, indicate it can do REG+REG
2887 addressing. Small integers in VSX registers can only do
2888 REG+REG addressing. */
2889 if (small_int_vsx_p)
2890 addr_mask |= RELOAD_REG_INDEXED;
2891 else if (nregs > 1 || m == BLKmode || complex_p)
2892 addr_mask |= RELOAD_REG_MULTIPLE;
2893 else
2894 addr_mask |= RELOAD_REG_INDEXED;
2895
2896 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2897 addressing. If we allow scalars into Altivec registers,
2898 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2899
2900 For VSX systems, we don't allow update addressing for
2901 DFmode/SFmode if those registers can go in both the
2902 traditional floating point registers and Altivec registers.
2903 The load/store instructions for the Altivec registers do not
2904 have update forms. If we allowed update addressing, it seems
2905 to break IV-OPT code using floating point if the index type is
2906 int instead of long (PR target/81550 and target/84042). */
2907
2908 if (TARGET_UPDATE
2909 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2910 && msize <= 8
2911 && !VECTOR_MODE_P (m2)
2912 && !FLOAT128_VECTOR_P (m2)
2913 && !complex_p
2914 && (m != E_DFmode || !TARGET_VSX)
2915 && (m != E_SFmode || !TARGET_P8_VECTOR)
2916 && !small_int_vsx_p)
2917 {
2918 addr_mask |= RELOAD_REG_PRE_INCDEC;
2919
2920 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2921 we don't allow PRE_MODIFY for some multi-register
2922 operations. */
2923 switch (m)
2924 {
2925 default:
2926 addr_mask |= RELOAD_REG_PRE_MODIFY;
2927 break;
2928
2929 case E_DImode:
2930 if (TARGET_POWERPC64)
2931 addr_mask |= RELOAD_REG_PRE_MODIFY;
2932 break;
2933
2934 case E_DFmode:
2935 case E_DDmode:
2936 if (TARGET_HARD_FLOAT)
2937 addr_mask |= RELOAD_REG_PRE_MODIFY;
2938 break;
2939 }
2940 }
2941 }
2942
2943 /* GPR and FPR registers can do REG+OFFSET addressing, except
2944 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2945 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2946 if ((addr_mask != 0) && !indexed_only_p
2947 && msize <= 8
2948 && (rc == RELOAD_REG_GPR
2949 || ((msize == 8 || m2 == SFmode)
2950 && (rc == RELOAD_REG_FPR
2951 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2952 addr_mask |= RELOAD_REG_OFFSET;
2953
2954 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2955 instructions are enabled. The offset for 128-bit VSX registers is
2956 only 12 bits. While GPRs can handle the full offset range, VSX
2957 registers can only handle the restricted range. */
2958 else if ((addr_mask != 0) && !indexed_only_p
2959 && msize == 16 && TARGET_P9_VECTOR
2960 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2961 || (m2 == TImode && TARGET_VSX)))
2962 {
2963 addr_mask |= RELOAD_REG_OFFSET;
2964 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2965 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2966 }
2967
2968 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2969 addressing on 128-bit types. */
2970 if (rc == RELOAD_REG_VMX && msize == 16
2971 && (addr_mask & RELOAD_REG_VALID) != 0)
2972 addr_mask |= RELOAD_REG_AND_M16;
2973
2974 reg_addr[m].addr_mask[rc] = addr_mask;
2975 any_addr_mask |= addr_mask;
2976 }
2977
2978 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2979 }
2980 }
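/* As an example of the result (assuming a 64-bit VSX target): DFmode
   ends up with the valid, indexed and offset bits set in the GPR and
   FPR masks, but without the update bits, because the test above
   disables PRE_INC/PRE_DEC/PRE_MODIFY for DFmode when TARGET_VSX is set
   (PR target/81550 and target/84042).  */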
2981
2982 \f
2983 /* Initialize the various global tables that are based on register size. */
2984 static void
2985 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2986 {
2987 ssize_t r, m, c;
2988 int align64;
2989 int align32;
2990
2991 /* Precalculate REGNO_REG_CLASS. */
2992 rs6000_regno_regclass[0] = GENERAL_REGS;
2993 for (r = 1; r < 32; ++r)
2994 rs6000_regno_regclass[r] = BASE_REGS;
2995
2996 for (r = 32; r < 64; ++r)
2997 rs6000_regno_regclass[r] = FLOAT_REGS;
2998
2999 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
3000 rs6000_regno_regclass[r] = NO_REGS;
3001
3002 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
3003 rs6000_regno_regclass[r] = ALTIVEC_REGS;
3004
3005 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3006 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3007 rs6000_regno_regclass[r] = CR_REGS;
3008
3009 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3010 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3011 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3012 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3013 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3014 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
3015 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
3016 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
3017 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3018 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3019
3020 /* Precalculate register class to simpler reload register class. We don't
3021 need all of the register classes that are combinations of different
3022 classes, just the simple ones that have constraint letters. */
3023 for (c = 0; c < N_REG_CLASSES; c++)
3024 reg_class_to_reg_type[c] = NO_REG_TYPE;
3025
3026 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3027 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3028 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3029 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3030 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3031 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3032 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3033 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3034 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3035 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3036
3037 if (TARGET_VSX)
3038 {
3039 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3040 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3041 }
3042 else
3043 {
3044 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3045 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3046 }
3047
3048 /* Precalculate the valid memory formats as well as the vector information;
3049 this must be set up before the rs6000_hard_regno_nregs_internal calls
3050 below. */
3051 gcc_assert ((int)VECTOR_NONE == 0);
3052 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3053 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
3054
3055 gcc_assert ((int)CODE_FOR_nothing == 0);
3056 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3057
3058 gcc_assert ((int)NO_REGS == 0);
3059 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3060
3061 /* The VSX hardware allows native alignment for vectors; control whether the
3062 compiler believes it can use native alignment or must still use 128-bit alignment. */
3063 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3064 {
3065 align64 = 64;
3066 align32 = 32;
3067 }
3068 else
3069 {
3070 align64 = 128;
3071 align32 = 128;
3072 }
3073
3074 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3075 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3076 if (TARGET_FLOAT128_TYPE)
3077 {
3078 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3079 rs6000_vector_align[KFmode] = 128;
3080
3081 if (FLOAT128_IEEE_P (TFmode))
3082 {
3083 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3084 rs6000_vector_align[TFmode] = 128;
3085 }
3086 }
3087
3088 /* V2DF mode, VSX only. */
3089 if (TARGET_VSX)
3090 {
3091 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3092 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3093 rs6000_vector_align[V2DFmode] = align64;
3094 }
3095
3096 /* V4SF mode, either VSX or Altivec. */
3097 if (TARGET_VSX)
3098 {
3099 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3100 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3101 rs6000_vector_align[V4SFmode] = align32;
3102 }
3103 else if (TARGET_ALTIVEC)
3104 {
3105 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3106 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3107 rs6000_vector_align[V4SFmode] = align32;
3108 }
3109
3110 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3111 and stores. */
3112 if (TARGET_ALTIVEC)
3113 {
3114 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3115 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3116 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3117 rs6000_vector_align[V4SImode] = align32;
3118 rs6000_vector_align[V8HImode] = align32;
3119 rs6000_vector_align[V16QImode] = align32;
3120
3121 if (TARGET_VSX)
3122 {
3123 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3124 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3125 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3126 }
3127 else
3128 {
3129 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3130 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3131 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3132 }
3133 }
3134
3135 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3136 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3137 if (TARGET_VSX)
3138 {
3139 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3140 rs6000_vector_unit[V2DImode]
3141 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3142 rs6000_vector_align[V2DImode] = align64;
3143
3144 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3145 rs6000_vector_unit[V1TImode]
3146 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3147 rs6000_vector_align[V1TImode] = 128;
3148 }
3149
3150 /* DFmode, see if we want to use the VSX unit. Memory is handled
3151 differently, so don't set rs6000_vector_mem. */
3152 if (TARGET_VSX)
3153 {
3154 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3155 rs6000_vector_align[DFmode] = 64;
3156 }
3157
3158 /* SFmode, see if we want to use the VSX unit. */
3159 if (TARGET_P8_VECTOR)
3160 {
3161 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3162 rs6000_vector_align[SFmode] = 32;
3163 }
3164
3165 /* Allow TImode in VSX register and set the VSX memory macros. */
3166 if (TARGET_VSX)
3167 {
3168 rs6000_vector_mem[TImode] = VECTOR_VSX;
3169 rs6000_vector_align[TImode] = align64;
3170 }
3171
3172 /* Register class constraints for the constraints that depend on compile
3173 switches. When the VSX code was added, different constraints were added
3174 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3175 of the VSX registers are used. The register classes for scalar floating
3176 point types are set, based on whether we allow that type into the upper
3177 (Altivec) registers. GCC has register classes to target the Altivec
3178 registers for load/store operations, to select using a VSX memory
3179 operation instead of the traditional floating point operation. The
3180 constraints are:
3181
3182 d - Register class to use with traditional DFmode instructions.
3183 f - Register class to use with traditional SFmode instructions.
3184 v - Altivec register.
3185 wa - Any VSX register.
3186 wc - Reserved to represent individual CR bits (used in LLVM).
3187 wd - Preferred register class for V2DFmode.
3188 wf - Preferred register class for V4SFmode.
3189 wg - Float register for power6x move insns.
3190 wh - FP register for direct move instructions.
3191 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3192 wj - FP or VSX register to hold 64-bit integers for direct moves.
3193 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3194 wl - Float register if we can do 32-bit signed int loads.
3195 wm - VSX register for ISA 2.07 direct move operations.
3196 wn - always NO_REGS.
3197 wr - GPR if 64-bit mode is permitted.
3198 ws - Register class to do ISA 2.06 DF operations.
3199 wt - VSX register for TImode in VSX registers.
3200 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3201 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3202 ww - Register class to do SF conversions in with VSX operations.
3203 wx - Float register if we can do 32-bit int stores.
3204 wy - Register class to do ISA 2.07 SF operations.
3205 wz - Float register if we can do 32-bit unsigned int loads.
3206 wH - Altivec register if SImode is allowed in VSX registers.
3207 wI - VSX register if SImode is allowed in VSX registers.
3208 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3209 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3210
3211 if (TARGET_HARD_FLOAT)
3212 {
3213 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3214 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3215 }
3216
3217 if (TARGET_VSX)
3218 {
3219 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3220 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3221 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3222 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3223 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3224 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3225 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3226 }
3227
3228 /* Add conditional constraints based on various options, to allow us to
3229 collapse multiple insn patterns. */
3230 if (TARGET_ALTIVEC)
3231 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3232
3233 if (TARGET_MFPGPR) /* DFmode */
3234 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3235
3236 if (TARGET_LFIWAX)
3237 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3238
3239 if (TARGET_DIRECT_MOVE)
3240 {
3241 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3242 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3243 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3244 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3245 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3246 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3247 }
3248
3249 if (TARGET_POWERPC64)
3250 {
3251 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3252 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3253 }
3254
3255 if (TARGET_P8_VECTOR) /* SFmode */
3256 {
3257 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3258 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3259 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3260 }
3261 else if (TARGET_VSX)
3262 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3263
3264 if (TARGET_STFIWX)
3265 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3266
3267 if (TARGET_LFIWZX)
3268 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3269
3270 if (TARGET_FLOAT128_TYPE)
3271 {
3272 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3273 if (FLOAT128_IEEE_P (TFmode))
3274 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3275 }
3276
3277 if (TARGET_P9_VECTOR)
3278 {
3279 /* Support for new D-form instructions. */
3280 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3281
3282 /* Support for ISA 3.0 (power9) vectors. */
3283 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3284 }
3285
3286 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3287 if (TARGET_DIRECT_MOVE_128)
3288 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3289
3290 /* Support small integers in VSX registers. */
3291 if (TARGET_P8_VECTOR)
3292 {
3293 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3294 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3295 if (TARGET_P9_VECTOR)
3296 {
3297 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3298 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3299 }
3300 }
3301
3302 /* Set up the reload helper and direct move functions. */
3303 if (TARGET_VSX || TARGET_ALTIVEC)
3304 {
3305 if (TARGET_64BIT)
3306 {
3307 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3308 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3309 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3310 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3311 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3312 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3313 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3314 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3315 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3316 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3317 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3318 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3319 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3320 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3321 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3322 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3323 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3324 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3325 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3326 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3327
3328 if (FLOAT128_VECTOR_P (KFmode))
3329 {
3330 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3331 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3332 }
3333
3334 if (FLOAT128_VECTOR_P (TFmode))
3335 {
3336 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3337 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3338 }
3339
3340 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3341 available. */
3342 if (TARGET_NO_SDMODE_STACK)
3343 {
3344 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3345 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3346 }
3347
3348 if (TARGET_VSX)
3349 {
3350 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3351 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3352 }
3353
3354 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3355 {
3356 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3357 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3358 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3359 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3360 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3361 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3362 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3363 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3364 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3365
3366 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3367 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3368 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3369 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3370 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3371 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3372 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3373 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3374 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3375
3376 if (FLOAT128_VECTOR_P (KFmode))
3377 {
3378 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3379 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3380 }
3381
3382 if (FLOAT128_VECTOR_P (TFmode))
3383 {
3384 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3385 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3386 }
3387 }
3388 }
3389 else
3390 {
3391 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3392 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3393 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3394 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3395 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3396 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3397 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3398 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3399 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3400 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3401 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3402 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3403 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3404 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3405 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3406 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3407 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3408 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3409 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3410 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3411
3412 if (FLOAT128_VECTOR_P (KFmode))
3413 {
3414 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3415 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3416 }
3417
3418 if (FLOAT128_IEEE_P (TFmode))
3419 {
3420 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3421 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3422 }
3423
3424 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3425 available. */
3426 if (TARGET_NO_SDMODE_STACK)
3427 {
3428 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3429 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3430 }
3431
3432 if (TARGET_VSX)
3433 {
3434 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3435 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3436 }
3437
3438 if (TARGET_DIRECT_MOVE)
3439 {
3440 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3441 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3442 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3443 }
3444 }
3445
3446 reg_addr[DFmode].scalar_in_vmx_p = true;
3447 reg_addr[DImode].scalar_in_vmx_p = true;
3448
3449 if (TARGET_P8_VECTOR)
3450 {
3451 reg_addr[SFmode].scalar_in_vmx_p = true;
3452 reg_addr[SImode].scalar_in_vmx_p = true;
3453
3454 if (TARGET_P9_VECTOR)
3455 {
3456 reg_addr[HImode].scalar_in_vmx_p = true;
3457 reg_addr[QImode].scalar_in_vmx_p = true;
3458 }
3459 }
3460 }
3461
3462 /* Precalculate HARD_REGNO_NREGS. */
3463 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3464 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3465 rs6000_hard_regno_nregs[m][r]
3466 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
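/* For example (a rough sketch using the usual register sizes): in 32-bit
   mode an 8-byte DFmode value spans 2 general registers (4 bytes each),
   but only 1 floating point register (8 bytes), so the table varies by
   register as well as by mode.  */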
3467
3468 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3469 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3470 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3471 if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
3472 rs6000_hard_regno_mode_ok_p[m][r] = true;
3473
3474 /* Precalculate CLASS_MAX_NREGS sizes. */
3475 for (c = 0; c < LIM_REG_CLASSES; ++c)
3476 {
3477 int reg_size;
3478
3479 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3480 reg_size = UNITS_PER_VSX_WORD;
3481
3482 else if (c == ALTIVEC_REGS)
3483 reg_size = UNITS_PER_ALTIVEC_WORD;
3484
3485 else if (c == FLOAT_REGS)
3486 reg_size = UNITS_PER_FP_WORD;
3487
3488 else
3489 reg_size = UNITS_PER_WORD;
3490
3491 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3492 {
3493 machine_mode m2 = (machine_mode)m;
3494 int reg_size2 = reg_size;
3495
3496 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3497 in VSX. */
3498 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3499 reg_size2 = UNITS_PER_FP_WORD;
3500
3501 rs6000_class_max_nregs[m][c]
3502 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3503 }
3504 }
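/* Worked examples of the ceiling division above: V4SFmode is 16 bytes,
   so FLOAT_REGS (reg_size == UNITS_PER_FP_WORD == 8) needs
   (16 + 8 - 1) / 8 = 2 registers, while VSX_REGS (reg_size == 16) needs
   (16 + 16 - 1) / 16 = 1.  TDmode is also 16 bytes but is
   FLOAT128_2REG_P, so reg_size2 is forced back to 8 and it always
   occupies 2 registers, even in a VSX class.  */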
3505
3506 /* Calculate the modes for which to automatically generate code using the
3507 reciprocal divide and square root instructions. In the future, possibly
3508 automatically generate the instructions even if the user did not specify
3509 -mrecip. The older machines' double precision reciprocal square root
3510 estimate is not accurate enough. */
3511 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3512 if (TARGET_FRES)
3513 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3514 if (TARGET_FRE)
3515 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3516 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3517 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3518 if (VECTOR_UNIT_VSX_P (V2DFmode))
3519 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3520
3521 if (TARGET_FRSQRTES)
3522 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3523 if (TARGET_FRSQRTE)
3524 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3525 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3526 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3527 if (VECTOR_UNIT_VSX_P (V2DFmode))
3528 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
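/* For example, -mcpu=power7 enables fre, fres, frsqrte and frsqrtes as
   well as VSX, so all four entries above end up holding
   RS6000_RECIP_MASK_HAVE_RE | RS6000_RECIP_MASK_HAVE_RSQRTE.  */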
3529
3530 if (rs6000_recip_control)
3531 {
3532 if (!flag_finite_math_only)
3533 warning (0, "%qs requires %qs or %qs", "-mrecip",
3534 "-ffinite-math-only", "-ffast-math");
3535 if (flag_trapping_math)
3536 warning (0, "%qs requires %qs or %qs", "-mrecip",
3537 "-fno-trapping-math", "-ffast-math");
3538 if (!flag_reciprocal_math)
3539 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3540 "-ffast-math");
3541 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3542 {
3543 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3544 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3545 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3548 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3549 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3552 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3553 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3554
3555 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3556 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3557 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3560 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3561 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3564 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3565 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566
3567 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3568 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3569 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3570
3571 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3572 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3573 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3574 }
3575 }
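/* A typical invocation that satisfies all three checks above (a sketch;
   any cpu with the estimate instructions works):

       gcc -O3 -mcpu=power7 -mrecip -ffast-math ...

   -ffast-math implies -ffinite-math-only, -fno-trapping-math and
   -freciprocal-math, so the AUTO_RE/AUTO_RSQRTE bits are set and
   divisions and square roots are expanded via the estimate instructions
   plus Newton-Raphson refinement.  */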
3576
3577 /* Update the addr mask bits in reg_addr to help secondary reload and the
3578 legitimate address (GO_IF_LEGITIMATE_ADDRESS) support figure out the
3579 appropriate addressing to use. */
3580 rs6000_setup_reg_addr_masks ();
3581
3582 if (global_init_p || TARGET_DEBUG_TARGET)
3583 {
3584 if (TARGET_DEBUG_REG)
3585 rs6000_debug_reg_global ();
3586
3587 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3588 fprintf (stderr,
3589 "SImode variable mult cost = %d\n"
3590 "SImode constant mult cost = %d\n"
3591 "SImode short constant mult cost = %d\n"
3592 "DImode multipliciation cost = %d\n"
3593 "SImode division cost = %d\n"
3594 "DImode division cost = %d\n"
3595 "Simple fp operation cost = %d\n"
3596 "DFmode multiplication cost = %d\n"
3597 "SFmode division cost = %d\n"
3598 "DFmode division cost = %d\n"
3599 "cache line size = %d\n"
3600 "l1 cache size = %d\n"
3601 "l2 cache size = %d\n"
3602 "simultaneous prefetches = %d\n"
3603 "\n",
3604 rs6000_cost->mulsi,
3605 rs6000_cost->mulsi_const,
3606 rs6000_cost->mulsi_const9,
3607 rs6000_cost->muldi,
3608 rs6000_cost->divsi,
3609 rs6000_cost->divdi,
3610 rs6000_cost->fp,
3611 rs6000_cost->dmul,
3612 rs6000_cost->sdiv,
3613 rs6000_cost->ddiv,
3614 rs6000_cost->cache_line_size,
3615 rs6000_cost->l1_cache_size,
3616 rs6000_cost->l2_cache_size,
3617 rs6000_cost->simultaneous_prefetches);
3618 }
3619 }
3620
3621 #if TARGET_MACHO
3622 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3623
3624 static void
3625 darwin_rs6000_override_options (void)
3626 {
3627 /* The Darwin ABI always includes AltiVec, and can't be (validly) turned
3628 off. */
3629 rs6000_altivec_abi = 1;
3630 TARGET_ALTIVEC_VRSAVE = 1;
3631 rs6000_current_abi = ABI_DARWIN;
3632
3633 if (DEFAULT_ABI == ABI_DARWIN
3634 && TARGET_64BIT)
3635 darwin_one_byte_bool = 1;
3636
3637 if (TARGET_64BIT && ! TARGET_POWERPC64)
3638 {
3639 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3640 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3641 }
3642 if (flag_mkernel)
3643 {
3644 rs6000_default_long_calls = 1;
3645 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3646 }
3647
3648 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3649 Altivec. */
3650 if (!flag_mkernel && !flag_apple_kext
3651 && TARGET_64BIT
3652 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3653 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3654
3655 /* Unless the user (not the configurer) has explicitly overridden
3656 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3657 G4 unless targeting the kernel. */
3658 if (!flag_mkernel
3659 && !flag_apple_kext
3660 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3661 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3662 && ! global_options_set.x_rs6000_cpu_index)
3663 {
3664 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3665 }
3666 }
3667 #endif
3668
3669 /* If not otherwise specified by a target, make 'long double' equivalent to
3670 'double'. */
3671
3672 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3673 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3674 #endif
3675
3676 /* Return the builtin mask of the various options that could affect which
3677 builtins are enabled. In the past we used target_flags, but we've run out
3678 of bits, and some options are no longer in target_flags. */
3679
3680 HOST_WIDE_INT
3681 rs6000_builtin_mask_calculate (void)
3682 {
3683 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3684 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3685 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3686 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3687 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3688 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3689 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3690 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3691 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3692 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3693 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3694 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3695 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3696 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3697 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3698 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3699 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3700 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3701 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3702 | ((TARGET_LONG_DOUBLE_128
3703 && TARGET_HARD_FLOAT
3704 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3705 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3706 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3707 }
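/* A minimal sketch (hypothetical consumer, not part of this file) of how
   the returned mask gates builtins: a builtin is usable only when every
   mask bit it requires is set.  */
#if 0
static bool
rs6000_builtin_enabled_p (HOST_WIDE_INT mask, HOST_WIDE_INT required)
{
  /* E.g. required == (RS6000_BTM_ALTIVEC | RS6000_BTM_VSX) for a
     builtin that needs both AltiVec and VSX.  */
  return (mask & required) == required;
}
#endif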
3708
3709 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3710 to clobber the XER[CA] bit because clobbering that bit without telling
3711 the compiler worked just fine with versions of GCC before GCC 5, and
3712 breaking a lot of older code in ways that are hard to track down is
3713 not such a great idea. */
3714
3715 static rtx_insn *
3716 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3717 vec<const char *> &/*constraints*/,
3718 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3719 {
3720 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3721 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3722 return NULL;
3723 }
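/* An illustrative sketch (not part of GCC) of the kind of pre-GCC 5
   inline asm the blanket clobber keeps working: the addc/adde pair
   writes and reads XER[CA] without declaring it, so the compiler must
   assume every asm statement clobbers that bit.  */
#if 0
static void
add128 (unsigned long alo, unsigned long ahi,
	unsigned long blo, unsigned long bhi,
	unsigned long *rlo, unsigned long *rhi)
{
  unsigned long lo, hi;
  /* addc sets XER[CA]; adde consumes it.  */
  __asm__ ("addc %0,%2,%3\n\tadde %1,%4,%5"
	   : "=&r" (lo), "=r" (hi)
	   : "r" (alo), "r" (blo), "r" (ahi), "r" (bhi));
  *rlo = lo;
  *rhi = hi;
}
#endif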
3724
3725 /* Override command line options.
3726
3727 Combine build-specific configuration information with options
3728 specified on the command line to set various state variables which
3729 influence code generation, optimization, and expansion of built-in
3730 functions. Assure that command-line configuration preferences are
3731 compatible with each other and with the build configuration; issue
3732 warnings while adjusting configuration or error messages while
3733 rejecting configuration.
3734
3735 Upon entry to this function:
3736
3737 This function is called once at the beginning of
3738 compilation, and then again at the start and end of compiling
3739 each section of code that has a different configuration, as
3740 indicated, for example, by adding the
3741
3742 __attribute__((__target__("cpu=power9")))
3743
3744 qualifier to a function definition or, for example, by bracketing
3745 code between
3746
3747 #pragma GCC target("altivec")
3748
3749 and
3750
3751 #pragma GCC reset_options
3752
3753 directives. Parameter global_init_p is true for the initial
3754 invocation, which initializes global variables, and false for all
3755 subsequent invocations.
3756
3757
3758 Various global state information is assumed to be valid. This
3759 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3760 default CPU specified at build configure time, TARGET_DEFAULT,
3761 representing the default set of option flags for the default
3762 target, and global_options_set.x_rs6000_isa_flags, representing
3763 which options were requested on the command line.
3764
3765 Upon return from this function:
3766
3767 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3768 was set by name on the command line. Additionally, if certain
3769 attributes are automatically enabled or disabled by this function
3770 in order to assure compatibility between options and
3771 configuration, the flags associated with those attributes are
3772 also set. By setting these "explicit bits", we avoid the risk
3773 that other code might accidentally overwrite these particular
3774 attributes with "default values".
3775
3776 The various bits of rs6000_isa_flags are set to indicate the
3777 target options that have been selected for the most current
3778 compilation efforts. This has the effect of also turning on the
3779 associated TARGET_XXX values since these are macros which are
3780 generally defined to test the corresponding bit of the
3781 rs6000_isa_flags variable.
3782
3783 The variable rs6000_builtin_mask is set to represent the target
3784 options for the most current compilation efforts, consistent with
3785 the current contents of rs6000_isa_flags. This variable controls
3786 expansion of built-in functions.
3787
3788 Various other global variables and fields of global structures
3789 (over 50 in all) are initialized to reflect the desired options
3790 for the most current compilation efforts. */
3791
3792 static bool
3793 rs6000_option_override_internal (bool global_init_p)
3794 {
3795 bool ret = true;
3796
3797 HOST_WIDE_INT set_masks;
3798 HOST_WIDE_INT ignore_masks;
3799 int cpu_index = -1;
3800 int tune_index;
3801 struct cl_target_option *main_target_opt
3802 = ((global_init_p || target_option_default_node == NULL)
3803 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3804
3805 /* Print defaults. */
3806 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3807 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3808
3809 /* Remember the explicit arguments. */
3810 if (global_init_p)
3811 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3812
3813 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3814 library functions, so warn about it. The flag may be useful for
3815 performance studies from time to time though, so don't disable it
3816 entirely. */
3817 if (global_options_set.x_rs6000_alignment_flags
3818 && rs6000_alignment_flags == MASK_ALIGN_POWER
3819 && DEFAULT_ABI == ABI_DARWIN
3820 && TARGET_64BIT)
3821 warning (0, "%qs is not supported for 64-bit Darwin;"
3822 " it is incompatible with the installed C and C++ libraries",
3823 "-malign-power");
3824
3825 /* Numerous experiments show that IRA-based loop pressure
3826 calculation works better for RTL loop invariant motion on targets
3827 with enough (>= 32) registers. It is an expensive optimization,
3828 so it is enabled only when optimizing for peak performance. */
3829 if (optimize >= 3 && global_init_p
3830 && !global_options_set.x_flag_ira_loop_pressure)
3831 flag_ira_loop_pressure = 1;
3832
3833 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3834 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3835 options were already specified. */
3836 if (flag_sanitize & SANITIZE_USER_ADDRESS
3837 && !global_options_set.x_flag_asynchronous_unwind_tables)
3838 flag_asynchronous_unwind_tables = 1;
3839
3840 /* Set the pointer size. */
3841 if (TARGET_64BIT)
3842 {
3843 rs6000_pmode = DImode;
3844 rs6000_pointer_size = 64;
3845 }
3846 else
3847 {
3848 rs6000_pmode = SImode;
3849 rs6000_pointer_size = 32;
3850 }
3851
3852 /* Some OSs don't support saving the high part of 64-bit registers on context
3853 switch. Other OSs don't support saving Altivec registers. On those OSs,
3854 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3855 if the user wants either, the user must explicitly specify them and we
3856 won't interfere with the user's specification. */
3857
3858 set_masks = POWERPC_MASKS;
3859 #ifdef OS_MISSING_POWERPC64
3860 if (OS_MISSING_POWERPC64)
3861 set_masks &= ~OPTION_MASK_POWERPC64;
3862 #endif
3863 #ifdef OS_MISSING_ALTIVEC
3864 if (OS_MISSING_ALTIVEC)
3865 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3866 | OTHER_VSX_VECTOR_MASKS);
3867 #endif
3868
3869 /* Don't let the processor default override options given explicitly. */
3870 set_masks &= ~rs6000_isa_flags_explicit;
3871
3872 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3873 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3874
3875 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3876 the cpu in a target attribute or pragma, but did not specify a tuning
3877 option, use the cpu for the tuning option rather than the option specified
3878 with -mtune on the command line. Process a '--with-cpu' configuration
3879 request as an implicit -mcpu. */
3880 if (rs6000_cpu_index >= 0)
3881 cpu_index = rs6000_cpu_index;
3882 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3883 cpu_index = main_target_opt->x_rs6000_cpu_index;
3884 else if (OPTION_TARGET_CPU_DEFAULT)
3885 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3886
3887 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3888 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3889 with those from the cpu, except for options that were explicitly set. If
3890 we don't have a cpu, do not override the target bits set in
3891 TARGET_DEFAULT. */
3892 if (cpu_index >= 0)
3893 {
3894 rs6000_cpu_index = cpu_index;
3895 rs6000_isa_flags &= ~set_masks;
3896 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3897 & set_masks);
3898 }
3899 else
3900 {
3901 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3902 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3903 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Since we switched
3904 to using rs6000_isa_flags, we need to do the initialization here.
3905
3906 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3907 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3908 HOST_WIDE_INT flags;
3909 if (TARGET_DEFAULT)
3910 flags = TARGET_DEFAULT;
3911 else
3912 {
3913 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3914 const char *default_cpu = (!TARGET_POWERPC64
3915 ? "powerpc"
3916 : (BYTES_BIG_ENDIAN
3917 ? "powerpc64"
3918 : "powerpc64le"));
3919 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3920 flags = processor_target_table[default_cpu_index].target_enable;
3921 }
3922 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3923 }
3924
3925 if (rs6000_tune_index >= 0)
3926 tune_index = rs6000_tune_index;
3927 else if (cpu_index >= 0)
3928 rs6000_tune_index = tune_index = cpu_index;
3929 else
3930 {
3931 size_t i;
3932 enum processor_type tune_proc
3933 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3934
3935 tune_index = -1;
3936 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3937 if (processor_target_table[i].processor == tune_proc)
3938 {
3939 tune_index = i;
3940 break;
3941 }
3942 }
3943
3944 if (cpu_index >= 0)
3945 rs6000_cpu = processor_target_table[cpu_index].processor;
3946 else
3947 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3948
3949 gcc_assert (tune_index >= 0);
3950 rs6000_tune = processor_target_table[tune_index].processor;
3951
3952 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3953 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3954 || rs6000_cpu == PROCESSOR_PPCE5500)
3955 {
3956 if (TARGET_ALTIVEC)
3957 error ("AltiVec not supported in this target");
3958 }
3959
3960 /* If we are optimizing big endian systems for space, use the load/store
3961 multiple instructions. */
3962 if (BYTES_BIG_ENDIAN && optimize_size)
3963 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
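/* The "flags |= ~explicit & MASK" idiom above (used throughout this
   function) turns an option on by default without overriding an explicit
   -mmultiple or -mno-multiple: any bit the user set by name is masked
   out of the default before it is OR'ed in.  */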
3964
3965 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3966 because the hardware doesn't support the instructions used in little
3967 endian mode, and using them causes an alignment trap. The 750 does not
3968 cause an alignment trap (except when the memory target is unaligned). */
3969
3970 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3971 {
3972 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3973 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3974 warning (0, "%qs is not supported on little endian systems",
3975 "-mmultiple");
3976 }
3977
3978 /* If little-endian, default to -mstrict-align on older processors.
3979 Testing for htm matches power8 and later. */
3980 if (!BYTES_BIG_ENDIAN
3981 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3982 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3983
3984 if (!rs6000_fold_gimple)
3985 fprintf (stderr,
3986 "gimple folding of rs6000 builtins has been disabled.\n");
3987
3988 /* Add some warnings for VSX. */
3989 if (TARGET_VSX)
3990 {
3991 const char *msg = NULL;
3992 if (!TARGET_HARD_FLOAT)
3993 {
3994 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3995 msg = N_("%<-mvsx%> requires hardware floating point");
3996 else
3997 {
3998 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3999 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4000 }
4001 }
4002 else if (TARGET_AVOID_XFORM > 0)
4003 msg = N_("%<-mvsx%> needs indexed addressing");
4004 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4005 & OPTION_MASK_ALTIVEC))
4006 {
4007 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4008 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
4009 else
4010 msg = N_("%<-mno-altivec%> disables vsx");
4011 }
4012
4013 if (msg)
4014 {
4015 warning (0, msg);
4016 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4017 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4018 }
4019 }
4020
4021 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4022 the -mcpu setting to enable options that conflict. */
4023 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4024 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4025 | OPTION_MASK_ALTIVEC
4026 | OPTION_MASK_VSX)) != 0)
4027 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4028 | OPTION_MASK_DIRECT_MOVE)
4029 & ~rs6000_isa_flags_explicit);
4030
4031 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4032 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4033
4034 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4035 off all of the options that depend on those flags. */
4036 ignore_masks = rs6000_disable_incompatible_switches ();
4037
4038 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4039 unless the user explicitly used the -mno-<option> to disable the code. */
4040 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4041 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4042 else if (TARGET_P9_MINMAX)
4043 {
4044 if (cpu_index >= 0)
4045 {
4046 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
4047 {
4048 /* Legacy behavior: allow -mcpu=power9 with certain
4049 capabilities explicitly disabled. */
4050 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4051 }
4052 else
4053 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4054 "for <xxx> less than power9", "-mcpu");
4055 }
4056 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4057 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4058 & rs6000_isa_flags_explicit))
4059 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4060 were explicitly cleared. */
4061 error ("%qs incompatible with explicitly disabled options",
4062 "-mpower9-minmax");
4063 else
4064 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4065 }
4066 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4067 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4068 else if (TARGET_VSX)
4069 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4070 else if (TARGET_POPCNTD)
4071 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4072 else if (TARGET_DFP)
4073 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4074 else if (TARGET_CMPB)
4075 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4076 else if (TARGET_FPRND)
4077 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4078 else if (TARGET_POPCNTB)
4079 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4080 else if (TARGET_ALTIVEC)
4081 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4082
4083 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4084 {
4085 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4086 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4087 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4088 }
4089
4090 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4091 {
4092 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4093 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4094 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4095 }
4096
4097 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4098 {
4099 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4100 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4101 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4102 }
4103
4104 if (TARGET_P8_VECTOR && !TARGET_VSX)
4105 {
4106 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4107 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4108 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4109 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4110 {
4111 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4112 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4113 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4114 }
4115 else
4116 {
4117 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4118 not explicit. */
4119 rs6000_isa_flags |= OPTION_MASK_VSX;
4120 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4121 }
4122 }
4123
4124 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4125 {
4126 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4127 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4128 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4129 }
4130
4131 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4132 silently turn off quad memory mode. */
4133 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4134 {
4135 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4136 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
4137
4138 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4139 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
4140
4141 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4142 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4143 }
4144
4145 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4146 the words are reversed, but atomic operations can still be done by
4147 swapping the words. */
4148 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4149 {
4150 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4151 warning (0, N_("%<-mquad-memory%> is not available in little endian "
4152 "mode"));
4153
4154 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4155 }
4156
4157 /* Assume that if the user asked for normal quad memory instructions, they
4158 want the atomic versions as well, unless they explicitly told us not to
4159 use quad word atomic instructions. */
4160 if (TARGET_QUAD_MEMORY
4161 && !TARGET_QUAD_MEMORY_ATOMIC
4162 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4163 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4164
4165 /* If we can shrink-wrap the TOC register save separately, then use
4166 -msave-toc-indirect unless explicitly disabled. */
4167 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4168 && flag_shrink_wrap_separate
4169 && optimize_function_for_speed_p (cfun))
4170 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4171
4172 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4173 generating power8 instructions. Power9 does not optimize power8 fusion
4174 cases. */
4175 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4176 {
4177 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4178 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4179 else
4180 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4181 }
4182
4183 /* Setting additional fusion flags turns on base fusion. */
4184 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4185 {
4186 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4187 {
4188 if (TARGET_P8_FUSION_SIGN)
4189 error ("%qs requires %qs", "-mpower8-fusion-sign",
4190 "-mpower8-fusion");
4191
4192 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4193 }
4194 else
4195 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4196 }
4197
4198 /* Power8 does not fuse sign extended loads with the addis. If we are
4199 optimizing at high levels for speed, convert a sign extended load into a
4200 zero extending load, and an explicit sign extension. */
4201 if (TARGET_P8_FUSION
4202 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4203 && optimize_function_for_speed_p (cfun)
4204 && optimize >= 3)
4205 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4206
4207 /* ISA 3.0 vector instructions include ISA 2.07. */
4208 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4209 {
4210 /* We prefer to not mention undocumented options in
4211 error messages. However, if users have managed to select
4212 power9-vector without selecting power8-vector, they
4213 already know about undocumented flags. */
4214 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4215 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4216 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4217 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4218 {
4219 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4220 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4221 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4222 }
4223 else
4224 {
4225 /* OPTION_MASK_P9_VECTOR is explicit and
4226 OPTION_MASK_P8_VECTOR is not explicit. */
4227 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4228 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4229 }
4230 }
4231
4232 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4233 support. If we only have ISA 2.06 support, and the user did not specify
4234 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4235 but we don't enable the full vectorization support. */
4236 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4237 TARGET_ALLOW_MOVMISALIGN = 1;
4238
4239 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4240 {
4241 if (TARGET_ALLOW_MOVMISALIGN > 0
4242 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4243 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4244
4245 TARGET_ALLOW_MOVMISALIGN = 0;
4246 }
4247
4248 /* Determine when unaligned vector accesses are permitted, and when
4249 they are preferred over masked Altivec loads. Note that if
4250 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4251 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4252 not true. */
4253 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4254 {
4255 if (!TARGET_VSX)
4256 {
4257 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4258 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4259
4260 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4261 }
4262
4263 else if (!TARGET_ALLOW_MOVMISALIGN)
4264 {
4265 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4266 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4267 "-mallow-movmisalign");
4268
4269 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4270 }
4271 }
4272
4273 /* Use the long double size to select the appropriate long double type. We
4274 use TYPE_PRECISION to differentiate the 3 different long double types. We
4275 map 128 into the precision used for TFmode. */
4276 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4277 ? 64
4278 : FLOAT_PRECISION_TFmode);
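/* (The three types being distinguished: 64-bit double, IBM extended
   double-double (a pair of doubles, about 106 significand bits) and IEEE
   binary128 (113 significand bits).  Both 128-bit flavors are requested
   with -mlong-double-128, hence the remapping of 128 onto the TFmode
   precision.)  */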
4279
4280 /* Set long double size before the IEEE 128-bit tests. */
4281 if (!global_options_set.x_rs6000_long_double_type_size)
4282 {
4283 if (main_target_opt != NULL
4284 && (main_target_opt->x_rs6000_long_double_type_size
4285 != default_long_double_size))
4286 error ("target attribute or pragma changes long double size");
4287 else
4288 rs6000_long_double_type_size = default_long_double_size;
4289 }
4290 else if (rs6000_long_double_type_size == 128)
4291 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4292 else if (global_options_set.x_rs6000_ieeequad)
4293 {
4294 if (global_options.x_rs6000_ieeequad)
4295 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4296 else
4297 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4298 }
4299
4300 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4301 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4302 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4303 those systems will not pick up this default. Warn if the user changes the
4304 default unless -Wno-psabi. */
4305 if (!global_options_set.x_rs6000_ieeequad)
4306 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4307
4308 else
4309 {
4310 if (global_options.x_rs6000_ieeequad
4311 && (!TARGET_POPCNTD || !TARGET_VSX))
4312 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4313
4314 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4315 {
4316 static bool warned_change_long_double;
4317 if (!warned_change_long_double)
4318 {
4319 warned_change_long_double = true;
4320 if (TARGET_IEEEQUAD)
4321 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4322 else
4323 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4324 }
4325 }
4326 }
4327
4328 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4329 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4330 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4331 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4332 the keyword as well as the type. */
4333 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4334
4335 /* IEEE 128-bit floating point requires VSX support. */
4336 if (TARGET_FLOAT128_KEYWORD)
4337 {
4338 if (!TARGET_VSX)
4339 {
4340 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4341 error ("%qs requires VSX support", "%<-mfloat128%>");
4342
4343 TARGET_FLOAT128_TYPE = 0;
4344 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4345 | OPTION_MASK_FLOAT128_HW);
4346 }
4347 else if (!TARGET_FLOAT128_TYPE)
4348 {
4349 TARGET_FLOAT128_TYPE = 1;
4350 warning (0, "The %<-mfloat128%> option may not be fully supported");
4351 }
4352 }
4353
4354 /* Enable the __float128 keyword under Linux by default. */
4355 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4356 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4357 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4358
4359 /* If we are supporting the float128 type and have full ISA 3.0 support,
4360 enable -mfloat128-hardware by default. However, don't enable the
4361 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4362 because sometimes the compiler wants to put things in an integer
4363 container, and if we don't have __int128 support, it is impossible. */
4364 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4365 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4366 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4367 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4368
4369 if (TARGET_FLOAT128_HW
4370 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4371 {
4372 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4373 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4374
4375 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4376 }
4377
4378 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4379 {
4380 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4381 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4382
4383 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4384 }
4385
4386 /* Print the options after updating the defaults. */
4387 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4388 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4389
4390 /* E500mc does "better" if we inline more aggressively. Respect the
4391 user's opinion, though. */
4392 if (rs6000_block_move_inline_limit == 0
4393 && (rs6000_tune == PROCESSOR_PPCE500MC
4394 || rs6000_tune == PROCESSOR_PPCE500MC64
4395 || rs6000_tune == PROCESSOR_PPCE5500
4396 || rs6000_tune == PROCESSOR_PPCE6500))
4397 rs6000_block_move_inline_limit = 128;
4398
4399 /* store_one_arg depends on expand_block_move to handle at least the
4400 size of reg_parm_stack_space. */
4401 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4402 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4403
4404 if (global_init_p)
4405 {
4406 /* If the appropriate debug option is enabled, replace the target hooks
4407 with debug versions that call the real version and then print
4408 debugging information. */
4409 if (TARGET_DEBUG_COST)
4410 {
4411 targetm.rtx_costs = rs6000_debug_rtx_costs;
4412 targetm.address_cost = rs6000_debug_address_cost;
4413 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4414 }
4415
4416 if (TARGET_DEBUG_ADDR)
4417 {
4418 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4419 targetm.legitimize_address = rs6000_debug_legitimize_address;
4420 rs6000_secondary_reload_class_ptr
4421 = rs6000_debug_secondary_reload_class;
4422 targetm.secondary_memory_needed
4423 = rs6000_debug_secondary_memory_needed;
4424 targetm.can_change_mode_class
4425 = rs6000_debug_can_change_mode_class;
4426 rs6000_preferred_reload_class_ptr
4427 = rs6000_debug_preferred_reload_class;
4428 rs6000_legitimize_reload_address_ptr
4429 = rs6000_debug_legitimize_reload_address;
4430 rs6000_mode_dependent_address_ptr
4431 = rs6000_debug_mode_dependent_address;
4432 }
4433
4434 if (rs6000_veclibabi_name)
4435 {
4436 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4437 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4438 else
4439 {
4440 error ("unknown vectorization library ABI type (%qs) for "
4441 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4442 ret = false;
4443 }
4444 }
4445 }
4446
4447 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4448 target attribute or pragma which automatically enables both options,
4449 unless the altivec ABI was set. This is set by default for 64-bit, but
4450 not for 32-bit. */
4451 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4452 {
4453 TARGET_FLOAT128_TYPE = 0;
4454 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4455 | OPTION_MASK_FLOAT128_KEYWORD)
4456 & ~rs6000_isa_flags_explicit);
4457 }
4458
4459 /* Enable Altivec ABI for AIX -maltivec. */
4460 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4461 {
4462 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4463 error ("target attribute or pragma changes AltiVec ABI");
4464 else
4465 rs6000_altivec_abi = 1;
4466 }
4467
4468 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4469 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4470 be explicitly overridden in either case. */
4471 if (TARGET_ELF)
4472 {
4473 if (!global_options_set.x_rs6000_altivec_abi
4474 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4475 {
4476 if (main_target_opt != NULL
4477 && !main_target_opt->x_rs6000_altivec_abi)
4478 error ("target attribute or pragma changes AltiVec ABI");
4479 else
4480 rs6000_altivec_abi = 1;
4481 }
4482 }
4483
4484 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4485 So far, the only darwin64 targets are also Mach-O. */
4486 if (TARGET_MACHO
4487 && DEFAULT_ABI == ABI_DARWIN
4488 && TARGET_64BIT)
4489 {
4490 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4491 error ("target attribute or pragma changes darwin64 ABI");
4492 else
4493 {
4494 rs6000_darwin64_abi = 1;
4495 /* Default to natural alignment, for better performance. */
4496 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4497 }
4498 }
4499
4500 /* Place FP constants in the constant pool instead of the TOC
4501 if section anchors are enabled. */
4502 if (flag_section_anchors
4503 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4504 TARGET_NO_FP_IN_TOC = 1;
4505
4506 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4507 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4508
4509 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4510 SUBTARGET_OVERRIDE_OPTIONS;
4511 #endif
4512 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4513 SUBSUBTARGET_OVERRIDE_OPTIONS;
4514 #endif
4515 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4516 SUB3TARGET_OVERRIDE_OPTIONS;
4517 #endif
4518
4519 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4520 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4521
4522 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4523 && rs6000_tune != PROCESSOR_POWER5
4524 && rs6000_tune != PROCESSOR_POWER6
4525 && rs6000_tune != PROCESSOR_POWER7
4526 && rs6000_tune != PROCESSOR_POWER8
4527 && rs6000_tune != PROCESSOR_POWER9
4528 && rs6000_tune != PROCESSOR_PPCA2
4529 && rs6000_tune != PROCESSOR_CELL
4530 && rs6000_tune != PROCESSOR_PPC476);
4531 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4532 || rs6000_tune == PROCESSOR_POWER5
4533 || rs6000_tune == PROCESSOR_POWER7
4534 || rs6000_tune == PROCESSOR_POWER8);
4535 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4536 || rs6000_tune == PROCESSOR_POWER5
4537 || rs6000_tune == PROCESSOR_POWER6
4538 || rs6000_tune == PROCESSOR_POWER7
4539 || rs6000_tune == PROCESSOR_POWER8
4540 || rs6000_tune == PROCESSOR_POWER9
4541 || rs6000_tune == PROCESSOR_PPCE500MC
4542 || rs6000_tune == PROCESSOR_PPCE500MC64
4543 || rs6000_tune == PROCESSOR_PPCE5500
4544 || rs6000_tune == PROCESSOR_PPCE6500);
4545
4546 /* Allow debug switches to override the above settings. These are set to -1
4547 in rs6000.opt to indicate the user hasn't directly set the switch. */
4548 if (TARGET_ALWAYS_HINT >= 0)
4549 rs6000_always_hint = TARGET_ALWAYS_HINT;
4550
4551 if (TARGET_SCHED_GROUPS >= 0)
4552 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4553
4554 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4555 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4556
4557 rs6000_sched_restricted_insns_priority
4558 = (rs6000_sched_groups ? 1 : 0);
4559
4560 /* Handle -msched-costly-dep option. */
4561 rs6000_sched_costly_dep
4562 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4563
4564 if (rs6000_sched_costly_dep_str)
4565 {
4566 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4567 rs6000_sched_costly_dep = no_dep_costly;
4568 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4569 rs6000_sched_costly_dep = all_deps_costly;
4570 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4571 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4572 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4573 rs6000_sched_costly_dep = store_to_load_dep_costly;
4574 else
4575 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4576 atoi (rs6000_sched_costly_dep_str));
4577 }
4578
4579 /* Handle -minsert-sched-nops option. */
4580 rs6000_sched_insert_nops
4581 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4582
4583 if (rs6000_sched_insert_nops_str)
4584 {
4585 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4586 rs6000_sched_insert_nops = sched_finish_none;
4587 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4588 rs6000_sched_insert_nops = sched_finish_pad_groups;
4589 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4590 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4591 else
4592 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4593 atoi (rs6000_sched_insert_nops_str));
4594 }
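/* Accepted spellings for the two scheduling knobs parsed above:

       -msched-costly-dep=no|all|true_store_to_load|store_to_load|<number>
       -minsert-sched-nops=no|pad|regroup_exact|<number>

   For example, -mtune=power5 implies dispatch groups, so the defaults
   become true_store_to_load and regroup_exact respectively.  */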
4595
4596 /* Handle the stack protector options. */
4597 if (!global_options_set.x_rs6000_stack_protector_guard)
4598 #ifdef TARGET_THREAD_SSP_OFFSET
4599 rs6000_stack_protector_guard = SSP_TLS;
4600 #else
4601 rs6000_stack_protector_guard = SSP_GLOBAL;
4602 #endif
4603
4604 #ifdef TARGET_THREAD_SSP_OFFSET
4605 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4606 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4607 #endif
4608
4609 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4610 {
4611 char *endp;
4612 const char *str = rs6000_stack_protector_guard_offset_str;
4613
4614 errno = 0;
4615 long offset = strtol (str, &endp, 0);
4616 if (!*str || *endp || errno)
4617 error ("%qs is not a valid number in %qs", str,
4618 "-mstack-protector-guard-offset=");
4619
4620 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4621 || (TARGET_64BIT && (offset & 3)))
4622 error ("%qs is not a valid offset in %qs", str,
4623 "-mstack-protector-guard-offset=");
4624
4625 rs6000_stack_protector_guard_offset = offset;
4626 }
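/* A self-contained sketch of the strtol validation idiom used above; all
   three checks are needed: empty string, trailing junk (*endp) and
   overflow (errno == ERANGE).  */
#if 0
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

static bool
parse_guard_offset (const char *str, long *value)
{
  char *endp;
  errno = 0;
  long offset = strtol (str, &endp, 0);	/* Base 0: 0x..., 0... or decimal.  */
  if (!*str || *endp || errno)
    return false;
  *value = offset;
  return true;
}
#endif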
4627
4628 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4629 {
4630 const char *str = rs6000_stack_protector_guard_reg_str;
4631 int reg = decode_reg_name (str);
4632
4633 if (!IN_RANGE (reg, 1, 31))
4634 error ("%qs is not a valid base register in %qs", str,
4635 "-mstack-protector-guard-reg=");
4636
4637 rs6000_stack_protector_guard_reg = reg;
4638 }
4639
4640 if (rs6000_stack_protector_guard == SSP_TLS
4641 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4642 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4643
4644 if (global_init_p)
4645 {
4646 #ifdef TARGET_REGNAMES
4647 /* If the user desires alternate register names, copy in the
4648 alternate names now. */
4649 if (TARGET_REGNAMES)
4650 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4651 #endif
4652
4653 /* Set aix_struct_return last, after the ABI is determined.
4654 If -maix-struct-return or -msvr4-struct-return was explicitly
4655 used, don't override with the ABI default. */
4656 if (!global_options_set.x_aix_struct_return)
4657 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4658
4659 #if 0
4660 /* IBM XL compiler defaults to unsigned bitfields. */
4661 if (TARGET_XL_COMPAT)
4662 flag_signed_bitfields = 0;
4663 #endif
4664
4665 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4666 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4667
4668 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4669
4670 /* We can only guarantee the availability of DI pseudo-ops when
4671 assembling for 64-bit targets. */
4672 if (!TARGET_64BIT)
4673 {
4674 targetm.asm_out.aligned_op.di = NULL;
4675 targetm.asm_out.unaligned_op.di = NULL;
4676 }
4677
4678
4679 /* Set branch target alignment, if not optimizing for size. */
4680 if (!optimize_size)
4681 {
4682 /* Cell wants 8-byte alignment for dual issue. Titan wants 8-byte
4683 alignment to avoid misprediction by the branch predictor. */
4684 if (rs6000_tune == PROCESSOR_TITAN
4685 || rs6000_tune == PROCESSOR_CELL)
4686 {
4687 if (flag_align_functions && !str_align_functions)
4688 str_align_functions = "8";
4689 if (flag_align_jumps && !str_align_jumps)
4690 str_align_jumps = "8";
4691 if (flag_align_loops && !str_align_loops)
4692 str_align_loops = "8";
4693 }
4694 if (rs6000_align_branch_targets)
4695 {
4696 if (flag_align_functions && !str_align_functions)
4697 str_align_functions = "16";
4698 if (flag_align_jumps && !str_align_jumps)
4699 str_align_jumps = "16";
4700 if (flag_align_loops && !str_align_loops)
4701 {
4702 can_override_loop_align = 1;
4703 str_align_loops = "16";
4704 }
4705 }
4706
4707 if (flag_align_jumps && !str_align_jumps)
4708 str_align_jumps = "16";
4709 if (flag_align_loops && !str_align_loops)
4710 str_align_loops = "16";
4711 }
4712
4713 /* Arrange to save and restore machine status around nested functions. */
4714 init_machine_status = rs6000_init_machine_status;
4715
4716 /* We should always be splitting complex arguments, but we can't break
4717 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4718 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4719 targetm.calls.split_complex_arg = NULL;
4720
4721 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4722 if (DEFAULT_ABI == ABI_AIX)
4723 targetm.calls.custom_function_descriptors = 0;
4724 }
4725
4726 /* Initialize rs6000_cost with the appropriate target costs. */
4727 if (optimize_size)
4728 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4729 else
4730 switch (rs6000_tune)
4731 {
4732 case PROCESSOR_RS64A:
4733 rs6000_cost = &rs64a_cost;
4734 break;
4735
4736 case PROCESSOR_MPCCORE:
4737 rs6000_cost = &mpccore_cost;
4738 break;
4739
4740 case PROCESSOR_PPC403:
4741 rs6000_cost = &ppc403_cost;
4742 break;
4743
4744 case PROCESSOR_PPC405:
4745 rs6000_cost = &ppc405_cost;
4746 break;
4747
4748 case PROCESSOR_PPC440:
4749 rs6000_cost = &ppc440_cost;
4750 break;
4751
4752 case PROCESSOR_PPC476:
4753 rs6000_cost = &ppc476_cost;
4754 break;
4755
4756 case PROCESSOR_PPC601:
4757 rs6000_cost = &ppc601_cost;
4758 break;
4759
4760 case PROCESSOR_PPC603:
4761 rs6000_cost = &ppc603_cost;
4762 break;
4763
4764 case PROCESSOR_PPC604:
4765 rs6000_cost = &ppc604_cost;
4766 break;
4767
4768 case PROCESSOR_PPC604e:
4769 rs6000_cost = &ppc604e_cost;
4770 break;
4771
4772 case PROCESSOR_PPC620:
4773 rs6000_cost = &ppc620_cost;
4774 break;
4775
4776 case PROCESSOR_PPC630:
4777 rs6000_cost = &ppc630_cost;
4778 break;
4779
4780 case PROCESSOR_CELL:
4781 rs6000_cost = &ppccell_cost;
4782 break;
4783
4784 case PROCESSOR_PPC750:
4785 case PROCESSOR_PPC7400:
4786 rs6000_cost = &ppc750_cost;
4787 break;
4788
4789 case PROCESSOR_PPC7450:
4790 rs6000_cost = &ppc7450_cost;
4791 break;
4792
4793 case PROCESSOR_PPC8540:
4794 case PROCESSOR_PPC8548:
4795 rs6000_cost = &ppc8540_cost;
4796 break;
4797
4798 case PROCESSOR_PPCE300C2:
4799 case PROCESSOR_PPCE300C3:
4800 rs6000_cost = &ppce300c2c3_cost;
4801 break;
4802
4803 case PROCESSOR_PPCE500MC:
4804 rs6000_cost = &ppce500mc_cost;
4805 break;
4806
4807 case PROCESSOR_PPCE500MC64:
4808 rs6000_cost = &ppce500mc64_cost;
4809 break;
4810
4811 case PROCESSOR_PPCE5500:
4812 rs6000_cost = &ppce5500_cost;
4813 break;
4814
4815 case PROCESSOR_PPCE6500:
4816 rs6000_cost = &ppce6500_cost;
4817 break;
4818
4819 case PROCESSOR_TITAN:
4820 rs6000_cost = &titan_cost;
4821 break;
4822
4823 case PROCESSOR_POWER4:
4824 case PROCESSOR_POWER5:
4825 rs6000_cost = &power4_cost;
4826 break;
4827
4828 case PROCESSOR_POWER6:
4829 rs6000_cost = &power6_cost;
4830 break;
4831
4832 case PROCESSOR_POWER7:
4833 rs6000_cost = &power7_cost;
4834 break;
4835
4836 case PROCESSOR_POWER8:
4837 rs6000_cost = &power8_cost;
4838 break;
4839
4840 case PROCESSOR_POWER9:
4841 rs6000_cost = &power9_cost;
4842 break;
4843
4844 case PROCESSOR_PPCA2:
4845 rs6000_cost = &ppca2_cost;
4846 break;
4847
4848 default:
4849 gcc_unreachable ();
4850 }
4851
4852 if (global_init_p)
4853 {
4854 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4855 rs6000_cost->simultaneous_prefetches,
4856 global_options.x_param_values,
4857 global_options_set.x_param_values);
4858 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4859 global_options.x_param_values,
4860 global_options_set.x_param_values);
4861 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4862 rs6000_cost->cache_line_size,
4863 global_options.x_param_values,
4864 global_options_set.x_param_values);
4865 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4866 global_options.x_param_values,
4867 global_options_set.x_param_values);
4868
4869 /* Increase loop peeling limits based on performance analysis. */
4870 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4871 global_options.x_param_values,
4872 global_options_set.x_param_values);
4873 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4874 global_options.x_param_values,
4875 global_options_set.x_param_values);
4876
4877 /* Use the 'model' -fsched-pressure algorithm by default. */
4878 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4879 SCHED_PRESSURE_MODEL,
4880 global_options.x_param_values,
4881 global_options_set.x_param_values);
4882
4883 /* If using typedef char *va_list, signal that
4884 __builtin_va_start (&ap, 0) can be optimized to
4885 ap = __builtin_next_arg (0). */
4886 if (DEFAULT_ABI != ABI_V4)
4887 targetm.expand_builtin_va_start = NULL;
4888 }
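
/* For example, with -mtune=power8 the block above seeds the
   l1-cache-size, l1-cache-line-size, l2-cache-size and
   simultaneous-prefetches params from power8_cost;
   maybe_set_param_value only applies a value when the corresponding
   --param was not set explicitly by the user.  */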
4889
4890 /* If not explicitly specified via option, decide whether to generate indexed
4891 load/store instructions. A value of -1 indicates that the
4892 initial value of this variable has not been overwritten. During
4893 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4894 if (TARGET_AVOID_XFORM == -1)
4895 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4896 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4897 need indexed accesses and the type used is the scalar type of the element
4898 being loaded or stored. */
4899 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4900 && !TARGET_ALTIVEC);
4901
4902 /* Set the -mrecip options. */
4903 if (rs6000_recip_name)
4904 {
4905 char *p = ASTRDUP (rs6000_recip_name);
4906 char *q;
4907 unsigned int mask, i;
4908 bool invert;
4909
4910 while ((q = strtok (p, ",")) != NULL)
4911 {
4912 p = NULL;
4913 if (*q == '!')
4914 {
4915 invert = true;
4916 q++;
4917 }
4918 else
4919 invert = false;
4920
4921 if (!strcmp (q, "default"))
4922 mask = ((TARGET_RECIP_PRECISION)
4923 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4924 else
4925 {
4926 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4927 if (!strcmp (q, recip_options[i].string))
4928 {
4929 mask = recip_options[i].mask;
4930 break;
4931 }
4932
4933 if (i == ARRAY_SIZE (recip_options))
4934 {
4935 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4936 invert = false;
4937 mask = 0;
4938 ret = false;
4939 }
4940 }
4941
4942 if (invert)
4943 rs6000_recip_control &= ~mask;
4944 else
4945 rs6000_recip_control |= mask;
4946 }
4947 }
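
/* Example of the syntax parsed above (the token names live in the
   recip_options table; "all" and "rsqrtd" are shown on the assumption
   that they are among its entries, as in the GCC manual):

       -mrecip=all,!rsqrtd

   first sets every bit of the "all" mask in rs6000_recip_control, then
   the '!' prefix clears the double-precision rsqrt bits.  */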
4948
4949 /* Set the builtin mask from the various options that could affect which
4950 builtins are available.  In the past we used target_flags, but we've run
4951 out of bits, and some options are no longer in target_flags. */
4952 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4953 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4954 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4955 rs6000_builtin_mask);
4956
4957 /* Initialize all of the registers. */
4958 rs6000_init_hard_regno_mode_ok (global_init_p);
4959
4960 /* Save the initial options in case the user uses function-specific options.  */
4961 if (global_init_p)
4962 target_option_default_node = target_option_current_node
4963 = build_target_option_node (&global_options);
4964
4965 /* If not explicitly specified via option, decide whether to generate the
4966 extra blr's required to preserve the link stack on some CPUs (e.g., 476). */
4967 if (TARGET_LINK_STACK == -1)
4968 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4969
4970 /* Deprecate use of -mno-speculate-indirect-jumps. */
4971 if (!rs6000_speculate_indirect_jumps)
4972 warning (0, "%qs is deprecated and not recommended in any circumstances",
4973 "-mno-speculate-indirect-jumps");
4974
4975 return ret;
4976 }
4977
4978 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4979 define the target cpu type. */
4980
4981 static void
4982 rs6000_option_override (void)
4983 {
4984 (void) rs6000_option_override_internal (true);
4985 }
4986
4987 \f
4988 /* Implement targetm.vectorize.builtin_mask_for_load. */
4989 static tree
4990 rs6000_builtin_mask_for_load (void)
4991 {
4992 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4993 if ((TARGET_ALTIVEC && !TARGET_VSX)
4994 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4995 return altivec_builtin_mask_for_load;
4996 else
4997 return 0;
4998 }
4999
5000 /* Implement LOOP_ALIGN. */
5001 align_flags
5002 rs6000_loop_align (rtx label)
5003 {
5004 basic_block bb;
5005 int ninsns;
5006
5007 /* Don't override loop alignment if -falign-loops was specified. */
5008 if (!can_override_loop_align)
5009 return align_loops;
5010
5011 bb = BLOCK_FOR_INSN (label);
5012 ninsns = num_loop_insns (bb->loop_father);
5013
5014 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default.  */
5015 if (ninsns > 4 && ninsns <= 8
5016 && (rs6000_tune == PROCESSOR_POWER4
5017 || rs6000_tune == PROCESSOR_POWER5
5018 || rs6000_tune == PROCESSOR_POWER6
5019 || rs6000_tune == PROCESSOR_POWER7
5020 || rs6000_tune == PROCESSOR_POWER8))
5021 return align_flags (5);
5022 else
5023 return align_loops;
5024 }
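
/* Worked example: align_flags (5) requests 2**5 = 32-byte alignment, so
   on POWER4..POWER8 a loop body of 5 to 8 instructions is placed at the
   start of a 32-byte icache sector, while other loops keep the
   -falign-loops default.  */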
5025
5026 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5027 after applying N iterations.  This routine does not determine how many
5028 iterations are required to reach the desired alignment. */
5029
5030 static bool
5031 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5032 {
5033 if (is_packed)
5034 return false;
5035
5036 if (TARGET_32BIT)
5037 {
5038 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5039 return true;
5040
5041 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5042 return true;
5043
5044 return false;
5045 }
5046 else
5047 {
5048 if (TARGET_MACHO)
5049 return false;
5050
5051 /* We assume that all other types are naturally aligned.  CHECKME! */
5052 return true;
5053 }
5054 }
5055
5056 /* Return true if the vector misalignment factor is supported by the
5057 target. */
5058 static bool
5059 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5060 const_tree type,
5061 int misalignment,
5062 bool is_packed)
5063 {
5064 if (TARGET_VSX)
5065 {
5066 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5067 return true;
5068
5069 /* Return false if the movmisalign pattern is not supported for this mode. */
5070 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5071 return false;
5072
5073 if (misalignment == -1)
5074 {
5075 /* Misalignment factor is unknown at compile time but we know
5076 it's word aligned. */
5077 if (rs6000_vector_alignment_reachable (type, is_packed))
5078 {
5079 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5080
5081 if (element_size == 64 || element_size == 32)
5082 return true;
5083 }
5084
5085 return false;
5086 }
5087
5088 /* VSX supports word-aligned vectors. */
5089 if (misalignment % 4 == 0)
5090 return true;
5091 }
5092 return false;
5093 }
5094
5095 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5096 static int
5097 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5098 tree vectype, int misalign)
5099 {
5100 unsigned elements;
5101 tree elem_type;
5102
5103 switch (type_of_cost)
5104 {
5105 case scalar_stmt:
5106 case scalar_load:
5107 case scalar_store:
5108 case vector_stmt:
5109 case vector_load:
5110 case vector_store:
5111 case vec_to_scalar:
5112 case scalar_to_vec:
5113 case cond_branch_not_taken:
5114 return 1;
5115
5116 case vec_perm:
5117 if (TARGET_VSX)
5118 return 3;
5119 else
5120 return 1;
5121
5122 case vec_promote_demote:
5123 if (TARGET_VSX)
5124 return 4;
5125 else
5126 return 1;
5127
5128 case cond_branch_taken:
5129 return 3;
5130
5131 case unaligned_load:
5132 case vector_gather_load:
5133 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5134 return 1;
5135
5136 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5137 {
5138 elements = TYPE_VECTOR_SUBPARTS (vectype);
5139 if (elements == 2)
5140 /* Double word aligned. */
5141 return 2;
5142
5143 if (elements == 4)
5144 {
5145 switch (misalign)
5146 {
5147 case 8:
5148 /* Double word aligned. */
5149 return 2;
5150
5151 case -1:
5152 /* Unknown misalignment. */
5153 case 4:
5154 case 12:
5155 /* Word aligned. */
5156 return 22;
5157
5158 default:
5159 gcc_unreachable ();
5160 }
5161 }
5162 }
5163
5164 if (TARGET_ALTIVEC)
5165 /* Misaligned loads are not supported. */
5166 gcc_unreachable ();
5167
5168 return 2;
5169
5170 case unaligned_store:
5171 case vector_scatter_store:
5172 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5173 return 1;
5174
5175 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5176 {
5177 elements = TYPE_VECTOR_SUBPARTS (vectype);
5178 if (elements == 2)
5179 /* Double word aligned. */
5180 return 2;
5181
5182 if (elements == 4)
5183 {
5184 switch (misalign)
5185 {
5186 case 8:
5187 /* Double word aligned. */
5188 return 2;
5189
5190 case -1:
5191 /* Unknown misalignment. */
5192 case 4:
5193 case 12:
5194 /* Word aligned. */
5195 return 23;
5196
5197 default:
5198 gcc_unreachable ();
5199 }
5200 }
5201 }
5202
5203 if (TARGET_ALTIVEC)
5204 /* Misaligned stores are not supported. */
5205 gcc_unreachable ();
5206
5207 return 2;
5208
5209 case vec_construct:
5210 /* This is a rough approximation assuming non-constant elements
5211 constructed into a vector via element insertion. FIXME:
5212 vec_construct is not granular enough for uniformly good
5213 decisions. If the initialization is a splat, this is
5214 cheaper than we estimate. Improve this someday. */
5215 elem_type = TREE_TYPE (vectype);
5216 /* 32-bit vectors loaded into registers are stored as double
5217 precision, so we need 2 permutes, 2 converts, and 1 merge
5218 to construct a vector of short floats from them. */
5219 if (SCALAR_FLOAT_TYPE_P (elem_type)
5220 && TYPE_PRECISION (elem_type) == 32)
5221 return 5;
5222 /* On POWER9, integer vector types are built up in GPRs and then
5223 use a direct move (2 cycles). For POWER8 this is even worse,
5224 as we need two direct moves and a merge, and the direct moves
5225 are five cycles. */
5226 else if (INTEGRAL_TYPE_P (elem_type))
5227 {
5228 if (TARGET_P9_VECTOR)
5229 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5230 else
5231 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5232 }
5233 else
5234 /* V2DFmode doesn't need a direct move. */
5235 return 2;
5236
5237 default:
5238 gcc_unreachable ();
5239 }
5240 }
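
/* Worked example of the table above (unit costs as returned by the
   switch, not measured latencies): an unaligned V4SF load on a VSX
   target without efficient unaligned accesses costs 2 when the
   misalignment is 8 (doubleword aligned) but 22 when it is 4, 12, or
   unknown, which steers the vectorizer toward peeling for alignment in
   the word-aligned cases.  */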
5241
5242 /* Implement targetm.vectorize.preferred_simd_mode. */
5243
5244 static machine_mode
5245 rs6000_preferred_simd_mode (scalar_mode mode)
5246 {
5247 if (TARGET_VSX)
5248 switch (mode)
5249 {
5250 case E_DFmode:
5251 return V2DFmode;
5252 default:;
5253 }
5254 if (TARGET_ALTIVEC || TARGET_VSX)
5255 switch (mode)
5256 {
5257 case E_SFmode:
5258 return V4SFmode;
5259 case E_TImode:
5260 return V1TImode;
5261 case E_DImode:
5262 return V2DImode;
5263 case E_SImode:
5264 return V4SImode;
5265 case E_HImode:
5266 return V8HImode;
5267 case E_QImode:
5268 return V16QImode;
5269 default:;
5270 }
5271 return word_mode;
5272 }
5273
5274 typedef struct _rs6000_cost_data
5275 {
5276 struct loop *loop_info;
5277 unsigned cost[3];
5278 } rs6000_cost_data;
5279
5280 /* Test for likely overcommitment of vector hardware resources. If a
5281 loop iteration is relatively large, and too large a percentage of
5282 instructions in the loop are vectorized, the cost model may not
5283 adequately reflect delays from unavailable vector resources.
5284 Penalize the loop body cost for this case. */
5285
5286 static void
5287 rs6000_density_test (rs6000_cost_data *data)
5288 {
5289 const int DENSITY_PCT_THRESHOLD = 85;
5290 const int DENSITY_SIZE_THRESHOLD = 70;
5291 const int DENSITY_PENALTY = 10;
5292 struct loop *loop = data->loop_info;
5293 basic_block *bbs = get_loop_body (loop);
5294 int nbbs = loop->num_nodes;
5295 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5296 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5297 int i, density_pct;
5298
5299 for (i = 0; i < nbbs; i++)
5300 {
5301 basic_block bb = bbs[i];
5302 gimple_stmt_iterator gsi;
5303
5304 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5305 {
5306 gimple *stmt = gsi_stmt (gsi);
5307 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5308
5309 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5310 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5311 not_vec_cost++;
5312 }
5313 }
5314
5315 free (bbs);
5316 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5317
5318 if (density_pct > DENSITY_PCT_THRESHOLD
5319 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5320 {
5321 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5322 if (dump_enabled_p ())
5323 dump_printf_loc (MSG_NOTE, vect_location,
5324 "density %d%%, cost %d exceeds threshold, penalizing "
5325 "loop body cost by %d%%", density_pct,
5326 vec_cost + not_vec_cost, DENSITY_PENALTY);
5327 }
5328 }
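
/* Numeric sketch of the density test: with vec_cost = 90 and
   not_vec_cost = 10, density_pct = 9000 / 100 = 90, which exceeds the
   85% threshold, and the combined size 100 exceeds 70, so the body cost
   is scaled to 90 * 110 / 100 = 99.  */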
5329
5330 /* Implement targetm.vectorize.init_cost. */
5331
5332 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5333 instruction is needed by the vectorization. */
5334 static bool rs6000_vect_nonmem;
5335
5336 static void *
5337 rs6000_init_cost (struct loop *loop_info)
5338 {
5339 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5340 data->loop_info = loop_info;
5341 data->cost[vect_prologue] = 0;
5342 data->cost[vect_body] = 0;
5343 data->cost[vect_epilogue] = 0;
5344 rs6000_vect_nonmem = false;
5345 return data;
5346 }
5347
5348 /* Implement targetm.vectorize.add_stmt_cost. */
5349
5350 static unsigned
5351 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5352 struct _stmt_vec_info *stmt_info, int misalign,
5353 enum vect_cost_model_location where)
5354 {
5355 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5356 unsigned retval = 0;
5357
5358 if (flag_vect_cost_model)
5359 {
5360 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5361 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5362 misalign);
5363 /* Statements in an inner loop relative to the loop being
5364 vectorized are weighted more heavily. The value here is
5365 arbitrary and could potentially be improved with analysis. */
5366 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5367 count *= 50; /* FIXME. */
5368
5369 retval = (unsigned) (count * stmt_cost);
5370 cost_data->cost[where] += retval;
5371
5372 /* Check whether we're doing something other than just a copy loop.
5373 Not all such loops may be profitably vectorized; see
5374 rs6000_finish_cost. */
5375 if ((kind == vec_to_scalar || kind == vec_perm
5376 || kind == vec_promote_demote || kind == vec_construct
5377 || kind == scalar_to_vec)
5378 || (where == vect_body && kind == vector_stmt))
5379 rs6000_vect_nonmem = true;
5380 }
5381
5382 return retval;
5383 }
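
/* Example of the inner-loop weighting above: a vector_stmt of base
   cost 1 that sits in a loop nested inside the loop being vectorized is
   charged 1 * 50 = 50 units, so even a short inner loop quickly
   dominates cost_data->cost[vect_body].  */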
5384
5385 /* Implement targetm.vectorize.finish_cost. */
5386
5387 static void
5388 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5389 unsigned *body_cost, unsigned *epilogue_cost)
5390 {
5391 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5392
5393 if (cost_data->loop_info)
5394 rs6000_density_test (cost_data);
5395
5396 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5397 that require versioning for any reason. The vectorization is at
5398 best a wash inside the loop, and the versioning checks make
5399 profitability highly unlikely and potentially quite harmful. */
5400 if (cost_data->loop_info)
5401 {
5402 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5403 if (!rs6000_vect_nonmem
5404 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5405 && LOOP_REQUIRES_VERSIONING (vec_info))
5406 cost_data->cost[vect_body] += 10000;
5407 }
5408
5409 *prologue_cost = cost_data->cost[vect_prologue];
5410 *body_cost = cost_data->cost[vect_body];
5411 *epilogue_cost = cost_data->cost[vect_epilogue];
5412 }
5413
5414 /* Implement targetm.vectorize.destroy_cost_data. */
5415
5416 static void
5417 rs6000_destroy_cost_data (void *data)
5418 {
5419 free (data);
5420 }
5421
5422 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5423 library with vectorized intrinsics. */
5424
5425 static tree
5426 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5427 tree type_in)
5428 {
5429 char name[32];
5430 const char *suffix = NULL;
5431 tree fntype, new_fndecl, bdecl = NULL_TREE;
5432 int n_args = 1;
5433 const char *bname;
5434 machine_mode el_mode, in_mode;
5435 int n, in_n;
5436
5437 /* Libmass is suitable only for unsafe math, since it does not correctly
5438 support parts of IEEE arithmetic (such as denormals) to the required
5439 precision.  Only use it if we have VSX, for the simd d2 or f4 functions.
5440 XXX: Add variable length support. */
5441 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5442 return NULL_TREE;
5443
5444 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5445 n = TYPE_VECTOR_SUBPARTS (type_out);
5446 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5447 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5448 if (el_mode != in_mode
5449 || n != in_n)
5450 return NULL_TREE;
5451
5452 switch (fn)
5453 {
5454 CASE_CFN_ATAN2:
5455 CASE_CFN_HYPOT:
5456 CASE_CFN_POW:
5457 n_args = 2;
5458 gcc_fallthrough ();
5459
5460 CASE_CFN_ACOS:
5461 CASE_CFN_ACOSH:
5462 CASE_CFN_ASIN:
5463 CASE_CFN_ASINH:
5464 CASE_CFN_ATAN:
5465 CASE_CFN_ATANH:
5466 CASE_CFN_CBRT:
5467 CASE_CFN_COS:
5468 CASE_CFN_COSH:
5469 CASE_CFN_ERF:
5470 CASE_CFN_ERFC:
5471 CASE_CFN_EXP2:
5472 CASE_CFN_EXP:
5473 CASE_CFN_EXPM1:
5474 CASE_CFN_LGAMMA:
5475 CASE_CFN_LOG10:
5476 CASE_CFN_LOG1P:
5477 CASE_CFN_LOG2:
5478 CASE_CFN_LOG:
5479 CASE_CFN_SIN:
5480 CASE_CFN_SINH:
5481 CASE_CFN_SQRT:
5482 CASE_CFN_TAN:
5483 CASE_CFN_TANH:
5484 if (el_mode == DFmode && n == 2)
5485 {
5486 bdecl = mathfn_built_in (double_type_node, fn);
5487 suffix = "d2"; /* pow -> powd2 */
5488 }
5489 else if (el_mode == SFmode && n == 4)
5490 {
5491 bdecl = mathfn_built_in (float_type_node, fn);
5492 suffix = "4"; /* powf -> powf4 */
5493 }
5494 else
5495 return NULL_TREE;
5496 if (!bdecl)
5497 return NULL_TREE;
5498 break;
5499
5500 default:
5501 return NULL_TREE;
5502 }
5503
5504 gcc_assert (suffix != NULL);
5505 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5506 if (!bname)
5507 return NULL_TREE;
5508
5509 strcpy (name, bname + sizeof ("__builtin_") - 1);
5510 strcat (name, suffix);
5511
5512 if (n_args == 1)
5513 fntype = build_function_type_list (type_out, type_in, NULL);
5514 else if (n_args == 2)
5515 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5516 else
5517 gcc_unreachable ();
5518
5519 /* Build a function declaration for the vectorized function. */
5520 new_fndecl = build_decl (BUILTINS_LOCATION,
5521 FUNCTION_DECL, get_identifier (name), fntype);
5522 TREE_PUBLIC (new_fndecl) = 1;
5523 DECL_EXTERNAL (new_fndecl) = 1;
5524 DECL_IS_NOVOPS (new_fndecl) = 1;
5525 TREE_READONLY (new_fndecl) = 1;
5526
5527 return new_fndecl;
5528 }
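
/* Name-mangling example for the handler above: for CFN_POW with
   V2DFmode in and out, bdecl is __builtin_pow, stripping "__builtin_"
   from bname leaves "pow", and appending the "d2" suffix yields the
   MASS entry point "powd2", declared with two double arguments via the
   n_args == 2 fallthrough.  */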
5529
5530 /* Returns a function decl for a vectorized version of the builtin function
5531 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5532 if it is not available. */
5533
5534 static tree
5535 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5536 tree type_in)
5537 {
5538 machine_mode in_mode, out_mode;
5539 int in_n, out_n;
5540
5541 if (TARGET_DEBUG_BUILTIN)
5542 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5543 combined_fn_name (combined_fn (fn)),
5544 GET_MODE_NAME (TYPE_MODE (type_out)),
5545 GET_MODE_NAME (TYPE_MODE (type_in)));
5546
5547 if (TREE_CODE (type_out) != VECTOR_TYPE
5548 || TREE_CODE (type_in) != VECTOR_TYPE)
5549 return NULL_TREE;
5550
5551 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5552 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5553 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5554 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5555
5556 switch (fn)
5557 {
5558 CASE_CFN_COPYSIGN:
5559 if (VECTOR_UNIT_VSX_P (V2DFmode)
5560 && out_mode == DFmode && out_n == 2
5561 && in_mode == DFmode && in_n == 2)
5562 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5563 if (VECTOR_UNIT_VSX_P (V4SFmode)
5564 && out_mode == SFmode && out_n == 4
5565 && in_mode == SFmode && in_n == 4)
5566 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5567 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5568 && out_mode == SFmode && out_n == 4
5569 && in_mode == SFmode && in_n == 4)
5570 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5571 break;
5572 CASE_CFN_CEIL:
5573 if (VECTOR_UNIT_VSX_P (V2DFmode)
5574 && out_mode == DFmode && out_n == 2
5575 && in_mode == DFmode && in_n == 2)
5576 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5577 if (VECTOR_UNIT_VSX_P (V4SFmode)
5578 && out_mode == SFmode && out_n == 4
5579 && in_mode == SFmode && in_n == 4)
5580 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5581 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5582 && out_mode == SFmode && out_n == 4
5583 && in_mode == SFmode && in_n == 4)
5584 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5585 break;
5586 CASE_CFN_FLOOR:
5587 if (VECTOR_UNIT_VSX_P (V2DFmode)
5588 && out_mode == DFmode && out_n == 2
5589 && in_mode == DFmode && in_n == 2)
5590 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5591 if (VECTOR_UNIT_VSX_P (V4SFmode)
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5595 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5596 && out_mode == SFmode && out_n == 4
5597 && in_mode == SFmode && in_n == 4)
5598 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5599 break;
5600 CASE_CFN_FMA:
5601 if (VECTOR_UNIT_VSX_P (V2DFmode)
5602 && out_mode == DFmode && out_n == 2
5603 && in_mode == DFmode && in_n == 2)
5604 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5605 if (VECTOR_UNIT_VSX_P (V4SFmode)
5606 && out_mode == SFmode && out_n == 4
5607 && in_mode == SFmode && in_n == 4)
5608 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5609 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5610 && out_mode == SFmode && out_n == 4
5611 && in_mode == SFmode && in_n == 4)
5612 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5613 break;
5614 CASE_CFN_TRUNC:
5615 if (VECTOR_UNIT_VSX_P (V2DFmode)
5616 && out_mode == DFmode && out_n == 2
5617 && in_mode == DFmode && in_n == 2)
5618 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5619 if (VECTOR_UNIT_VSX_P (V4SFmode)
5620 && out_mode == SFmode && out_n == 4
5621 && in_mode == SFmode && in_n == 4)
5622 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5623 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5624 && out_mode == SFmode && out_n == 4
5625 && in_mode == SFmode && in_n == 4)
5626 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5627 break;
5628 CASE_CFN_NEARBYINT:
5629 if (VECTOR_UNIT_VSX_P (V2DFmode)
5630 && flag_unsafe_math_optimizations
5631 && out_mode == DFmode && out_n == 2
5632 && in_mode == DFmode && in_n == 2)
5633 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5634 if (VECTOR_UNIT_VSX_P (V4SFmode)
5635 && flag_unsafe_math_optimizations
5636 && out_mode == SFmode && out_n == 4
5637 && in_mode == SFmode && in_n == 4)
5638 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5639 break;
5640 CASE_CFN_RINT:
5641 if (VECTOR_UNIT_VSX_P (V2DFmode)
5642 && !flag_trapping_math
5643 && out_mode == DFmode && out_n == 2
5644 && in_mode == DFmode && in_n == 2)
5645 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5646 if (VECTOR_UNIT_VSX_P (V4SFmode)
5647 && !flag_trapping_math
5648 && out_mode == SFmode && out_n == 4
5649 && in_mode == SFmode && in_n == 4)
5650 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5651 break;
5652 default:
5653 break;
5654 }
5655
5656 /* Generate calls to libmass if appropriate. */
5657 if (rs6000_veclib_handler)
5658 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5659
5660 return NULL_TREE;
5661 }
5662
5663 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5664
5665 static tree
5666 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5667 tree type_in)
5668 {
5669 machine_mode in_mode, out_mode;
5670 int in_n, out_n;
5671
5672 if (TARGET_DEBUG_BUILTIN)
5673 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5674 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5675 GET_MODE_NAME (TYPE_MODE (type_out)),
5676 GET_MODE_NAME (TYPE_MODE (type_in)));
5677
5678 if (TREE_CODE (type_out) != VECTOR_TYPE
5679 || TREE_CODE (type_in) != VECTOR_TYPE)
5680 return NULL_TREE;
5681
5682 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5683 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5684 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5685 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5686
5687 enum rs6000_builtins fn
5688 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5689 switch (fn)
5690 {
5691 case RS6000_BUILTIN_RSQRTF:
5692 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5693 && out_mode == SFmode && out_n == 4
5694 && in_mode == SFmode && in_n == 4)
5695 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5696 break;
5697 case RS6000_BUILTIN_RSQRT:
5698 if (VECTOR_UNIT_VSX_P (V2DFmode)
5699 && out_mode == DFmode && out_n == 2
5700 && in_mode == DFmode && in_n == 2)
5701 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5702 break;
5703 case RS6000_BUILTIN_RECIPF:
5704 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5705 && out_mode == SFmode && out_n == 4
5706 && in_mode == SFmode && in_n == 4)
5707 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5708 break;
5709 case RS6000_BUILTIN_RECIP:
5710 if (VECTOR_UNIT_VSX_P (V2DFmode)
5711 && out_mode == DFmode && out_n == 2
5712 && in_mode == DFmode && in_n == 2)
5713 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5714 break;
5715 default:
5716 break;
5717 }
5718 return NULL_TREE;
5719 }
5720 \f
5721 /* Default CPU string for rs6000*_file_start functions. */
5722 static const char *rs6000_default_cpu;
5723
5724 /* Do anything needed at the start of the asm file. */
5725
5726 static void
5727 rs6000_file_start (void)
5728 {
5729 char buffer[80];
5730 const char *start = buffer;
5731 FILE *file = asm_out_file;
5732
5733 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5734
5735 default_file_start ();
5736
5737 if (flag_verbose_asm)
5738 {
5739 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5740
5741 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5742 {
5743 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5744 start = "";
5745 }
5746
5747 if (global_options_set.x_rs6000_cpu_index)
5748 {
5749 fprintf (file, "%s -mcpu=%s", start,
5750 processor_target_table[rs6000_cpu_index].name);
5751 start = "";
5752 }
5753
5754 if (global_options_set.x_rs6000_tune_index)
5755 {
5756 fprintf (file, "%s -mtune=%s", start,
5757 processor_target_table[rs6000_tune_index].name);
5758 start = "";
5759 }
5760
5761 if (PPC405_ERRATUM77)
5762 {
5763 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5764 start = "";
5765 }
5766
5767 #ifdef USING_ELFOS_H
5768 switch (rs6000_sdata)
5769 {
5770 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5771 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5772 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5773 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5774 }
5775
5776 if (rs6000_sdata && g_switch_value)
5777 {
5778 fprintf (file, "%s -G %d", start,
5779 g_switch_value);
5780 start = "";
5781 }
5782 #endif
5783
5784 if (*start == '\0')
5785 putc ('\n', file);
5786 }
5787
5788 #ifdef USING_ELFOS_H
5789 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5790 && !global_options_set.x_rs6000_cpu_index)
5791 {
5792 fputs ("\t.machine ", asm_out_file);
5793 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5794 fputs ("power9\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5796 fputs ("power8\n", asm_out_file);
5797 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5798 fputs ("power7\n", asm_out_file);
5799 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5800 fputs ("power6\n", asm_out_file);
5801 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5802 fputs ("power5\n", asm_out_file);
5803 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5804 fputs ("power4\n", asm_out_file);
5805 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5806 fputs ("ppc64\n", asm_out_file);
5807 else
5808 fputs ("ppc\n", asm_out_file);
5809 }
5810 #endif
5811
5812 if (DEFAULT_ABI == ABI_ELFv2)
5813 fprintf (file, "\t.abiversion 2\n");
5814 }
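
/* Example of the output above (ELF only, i.e. under USING_ELFOS_H):
   when GCC was configured without --with-cpu, no -mcpu= is given, and
   the ISA flags include OPTION_MASK_MODULO, the file starts with
       .machine power9
   followed by ".abiversion 2" when targeting the ELFv2 ABI.  */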
5815
5816 \f
5817 /* Return nonzero if this function is known to have a null epilogue. */
5818
5819 int
5820 direct_return (void)
5821 {
5822 if (reload_completed)
5823 {
5824 rs6000_stack_t *info = rs6000_stack_info ();
5825
5826 if (info->first_gp_reg_save == 32
5827 && info->first_fp_reg_save == 64
5828 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5829 && ! info->lr_save_p
5830 && ! info->cr_save_p
5831 && info->vrsave_size == 0
5832 && ! info->push_p)
5833 return 1;
5834 }
5835
5836 return 0;
5837 }
5838
5839 /* Helper for num_insns_constant. Calculate number of instructions to
5840 load VALUE to a single gpr using combinations of addi, addis, ori,
5841 oris and sldi instructions. */
5842
5843 static int
5844 num_insns_constant_gpr (HOST_WIDE_INT value)
5845 {
5846 /* signed constant loadable with addi */
5847 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5848 return 1;
5849
5850 /* constant loadable with addis */
5851 else if ((value & 0xffff) == 0
5852 && (value >> 31 == -1 || value >> 31 == 0))
5853 return 1;
5854
5855 else if (TARGET_POWERPC64)
5856 {
5857 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5858 HOST_WIDE_INT high = value >> 31;
5859
5860 if (high == 0 || high == -1)
5861 return 2;
5862
5863 high >>= 1;
5864
5865 if (low == 0)
5866 return num_insns_constant_gpr (high) + 1;
5867 else if (high == 0)
5868 return num_insns_constant_gpr (low) + 1;
5869 else
5870 return (num_insns_constant_gpr (high)
5871 + num_insns_constant_gpr (low) + 1);
5872 }
5873
5874 else
5875 return 2;
5876 }
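
/* Worked examples (a sketch, not part of the build; the expected counts
   follow from the cases above and the asm comments show one possible
   synthesis):  */
#if 0
  gcc_assert (num_insns_constant_gpr (0x7fff) == 1);	  /* li       */
  gcc_assert (num_insns_constant_gpr (0x12340000) == 1);  /* lis      */
  gcc_assert (num_insns_constant_gpr (0x12345678) == 2);  /* lis; ori */
  /* On 64-bit: lis; ori; sldi 32.  */
  gcc_assert (num_insns_constant_gpr (HOST_WIDE_INT_C (0x1234567800000000))
	      == 3);
#endif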
5877
5878 /* Helper for num_insns_constant. Allow constants formed by the
5879 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5880 and handle modes that require multiple gprs. */
5881
5882 static int
5883 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5884 {
5885 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5886 int total = 0;
5887 while (nregs-- > 0)
5888 {
5889 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5890 int insns = num_insns_constant_gpr (low);
5891 if (insns > 2
5892 /* We won't get more than 2 from num_insns_constant_gpr
5893 except when TARGET_POWERPC64 and mode is DImode or
5894 wider, so the register mode must be DImode. */
5895 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5896 insns = 2;
5897 total += insns;
5898 value >>= BITS_PER_WORD;
5899 }
5900 return total;
5901 }
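
/* Example of the mask special case above: a run-of-ones constant such
   as 0x00000ffffffff000 would need more than two insns to build from
   halves, but rs6000_is_valid_and_mask recognizes it, so it is counted
   as 2 (e.g. li of -1 followed by a single rldic).  */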
5902
5903 /* Return the number of instructions it takes to form a constant in as
5904 many GPRs as are needed for MODE. */
5905
5906 int
5907 num_insns_constant (rtx op, machine_mode mode)
5908 {
5909 HOST_WIDE_INT val;
5910
5911 switch (GET_CODE (op))
5912 {
5913 case CONST_INT:
5914 val = INTVAL (op);
5915 break;
5916
5917 case CONST_WIDE_INT:
5918 {
5919 int insns = 0;
5920 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5921 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5922 DImode);
5923 return insns;
5924 }
5925
5926 case CONST_DOUBLE:
5927 {
5928 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5929
5930 if (mode == SFmode || mode == SDmode)
5931 {
5932 long l;
5933
5934 if (mode == SDmode)
5935 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5936 else
5937 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5938 /* See the first define_split in rs6000.md handling a
5939 const_double_operand. */
5940 val = l;
5941 mode = SImode;
5942 }
5943 else if (mode == DFmode || mode == DDmode)
5944 {
5945 long l[2];
5946
5947 if (mode == DDmode)
5948 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5949 else
5950 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5951
5952 /* See the second (32-bit) and third (64-bit) define_split
5953 in rs6000.md handling a const_double_operand. */
5954 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5955 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5956 mode = DImode;
5957 }
5958 else if (mode == TFmode || mode == TDmode
5959 || mode == KFmode || mode == IFmode)
5960 {
5961 long l[4];
5962 int insns;
5963
5964 if (mode == TDmode)
5965 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5966 else
5967 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5968
5969 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5970 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5971 insns = num_insns_constant_multi (val, DImode);
5972 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5973 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5974 insns += num_insns_constant_multi (val, DImode);
5975 return insns;
5976 }
5977 else
5978 gcc_unreachable ();
5979 }
5980 break;
5981
5982 default:
5983 gcc_unreachable ();
5984 }
5985
5986 return num_insns_constant_multi (val, mode);
5987 }
5988
5989 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5990 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5991 corresponding element of the vector, but for V4SFmode, the
5992 corresponding "float" is interpreted as an SImode integer. */
5993
5994 HOST_WIDE_INT
5995 const_vector_elt_as_int (rtx op, unsigned int elt)
5996 {
5997 rtx tmp;
5998
5999 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6000 gcc_assert (GET_MODE (op) != V2DImode
6001 && GET_MODE (op) != V2DFmode);
6002
6003 tmp = CONST_VECTOR_ELT (op, elt);
6004 if (GET_MODE (op) == V4SFmode)
6005 tmp = gen_lowpart (SImode, tmp);
6006 return INTVAL (tmp);
6007 }
6008
6009 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6010 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6011 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6012 all items are set to the same value and contain COPIES replicas of the
6013 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6014 operand and the others are set to the value of the operand's msb. */
6015
6016 static bool
6017 vspltis_constant (rtx op, unsigned step, unsigned copies)
6018 {
6019 machine_mode mode = GET_MODE (op);
6020 machine_mode inner = GET_MODE_INNER (mode);
6021
6022 unsigned i;
6023 unsigned nunits;
6024 unsigned bitsize;
6025 unsigned mask;
6026
6027 HOST_WIDE_INT val;
6028 HOST_WIDE_INT splat_val;
6029 HOST_WIDE_INT msb_val;
6030
6031 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6032 return false;
6033
6034 nunits = GET_MODE_NUNITS (mode);
6035 bitsize = GET_MODE_BITSIZE (inner);
6036 mask = GET_MODE_MASK (inner);
6037
6038 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6039 splat_val = val;
6040 msb_val = val >= 0 ? 0 : -1;
6041
6042 /* Construct the value to be splatted, if possible.  If not, return false. */
6043 for (i = 2; i <= copies; i *= 2)
6044 {
6045 HOST_WIDE_INT small_val;
6046 bitsize /= 2;
6047 small_val = splat_val >> bitsize;
6048 mask >>= bitsize;
6049 if (splat_val != ((HOST_WIDE_INT)
6050 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6051 | (small_val & mask)))
6052 return false;
6053 splat_val = small_val;
6054 }
6055
6056 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6057 if (EASY_VECTOR_15 (splat_val))
6058 ;
6059
6060 /* Also check if we can splat, and then add the result to itself. Do so if
6061 the value is positive, or if the splat instruction is using OP's mode;
6062 for splat_val < 0, the splat and the add should use the same mode. */
6063 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6064 && (splat_val >= 0 || (step == 1 && copies == 1)))
6065 ;
6066
6067 /* Also check if we are loading up the most significant bit, which can be
6068 done by loading up -1 and shifting the value left by -1. */
6069 else if (EASY_VECTOR_MSB (splat_val, inner))
6070 ;
6071
6072 else
6073 return false;
6074
6075 /* Check if VAL is present in every STEP-th element, and the
6076 other elements are filled with its most significant bit. */
6077 for (i = 1; i < nunits; ++i)
6078 {
6079 HOST_WIDE_INT desired_val;
6080 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6081 if ((i & (step - 1)) == 0)
6082 desired_val = val;
6083 else
6084 desired_val = msb_val;
6085
6086 if (desired_val != const_vector_elt_as_int (op, elt))
6087 return false;
6088 }
6089
6090 return true;
6091 }
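
/* Illustration of the STEP/COPIES encoding (elements listed in
   big-endian order): the V8HImode constant { 0, 5, 0, 5, 0, 5, 0, 5 }
   is accepted with STEP = 2, COPIES = 1, since it is exactly vspltisw
   of 5 viewed as eight halfwords, while the V4SImode constant
   { 0x50005, 0x50005, 0x50005, 0x50005 } is accepted with STEP = 1,
   COPIES = 2, i.e. vspltish of 5 viewed as four words.  */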
6092
6093 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6094 instruction, filling in the bottom elements with 0 or -1.
6095 
6096 Return 0 if the constant cannot be generated with VSLDOI.  Return positive
6097 for the number of zero bytes to shift in, or negative for the number of
6098 0xff bytes to shift in.
6099
6100 OP is a CONST_VECTOR. */
6101
6102 int
6103 vspltis_shifted (rtx op)
6104 {
6105 machine_mode mode = GET_MODE (op);
6106 machine_mode inner = GET_MODE_INNER (mode);
6107
6108 unsigned i, j;
6109 unsigned nunits;
6110 unsigned mask;
6111
6112 HOST_WIDE_INT val;
6113
6114 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6115 return 0;
6116
6117 /* We need to create pseudo registers to do the shift, so don't recognize
6118 shift vector constants after reload. */
6119 if (!can_create_pseudo_p ())
6120 return 0;
6121
6122 nunits = GET_MODE_NUNITS (mode);
6123 mask = GET_MODE_MASK (inner);
6124
6125 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6126
6127 /* Check if the value can really be the operand of a vspltis[bhw]. */
6128 if (EASY_VECTOR_15 (val))
6129 ;
6130
6131 /* Also check if we are loading up the most significant bit which can be done
6132 by loading up -1 and shifting the value left by -1. */
6133 else if (EASY_VECTOR_MSB (val, inner))
6134 ;
6135
6136 else
6137 return 0;
6138
6139 /* Check if VAL is present in every element until we find elements that
6140 are 0 or all 1 bits. */
6141 for (i = 1; i < nunits; ++i)
6142 {
6143 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6144 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6145
6146 /* If the value isn't the splat value, check for the remaining elements
6147 being 0/-1. */
6148 if (val != elt_val)
6149 {
6150 if (elt_val == 0)
6151 {
6152 for (j = i+1; j < nunits; ++j)
6153 {
6154 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6155 if (const_vector_elt_as_int (op, elt2) != 0)
6156 return 0;
6157 }
6158
6159 return (nunits - i) * GET_MODE_SIZE (inner);
6160 }
6161
6162 else if ((elt_val & mask) == mask)
6163 {
6164 for (j = i+1; j < nunits; ++j)
6165 {
6166 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6167 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6168 return 0;
6169 }
6170
6171 return -((nunits - i) * GET_MODE_SIZE (inner));
6172 }
6173
6174 else
6175 return 0;
6176 }
6177 }
6178
6179 /* If all elements are equal, we don't need to do VSLDOI. */
6180 return 0;
6181 }
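
/* Example: the V4SImode constant { 5, 5, 5, 0 } (big-endian element
   order) returns 4, meaning vspltisw of 5 followed by a VSLDOI that
   shifts in 4 zero bytes; { 5, 5, 5, -1 } returns -4, shifting in 4
   bytes of 0xff instead.  */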
6182
6183
6184 /* Return true if OP is of the given MODE and can be synthesized
6185 with a vspltisb, vspltish or vspltisw. */
6186
6187 bool
6188 easy_altivec_constant (rtx op, machine_mode mode)
6189 {
6190 unsigned step, copies;
6191
6192 if (mode == VOIDmode)
6193 mode = GET_MODE (op);
6194 else if (mode != GET_MODE (op))
6195 return false;
6196
6197 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6198 constants. */
6199 if (mode == V2DFmode)
6200 return zero_constant (op, mode);
6201
6202 else if (mode == V2DImode)
6203 {
6204 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6205 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6206 return false;
6207
6208 if (zero_constant (op, mode))
6209 return true;
6210
6211 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6212 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6213 return true;
6214
6215 return false;
6216 }
6217
6218 /* V1TImode is a special container for TImode. Ignore for now. */
6219 else if (mode == V1TImode)
6220 return false;
6221
6222 /* Start with a vspltisw. */
6223 step = GET_MODE_NUNITS (mode) / 4;
6224 copies = 1;
6225
6226 if (vspltis_constant (op, step, copies))
6227 return true;
6228
6229 /* Then try with a vspltish. */
6230 if (step == 1)
6231 copies <<= 1;
6232 else
6233 step >>= 1;
6234
6235 if (vspltis_constant (op, step, copies))
6236 return true;
6237
6238 /* And finally a vspltisb. */
6239 if (step == 1)
6240 copies <<= 1;
6241 else
6242 step >>= 1;
6243
6244 if (vspltis_constant (op, step, copies))
6245 return true;
6246
6247 if (vspltis_shifted (op) != 0)
6248 return true;
6249
6250 return false;
6251 }
6252
6253 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6254 result is OP. Abort if it is not possible. */
6255
6256 rtx
6257 gen_easy_altivec_constant (rtx op)
6258 {
6259 machine_mode mode = GET_MODE (op);
6260 int nunits = GET_MODE_NUNITS (mode);
6261 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6262 unsigned step = nunits / 4;
6263 unsigned copies = 1;
6264
6265 /* Start with a vspltisw. */
6266 if (vspltis_constant (op, step, copies))
6267 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6268
6269 /* Then try with a vspltish. */
6270 if (step == 1)
6271 copies <<= 1;
6272 else
6273 step >>= 1;
6274
6275 if (vspltis_constant (op, step, copies))
6276 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6277
6278 /* And finally a vspltisb. */
6279 if (step == 1)
6280 copies <<= 1;
6281 else
6282 step >>= 1;
6283
6284 if (vspltis_constant (op, step, copies))
6285 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6286
6287 gcc_unreachable ();
6288 }
6289
6290 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6291 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6292 
6293 Return the number of instructions needed (1 or 2) in the location pointed
6294 to by NUM_INSNS_PTR.
6295 
6296 Return the constant that is being split via CONSTANT_PTR. */
6297
6298 bool
6299 xxspltib_constant_p (rtx op,
6300 machine_mode mode,
6301 int *num_insns_ptr,
6302 int *constant_ptr)
6303 {
6304 size_t nunits = GET_MODE_NUNITS (mode);
6305 size_t i;
6306 HOST_WIDE_INT value;
6307 rtx element;
6308
6309 /* Set the returned values to out-of-bounds values. */
6310 *num_insns_ptr = -1;
6311 *constant_ptr = 256;
6312
6313 if (!TARGET_P9_VECTOR)
6314 return false;
6315
6316 if (mode == VOIDmode)
6317 mode = GET_MODE (op);
6318
6319 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6320 return false;
6321
6322 /* Handle (vec_duplicate <constant>). */
6323 if (GET_CODE (op) == VEC_DUPLICATE)
6324 {
6325 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6326 && mode != V2DImode)
6327 return false;
6328
6329 element = XEXP (op, 0);
6330 if (!CONST_INT_P (element))
6331 return false;
6332
6333 value = INTVAL (element);
6334 if (!IN_RANGE (value, -128, 127))
6335 return false;
6336 }
6337
6338 /* Handle (const_vector [...]). */
6339 else if (GET_CODE (op) == CONST_VECTOR)
6340 {
6341 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6342 && mode != V2DImode)
6343 return false;
6344
6345 element = CONST_VECTOR_ELT (op, 0);
6346 if (!CONST_INT_P (element))
6347 return false;
6348
6349 value = INTVAL (element);
6350 if (!IN_RANGE (value, -128, 127))
6351 return false;
6352
6353 for (i = 1; i < nunits; i++)
6354 {
6355 element = CONST_VECTOR_ELT (op, i);
6356 if (!CONST_INT_P (element))
6357 return false;
6358
6359 if (value != INTVAL (element))
6360 return false;
6361 }
6362 }
6363
6364 /* Handle integer constants being loaded into the upper part of the VSX
6365 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6366 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6367 else if (CONST_INT_P (op))
6368 {
6369 if (!SCALAR_INT_MODE_P (mode))
6370 return false;
6371
6372 value = INTVAL (op);
6373 if (!IN_RANGE (value, -128, 127))
6374 return false;
6375
6376 if (!IN_RANGE (value, -1, 0))
6377 {
6378 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6379 return false;
6380
6381 if (EASY_VECTOR_15 (value))
6382 return false;
6383 }
6384 }
6385
6386 else
6387 return false;
6388
6389 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6390 sign extend. Special case 0/-1 to allow getting any VSX register instead
6391 of an Altivec register. */
6392 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6393 && EASY_VECTOR_15 (value))
6394 return false;
6395
6396 /* Return # of instructions and the constant byte for XXSPLTIB. */
6397 if (mode == V16QImode)
6398 *num_insns_ptr = 1;
6399
6400 else if (IN_RANGE (value, -1, 0))
6401 *num_insns_ptr = 1;
6402
6403 else
6404 *num_insns_ptr = 2;
6405
6406 *constant_ptr = (int) value;
6407 return true;
6408 }
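
/* Examples on an ISA 3.0 target: a V16QImode splat of 20 needs 1 insn
   (xxspltib); a V4SImode splat of 100 needs 2 (xxspltib plus a
   vextsb2w sign extension), since 100 is outside the vspltisw range;
   and a V4SImode splat of 5 is rejected here so that the cheaper
   vspltisw can be used instead.  */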
6409
6410 const char *
6411 output_vec_const_move (rtx *operands)
6412 {
6413 int shift;
6414 machine_mode mode;
6415 rtx dest, vec;
6416
6417 dest = operands[0];
6418 vec = operands[1];
6419 mode = GET_MODE (dest);
6420
6421 if (TARGET_VSX)
6422 {
6423 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6424 int xxspltib_value = 256;
6425 int num_insns = -1;
6426
6427 if (zero_constant (vec, mode))
6428 {
6429 if (TARGET_P9_VECTOR)
6430 return "xxspltib %x0,0";
6431
6432 else if (dest_vmx_p)
6433 return "vspltisw %0,0";
6434
6435 else
6436 return "xxlxor %x0,%x0,%x0";
6437 }
6438
6439 if (all_ones_constant (vec, mode))
6440 {
6441 if (TARGET_P9_VECTOR)
6442 return "xxspltib %x0,255";
6443
6444 else if (dest_vmx_p)
6445 return "vspltisw %0,-1";
6446
6447 else if (TARGET_P8_VECTOR)
6448 return "xxlorc %x0,%x0,%x0";
6449
6450 else
6451 gcc_unreachable ();
6452 }
6453
6454 if (TARGET_P9_VECTOR
6455 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6456 {
6457 if (num_insns == 1)
6458 {
6459 operands[2] = GEN_INT (xxspltib_value & 0xff);
6460 return "xxspltib %x0,%2";
6461 }
6462
6463 return "#";
6464 }
6465 }
6466
6467 if (TARGET_ALTIVEC)
6468 {
6469 rtx splat_vec;
6470
6471 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6472 if (zero_constant (vec, mode))
6473 return "vspltisw %0,0";
6474
6475 if (all_ones_constant (vec, mode))
6476 return "vspltisw %0,-1";
6477
6478 /* Do we need to construct a value using VSLDOI? */
6479 shift = vspltis_shifted (vec);
6480 if (shift != 0)
6481 return "#";
6482
6483 splat_vec = gen_easy_altivec_constant (vec);
6484 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6485 operands[1] = XEXP (splat_vec, 0);
6486 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6487 return "#";
6488
6489 switch (GET_MODE (splat_vec))
6490 {
6491 case E_V4SImode:
6492 return "vspltisw %0,%1";
6493
6494 case E_V8HImode:
6495 return "vspltish %0,%1";
6496
6497 case E_V16QImode:
6498 return "vspltisb %0,%1";
6499
6500 default:
6501 gcc_unreachable ();
6502 }
6503 }
6504
6505 gcc_unreachable ();
6506 }
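
/* Sample templates returned above: a zero vector becomes
   "xxspltib %x0,0" on power9, "vspltisw %0,0" in an Altivec register,
   or "xxlxor %x0,%x0,%x0" otherwise; an all-ones vector becomes
   "xxspltib %x0,255", "vspltisw %0,-1", or "xxlorc %x0,%x0,%x0" on
   power8.  */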
6507
6508 /* Initialize vector TARGET to VALS. */
6509
6510 void
6511 rs6000_expand_vector_init (rtx target, rtx vals)
6512 {
6513 machine_mode mode = GET_MODE (target);
6514 machine_mode inner_mode = GET_MODE_INNER (mode);
6515 int n_elts = GET_MODE_NUNITS (mode);
6516 int n_var = 0, one_var = -1;
6517 bool all_same = true, all_const_zero = true;
6518 rtx x, mem;
6519 int i;
6520
6521 for (i = 0; i < n_elts; ++i)
6522 {
6523 x = XVECEXP (vals, 0, i);
6524 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6525 ++n_var, one_var = i;
6526 else if (x != CONST0_RTX (inner_mode))
6527 all_const_zero = false;
6528
6529 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6530 all_same = false;
6531 }
6532
6533 if (n_var == 0)
6534 {
6535 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6536 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6537 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6538 {
6539 /* Zero register. */
6540 emit_move_insn (target, CONST0_RTX (mode));
6541 return;
6542 }
6543 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6544 {
6545 /* Splat immediate. */
6546 emit_insn (gen_rtx_SET (target, const_vec));
6547 return;
6548 }
6549 else
6550 {
6551 /* Load from constant pool. */
6552 emit_move_insn (target, const_vec);
6553 return;
6554 }
6555 }
6556
6557 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6558 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6559 {
6560 rtx op[2];
6561 size_t i;
6562 size_t num_elements = all_same ? 1 : 2;
6563 for (i = 0; i < num_elements; i++)
6564 {
6565 op[i] = XVECEXP (vals, 0, i);
6566 /* Just in case there is a SUBREG with a smaller mode, do a
6567 conversion. */
6568 if (GET_MODE (op[i]) != inner_mode)
6569 {
6570 rtx tmp = gen_reg_rtx (inner_mode);
6571 convert_move (tmp, op[i], 0);
6572 op[i] = tmp;
6573 }
6574 /* Allow load with splat double word. */
6575 else if (MEM_P (op[i]))
6576 {
6577 if (!all_same)
6578 op[i] = force_reg (inner_mode, op[i]);
6579 }
6580 else if (!REG_P (op[i]))
6581 op[i] = force_reg (inner_mode, op[i]);
6582 }
6583
6584 if (all_same)
6585 {
6586 if (mode == V2DFmode)
6587 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6588 else
6589 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6590 }
6591 else
6592 {
6593 if (mode == V2DFmode)
6594 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6595 else
6596 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6597 }
6598 return;
6599 }
6600
6601 /* Special case initializing vector int if we are on 64-bit systems with
6602 direct move or we have the ISA 3.0 instructions. */
6603 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6604 && TARGET_DIRECT_MOVE_64BIT)
6605 {
6606 if (all_same)
6607 {
6608 rtx element0 = XVECEXP (vals, 0, 0);
6609 if (MEM_P (element0))
6610 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6611 else
6612 element0 = force_reg (SImode, element0);
6613
6614 if (TARGET_P9_VECTOR)
6615 emit_insn (gen_vsx_splat_v4si (target, element0));
6616 else
6617 {
6618 rtx tmp = gen_reg_rtx (DImode);
6619 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6620 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6621 }
6622 return;
6623 }
6624 else
6625 {
6626 rtx elements[4];
6627 size_t i;
6628
6629 for (i = 0; i < 4; i++)
6630 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6631
6632 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6633 elements[2], elements[3]));
6634 return;
6635 }
6636 }
6637
6638 /* With single-precision floating point on VSX, we know that internally
6639 single precision is actually represented as a double, so either build two
6640 V2DF vectors and convert these vectors to single precision, or do one
6641 conversion and splat the result to the other elements. */
6642 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6643 {
6644 if (all_same)
6645 {
6646 rtx element0 = XVECEXP (vals, 0, 0);
6647
6648 if (TARGET_P9_VECTOR)
6649 {
6650 if (MEM_P (element0))
6651 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6652
6653 emit_insn (gen_vsx_splat_v4sf (target, element0));
6654 }
6655
6656 else
6657 {
6658 rtx freg = gen_reg_rtx (V4SFmode);
6659 rtx sreg = force_reg (SFmode, element0);
6660 rtx cvt = (TARGET_XSCVDPSPN
6661 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6662 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6663
6664 emit_insn (cvt);
6665 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6666 const0_rtx));
6667 }
6668 }
6669 else
6670 {
6671 rtx dbl_even = gen_reg_rtx (V2DFmode);
6672 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6673 rtx flt_even = gen_reg_rtx (V4SFmode);
6674 rtx flt_odd = gen_reg_rtx (V4SFmode);
6675 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6676 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6677 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6678 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6679
6680 /* Use VMRGEW if we can instead of doing a permute. */
6681 if (TARGET_P8_VECTOR)
6682 {
6683 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6684 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6685 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6686 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6687 if (BYTES_BIG_ENDIAN)
6688 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6689 else
6690 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6691 }
6692 else
6693 {
6694 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6695 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6696 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6697 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6698 rs6000_expand_extract_even (target, flt_even, flt_odd);
6699 }
6700 }
6701 return;
6702 }
6703
6704 /* Special case initializing vector short/char that are splats if we are on
6705 64-bit systems with direct move. */
6706 if (all_same && TARGET_DIRECT_MOVE_64BIT
6707 && (mode == V16QImode || mode == V8HImode))
6708 {
6709 rtx op0 = XVECEXP (vals, 0, 0);
6710 rtx di_tmp = gen_reg_rtx (DImode);
6711
6712 if (!REG_P (op0))
6713 op0 = force_reg (GET_MODE_INNER (mode), op0);
6714
6715 if (mode == V16QImode)
6716 {
6717 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6718 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6719 return;
6720 }
6721
6722 if (mode == V8HImode)
6723 {
6724 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6725 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6726 return;
6727 }
6728 }
6729
6730 /* Store value to stack temp. Load vector element. Splat. However, splat
6731 of 64-bit items is not supported on Altivec. */
6732 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6733 {
6734 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6735 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6736 XVECEXP (vals, 0, 0));
6737 x = gen_rtx_UNSPEC (VOIDmode,
6738 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6739 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6740 gen_rtvec (2,
6741 gen_rtx_SET (target, mem),
6742 x)));
6743 x = gen_rtx_VEC_SELECT (inner_mode, target,
6744 gen_rtx_PARALLEL (VOIDmode,
6745 gen_rtvec (1, const0_rtx)));
6746 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6747 return;
6748 }
6749
6750 /* One field is non-constant. Load constant then overwrite
6751 varying field. */
6752 if (n_var == 1)
6753 {
6754 rtx copy = copy_rtx (vals);
6755
6756 /* Load constant part of vector, substitute neighboring value for
6757 varying element. */
6758 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6759 rs6000_expand_vector_init (target, copy);
6760
6761 /* Insert variable. */
6762 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6763 return;
6764 }
6765
6766 /* Construct the vector in memory one field at a time
6767 and load the whole vector. */
6768 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6769 for (i = 0; i < n_elts; i++)
6770 emit_move_insn (adjust_address_nv (mem, inner_mode,
6771 i * GET_MODE_SIZE (inner_mode)),
6772 XVECEXP (vals, 0, i));
6773 emit_move_insn (target, mem);
6774 }
6775
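/* Editor's sketch (illustrative only, not part of this file): the n_var == 1
   strategy above, mimicked on a plain array.  The constant part is built with
   the neighboring value standing in for the varying lane, which is then
   overwritten, so only one element insert happens at run time.  All names are
   local to the example.  */
#if 0
#include <assert.h>

int
main (void)
{
  int vals[4] = { 10, 20, 0 /* varying */, 40 };
  int one_var = 2, n_elts = 4, runtime_value = 30;
  int vec[4];

  /* Load the constant part, substituting the neighboring value for the
     varying element.  */
  for (int i = 0; i < n_elts; i++)
    vec[i] = (i == one_var) ? vals[(one_var + 1) % n_elts] : vals[i];
  assert (vec[2] == 40);

  /* Insert the variable element.  */
  vec[one_var] = runtime_value;
  assert (vec[2] == 30);
  return 0;
}
#endif
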
6776 /* Set field ELT of TARGET to VAL. */
6777
6778 void
6779 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6780 {
6781 machine_mode mode = GET_MODE (target);
6782 machine_mode inner_mode = GET_MODE_INNER (mode);
6783 rtx reg = gen_reg_rtx (mode);
6784 rtx mask, mem, x;
6785 int width = GET_MODE_SIZE (inner_mode);
6786 int i;
6787
6788 val = force_reg (GET_MODE (val), val);
6789
6790 if (VECTOR_MEM_VSX_P (mode))
6791 {
6792 rtx insn = NULL_RTX;
6793 rtx elt_rtx = GEN_INT (elt);
6794
6795 if (mode == V2DFmode)
6796 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6797
6798 else if (mode == V2DImode)
6799 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6800
6801 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6802 {
6803 if (mode == V4SImode)
6804 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6805 else if (mode == V8HImode)
6806 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6807 else if (mode == V16QImode)
6808 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6809 else if (mode == V4SFmode)
6810 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6811 }
6812
6813 if (insn)
6814 {
6815 emit_insn (insn);
6816 return;
6817 }
6818 }
6819
6820 /* Simplify setting single element vectors like V1TImode. */
6821 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6822 {
6823 emit_move_insn (target, gen_lowpart (mode, val));
6824 return;
6825 }
6826
6827 /* Load single variable value. */
6828 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6829 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6830 x = gen_rtx_UNSPEC (VOIDmode,
6831 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6832 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6833 gen_rtvec (2,
6834 gen_rtx_SET (reg, mem),
6835 x)));
6836
6837 /* Linear sequence. */
6838 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6839 for (i = 0; i < 16; ++i)
6840 XVECEXP (mask, 0, i) = GEN_INT (i);
6841
6842 /* Set permute mask to insert element into target. */
6843 for (i = 0; i < width; ++i)
6844 XVECEXP (mask, 0, elt*width + i)
6845 = GEN_INT (i + 0x10);
6846 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6847
6848 if (BYTES_BIG_ENDIAN)
6849 x = gen_rtx_UNSPEC (mode,
6850 gen_rtvec (3, target, reg,
6851 force_reg (V16QImode, x)),
6852 UNSPEC_VPERM);
6853 else
6854 {
6855 if (TARGET_P9_VECTOR)
6856 x = gen_rtx_UNSPEC (mode,
6857 gen_rtvec (3, reg, target,
6858 force_reg (V16QImode, x)),
6859 UNSPEC_VPERMR);
6860 else
6861 {
6862 /* Invert selector. We prefer to generate VNAND on P8 so
6863 that future fusion opportunities can kick in, but must
6864 generate VNOR elsewhere. */
6865 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6866 rtx iorx = (TARGET_P8_VECTOR
6867 ? gen_rtx_IOR (V16QImode, notx, notx)
6868 : gen_rtx_AND (V16QImode, notx, notx));
6869 rtx tmp = gen_reg_rtx (V16QImode);
6870 emit_insn (gen_rtx_SET (tmp, iorx));
6871
6872 /* Permute with operands reversed and adjusted selector. */
6873 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6874 UNSPEC_VPERM);
6875 }
6876 }
6877
6878 emit_insn (gen_rtx_SET (target, x));
6879 }
6880
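/* Editor's sketch (illustrative only, not part of this file): why the
   selector inversion above may be expressed either way.  With identical
   operands, IOR of two NOTs folds to NAND and AND of two NOTs folds to NOR,
   and both reduce to a plain bitwise NOT, so either instruction yields the
   inverted permute selector.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint8_t x = 0x1b;
  uint8_t nand = (uint8_t) ~(x & x);	/* (ior (not x) (not x)) folds to this.  */
  uint8_t nor  = (uint8_t) ~(x | x);	/* (and (not x) (not x)) folds to this.  */
  assert (nand == (uint8_t) ~x && nor == (uint8_t) ~x);
  return 0;
}
#endif
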
6881 /* Extract field ELT from VEC into TARGET. */
6882
6883 void
6884 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6885 {
6886 machine_mode mode = GET_MODE (vec);
6887 machine_mode inner_mode = GET_MODE_INNER (mode);
6888 rtx mem;
6889
6890 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6891 {
6892 switch (mode)
6893 {
6894 default:
6895 break;
6896 case E_V1TImode:
6897 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
6898 emit_move_insn (target, gen_lowpart (TImode, vec));
6899 break;
6900 case E_V2DFmode:
6901 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6902 return;
6903 case E_V2DImode:
6904 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6905 return;
6906 case E_V4SFmode:
6907 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6908 return;
6909 case E_V16QImode:
6910 if (TARGET_DIRECT_MOVE_64BIT)
6911 {
6912 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6913 return;
6914 }
6915 else
6916 break;
6917 case E_V8HImode:
6918 if (TARGET_DIRECT_MOVE_64BIT)
6919 {
6920 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6921 return;
6922 }
6923 else
6924 break;
6925 case E_V4SImode:
6926 if (TARGET_DIRECT_MOVE_64BIT)
6927 {
6928 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6929 return;
6930 }
6931 break;
6932 }
6933 }
6934 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6935 && TARGET_DIRECT_MOVE_64BIT)
6936 {
6937 if (GET_MODE (elt) != DImode)
6938 {
6939 rtx tmp = gen_reg_rtx (DImode);
6940 convert_move (tmp, elt, 0);
6941 elt = tmp;
6942 }
6943 else if (!REG_P (elt))
6944 elt = force_reg (DImode, elt);
6945
6946 switch (mode)
6947 {
6948 case E_V2DFmode:
6949 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6950 return;
6951
6952 case E_V2DImode:
6953 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6954 return;
6955
6956 case E_V4SFmode:
6957 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6958 return;
6959
6960 case E_V4SImode:
6961 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6962 return;
6963
6964 case E_V8HImode:
6965 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6966 return;
6967
6968 case E_V16QImode:
6969 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6970 return;
6971
6972 default:
6973 gcc_unreachable ();
6974 }
6975 }
6976
6977 gcc_assert (CONST_INT_P (elt));
6978
6979 /* Allocate mode-sized buffer. */
6980 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6981
6982 emit_move_insn (mem, vec);
6983
6984 /* Add offset to field within buffer matching vector element. */
6985 mem = adjust_address_nv (mem, inner_mode,
6986 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
6987
6988 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6989 }
6990
6991 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6992 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6993 temporary (BASE_TMP) to fixup the address. Return the new memory address
6994 that is valid for reads or writes to a given register (SCALAR_REG). */
6995
6996 rtx
6997 rs6000_adjust_vec_address (rtx scalar_reg,
6998 rtx mem,
6999 rtx element,
7000 rtx base_tmp,
7001 machine_mode scalar_mode)
7002 {
7003 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7004 rtx addr = XEXP (mem, 0);
7005 rtx element_offset;
7006 rtx new_addr;
7007 bool valid_addr_p;
7008
7009 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7010 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7011
7012 /* Calculate what we need to add to the address to get the element
7013 address. */
7014 if (CONST_INT_P (element))
7015 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7016 else
7017 {
7018 int byte_shift = exact_log2 (scalar_size);
7019 gcc_assert (byte_shift >= 0);
7020
7021 if (byte_shift == 0)
7022 element_offset = element;
7023
7024 else
7025 {
7026 if (TARGET_POWERPC64)
7027 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7028 else
7029 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7030
7031 element_offset = base_tmp;
7032 }
7033 }
7034
7035 /* Create the new address pointing to the element within the vector. If we
7036 are adding 0, we don't have to change the address. */
7037 if (element_offset == const0_rtx)
7038 new_addr = addr;
7039
7040 /* A simple indirect address can be converted into a reg + offset
7041 address. */
7042 else if (REG_P (addr) || SUBREG_P (addr))
7043 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7044
7045 /* Optimize a D-FORM address with a constant offset and a constant element
7046 number, folding the element offset into the address directly. */
7047 else if (GET_CODE (addr) == PLUS)
7048 {
7049 rtx op0 = XEXP (addr, 0);
7050 rtx op1 = XEXP (addr, 1);
7051 rtx insn;
7052
7053 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7054 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7055 {
7056 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7057 rtx offset_rtx = GEN_INT (offset);
7058
7059 if (IN_RANGE (offset, -32768, 32767)
7060 && (scalar_size < 8 || (offset & 0x3) == 0))
7061 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7062 else
7063 {
7064 emit_move_insn (base_tmp, offset_rtx);
7065 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7066 }
7067 }
7068 else
7069 {
7070 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7071 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7072
7073 /* Note, ADDI requires the register being added to be a base
7074 register. If the register was R0, load it up into the temporary
7075 and do the add. */
7076 if (op1_reg_p
7077 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7078 {
7079 insn = gen_add3_insn (base_tmp, op1, element_offset);
7080 gcc_assert (insn != NULL_RTX);
7081 emit_insn (insn);
7082 }
7083
7084 else if (ele_reg_p
7085 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7086 {
7087 insn = gen_add3_insn (base_tmp, element_offset, op1);
7088 gcc_assert (insn != NULL_RTX);
7089 emit_insn (insn);
7090 }
7091
7092 else
7093 {
7094 emit_move_insn (base_tmp, op1);
7095 emit_insn (gen_add2_insn (base_tmp, element_offset));
7096 }
7097
7098 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7099 }
7100 }
7101
7102 else
7103 {
7104 emit_move_insn (base_tmp, addr);
7105 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7106 }
7107
7108 /* If we have a PLUS, we need to see whether the particular register class
7109 allows for D-FORM or X-FORM addressing. */
7110 if (GET_CODE (new_addr) == PLUS)
7111 {
7112 rtx op1 = XEXP (new_addr, 1);
7113 addr_mask_type addr_mask;
7114 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7115
7116 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7117 if (INT_REGNO_P (scalar_regno))
7118 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7119
7120 else if (FP_REGNO_P (scalar_regno))
7121 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7122
7123 else if (ALTIVEC_REGNO_P (scalar_regno))
7124 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7125
7126 else
7127 gcc_unreachable ();
7128
7129 if (REG_P (op1) || SUBREG_P (op1))
7130 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7131 else
7132 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7133 }
7134
7135 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7136 valid_addr_p = true;
7137
7138 else
7139 valid_addr_p = false;
7140
7141 if (!valid_addr_p)
7142 {
7143 emit_move_insn (base_tmp, new_addr);
7144 new_addr = base_tmp;
7145 }
7146
7147 return change_address (mem, scalar_mode, new_addr);
7148 }
7149
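/* Editor's sketch (illustrative only, not part of this file): the
   variable-element offset computation above.  Shifting the element number
   left by log2 of the element size turns an index into a byte offset without
   a multiply; __builtin_ctz stands in for exact_log2 on power-of-two
   sizes.  */
#if 0
#include <assert.h>

static long
element_byte_offset (long element, unsigned scalar_size)
{
  int byte_shift = __builtin_ctz (scalar_size);
  return element << byte_shift;
}

int
main (void)
{
  assert (element_byte_offset (3, 4) == 12);	/* V4SI: element 3 at byte 12.  */
  assert (element_byte_offset (1, 8) == 8);	/* V2DI: element 1 at byte 8.  */
  assert (element_byte_offset (5, 1) == 5);	/* V16QI: shift of 0.  */
  return 0;
}
#endif
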
7150 /* Split a variable vec_extract operation into the component instructions. */
7151
7152 void
7153 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7154 rtx tmp_altivec)
7155 {
7156 machine_mode mode = GET_MODE (src);
7157 machine_mode scalar_mode = GET_MODE (dest);
7158 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7159 int byte_shift = exact_log2 (scalar_size);
7160
7161 gcc_assert (byte_shift >= 0);
7162
7163 /* If we are given a memory address, optimize to load just the element. We
7164 don't have to adjust the vector element number on little endian
7165 systems. */
7166 if (MEM_P (src))
7167 {
7168 gcc_assert (REG_P (tmp_gpr));
7169 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7170 tmp_gpr, scalar_mode));
7171 return;
7172 }
7173
7174 else if (REG_P (src) || SUBREG_P (src))
7175 {
7176 int bit_shift = byte_shift + 3;
7177 rtx element2;
7178 unsigned int dest_regno = reg_or_subregno (dest);
7179 unsigned int src_regno = reg_or_subregno (src);
7180 unsigned int element_regno = reg_or_subregno (element);
7181
7182 gcc_assert (REG_P (tmp_gpr));
7183
7184 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7185 a general purpose register. */
7186 if (TARGET_P9_VECTOR
7187 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7188 && INT_REGNO_P (dest_regno)
7189 && ALTIVEC_REGNO_P (src_regno)
7190 && INT_REGNO_P (element_regno))
7191 {
7192 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7193 rtx element_si = gen_rtx_REG (SImode, element_regno);
7194
7195 if (mode == V16QImode)
7196 emit_insn (BYTES_BIG_ENDIAN
7197 ? gen_vextublx (dest_si, element_si, src)
7198 : gen_vextubrx (dest_si, element_si, src));
7199
7200 else if (mode == V8HImode)
7201 {
7202 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7203 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7204 emit_insn (BYTES_BIG_ENDIAN
7205 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7206 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7207 }
7208
7209
7210 else
7211 {
7212 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7213 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7214 emit_insn (BYTES_BIG_ENDIAN
7215 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7216 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7217 }
7218
7219 return;
7220 }
7221
7222
7223 gcc_assert (REG_P (tmp_altivec));
7224
7225 /* For little endian, adjust the element ordering: V2DI/V2DF can use an
7226 XOR, other modes need a subtract.  The shift amount is chosen so that
7227 VSLO will move the element into the upper position (adding 3 converts
7228 a byte shift into a bit shift). */
7229 if (scalar_size == 8)
7230 {
7231 if (!BYTES_BIG_ENDIAN)
7232 {
7233 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7234 element2 = tmp_gpr;
7235 }
7236 else
7237 element2 = element;
7238
7239 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7240 bit. */
7241 emit_insn (gen_rtx_SET (tmp_gpr,
7242 gen_rtx_AND (DImode,
7243 gen_rtx_ASHIFT (DImode,
7244 element2,
7245 GEN_INT (6)),
7246 GEN_INT (64))));
7247 }
7248 else
7249 {
7250 if (!BYTES_BIG_ENDIAN)
7251 {
7252 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7253
7254 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7255 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7256 element2 = tmp_gpr;
7257 }
7258 else
7259 element2 = element;
7260
7261 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7262 }
7263
7264 /* Get the value into the lower byte of the Altivec register where VSLO
7265 expects it. */
7266 if (TARGET_P9_VECTOR)
7267 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7268 else if (can_create_pseudo_p ())
7269 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7270 else
7271 {
7272 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7273 emit_move_insn (tmp_di, tmp_gpr);
7274 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7275 }
7276
7277 /* Do the VSLO to get the value into the final location. */
7278 switch (mode)
7279 {
7280 case E_V2DFmode:
7281 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7282 return;
7283
7284 case E_V2DImode:
7285 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7286 return;
7287
7288 case E_V4SFmode:
7289 {
7290 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7291 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7292 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7293 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7294 tmp_altivec));
7295
7296 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7297 return;
7298 }
7299
7300 case E_V4SImode:
7301 case E_V8HImode:
7302 case E_V16QImode:
7303 {
7304 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7305 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7306 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7307 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7308 tmp_altivec));
7309 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7310 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7311 GEN_INT (64 - (8 * scalar_size))));
7312 return;
7313 }
7314
7315 default:
7316 gcc_unreachable ();
7317 }
7318
7319 return;
7320 }
7321 else
7322 gcc_unreachable ();
7323 }
7324
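/* Editor's sketch (illustrative only, not part of this file): the
   little-endian renumbering above.  Masking the index and mirroring it
   against N-1 maps big-endian element numbers to little-endian ones; for
   two-element vectors this is the same as the XOR with 1 used for
   V2DI/V2DF.  */
#if 0
#include <assert.h>

static unsigned
le_element (unsigned i, unsigned n_elts)	/* n_elts is a power of two.  */
{
  i &= n_elts - 1;		/* The AND in the code above.  */
  return (n_elts - 1) - i;	/* The subtract; equals i ^ 1 when n_elts is 2.  */
}

int
main (void)
{
  assert (le_element (0, 2) == 1 && le_element (1, 2) == 0);
  assert (le_element (0, 4) == 3);	/* V4SI element 0 is LE element 3.  */
  assert (le_element (5, 4) == 2);	/* Out-of-range indexes wrap via the mask.  */
  return 0;
}
#endif
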
7325 /* Return the alignment of TYPE. The existing alignment is ALIGN. HOW
7326 selects whether the alignment is ABI-mandated, optional, or
7327 both ABI-mandated and optional alignment. */
7328
7329 unsigned int
7330 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7331 {
7332 if (how != align_opt)
7333 {
7334 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7335 align = 128;
7336 }
7337
7338 if (how != align_abi)
7339 {
7340 if (TREE_CODE (type) == ARRAY_TYPE
7341 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7342 {
7343 if (align < BITS_PER_WORD)
7344 align = BITS_PER_WORD;
7345 }
7346 }
7347
7348 return align;
7349 }
7350
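/* Editor's sketch (illustrative only, not part of this file): the two
   adjustments above over plain numbers.  Vector types get at least 128-bit
   ABI alignment; character arrays may optionally be padded up to a word.
   The flag names mirror the enum used above.  */
#if 0
#include <assert.h>

enum data_align { align_abi, align_opt, align_both };

static unsigned
adjust_align (int is_vector, int is_char_array, unsigned align,
	      enum data_align how, unsigned bits_per_word)
{
  if (how != align_opt && is_vector && align < 128)
    align = 128;
  if (how != align_abi && is_char_array && align < bits_per_word)
    align = bits_per_word;
  return align;
}

int
main (void)
{
  assert (adjust_align (1, 0, 32, align_both, 64) == 128);
  assert (adjust_align (0, 1, 8, align_both, 64) == 64);
  assert (adjust_align (0, 1, 8, align_abi, 64) == 8);	/* Optional rule skipped.  */
  return 0;
}
#endif
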
7351 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7352 instructions simply ignore the low bits; VSX memory instructions
7353 are aligned to 4 or 8 bytes. */
7354
7355 static bool
7356 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7357 {
7358 return (STRICT_ALIGNMENT
7359 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7360 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7361 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7362 && (int) align < VECTOR_ALIGN (mode)))));
7363 }
7364
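/* Editor's sketch (illustrative only, not part of this file): the shape of
   the predicate above, with the target macros replaced by plain booleans for
   the example's sake.  A scalar float below 32-bit alignment, or a vector
   below its required alignment, is slow unless the target handles unaligned
   VSX accesses efficiently.  */
#if 0
#include <assert.h>
#include <stdbool.h>

static bool
slow_unaligned (bool strict_align, bool efficient_unaligned_vsx,
		bool scalar_float, bool vector_like,
		unsigned align, unsigned vector_align)
{
  return strict_align
	 || (!efficient_unaligned_vsx
	     && ((scalar_float && align < 32)
		 || (vector_like && align < vector_align)));
}

int
main (void)
{
  assert (slow_unaligned (false, false, true, false, 16, 128));
  assert (!slow_unaligned (false, true, true, false, 16, 128));
  return 0;
}
#endif
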
7365 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7366
7367 bool
7368 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7369 {
7370 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7371 {
7372 if (computed != 128)
7373 {
7374 static bool warned;
7375 if (!warned && warn_psabi)
7376 {
7377 warned = true;
7378 inform (input_location,
7379 "the layout of aggregates containing vectors with"
7380 " %d-byte alignment has changed in GCC 5",
7381 computed / BITS_PER_UNIT);
7382 }
7383 }
7384 /* In current GCC there is no special case. */
7385 return false;
7386 }
7387
7388 return false;
7389 }
7390
7391 /* AIX increases natural record alignment to doubleword if the first
7392 field is an FP double while the FP fields remain word aligned. */
7393
7394 unsigned int
7395 rs6000_special_round_type_align (tree type, unsigned int computed,
7396 unsigned int specified)
7397 {
7398 unsigned int align = MAX (computed, specified);
7399 tree field = TYPE_FIELDS (type);
7400
7401 /* Skip all non-field decls.  */
7402 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7403 field = DECL_CHAIN (field);
7404
7405 if (field != NULL && field != type)
7406 {
7407 type = TREE_TYPE (field);
7408 while (TREE_CODE (type) == ARRAY_TYPE)
7409 type = TREE_TYPE (type);
7410
7411 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7412 align = MAX (align, 64);
7413 }
7414
7415 return align;
7416 }
7417
7418 /* Darwin increases record alignment to the natural alignment of
7419 the first field. */
7420
7421 unsigned int
7422 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7423 unsigned int specified)
7424 {
7425 unsigned int align = MAX (computed, specified);
7426
7427 if (TYPE_PACKED (type))
7428 return align;
7429
7430 /* Find the first field, looking down into aggregates. */
7431 do {
7432 tree field = TYPE_FIELDS (type);
7433 /* Skip all non-field decls.  */
7434 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7435 field = DECL_CHAIN (field);
7436 if (! field)
7437 break;
7438 /* A packed field does not contribute any extra alignment. */
7439 if (DECL_PACKED (field))
7440 return align;
7441 type = TREE_TYPE (field);
7442 while (TREE_CODE (type) == ARRAY_TYPE)
7443 type = TREE_TYPE (type);
7444 } while (AGGREGATE_TYPE_P (type));
7445
7446 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7447 align = MAX (align, TYPE_ALIGN (type));
7448
7449 return align;
7450 }
7451
7452 /* Return 1 for an operand in small memory on V.4/eabi. */
7453
7454 int
7455 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7456 machine_mode mode ATTRIBUTE_UNUSED)
7457 {
7458 #if TARGET_ELF
7459 rtx sym_ref;
7460
7461 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7462 return 0;
7463
7464 if (DEFAULT_ABI != ABI_V4)
7465 return 0;
7466
7467 if (SYMBOL_REF_P (op))
7468 sym_ref = op;
7469
7470 else if (GET_CODE (op) != CONST
7471 || GET_CODE (XEXP (op, 0)) != PLUS
7472 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7473 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7474 return 0;
7475
7476 else
7477 {
7478 rtx sum = XEXP (op, 0);
7479 HOST_WIDE_INT summand;
7480
7481 /* We have to be careful here, because it is the referenced address
7482 that must be 32k from _SDA_BASE_, not just the symbol. */
7483 summand = INTVAL (XEXP (sum, 1));
7484 if (summand < 0 || summand > g_switch_value)
7485 return 0;
7486
7487 sym_ref = XEXP (sum, 0);
7488 }
7489
7490 return SYMBOL_REF_SMALL_P (sym_ref);
7491 #else
7492 return 0;
7493 #endif
7494 }
7495
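/* Editor's sketch (illustrative only, not part of this file): the sym+const
   check above.  An operand only counts as small data if the constant keeps
   the referenced address within the -G limit, not merely the symbol
   itself.  */
#if 0
#include <assert.h>

static int
summand_ok (long summand, unsigned long g_switch_value)
{
  return !(summand < 0 || (unsigned long) summand > g_switch_value);
}

int
main (void)
{
  assert (summand_ok (0, 8) && summand_ok (8, 8));
  assert (!summand_ok (-4, 8));		/* Before the symbol.  */
  assert (!summand_ok (16, 8));		/* Past the small-data limit.  */
  return 0;
}
#endif
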
7496 /* Return true if either operand is a general purpose register. */
7497
7498 bool
7499 gpr_or_gpr_p (rtx op0, rtx op1)
7500 {
7501 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7502 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7503 }
7504
7505 /* Return true if this is a move direct operation between GPR registers and
7506 floating point/VSX registers. */
7507
7508 bool
7509 direct_move_p (rtx op0, rtx op1)
7510 {
7511 int regno0, regno1;
7512
7513 if (!REG_P (op0) || !REG_P (op1))
7514 return false;
7515
7516 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7517 return false;
7518
7519 regno0 = REGNO (op0);
7520 regno1 = REGNO (op1);
7521 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7522 return false;
7523
7524 if (INT_REGNO_P (regno0))
7525 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7526
7527 else if (INT_REGNO_P (regno1))
7528 {
7529 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7530 return true;
7531
7532 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7533 return true;
7534 }
7535
7536 return false;
7537 }
7538
7539 /* Return true if the OFFSET is valid for the quad address instructions that
7540 use d-form (register + offset) addressing. */
7541
7542 static inline bool
7543 quad_address_offset_p (HOST_WIDE_INT offset)
7544 {
7545 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7546 }
7547
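/* Editor's sketch (illustrative only, not part of this file): offsets
   accepted by the predicate above -- a signed 16-bit value whose low four
   bits are clear, matching the DQ displacement field of lq/stq and
   lxv/stxv.  */
#if 0
#include <assert.h>

static int
quad_offset_ok (long offset)
{
  return offset >= -32768 && offset <= 32767 && (offset & 0xf) == 0;
}

int
main (void)
{
  assert (quad_offset_ok (0) && quad_offset_ok (-32768) && quad_offset_ok (32752));
  assert (!quad_offset_ok (32767));	/* In range but misaligned.  */
  assert (!quad_offset_ok (8));		/* Not a multiple of 16.  */
  return 0;
}
#endif
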
7548 /* Return true if ADDR is an acceptable address for a quad memory
7549 operation of mode MODE, either LQ/STQ for general purpose registers or
7550 LXV/STXV for vector registers under ISA 3.0.  STRICT controls whether
7551 the base register must pass strict register checking, as it must once
7552 register allocation is complete. */
7553
7554 bool
7555 quad_address_p (rtx addr, machine_mode mode, bool strict)
7556 {
7557 rtx op0, op1;
7558
7559 if (GET_MODE_SIZE (mode) != 16)
7560 return false;
7561
7562 if (legitimate_indirect_address_p (addr, strict))
7563 return true;
7564
7565 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7566 return false;
7567
7568 if (GET_CODE (addr) != PLUS)
7569 return false;
7570
7571 op0 = XEXP (addr, 0);
7572 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7573 return false;
7574
7575 op1 = XEXP (addr, 1);
7576 if (!CONST_INT_P (op1))
7577 return false;
7578
7579 return quad_address_offset_p (INTVAL (op1));
7580 }
7581
7582 /* Return true if this is a load or store quad operation. This function does
7583 not handle the atomic quad memory instructions. */
7584
7585 bool
7586 quad_load_store_p (rtx op0, rtx op1)
7587 {
7588 bool ret;
7589
7590 if (!TARGET_QUAD_MEMORY)
7591 ret = false;
7592
7593 else if (REG_P (op0) && MEM_P (op1))
7594 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7595 && quad_memory_operand (op1, GET_MODE (op1))
7596 && !reg_overlap_mentioned_p (op0, op1));
7597
7598 else if (MEM_P (op0) && REG_P (op1))
7599 ret = (quad_memory_operand (op0, GET_MODE (op0))
7600 && quad_int_reg_operand (op1, GET_MODE (op1)));
7601
7602 else
7603 ret = false;
7604
7605 if (TARGET_DEBUG_ADDR)
7606 {
7607 fprintf (stderr, "\n========== quad_load_store_p, return %s\n",
7608 ret ? "true" : "false");
7609 debug_rtx (gen_rtx_SET (op0, op1));
7610 }
7611
7612 return ret;
7613 }
7614
7615 /* Given an address, return a constant offset term if one exists. */
7616
7617 static rtx
7618 address_offset (rtx op)
7619 {
7620 if (GET_CODE (op) == PRE_INC
7621 || GET_CODE (op) == PRE_DEC)
7622 op = XEXP (op, 0);
7623 else if (GET_CODE (op) == PRE_MODIFY
7624 || GET_CODE (op) == LO_SUM)
7625 op = XEXP (op, 1);
7626
7627 if (GET_CODE (op) == CONST)
7628 op = XEXP (op, 0);
7629
7630 if (GET_CODE (op) == PLUS)
7631 op = XEXP (op, 1);
7632
7633 if (CONST_INT_P (op))
7634 return op;
7635
7636 return NULL_RTX;
7637 }
7638
7639 /* Return true if the MEM operand is a memory operand suitable for use
7640 with a (full width, possibly multiple) gpr load/store. On
7641 powerpc64 this means the offset must be divisible by 4.
7642 Implements 'Y' constraint.
7643
7644 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7645 a constraint function we know the operand has satisfied a suitable
7646 memory predicate. Also accept some odd rtl generated by reload
7647 (see rs6000_legitimize_reload_address for various forms). It is
7648 important that reload rtl be accepted by appropriate constraints
7649 but not by the operand predicate.
7650
7651 Offsetting a lo_sum should not be allowed, except where we know by
7652 alignment that a 32k boundary is not crossed, but see the ???
7653 comment in rs6000_legitimize_reload_address. Note that by
7654 "offsetting" here we mean a further offset to access parts of the
7655 MEM. It's fine to have a lo_sum where the inner address is offset
7656 from a sym, since the same sym+offset will appear in the high part
7657 of the address calculation. */
7658
7659 bool
7660 mem_operand_gpr (rtx op, machine_mode mode)
7661 {
7662 unsigned HOST_WIDE_INT offset;
7663 int extra;
7664 rtx addr = XEXP (op, 0);
7665
7666 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7667 if (TARGET_UPDATE
7668 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7669 && mode_supports_pre_incdec_p (mode)
7670 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7671 return true;
7672
7673 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7674 if (!rs6000_offsettable_memref_p (op, mode, false))
7675 return false;
7676
7677 op = address_offset (addr);
7678 if (op == NULL_RTX)
7679 return true;
7680
7681 offset = INTVAL (op);
7682 if (TARGET_POWERPC64 && (offset & 3) != 0)
7683 return false;
7684
7685 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7686 if (extra < 0)
7687 extra = 0;
7688
7689 if (GET_CODE (addr) == LO_SUM)
7690 /* For lo_sum addresses, we must allow any offset except one that
7691 causes a wrap, so test only the low 16 bits. */
7692 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7693
7694 return offset + 0x8000 < 0x10000u - extra;
7695 }
7696
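/* Editor's sketch (illustrative only, not part of this file): the biased
   comparison above.  Adding 0x8000 maps the signed range [-0x8000, 0x7fff]
   onto [0, 0xffff], so one unsigned compare covers the whole D-form range,
   with EXTRA reserving room for the later words of a multi-register access.
   The offset wraps modulo 2**64 exactly as the unsigned HOST_WIDE_INT above
   does.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int
d_form_offset_ok (uint64_t offset, int extra)
{
  return offset + 0x8000 < 0x10000u - extra;
}

int
main (void)
{
  assert (d_form_offset_ok (32767 - 8, 8));	/* Last word still fits.  */
  assert (!d_form_offset_ok (32767, 8));	/* Final word would not.  */
  assert (d_form_offset_ok ((uint64_t) -32768, 0));
  assert (!d_form_offset_ok ((uint64_t) -32769, 0));
  return 0;
}
#endif
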
7697 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7698 enforce an offset divisible by 4 even for 32-bit. */
7699
7700 bool
7701 mem_operand_ds_form (rtx op, machine_mode mode)
7702 {
7703 unsigned HOST_WIDE_INT offset;
7704 int extra;
7705 rtx addr = XEXP (op, 0);
7706
7707 if (!offsettable_address_p (false, mode, addr))
7708 return false;
7709
7710 op = address_offset (addr);
7711 if (op == NULL_RTX)
7712 return true;
7713
7714 offset = INTVAL (op);
7715 if ((offset & 3) != 0)
7716 return false;
7717
7718 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7719 if (extra < 0)
7720 extra = 0;
7721
7722 if (GET_CODE (addr) == LO_SUM)
7723 /* For lo_sum addresses, we must allow any offset except one that
7724 causes a wrap, so test only the low 16 bits. */
7725 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7726
7727 return offset + 0x8000 < 0x10000u - extra;
7728 }
7729 \f
7730 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7731
7732 static bool
7733 reg_offset_addressing_ok_p (machine_mode mode)
7734 {
7735 switch (mode)
7736 {
7737 case E_V16QImode:
7738 case E_V8HImode:
7739 case E_V4SFmode:
7740 case E_V4SImode:
7741 case E_V2DFmode:
7742 case E_V2DImode:
7743 case E_V1TImode:
7744 case E_TImode:
7745 case E_TFmode:
7746 case E_KFmode:
7747 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7748 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7749 a vector mode, if we want to use the VSX registers to move it around,
7750 we need to restrict ourselves to reg+reg addressing. Similarly for
7751 IEEE 128-bit floating point that is passed in a single vector
7752 register. */
7753 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7754 return mode_supports_dq_form (mode);
7755 break;
7756
7757 case E_SDmode:
7758 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7759 addressing for the LFIWZX and STFIWX instructions. */
7760 if (TARGET_NO_SDMODE_STACK)
7761 return false;
7762 break;
7763
7764 default:
7765 break;
7766 }
7767
7768 return true;
7769 }
7770
7771 static bool
7772 virtual_stack_registers_memory_p (rtx op)
7773 {
7774 int regnum;
7775
7776 if (REG_P (op))
7777 regnum = REGNO (op);
7778
7779 else if (GET_CODE (op) == PLUS
7780 && REG_P (XEXP (op, 0))
7781 && CONST_INT_P (XEXP (op, 1)))
7782 regnum = REGNO (XEXP (op, 0));
7783
7784 else
7785 return false;
7786
7787 return (regnum >= FIRST_VIRTUAL_REGISTER
7788 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7789 }
7790
7791 /* Return true if a MODE-sized memory access to OP plus OFFSET
7792 is known to not straddle a 32k boundary. This function is used
7793 to determine whether -mcmodel=medium code can use TOC pointer
7794 relative addressing for OP. This means the alignment of the TOC
7795 pointer must also be taken into account, and unfortunately that is
7796 only 8 bytes. */
7797
7798 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7799 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7800 #endif
7801
7802 static bool
7803 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7804 machine_mode mode)
7805 {
7806 tree decl;
7807 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7808
7809 if (!SYMBOL_REF_P (op))
7810 return false;
7811
7812 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7813 SYMBOL_REF. */
7814 if (mode_supports_dq_form (mode))
7815 return false;
7816
7817 dsize = GET_MODE_SIZE (mode);
7818 decl = SYMBOL_REF_DECL (op);
7819 if (!decl)
7820 {
7821 if (dsize == 0)
7822 return false;
7823
7824 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7825 replacing memory addresses with an anchor plus offset. We
7826 could find the decl by rummaging around in the block->objects
7827 VEC for the given offset but that seems like too much work. */
7828 dalign = BITS_PER_UNIT;
7829 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7830 && SYMBOL_REF_ANCHOR_P (op)
7831 && SYMBOL_REF_BLOCK (op) != NULL)
7832 {
7833 struct object_block *block = SYMBOL_REF_BLOCK (op);
7834
7835 dalign = block->alignment;
7836 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7837 }
7838 else if (CONSTANT_POOL_ADDRESS_P (op))
7839 {
7840 /* It would be nice to have get_pool_align ().  */
7841 machine_mode cmode = get_pool_mode (op);
7842
7843 dalign = GET_MODE_ALIGNMENT (cmode);
7844 }
7845 }
7846 else if (DECL_P (decl))
7847 {
7848 dalign = DECL_ALIGN (decl);
7849
7850 if (dsize == 0)
7851 {
7852 /* Allow BLKmode when the entire object is known to not
7853 cross a 32k boundary. */
7854 if (!DECL_SIZE_UNIT (decl))
7855 return false;
7856
7857 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7858 return false;
7859
7860 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7861 if (dsize > 32768)
7862 return false;
7863
7864 dalign /= BITS_PER_UNIT;
7865 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7866 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7867 return dalign >= dsize;
7868 }
7869 }
7870 else
7871 gcc_unreachable ();
7872
7873 /* Find how many bits of the alignment we know for this access. */
7874 dalign /= BITS_PER_UNIT;
7875 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7876 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7877 mask = dalign - 1;
7878 lsb = offset & -offset;
7879 mask &= lsb - 1;
7880 dalign = mask + 1;
7881
7882 return dalign >= dsize;
7883 }
7884
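/* Editor's sketch (illustrative only, not part of this file): the mask
   arithmetic above, ignoring the 8-byte TOC-pointer cap also applied there.
   offset & -offset isolates the lowest set bit of the offset; clipping the
   declared alignment against it gives the alignment actually known at the
   accessed address.  */
#if 0
#include <assert.h>

static unsigned long
known_align (unsigned long dalign, long offset)	/* dalign: power of two, bytes.  */
{
  unsigned long mask = dalign - 1;
  unsigned long lsb = offset & -offset;	/* 0 when offset == 0.  */
  mask &= lsb - 1;
  return mask + 1;
}

int
main (void)
{
  assert (known_align (8, 0) == 8);	/* No offset: keep the base alignment.  */
  assert (known_align (8, 4) == 4);	/* A 4-byte offset drops it to 4.  */
  assert (known_align (8, 6) == 2);
  return 0;
}
#endif
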
7885 static bool
7886 constant_pool_expr_p (rtx op)
7887 {
7888 rtx base, offset;
7889
7890 split_const (op, &base, &offset);
7891 return (SYMBOL_REF_P (base)
7892 && CONSTANT_POOL_ADDRESS_P (base)
7893 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7894 }
7895
7896 /* These are only used to pass through from print_operand/print_operand_address
7897 to rs6000_output_addr_const_extra over the intervening function
7898 output_addr_const, which is not target code. */
7899 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7900
7901 /* Return true if OP is a toc pointer relative address (the output
7902 of create_TOC_reference). If STRICT, do not match non-split
7903 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7904 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7905 TOCREL_OFFSET_RET respectively. */
7906
7907 bool
7908 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7909 const_rtx *tocrel_offset_ret)
7910 {
7911 if (!TARGET_TOC)
7912 return false;
7913
7914 if (TARGET_CMODEL != CMODEL_SMALL)
7915 {
7916 /* When strict ensure we have everything tidy. */
7917 if (strict
7918 && !(GET_CODE (op) == LO_SUM
7919 && REG_P (XEXP (op, 0))
7920 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7921 return false;
7922
7923 /* When not strict, allow non-split TOC addresses and also allow
7924 (lo_sum (high ..)) TOC addresses created during reload. */
7925 if (GET_CODE (op) == LO_SUM)
7926 op = XEXP (op, 1);
7927 }
7928
7929 const_rtx tocrel_base = op;
7930 const_rtx tocrel_offset = const0_rtx;
7931
7932 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7933 {
7934 tocrel_base = XEXP (op, 0);
7935 tocrel_offset = XEXP (op, 1);
7936 }
7937
7938 if (tocrel_base_ret)
7939 *tocrel_base_ret = tocrel_base;
7940 if (tocrel_offset_ret)
7941 *tocrel_offset_ret = tocrel_offset;
7942
7943 return (GET_CODE (tocrel_base) == UNSPEC
7944 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7945 && REG_P (XVECEXP (tocrel_base, 0, 1))
7946 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7947 }
7948
7949 /* Return true if X is a constant pool address, and also for cmodel=medium
7950 if X is a toc-relative address known to be offsettable within MODE. */
7951
7952 bool
7953 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7954 bool strict)
7955 {
7956 const_rtx tocrel_base, tocrel_offset;
7957 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7958 && (TARGET_CMODEL != CMODEL_MEDIUM
7959 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7960 || mode == QImode
7961 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7962 INTVAL (tocrel_offset), mode)));
7963 }
7964
7965 static bool
7966 legitimate_small_data_p (machine_mode mode, rtx x)
7967 {
7968 return (DEFAULT_ABI == ABI_V4
7969 && !flag_pic && !TARGET_TOC
7970 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7971 && small_data_operand (x, mode));
7972 }
7973
7974 bool
7975 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7976 bool strict, bool worst_case)
7977 {
7978 unsigned HOST_WIDE_INT offset;
7979 unsigned int extra;
7980
7981 if (GET_CODE (x) != PLUS)
7982 return false;
7983 if (!REG_P (XEXP (x, 0)))
7984 return false;
7985 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7986 return false;
7987 if (mode_supports_dq_form (mode))
7988 return quad_address_p (x, mode, strict);
7989 if (!reg_offset_addressing_ok_p (mode))
7990 return virtual_stack_registers_memory_p (x);
7991 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7992 return true;
7993 if (!CONST_INT_P (XEXP (x, 1)))
7994 return false;
7995
7996 offset = INTVAL (XEXP (x, 1));
7997 extra = 0;
7998 switch (mode)
7999 {
8000 case E_DFmode:
8001 case E_DDmode:
8002 case E_DImode:
8003 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8004 addressing. */
8005 if (VECTOR_MEM_VSX_P (mode))
8006 return false;
8007
8008 if (!worst_case)
8009 break;
8010 if (!TARGET_POWERPC64)
8011 extra = 4;
8012 else if (offset & 3)
8013 return false;
8014 break;
8015
8016 case E_TFmode:
8017 case E_IFmode:
8018 case E_KFmode:
8019 case E_TDmode:
8020 case E_TImode:
8021 case E_PTImode:
8022 extra = 8;
8023 if (!worst_case)
8024 break;
8025 if (!TARGET_POWERPC64)
8026 extra = 12;
8027 else if (offset & 3)
8028 return false;
8029 break;
8030
8031 default:
8032 break;
8033 }
8034
8035 offset += 0x8000;
8036 return offset < 0x10000 - extra;
8037 }
8038
8039 bool
8040 legitimate_indexed_address_p (rtx x, int strict)
8041 {
8042 rtx op0, op1;
8043
8044 if (GET_CODE (x) != PLUS)
8045 return false;
8046
8047 op0 = XEXP (x, 0);
8048 op1 = XEXP (x, 1);
8049
8050 return (REG_P (op0) && REG_P (op1)
8051 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8052 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8053 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8054 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8055 }
8056
8057 bool
8058 avoiding_indexed_address_p (machine_mode mode)
8059 {
8060 /* Avoid indexed addressing for modes that have non-indexed
8061 load/store instruction forms. */
8062 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8063 }
8064
8065 bool
8066 legitimate_indirect_address_p (rtx x, int strict)
8067 {
8068 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8069 }
8070
8071 bool
8072 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8073 {
8074 if (!TARGET_MACHO || !flag_pic
8075 || mode != SImode || !MEM_P (x))
8076 return false;
8077 x = XEXP (x, 0);
8078
8079 if (GET_CODE (x) != LO_SUM)
8080 return false;
8081 if (!REG_P (XEXP (x, 0)))
8082 return false;
8083 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8084 return false;
8085 x = XEXP (x, 1);
8086
8087 return CONSTANT_P (x);
8088 }
8089
8090 static bool
8091 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8092 {
8093 if (GET_CODE (x) != LO_SUM)
8094 return false;
8095 if (!REG_P (XEXP (x, 0)))
8096 return false;
8097 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8098 return false;
8099 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8100 if (mode_supports_dq_form (mode))
8101 return false;
8102 x = XEXP (x, 1);
8103
8104 if (TARGET_ELF || TARGET_MACHO)
8105 {
8106 bool large_toc_ok;
8107
8108 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8109 return false;
8110 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
8111 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8112 recognizes some LO_SUM addresses as valid although this
8113 function says the opposite. In most cases LRA can generate
8114 correct code for address reloads through its own transformations;
8115 only some LO_SUM cases are beyond it. So we need to add code
8116 analogous to that in rs6000_legitimize_reload_address for
8117 LO_SUM here, saying that some addresses are still valid. */
8118 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8119 && small_toc_ref (x, VOIDmode));
8120 if (TARGET_TOC && ! large_toc_ok)
8121 return false;
8122 if (GET_MODE_NUNITS (mode) != 1)
8123 return false;
8124 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8125 && !(/* ??? Assume floating point reg based on mode? */
8126 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8127 return false;
8128
8129 return CONSTANT_P (x) || large_toc_ok;
8130 }
8131
8132 return false;
8133 }
8134
8135
8136 /* Try machine-dependent ways of modifying an illegitimate address
8137 to be legitimate. If we find one, return the new, valid address.
8138 This is used from only one place: `memory_address' in explow.c.
8139
8140 OLDX is the address as it was before break_out_memory_refs was
8141 called. In some cases it is useful to look at this to decide what
8142 needs to be done.
8143
8144 It is always safe for this function to do nothing. It exists to
8145 recognize opportunities to optimize the output.
8146
8147 On RS/6000, first check for the sum of a register with a constant
8148 integer that is out of range. If so, generate code to add the
8149 constant with the low-order 16 bits masked to the register and force
8150 this result into another register (this can be done with `cau').
8151 Then generate an address of REG+(CONST&0xffff), allowing for the
8152 possibility of bit 16 being a one.
8153
8154 Then check for the sum of a register and something not constant, try to
8155 load the other things into a register and return the sum. */
8156
8157 static rtx
8158 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8159 machine_mode mode)
8160 {
8161 unsigned int extra;
8162
8163 if (!reg_offset_addressing_ok_p (mode)
8164 || mode_supports_dq_form (mode))
8165 {
8166 if (virtual_stack_registers_memory_p (x))
8167 return x;
8168
8169 /* In theory we should not be seeing addresses of the form reg+0,
8170 but just in case it is generated, optimize it away. */
8171 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8172 return force_reg (Pmode, XEXP (x, 0));
8173
8174 /* For TImode with load/store quad, restrict addresses to just a single
8175 pointer, so it works with both GPRs and VSX registers. */
8176 /* Make sure both operands are registers. */
8177 else if (GET_CODE (x) == PLUS
8178 && (mode != TImode || !TARGET_VSX))
8179 return gen_rtx_PLUS (Pmode,
8180 force_reg (Pmode, XEXP (x, 0)),
8181 force_reg (Pmode, XEXP (x, 1)));
8182 else
8183 return force_reg (Pmode, x);
8184 }
8185 if (SYMBOL_REF_P (x))
8186 {
8187 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8188 if (model != 0)
8189 return rs6000_legitimize_tls_address (x, model);
8190 }
8191
8192 extra = 0;
8193 switch (mode)
8194 {
8195 case E_TFmode:
8196 case E_TDmode:
8197 case E_TImode:
8198 case E_PTImode:
8199 case E_IFmode:
8200 case E_KFmode:
8201 /* As in legitimate_offset_address_p we do not assume
8202 worst-case. The mode here is just a hint as to the registers
8203 used. A TImode is usually in gprs, but may actually be in
8204 fprs. Leave worst-case scenario for reload to handle via
8205 insn constraints. PTImode is only GPRs. */
8206 extra = 8;
8207 break;
8208 default:
8209 break;
8210 }
8211
8212 if (GET_CODE (x) == PLUS
8213 && REG_P (XEXP (x, 0))
8214 && CONST_INT_P (XEXP (x, 1))
8215 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8216 >= 0x10000 - extra))
8217 {
8218 HOST_WIDE_INT high_int, low_int;
8219 rtx sum;
8220 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8221 if (low_int >= 0x8000 - extra)
8222 low_int = 0;
8223 high_int = INTVAL (XEXP (x, 1)) - low_int;
8224 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8225 GEN_INT (high_int)), 0);
8226 return plus_constant (Pmode, sum, low_int);
8227 }
8228 else if (GET_CODE (x) == PLUS
8229 && REG_P (XEXP (x, 0))
8230 && !CONST_INT_P (XEXP (x, 1))
8231 && GET_MODE_NUNITS (mode) == 1
8232 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8233 || (/* ??? Assume floating point reg based on mode? */
8234 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8235 && !avoiding_indexed_address_p (mode))
8236 {
8237 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8238 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8239 }
8240 else if ((TARGET_ELF
8241 #if TARGET_MACHO
8242 || !MACHO_DYNAMIC_NO_PIC_P
8243 #endif
8244 )
8245 && TARGET_32BIT
8246 && TARGET_NO_TOC
8247 && !flag_pic
8248 && !CONST_INT_P (x)
8249 && !CONST_WIDE_INT_P (x)
8250 && !CONST_DOUBLE_P (x)
8251 && CONSTANT_P (x)
8252 && GET_MODE_NUNITS (mode) == 1
8253 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8254 || (/* ??? Assume floating point reg based on mode? */
8255 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8256 {
8257 rtx reg = gen_reg_rtx (Pmode);
8258 if (TARGET_ELF)
8259 emit_insn (gen_elf_high (reg, x));
8260 else
8261 emit_insn (gen_macho_high (reg, x));
8262 return gen_rtx_LO_SUM (Pmode, reg, x);
8263 }
8264 else if (TARGET_TOC
8265 && SYMBOL_REF_P (x)
8266 && constant_pool_expr_p (x)
8267 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8268 return create_TOC_reference (x, NULL_RTX);
8269 else
8270 return x;
8271 }
8272
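/* Editor's sketch (illustrative only, not part of this file): the high/low
   split above, ignoring the EXTRA adjustment also applied there.  The low 16
   bits are sign-extended and the difference becomes the high part, so an
   out-of-range constant can be materialized with ADDIS of the high part
   followed by a 16-bit D-form displacement.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t val = 0x12348765;
  int64_t low = ((val & 0xffff) ^ 0x8000) - 0x8000;	/* Sign-extend low 16.  */
  int64_t high = val - low;				/* Multiple of 0x10000.  */

  assert (low == 0x8765 - 0x10000);	/* Negative: bit 15 was set.  */
  assert (high == 0x12350000);		/* Carry absorbed by the high part.  */
  assert (high + low == val);
  return 0;
}
#endif
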
8273 /* Debug version of rs6000_legitimize_address. */
8274 static rtx
8275 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8276 {
8277 rtx ret;
8278 rtx_insn *insns;
8279
8280 start_sequence ();
8281 ret = rs6000_legitimize_address (x, oldx, mode);
8282 insns = get_insns ();
8283 end_sequence ();
8284
8285 if (ret != x)
8286 {
8287 fprintf (stderr,
8288 "\nrs6000_legitimize_address: mode %s, old code %s, "
8289 "new code %s, modified\n",
8290 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8291 GET_RTX_NAME (GET_CODE (ret)));
8292
8293 fprintf (stderr, "Original address:\n");
8294 debug_rtx (x);
8295
8296 fprintf (stderr, "oldx:\n");
8297 debug_rtx (oldx);
8298
8299 fprintf (stderr, "New address:\n");
8300 debug_rtx (ret);
8301
8302 if (insns)
8303 {
8304 fprintf (stderr, "Insns added:\n");
8305 debug_rtx_list (insns, 20);
8306 }
8307 }
8308 else
8309 {
8310 fprintf (stderr,
8311 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8312 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8313
8314 debug_rtx (x);
8315 }
8316
8317 if (insns)
8318 emit_insn (insns);
8319
8320 return ret;
8321 }
8322
8323 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8324 We need to emit DTP-relative relocations. */
8325
8326 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8327 static void
8328 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8329 {
8330 switch (size)
8331 {
8332 case 4:
8333 fputs ("\t.long\t", file);
8334 break;
8335 case 8:
8336 fputs (DOUBLE_INT_ASM_OP, file);
8337 break;
8338 default:
8339 gcc_unreachable ();
8340 }
8341 output_addr_const (file, x);
8342 if (TARGET_ELF)
8343 fputs ("@dtprel+0x8000", file);
8344 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8345 {
8346 switch (SYMBOL_REF_TLS_MODEL (x))
8347 {
8348 case 0:
8349 break;
8350 case TLS_MODEL_LOCAL_EXEC:
8351 fputs ("@le", file);
8352 break;
8353 case TLS_MODEL_INITIAL_EXEC:
8354 fputs ("@ie", file);
8355 break;
8356 case TLS_MODEL_GLOBAL_DYNAMIC:
8357 case TLS_MODEL_LOCAL_DYNAMIC:
8358 fputs ("@m", file);
8359 break;
8360 default:
8361 gcc_unreachable ();
8362 }
8363 }
8364 }
8365
8366 /* Return true if X is a symbol that refers to real (rather than emulated)
8367 TLS. */
8368
8369 static bool
8370 rs6000_real_tls_symbol_ref_p (rtx x)
8371 {
8372 return (SYMBOL_REF_P (x)
8373 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8374 }
8375
8376 /* In the name of slightly smaller debug output, and to cater to
8377 general assembler lossage, recognize various UNSPEC sequences
8378 and turn them back into a direct symbol reference. */
8379
8380 static rtx
8381 rs6000_delegitimize_address (rtx orig_x)
8382 {
8383 rtx x, y, offset;
8384
8385 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8386 orig_x = XVECEXP (orig_x, 0, 0);
8387
8388 orig_x = delegitimize_mem_from_attrs (orig_x);
8389
8390 x = orig_x;
8391 if (MEM_P (x))
8392 x = XEXP (x, 0);
8393
8394 y = x;
8395 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8396 y = XEXP (y, 1);
8397
8398 offset = NULL_RTX;
8399 if (GET_CODE (y) == PLUS
8400 && GET_MODE (y) == Pmode
8401 && CONST_INT_P (XEXP (y, 1)))
8402 {
8403 offset = XEXP (y, 1);
8404 y = XEXP (y, 0);
8405 }
8406
8407 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8408 {
8409 y = XVECEXP (y, 0, 0);
8410
8411 #ifdef HAVE_AS_TLS
8412 /* Do not associate thread-local symbols with the original
8413 constant pool symbol. */
8414 if (TARGET_XCOFF
8415 && SYMBOL_REF_P (y)
8416 && CONSTANT_POOL_ADDRESS_P (y)
8417 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8418 return orig_x;
8419 #endif
8420
8421 if (offset != NULL_RTX)
8422 y = gen_rtx_PLUS (Pmode, y, offset);
8423 if (!MEM_P (orig_x))
8424 return y;
8425 else
8426 return replace_equiv_address_nv (orig_x, y);
8427 }
8428
8429 if (TARGET_MACHO
8430 && GET_CODE (orig_x) == LO_SUM
8431 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8432 {
8433 y = XEXP (XEXP (orig_x, 1), 0);
8434 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8435 return XVECEXP (y, 0, 0);
8436 }
8437
8438 return orig_x;
8439 }
8440
8441 /* Return true if X shouldn't be emitted into the debug info.
8442 The linker doesn't like .toc section references from
8443 .debug_* sections, so reject .toc section symbols. */
8444
8445 static bool
8446 rs6000_const_not_ok_for_debug_p (rtx x)
8447 {
8448 if (GET_CODE (x) == UNSPEC)
8449 return true;
8450 if (SYMBOL_REF_P (x)
8451 && CONSTANT_POOL_ADDRESS_P (x))
8452 {
8453 rtx c = get_pool_constant (x);
8454 machine_mode cmode = get_pool_mode (x);
8455 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8456 return true;
8457 }
8458
8459 return false;
8460 }
8461
8462 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8463
8464 static bool
8465 rs6000_legitimate_combined_insn (rtx_insn *insn)
8466 {
8467 int icode = INSN_CODE (insn);
8468
8469 /* Reject creating doloop insns. Combine should not be allowed
8470 to create these for a number of reasons:
8471 1) In a nested loop, if combine creates one of these in an
8472 outer loop and the register allocator happens to allocate ctr
8473 to the outer loop insn, then the inner loop can't use ctr.
8474 Inner loops ought to be more highly optimized.
8475 2) Combine often wants to create one of these from what was
8476 originally a three insn sequence, first combining the three
8477 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8478 allocated ctr, the splitter takes us back to the three insn
8479 sequence. It's better to stop combine at the two insn
8480 sequence.
8481 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8482 insns, the register allocator sometimes uses floating point
8483 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8484 jump insn and output reloads are not implemented for jumps,
8485 the ctrsi/ctrdi splitters need to handle all possible cases.
8486 That's a pain, and it gets to be seriously difficult when a
8487 splitter that runs after reload needs memory to transfer from
8488 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8489 for the difficult case. It's better to not create problems
8490 in the first place. */
8491 if (icode != CODE_FOR_nothing
8492 && (icode == CODE_FOR_bdz_si
8493 || icode == CODE_FOR_bdz_di
8494 || icode == CODE_FOR_bdnz_si
8495 || icode == CODE_FOR_bdnz_di
8496 || icode == CODE_FOR_bdztf_si
8497 || icode == CODE_FOR_bdztf_di
8498 || icode == CODE_FOR_bdnztf_si
8499 || icode == CODE_FOR_bdnztf_di))
8500 return false;
8501
8502 return true;
8503 }
8504
8505 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8506
8507 static GTY(()) rtx rs6000_tls_symbol;
8508 static rtx
8509 rs6000_tls_get_addr (void)
8510 {
8511 if (!rs6000_tls_symbol)
8512 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8513
8514 return rs6000_tls_symbol;
8515 }
8516
8517 /* Construct the SYMBOL_REF for TLS GOT references. */
8518
8519 static GTY(()) rtx rs6000_got_symbol;
8520 static rtx
8521 rs6000_got_sym (void)
8522 {
8523 if (!rs6000_got_symbol)
8524 {
8525 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8526 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8527 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8528 }
8529
8530 return rs6000_got_symbol;
8531 }
8532
8533 /* AIX Thread-Local Address support. */
8534
8535 static rtx
8536 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8537 {
8538 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8539 const char *name;
8540 char *tlsname;
8541
8542 name = XSTR (addr, 0);
8543 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8544 or the symbol will be in the TLS private data section. */
8545 if (name[strlen (name) - 1] != ']'
8546 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8547 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8548 {
8549 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8550 strcpy (tlsname, name);
8551 strcat (tlsname,
8552 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8553 tlsaddr = copy_rtx (addr);
8554 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8555 }
8556 else
8557 tlsaddr = addr;
8558
8559 /* Place addr into TOC constant pool. */
8560 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8561
8562 /* Output the TOC entry and create the MEM referencing the value. */
8563 if (constant_pool_expr_p (XEXP (sym, 0))
8564 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8565 {
8566 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8567 mem = gen_const_mem (Pmode, tocref);
8568 set_mem_alias_set (mem, get_TOC_alias_set ());
8569 }
8570 else
8571 return sym;
8572
8573 /* Use global-dynamic for local-dynamic. */
8574 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8575 || model == TLS_MODEL_LOCAL_DYNAMIC)
8576 {
8577 /* Create new TOC reference for @m symbol. */
8578 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8579 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8580 strcpy (tlsname, "*LCM");
8581 strcat (tlsname, name + 3);
8582 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8583 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8584 tocref = create_TOC_reference (modaddr, NULL_RTX);
8585 rtx modmem = gen_const_mem (Pmode, tocref);
8586 set_mem_alias_set (modmem, get_TOC_alias_set ());
8587
8588 rtx modreg = gen_reg_rtx (Pmode);
8589 emit_insn (gen_rtx_SET (modreg, modmem));
8590
8591 tmpreg = gen_reg_rtx (Pmode);
8592 emit_insn (gen_rtx_SET (tmpreg, mem));
8593
8594 dest = gen_reg_rtx (Pmode);
8595 if (TARGET_32BIT)
8596 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8597 else
8598 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8599 return dest;
8600 }
8601 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8602 else if (TARGET_32BIT)
8603 {
8604 tlsreg = gen_reg_rtx (SImode);
8605 emit_insn (gen_tls_get_tpointer (tlsreg));
8606 }
8607 else
8608 tlsreg = gen_rtx_REG (DImode, 13);
8609
8610 /* Load the TOC value into temporary register. */
8611 tmpreg = gen_reg_rtx (Pmode);
8612 emit_insn (gen_rtx_SET (tmpreg, mem));
8613 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8614 gen_rtx_MINUS (Pmode, addr, tlsreg));
8615
8616 /* Add TOC symbol value to TLS pointer. */
8617 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8618
8619 return dest;
8620 }
8621
8622 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8623 __tls_get_addr call. */
8624
8625 void
8626 rs6000_output_tlsargs (rtx *operands)
8627 {
8628 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8629 rtx op[3];
8630
8631 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8632 op[0] = operands[0];
8633 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8634 op[1] = XVECEXP (operands[2], 0, 0);
8635 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8636 {
8637 /* The GOT register. */
8638 op[2] = XVECEXP (operands[2], 0, 1);
8639 if (TARGET_CMODEL != CMODEL_SMALL)
8640 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8641 "addi %0,%0,%1@got@tlsgd@l", op);
8642 else
8643 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8644 }
8645 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8646 {
8647 if (TARGET_CMODEL != CMODEL_SMALL)
8648 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8649 "addi %0,%0,%&@got@tlsld@l", op);
8650 else
8651 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8652 }
8653 else
8654 gcc_unreachable ();
8655 }
8656
8657 /* Passes the TLS argument value from the global-dynamic and local-dynamic
8658 emit_library_call_value calls in rs6000_legitimize_tls_address to
8659 rs6000_call_aix and rs6000_call_sysv, which use it to emit the
8660 marker relocs put on __tls_get_addr calls. */
8661 static rtx global_tlsarg;
8662
8663 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8664 this (thread-local) address. */
8665
8666 static rtx
8667 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8668 {
8669 rtx dest, insn;
8670
8671 if (TARGET_XCOFF)
8672 return rs6000_legitimize_tls_address_aix (addr, model);
8673
8674 dest = gen_reg_rtx (Pmode);
8675 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8676 {
8677 rtx tlsreg;
8678
8679 if (TARGET_64BIT)
8680 {
8681 tlsreg = gen_rtx_REG (Pmode, 13);
8682 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8683 }
8684 else
8685 {
8686 tlsreg = gen_rtx_REG (Pmode, 2);
8687 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8688 }
8689 emit_insn (insn);
8690 }
8691 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8692 {
8693 rtx tlsreg, tmp;
8694
8695 tmp = gen_reg_rtx (Pmode);
8696 if (TARGET_64BIT)
8697 {
8698 tlsreg = gen_rtx_REG (Pmode, 13);
8699 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8700 }
8701 else
8702 {
8703 tlsreg = gen_rtx_REG (Pmode, 2);
8704 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8705 }
8706 emit_insn (insn);
8707 if (TARGET_64BIT)
8708 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8709 else
8710 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8711 emit_insn (insn);
8712 }
8713 else
8714 {
8715 rtx got, tga, tmp1, tmp2;
8716
8717 /* We currently use relocations like @got@tlsgd for tls, which
8718 means the linker will handle allocation of tls entries, placing
8719 them in the .got section. So use a pointer to the .got section,
8720 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8721 or to secondary GOT sections used by 32-bit -fPIC. */
8722 if (TARGET_64BIT)
8723 got = gen_rtx_REG (Pmode, 2);
8724 else
8725 {
8726 if (flag_pic == 1)
8727 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8728 else
8729 {
8730 rtx gsym = rs6000_got_sym ();
8731 got = gen_reg_rtx (Pmode);
8732 if (flag_pic == 0)
8733 rs6000_emit_move (got, gsym, Pmode);
8734 else
8735 {
8736 rtx mem, lab;
8737
8738 tmp1 = gen_reg_rtx (Pmode);
8739 tmp2 = gen_reg_rtx (Pmode);
8740 mem = gen_const_mem (Pmode, tmp1);
8741 lab = gen_label_rtx ();
8742 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8743 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8744 if (TARGET_LINK_STACK)
8745 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8746 emit_move_insn (tmp2, mem);
8747 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8748 set_unique_reg_note (last, REG_EQUAL, gsym);
8749 }
8750 }
8751 }
8752
8753 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8754 {
8755 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8756 UNSPEC_TLSGD);
8757 tga = rs6000_tls_get_addr ();
8758 global_tlsarg = arg;
8759 if (TARGET_TLS_MARKERS)
8760 {
8761 rtx argreg = gen_rtx_REG (Pmode, 3);
8762 emit_insn (gen_rtx_SET (argreg, arg));
8763 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8764 argreg, Pmode);
8765 }
8766 else
8767 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8768 global_tlsarg = NULL_RTX;
8769 }
8770 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8771 {
8772 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8773 tga = rs6000_tls_get_addr ();
8774 tmp1 = gen_reg_rtx (Pmode);
8775 global_tlsarg = arg;
8776 if (TARGET_TLS_MARKERS)
8777 {
8778 rtx argreg = gen_rtx_REG (Pmode, 3);
8779 emit_insn (gen_rtx_SET (argreg, arg));
8780 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8781 argreg, Pmode);
8782 }
8783 else
8784 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8785 global_tlsarg = NULL_RTX;
8786
8787 if (rs6000_tls_size == 16)
8788 {
8789 if (TARGET_64BIT)
8790 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8791 else
8792 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8793 }
8794 else if (rs6000_tls_size == 32)
8795 {
8796 tmp2 = gen_reg_rtx (Pmode);
8797 if (TARGET_64BIT)
8798 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8799 else
8800 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8801 emit_insn (insn);
8802 if (TARGET_64BIT)
8803 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8804 else
8805 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8806 }
8807 else
8808 {
8809 tmp2 = gen_reg_rtx (Pmode);
8810 if (TARGET_64BIT)
8811 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8812 else
8813 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8814 emit_insn (insn);
8815 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8816 }
8817 emit_insn (insn);
8818 }
8819 else
8820 {
8821 /* IE, or 64-bit offset LE. */
8822 tmp2 = gen_reg_rtx (Pmode);
8823 if (TARGET_64BIT)
8824 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8825 else
8826 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8827 emit_insn (insn);
8828 if (TARGET_64BIT)
8829 insn = gen_tls_tls_64 (dest, tmp2, addr);
8830 else
8831 insn = gen_tls_tls_32 (dest, tmp2, addr);
8832 emit_insn (insn);
8833 }
8834 }
8835
8836 return dest;
8837 }
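
/* For reference, the local-exec cases above amount to short addi/addis
   sequences against the thread pointer (r13 on 64-bit, r2 on 32-bit).
   A sketch for a hypothetical symbol X, assuming the usual @tprel
   relocations emitted by the tls_tprel_* patterns:

	-mtls-size=16:	addi DEST,13,X@tprel
	-mtls-size=32:	addis TMP,13,X@tprel@ha
			addi DEST,TMP,X@tprel@l  */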
8838
8839 /* Only create the global variable for the stack protect guard if we are using
8840 the global flavor of that guard. */
8841 static tree
8842 rs6000_init_stack_protect_guard (void)
8843 {
8844 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8845 return default_stack_protect_guard ();
8846
8847 return NULL_TREE;
8848 }
8849
8850 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8851
8852 static bool
8853 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8854 {
8855 if (GET_CODE (x) == HIGH
8856 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8857 return true;
8858
8859 /* A TLS symbol in the TOC cannot contain a sum. */
8860 if (GET_CODE (x) == CONST
8861 && GET_CODE (XEXP (x, 0)) == PLUS
8862 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8863 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8864 return true;
8865
8866 /* Do not place an ELF TLS symbol in the constant pool. */
8867 return TARGET_ELF && tls_referenced_p (x);
8868 }
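
/* For example, the checks above reject forms like

	(high (unspec [(symbol_ref "x")] ...))
	(const (plus (symbol_ref "x") (const_int 8)))	[x thread-local]

   neither of which can usefully be spilled to the constant pool.  */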
8869
8870 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8871 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8872 can be addressed relative to the toc pointer. */
8873
8874 static bool
8875 use_toc_relative_ref (rtx sym, machine_mode mode)
8876 {
8877 return ((constant_pool_expr_p (sym)
8878 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8879 get_pool_mode (sym)))
8880 || (TARGET_CMODEL == CMODEL_MEDIUM
8881 && SYMBOL_REF_LOCAL_P (sym)
8882 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8883 }
8884
8885 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
8886 replace the input X, or the original X if no replacement is called for.
8887 The output parameter *WIN is 1 if the calling macro should goto WIN,
8888 0 if it should not.
8889
8890 For RS/6000, we wish to handle large displacements off a base
8891 register by splitting the addend across an addi/addis and the mem insn.
8892 This cuts the number of extra insns needed from 3 to 1.
8893
8894 On Darwin, we use this to generate code for floating point constants.
8895 A movsf_low is generated so we wind up with 2 instructions rather than 3.
8896 The Darwin code is inside #if TARGET_MACHO because only then are the
8897 machopic_* functions defined. */
8898 static rtx
8899 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
8900 int opnum, int type,
8901 int ind_levels ATTRIBUTE_UNUSED, int *win)
8902 {
8903 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8904 bool quad_offset_p = mode_supports_dq_form (mode);
8905
8906 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
8907 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
8908 if (reg_offset_p
8909 && opnum == 1
8910 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
8911 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
8912 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
8913 && TARGET_P9_VECTOR)
8914 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
8915 && TARGET_P9_VECTOR)))
8916 reg_offset_p = false;
8917
8918 /* We must recognize output that we have already generated ourselves. */
8919 if (GET_CODE (x) == PLUS
8920 && GET_CODE (XEXP (x, 0)) == PLUS
8921 && REG_P (XEXP (XEXP (x, 0), 0))
8922 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
8923 && CONST_INT_P (XEXP (x, 1)))
8924 {
8925 if (TARGET_DEBUG_ADDR)
8926 {
8927 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
8928 debug_rtx (x);
8929 }
8930 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8931 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
8932 opnum, (enum reload_type) type);
8933 *win = 1;
8934 return x;
8935 }
8936
8937 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
8938 if (GET_CODE (x) == LO_SUM
8939 && GET_CODE (XEXP (x, 0)) == HIGH)
8940 {
8941 if (TARGET_DEBUG_ADDR)
8942 {
8943 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
8944 debug_rtx (x);
8945 }
8946 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8947 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8948 opnum, (enum reload_type) type);
8949 *win = 1;
8950 return x;
8951 }
8952
8953 #if TARGET_MACHO
8954 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
8955 && GET_CODE (x) == LO_SUM
8956 && GET_CODE (XEXP (x, 0)) == PLUS
8957 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
8958 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
8959 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
8960 && machopic_operand_p (XEXP (x, 1)))
8961 {
8962 /* Result of previous invocation of this function on Darwin
8963 floating point constant. */
8964 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8965 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8966 opnum, (enum reload_type) type);
8967 *win = 1;
8968 return x;
8969 }
8970 #endif
8971
8972 if (TARGET_CMODEL != CMODEL_SMALL
8973 && reg_offset_p
8974 && !quad_offset_p
8975 && small_toc_ref (x, VOIDmode))
8976 {
8977 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
8978 x = gen_rtx_LO_SUM (Pmode, hi, x);
8979 if (TARGET_DEBUG_ADDR)
8980 {
8981 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
8982 debug_rtx (x);
8983 }
8984 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
8985 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
8986 opnum, (enum reload_type) type);
8987 *win = 1;
8988 return x;
8989 }
8990
8991 if (GET_CODE (x) == PLUS
8992 && REG_P (XEXP (x, 0))
8993 && HARD_REGISTER_P (XEXP (x, 0))
8994 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
8995 && CONST_INT_P (XEXP (x, 1))
8996 && reg_offset_p
8997 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
8998 {
8999 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9000 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9001 HOST_WIDE_INT high
9002 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9003
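/* A worked example of the split: for val = 0x1234abcd,
   low = sign-extended 0xabcd = -0x5433 and
   high = val - low = 0x12350000, so high + low == val.  HIGH then
   fits an addis immediate (0x1235 << 16) and LOW fits the 16-bit
   signed displacement of the mem insn.  */
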
9004 /* Check for 32-bit overflow or quad addresses with one of the
9005 four least significant bits set. */
9006 if (high + low != val
9007 || (quad_offset_p && (low & 0xf)))
9008 {
9009 *win = 0;
9010 return x;
9011 }
9012
9013 /* Reload the high part into a base reg; leave the low part
9014 in the mem directly. */
9015
9016 x = gen_rtx_PLUS (GET_MODE (x),
9017 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9018 GEN_INT (high)),
9019 GEN_INT (low));
9020
9021 if (TARGET_DEBUG_ADDR)
9022 {
9023 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9024 debug_rtx (x);
9025 }
9026 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9027 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9028 opnum, (enum reload_type) type);
9029 *win = 1;
9030 return x;
9031 }
9032
9033 if (SYMBOL_REF_P (x)
9034 && reg_offset_p
9035 && !quad_offset_p
9036 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9037 #if TARGET_MACHO
9038 && DEFAULT_ABI == ABI_DARWIN
9039 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9040 && machopic_symbol_defined_p (x)
9041 #else
9042 && DEFAULT_ABI == ABI_V4
9043 && !flag_pic
9044 #endif
9045 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9046 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9047 without fprs.
9048 ??? Assume floating point reg based on mode? This assumption is
9049 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9050 where reload ends up doing a DFmode load of a constant from
9051 mem using two gprs. Unfortunately, at this point reload
9052 hasn't yet selected regs so poking around in reload data
9053 won't help and even if we could figure out the regs reliably,
9054 we'd still want to allow this transformation when the mem is
9055 naturally aligned. Since we say the address is good here, we
9056 can't disable offsets from LO_SUMs in mem_operand_gpr.
9057 FIXME: Allow offset from lo_sum for other modes too, when
9058 mem is sufficiently aligned.
9059
9060 Also disallow this if the type can go in VMX/Altivec registers, since
9061 those registers do not have d-form (reg+offset) address modes. */
9062 && !reg_addr[mode].scalar_in_vmx_p
9063 && mode != TFmode
9064 && mode != TDmode
9065 && mode != IFmode
9066 && mode != KFmode
9067 && (mode != TImode || !TARGET_VSX)
9068 && mode != PTImode
9069 && (mode != DImode || TARGET_POWERPC64)
9070 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9071 || TARGET_HARD_FLOAT))
9072 {
9073 #if TARGET_MACHO
9074 if (flag_pic)
9075 {
9076 rtx offset = machopic_gen_offset (x);
9077 x = gen_rtx_LO_SUM (GET_MODE (x),
9078 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9079 gen_rtx_HIGH (Pmode, offset)), offset);
9080 }
9081 else
9082 #endif
9083 x = gen_rtx_LO_SUM (GET_MODE (x),
9084 gen_rtx_HIGH (Pmode, x), x);
9085
9086 if (TARGET_DEBUG_ADDR)
9087 {
9088 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9089 debug_rtx (x);
9090 }
9091 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9092 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9093 opnum, (enum reload_type) type);
9094 *win = 1;
9095 return x;
9096 }
9097
9098 /* Reload an offset address wrapped by an AND that represents the
9099 masking of the lower bits. Strip the outer AND and let reload
9100 convert the offset address into an indirect address. For VSX,
9101 force reload to create the address with an AND in a separate
9102 register, because we can't guarantee an altivec register will
9103 be used. */
9104 if (VECTOR_MEM_ALTIVEC_P (mode)
9105 && GET_CODE (x) == AND
9106 && GET_CODE (XEXP (x, 0)) == PLUS
9107 && REG_P (XEXP (XEXP (x, 0), 0))
9108 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
9109 && CONST_INT_P (XEXP (x, 1))
9110 && INTVAL (XEXP (x, 1)) == -16)
9111 {
9112 x = XEXP (x, 0);
9113 *win = 1;
9114 return x;
9115 }
9116
9117 if (TARGET_TOC
9118 && reg_offset_p
9119 && !quad_offset_p
9120 && SYMBOL_REF_P (x)
9121 && use_toc_relative_ref (x, mode))
9122 {
9123 x = create_TOC_reference (x, NULL_RTX);
9124 if (TARGET_CMODEL != CMODEL_SMALL)
9125 {
9126 if (TARGET_DEBUG_ADDR)
9127 {
9128 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9129 debug_rtx (x);
9130 }
9131 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9132 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9133 opnum, (enum reload_type) type);
9134 }
9135 *win = 1;
9136 return x;
9137 }
9138 *win = 0;
9139 return x;
9140 }
9141
9142 /* Debug version of rs6000_legitimize_reload_address. */
9143 static rtx
9144 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9145 int opnum, int type,
9146 int ind_levels, int *win)
9147 {
9148 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9149 ind_levels, win);
9150 fprintf (stderr,
9151 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9152 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9153 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9154 debug_rtx (x);
9155
9156 if (x == ret)
9157 fprintf (stderr, "Same address returned\n");
9158 else if (!ret)
9159 fprintf (stderr, "NULL returned\n");
9160 else
9161 {
9162 fprintf (stderr, "New address:\n");
9163 debug_rtx (ret);
9164 }
9165
9166 return ret;
9167 }
9168
9169 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9170 that is a valid memory address for an instruction.
9171 The MODE argument is the machine mode for the MEM expression
9172 that wants to use this address.
9173
9174 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
9175 refers to a constant pool entry of an address (or the sum of it
9176 plus a constant), a short (16-bit signed) constant plus a register,
9177 the sum of two registers, or a register indirect, possibly with an
9178 auto-increment. For DFmode, DDmode and DImode with a constant plus
9179 register, we must ensure that both words are addressable, or that we
9180 are on PowerPC64 with a word-aligned offset.
9181
9182 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9183 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9184 because adjacent memory cells are accessed by adding word-sized offsets
9185 during assembly output. */
9186 static bool
9187 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9188 {
9189 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9190 bool quad_offset_p = mode_supports_dq_form (mode);
9191
9192 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9193 if (VECTOR_MEM_ALTIVEC_P (mode)
9194 && GET_CODE (x) == AND
9195 && CONST_INT_P (XEXP (x, 1))
9196 && INTVAL (XEXP (x, 1)) == -16)
9197 x = XEXP (x, 0);
9198
9199 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9200 return 0;
9201 if (legitimate_indirect_address_p (x, reg_ok_strict))
9202 return 1;
9203 if (TARGET_UPDATE
9204 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9205 && mode_supports_pre_incdec_p (mode)
9206 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9207 return 1;
9208 /* Handle restricted vector d-form offsets in ISA 3.0. */
9209 if (quad_offset_p)
9210 {
9211 if (quad_address_p (x, mode, reg_ok_strict))
9212 return 1;
9213 }
9214 else if (virtual_stack_registers_memory_p (x))
9215 return 1;
9216
9217 else if (reg_offset_p)
9218 {
9219 if (legitimate_small_data_p (mode, x))
9220 return 1;
9221 if (legitimate_constant_pool_address_p (x, mode,
9222 reg_ok_strict || lra_in_progress))
9223 return 1;
9224 }
9225
9226 /* For TImode, if we have TImode in VSX registers, only allow register
9227 indirect addresses. This will allow the values to go in either GPRs
9228 or VSX registers without reloading. The vector types would tend to
9229 go into VSX registers, so we allow REG+REG, while TImode seems
9230 somewhat split, in that some uses are GPR based, and some VSX based. */
9231 /* FIXME: We could loosen this by changing the following to
9232 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
9233 but currently we cannot allow REG+REG addressing for TImode. See
9234 PR72827 for complete details on how this ends up hoodwinking DSE. */
9235 if (mode == TImode && TARGET_VSX)
9236 return 0;
9237 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9238 if (! reg_ok_strict
9239 && reg_offset_p
9240 && GET_CODE (x) == PLUS
9241 && REG_P (XEXP (x, 0))
9242 && (XEXP (x, 0) == virtual_stack_vars_rtx
9243 || XEXP (x, 0) == arg_pointer_rtx)
9244 && CONST_INT_P (XEXP (x, 1)))
9245 return 1;
9246 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9247 return 1;
9248 if (!FLOAT128_2REG_P (mode)
9249 && (TARGET_HARD_FLOAT
9250 || TARGET_POWERPC64
9251 || (mode != DFmode && mode != DDmode))
9252 && (TARGET_POWERPC64 || mode != DImode)
9253 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9254 && mode != PTImode
9255 && !avoiding_indexed_address_p (mode)
9256 && legitimate_indexed_address_p (x, reg_ok_strict))
9257 return 1;
9258 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9259 && mode_supports_pre_modify_p (mode)
9260 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9261 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9262 reg_ok_strict, false)
9263 || (!avoiding_indexed_address_p (mode)
9264 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9265 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9266 return 1;
9267 if (reg_offset_p && !quad_offset_p
9268 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9269 return 1;
9270 return 0;
9271 }
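
/* Illustrative addresses accepted above, modulo the mode and target
   checks:

	(reg 3)				register indirect
	(plus (reg 3) (const_int 16))	reg + 16-bit signed offset
	(plus (reg 3) (reg 4))		indexed (reg + reg)
	(pre_inc (reg 3))		auto-increment, with -mupdate  */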
9272
9273 /* Debug version of rs6000_legitimate_address_p. */
9274 static bool
9275 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9276 bool reg_ok_strict)
9277 {
9278 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9279 fprintf (stderr,
9280 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9281 "strict = %d, reload = %s, code = %s\n",
9282 ret ? "true" : "false",
9283 GET_MODE_NAME (mode),
9284 reg_ok_strict,
9285 (reload_completed ? "after" : "before"),
9286 GET_RTX_NAME (GET_CODE (x)));
9287 debug_rtx (x);
9288
9289 return ret;
9290 }
9291
9292 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9293
9294 static bool
9295 rs6000_mode_dependent_address_p (const_rtx addr,
9296 addr_space_t as ATTRIBUTE_UNUSED)
9297 {
9298 return rs6000_mode_dependent_address_ptr (addr);
9299 }
9300
9301 /* Return true if ADDR (a legitimate address expression)
9302 has an effect that depends on the machine mode it is used for.
9303
9304 On the RS/6000 this is true of all integral offsets (since AltiVec
9305 and VSX modes don't allow them) and of pre-increment and decrement.
9306
9307 ??? Except that due to conceptual problems in offsettable_address_p
9308 we can't really report the problems of integral offsets. So leave
9309 this assuming that the adjustable offset must be valid for the
9310 sub-words of a TFmode operand, which is what we had before. */
9311
9312 static bool
9313 rs6000_mode_dependent_address (const_rtx addr)
9314 {
9315 switch (GET_CODE (addr))
9316 {
9317 case PLUS:
9318 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9319 is considered a legitimate address before reload, so there
9320 are no offset restrictions in that case. Note that this
9321 condition is safe in strict mode because any address involving
9322 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9323 been rejected as illegitimate. */
9324 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9325 && XEXP (addr, 0) != arg_pointer_rtx
9326 && CONST_INT_P (XEXP (addr, 1)))
9327 {
9328 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9329 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9330 }
9331 break;
9332
9333 case LO_SUM:
9334 /* Anything in the constant pool is sufficiently aligned that
9335 all bytes have the same high part address. */
9336 return !legitimate_constant_pool_address_p (addr, QImode, false);
9337
9338 /* Auto-increment cases are now treated generically in recog.c. */
9339 case PRE_MODIFY:
9340 return TARGET_UPDATE;
9341
9342 /* AND is only allowed in Altivec loads. */
9343 case AND:
9344 return true;
9345
9346 default:
9347 break;
9348 }
9349
9350 return false;
9351 }
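
/* A worked example of the PLUS case: with TARGET_POWERPC64 the test
   above is val + 0x8000 >= 0x10000 - 8, i.e. val >= 32760.  An offset
   of 32760 is fine for one doubleword, but the second doubleword of a
   TFmode access would sit at 32768, overflowing the signed 16-bit
   displacement, so such an address is mode dependent.  */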
9352
9353 /* Debug version of rs6000_mode_dependent_address. */
9354 static bool
9355 rs6000_debug_mode_dependent_address (const_rtx addr)
9356 {
9357 bool ret = rs6000_mode_dependent_address (addr);
9358
9359 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9360 ret ? "true" : "false");
9361 debug_rtx (addr);
9362
9363 return ret;
9364 }
9365
9366 /* Implement FIND_BASE_TERM. */
9367
9368 rtx
9369 rs6000_find_base_term (rtx op)
9370 {
9371 rtx base;
9372
9373 base = op;
9374 if (GET_CODE (base) == CONST)
9375 base = XEXP (base, 0);
9376 if (GET_CODE (base) == PLUS)
9377 base = XEXP (base, 0);
9378 if (GET_CODE (base) == UNSPEC)
9379 switch (XINT (base, 1))
9380 {
9381 case UNSPEC_TOCREL:
9382 case UNSPEC_MACHOPIC_OFFSET:
9383 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9384 for aliasing purposes. */
9385 return XVECEXP (base, 0, 0);
9386 }
9387
9388 return op;
9389 }
9390
9391 /* More elaborate version of recog's offsettable_memref_p predicate
9392 that works around the ??? note of rs6000_mode_dependent_address.
9393 In particular it accepts
9394
9395 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9396
9397 in 32-bit mode, that the recog predicate rejects. */
9398
9399 static bool
9400 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9401 {
9402 bool worst_case;
9403
9404 if (!MEM_P (op))
9405 return false;
9406
9407 /* First mimic offsettable_memref_p. */
9408 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9409 return true;
9410
9411 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9412 the latter predicate knows nothing about the mode of the memory
9413 reference and, therefore, assumes that it is the largest supported
9414 mode (TFmode). As a consequence, legitimate offsettable memory
9415 references are rejected. rs6000_legitimate_offset_address_p contains
9416 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9417 at least with a little bit of help here given that we know the
9418 actual registers used. */
9419 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9420 || GET_MODE_SIZE (reg_mode) == 4);
9421 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9422 strict, worst_case);
9423 }
9424
9425 /* Determine the reassociation width to be used in reassociate_bb.
9426 This takes into account how many parallel operations we
9427 can actually do of a given type, and also the latency.
9428 P8:
9429 int add/sub 6/cycle
9430 mul 2/cycle
9431 vect add/sub/mul 2/cycle
9432 fp add/sub/mul 2/cycle
9433 dfp 1/cycle
9434 */
9435
9436 static int
9437 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9438 machine_mode mode)
9439 {
9440 switch (rs6000_tune)
9441 {
9442 case PROCESSOR_POWER8:
9443 case PROCESSOR_POWER9:
9444 if (DECIMAL_FLOAT_MODE_P (mode))
9445 return 1;
9446 if (VECTOR_MODE_P (mode))
9447 return 4;
9448 if (INTEGRAL_MODE_P (mode))
9449 return 1;
9450 if (FLOAT_MODE_P (mode))
9451 return 4;
9452 break;
9453 default:
9454 break;
9455 }
9456 return 1;
9457 }
9458
9459 /* Change register usage conditional on target flags. */
9460 static void
9461 rs6000_conditional_register_usage (void)
9462 {
9463 int i;
9464
9465 if (TARGET_DEBUG_TARGET)
9466 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9467
9468 /* Set MQ register fixed (already call_used) so that it will not be
9469 allocated. */
9470 fixed_regs[64] = 1;
9471
9472 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9473 if (TARGET_64BIT)
9474 fixed_regs[13] = call_used_regs[13]
9475 = call_really_used_regs[13] = 1;
9476
9477 /* Conditionally disable FPRs. */
9478 if (TARGET_SOFT_FLOAT)
9479 for (i = 32; i < 64; i++)
9480 fixed_regs[i] = call_used_regs[i]
9481 = call_really_used_regs[i] = 1;
9482
9483 /* The TOC register is not killed across calls in a way that is
9484 visible to the compiler. */
9485 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9486 call_really_used_regs[2] = 0;
9487
9488 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9489 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9490
9491 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9492 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9493 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9494 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9495
9496 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9497 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9498 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9499 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9500
9501 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9502 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9503 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9504
9505 if (!TARGET_ALTIVEC && !TARGET_VSX)
9506 {
9507 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9508 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9509 call_really_used_regs[VRSAVE_REGNO] = 1;
9510 }
9511
9512 if (TARGET_ALTIVEC || TARGET_VSX)
9513 global_regs[VSCR_REGNO] = 1;
9514
9515 if (TARGET_ALTIVEC_ABI)
9516 {
9517 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9518 call_used_regs[i] = call_really_used_regs[i] = 1;
9519
9520 /* AIX reserves VR20:31 in non-extended ABI mode. */
9521 if (TARGET_XCOFF)
9522 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9523 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9524 }
9525 }
9526
9527 \f
9528 /* Output insns to set DEST equal to the constant SOURCE as a series of
9529 lis, ori and shl instructions and return TRUE. */
9530
9531 bool
9532 rs6000_emit_set_const (rtx dest, rtx source)
9533 {
9534 machine_mode mode = GET_MODE (dest);
9535 rtx temp, set;
9536 rtx_insn *insn;
9537 HOST_WIDE_INT c;
9538
9539 gcc_checking_assert (CONST_INT_P (source));
9540 c = INTVAL (source);
9541 switch (mode)
9542 {
9543 case E_QImode:
9544 case E_HImode:
9545 emit_insn (gen_rtx_SET (dest, source));
9546 return true;
9547
9548 case E_SImode:
9549 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9550
9551 emit_insn (gen_rtx_SET (copy_rtx (temp),
9552 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9553 emit_insn (gen_rtx_SET (dest,
9554 gen_rtx_IOR (SImode, copy_rtx (temp),
9555 GEN_INT (c & 0xffff))));
9556 break;
9557
9558 case E_DImode:
9559 if (!TARGET_POWERPC64)
9560 {
9561 rtx hi, lo;
9562
9563 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9564 DImode);
9565 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9566 DImode);
9567 emit_move_insn (hi, GEN_INT (c >> 32));
9568 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9569 emit_move_insn (lo, GEN_INT (c));
9570 }
9571 else
9572 rs6000_emit_set_long_const (dest, c);
9573 break;
9574
9575 default:
9576 gcc_unreachable ();
9577 }
9578
9579 insn = get_last_insn ();
9580 set = single_set (insn);
9581 if (! CONSTANT_P (SET_SRC (set)))
9582 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9583
9584 return true;
9585 }
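
/* For instance, the SImode arm above loads c = 0x12345678 with

	lis TEMP,0x1234		# TEMP = 0x12340000
	ori DEST,TEMP,0x5678	# DEST = 0x12345678

   matching the two SETs emitted: the high halfword first, then an
   IOR of the low halfword.  TEMP and DEST are placeholders here.  */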
9586
9587 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9588 Output insns to set DEST equal to the constant C as a series of
9589 lis, ori and shl instructions. */
9590
9591 static void
9592 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9593 {
9594 rtx temp;
9595 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9596
9597 ud1 = c & 0xffff;
9598 c = c >> 16;
9599 ud2 = c & 0xffff;
9600 c = c >> 16;
9601 ud3 = c & 0xffff;
9602 c = c >> 16;
9603 ud4 = c & 0xffff;
9604
9605 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9606 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9607 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9608
9609 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9610 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9611 {
9612 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9613
9614 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9615 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9616 if (ud1 != 0)
9617 emit_move_insn (dest,
9618 gen_rtx_IOR (DImode, copy_rtx (temp),
9619 GEN_INT (ud1)));
9620 }
9621 else if (ud3 == 0 && ud4 == 0)
9622 {
9623 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9624
9625 gcc_assert (ud2 & 0x8000);
9626 emit_move_insn (copy_rtx (temp),
9627 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9628 if (ud1 != 0)
9629 emit_move_insn (copy_rtx (temp),
9630 gen_rtx_IOR (DImode, copy_rtx (temp),
9631 GEN_INT (ud1)));
9632 emit_move_insn (dest,
9633 gen_rtx_ZERO_EXTEND (DImode,
9634 gen_lowpart (SImode,
9635 copy_rtx (temp))));
9636 }
9637 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9638 || (ud4 == 0 && ! (ud3 & 0x8000)))
9639 {
9640 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9641
9642 emit_move_insn (copy_rtx (temp),
9643 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9644 if (ud2 != 0)
9645 emit_move_insn (copy_rtx (temp),
9646 gen_rtx_IOR (DImode, copy_rtx (temp),
9647 GEN_INT (ud2)));
9648 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9649 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9650 GEN_INT (16)));
9651 if (ud1 != 0)
9652 emit_move_insn (dest,
9653 gen_rtx_IOR (DImode, copy_rtx (temp),
9654 GEN_INT (ud1)));
9655 }
9656 else
9657 {
9658 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9659
9660 emit_move_insn (copy_rtx (temp),
9661 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9662 if (ud3 != 0)
9663 emit_move_insn (copy_rtx (temp),
9664 gen_rtx_IOR (DImode, copy_rtx (temp),
9665 GEN_INT (ud3)));
9666
9667 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9668 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9669 GEN_INT (32)));
9670 if (ud2 != 0)
9671 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9672 gen_rtx_IOR (DImode, copy_rtx (temp),
9673 GEN_INT (ud2 << 16)));
9674 if (ud1 != 0)
9675 emit_move_insn (dest,
9676 gen_rtx_IOR (DImode, copy_rtx (temp),
9677 GEN_INT (ud1)));
9678 }
9679 }
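
/* A worked example of the final (general) case above: for
   c = 0x123456789abcdef0, ud4..ud1 are 0x1234, 0x5678, 0x9abc and
   0xdef0, giving the five-insn sequence

	lis TEMP,0x1234
	ori TEMP,TEMP,0x5678
	sldi TEMP,TEMP,32
	oris TEMP,TEMP,0x9abc
	ori DEST,TEMP,0xdef0

   where TEMP and DEST are placeholder register names.  */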
9680
9681 /* Helper for the following. Get rid of [r+r] memory refs
9682 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9683
9684 static void
9685 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9686 {
9687 if (MEM_P (operands[0])
9688 && !REG_P (XEXP (operands[0], 0))
9689 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9690 GET_MODE (operands[0]), false))
9691 operands[0]
9692 = replace_equiv_address (operands[0],
9693 copy_addr_to_reg (XEXP (operands[0], 0)));
9694
9695 if (MEM_P (operands[1])
9696 && !REG_P (XEXP (operands[1], 0))
9697 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9698 GET_MODE (operands[1]), false))
9699 operands[1]
9700 = replace_equiv_address (operands[1],
9701 copy_addr_to_reg (XEXP (operands[1], 0)));
9702 }
9703
9704 /* Generate a vector of constants to permute MODE for a little-endian
9705 storage operation by swapping the two halves of a vector. */
9706 static rtvec
9707 rs6000_const_vec (machine_mode mode)
9708 {
9709 int i, subparts;
9710 rtvec v;
9711
9712 switch (mode)
9713 {
9714 case E_V1TImode:
9715 subparts = 1;
9716 break;
9717 case E_V2DFmode:
9718 case E_V2DImode:
9719 subparts = 2;
9720 break;
9721 case E_V4SFmode:
9722 case E_V4SImode:
9723 subparts = 4;
9724 break;
9725 case E_V8HImode:
9726 subparts = 8;
9727 break;
9728 case E_V16QImode:
9729 subparts = 16;
9730 break;
9731 default:
9732 gcc_unreachable ();
9733 }
9734
9735 v = rtvec_alloc (subparts);
9736
9737 for (i = 0; i < subparts / 2; ++i)
9738 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9739 for (i = subparts / 2; i < subparts; ++i)
9740 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9741
9742 return v;
9743 }
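
/* For example, for V4SImode this builds the selector { 2, 3, 0, 1 },
   which exchanges the two 64-bit halves of the vector.  */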
9744
9745 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9746 store operation. */
9747 void
9748 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9749 {
9750 /* Scalar permutations are easier to express in integer modes than in
9751 floating-point modes, so cast them here. We use V1TImode instead
9752 of TImode to ensure that the values don't go through GPRs. */
9753 if (FLOAT128_VECTOR_P (mode))
9754 {
9755 dest = gen_lowpart (V1TImode, dest);
9756 source = gen_lowpart (V1TImode, source);
9757 mode = V1TImode;
9758 }
9759
9760 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9761 scalar. */
9762 if (mode == TImode || mode == V1TImode)
9763 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9764 GEN_INT (64))));
9765 else
9766 {
9767 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9768 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9769 }
9770 }
9771
9772 /* Emit a little-endian load from vector memory location SOURCE to VSX
9773 register DEST in mode MODE. The load is done with two permuting
9774 insn's that represent an lxvd2x and xxpermdi. */
9775 void
9776 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9777 {
9778 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9779 V1TImode). */
9780 if (mode == TImode || mode == V1TImode)
9781 {
9782 mode = V2DImode;
9783 dest = gen_lowpart (V2DImode, dest);
9784 source = adjust_address (source, V2DImode, 0);
9785 }
9786
9787 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9788 rs6000_emit_le_vsx_permute (tmp, source, mode);
9789 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9790 }
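
/* The two permutes cancel the element swap: on little-endian targets
   lxvd2x loads the vector with its doublewords exchanged, so for a
   V4SImode value { 0, 1, 2, 3 } in memory the register briefly holds
   { 2, 3, 0, 1 } until the second permute restores { 0, 1, 2, 3 }.  */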
9791
9792 /* Emit a little-endian store to vector memory location DEST from VSX
9793 register SOURCE in mode MODE. The store is done with two permuting
9794 insns that represent an xxpermdi and an stxvd2x. */
9795 void
9796 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9797 {
9798 /* This should never be called during or after LRA, because it does
9799 not re-permute the source register. It is intended only for use
9800 during expand. */
9801 gcc_assert (!lra_in_progress && !reload_completed);
9802
9803 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9804 V1TImode). */
9805 if (mode == TImode || mode == V1TImode)
9806 {
9807 mode = V2DImode;
9808 dest = adjust_address (dest, V2DImode, 0);
9809 source = gen_lowpart (V2DImode, source);
9810 }
9811
9812 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9813 rs6000_emit_le_vsx_permute (tmp, source, mode);
9814 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9815 }
9816
9817 /* Emit a sequence representing a little-endian VSX load or store,
9818 moving data from SOURCE to DEST in mode MODE. This is done
9819 separately from rs6000_emit_move to ensure it is called only
9820 during expand. LE VSX loads and stores introduced later are
9821 handled with a split. The expand-time RTL generation allows
9822 us to optimize away redundant pairs of register-permutes. */
9823 void
9824 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9825 {
9826 gcc_assert (!BYTES_BIG_ENDIAN
9827 && VECTOR_MEM_VSX_P (mode)
9828 && !TARGET_P9_VECTOR
9829 && !gpr_or_gpr_p (dest, source)
9830 && (MEM_P (source) ^ MEM_P (dest)));
9831
9832 if (MEM_P (source))
9833 {
9834 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9835 rs6000_emit_le_vsx_load (dest, source, mode);
9836 }
9837 else
9838 {
9839 if (!REG_P (source))
9840 source = force_reg (mode, source);
9841 rs6000_emit_le_vsx_store (dest, source, mode);
9842 }
9843 }
9844
9845 /* Return whether an SFmode or SImode move can be done without converting one
9846 mode to another. This arises when we have:
9847
9848 (SUBREG:SF (REG:SI ...))
9849 (SUBREG:SI (REG:SF ...))
9850
9851 and one of the values is in a floating point/vector register, where SFmode
9852 scalars are stored in DFmode format. */
9853
9854 bool
9855 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9856 {
9857 if (TARGET_ALLOW_SF_SUBREG)
9858 return true;
9859
9860 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9861 return true;
9862
9863 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9864 return true;
9865
9866 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9867 if (SUBREG_P (dest))
9868 {
9869 rtx dest_subreg = SUBREG_REG (dest);
9870 rtx src_subreg = SUBREG_REG (src);
9871 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9872 }
9873
9874 return false;
9875 }
9876
9877
9878 /* Helper function to change moves with:
9879
9880 (SUBREG:SF (REG:SI)) and
9881 (SUBREG:SI (REG:SF))
9882
9883 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9884 values are stored as DFmode values in the VSX registers. We need to convert
9885 the bits before we can use a direct move or operate on the bits in the
9886 vector register as an integer type.
9887
9888 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9889
9890 static bool
9891 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9892 {
9893 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9894 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9895 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9896 {
9897 rtx inner_source = SUBREG_REG (source);
9898 machine_mode inner_mode = GET_MODE (inner_source);
9899
9900 if (mode == SImode && inner_mode == SFmode)
9901 {
9902 emit_insn (gen_movsi_from_sf (dest, inner_source));
9903 return true;
9904 }
9905
9906 if (mode == SFmode && inner_mode == SImode)
9907 {
9908 emit_insn (gen_movsf_from_si (dest, inner_source));
9909 return true;
9910 }
9911 }
9912
9913 return false;
9914 }
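
/* Sketch of the transforms performed above:

	(set (reg:SI r) (subreg:SI (reg:SF f) 0))
	  -> movsi_from_sf, converting the DFmode-format bits in the
	     VSX register to the 32-bit SFmode layout first;
	(set (reg:SF f) (subreg:SF (reg:SI r) 0))
	  -> movsf_from_si, the inverse direction.  */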
9915
9916 /* Emit a move from SOURCE to DEST in mode MODE. */
9917 void
9918 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9919 {
9920 rtx operands[2];
9921 operands[0] = dest;
9922 operands[1] = source;
9923
9924 if (TARGET_DEBUG_ADDR)
9925 {
9926 fprintf (stderr,
9927 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9928 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9929 GET_MODE_NAME (mode),
9930 lra_in_progress,
9931 reload_completed,
9932 can_create_pseudo_p ());
9933 debug_rtx (dest);
9934 fprintf (stderr, "source:\n");
9935 debug_rtx (source);
9936 }
9937
9938 /* Check that we get CONST_WIDE_INT only when we should. */
9939 if (CONST_WIDE_INT_P (operands[1])
9940 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9941 gcc_unreachable ();
9942
9943 #ifdef HAVE_AS_GNU_ATTRIBUTE
9944 /* If we use a long double type, set the flags in .gnu_attribute that say
9945 what the long double type is. This is to allow the linker's warning
9946 message for the wrong long double to be useful, even if the function does
9947 not do a call (for example, doing a 128-bit add on power9 if the long
9948 double type is IEEE 128-bit. Do not set this if __ibm128 or __floa128 are
9949 used if they aren't the default long dobule type. */
9950 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9951 {
9952 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9953 rs6000_passes_float = rs6000_passes_long_double = true;
9954
9955 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9956 rs6000_passes_float = rs6000_passes_long_double = true;
9957 }
9958 #endif
9959
9960 /* See if we need to special case SImode/SFmode SUBREG moves. */
9961 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9962 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9963 return;
9964
9965 /* Check if GCC is setting up a block move that will end up using FP
9966 registers as temporaries. We must make sure this is acceptable. */
9967 if (MEM_P (operands[0])
9968 && MEM_P (operands[1])
9969 && mode == DImode
9970 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9971 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9972 && ! (rs6000_slow_unaligned_access (SImode,
9973 (MEM_ALIGN (operands[0]) > 32
9974 ? 32 : MEM_ALIGN (operands[0])))
9975 || rs6000_slow_unaligned_access (SImode,
9976 (MEM_ALIGN (operands[1]) > 32
9977 ? 32 : MEM_ALIGN (operands[1]))))
9978 && ! MEM_VOLATILE_P (operands [0])
9979 && ! MEM_VOLATILE_P (operands [1]))
9980 {
9981 emit_move_insn (adjust_address (operands[0], SImode, 0),
9982 adjust_address (operands[1], SImode, 0));
9983 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9984 adjust_address (copy_rtx (operands[1]), SImode, 4));
9985 return;
9986 }
9987
9988 if (can_create_pseudo_p () && MEM_P (operands[0])
9989 && !gpc_reg_operand (operands[1], mode))
9990 operands[1] = force_reg (mode, operands[1]);
9991
9992 /* Recognize the case where operand[1] is a reference to thread-local
9993 data and load its address to a register. */
9994 if (tls_referenced_p (operands[1]))
9995 {
9996 enum tls_model model;
9997 rtx tmp = operands[1];
9998 rtx addend = NULL;
9999
10000 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10001 {
10002 addend = XEXP (XEXP (tmp, 0), 1);
10003 tmp = XEXP (XEXP (tmp, 0), 0);
10004 }
10005
10006 gcc_assert (SYMBOL_REF_P (tmp));
10007 model = SYMBOL_REF_TLS_MODEL (tmp);
10008 gcc_assert (model != 0);
10009
10010 tmp = rs6000_legitimize_tls_address (tmp, model);
10011 if (addend)
10012 {
10013 tmp = gen_rtx_PLUS (mode, tmp, addend);
10014 tmp = force_operand (tmp, operands[0]);
10015 }
10016 operands[1] = tmp;
10017 }
10018
10019 /* 128-bit constant floating-point values on Darwin should really be loaded
10020 as two parts. However, this premature splitting is a problem when DFmode
10021 values can go into Altivec registers. */
10022 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
10023 && !reg_addr[DFmode].scalar_in_vmx_p)
10024 {
10025 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10026 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10027 DFmode);
10028 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10029 GET_MODE_SIZE (DFmode)),
10030 simplify_gen_subreg (DFmode, operands[1], mode,
10031 GET_MODE_SIZE (DFmode)),
10032 DFmode);
10033 return;
10034 }
10035
10036 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10037 p1:SD) if p1 is not of floating point class and p0 is spilled,
10038 since there is no analogous movsd_store for this case. */
10039 if (lra_in_progress && mode == DDmode
10040 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10041 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10042 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
10043 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10044 {
10045 enum reg_class cl;
10046 int regno = REGNO (SUBREG_REG (operands[1]));
10047
10048 if (!HARD_REGISTER_NUM_P (regno))
10049 {
10050 cl = reg_preferred_class (regno);
10051 regno = reg_renumber[regno];
10052 if (regno < 0)
10053 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10054 }
10055 if (regno >= 0 && ! FP_REGNO_P (regno))
10056 {
10057 mode = SDmode;
10058 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10059 operands[1] = SUBREG_REG (operands[1]);
10060 }
10061 }
10062 if (lra_in_progress
10063 && mode == SDmode
10064 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
10065 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10066 && (REG_P (operands[1])
10067 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
10068 {
10069 int regno = reg_or_subregno (operands[1]);
10070 enum reg_class cl;
10071
10072 if (!HARD_REGISTER_NUM_P (regno))
10073 {
10074 cl = reg_preferred_class (regno);
10075 gcc_assert (cl != NO_REGS);
10076 regno = reg_renumber[regno];
10077 if (regno < 0)
10078 regno = ira_class_hard_regs[cl][0];
10079 }
10080 if (FP_REGNO_P (regno))
10081 {
10082 if (GET_MODE (operands[0]) != DDmode)
10083 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10084 emit_insn (gen_movsd_store (operands[0], operands[1]));
10085 }
10086 else if (INT_REGNO_P (regno))
10087 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10088 else
10089 gcc_unreachable ();
10090 return;
10091 }
10092 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10093 p1:DD)) if p0 is not of floating point class and p1 is spilled,
10094 since there is no analogous movsd_load for this case. */
10095 if (lra_in_progress && mode == DDmode
10096 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
10097 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10098 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10099 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10100 {
10101 enum reg_class cl;
10102 int regno = REGNO (SUBREG_REG (operands[0]));
10103
10104 if (!HARD_REGISTER_NUM_P (regno))
10105 {
10106 cl = reg_preferred_class (regno);
10107 regno = reg_renumber[regno];
10108 if (regno < 0)
10109 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10110 }
10111 if (regno >= 0 && ! FP_REGNO_P (regno))
10112 {
10113 mode = SDmode;
10114 operands[0] = SUBREG_REG (operands[0]);
10115 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10116 }
10117 }
10118 if (lra_in_progress
10119 && mode == SDmode
10120 && (REG_P (operands[0])
10121 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
10122 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
10123 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10124 {
10125 int regno = reg_or_subregno (operands[0]);
10126 enum reg_class cl;
10127
10128 if (!HARD_REGISTER_NUM_P (regno))
10129 {
10130 cl = reg_preferred_class (regno);
10131 gcc_assert (cl != NO_REGS);
10132 regno = reg_renumber[regno];
10133 if (regno < 0)
10134 regno = ira_class_hard_regs[cl][0];
10135 }
10136 if (FP_REGNO_P (regno))
10137 {
10138 if (GET_MODE (operands[1]) != DDmode)
10139 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10140 emit_insn (gen_movsd_load (operands[0], operands[1]));
10141 }
10142 else if (INT_REGNO_P (regno))
10143 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10144 else
10145 gcc_unreachable ();
10146 return;
10147 }
10148
10149 /* FIXME: In the long term, this switch statement should go away
10150 and be replaced by a sequence of tests based on things like
10151 mode == Pmode. */
10152 switch (mode)
10153 {
10154 case E_HImode:
10155 case E_QImode:
10156 if (CONSTANT_P (operands[1])
10157 && !CONST_INT_P (operands[1]))
10158 operands[1] = force_const_mem (mode, operands[1]);
10159 break;
10160
10161 case E_TFmode:
10162 case E_TDmode:
10163 case E_IFmode:
10164 case E_KFmode:
10165 if (FLOAT128_2REG_P (mode))
10166 rs6000_eliminate_indexed_memrefs (operands);
10167 /* fall through */
10168
10169 case E_DFmode:
10170 case E_DDmode:
10171 case E_SFmode:
10172 case E_SDmode:
10173 if (CONSTANT_P (operands[1])
10174 && ! easy_fp_constant (operands[1], mode))
10175 operands[1] = force_const_mem (mode, operands[1]);
10176 break;
10177
10178 case E_V16QImode:
10179 case E_V8HImode:
10180 case E_V4SFmode:
10181 case E_V4SImode:
10182 case E_V2DFmode:
10183 case E_V2DImode:
10184 case E_V1TImode:
10185 if (CONSTANT_P (operands[1])
10186 && !easy_vector_constant (operands[1], mode))
10187 operands[1] = force_const_mem (mode, operands[1]);
10188 break;
10189
10190 case E_SImode:
10191 case E_DImode:
10192 /* Use the default pattern for the address of ELF small data. */
10193 if (TARGET_ELF
10194 && mode == Pmode
10195 && DEFAULT_ABI == ABI_V4
10196 && (SYMBOL_REF_P (operands[1])
10197 || GET_CODE (operands[1]) == CONST)
10198 && small_data_operand (operands[1], mode))
10199 {
10200 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10201 return;
10202 }
10203
10204 if (DEFAULT_ABI == ABI_V4
10205 && mode == Pmode && mode == SImode
10206 && flag_pic == 1 && got_operand (operands[1], mode))
10207 {
10208 emit_insn (gen_movsi_got (operands[0], operands[1]));
10209 return;
10210 }
10211
10212 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10213 && TARGET_NO_TOC
10214 && ! flag_pic
10215 && mode == Pmode
10216 && CONSTANT_P (operands[1])
10217 && GET_CODE (operands[1]) != HIGH
10218 && !CONST_INT_P (operands[1]))
10219 {
10220 rtx target = (!can_create_pseudo_p ()
10221 ? operands[0]
10222 : gen_reg_rtx (mode));
10223
10224 /* If this is a function address on -mcall-aixdesc,
10225 convert it to the address of the descriptor. */
10226 if (DEFAULT_ABI == ABI_AIX
10227 && SYMBOL_REF_P (operands[1])
10228 && XSTR (operands[1], 0)[0] == '.')
10229 {
10230 const char *name = XSTR (operands[1], 0);
10231 rtx new_ref;
10232 while (*name == '.')
10233 name++;
10234 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10235 CONSTANT_POOL_ADDRESS_P (new_ref)
10236 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10237 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10238 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10239 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10240 operands[1] = new_ref;
10241 }
10242
10243 if (DEFAULT_ABI == ABI_DARWIN)
10244 {
10245 #if TARGET_MACHO
10246 if (MACHO_DYNAMIC_NO_PIC_P)
10247 {
10248 /* Take care of any required data indirection. */
10249 operands[1] = rs6000_machopic_legitimize_pic_address (
10250 operands[1], mode, operands[0]);
10251 if (operands[0] != operands[1])
10252 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10253 return;
10254 }
10255 #endif
10256 emit_insn (gen_macho_high (target, operands[1]));
10257 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10258 return;
10259 }
10260
10261 emit_insn (gen_elf_high (target, operands[1]));
10262 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10263 return;
10264 }
10265
10266 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10267 and we have put it in the TOC, we just need to make a TOC-relative
10268 reference to it. */
10269 if (TARGET_TOC
10270 && SYMBOL_REF_P (operands[1])
10271 && use_toc_relative_ref (operands[1], mode))
10272 operands[1] = create_TOC_reference (operands[1], operands[0]);
10273 else if (mode == Pmode
10274 && CONSTANT_P (operands[1])
10275 && GET_CODE (operands[1]) != HIGH
10276 && ((REG_P (operands[0])
10277 && FP_REGNO_P (REGNO (operands[0])))
10278 || !CONST_INT_P (operands[1])
10279 || (num_insns_constant (operands[1], mode)
10280 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10281 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10282 && (TARGET_CMODEL == CMODEL_SMALL
10283 || can_create_pseudo_p ()
10284 || (REG_P (operands[0])
10285 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10286 {
10287
10288 #if TARGET_MACHO
10289 /* Darwin uses a special PIC legitimizer. */
10290 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10291 {
10292 operands[1] =
10293 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10294 operands[0]);
10295 if (operands[0] != operands[1])
10296 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10297 return;
10298 }
10299 #endif
10300
10301 /* If we are to limit the number of things we put in the TOC and
10302 this is a symbol plus a constant we can add in one insn,
10303 just put the symbol in the TOC and add the constant. */
10304 if (GET_CODE (operands[1]) == CONST
10305 && TARGET_NO_SUM_IN_TOC
10306 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10307 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10308 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10309 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
10310 && ! side_effects_p (operands[0]))
10311 {
10312 rtx sym =
10313 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10314 rtx other = XEXP (XEXP (operands[1], 0), 1);
10315
10316 sym = force_reg (mode, sym);
10317 emit_insn (gen_add3_insn (operands[0], sym, other));
10318 return;
10319 }
10320
10321 operands[1] = force_const_mem (mode, operands[1]);
10322
10323 if (TARGET_TOC
10324 && SYMBOL_REF_P (XEXP (operands[1], 0))
10325 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10326 {
10327 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10328 operands[0]);
10329 operands[1] = gen_const_mem (mode, tocref);
10330 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10331 }
10332 }
10333 break;
10334
10335 case E_TImode:
10336 if (!VECTOR_MEM_VSX_P (TImode))
10337 rs6000_eliminate_indexed_memrefs (operands);
10338 break;
10339
10340 case E_PTImode:
10341 rs6000_eliminate_indexed_memrefs (operands);
10342 break;
10343
10344 default:
10345 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10346 }
10347
10348 /* Above, we may have called force_const_mem which may have returned
10349 an invalid address. If we can, fix this up; otherwise, reload will
10350 have to deal with it. */
10351 if (MEM_P (operands[1]))
10352 operands[1] = validize_mem (operands[1]);
10353
10354 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10355 }
10356 \f
10357 /* Nonzero if we can use a floating-point register to pass this arg. */
10358 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10359 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10360 && (CUM)->fregno <= FP_ARG_MAX_REG \
10361 && TARGET_HARD_FLOAT)
10362
10363 /* Nonzero if we can use an AltiVec register to pass this arg. */
10364 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10365 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10366 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10367 && TARGET_ALTIVEC_ABI \
10368 && (NAMED))
10369
10370 /* Walk down the type tree of TYPE counting consecutive base elements.
10371 If *MODEP is VOIDmode, then set it to the first valid floating point
10372 or vector type. If a non-floating point or vector type is found, or
10373 if a floating point or vector type that doesn't match a non-VOIDmode
10374 *MODEP is found, then return -1, otherwise return the count in the
10375 sub-tree. */
10376
10377 static int
10378 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10379 {
10380 machine_mode mode;
10381 HOST_WIDE_INT size;
10382
10383 switch (TREE_CODE (type))
10384 {
10385 case REAL_TYPE:
10386 mode = TYPE_MODE (type);
10387 if (!SCALAR_FLOAT_MODE_P (mode))
10388 return -1;
10389
10390 if (*modep == VOIDmode)
10391 *modep = mode;
10392
10393 if (*modep == mode)
10394 return 1;
10395
10396 break;
10397
10398 case COMPLEX_TYPE:
10399 mode = TYPE_MODE (TREE_TYPE (type));
10400 if (!SCALAR_FLOAT_MODE_P (mode))
10401 return -1;
10402
10403 if (*modep == VOIDmode)
10404 *modep = mode;
10405
10406 if (*modep == mode)
10407 return 2;
10408
10409 break;
10410
10411 case VECTOR_TYPE:
10412 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10413 return -1;
10414
10415 /* Use V4SImode as representative of all 128-bit vector types. */
10416 size = int_size_in_bytes (type);
10417 switch (size)
10418 {
10419 case 16:
10420 mode = V4SImode;
10421 break;
10422 default:
10423 return -1;
10424 }
10425
10426 if (*modep == VOIDmode)
10427 *modep = mode;
10428
10429 /* Vector modes are considered to be opaque: two vectors are
10430 equivalent for the purposes of being homogeneous aggregates
10431 if they are the same size. */
10432 if (*modep == mode)
10433 return 1;
10434
10435 break;
10436
10437 case ARRAY_TYPE:
10438 {
10439 int count;
10440 tree index = TYPE_DOMAIN (type);
10441
10442 	/* Can't handle incomplete types or sizes that are not
10443 fixed. */
10444 if (!COMPLETE_TYPE_P (type)
10445 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10446 return -1;
10447
10448 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10449 if (count == -1
10450 || !index
10451 || !TYPE_MAX_VALUE (index)
10452 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10453 || !TYPE_MIN_VALUE (index)
10454 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10455 || count < 0)
10456 return -1;
10457
10458 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10459 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10460
10461 /* There must be no padding. */
10462 if (wi::to_wide (TYPE_SIZE (type))
10463 != count * GET_MODE_BITSIZE (*modep))
10464 return -1;
10465
10466 return count;
10467 }
10468
10469 case RECORD_TYPE:
10470 {
10471 int count = 0;
10472 int sub_count;
10473 tree field;
10474
10475 	/* Can't handle incomplete types or sizes that are not
10476 fixed. */
10477 if (!COMPLETE_TYPE_P (type)
10478 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10479 return -1;
10480
10481 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10482 {
10483 if (TREE_CODE (field) != FIELD_DECL)
10484 continue;
10485
10486 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10487 if (sub_count < 0)
10488 return -1;
10489 count += sub_count;
10490 }
10491
10492 /* There must be no padding. */
10493 if (wi::to_wide (TYPE_SIZE (type))
10494 != count * GET_MODE_BITSIZE (*modep))
10495 return -1;
10496
10497 return count;
10498 }
10499
10500 case UNION_TYPE:
10501 case QUAL_UNION_TYPE:
10502 {
10503 /* These aren't very interesting except in a degenerate case. */
10504 int count = 0;
10505 int sub_count;
10506 tree field;
10507
10508 	/* Can't handle incomplete types or sizes that are not
10509 fixed. */
10510 if (!COMPLETE_TYPE_P (type)
10511 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10512 return -1;
10513
10514 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10515 {
10516 if (TREE_CODE (field) != FIELD_DECL)
10517 continue;
10518
10519 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10520 if (sub_count < 0)
10521 return -1;
10522 count = count > sub_count ? count : sub_count;
10523 }
10524
10525 /* There must be no padding. */
10526 if (wi::to_wide (TYPE_SIZE (type))
10527 != count * GET_MODE_BITSIZE (*modep))
10528 return -1;
10529
10530 return count;
10531 }
10532
10533 default:
10534 break;
10535 }
10536
10537 return -1;
10538 }
10539
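/* For illustration only (hypothetical source-level types, not part of this
   file's interfaces): given

     struct hfa { double a; double b; double c; };

   the walk above returns 3 with *MODEP set to DFmode, whereas

     struct mixed { double a; int b; };

   returns -1, because the int field does not match the DFmode already
   recorded in *MODEP.  */
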
10542 /* If an argument whose type is described by TYPE and MODE is a homogeneous
10543 float or vector aggregate that is to be passed in FP/vector registers
10542 according to the ELFv2 ABI, return the homogeneous element mode in
10543 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10544
10545 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10546
10547 static bool
10548 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10549 machine_mode *elt_mode,
10550 int *n_elts)
10551 {
10552 /* Note that we do not accept complex types at the top level as
10553 homogeneous aggregates; these types are handled via the
10554 targetm.calls.split_complex_arg mechanism. Complex types
10555 can be elements of homogeneous aggregates, however. */
10556 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10557 && AGGREGATE_TYPE_P (type))
10558 {
10559 machine_mode field_mode = VOIDmode;
10560 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10561
10562 if (field_count > 0)
10563 {
10564 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10565 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10566
10567 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10568 up to AGGR_ARG_NUM_REG registers. */
10569 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10570 {
10571 if (elt_mode)
10572 *elt_mode = field_mode;
10573 if (n_elts)
10574 *n_elts = field_count;
10575 return true;
10576 }
10577 }
10578 }
10579
10580 if (elt_mode)
10581 *elt_mode = mode;
10582 if (n_elts)
10583 *n_elts = 1;
10584 return false;
10585 }
10586
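/* A minimal usage sketch (hypothetical caller; the real callers appear
   throughout the argument-passing code below):

     machine_mode elt_mode;
     int n_elts;
     if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
                                                &elt_mode, &n_elts))
       use_fp_or_vector_regs (elt_mode, n_elts);   <-- hypothetical helper

   On failure, ELT_MODE is simply MODE and N_ELTS is 1, so callers may use
   the outputs unconditionally.  */
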
10587 /* Return a nonzero value to say to return the function value in
10588 memory, just as large structures are always returned. TYPE will be
10589 the data type of the value, and FNTYPE will be the type of the
10590 function doing the returning, or @code{NULL} for libcalls.
10591
10592 The AIX ABI for the RS/6000 specifies that all structures are
10593 returned in memory. The Darwin ABI does the same.
10594
10595 For the Darwin 64 Bit ABI, a function result can be returned in
10596 registers or in memory, depending on the size of the return data
10597 type. If it is returned in registers, the value occupies the same
10598 registers as it would if it were the first and only function
10599 argument. Otherwise, the function places its result in memory at
10600 the location pointed to by GPR3.
10601
10602 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10603 but a draft put them in memory, and GCC used to implement the draft
10604 instead of the final standard. Therefore, aix_struct_return
10605 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10606 compatibility can change DRAFT_V4_STRUCT_RET to override the
10607 default, and -m switches get the final word. See
10608 rs6000_option_override_internal for more details.
10609
10610 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10611 long double support is enabled. These values are returned in memory.
10612
10613 int_size_in_bytes returns -1 for variable size objects, which go in
10614 memory always. The cast to unsigned makes -1 > 8. */
10615
10616 static bool
10617 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10618 {
10619 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10620 if (TARGET_MACHO
10621 && rs6000_darwin64_abi
10622 && TREE_CODE (type) == RECORD_TYPE
10623 && int_size_in_bytes (type) > 0)
10624 {
10625 CUMULATIVE_ARGS valcum;
10626 rtx valret;
10627
10628 valcum.words = 0;
10629 valcum.fregno = FP_ARG_MIN_REG;
10630 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10631 /* Do a trial code generation as if this were going to be passed
10632 as an argument; if any part goes in memory, we return NULL. */
10633 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10634 if (valret)
10635 return false;
10636 /* Otherwise fall through to more conventional ABI rules. */
10637 }
10638
10639 /* The ELFv2 ABI returns homogeneous FP/vector aggregates in registers.  */
10640 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10641 NULL, NULL))
10642 return false;
10643
10644 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers.  */
10645 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10646 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10647 return false;
10648
10649 if (AGGREGATE_TYPE_P (type)
10650 && (aix_struct_return
10651 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10652 return true;
10653
10654 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10655 modes only exist for GCC vector types if -maltivec. */
10656 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10657 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10658 return false;
10659
10660 /* Return synthetic vectors in memory. */
10661 if (TREE_CODE (type) == VECTOR_TYPE
10662 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10663 {
10664 static bool warned_for_return_big_vectors = false;
10665 if (!warned_for_return_big_vectors)
10666 {
10667 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10668 "non-standard ABI extension with no compatibility "
10669 "guarantee");
10670 warned_for_return_big_vectors = true;
10671 }
10672 return true;
10673 }
10674
10675 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10676 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10677 return true;
10678
10679 return false;
10680 }
10681
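/* Some illustrative outcomes (hypothetical user-level types, assuming an
   ELFv2 target with hard float):

     struct { double a; double b; }   -- homogeneous aggregate, in FPRs
     struct { char c[16]; }           -- at most 16 bytes, in GPRs
     struct { char c[24]; }           -- too large, returned in memory

   On AIX (aix_struct_return) all three would be returned in memory.  */
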
10682 /* Specify whether values returned in registers should be at the most
10683 significant end of a register. We want aggregates returned by
10684 value to match the way aggregates are passed to functions. */
10685
10686 static bool
10687 rs6000_return_in_msb (const_tree valtype)
10688 {
10689 return (DEFAULT_ABI == ABI_ELFv2
10690 && BYTES_BIG_ENDIAN
10691 && AGGREGATE_TYPE_P (valtype)
10692 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10693 == PAD_UPWARD));
10694 }
10695
10696 #ifdef HAVE_AS_GNU_ATTRIBUTE
10697 /* Return TRUE if a call to function FNDECL may be one that
10698 potentially affects the function calling ABI of the object file. */
10699
10700 static bool
10701 call_ABI_of_interest (tree fndecl)
10702 {
10703 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10704 {
10705 struct cgraph_node *c_node;
10706
10707 /* Libcalls are always interesting. */
10708 if (fndecl == NULL_TREE)
10709 return true;
10710
10711 /* Any call to an external function is interesting. */
10712 if (DECL_EXTERNAL (fndecl))
10713 return true;
10714
10715 /* Interesting functions that we are emitting in this object file. */
10716 c_node = cgraph_node::get (fndecl);
10717 c_node = c_node->ultimate_alias_target ();
10718 return !c_node->only_called_directly_p ();
10719 }
10720 return false;
10721 }
10722 #endif
10723
10724 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10725 for a call to a function whose data type is FNTYPE.
10726 For a library call, FNTYPE is 0 and RETURN_MODE is the mode of the return value.
10727
10728 For incoming args we set the number of arguments in the prototype to a large value
10729 so we never return a PARALLEL. */
10730
10731 void
10732 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10733 rtx libname ATTRIBUTE_UNUSED, int incoming,
10734 int libcall, int n_named_args,
10735 tree fndecl,
10736 machine_mode return_mode ATTRIBUTE_UNUSED)
10737 {
10738 static CUMULATIVE_ARGS zero_cumulative;
10739
10740 *cum = zero_cumulative;
10741 cum->words = 0;
10742 cum->fregno = FP_ARG_MIN_REG;
10743 cum->vregno = ALTIVEC_ARG_MIN_REG;
10744 cum->prototype = (fntype && prototype_p (fntype));
10745 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10746 ? CALL_LIBCALL : CALL_NORMAL);
10747 cum->sysv_gregno = GP_ARG_MIN_REG;
10748 cum->stdarg = stdarg_p (fntype);
10749 cum->libcall = libcall;
10750
10751 cum->nargs_prototype = 0;
10752 if (incoming || cum->prototype)
10753 cum->nargs_prototype = n_named_args;
10754
10755 /* Check for a longcall attribute. */
10756 if ((!fntype && rs6000_default_long_calls)
10757 || (fntype
10758 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10759 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10760 cum->call_cookie |= CALL_LONG;
10761 else if (DEFAULT_ABI != ABI_DARWIN)
10762 {
10763 bool is_local = (fndecl
10764 && !DECL_EXTERNAL (fndecl)
10765 && !DECL_WEAK (fndecl)
10766 && (*targetm.binds_local_p) (fndecl));
10767 if (is_local)
10768 ;
10769 else if (flag_plt)
10770 {
10771 if (fntype
10772 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10773 cum->call_cookie |= CALL_LONG;
10774 }
10775 else
10776 {
10777 if (!(fntype
10778 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10779 cum->call_cookie |= CALL_LONG;
10780 }
10781 }
10782
10783 if (TARGET_DEBUG_ARG)
10784 {
10785 fprintf (stderr, "\ninit_cumulative_args:");
10786 if (fntype)
10787 {
10788 tree ret_type = TREE_TYPE (fntype);
10789 fprintf (stderr, " ret code = %s,",
10790 get_tree_code_name (TREE_CODE (ret_type)));
10791 }
10792
10793 if (cum->call_cookie & CALL_LONG)
10794 fprintf (stderr, " longcall,");
10795
10796 fprintf (stderr, " proto = %d, nargs = %d\n",
10797 cum->prototype, cum->nargs_prototype);
10798 }
10799
10800 #ifdef HAVE_AS_GNU_ATTRIBUTE
10801 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10802 {
10803 cum->escapes = call_ABI_of_interest (fndecl);
10804 if (cum->escapes)
10805 {
10806 tree return_type;
10807
10808 if (fntype)
10809 {
10810 return_type = TREE_TYPE (fntype);
10811 return_mode = TYPE_MODE (return_type);
10812 }
10813 else
10814 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10815
10816 if (return_type != NULL)
10817 {
10818 if (TREE_CODE (return_type) == RECORD_TYPE
10819 && TYPE_TRANSPARENT_AGGR (return_type))
10820 {
10821 return_type = TREE_TYPE (first_field (return_type));
10822 return_mode = TYPE_MODE (return_type);
10823 }
10824 if (AGGREGATE_TYPE_P (return_type)
10825 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10826 <= 8))
10827 rs6000_returns_struct = true;
10828 }
10829 if (SCALAR_FLOAT_MODE_P (return_mode))
10830 {
10831 rs6000_passes_float = true;
10832 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10833 && (FLOAT128_IBM_P (return_mode)
10834 || FLOAT128_IEEE_P (return_mode)
10835 || (return_type != NULL
10836 && (TYPE_MAIN_VARIANT (return_type)
10837 == long_double_type_node))))
10838 rs6000_passes_long_double = true;
10839
10840 /* Note if we passed or returned an IEEE 128-bit type.  We changed
10841 the mangling for these types, and we may need to make an alias
10842 with the old mangling. */
10843 if (FLOAT128_IEEE_P (return_mode))
10844 rs6000_passes_ieee128 = true;
10845 }
10846 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10847 rs6000_passes_vector = true;
10848 }
10849 }
10850 #endif
10851
10852 if (fntype
10853 && !TARGET_ALTIVEC
10854 && TARGET_ALTIVEC_ABI
10855 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10856 {
10857 error ("cannot return value in vector register because"
10858 " altivec instructions are disabled, use %qs"
10859 " to enable them", "-maltivec");
10860 }
10861 }
10862 \f
10863 /* The mode the ABI uses for a word. This is not the same as word_mode
10864 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10865
10866 static scalar_int_mode
10867 rs6000_abi_word_mode (void)
10868 {
10869 return TARGET_32BIT ? SImode : DImode;
10870 }
10871
10872 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10873 static char *
10874 rs6000_offload_options (void)
10875 {
10876 if (TARGET_64BIT)
10877 return xstrdup ("-foffload-abi=lp64");
10878 else
10879 return xstrdup ("-foffload-abi=ilp32");
10880 }
10881
10882 /* On rs6000, function arguments are promoted, as are function return
10883 values. */
10884
10885 static machine_mode
10886 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10887 machine_mode mode,
10888 int *punsignedp ATTRIBUTE_UNUSED,
10889 const_tree, int)
10890 {
10891 PROMOTE_MODE (mode, *punsignedp, type);
10892
10893 return mode;
10894 }
10895
10896 /* Return true if TYPE must be passed on the stack and not in registers. */
10897
10898 static bool
10899 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10900 {
10901 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10902 return must_pass_in_stack_var_size (mode, type);
10903 else
10904 return must_pass_in_stack_var_size_or_pad (mode, type);
10905 }
10906
10907 static inline bool
10908 is_complex_IBM_long_double (machine_mode mode)
10909 {
10910 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10911 }
10912
10913 /* Whether ABI_V4 passes MODE args to a function in floating point
10914 registers. */
10915
10916 static bool
10917 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10918 {
10919 if (!TARGET_HARD_FLOAT)
10920 return false;
10921 if (mode == DFmode)
10922 return true;
10923 if (mode == SFmode && named)
10924 return true;
10925 /* ABI_V4 passes complex IBM long double in 8 gprs.
10926 Stupid, but we can't change the ABI now. */
10927 if (is_complex_IBM_long_double (mode))
10928 return false;
10929 if (FLOAT128_2REG_P (mode))
10930 return true;
10931 if (DECIMAL_FLOAT_MODE_P (mode))
10932 return true;
10933 return false;
10934 }
10935
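/* Illustrative outcomes (hypothetical, assuming TARGET_HARD_FLOAT):

     DFmode, named or not                         -> true  (always in an FPR)
     SFmode, named                                -> true
     SFmode, unnamed                              -> false (varargs floats go in GPRs)
     ICmode, or TCmode when long double is IBM    -> false (complex IBM long double in GPRs)
     TDmode (_Decimal128)                         -> true  (DECIMAL_FLOAT_MODE_P)  */
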
10936 /* Implement TARGET_FUNCTION_ARG_PADDING.
10937
10938 For the AIX ABI structs are always stored left shifted in their
10939 argument slot. */
10940
10941 static pad_direction
10942 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10943 {
10944 #ifndef AGGREGATE_PADDING_FIXED
10945 #define AGGREGATE_PADDING_FIXED 0
10946 #endif
10947 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10948 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10949 #endif
10950
10951 if (!AGGREGATE_PADDING_FIXED)
10952 {
10953 /* GCC used to pass structures of the same size as integer types as
10954 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10955 That is, structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10956 passed padded downward, except that -mstrict-align further
10957 muddied the water in that multi-component structures of 2 and 4
10958 bytes in size were passed padded upward.
10959
10960 The following arranges for best compatibility with previous
10961 versions of gcc, but removes the -mstrict-align dependency. */
10962 if (BYTES_BIG_ENDIAN)
10963 {
10964 HOST_WIDE_INT size = 0;
10965
10966 if (mode == BLKmode)
10967 {
10968 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10969 size = int_size_in_bytes (type);
10970 }
10971 else
10972 size = GET_MODE_SIZE (mode);
10973
10974 if (size == 1 || size == 2 || size == 4)
10975 return PAD_DOWNWARD;
10976 }
10977 return PAD_UPWARD;
10978 }
10979
10980 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10981 {
10982 if (type != 0 && AGGREGATE_TYPE_P (type))
10983 return PAD_UPWARD;
10984 }
10985
10986 /* Fall back to the default. */
10987 return default_function_arg_padding (mode, type);
10988 }
10989
10990 /* If defined, a C expression that gives the alignment boundary, in bits,
10991 of an argument with the specified mode and type. If it is not defined,
10992 PARM_BOUNDARY is used for all arguments.
10993
10994 V.4 wants long longs and doubles to be double word aligned. Just
10995 testing the mode size is a boneheaded way to do this as it means
10996 that other types such as complex int are also double word aligned.
10997 However, we're stuck with this because changing the ABI might break
10998 existing library interfaces.
10999
11000 Quadword align Altivec/VSX vectors.
11001 Quadword align large synthetic vector types. */
11002
11003 static unsigned int
11004 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11005 {
11006 machine_mode elt_mode;
11007 int n_elts;
11008
11009 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11010
11011 if (DEFAULT_ABI == ABI_V4
11012 && (GET_MODE_SIZE (mode) == 8
11013 || (TARGET_HARD_FLOAT
11014 && !is_complex_IBM_long_double (mode)
11015 && FLOAT128_2REG_P (mode))))
11016 return 64;
11017 else if (FLOAT128_VECTOR_P (mode))
11018 return 128;
11019 else if (type && TREE_CODE (type) == VECTOR_TYPE
11020 && int_size_in_bytes (type) >= 8
11021 && int_size_in_bytes (type) < 16)
11022 return 64;
11023 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11024 || (type && TREE_CODE (type) == VECTOR_TYPE
11025 && int_size_in_bytes (type) >= 16))
11026 return 128;
11027
11028 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11029 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11030 -mcompat-align-parm is used. */
11031 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11032 || DEFAULT_ABI == ABI_ELFv2)
11033 && type && TYPE_ALIGN (type) > 64)
11034 {
11035 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11036 or homogeneous float/vector aggregates here. We already handled
11037 vector aggregates above, but still need to check for float here. */
11038 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11039 && !SCALAR_FLOAT_MODE_P (elt_mode));
11040
11041 /* We used to check for BLKmode instead of the above aggregate type
11042 check. Warn when this results in any difference to the ABI. */
11043 if (aggregate_p != (mode == BLKmode))
11044 {
11045 static bool warned;
11046 if (!warned && warn_psabi)
11047 {
11048 warned = true;
11049 inform (input_location,
11050 "the ABI of passing aggregates with %d-byte alignment"
11051 " has changed in GCC 5",
11052 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11053 }
11054 }
11055
11056 if (aggregate_p)
11057 return 128;
11058 }
11059
11060 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11061 implement the "aggregate type" check as a BLKmode check here; this
11062 means certain aggregate types are in fact not aligned. */
11063 if (TARGET_MACHO && rs6000_darwin64_abi
11064 && mode == BLKmode
11065 && type && TYPE_ALIGN (type) > 64)
11066 return 128;
11067
11068 return PARM_BOUNDARY;
11069 }
11070
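/* Illustrative boundaries (hypothetical types):

     long long under ABI_V4                   -> 64  (doubleword)
     vector double (VSX, 16 bytes)            -> 128 (quadword)
     __attribute__((vector_size (8))) int     -> 64  (small synthetic vector)
     struct with 16-byte alignment, ELFv2     -> 128 (quadword)
     everything else                          -> PARM_BOUNDARY  */
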
11071 /* The offset in words to the start of the parameter save area. */
11072
11073 static unsigned int
11074 rs6000_parm_offset (void)
11075 {
11076 return (DEFAULT_ABI == ABI_V4 ? 2
11077 : DEFAULT_ABI == ABI_ELFv2 ? 4
11078 : 6);
11079 }
11080
11081 /* For a function parm of MODE and TYPE, return the starting word in
11082 the parameter area. NWORDS of the parameter area are already used. */
11083
11084 static unsigned int
11085 rs6000_parm_start (machine_mode mode, const_tree type,
11086 unsigned int nwords)
11087 {
11088 unsigned int align;
11089
11090 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11091 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11092 }
11093
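/* A worked example (hypothetical values): under ELFv2 the parameter save
   area starts 4 words into the frame (rs6000_parm_offset () == 4).  For an
   argument with a 128-bit boundary, ALIGN is 128 / PARM_BOUNDARY - 1 == 1
   on a 64-bit target.  With NWORDS == 3 words already used, the argument
   starts at word 3 + (-(4 + 3) & 1) == 4; the absolute word number
   (offset + start) is then 8, an even number, hence 16-byte aligned as
   required.  */
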
11094 /* Compute the size (in words) of a function argument. */
11095
11096 static unsigned long
11097 rs6000_arg_size (machine_mode mode, const_tree type)
11098 {
11099 unsigned long size;
11100
11101 if (mode != BLKmode)
11102 size = GET_MODE_SIZE (mode);
11103 else
11104 size = int_size_in_bytes (type);
11105
11106 if (TARGET_32BIT)
11107 return (size + 3) >> 2;
11108 else
11109 return (size + 7) >> 3;
11110 }
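
/* For example (hypothetical argument): a BLKmode struct of 10 bytes
   occupies (10 + 7) >> 3 == 2 doublewords on a 64-bit target, and
   (10 + 3) >> 2 == 3 words on a 32-bit target.  */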
11111 \f
11112 /* Use this to flush pending int fields. */
11113
11114 static void
11115 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11116 HOST_WIDE_INT bitpos, int final)
11117 {
11118 unsigned int startbit, endbit;
11119 int intregs, intoffset;
11120
11121 /* Handle the situation where a float is taking up the first half
11122 of the GPR, and the other half is empty (typically due to
11123 alignment restrictions).  We can detect this by an 8-byte-aligned
11124 int field, or by seeing that this is the final flush for this
11125 argument. Count the word and continue on. */
11126 if (cum->floats_in_gpr == 1
11127 && (cum->intoffset % 64 == 0
11128 || (cum->intoffset == -1 && final)))
11129 {
11130 cum->words++;
11131 cum->floats_in_gpr = 0;
11132 }
11133
11134 if (cum->intoffset == -1)
11135 return;
11136
11137 intoffset = cum->intoffset;
11138 cum->intoffset = -1;
11139 cum->floats_in_gpr = 0;
11140
11141 if (intoffset % BITS_PER_WORD != 0)
11142 {
11143 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11144 if (!int_mode_for_size (bits, 0).exists ())
11145 {
11146 /* We couldn't find an appropriate mode, which happens,
11147 e.g., in packed structs when there are 3 bytes to load.
11148 Move intoffset back to the beginning of the word in this
11149 case. */
11150 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11151 }
11152 }
11153
11154 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11155 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11156 intregs = (endbit - startbit) / BITS_PER_WORD;
11157 cum->words += intregs;
11158 /* words should be unsigned. */
11159 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11160 {
11161 int pad = (endbit/BITS_PER_WORD) - cum->words;
11162 cum->words += pad;
11163 }
11164 }
11165
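/* A worked example (hypothetical values, BITS_PER_WORD == 64): with
   pending int fields starting at cum->intoffset == 32 and BITPOS == 96,
   startbit is ROUND_DOWN (32, 64) == 0 and endbit is
   ROUND_UP (96, 64) == 128, so two full GPR words are counted.  */
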
11166 /* The darwin64 ABI calls for us to recurse down through structs,
11167 looking for elements passed in registers. Unfortunately, we have
11168 to track the int register count here as well, because of misalignments
11169 in powerpc alignment mode. */
11170
11171 static void
11172 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11173 const_tree type,
11174 HOST_WIDE_INT startbitpos)
11175 {
11176 tree f;
11177
11178 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11179 if (TREE_CODE (f) == FIELD_DECL)
11180 {
11181 HOST_WIDE_INT bitpos = startbitpos;
11182 tree ftype = TREE_TYPE (f);
11183 machine_mode mode;
11184 if (ftype == error_mark_node)
11185 continue;
11186 mode = TYPE_MODE (ftype);
11187
11188 if (DECL_SIZE (f) != 0
11189 && tree_fits_uhwi_p (bit_position (f)))
11190 bitpos += int_bit_position (f);
11191
11192 /* ??? FIXME: else assume zero offset. */
11193
11194 if (TREE_CODE (ftype) == RECORD_TYPE)
11195 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11196 else if (USE_FP_FOR_ARG_P (cum, mode))
11197 {
11198 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11199 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11200 cum->fregno += n_fpregs;
11201 /* Single-precision floats present a special problem for
11202 us, because they are smaller than an 8-byte GPR, and so
11203 the structure-packing rules combined with the standard
11204 varargs behavior mean that we want to pack float/float
11205 and float/int combinations into a single register's
11206 space. This is complicated by the arg advance flushing,
11207 which works on arbitrarily large groups of int-type
11208 fields. */
11209 if (mode == SFmode)
11210 {
11211 if (cum->floats_in_gpr == 1)
11212 {
11213 /* Two floats in a word; count the word and reset
11214 the float count. */
11215 cum->words++;
11216 cum->floats_in_gpr = 0;
11217 }
11218 else if (bitpos % 64 == 0)
11219 {
11220 /* A float at the beginning of an 8-byte word;
11221 count it and put off adjusting cum->words until
11222 we see if an arg advance flush is going to do it
11223 for us. */
11224 cum->floats_in_gpr++;
11225 }
11226 else
11227 {
11228 /* The float is at the end of a word, preceded
11229 by integer fields, so the arg advance flush
11230 just above has already set cum->words and
11231 everything is taken care of. */
11232 }
11233 }
11234 else
11235 cum->words += n_fpregs;
11236 }
11237 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11238 {
11239 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11240 cum->vregno++;
11241 cum->words += 2;
11242 }
11243 else if (cum->intoffset == -1)
11244 cum->intoffset = bitpos;
11245 }
11246 }
11247
11250 /* Check for an item that needs to be considered specially under the Darwin
11251 64-bit ABI.  These are record types where the mode is BLKmode or the structure is
11250 8 bytes in size. */
11251 static int
11252 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11253 {
11254 return rs6000_darwin64_abi
11255 && ((mode == BLKmode
11256 && TREE_CODE (type) == RECORD_TYPE
11257 && int_size_in_bytes (type) > 0)
11258 || (type && TREE_CODE (type) == RECORD_TYPE
11259 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11260 }
11261
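/* For example (hypothetical types, under the Darwin 64-bit ABI):
   struct { int a; int b; } is 8 bytes and so is checked specially even
   if it has a non-BLK mode, while struct { char c[3]; } is BLKmode and
   also qualifies.  */
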
11262 /* Update the data in CUM to advance over an argument
11263 of mode MODE and data type TYPE.
11264 (TYPE is null for libcalls where that information may not be available.)
11265
11266 Note that for args passed by reference, function_arg will be called
11267 with MODE and TYPE set to that of the pointer to the arg, not the arg
11268 itself. */
11269
11270 static void
11271 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11272 const_tree type, bool named, int depth)
11273 {
11274 machine_mode elt_mode;
11275 int n_elts;
11276
11277 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11278
11279 /* Only tick off an argument if we're not recursing. */
11280 if (depth == 0)
11281 cum->nargs_prototype--;
11282
11283 #ifdef HAVE_AS_GNU_ATTRIBUTE
11284 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11285 && cum->escapes)
11286 {
11287 if (SCALAR_FLOAT_MODE_P (mode))
11288 {
11289 rs6000_passes_float = true;
11290 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11291 && (FLOAT128_IBM_P (mode)
11292 || FLOAT128_IEEE_P (mode)
11293 || (type != NULL
11294 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11295 rs6000_passes_long_double = true;
11296
11297 /* Note if we passed or returned an IEEE 128-bit type.  We changed the
11298 mangling for these types, and we may need to make an alias with
11299 the old mangling. */
11300 if (FLOAT128_IEEE_P (mode))
11301 rs6000_passes_ieee128 = true;
11302 }
11303 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11304 rs6000_passes_vector = true;
11305 }
11306 #endif
11307
11308 if (TARGET_ALTIVEC_ABI
11309 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11310 || (type && TREE_CODE (type) == VECTOR_TYPE
11311 && int_size_in_bytes (type) == 16)))
11312 {
11313 bool stack = false;
11314
11315 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11316 {
11317 cum->vregno += n_elts;
11318
11319 if (!TARGET_ALTIVEC)
11320 error ("cannot pass argument in vector register because"
11321 " altivec instructions are disabled, use %qs"
11322 " to enable them", "-maltivec");
11323
11324 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11325 even if it is going to be passed in a vector register.
11326 Darwin does the same for variable-argument functions. */
11327 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11328 && TARGET_64BIT)
11329 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11330 stack = true;
11331 }
11332 else
11333 stack = true;
11334
11335 if (stack)
11336 {
11337 int align;
11338
11339 /* Vector parameters must be 16-byte aligned. In 32-bit
11340 mode this means we need to take into account the offset
11341 to the parameter save area. In 64-bit mode, they just
11342 have to start on an even word, since the parameter save
11343 area is 16-byte aligned. */
11344 if (TARGET_32BIT)
11345 align = -(rs6000_parm_offset () + cum->words) & 3;
11346 else
11347 align = cum->words & 1;
11348 cum->words += align + rs6000_arg_size (mode, type);
11349
11350 if (TARGET_DEBUG_ARG)
11351 {
11352 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11353 cum->words, align);
11354 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11355 cum->nargs_prototype, cum->prototype,
11356 GET_MODE_NAME (mode));
11357 }
11358 }
11359 }
11360 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11361 {
11362 int size = int_size_in_bytes (type);
11363 /* Variable-sized types have size == -1 and are
11364 treated as if consisting entirely of ints.
11365 Pad to 16 byte boundary if needed. */
11366 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11367 && (cum->words % 2) != 0)
11368 cum->words++;
11369 /* For varargs, we can just go up by the size of the struct. */
11370 if (!named)
11371 cum->words += (size + 7) / 8;
11372 else
11373 {
11374 /* It is tempting to say the int register count just goes up by
11375 sizeof(type)/8, but this is wrong in a case such as
11376 { int; double; int; } [powerpc alignment]. We have to
11377 grovel through the fields for these too. */
11378 cum->intoffset = 0;
11379 cum->floats_in_gpr = 0;
11380 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11381 rs6000_darwin64_record_arg_advance_flush (cum,
11382 size * BITS_PER_UNIT, 1);
11383 }
11384 if (TARGET_DEBUG_ARG)
11385 {
11386 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11387 cum->words, TYPE_ALIGN (type), size);
11388 fprintf (stderr,
11389 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11390 cum->nargs_prototype, cum->prototype,
11391 GET_MODE_NAME (mode));
11392 }
11393 }
11394 else if (DEFAULT_ABI == ABI_V4)
11395 {
11396 if (abi_v4_pass_in_fpr (mode, named))
11397 {
11398 /* _Decimal128 must use an even/odd register pair. This assumes
11399 that the register number is odd when fregno is odd. */
11400 if (mode == TDmode && (cum->fregno % 2) == 1)
11401 cum->fregno++;
11402
11403 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11404 <= FP_ARG_V4_MAX_REG)
11405 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11406 else
11407 {
11408 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11409 if (mode == DFmode || FLOAT128_IBM_P (mode)
11410 || mode == DDmode || mode == TDmode)
11411 cum->words += cum->words & 1;
11412 cum->words += rs6000_arg_size (mode, type);
11413 }
11414 }
11415 else
11416 {
11417 int n_words = rs6000_arg_size (mode, type);
11418 int gregno = cum->sysv_gregno;
11419
11420 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11421 As does any other 2 word item such as complex int due to a
11422 historical mistake. */
11423 if (n_words == 2)
11424 gregno += (1 - gregno) & 1;
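	      /* A clarifying note: (1 - gregno) & 1 is 1 exactly when GREGNO
		 is even, so this rounds an even GREGNO up to the next odd
		 register number; pairs always start at r3, r5, r7 or r9.  */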
11425
11426 /* Multi-reg args are not split between registers and stack. */
11427 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11428 {
11429 /* Long long is aligned on the stack. So are other 2 word
11430 items such as complex int due to a historical mistake. */
11431 if (n_words == 2)
11432 cum->words += cum->words & 1;
11433 cum->words += n_words;
11434 }
11435
11436 /* Note: we continue to accumulate gregno even after we've started
11437 spilling to the stack; the overshoot communicates to
11438 expand_builtin_saveregs that spilling has begun.  */
11439 cum->sysv_gregno = gregno + n_words;
11440 }
11441
11442 if (TARGET_DEBUG_ARG)
11443 {
11444 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11445 cum->words, cum->fregno);
11446 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11447 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11448 fprintf (stderr, "mode = %4s, named = %d\n",
11449 GET_MODE_NAME (mode), named);
11450 }
11451 }
11452 else
11453 {
11454 int n_words = rs6000_arg_size (mode, type);
11455 int start_words = cum->words;
11456 int align_words = rs6000_parm_start (mode, type, start_words);
11457
11458 cum->words = align_words + n_words;
11459
11460 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11461 {
11462 /* _Decimal128 must be passed in an even/odd float register pair.
11463 This assumes that the register number is odd when fregno is
11464 odd. */
11465 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11466 cum->fregno++;
11467 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11468 }
11469
11470 if (TARGET_DEBUG_ARG)
11471 {
11472 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11473 cum->words, cum->fregno);
11474 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11475 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11476 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11477 named, align_words - start_words, depth);
11478 }
11479 }
11480 }
11481
11482 static void
11483 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11484 const_tree type, bool named)
11485 {
11486 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11487 0);
11488 }
11489
11490 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11491 structure between cum->intoffset and bitpos to integer registers. */
11492
11493 static void
11494 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11495 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11496 {
11497 machine_mode mode;
11498 unsigned int regno;
11499 unsigned int startbit, endbit;
11500 int this_regno, intregs, intoffset;
11501 rtx reg;
11502
11503 if (cum->intoffset == -1)
11504 return;
11505
11506 intoffset = cum->intoffset;
11507 cum->intoffset = -1;
11508
11509 /* If this is the trailing part of a word, try to only load that
11510 much into the register. Otherwise load the whole register. Note
11511 that in the latter case we may pick up unwanted bits. It's not a
11512 problem at the moment, but we may wish to revisit this.  */
11513
11514 if (intoffset % BITS_PER_WORD != 0)
11515 {
11516 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11517 if (!int_mode_for_size (bits, 0).exists (&mode))
11518 {
11519 /* We couldn't find an appropriate mode, which happens,
11520 e.g., in packed structs when there are 3 bytes to load.
11521 Move intoffset back to the beginning of the word in this
11522 case. */
11523 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11524 mode = word_mode;
11525 }
11526 }
11527 else
11528 mode = word_mode;
11529
11530 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11531 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11532 intregs = (endbit - startbit) / BITS_PER_WORD;
11533 this_regno = cum->words + intoffset / BITS_PER_WORD;
11534
11535 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11536 cum->use_stack = 1;
11537
11538 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11539 if (intregs <= 0)
11540 return;
11541
11542 intoffset /= BITS_PER_UNIT;
11543 do
11544 {
11545 regno = GP_ARG_MIN_REG + this_regno;
11546 reg = gen_rtx_REG (mode, regno);
11547 rvec[(*k)++] =
11548 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11549
11550 this_regno += 1;
11551 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11552 mode = word_mode;
11553 intregs -= 1;
11554 }
11555 while (intregs > 0);
11556 }
11557
11558 /* Recursive workhorse for the following. */
11559
11560 static void
11561 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11562 HOST_WIDE_INT startbitpos, rtx rvec[],
11563 int *k)
11564 {
11565 tree f;
11566
11567 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11568 if (TREE_CODE (f) == FIELD_DECL)
11569 {
11570 HOST_WIDE_INT bitpos = startbitpos;
11571 tree ftype = TREE_TYPE (f);
11572 machine_mode mode;
11573 if (ftype == error_mark_node)
11574 continue;
11575 mode = TYPE_MODE (ftype);
11576
11577 if (DECL_SIZE (f) != 0
11578 && tree_fits_uhwi_p (bit_position (f)))
11579 bitpos += int_bit_position (f);
11580
11581 /* ??? FIXME: else assume zero offset. */
11582
11583 if (TREE_CODE (ftype) == RECORD_TYPE)
11584 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11585 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11586 {
11587 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11588 #if 0
11589 switch (mode)
11590 {
11591 case E_SCmode: mode = SFmode; break;
11592 case E_DCmode: mode = DFmode; break;
11593 case E_TCmode: mode = TFmode; break;
11594 default: break;
11595 }
11596 #endif
11597 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11598 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11599 {
11600 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11601 && (mode == TFmode || mode == TDmode));
11602 /* Long double or _Decimal128 split over regs and memory. */
11603 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11604 cum->use_stack = 1;
11605 }
11606 rvec[(*k)++]
11607 = gen_rtx_EXPR_LIST (VOIDmode,
11608 gen_rtx_REG (mode, cum->fregno++),
11609 GEN_INT (bitpos / BITS_PER_UNIT));
11610 if (FLOAT128_2REG_P (mode))
11611 cum->fregno++;
11612 }
11613 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11614 {
11615 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11616 rvec[(*k)++]
11617 = gen_rtx_EXPR_LIST (VOIDmode,
11618 gen_rtx_REG (mode, cum->vregno++),
11619 GEN_INT (bitpos / BITS_PER_UNIT));
11620 }
11621 else if (cum->intoffset == -1)
11622 cum->intoffset = bitpos;
11623 }
11624 }
11625
11626 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11627 the register(s) to be used for each field and subfield of a struct
11628 being passed by value, along with the offset of where the
11629 register's value may be found in the block.  FP fields go in FP
11630 registers, vector fields go in vector registers, and everything
11631 else goes in int registers, packed as in memory.
11632
11633 This code is also used for function return values. RETVAL indicates
11634 whether this is the case.
11635
11636 Much of this is taken from the SPARC V9 port, which has a similar
11637 calling convention. */
11638
11639 static rtx
11640 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11641 bool named, bool retval)
11642 {
11643 rtx rvec[FIRST_PSEUDO_REGISTER];
11644 int k = 1, kbase = 1;
11645 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11646 /* This is a copy; modifications are not visible to our caller. */
11647 CUMULATIVE_ARGS copy_cum = *orig_cum;
11648 CUMULATIVE_ARGS *cum = &copy_cum;
11649
11650 /* Pad to 16 byte boundary if needed. */
11651 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11652 && (cum->words % 2) != 0)
11653 cum->words++;
11654
11655 cum->intoffset = 0;
11656 cum->use_stack = 0;
11657 cum->named = named;
11658
11659 /* Put entries into rvec[] for individual FP and vector fields, and
11660 for the chunks of memory that go in int regs. Note we start at
11661 element 1; 0 is reserved for an indication of using memory, and
11662 may or may not be filled in below. */
11663 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11664 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11665
11666 /* If any part of the struct went on the stack put all of it there.
11667 This hack is because the generic code for
11668 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11669 parts of the struct are not at the beginning. */
11670 if (cum->use_stack)
11671 {
11672 if (retval)
11673 return NULL_RTX; /* doesn't go in registers at all */
11674 kbase = 0;
11675 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11676 }
11677 if (k > 1 || cum->use_stack)
11678 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11679 else
11680 return NULL_RTX;
11681 }
11682
11683 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11684
11685 static rtx
11686 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11687 int align_words)
11688 {
11689 int n_units;
11690 int i, k;
11691 rtx rvec[GP_ARG_NUM_REG + 1];
11692
11693 if (align_words >= GP_ARG_NUM_REG)
11694 return NULL_RTX;
11695
11696 n_units = rs6000_arg_size (mode, type);
11697
11698 /* Optimize the simple case where the arg fits in one gpr, except in
11699 the case of BLKmode due to assign_parms assuming that registers are
11700 BITS_PER_WORD wide. */
11701 if (n_units == 0
11702 || (n_units == 1 && mode != BLKmode))
11703 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11704
11705 k = 0;
11706 if (align_words + n_units > GP_ARG_NUM_REG)
11707 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11708 using a magic NULL_RTX component.
11709 This is not strictly correct. Only some of the arg belongs in
11710 memory, not all of it. However, the normal scheme using
11711 function_arg_partial_nregs can result in unusual subregs, e.g.
11712 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11713 store the whole arg to memory is often more efficient than code
11714 to store pieces, and we know that space is available in the right
11715 place for the whole arg. */
11716 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11717
11718 i = 0;
11719 do
11720 {
11721 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11722 rtx off = GEN_INT (i++ * 4);
11723 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11724 }
11725 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11726
11727 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11728 }
11729
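/* For illustration (hypothetical values): a DFmode argument with
   ALIGN_WORDS == 7 has n_units == 2 on -m32 -mpowerpc64, overflowing the
   eight GPRs, so the result is a PARALLEL along the lines of

     (parallel:DF [(expr_list (nil) (const_int 0))
		   (expr_list (reg:SI 10) (const_int 0))])

   saying that the argument lives in memory and that its first four bytes
   are also passed in r10.  */
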
11730 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11731 but must also be copied into the parameter save area starting at
11732 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11733 to the GPRs and/or memory. Return the number of elements used. */
11734
11735 static int
11736 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11737 int align_words, rtx *rvec)
11738 {
11739 int k = 0;
11740
11741 if (align_words < GP_ARG_NUM_REG)
11742 {
11743 int n_words = rs6000_arg_size (mode, type);
11744
11745 if (align_words + n_words > GP_ARG_NUM_REG
11746 || mode == BLKmode
11747 || (TARGET_32BIT && TARGET_POWERPC64))
11748 {
11749 /* If this is partially on the stack, then we only
11750 include the portion actually in registers here. */
11751 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11752 int i = 0;
11753
11754 if (align_words + n_words > GP_ARG_NUM_REG)
11755 {
11756 /* Not all of the arg fits in gprs. Say that it goes in memory
11757 too, using a magic NULL_RTX component. Also see comment in
11758 rs6000_mixed_function_arg for why the normal
11759 function_arg_partial_nregs scheme doesn't work in this case. */
11760 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11761 }
11762
11763 do
11764 {
11765 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11766 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11767 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11768 }
11769 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11770 }
11771 else
11772 {
11773 /* The whole arg fits in gprs. */
11774 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11775 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11776 }
11777 }
11778 else
11779 {
11780 /* It's entirely in memory. */
11781 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11782 }
11783
11784 return k;
11785 }
11786
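/* For example (hypothetical values, 64-bit): an argument of 3 doublewords
   with ALIGN_WORDS == 6 only partly fits, so RVEC receives the magic
   NULL_RTX element followed by (reg:DI r9) at offset 0 and (reg:DI r10)
   at offset 8, and 3 is returned.  */
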
11787 /* RVEC is a vector of K components of an argument of mode MODE.
11788 Construct the final function_arg return value from it. */
11789
11790 static rtx
11791 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11792 {
11793 gcc_assert (k >= 1);
11794
11795 /* Avoid returning a PARALLEL in the trivial cases. */
11796 if (k == 1)
11797 {
11798 if (XEXP (rvec[0], 0) == NULL_RTX)
11799 return NULL_RTX;
11800
11801 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11802 return XEXP (rvec[0], 0);
11803 }
11804
11805 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11806 }
11807
11808 /* Determine where to put an argument to a function.
11809 Value is zero to push the argument on the stack,
11810 or a hard register in which to store the argument.
11811
11812 MODE is the argument's machine mode.
11813 TYPE is the data type of the argument (as a tree).
11814 This is null for libcalls where that information may
11815 not be available.
11816 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11817 the preceding args and about the function being called. It is
11818 not modified in this routine.
11819 NAMED is nonzero if this argument is a named parameter
11820 (otherwise it is an extra parameter matching an ellipsis).
11821
11822 On RS/6000 the first eight words of non-FP args are normally in registers
11823 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11824 Under V.4, the first 8 FP args are in registers.
11825
11826 If this is floating-point and no prototype is specified, we use
11827 both an FP and integer register (or possibly FP reg and stack). Library
11828 functions (when CALL_LIBCALL is set) always have the proper types for args,
11829 so we can pass the FP value just in one register. emit_library_function
11830 doesn't support PARALLEL anyway.
11831
11832 Note that for args passed by reference, function_arg will be called
11833 with MODE and TYPE set to that of the pointer to the arg, not the arg
11834 itself. */
11835
11836 static rtx
11837 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11838 const_tree type, bool named)
11839 {
11840 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11841 enum rs6000_abi abi = DEFAULT_ABI;
11842 machine_mode elt_mode;
11843 int n_elts;
11844
11845 /* Return a marker to indicate whether the bit in CR1 that V.4 uses to
11846 say fp args were passed in registers needs to be set or cleared.
11847 Assume that we don't need the marker for software floating point,
11848 or compiler-generated library calls.  */
11849 if (mode == VOIDmode)
11850 {
11851 if (abi == ABI_V4
11852 && (cum->call_cookie & CALL_LIBCALL) == 0
11853 && (cum->stdarg
11854 || (cum->nargs_prototype < 0
11855 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11856 && TARGET_HARD_FLOAT)
11857 return GEN_INT (cum->call_cookie
11858 | ((cum->fregno == FP_ARG_MIN_REG)
11859 ? CALL_V4_SET_FP_ARGS
11860 : CALL_V4_CLEAR_FP_ARGS));
11861
11862 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11863 }
11864
11865 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11866
11867 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11868 {
11869 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11870 if (rslt != NULL_RTX)
11871 return rslt;
11872 /* Else fall through to usual handling. */
11873 }
11874
11875 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11876 {
11877 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11878 rtx r, off;
11879 int i, k = 0;
11880
11881 /* Do we also need to pass this argument in the parameter save area?
11882 Library support functions for IEEE 128-bit are assumed to not need the
11883 value passed both in GPRs and in vector registers. */
11884 if (TARGET_64BIT && !cum->prototype
11885 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11886 {
11887 int align_words = ROUND_UP (cum->words, 2);
11888 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11889 }
11890
11891 /* Describe where this argument goes in the vector registers. */
11892 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11893 {
11894 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11895 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11896 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11897 }
11898
11899 return rs6000_finish_function_arg (mode, rvec, k);
11900 }
11901 else if (TARGET_ALTIVEC_ABI
11902 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11903 || (type && TREE_CODE (type) == VECTOR_TYPE
11904 && int_size_in_bytes (type) == 16)))
11905 {
11906 if (named || abi == ABI_V4)
11907 return NULL_RTX;
11908 else
11909 {
11910 /* Vector parameters to varargs functions under AIX or Darwin
11911 get passed in memory and possibly also in GPRs. */
11912 int align, align_words, n_words;
11913 machine_mode part_mode;
11914
11915 /* Vector parameters must be 16-byte aligned. In 32-bit
11916 mode this means we need to take into account the offset
11917 to the parameter save area. In 64-bit mode, they just
11918 have to start on an even word, since the parameter save
11919 area is 16-byte aligned. */
11920 if (TARGET_32BIT)
11921 align = -(rs6000_parm_offset () + cum->words) & 3;
11922 else
11923 align = cum->words & 1;
11924 align_words = cum->words + align;
11925
11926 /* Out of registers? Memory, then. */
11927 if (align_words >= GP_ARG_NUM_REG)
11928 return NULL_RTX;
11929
11930 if (TARGET_32BIT && TARGET_POWERPC64)
11931 return rs6000_mixed_function_arg (mode, type, align_words);
11932
11933 /* The vector value goes in GPRs. Only the part of the
11934 value in GPRs is reported here. */
11935 part_mode = mode;
11936 n_words = rs6000_arg_size (mode, type);
11937 if (align_words + n_words > GP_ARG_NUM_REG)
11938 /* Fortunately, there are only two possibilities: the value
11939 is either wholly in GPRs or half in GPRs and half not. */
11940 part_mode = DImode;
11941
11942 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11943 }
11944 }
11945
11946 else if (abi == ABI_V4)
11947 {
11948 if (abi_v4_pass_in_fpr (mode, named))
11949 {
11950 /* _Decimal128 must use an even/odd register pair. This assumes
11951 that the register number is odd when fregno is odd. */
11952 if (mode == TDmode && (cum->fregno % 2) == 1)
11953 cum->fregno++;
11954
11955 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11956 <= FP_ARG_V4_MAX_REG)
11957 return gen_rtx_REG (mode, cum->fregno);
11958 else
11959 return NULL_RTX;
11960 }
11961 else
11962 {
11963 int n_words = rs6000_arg_size (mode, type);
11964 int gregno = cum->sysv_gregno;
11965
11966 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11967 As does any other 2 word item such as complex int due to a
11968 historical mistake. */
11969 if (n_words == 2)
11970 gregno += (1 - gregno) & 1;
11971
11972 /* Multi-reg args are not split between registers and stack. */
11973 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11974 return NULL_RTX;
11975
11976 if (TARGET_32BIT && TARGET_POWERPC64)
11977 return rs6000_mixed_function_arg (mode, type,
11978 gregno - GP_ARG_MIN_REG);
11979 return gen_rtx_REG (mode, gregno);
11980 }
11981 }
11982 else
11983 {
11984 int align_words = rs6000_parm_start (mode, type, cum->words);
11985
11986 /* _Decimal128 must be passed in an even/odd float register pair.
11987 This assumes that the register number is odd when fregno is odd. */
11988 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11989 cum->fregno++;
11990
11991 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11992 && !(TARGET_AIX && !TARGET_ELF
11993 && type != NULL && AGGREGATE_TYPE_P (type)))
11994 {
11995 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11996 rtx r, off;
11997 int i, k = 0;
11998 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11999 int fpr_words;
12000
12001 /* Do we also need to pass this argument in the parameter
12002 save area? */
12003 if (type && (cum->nargs_prototype <= 0
12004 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12005 && TARGET_XL_COMPAT
12006 && align_words >= GP_ARG_NUM_REG)))
12007 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12008
12009 /* Describe where this argument goes in the fprs. */
12010 for (i = 0; i < n_elts
12011 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12012 {
12013 /* Check if the argument is split over registers and memory.
12014 This can only ever happen for long double or _Decimal128;
12015 complex types are handled via split_complex_arg. */
12016 machine_mode fmode = elt_mode;
12017 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12018 {
12019 gcc_assert (FLOAT128_2REG_P (fmode));
12020 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12021 }
12022
12023 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12024 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12025 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12026 }
12027
12028 /* If there were not enough FPRs to hold the argument, the rest
12029 usually goes into memory. However, if the current position
12030 is still within the register parameter area, a portion may
12031 actually have to go into GPRs.
12032
12033 Note that it may happen that the portion of the argument
12034 passed in the first "half" of the first GPR was already
12035 passed in the last FPR as well.
12036
12037 For unnamed arguments, we already set up GPRs to cover the
12038 whole argument in rs6000_psave_function_arg, so there is
12039 nothing further to do at this point. */
12040 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12041 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12042 && cum->nargs_prototype > 0)
12043 {
12044 static bool warned;
12045
12046 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12047 int n_words = rs6000_arg_size (mode, type);
12048
12049 align_words += fpr_words;
12050 n_words -= fpr_words;
12051
12052 do
12053 {
12054 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12055 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12056 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12057 }
12058 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12059
12060 if (!warned && warn_psabi)
12061 {
12062 warned = true;
12063 inform (input_location,
12064 "the ABI of passing homogeneous float aggregates"
12065 " has changed in GCC 5");
12066 }
12067 }
12068
12069 return rs6000_finish_function_arg (mode, rvec, k);
12070 }
12071 else if (align_words < GP_ARG_NUM_REG)
12072 {
12073 if (TARGET_32BIT && TARGET_POWERPC64)
12074 return rs6000_mixed_function_arg (mode, type, align_words);
12075
12076 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12077 }
12078 else
12079 return NULL_RTX;
12080 }
12081 }
12082 \f
12083 /* For an arg passed partly in registers and partly in memory, this is
12084 the number of bytes passed in registers. For args passed entirely in
12085 registers or entirely in memory, zero. When an arg is described by a
12086 PARALLEL, perhaps using more than one register type, this function
12087 returns the number of bytes used by the first element of the PARALLEL. */
12088
12089 static int
12090 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12091 tree type, bool named)
12092 {
12093 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12094 bool passed_in_gprs = true;
12095 int ret = 0;
12096 int align_words;
12097 machine_mode elt_mode;
12098 int n_elts;
12099
12100 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12101
12102 if (DEFAULT_ABI == ABI_V4)
12103 return 0;
12104
12105 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12106 {
12107 /* If we are passing this arg in the fixed parameter save area (gprs or
12108 memory) as well as VRs, we do not use the partial bytes mechanism;
12109 instead, rs6000_function_arg will return a PARALLEL including a memory
12110 element as necessary. Library support functions for IEEE 128-bit are
12111 assumed to not need the value passed both in GPRs and in vector
12112 registers. */
12113 if (TARGET_64BIT && !cum->prototype
12114 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12115 return 0;
12116
12117 /* Otherwise, we pass in VRs only. Check for partial copies. */
12118 passed_in_gprs = false;
12119 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12120 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12121 }
12122
12123 /* In this complicated case we just disable the partial_nregs code. */
12124 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12125 return 0;
12126
12127 align_words = rs6000_parm_start (mode, type, cum->words);
12128
12129 if (USE_FP_FOR_ARG_P (cum, elt_mode)
12130 && !(TARGET_AIX && !TARGET_ELF
12131 && type != NULL && AGGREGATE_TYPE_P (type)))
12132 {
12133 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12134
12135 /* If we are passing this arg in the fixed parameter save area
12136 (gprs or memory) as well as FPRs, we do not use the partial
12137 bytes mechanism; instead, rs6000_function_arg will return a
12138 PARALLEL including a memory element as necessary. */
12139 if (type
12140 && (cum->nargs_prototype <= 0
12141 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12142 && TARGET_XL_COMPAT
12143 && align_words >= GP_ARG_NUM_REG)))
12144 return 0;
12145
12146 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12147 passed_in_gprs = false;
12148 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12149 {
12150 /* Compute number of bytes / words passed in FPRs. If there
12151 is still space available in the register parameter area
12152 *after* that amount, a part of the argument will be passed
12153 in GPRs. In that case, the total amount passed in any
12154 registers is equal to the amount that would have been passed
12155 in GPRs if everything were passed there, so we fall back to
12156 the GPR code below to compute the appropriate value. */
12157 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12158 * MIN (8, GET_MODE_SIZE (elt_mode)));
12159 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12160
12161 if (align_words + fpr_words < GP_ARG_NUM_REG)
12162 passed_in_gprs = true;
12163 else
12164 ret = fpr;
12165 }
12166 }
12167
12168 if (passed_in_gprs
12169 && align_words < GP_ARG_NUM_REG
12170 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12171 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12172
12173 if (ret != 0 && TARGET_DEBUG_ARG)
12174 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12175
12176 return ret;
12177 }
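/* A hypothetical worked example (illustrative only, not taken from the
   ABI documents): under the 64-bit AIX/ELFv2 conventions there are
   eight parameter GPRs (GP_ARG_NUM_REG == 8, r3..r10), each holding
   one doubleword.

     struct big { long a, b, c, d; };                    // 32 bytes
     void f (long, long, long, long, long, long, struct big s);

   Here S starts at align_words == 6, so only two of its four
   doublewords fit in r9/r10; the rest spills to the parameter save
   area, and the code above returns (8 - 6) * 8 == 16 partial bytes.  */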
12178 \f
12179 /* A C expression that indicates when an argument must be passed by
12180 reference. If nonzero for an argument, a copy of that argument is
12181 made in memory and a pointer to the argument is passed instead of
12182 the argument itself. The pointer is passed in whatever way is
12183 appropriate for passing a pointer to that type.
12184
12185 Under V.4, aggregates and long double are passed by reference.
12186
12187 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12188 reference unless the AltiVec vector extension ABI is in force.
12189
12190 As an extension to all ABIs, variable sized types are passed by
12191 reference. */
12192
12193 static bool
12194 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12195 machine_mode mode, const_tree type,
12196 bool named ATTRIBUTE_UNUSED)
12197 {
12198 if (!type)
12199 return 0;
12200
12201 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12202 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12203 {
12204 if (TARGET_DEBUG_ARG)
12205 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12206 return 1;
12207 }
12208
12209 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12210 {
12211 if (TARGET_DEBUG_ARG)
12212 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12213 return 1;
12214 }
12215
12216 if (int_size_in_bytes (type) < 0)
12217 {
12218 if (TARGET_DEBUG_ARG)
12219 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12220 return 1;
12221 }
12222
12223 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12224 modes only exist for GCC vector types if -maltivec. */
12225 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12226 {
12227 if (TARGET_DEBUG_ARG)
12228 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12229 return 1;
12230 }
12231
12232 /* Pass synthetic vectors in memory. */
12233 if (TREE_CODE (type) == VECTOR_TYPE
12234 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12235 {
12236 static bool warned_for_pass_big_vectors = false;
12237 if (TARGET_DEBUG_ARG)
12238 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12239 if (!warned_for_pass_big_vectors)
12240 {
12241 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12242 "non-standard ABI extension with no compatibility "
12243 "guarantee");
12244 warned_for_pass_big_vectors = true;
12245 }
12246 return 1;
12247 }
12248
12249 return 0;
12250 }
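/* Hypothetical user-level examples of the rules above (assuming a
   32-bit SVR4 target, i.e. DEFAULT_ABI == ABI_V4):

     struct pair { int a, b; };
     void f (struct pair p);        // V.4 aggregate: a pointer is passed

     typedef int v8si __attribute__ ((vector_size (32)));
     void g (v8si v);               // 32-byte synthetic vector: passed by
                                    // reference, with the one-time -Wpsabi
                                    // warning emitted above  */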
12251
12252 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
12253 already processed. Return true if the parameter must be passed
12254 (fully or partially) on the stack. */
12255
12256 static bool
12257 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12258 {
12259 machine_mode mode;
12260 int unsignedp;
12261 rtx entry_parm;
12262
12263 /* Catch errors. */
12264 if (type == NULL || type == error_mark_node)
12265 return true;
12266
12267 /* Handle types with no storage requirement. */
12268 if (TYPE_MODE (type) == VOIDmode)
12269 return false;
12270
12271 /* Handle complex types. */
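/* Note the deliberately duplicated call below: a complex value is
   passed as two scalar halves, and each call both tests one half and
   advances ARGS_SO_FAR past it as a side effect. If the first call
   returns true we return immediately, so skipping the second advance
   via short-circuiting is harmless.  */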
12272 if (TREE_CODE (type) == COMPLEX_TYPE)
12273 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12274 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12275
12276 /* Handle transparent aggregates. */
12277 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12278 && TYPE_TRANSPARENT_AGGR (type))
12279 type = TREE_TYPE (first_field (type));
12280
12281 /* See if this arg was passed by invisible reference. */
12282 if (pass_by_reference (get_cumulative_args (args_so_far),
12283 TYPE_MODE (type), type, true))
12284 type = build_pointer_type (type);
12285
12286 /* Find mode as it is passed by the ABI. */
12287 unsignedp = TYPE_UNSIGNED (type);
12288 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12289
12290 /* If we must pass in stack, we need a stack. */
12291 if (rs6000_must_pass_in_stack (mode, type))
12292 return true;
12293
12294 /* If there is no incoming register, we need a stack. */
12295 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12296 if (entry_parm == NULL)
12297 return true;
12298
12299 /* Likewise if we need to pass both in registers and on the stack. */
12300 if (GET_CODE (entry_parm) == PARALLEL
12301 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12302 return true;
12303
12304 /* Also true if we're partially in registers and partially not. */
12305 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12306 return true;
12307
12308 /* Update info on where next arg arrives in registers. */
12309 rs6000_function_arg_advance (args_so_far, mode, type, true);
12310 return false;
12311 }
12312
12313 /* Return true if FUN has no prototype, has a variable argument
12314 list, or passes any parameter in memory. */
12315
12316 static bool
12317 rs6000_function_parms_need_stack (tree fun, bool incoming)
12318 {
12319 tree fntype, result;
12320 CUMULATIVE_ARGS args_so_far_v;
12321 cumulative_args_t args_so_far;
12322
12323 if (!fun)
12324 /* Must be a libcall, all of which only use reg parms. */
12325 return false;
12326
12327 fntype = fun;
12328 if (!TYPE_P (fun))
12329 fntype = TREE_TYPE (fun);
12330
12331 /* Varargs functions need the parameter save area. */
12332 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12333 return true;
12334
12335 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12336 args_so_far = pack_cumulative_args (&args_so_far_v);
12337
12338 /* When incoming, we will have been passed the function decl.
12339 It is necessary to use the decl to handle K&R style functions,
12340 where TYPE_ARG_TYPES may not be available. */
12341 if (incoming)
12342 {
12343 gcc_assert (DECL_P (fun));
12344 result = DECL_RESULT (fun);
12345 }
12346 else
12347 result = TREE_TYPE (fntype);
12348
12349 if (result && aggregate_value_p (result, fntype))
12350 {
12351 if (!TYPE_P (result))
12352 result = TREE_TYPE (result);
12353 result = build_pointer_type (result);
12354 rs6000_parm_needs_stack (args_so_far, result);
12355 }
12356
12357 if (incoming)
12358 {
12359 tree parm;
12360
12361 for (parm = DECL_ARGUMENTS (fun);
12362 parm && parm != void_list_node;
12363 parm = TREE_CHAIN (parm))
12364 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12365 return true;
12366 }
12367 else
12368 {
12369 function_args_iterator args_iter;
12370 tree arg_type;
12371
12372 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12373 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12374 return true;
12375 }
12376
12377 return false;
12378 }
12379
12380 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12381 usually a constant depending on the ABI. However, in the ELFv2 ABI
12382 the register parameter area is optional when calling a function that
12383 has a prototype in scope, has no variable argument list, and passes
12384 all parameters in registers. */
12385
12386 int
12387 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12388 {
12389 int reg_parm_stack_space;
12390
12391 switch (DEFAULT_ABI)
12392 {
12393 default:
12394 reg_parm_stack_space = 0;
12395 break;
12396
12397 case ABI_AIX:
12398 case ABI_DARWIN:
12399 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12400 break;
12401
12402 case ABI_ELFv2:
12403 /* ??? Recomputing this every time is a bit expensive. Is there
12404 a place to cache this information? */
12405 if (rs6000_function_parms_need_stack (fun, incoming))
12406 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12407 else
12408 reg_parm_stack_space = 0;
12409 break;
12410 }
12411
12412 return reg_parm_stack_space;
12413 }
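/* Hypothetical examples for a 64-bit ELFv2 target:

     int f (int a, int b);          // prototyped, everything in registers:
                                    // rs6000_reg_parm_stack_space == 0
     int g (const char *fmt, ...);  // varargs: the full 64-byte save area
                                    // is required

   Under AIX and Darwin the 64 (32 on 32-bit) bytes are always reserved,
   whether or not they are used.  */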
12414
12415 static void
12416 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12417 {
12418 int i;
12419 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12420
12421 if (nregs == 0)
12422 return;
12423
12424 for (i = 0; i < nregs; i++)
12425 {
12426 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12427 if (reload_completed)
12428 {
12429 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12430 tem = NULL_RTX;
12431 else
12432 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12433 i * GET_MODE_SIZE (reg_mode));
12434 }
12435 else
12436 tem = replace_equiv_address (tem, XEXP (tem, 0));
12437
12438 gcc_assert (tem);
12439
12440 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12441 }
12442 }
12443 \f
12444 /* Perform any actions needed for a function that is receiving a
12445 variable number of arguments.
12446
12447 CUM is as above.
12448
12449 MODE and TYPE are the mode and type of the current parameter.
12450
12451 PRETEND_SIZE is a variable that should be set to the amount of stack
12452 that must be pushed by the prolog to pretend that our caller pushed
12453 it.
12454
12455 Normally, this macro will push all remaining incoming registers on the
12456 stack and set PRETEND_SIZE to the length of the registers pushed. */
12457
12458 static void
12459 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12460 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12461 int no_rtl)
12462 {
12463 CUMULATIVE_ARGS next_cum;
12464 int reg_size = TARGET_32BIT ? 4 : 8;
12465 rtx save_area = NULL_RTX, mem;
12466 int first_reg_offset;
12467 alias_set_type set;
12468
12469 /* Skip the last named argument. */
12470 next_cum = *get_cumulative_args (cum);
12471 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12472
12473 if (DEFAULT_ABI == ABI_V4)
12474 {
12475 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12476
12477 if (! no_rtl)
12478 {
12479 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12480 HOST_WIDE_INT offset = 0;
12481
12482 /* Try to optimize the size of the varargs save area.
12483 The ABI requires that ap.reg_save_area is doubleword
12484 aligned, but we don't need to allocate space for all
12485 the bytes, only those to which we actually will save
12486 anything. */
12487 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12488 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12489 if (TARGET_HARD_FLOAT
12490 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12491 && cfun->va_list_fpr_size)
12492 {
12493 if (gpr_reg_num)
12494 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12495 * UNITS_PER_FP_WORD;
12496 if (cfun->va_list_fpr_size
12497 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12498 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12499 else
12500 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12501 * UNITS_PER_FP_WORD;
12502 }
12503 if (gpr_reg_num)
12504 {
12505 offset = -((first_reg_offset * reg_size) & ~7);
12506 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12507 {
12508 gpr_reg_num = cfun->va_list_gpr_size;
12509 if (reg_size == 4 && (first_reg_offset & 1))
12510 gpr_reg_num++;
12511 }
12512 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12513 }
12514 else if (fpr_size)
12515 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12516 * UNITS_PER_FP_WORD
12517 - (int) (GP_ARG_NUM_REG * reg_size);
12518
12519 if (gpr_size + fpr_size)
12520 {
12521 rtx reg_save_area
12522 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12523 gcc_assert (MEM_P (reg_save_area));
12524 reg_save_area = XEXP (reg_save_area, 0);
12525 if (GET_CODE (reg_save_area) == PLUS)
12526 {
12527 gcc_assert (XEXP (reg_save_area, 0)
12528 == virtual_stack_vars_rtx);
12529 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12530 offset += INTVAL (XEXP (reg_save_area, 1));
12531 }
12532 else
12533 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12534 }
12535
12536 cfun->machine->varargs_save_offset = offset;
12537 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12538 }
12539 }
12540 else
12541 {
12542 first_reg_offset = next_cum.words;
12543 save_area = crtl->args.internal_arg_pointer;
12544
12545 if (targetm.calls.must_pass_in_stack (mode, type))
12546 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12547 }
12548
12549 set = get_varargs_alias_set ();
12550 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12551 && cfun->va_list_gpr_size)
12552 {
12553 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12554
12555 if (va_list_gpr_counter_field)
12556 /* V4 va_list_gpr_size counts number of registers needed. */
12557 n_gpr = cfun->va_list_gpr_size;
12558 else
12559 /* char * va_list instead counts number of bytes needed. */
12560 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12561
12562 if (nregs > n_gpr)
12563 nregs = n_gpr;
12564
12565 mem = gen_rtx_MEM (BLKmode,
12566 plus_constant (Pmode, save_area,
12567 first_reg_offset * reg_size));
12568 MEM_NOTRAP_P (mem) = 1;
12569 set_mem_alias_set (mem, set);
12570 set_mem_align (mem, BITS_PER_WORD);
12571
12572 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12573 nregs);
12574 }
12575
12576 /* Save FP registers if needed. */
12577 if (DEFAULT_ABI == ABI_V4
12578 && TARGET_HARD_FLOAT
12579 && ! no_rtl
12580 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12581 && cfun->va_list_fpr_size)
12582 {
12583 int fregno = next_cum.fregno, nregs;
12584 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12585 rtx lab = gen_label_rtx ();
12586 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12587 * UNITS_PER_FP_WORD);
12588
12589 emit_jump_insn
12590 (gen_rtx_SET (pc_rtx,
12591 gen_rtx_IF_THEN_ELSE (VOIDmode,
12592 gen_rtx_NE (VOIDmode, cr1,
12593 const0_rtx),
12594 gen_rtx_LABEL_REF (VOIDmode, lab),
12595 pc_rtx)));
12596
12597 for (nregs = 0;
12598 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12599 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12600 {
12601 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12602 plus_constant (Pmode, save_area, off));
12603 MEM_NOTRAP_P (mem) = 1;
12604 set_mem_alias_set (mem, set);
12605 set_mem_align (mem, GET_MODE_ALIGNMENT (TARGET_HARD_FLOAT
12606 ? DFmode : SFmode));
12607 emit_move_insn (mem, gen_rtx_REG (TARGET_HARD_FLOAT
12608 ? DFmode : SFmode, fregno));
12609 }
12610
12611 emit_label (lab);
12612 }
12613 }
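/* For orientation, a sketch of the V.4 register save area laid out by
   the code above for a variadic callee such as

     int sum (int n, ...);          // hypothetical

   assuming -mhard-float and full va_list_gpr/fpr sizes:

     offset  0: r3..r10 spilled, 8 GPRs * 4 bytes = 32 bytes
     offset 32: f1..f8 spilled,  8 FPRs * 8 bytes = 64 bytes

   The FPR stores are guarded by the CR1 test emitted above, so they
   can be bypassed at run time when the caller passed no floating-point
   arguments.  */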
12614
12615 /* Create the va_list data type. */
12616
12617 static tree
12618 rs6000_build_builtin_va_list (void)
12619 {
12620 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12621
12622 /* For AIX, prefer 'char *' because that's what the system
12623 header files like. */
12624 if (DEFAULT_ABI != ABI_V4)
12625 return build_pointer_type (char_type_node);
12626
12627 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12628 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12629 get_identifier ("__va_list_tag"), record);
12630
12631 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12632 unsigned_char_type_node);
12633 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12634 unsigned_char_type_node);
12635 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12636 every user file. */
12637 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12638 get_identifier ("reserved"), short_unsigned_type_node);
12639 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12640 get_identifier ("overflow_arg_area"),
12641 ptr_type_node);
12642 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12643 get_identifier ("reg_save_area"),
12644 ptr_type_node);
12645
12646 va_list_gpr_counter_field = f_gpr;
12647 va_list_fpr_counter_field = f_fpr;
12648
12649 DECL_FIELD_CONTEXT (f_gpr) = record;
12650 DECL_FIELD_CONTEXT (f_fpr) = record;
12651 DECL_FIELD_CONTEXT (f_res) = record;
12652 DECL_FIELD_CONTEXT (f_ovf) = record;
12653 DECL_FIELD_CONTEXT (f_sav) = record;
12654
12655 TYPE_STUB_DECL (record) = type_decl;
12656 TYPE_NAME (record) = type_decl;
12657 TYPE_FIELDS (record) = f_gpr;
12658 DECL_CHAIN (f_gpr) = f_fpr;
12659 DECL_CHAIN (f_fpr) = f_res;
12660 DECL_CHAIN (f_res) = f_ovf;
12661 DECL_CHAIN (f_ovf) = f_sav;
12662
12663 layout_type (record);
12664
12665 /* The correct type is an array type of one element. */
12666 return build_array_type (record, build_index_type (size_zero_node));
12667 }
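/* The record built above corresponds to the well-known SVR4 PowerPC
   va_list layout; in plain C it is roughly:

     typedef struct __va_list_tag {
       unsigned char gpr;          // index of next unused GPR (0..8)
       unsigned char fpr;          // index of next unused FPR (0..8)
       unsigned short reserved;    // the named padding field
       void *overflow_arg_area;    // arguments that overflowed to stack
       void *reg_save_area;        // block filled by setup_incoming_varargs
     } __gnuc_va_list[1];          // array of one element, as noted above  */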
12668
12669 /* Implement va_start. */
12670
12671 static void
12672 rs6000_va_start (tree valist, rtx nextarg)
12673 {
12674 HOST_WIDE_INT words, n_gpr, n_fpr;
12675 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12676 tree gpr, fpr, ovf, sav, t;
12677
12678 /* Only SVR4 needs something special. */
12679 if (DEFAULT_ABI != ABI_V4)
12680 {
12681 std_expand_builtin_va_start (valist, nextarg);
12682 return;
12683 }
12684
12685 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12686 f_fpr = DECL_CHAIN (f_gpr);
12687 f_res = DECL_CHAIN (f_fpr);
12688 f_ovf = DECL_CHAIN (f_res);
12689 f_sav = DECL_CHAIN (f_ovf);
12690
12691 valist = build_simple_mem_ref (valist);
12692 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12693 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12694 f_fpr, NULL_TREE);
12695 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12696 f_ovf, NULL_TREE);
12697 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12698 f_sav, NULL_TREE);
12699
12700 /* Count number of gp and fp argument registers used. */
12701 words = crtl->args.info.words;
12702 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12703 GP_ARG_NUM_REG);
12704 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12705 FP_ARG_NUM_REG);
12706
12707 if (TARGET_DEBUG_ARG)
12708 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12709 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12710 words, n_gpr, n_fpr);
12711
12712 if (cfun->va_list_gpr_size)
12713 {
12714 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12715 build_int_cst (NULL_TREE, n_gpr));
12716 TREE_SIDE_EFFECTS (t) = 1;
12717 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12718 }
12719
12720 if (cfun->va_list_fpr_size)
12721 {
12722 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12723 build_int_cst (NULL_TREE, n_fpr));
12724 TREE_SIDE_EFFECTS (t) = 1;
12725 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12726
12727 #ifdef HAVE_AS_GNU_ATTRIBUTE
12728 if (call_ABI_of_interest (cfun->decl))
12729 rs6000_passes_float = true;
12730 #endif
12731 }
12732
12733 /* Find the overflow area. */
12734 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12735 if (words != 0)
12736 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12737 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12738 TREE_SIDE_EFFECTS (t) = 1;
12739 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12740
12741 /* If there were no va_arg invocations, don't set up the register
12742 save area. */
12743 if (!cfun->va_list_gpr_size
12744 && !cfun->va_list_fpr_size
12745 && n_gpr < GP_ARG_NUM_REG
12746 && n_fpr < FP_ARG_V4_MAX_REG)
12747 return;
12748
12749 /* Find the register save area. */
12750 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12751 if (cfun->machine->varargs_save_offset)
12752 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12753 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12754 TREE_SIDE_EFFECTS (t) = 1;
12755 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12756 }
12757
12758 /* Implement va_arg. */
12759
12760 static tree
12761 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12762 gimple_seq *post_p)
12763 {
12764 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12765 tree gpr, fpr, ovf, sav, reg, t, u;
12766 int size, rsize, n_reg, sav_ofs, sav_scale;
12767 tree lab_false, lab_over, addr;
12768 int align;
12769 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12770 int regalign = 0;
12771 gimple *stmt;
12772
12773 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12774 {
12775 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12776 return build_va_arg_indirect_ref (t);
12777 }
12778
12779 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12780 earlier version of gcc, with the property that it always applied alignment
12781 adjustments to the va-args (even for zero-sized types). The cheapest way
12782 to deal with this is to replicate the effect of the part of
12783 std_gimplify_va_arg_expr that carries out the alignment adjustment, for
12784 the relevant case.
12785 We don't need to check for pass-by-reference because of the test above.
12786 We can return a simplified answer, since we know there's no offset to add. */
12787
12788 if (((TARGET_MACHO
12789 && rs6000_darwin64_abi)
12790 || DEFAULT_ABI == ABI_ELFv2
12791 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12792 && integer_zerop (TYPE_SIZE (type)))
12793 {
12794 unsigned HOST_WIDE_INT align, boundary;
12795 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12796 align = PARM_BOUNDARY / BITS_PER_UNIT;
12797 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12798 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12799 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12800 boundary /= BITS_PER_UNIT;
12801 if (boundary > align)
12802 {
12803 tree t;
12804 /* This updates arg ptr by the amount that would be necessary
12805 to align the zero-sized (but not zero-alignment) item. */
12806 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12807 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12808 gimplify_and_add (t, pre_p);
12809
12810 t = fold_convert (sizetype, valist_tmp);
12811 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12812 fold_convert (TREE_TYPE (valist),
12813 fold_build2 (BIT_AND_EXPR, sizetype, t,
12814 size_int (-boundary))));
12815 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12816 gimplify_and_add (t, pre_p);
12817 }
12818 /* Since it is zero-sized there's no increment for the item itself. */
12819 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12820 return build_va_arg_indirect_ref (valist_tmp);
12821 }
12822
12823 if (DEFAULT_ABI != ABI_V4)
12824 {
12825 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12826 {
12827 tree elem_type = TREE_TYPE (type);
12828 machine_mode elem_mode = TYPE_MODE (elem_type);
12829 int elem_size = GET_MODE_SIZE (elem_mode);
12830
12831 if (elem_size < UNITS_PER_WORD)
12832 {
12833 tree real_part, imag_part;
12834 gimple_seq post = NULL;
12835
12836 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12837 &post);
12838 /* Copy the value into a temporary, lest the formal temporary
12839 be reused out from under us. */
12840 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12841 gimple_seq_add_seq (pre_p, post);
12842
12843 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12844 post_p);
12845
12846 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12847 }
12848 }
12849
12850 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12851 }
12852
12853 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12854 f_fpr = DECL_CHAIN (f_gpr);
12855 f_res = DECL_CHAIN (f_fpr);
12856 f_ovf = DECL_CHAIN (f_res);
12857 f_sav = DECL_CHAIN (f_ovf);
12858
12859 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12860 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12861 f_fpr, NULL_TREE);
12862 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12863 f_ovf, NULL_TREE);
12864 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12865 f_sav, NULL_TREE);
12866
12867 size = int_size_in_bytes (type);
12868 rsize = (size + 3) / 4;
12869 int pad = 4 * rsize - size;
12870 align = 1;
12871
12872 machine_mode mode = TYPE_MODE (type);
12873 if (abi_v4_pass_in_fpr (mode, false))
12874 {
12875 /* FP args go in FP registers, if present. */
12876 reg = fpr;
12877 n_reg = (size + 7) / 8;
12878 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12879 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12880 if (mode != SFmode && mode != SDmode)
12881 align = 8;
12882 }
12883 else
12884 {
12885 /* Otherwise into GP registers. */
12886 reg = gpr;
12887 n_reg = rsize;
12888 sav_ofs = 0;
12889 sav_scale = 4;
12890 if (n_reg == 2)
12891 align = 8;
12892 }
12893
12894 /* Pull the value out of the saved registers.... */
12895
12896 lab_over = NULL;
12897 addr = create_tmp_var (ptr_type_node, "addr");
12898
12899 /* AltiVec vectors never go in registers when -mabi=altivec. */
12900 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12901 align = 16;
12902 else
12903 {
12904 lab_false = create_artificial_label (input_location);
12905 lab_over = create_artificial_label (input_location);
12906
12907 /* Long long is aligned in the registers, as is any other two-GPR
12908 item such as complex int, due to a historical mistake. */
12909 u = reg;
12910 if (n_reg == 2 && reg == gpr)
12911 {
12912 regalign = 1;
12913 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12914 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12915 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12916 unshare_expr (reg), u);
12917 }
12918 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12919 reg number is 0 for f1, so we want to make it odd. */
12920 else if (reg == fpr && mode == TDmode)
12921 {
12922 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12923 build_int_cst (TREE_TYPE (reg), 1));
12924 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12925 }
12926
12927 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12928 t = build2 (GE_EXPR, boolean_type_node, u, t);
12929 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12930 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12931 gimplify_and_add (t, pre_p);
12932
12933 t = sav;
12934 if (sav_ofs)
12935 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12936
12937 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12938 build_int_cst (TREE_TYPE (reg), n_reg));
12939 u = fold_convert (sizetype, u);
12940 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12941 t = fold_build_pointer_plus (t, u);
12942
12943 /* _Decimal32 varargs are located in the second word of the 64-bit
12944 FP register for 32-bit binaries. */
12945 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12946 t = fold_build_pointer_plus_hwi (t, size);
12947
12948 /* Args are passed right-aligned. */
12949 if (BYTES_BIG_ENDIAN)
12950 t = fold_build_pointer_plus_hwi (t, pad);
12951
12952 gimplify_assign (addr, t, pre_p);
12953
12954 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12955
12956 stmt = gimple_build_label (lab_false);
12957 gimple_seq_add_stmt (pre_p, stmt);
12958
12959 if ((n_reg == 2 && !regalign) || n_reg > 2)
12960 {
12961 /* Ensure that we don't find any more args in regs.
12962 Alignment has been taken care of for special cases. */
12963 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12964 }
12965 }
12966
12967 /* ... otherwise out of the overflow area. */
12968
12969 /* Care for on-stack alignment if needed. */
12970 t = ovf;
12971 if (align != 1)
12972 {
12973 t = fold_build_pointer_plus_hwi (t, align - 1);
12974 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12975 build_int_cst (TREE_TYPE (t), -align));
12976 }
12977
12978 /* Args are passed right-aligned. */
12979 if (BYTES_BIG_ENDIAN)
12980 t = fold_build_pointer_plus_hwi (t, pad);
12981
12982 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12983
12984 gimplify_assign (unshare_expr (addr), t, pre_p);
12985
12986 t = fold_build_pointer_plus_hwi (t, size);
12987 gimplify_assign (unshare_expr (ovf), t, pre_p);
12988
12989 if (lab_over)
12990 {
12991 stmt = gimple_build_label (lab_over);
12992 gimple_seq_add_stmt (pre_p, stmt);
12993 }
12994
12995 if (STRICT_ALIGNMENT
12996 && (TYPE_ALIGN (type)
12997 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12998 {
12999 /* The value (of type complex double, for example) may not be
13000 aligned in memory in the saved registers, so copy via a
13001 temporary. (This is the same code as used for SPARC.) */
13002 tree tmp = create_tmp_var (type, "va_arg_tmp");
13003 tree dest_addr = build_fold_addr_expr (tmp);
13004
13005 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13006 3, dest_addr, addr, size_int (rsize * 4));
13007 TREE_ADDRESSABLE (tmp) = 1;
13008
13009 gimplify_and_add (copy, pre_p);
13010 addr = dest_addr;
13011 }
13012
13013 addr = fold_convert (ptrtype, addr);
13014 return build_va_arg_indirect_ref (addr);
13015 }
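/* A minimal C sketch of the V.4 sequence gimplified above, for a
   two-word general-purpose argument (n_reg == 2; the field names
   follow the va_list layout shown earlier, and this is pseudo-code,
   not a drop-in implementation):

     gpr += gpr & 1;                    // two-reg items use an even pair
     if (gpr < 7)                       // the pair still fits in r3..r10
       {
         addr = reg_save_area + gpr * 4;
         gpr += 2;
       }
     else                               // gpr is now >= 8: no more reg args
       {
         ovf = (char *) (((uintptr_t) ovf + 7) & -8);   // align to 8
         addr = ovf;
         ovf += size;
       }
     return *(T *) addr;  */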
13016
13017 /* Builtins. */
13018
13019 static void
13020 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13021 {
13022 tree t;
13023 unsigned classify = rs6000_builtin_info[(int)code].attr;
13024 const char *attr_string = "";
13025
13026 gcc_assert (name != NULL);
13027 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13028
13029 if (rs6000_builtin_decls[(int)code])
13030 fatal_error (input_location,
13031 "internal error: builtin function %qs already processed",
13032 name);
13033
13034 rs6000_builtin_decls[(int)code] = t =
13035 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13036
13037 /* Set any special attributes. */
13038 if ((classify & RS6000_BTC_CONST) != 0)
13039 {
13040 /* const function, function only depends on the inputs. */
13041 TREE_READONLY (t) = 1;
13042 TREE_NOTHROW (t) = 1;
13043 attr_string = ", const";
13044 }
13045 else if ((classify & RS6000_BTC_PURE) != 0)
13046 {
13047 /* pure function, function can read global memory, but does not set any
13048 external state. */
13049 DECL_PURE_P (t) = 1;
13050 TREE_NOTHROW (t) = 1;
13051 attr_string = ", pure";
13052 }
13053 else if ((classify & RS6000_BTC_FP) != 0)
13054 {
13055 /* Function is a math function. If -frounding-math is in effect, treat
13056 the function as not reading global memory, but as having arbitrary
13057 side effects. If it is off, assume the function is const.
13058 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13059 builtin-attrs.def that is used for the math functions. */
13060 TREE_NOTHROW (t) = 1;
13061 if (flag_rounding_math)
13062 {
13063 DECL_PURE_P (t) = 1;
13064 DECL_IS_NOVOPS (t) = 1;
13065 attr_string = ", fp, pure";
13066 }
13067 else
13068 {
13069 TREE_READONLY (t) = 1;
13070 attr_string = ", fp, const";
13071 }
13072 }
13073 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13074 gcc_unreachable ();
13075
13076 if (TARGET_DEBUG_BUILTIN)
13077 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13078 (int)code, name, attr_string);
13079 }
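/* An illustrative registration, following the pattern used by the
   builtin init routines later in this file (the specific name and
   type node here are just examples):

     def_builtin ("__builtin_altivec_vaddubm",
                  v16qi_ftype_v16qi_v16qi, ALTIVEC_BUILTIN_VADDUBM);

   The RS6000_BTC_* bits in rs6000_builtin_info then decide which of
   the const/pure/fp attributes above get applied to the decl.  */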
13080
13081 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13082
13083 #undef RS6000_BUILTIN_0
13084 #undef RS6000_BUILTIN_1
13085 #undef RS6000_BUILTIN_2
13086 #undef RS6000_BUILTIN_3
13087 #undef RS6000_BUILTIN_A
13088 #undef RS6000_BUILTIN_D
13089 #undef RS6000_BUILTIN_H
13090 #undef RS6000_BUILTIN_P
13091 #undef RS6000_BUILTIN_X
13092
13093 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13094 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13095 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13096 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13097 { MASK, ICODE, NAME, ENUM },
13098
13099 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13100 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13101 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13102 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13103 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13104
13105 static const struct builtin_description bdesc_3arg[] =
13106 {
13107 #include "rs6000-builtin.def"
13108 };
13109
13110 /* DST operations: void foo (void *, const int, const char). */
13111
13112 #undef RS6000_BUILTIN_0
13113 #undef RS6000_BUILTIN_1
13114 #undef RS6000_BUILTIN_2
13115 #undef RS6000_BUILTIN_3
13116 #undef RS6000_BUILTIN_A
13117 #undef RS6000_BUILTIN_D
13118 #undef RS6000_BUILTIN_H
13119 #undef RS6000_BUILTIN_P
13120 #undef RS6000_BUILTIN_X
13121
13122 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13123 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13124 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13125 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13126 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13127 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13128 { MASK, ICODE, NAME, ENUM },
13129
13130 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13131 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13132 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13133
13134 static const struct builtin_description bdesc_dst[] =
13135 {
13136 #include "rs6000-builtin.def"
13137 };
13138
13139 /* Simple binary operations: VECc = foo (VECa, VECb). */
13140
13141 #undef RS6000_BUILTIN_0
13142 #undef RS6000_BUILTIN_1
13143 #undef RS6000_BUILTIN_2
13144 #undef RS6000_BUILTIN_3
13145 #undef RS6000_BUILTIN_A
13146 #undef RS6000_BUILTIN_D
13147 #undef RS6000_BUILTIN_H
13148 #undef RS6000_BUILTIN_P
13149 #undef RS6000_BUILTIN_X
13150
13151 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13152 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13153 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13154 { MASK, ICODE, NAME, ENUM },
13155
13156 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13157 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13158 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13159 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13160 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13161 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13162
13163 static const struct builtin_description bdesc_2arg[] =
13164 {
13165 #include "rs6000-builtin.def"
13166 };
13167
13168 #undef RS6000_BUILTIN_0
13169 #undef RS6000_BUILTIN_1
13170 #undef RS6000_BUILTIN_2
13171 #undef RS6000_BUILTIN_3
13172 #undef RS6000_BUILTIN_A
13173 #undef RS6000_BUILTIN_D
13174 #undef RS6000_BUILTIN_H
13175 #undef RS6000_BUILTIN_P
13176 #undef RS6000_BUILTIN_X
13177
13178 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13179 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13180 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13181 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13182 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13183 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13184 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13185 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13186 { MASK, ICODE, NAME, ENUM },
13187
13188 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13189
13190 /* AltiVec predicates. */
13191
13192 static const struct builtin_description bdesc_altivec_preds[] =
13193 {
13194 #include "rs6000-builtin.def"
13195 };
13196
13197 /* ABS* operations. */
13198
13199 #undef RS6000_BUILTIN_0
13200 #undef RS6000_BUILTIN_1
13201 #undef RS6000_BUILTIN_2
13202 #undef RS6000_BUILTIN_3
13203 #undef RS6000_BUILTIN_A
13204 #undef RS6000_BUILTIN_D
13205 #undef RS6000_BUILTIN_H
13206 #undef RS6000_BUILTIN_P
13207 #undef RS6000_BUILTIN_X
13208
13209 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13210 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13211 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13212 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13213 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
13214 { MASK, ICODE, NAME, ENUM },
13215
13216 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13217 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13218 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13219 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13220
13221 static const struct builtin_description bdesc_abs[] =
13222 {
13223 #include "rs6000-builtin.def"
13224 };
13225
13226 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
13227 foo (VECa). */
13228
13229 #undef RS6000_BUILTIN_0
13230 #undef RS6000_BUILTIN_1
13231 #undef RS6000_BUILTIN_2
13232 #undef RS6000_BUILTIN_3
13233 #undef RS6000_BUILTIN_A
13234 #undef RS6000_BUILTIN_D
13235 #undef RS6000_BUILTIN_H
13236 #undef RS6000_BUILTIN_P
13237 #undef RS6000_BUILTIN_X
13238
13239 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13240 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
13241 { MASK, ICODE, NAME, ENUM },
13242
13243 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13244 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13245 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13246 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13247 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13248 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13249 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13250
13251 static const struct builtin_description bdesc_1arg[] =
13252 {
13253 #include "rs6000-builtin.def"
13254 };
13255
13256 /* Simple no-argument operations: result = __builtin_darn_32 (). */
13257
13258 #undef RS6000_BUILTIN_0
13259 #undef RS6000_BUILTIN_1
13260 #undef RS6000_BUILTIN_2
13261 #undef RS6000_BUILTIN_3
13262 #undef RS6000_BUILTIN_A
13263 #undef RS6000_BUILTIN_D
13264 #undef RS6000_BUILTIN_H
13265 #undef RS6000_BUILTIN_P
13266 #undef RS6000_BUILTIN_X
13267
13268 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
13269 { MASK, ICODE, NAME, ENUM },
13270
13271 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13272 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13273 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13274 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13275 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13276 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13277 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13278 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13279
13280 static const struct builtin_description bdesc_0arg[] =
13281 {
13282 #include "rs6000-builtin.def"
13283 };
13284
13285 /* HTM builtins. */
13286 #undef RS6000_BUILTIN_0
13287 #undef RS6000_BUILTIN_1
13288 #undef RS6000_BUILTIN_2
13289 #undef RS6000_BUILTIN_3
13290 #undef RS6000_BUILTIN_A
13291 #undef RS6000_BUILTIN_D
13292 #undef RS6000_BUILTIN_H
13293 #undef RS6000_BUILTIN_P
13294 #undef RS6000_BUILTIN_X
13295
13296 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13297 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13298 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13299 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13300 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13301 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13302 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13303 { MASK, ICODE, NAME, ENUM },
13304
13305 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13306 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13307
13308 static const struct builtin_description bdesc_htm[] =
13309 {
13310 #include "rs6000-builtin.def"
13311 };
13312
13313 #undef RS6000_BUILTIN_0
13314 #undef RS6000_BUILTIN_1
13315 #undef RS6000_BUILTIN_2
13316 #undef RS6000_BUILTIN_3
13317 #undef RS6000_BUILTIN_A
13318 #undef RS6000_BUILTIN_D
13319 #undef RS6000_BUILTIN_H
13320 #undef RS6000_BUILTIN_P
13321
13322 /* Return true if a builtin function is overloaded. */
13323 bool
13324 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13325 {
13326 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13327 }
13328
13329 const char *
13330 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13331 {
13332 return rs6000_builtin_info[(int)fncode].name;
13333 }
13334
13335 /* Expand an expression EXP that calls a builtin without arguments. */
13336 static rtx
13337 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13338 {
13339 rtx pat;
13340 machine_mode tmode = insn_data[icode].operand[0].mode;
13341
13342 if (icode == CODE_FOR_nothing)
13343 /* Builtin not supported on this processor. */
13344 return 0;
13345
13346 if (icode == CODE_FOR_rs6000_mffsl
13347 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13348 {
13349 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
13350 return const0_rtx;
13351 }
13352
13353 if (target == 0
13354 || GET_MODE (target) != tmode
13355 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13356 target = gen_reg_rtx (tmode);
13357
13358 pat = GEN_FCN (icode) (target);
13359 if (! pat)
13360 return 0;
13361 emit_insn (pat);
13362
13363 return target;
13364 }
13365
13366
13367 static rtx
13368 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13369 {
13370 rtx pat;
13371 tree arg0 = CALL_EXPR_ARG (exp, 0);
13372 tree arg1 = CALL_EXPR_ARG (exp, 1);
13373 rtx op0 = expand_normal (arg0);
13374 rtx op1 = expand_normal (arg1);
13375 machine_mode mode0 = insn_data[icode].operand[0].mode;
13376 machine_mode mode1 = insn_data[icode].operand[1].mode;
13377
13378 if (icode == CODE_FOR_nothing)
13379 /* Builtin not supported on this processor. */
13380 return 0;
13381
13382 /* If we got invalid arguments bail out before generating bad rtl. */
13383 if (arg0 == error_mark_node || arg1 == error_mark_node)
13384 return const0_rtx;
13385
13386 if (!CONST_INT_P (op0)
13387 || INTVAL (op0) > 255
13388 || INTVAL (op0) < 0)
13389 {
13390 error ("argument 1 must be an 8-bit field value");
13391 return const0_rtx;
13392 }
13393
13394 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13395 op0 = copy_to_mode_reg (mode0, op0);
13396
13397 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13398 op1 = copy_to_mode_reg (mode1, op1);
13399
13400 pat = GEN_FCN (icode) (op0, op1);
13401 if (!pat)
13402 return const0_rtx;
13403 emit_insn (pat);
13404
13405 return NULL_RTX;
13406 }
13407
13408 static rtx
13409 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13410 {
13411 rtx pat;
13412 tree arg0 = CALL_EXPR_ARG (exp, 0);
13413 rtx op0 = expand_normal (arg0);
13414
13415 if (icode == CODE_FOR_nothing)
13416 /* Builtin not supported on this processor. */
13417 return 0;
13418
13419 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13420 {
13421 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13422 "%<-msoft-float%>");
13423 return const0_rtx;
13424 }
13425
13426 /* If we got invalid arguments bail out before generating bad rtl. */
13427 if (arg0 == error_mark_node)
13428 return const0_rtx;
13429
13430 /* Only allow bit numbers 0 to 31. */
13431 if (!u5bit_cint_operand (op0, VOIDmode))
13432 {
13433 error ("Argument must be a constant between 0 and 31.");
13434 return const0_rtx;
13435 }
13436
13437 pat = GEN_FCN (icode) (op0);
13438 if (!pat)
13439 return const0_rtx;
13440 emit_insn (pat);
13441
13442 return NULL_RTX;
13443 }
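/* Hypothetical source-level uses accepted by the check above (the
   operand selects one FPSCR bit, numbered 0..31):

     __builtin_mtfsb0 (30);   // clear FPSCR bit 30
     __builtin_mtfsb1 (31);   // set FPSCR bit 31

   A non-constant or out-of-range bit number is diagnosed instead of
   silently generating a bad mtfsb0/mtfsb1 instruction.  */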
13444
13445 static rtx
13446 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13447 {
13448 rtx pat;
13449 tree arg0 = CALL_EXPR_ARG (exp, 0);
13450 rtx op0 = expand_normal (arg0);
13451 machine_mode mode0 = insn_data[icode].operand[0].mode;
13452
13453 if (icode == CODE_FOR_nothing)
13454 /* Builtin not supported on this processor. */
13455 return 0;
13456
13457 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13458 {
13459 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13460 return const0_rtx;
13461 }
13462
13463 /* If we got invalid arguments bail out before generating bad rtl. */
13464 if (arg0 == error_mark_node)
13465 return const0_rtx;
13466
13467 /* If the argument is a constant, check the range. Argument can only be a
13468 2-bit value. Unfortunately, we cannot check the range of the value at
13469 compile time if the argument is a variable. The least significant two
13470 bits of the argument, regardless of type, are used to set the rounding
13471 mode. All other bits are ignored. */
13472 if (CONST_INT_P (op0) && !const_0_to_3_operand(op0, VOIDmode))
13473 {
13474 error ("Argument must be a value between 0 and 3.");
13475 return const0_rtx;
13476 }
13477
13478 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13479 op0 = copy_to_mode_reg (mode0, op0);
13480
13481 pat = GEN_FCN (icode) (op0);
13482 if (!pat)
13483 return const0_rtx;
13484 emit_insn (pat);
13485
13486 return NULL_RTX;
13487 }

13488 static rtx
13489 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13490 {
13491 rtx pat;
13492 tree arg0 = CALL_EXPR_ARG (exp, 0);
13493 rtx op0 = expand_normal (arg0);
13494 machine_mode mode0 = insn_data[icode].operand[0].mode;
13495
13496 if (TARGET_32BIT)
13497 /* Builtin not supported in 32-bit mode. */
13498 fatal_error (input_location,
13499 "%<__builtin_set_fpscr_drn%> is not supported "
13500 "in 32-bit mode.");
13501
13502 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13503 {
13504 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13505 return const0_rtx;
13506 }
13507
13508 if (icode == CODE_FOR_nothing)
13509 /* Builtin not supported on this processor. */
13510 return 0;
13511
13512 /* If we got invalid arguments bail out before generating bad rtl. */
13513 if (arg0 == error_mark_node)
13514 return const0_rtx;
13515
13516 /* If the argument is a constant, check the range. The argument can
13517 only be a 3-bit value. Unfortunately, we cannot check the range of
13518 the value at compile time if the argument is a variable. The least
13519 significant three bits of the argument, regardless of type, are used
13520 to set the rounding mode. All other bits are ignored. */
13521 if (CONST_INT_P (op0) && !const_0_to_7_operand(op0, VOIDmode))
13522 {
13523 error ("Argument must be a value between 0 and 7.");
13524 return const0_rtx;
13525 }
13526
13527 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13528 op0 = copy_to_mode_reg (mode0, op0);
13529
13530 pat = GEN_FCN (icode) (op0);
13531 if (! pat)
13532 return const0_rtx;
13533 emit_insn (pat);
13534
13535 return NULL_RTX;
13536 }
13537
13538 static rtx
13539 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13540 {
13541 rtx pat;
13542 tree arg0 = CALL_EXPR_ARG (exp, 0);
13543 rtx op0 = expand_normal (arg0);
13544 machine_mode tmode = insn_data[icode].operand[0].mode;
13545 machine_mode mode0 = insn_data[icode].operand[1].mode;
13546
13547 if (icode == CODE_FOR_nothing)
13548 /* Builtin not supported on this processor. */
13549 return 0;
13550
13551 /* If we got invalid arguments bail out before generating bad rtl. */
13552 if (arg0 == error_mark_node)
13553 return const0_rtx;
13554
13555 if (icode == CODE_FOR_altivec_vspltisb
13556 || icode == CODE_FOR_altivec_vspltish
13557 || icode == CODE_FOR_altivec_vspltisw)
13558 {
13559 /* Only allow 5-bit *signed* literals. */
13560 if (!CONST_INT_P (op0)
13561 || INTVAL (op0) > 15
13562 || INTVAL (op0) < -16)
13563 {
13564 error ("argument 1 must be a 5-bit signed literal");
13565 return CONST0_RTX (tmode);
13566 }
13567 }
13568
13569 if (target == 0
13570 || GET_MODE (target) != tmode
13571 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13572 target = gen_reg_rtx (tmode);
13573
13574 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13575 op0 = copy_to_mode_reg (mode0, op0);
13576
13577 pat = GEN_FCN (icode) (target, op0);
13578 if (! pat)
13579 return 0;
13580 emit_insn (pat);
13581
13582 return target;
13583 }
13584
13585 static rtx
13586 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13587 {
13588 rtx pat, scratch1, scratch2;
13589 tree arg0 = CALL_EXPR_ARG (exp, 0);
13590 rtx op0 = expand_normal (arg0);
13591 machine_mode tmode = insn_data[icode].operand[0].mode;
13592 machine_mode mode0 = insn_data[icode].operand[1].mode;
13593
13594 /* If we have invalid arguments, bail out before generating bad rtl. */
13595 if (arg0 == error_mark_node)
13596 return const0_rtx;
13597
13598 if (target == 0
13599 || GET_MODE (target) != tmode
13600 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13601 target = gen_reg_rtx (tmode);
13602
13603 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13604 op0 = copy_to_mode_reg (mode0, op0);
13605
13606 scratch1 = gen_reg_rtx (mode0);
13607 scratch2 = gen_reg_rtx (mode0);
13608
13609 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13610 if (! pat)
13611 return 0;
13612 emit_insn (pat);
13613
13614 return target;
13615 }
13616
13617 static rtx
13618 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13619 {
13620 rtx pat;
13621 tree arg0 = CALL_EXPR_ARG (exp, 0);
13622 tree arg1 = CALL_EXPR_ARG (exp, 1);
13623 rtx op0 = expand_normal (arg0);
13624 rtx op1 = expand_normal (arg1);
13625 machine_mode tmode = insn_data[icode].operand[0].mode;
13626 machine_mode mode0 = insn_data[icode].operand[1].mode;
13627 machine_mode mode1 = insn_data[icode].operand[2].mode;
13628
13629 if (icode == CODE_FOR_nothing)
13630 /* Builtin not supported on this processor. */
13631 return 0;
13632
13633 /* If we got invalid arguments bail out before generating bad rtl. */
13634 if (arg0 == error_mark_node || arg1 == error_mark_node)
13635 return const0_rtx;
13636
13637 if (icode == CODE_FOR_unpackv1ti
13638 || icode == CODE_FOR_unpackkf
13639 || icode == CODE_FOR_unpacktf
13640 || icode == CODE_FOR_unpackif
13641 || icode == CODE_FOR_unpacktd)
13642 {
13643 /* Only allow 1-bit unsigned literals. */
13644 STRIP_NOPS (arg1);
13645 if (TREE_CODE (arg1) != INTEGER_CST
13646 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13647 {
13648 error ("argument 2 must be a 1-bit unsigned literal");
13649 return CONST0_RTX (tmode);
13650 }
13651 }
13652 else if (icode == CODE_FOR_altivec_vspltw)
13653 {
13654 /* Only allow 2-bit unsigned literals. */
13655 STRIP_NOPS (arg1);
13656 if (TREE_CODE (arg1) != INTEGER_CST
13657 || TREE_INT_CST_LOW (arg1) & ~3)
13658 {
13659 error ("argument 2 must be a 2-bit unsigned literal");
13660 return CONST0_RTX (tmode);
13661 }
13662 }
13663 else if (icode == CODE_FOR_altivec_vsplth)
13664 {
13665 /* Only allow 3-bit unsigned literals. */
13666 STRIP_NOPS (arg1);
13667 if (TREE_CODE (arg1) != INTEGER_CST
13668 || TREE_INT_CST_LOW (arg1) & ~7)
13669 {
13670 error ("argument 2 must be a 3-bit unsigned literal");
13671 return CONST0_RTX (tmode);
13672 }
13673 }
13674 else if (icode == CODE_FOR_altivec_vspltb)
13675 {
13676 /* Only allow 4-bit unsigned literals. */
13677 STRIP_NOPS (arg1);
13678 if (TREE_CODE (arg1) != INTEGER_CST
13679 || TREE_INT_CST_LOW (arg1) & ~15)
13680 {
13681 error ("argument 2 must be a 4-bit unsigned literal");
13682 return CONST0_RTX (tmode);
13683 }
13684 }
13685 else if (icode == CODE_FOR_altivec_vcfux
13686 || icode == CODE_FOR_altivec_vcfsx
13687 || icode == CODE_FOR_altivec_vctsxs
13688 || icode == CODE_FOR_altivec_vctuxs)
13689 {
13690 /* Only allow 5-bit unsigned literals. */
13691 STRIP_NOPS (arg1);
13692 if (TREE_CODE (arg1) != INTEGER_CST
13693 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13694 {
13695 error ("argument 2 must be a 5-bit unsigned literal");
13696 return CONST0_RTX (tmode);
13697 }
13698 }
13699 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13700 || icode == CODE_FOR_dfptstsfi_lt_dd
13701 || icode == CODE_FOR_dfptstsfi_gt_dd
13702 || icode == CODE_FOR_dfptstsfi_unordered_dd
13703 || icode == CODE_FOR_dfptstsfi_eq_td
13704 || icode == CODE_FOR_dfptstsfi_lt_td
13705 || icode == CODE_FOR_dfptstsfi_gt_td
13706 || icode == CODE_FOR_dfptstsfi_unordered_td)
13707 {
13708 /* Only allow 6-bit unsigned literals. */
13709 STRIP_NOPS (arg0);
13710 if (TREE_CODE (arg0) != INTEGER_CST
13711 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13712 {
13713 error ("argument 1 must be a 6-bit unsigned literal");
13714 return CONST0_RTX (tmode);
13715 }
13716 }
13717 else if (icode == CODE_FOR_xststdcqp_kf
13718 || icode == CODE_FOR_xststdcqp_tf
13719 || icode == CODE_FOR_xststdcdp
13720 || icode == CODE_FOR_xststdcsp
13721 || icode == CODE_FOR_xvtstdcdp
13722 || icode == CODE_FOR_xvtstdcsp)
13723 {
13724 /* Only allow 7-bit unsigned literals. */
13725 STRIP_NOPS (arg1);
13726 if (TREE_CODE (arg1) != INTEGER_CST
13727 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13728 {
13729 error ("argument 2 must be a 7-bit unsigned literal");
13730 return CONST0_RTX (tmode);
13731 }
13732 }
13733
13734 if (target == 0
13735 || GET_MODE (target) != tmode
13736 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13737 target = gen_reg_rtx (tmode);
13738
13739 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13740 op0 = copy_to_mode_reg (mode0, op0);
13741 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13742 op1 = copy_to_mode_reg (mode1, op1);
13743
13744 pat = GEN_FCN (icode) (target, op0, op1);
13745 if (! pat)
13746 return 0;
13747 emit_insn (pat);
13748
13749 return target;
13750 }
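/* For instance, vspltw encodes its element selector directly in the
   instruction, which is why the 2-bit literal check above exists
   (hypothetical user code):

     vector int v, a, b;
     a = vec_splat (v, 3);    // OK: selector is a 2-bit literal
     b = vec_splat (v, n);    // error: "argument 2 must be a 2-bit
                              //         unsigned literal"  */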
13751
13752 static rtx
13753 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13754 {
13755 rtx pat, scratch;
13756 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13757 tree arg0 = CALL_EXPR_ARG (exp, 1);
13758 tree arg1 = CALL_EXPR_ARG (exp, 2);
13759 rtx op0 = expand_normal (arg0);
13760 rtx op1 = expand_normal (arg1);
13761 machine_mode tmode = SImode;
13762 machine_mode mode0 = insn_data[icode].operand[1].mode;
13763 machine_mode mode1 = insn_data[icode].operand[2].mode;
13764 int cr6_form_int;
13765
13766 if (TREE_CODE (cr6_form) != INTEGER_CST)
13767 {
13768 error ("argument 1 of %qs must be a constant",
13769 "__builtin_altivec_predicate");
13770 return const0_rtx;
13771 }
13772 else
13773 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13774
13775 gcc_assert (mode0 == mode1);
13776
13777 /* If we have invalid arguments, bail out before generating bad rtl. */
13778 if (arg0 == error_mark_node || arg1 == error_mark_node)
13779 return const0_rtx;
13780
13781 if (target == 0
13782 || GET_MODE (target) != tmode
13783 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13784 target = gen_reg_rtx (tmode);
13785
13786 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13787 op0 = copy_to_mode_reg (mode0, op0);
13788 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13789 op1 = copy_to_mode_reg (mode1, op1);
13790
  /* Note that for many of the relevant operations (e.g. cmpne or
     cmpeq) with float or double operands, it would make more sense
     for the mode of the allocated scratch register to be a vector
     of integers.  But the choice to copy the mode of operand 0 was
     made long ago and there are no plans to change it.  */
13796 scratch = gen_reg_rtx (mode0);
13797
13798 pat = GEN_FCN (icode) (scratch, op0, op1);
13799 if (! pat)
13800 return 0;
13801 emit_insn (pat);
13802
13803 /* The vec_any* and vec_all* predicates use the same opcodes for two
13804 different operations, but the bits in CR6 will be different
13805 depending on what information we want. So we have to play tricks
13806 with CR6 to get the right bits out.
13807
13808 If you think this is disgusting, look at the specs for the
13809 AltiVec predicates. */
13810
13811 switch (cr6_form_int)
13812 {
13813 case 0:
13814 emit_insn (gen_cr6_test_for_zero (target));
13815 break;
13816 case 1:
13817 emit_insn (gen_cr6_test_for_zero_reverse (target));
13818 break;
13819 case 2:
13820 emit_insn (gen_cr6_test_for_lt (target));
13821 break;
13822 case 3:
13823 emit_insn (gen_cr6_test_for_lt_reverse (target));
13824 break;
13825 default:
13826 error ("argument 1 of %qs is out of range",
13827 "__builtin_altivec_predicate");
13828 break;
13829 }
13830
13831 return target;
13832 }
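/* For orientation, the cr6_form values handled above correspond to
   the __CR6_* constants that altivec.h passes as the first argument
   of the *_p predicate builtins (a sketch, not the verbatim header):

     #define __CR6_EQ       0
     #define __CR6_EQ_REV   1
     #define __CR6_LT       2
     #define __CR6_LT_REV   3

     vec_all_eq (a, b)  ->  __builtin_vec_vcmpeq_p (__CR6_LT, a, b)
     vec_any_eq (a, b)  ->  __builtin_vec_vcmpeq_p (__CR6_EQ_REV, a, b)

   so case 2 (gen_cr6_test_for_lt) implements the "all" flavor of an
   equality test and case 1 the "any" flavor.  */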
13833
13834 rtx
13835 swap_endian_selector_for_mode (machine_mode mode)
13836 {
13837 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13838 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13839 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13840 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13841
13842 unsigned int *swaparray, i;
13843 rtx perm[16];
13844
13845 switch (mode)
13846 {
13847 case E_V1TImode:
13848 swaparray = swap1;
13849 break;
13850 case E_V2DFmode:
13851 case E_V2DImode:
13852 swaparray = swap2;
13853 break;
13854 case E_V4SFmode:
13855 case E_V4SImode:
13856 swaparray = swap4;
13857 break;
13858 case E_V8HImode:
13859 swaparray = swap8;
13860 break;
13861 default:
13862 gcc_unreachable ();
13863 }
13864
13865 for (i = 0; i < 16; ++i)
13866 perm[i] = GEN_INT (swaparray[i]);
13867
13868 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13869 gen_rtvec_v (16, perm)));
13870 }
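/* For example, for V4SImode the selector built above is the byte
   permutation { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 }, which,
   when fed to a vperm-style permute, reverses the bytes within each
   32-bit element, turning 0x00010203 into 0x03020100.  */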
13871
13872 static rtx
13873 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13874 {
13875 rtx pat, addr;
13876 tree arg0 = CALL_EXPR_ARG (exp, 0);
13877 tree arg1 = CALL_EXPR_ARG (exp, 1);
13878 machine_mode tmode = insn_data[icode].operand[0].mode;
13879 machine_mode mode0 = Pmode;
13880 machine_mode mode1 = Pmode;
13881 rtx op0 = expand_normal (arg0);
13882 rtx op1 = expand_normal (arg1);
13883
13884 if (icode == CODE_FOR_nothing)
13885 /* Builtin not supported on this processor. */
13886 return 0;
13887
13888 /* If we got invalid arguments bail out before generating bad rtl. */
13889 if (arg0 == error_mark_node || arg1 == error_mark_node)
13890 return const0_rtx;
13891
13892 if (target == 0
13893 || GET_MODE (target) != tmode
13894 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13895 target = gen_reg_rtx (tmode);
13896
13897 op1 = copy_to_mode_reg (mode1, op1);
13898
13899 /* For LVX, express the RTL accurately by ANDing the address with -16.
13900 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13901 so the raw address is fine. */
13902 if (icode == CODE_FOR_altivec_lvx_v1ti
13903 || icode == CODE_FOR_altivec_lvx_v2df
13904 || icode == CODE_FOR_altivec_lvx_v2di
13905 || icode == CODE_FOR_altivec_lvx_v4sf
13906 || icode == CODE_FOR_altivec_lvx_v4si
13907 || icode == CODE_FOR_altivec_lvx_v8hi
13908 || icode == CODE_FOR_altivec_lvx_v16qi)
13909 {
13910 rtx rawaddr;
13911 if (op0 == const0_rtx)
13912 rawaddr = op1;
13913 else
13914 {
13915 op0 = copy_to_mode_reg (mode0, op0);
13916 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13917 }
13918 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13919 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13920
13921 emit_insn (gen_rtx_SET (target, addr));
13922 }
13923 else
13924 {
13925 if (op0 == const0_rtx)
13926 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13927 else
13928 {
13929 op0 = copy_to_mode_reg (mode0, op0);
13930 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13931 gen_rtx_PLUS (Pmode, op1, op0));
13932 }
13933
13934 pat = GEN_FCN (icode) (target, addr);
13935 if (! pat)
13936 return 0;
13937 emit_insn (pat);
13938 }
13939
13940 return target;
13941 }
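/* A note on the AND emitted above for the plain LVX forms: the lvx
   instruction ignores the low four bits of its effective address, so
   a load through a pointer value such as 0x1007 really reads the
   16-byte block at 0x1000.  Making that truncation explicit as
   (and addr -16) in the RTL tells the optimizers exactly which bytes
   the load can touch.  */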
13942
13943 static rtx
13944 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13945 {
13946 rtx pat;
13947 tree arg0 = CALL_EXPR_ARG (exp, 0);
13948 tree arg1 = CALL_EXPR_ARG (exp, 1);
13949 tree arg2 = CALL_EXPR_ARG (exp, 2);
13950 rtx op0 = expand_normal (arg0);
13951 rtx op1 = expand_normal (arg1);
13952 rtx op2 = expand_normal (arg2);
13953 machine_mode mode0 = insn_data[icode].operand[0].mode;
13954 machine_mode mode1 = insn_data[icode].operand[1].mode;
13955 machine_mode mode2 = insn_data[icode].operand[2].mode;
13956
13957 if (icode == CODE_FOR_nothing)
13958 /* Builtin not supported on this processor. */
13959 return NULL_RTX;
13960
13961 /* If we got invalid arguments bail out before generating bad rtl. */
13962 if (arg0 == error_mark_node
13963 || arg1 == error_mark_node
13964 || arg2 == error_mark_node)
13965 return NULL_RTX;
13966
13967 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13968 op0 = copy_to_mode_reg (mode0, op0);
13969 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13970 op1 = copy_to_mode_reg (mode1, op1);
13971 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13972 op2 = copy_to_mode_reg (mode2, op2);
13973
13974 pat = GEN_FCN (icode) (op0, op1, op2);
13975 if (pat)
13976 emit_insn (pat);
13977
13978 return NULL_RTX;
13979 }
13980
13981 static rtx
13982 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13983 {
13984 tree arg0 = CALL_EXPR_ARG (exp, 0);
13985 tree arg1 = CALL_EXPR_ARG (exp, 1);
13986 tree arg2 = CALL_EXPR_ARG (exp, 2);
13987 rtx op0 = expand_normal (arg0);
13988 rtx op1 = expand_normal (arg1);
13989 rtx op2 = expand_normal (arg2);
13990 rtx pat, addr, rawaddr;
13991 machine_mode tmode = insn_data[icode].operand[0].mode;
13992 machine_mode smode = insn_data[icode].operand[1].mode;
13993 machine_mode mode1 = Pmode;
13994 machine_mode mode2 = Pmode;
13995
  /* If we got invalid arguments, bail out before generating bad rtl.  */
13997 if (arg0 == error_mark_node
13998 || arg1 == error_mark_node
13999 || arg2 == error_mark_node)
14000 return const0_rtx;
14001
14002 op2 = copy_to_mode_reg (mode2, op2);
14003
14004 /* For STVX, express the RTL accurately by ANDing the address with -16.
14005 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14006 so the raw address is fine. */
14007 if (icode == CODE_FOR_altivec_stvx_v2df
14008 || icode == CODE_FOR_altivec_stvx_v2di
14009 || icode == CODE_FOR_altivec_stvx_v4sf
14010 || icode == CODE_FOR_altivec_stvx_v4si
14011 || icode == CODE_FOR_altivec_stvx_v8hi
14012 || icode == CODE_FOR_altivec_stvx_v16qi)
14013 {
14014 if (op1 == const0_rtx)
14015 rawaddr = op2;
14016 else
14017 {
14018 op1 = copy_to_mode_reg (mode1, op1);
14019 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14020 }
14021
14022 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14023 addr = gen_rtx_MEM (tmode, addr);
14024
14025 op0 = copy_to_mode_reg (tmode, op0);
14026
14027 emit_insn (gen_rtx_SET (addr, op0));
14028 }
14029 else
14030 {
14031 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14032 op0 = copy_to_mode_reg (smode, op0);
14033
14034 if (op1 == const0_rtx)
14035 addr = gen_rtx_MEM (tmode, op2);
14036 else
14037 {
14038 op1 = copy_to_mode_reg (mode1, op1);
14039 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14040 }
14041
14042 pat = GEN_FCN (icode) (addr, op0);
14043 if (pat)
14044 emit_insn (pat);
14045 }
14046
14047 return NULL_RTX;
14048 }
14049
14050 /* Return the appropriate SPR number associated with the given builtin. */
14051 static inline HOST_WIDE_INT
14052 htm_spr_num (enum rs6000_builtins code)
14053 {
14054 if (code == HTM_BUILTIN_GET_TFHAR
14055 || code == HTM_BUILTIN_SET_TFHAR)
14056 return TFHAR_SPR;
14057 else if (code == HTM_BUILTIN_GET_TFIAR
14058 || code == HTM_BUILTIN_SET_TFIAR)
14059 return TFIAR_SPR;
14060 else if (code == HTM_BUILTIN_GET_TEXASR
14061 || code == HTM_BUILTIN_SET_TEXASR)
14062 return TEXASR_SPR;
14063 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14064 || code == HTM_BUILTIN_SET_TEXASRU);
14065 return TEXASRU_SPR;
14066 }
14067
14068 /* Return the appropriate SPR regno associated with the given builtin. */
14069 static inline HOST_WIDE_INT
14070 htm_spr_regno (enum rs6000_builtins code)
14071 {
14072 if (code == HTM_BUILTIN_GET_TFHAR
14073 || code == HTM_BUILTIN_SET_TFHAR)
14074 return TFHAR_REGNO;
14075 else if (code == HTM_BUILTIN_GET_TFIAR
14076 || code == HTM_BUILTIN_SET_TFIAR)
14077 return TFIAR_REGNO;
14078 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14079 || code == HTM_BUILTIN_SET_TEXASR
14080 || code == HTM_BUILTIN_GET_TEXASRU
14081 || code == HTM_BUILTIN_SET_TEXASRU);
14082 return TEXASR_REGNO;
14083 }
14084
14085 /* Return the correct ICODE value depending on whether we are
14086 setting or reading the HTM SPRs. */
14087 static inline enum insn_code
14088 rs6000_htm_spr_icode (bool nonvoid)
14089 {
14090 if (nonvoid)
14091 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14092 else
14093 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14094 }
14095
14096 /* Expand the HTM builtin in EXP and store the result in TARGET.
14097 Store true in *EXPANDEDP if we found a builtin to expand. */
14098 static rtx
14099 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14100 {
14101 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14102 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14103 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14104 const struct builtin_description *d;
14105 size_t i;
14106
14107 *expandedp = true;
14108
14109 if (!TARGET_POWERPC64
14110 && (fcode == HTM_BUILTIN_TABORTDC
14111 || fcode == HTM_BUILTIN_TABORTDCI))
14112 {
14113 size_t uns_fcode = (size_t)fcode;
14114 const char *name = rs6000_builtin_info[uns_fcode].name;
14115 error ("builtin %qs is only valid in 64-bit mode", name);
14116 return const0_rtx;
14117 }
14118
14119 /* Expand the HTM builtins. */
14120 d = bdesc_htm;
14121 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
14122 if (d->code == fcode)
14123 {
14124 rtx op[MAX_HTM_OPERANDS], pat;
14125 int nopnds = 0;
14126 tree arg;
14127 call_expr_arg_iterator iter;
14128 unsigned attr = rs6000_builtin_info[fcode].attr;
14129 enum insn_code icode = d->icode;
14130 const struct insn_operand_data *insn_op;
14131 bool uses_spr = (attr & RS6000_BTC_SPR);
14132 rtx cr = NULL_RTX;
14133
14134 if (uses_spr)
14135 icode = rs6000_htm_spr_icode (nonvoid);
14136 insn_op = &insn_data[icode].operand[0];
14137
14138 if (nonvoid)
14139 {
14140 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
14141 if (!target
14142 || GET_MODE (target) != tmode
14143 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
14144 target = gen_reg_rtx (tmode);
14145 if (uses_spr)
14146 op[nopnds++] = target;
14147 }
14148
14149 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
14150 {
14151 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
14152 return const0_rtx;
14153
14154 insn_op = &insn_data[icode].operand[nopnds];
14155
14156 op[nopnds] = expand_normal (arg);
14157
14158 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
14159 {
14160 if (!strcmp (insn_op->constraint, "n"))
14161 {
14162 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
14163 if (!CONST_INT_P (op[nopnds]))
14164 error ("argument %d must be an unsigned literal", arg_num);
14165 else
14166 error ("argument %d is an unsigned literal that is "
14167 "out of range", arg_num);
14168 return const0_rtx;
14169 }
14170 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
14171 }
14172
14173 nopnds++;
14174 }
14175
14176 /* Handle the builtins for extended mnemonics. These accept
14177 no arguments, but map to builtins that take arguments. */
14178 switch (fcode)
14179 {
14180 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
14181 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
14182 op[nopnds++] = GEN_INT (1);
14183 if (flag_checking)
14184 attr |= RS6000_BTC_UNARY;
14185 break;
14186 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
14187 op[nopnds++] = GEN_INT (0);
14188 if (flag_checking)
14189 attr |= RS6000_BTC_UNARY;
14190 break;
14191 default:
14192 break;
14193 }
14194
14195 /* If this builtin accesses SPRs, then pass in the appropriate
14196 SPR number and SPR regno as the last two operands. */
14197 if (uses_spr)
14198 {
14199 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
14200 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
14201 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
14202 }
14203 /* If this builtin accesses a CR, then pass in a scratch
14204 CR as the last operand. */
14205 else if (attr & RS6000_BTC_CR)
	{
	  cr = gen_reg_rtx (CCmode);
	  op[nopnds++] = cr;
	}
14209
14210 if (flag_checking)
14211 {
14212 int expected_nopnds = 0;
14213 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
14214 expected_nopnds = 1;
14215 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
14216 expected_nopnds = 2;
14217 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
14218 expected_nopnds = 3;
14219 if (!(attr & RS6000_BTC_VOID))
14220 expected_nopnds += 1;
14221 if (uses_spr)
14222 expected_nopnds += 2;
14223
14224 gcc_assert (nopnds == expected_nopnds
14225 && nopnds <= MAX_HTM_OPERANDS);
14226 }
14227
14228 switch (nopnds)
14229 {
14230 case 1:
14231 pat = GEN_FCN (icode) (op[0]);
14232 break;
14233 case 2:
14234 pat = GEN_FCN (icode) (op[0], op[1]);
14235 break;
14236 case 3:
14237 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
14238 break;
14239 case 4:
14240 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
14241 break;
14242 default:
14243 gcc_unreachable ();
14244 }
14245 if (!pat)
14246 return NULL_RTX;
14247 emit_insn (pat);
14248
14249 if (attr & RS6000_BTC_CR)
14250 {
14251 if (fcode == HTM_BUILTIN_TBEGIN)
14252 {
	      /* Emit code to set TARGET to true or false depending on
		 whether the tbegin. instruction succeeded or failed
		 to start a transaction.  We do this by placing the 1's
		 complement of CR's EQ bit into TARGET.  */
14257 rtx scratch = gen_reg_rtx (SImode);
14258 emit_insn (gen_rtx_SET (scratch,
14259 gen_rtx_EQ (SImode, cr,
14260 const0_rtx)));
14261 emit_insn (gen_rtx_SET (target,
14262 gen_rtx_XOR (SImode, scratch,
14263 GEN_INT (1))));
14264 }
14265 else
14266 {
14267 /* Emit code to copy the 4-bit condition register field
14268 CR into the least significant end of register TARGET. */
14269 rtx scratch1 = gen_reg_rtx (SImode);
14270 rtx scratch2 = gen_reg_rtx (SImode);
14271 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
14272 emit_insn (gen_movcc (subreg, cr));
14273 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
14274 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
14275 }
14276 }
14277
14278 if (nonvoid)
14279 return target;
14280 return const0_rtx;
14281 }
14282
14283 *expandedp = false;
14284 return NULL_RTX;
14285 }
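/* Typical source-level use of the tbegin path expanded above, as a
   sketch (handle_failure is a hypothetical fallback routine):

     if (__builtin_tbegin (0))
       {
	 ... transactional code ...
	 __builtin_tend (0);
       }
     else
       handle_failure ();

   The XOR against CR's EQ bit arranged above is what makes
   __builtin_tbegin return nonzero exactly when the transaction
   started.  */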
14286
14287 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14288
14289 static rtx
14290 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14291 rtx target)
14292 {
14293 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14294 if (fcode == RS6000_BUILTIN_CPU_INIT)
14295 return const0_rtx;
14296
14297 if (target == 0 || GET_MODE (target) != SImode)
14298 target = gen_reg_rtx (SImode);
14299
14300 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14301 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
  /* Handling of the target_clones attribute creates an ARRAY_REF instead
     of a STRING_CST; convert it back to a STRING_CST.  */
14304 if (TREE_CODE (arg) == ARRAY_REF
14305 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14306 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14307 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14308 arg = TREE_OPERAND (arg, 0);
14309
14310 if (TREE_CODE (arg) != STRING_CST)
14311 {
14312 error ("builtin %qs only accepts a string argument",
14313 rs6000_builtin_info[(size_t) fcode].name);
14314 return const0_rtx;
14315 }
14316
14317 if (fcode == RS6000_BUILTIN_CPU_IS)
14318 {
14319 const char *cpu = TREE_STRING_POINTER (arg);
14320 rtx cpuid = NULL_RTX;
14321 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14322 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14323 {
14324 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14325 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14326 break;
14327 }
14328 if (cpuid == NULL_RTX)
14329 {
14330 /* Invalid CPU argument. */
14331 error ("cpu %qs is an invalid argument to builtin %qs",
14332 cpu, rs6000_builtin_info[(size_t) fcode].name);
14333 return const0_rtx;
14334 }
14335
14336 rtx platform = gen_reg_rtx (SImode);
14337 rtx tcbmem = gen_const_mem (SImode,
14338 gen_rtx_PLUS (Pmode,
14339 gen_rtx_REG (Pmode, TLS_REGNUM),
14340 GEN_INT (TCB_PLATFORM_OFFSET)));
14341 emit_move_insn (platform, tcbmem);
14342 emit_insn (gen_eqsi3 (target, platform, cpuid));
14343 }
14344 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14345 {
14346 const char *hwcap = TREE_STRING_POINTER (arg);
14347 rtx mask = NULL_RTX;
14348 int hwcap_offset;
14349 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14350 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14351 {
14352 mask = GEN_INT (cpu_supports_info[i].mask);
14353 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14354 break;
14355 }
14356 if (mask == NULL_RTX)
14357 {
14358 /* Invalid HWCAP argument. */
14359 error ("%s %qs is an invalid argument to builtin %qs",
14360 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14361 return const0_rtx;
14362 }
14363
14364 rtx tcb_hwcap = gen_reg_rtx (SImode);
14365 rtx tcbmem = gen_const_mem (SImode,
14366 gen_rtx_PLUS (Pmode,
14367 gen_rtx_REG (Pmode, TLS_REGNUM),
14368 GEN_INT (hwcap_offset)));
14369 emit_move_insn (tcb_hwcap, tcbmem);
14370 rtx scratch1 = gen_reg_rtx (SImode);
14371 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14372 rtx scratch2 = gen_reg_rtx (SImode);
14373 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14374 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14375 }
14376 else
14377 gcc_unreachable ();
14378
14379 /* Record that we have expanded a CPU builtin, so that we can later
14380 emit a reference to the special symbol exported by LIBC to ensure we
14381 do not link against an old LIBC that doesn't support this feature. */
14382 cpu_builtin_p = true;
14383
14384 #else
14385 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14386 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14387
14388 /* For old LIBCs, always return FALSE. */
14389 emit_move_insn (target, GEN_INT (0));
14390 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14391
14392 return target;
14393 }
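/* Example use of the builtins expanded above, assuming a glibc (2.23
   or newer) that exports the HWCAP bits in the TCB; the helpers named
   here are hypothetical:

     if (__builtin_cpu_supports ("vsx"))
       run_vsx_version ();
     else if (__builtin_cpu_is ("power8"))
       run_power8_version ();

   Each test compiles down to a TCB load plus a compare, as emitted
   above; no runtime library call is involved.  */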
14394
14395 static rtx
14396 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14397 {
14398 rtx pat;
14399 tree arg0 = CALL_EXPR_ARG (exp, 0);
14400 tree arg1 = CALL_EXPR_ARG (exp, 1);
14401 tree arg2 = CALL_EXPR_ARG (exp, 2);
14402 rtx op0 = expand_normal (arg0);
14403 rtx op1 = expand_normal (arg1);
14404 rtx op2 = expand_normal (arg2);
14405 machine_mode tmode = insn_data[icode].operand[0].mode;
14406 machine_mode mode0 = insn_data[icode].operand[1].mode;
14407 machine_mode mode1 = insn_data[icode].operand[2].mode;
14408 machine_mode mode2 = insn_data[icode].operand[3].mode;
14409
14410 if (icode == CODE_FOR_nothing)
14411 /* Builtin not supported on this processor. */
14412 return 0;
14413
14414 /* If we got invalid arguments bail out before generating bad rtl. */
14415 if (arg0 == error_mark_node
14416 || arg1 == error_mark_node
14417 || arg2 == error_mark_node)
14418 return const0_rtx;
14419
  /* Check and prepare the argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     that case.  */
14427 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14428 || icode == CODE_FOR_altivec_vsldoi_v2df
14429 || icode == CODE_FOR_altivec_vsldoi_v4si
14430 || icode == CODE_FOR_altivec_vsldoi_v8hi
14431 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14432 {
14433 /* Only allow 4-bit unsigned literals. */
14434 STRIP_NOPS (arg2);
14435 if (TREE_CODE (arg2) != INTEGER_CST
14436 || TREE_INT_CST_LOW (arg2) & ~0xf)
14437 {
14438 error ("argument 3 must be a 4-bit unsigned literal");
14439 return CONST0_RTX (tmode);
14440 }
14441 }
14442 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14443 || icode == CODE_FOR_vsx_xxpermdi_v2di
14444 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14445 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14446 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14447 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14448 || icode == CODE_FOR_vsx_xxpermdi_v4si
14449 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14450 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14451 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14452 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14453 || icode == CODE_FOR_vsx_xxsldwi_v4si
14454 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14455 || icode == CODE_FOR_vsx_xxsldwi_v2di
14456 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14457 {
14458 /* Only allow 2-bit unsigned literals. */
14459 STRIP_NOPS (arg2);
14460 if (TREE_CODE (arg2) != INTEGER_CST
14461 || TREE_INT_CST_LOW (arg2) & ~0x3)
14462 {
14463 error ("argument 3 must be a 2-bit unsigned literal");
14464 return CONST0_RTX (tmode);
14465 }
14466 }
14467 else if (icode == CODE_FOR_vsx_set_v2df
14468 || icode == CODE_FOR_vsx_set_v2di
14469 || icode == CODE_FOR_bcdadd
14470 || icode == CODE_FOR_bcdadd_lt
14471 || icode == CODE_FOR_bcdadd_eq
14472 || icode == CODE_FOR_bcdadd_gt
14473 || icode == CODE_FOR_bcdsub
14474 || icode == CODE_FOR_bcdsub_lt
14475 || icode == CODE_FOR_bcdsub_eq
14476 || icode == CODE_FOR_bcdsub_gt)
14477 {
14478 /* Only allow 1-bit unsigned literals. */
14479 STRIP_NOPS (arg2);
14480 if (TREE_CODE (arg2) != INTEGER_CST
14481 || TREE_INT_CST_LOW (arg2) & ~0x1)
14482 {
14483 error ("argument 3 must be a 1-bit unsigned literal");
14484 return CONST0_RTX (tmode);
14485 }
14486 }
14487 else if (icode == CODE_FOR_dfp_ddedpd_dd
14488 || icode == CODE_FOR_dfp_ddedpd_td)
14489 {
14490 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14491 STRIP_NOPS (arg0);
14492 if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
14494 {
14495 error ("argument 1 must be 0 or 2");
14496 return CONST0_RTX (tmode);
14497 }
14498 }
14499 else if (icode == CODE_FOR_dfp_denbcd_dd
14500 || icode == CODE_FOR_dfp_denbcd_td)
14501 {
14502 /* Only allow 1-bit unsigned literals. */
14503 STRIP_NOPS (arg0);
14504 if (TREE_CODE (arg0) != INTEGER_CST
14505 || TREE_INT_CST_LOW (arg0) & ~0x1)
14506 {
14507 error ("argument 1 must be a 1-bit unsigned literal");
14508 return CONST0_RTX (tmode);
14509 }
14510 }
14511 else if (icode == CODE_FOR_dfp_dscli_dd
14512 || icode == CODE_FOR_dfp_dscli_td
14513 || icode == CODE_FOR_dfp_dscri_dd
14514 || icode == CODE_FOR_dfp_dscri_td)
14515 {
14516 /* Only allow 6-bit unsigned literals. */
14517 STRIP_NOPS (arg1);
14518 if (TREE_CODE (arg1) != INTEGER_CST
14519 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14520 {
14521 error ("argument 2 must be a 6-bit unsigned literal");
14522 return CONST0_RTX (tmode);
14523 }
14524 }
14525 else if (icode == CODE_FOR_crypto_vshasigmaw
14526 || icode == CODE_FOR_crypto_vshasigmad)
14527 {
      /* Check whether the 2nd and 3rd arguments are integer constants
	 in range, and prepare the arguments.  */
14530 STRIP_NOPS (arg1);
14531 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14532 {
14533 error ("argument 2 must be 0 or 1");
14534 return CONST0_RTX (tmode);
14535 }
14536
14537 STRIP_NOPS (arg2);
14538 if (TREE_CODE (arg2) != INTEGER_CST
14539 || wi::geu_p (wi::to_wide (arg2), 16))
14540 {
14541 error ("argument 3 must be in the range 0..15");
14542 return CONST0_RTX (tmode);
14543 }
14544 }
14545
14546 if (target == 0
14547 || GET_MODE (target) != tmode
14548 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14549 target = gen_reg_rtx (tmode);
14550
14551 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14552 op0 = copy_to_mode_reg (mode0, op0);
14553 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14554 op1 = copy_to_mode_reg (mode1, op1);
14555 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14556 op2 = copy_to_mode_reg (mode2, op2);
14557
14558 pat = GEN_FCN (icode) (target, op0, op1, op2);
14559 if (! pat)
14560 return 0;
14561 emit_insn (pat);
14562
14563 return target;
14564 }
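/* As an example of the literal screening above: vec_sld maps to the
   vsldoi patterns, so "vec_sld (a, b, 3)" is accepted, while a
   variable or out-of-range third argument draws the "4-bit unsigned
   literal" error and expands to a zero constant instead.  */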
14565
14566
14567 /* Expand the dst builtins. */
14568 static rtx
14569 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14570 bool *expandedp)
14571 {
14572 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14573 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14574 tree arg0, arg1, arg2;
14575 machine_mode mode0, mode1;
14576 rtx pat, op0, op1, op2;
14577 const struct builtin_description *d;
14578 size_t i;
14579
14580 *expandedp = false;
14581
14582 /* Handle DST variants. */
14583 d = bdesc_dst;
14584 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14585 if (d->code == fcode)
14586 {
14587 arg0 = CALL_EXPR_ARG (exp, 0);
14588 arg1 = CALL_EXPR_ARG (exp, 1);
14589 arg2 = CALL_EXPR_ARG (exp, 2);
14590 op0 = expand_normal (arg0);
14591 op1 = expand_normal (arg1);
14592 op2 = expand_normal (arg2);
14593 mode0 = insn_data[d->icode].operand[0].mode;
14594 mode1 = insn_data[d->icode].operand[1].mode;
14595
14596 /* Invalid arguments, bail out before generating bad rtl. */
14597 if (arg0 == error_mark_node
14598 || arg1 == error_mark_node
14599 || arg2 == error_mark_node)
14600 return const0_rtx;
14601
14602 *expandedp = true;
14603 STRIP_NOPS (arg2);
14604 if (TREE_CODE (arg2) != INTEGER_CST
14605 || TREE_INT_CST_LOW (arg2) & ~0x3)
14606 {
14607 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14608 return const0_rtx;
14609 }
14610
14611 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14612 op0 = copy_to_mode_reg (Pmode, op0);
14613 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14614 op1 = copy_to_mode_reg (mode1, op1);
14615
14616 pat = GEN_FCN (d->icode) (op0, op1, op2);
14617 if (pat != 0)
14618 emit_insn (pat);
14619
14620 return NULL_RTX;
14621 }
14622
14623 return NULL_RTX;
14624 }
14625
14626 /* Expand vec_init builtin. */
14627 static rtx
14628 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14629 {
14630 machine_mode tmode = TYPE_MODE (type);
14631 machine_mode inner_mode = GET_MODE_INNER (tmode);
14632 int i, n_elt = GET_MODE_NUNITS (tmode);
14633
14634 gcc_assert (VECTOR_MODE_P (tmode));
14635 gcc_assert (n_elt == call_expr_nargs (exp));
14636
14637 if (!target || !register_operand (target, tmode))
14638 target = gen_reg_rtx (tmode);
14639
  /* If we have a vector composed of a single element, such as V1TImode, do
     the initialization directly.  */
14642 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14643 {
14644 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14645 emit_move_insn (target, gen_lowpart (tmode, x));
14646 }
14647 else
14648 {
14649 rtvec v = rtvec_alloc (n_elt);
14650
14651 for (i = 0; i < n_elt; ++i)
14652 {
14653 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14654 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14655 }
14656
14657 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14658 }
14659
14660 return target;
14661 }
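/* A sketch of what this expander sees: a VEC_INIT_V4SI call carries
   one scalar argument per lane, conceptually
     __builtin_vec_init_v4si (a, b, c, d)
   with n_elt == 4; each argument is expanded, narrowed to the element
   mode, and handed to rs6000_expand_vector_init as one PARALLEL.  */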
14662
14663 /* Return the integer constant in ARG. Constrain it to be in the range
14664 of the subparts of VEC_TYPE; issue an error if not. */
14665
14666 static int
14667 get_element_number (tree vec_type, tree arg)
14668 {
14669 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14670
14671 if (!tree_fits_uhwi_p (arg)
14672 || (elt = tree_to_uhwi (arg), elt > max))
14673 {
14674 error ("selector must be an integer constant in the range 0..%wi", max);
14675 return 0;
14676 }
14677
14678 return elt;
14679 }
14680
14681 /* Expand vec_set builtin. */
14682 static rtx
14683 altivec_expand_vec_set_builtin (tree exp)
14684 {
14685 machine_mode tmode, mode1;
14686 tree arg0, arg1, arg2;
14687 int elt;
14688 rtx op0, op1;
14689
14690 arg0 = CALL_EXPR_ARG (exp, 0);
14691 arg1 = CALL_EXPR_ARG (exp, 1);
14692 arg2 = CALL_EXPR_ARG (exp, 2);
14693
14694 tmode = TYPE_MODE (TREE_TYPE (arg0));
14695 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14696 gcc_assert (VECTOR_MODE_P (tmode));
14697
14698 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14699 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14700 elt = get_element_number (TREE_TYPE (arg0), arg2);
14701
14702 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14703 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14704
14705 op0 = force_reg (tmode, op0);
14706 op1 = force_reg (mode1, op1);
14707
14708 rs6000_expand_vector_set (op0, op1, elt);
14709
14710 return op0;
14711 }
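/* A walk through the flow above for a V4SI vec_set with selector 2:
   the vector and scalar are expanded, the selector is checked by
   get_element_number against the four lanes, and
   rs6000_expand_vector_set overwrites lane 2 of the register copy of
   the vector, which is then returned as the result.  */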
14712
14713 /* Expand vec_ext builtin. */
14714 static rtx
14715 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14716 {
14717 machine_mode tmode, mode0;
14718 tree arg0, arg1;
14719 rtx op0;
14720 rtx op1;
14721
14722 arg0 = CALL_EXPR_ARG (exp, 0);
14723 arg1 = CALL_EXPR_ARG (exp, 1);
14724
14725 op0 = expand_normal (arg0);
14726 op1 = expand_normal (arg1);
14727
14728 /* Call get_element_number to validate arg1 if it is a constant. */
14729 if (TREE_CODE (arg1) == INTEGER_CST)
14730 (void) get_element_number (TREE_TYPE (arg0), arg1);
14731
14732 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14733 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14734 gcc_assert (VECTOR_MODE_P (mode0));
14735
14736 op0 = force_reg (mode0, op0);
14737
14738 if (optimize || !target || !register_operand (target, tmode))
14739 target = gen_reg_rtx (tmode);
14740
14741 rs6000_expand_vector_extract (target, op0, op1);
14742
14743 return target;
14744 }
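/* Note the asymmetry with the vec_set path above: here the selector
   may be a run-time value.  Only a constant arg1 is pre-validated via
   get_element_number; a variable selector is passed through, and
   rs6000_expand_vector_extract is responsible for generating the
   variable-index extraction.  */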
14745
14746 /* Expand the builtin in EXP and store the result in TARGET. Store
14747 true in *EXPANDEDP if we found a builtin to expand. */
14748 static rtx
14749 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14750 {
14751 const struct builtin_description *d;
14752 size_t i;
14753 enum insn_code icode;
14754 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14755 tree arg0, arg1, arg2;
14756 rtx op0, pat;
14757 machine_mode tmode, mode0;
14758 enum rs6000_builtins fcode
14759 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14760
14761 if (rs6000_overloaded_builtin_p (fcode))
14762 {
14763 *expandedp = true;
14764 error ("unresolved overload for Altivec builtin %qF", fndecl);
14765
14766 /* Given it is invalid, just generate a normal call. */
14767 return expand_call (exp, target, false);
14768 }
14769
14770 target = altivec_expand_dst_builtin (exp, target, expandedp);
14771 if (*expandedp)
14772 return target;
14773
14774 *expandedp = true;
14775
14776 switch (fcode)
14777 {
14778 case ALTIVEC_BUILTIN_STVX_V2DF:
14779 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14780 case ALTIVEC_BUILTIN_STVX_V2DI:
14781 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14782 case ALTIVEC_BUILTIN_STVX_V4SF:
14783 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14784 case ALTIVEC_BUILTIN_STVX:
14785 case ALTIVEC_BUILTIN_STVX_V4SI:
14786 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14787 case ALTIVEC_BUILTIN_STVX_V8HI:
14788 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14789 case ALTIVEC_BUILTIN_STVX_V16QI:
14790 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14791 case ALTIVEC_BUILTIN_STVEBX:
14792 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14793 case ALTIVEC_BUILTIN_STVEHX:
14794 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14795 case ALTIVEC_BUILTIN_STVEWX:
14796 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14797 case ALTIVEC_BUILTIN_STVXL_V2DF:
14798 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14799 case ALTIVEC_BUILTIN_STVXL_V2DI:
14800 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14801 case ALTIVEC_BUILTIN_STVXL_V4SF:
14802 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14803 case ALTIVEC_BUILTIN_STVXL:
14804 case ALTIVEC_BUILTIN_STVXL_V4SI:
14805 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14806 case ALTIVEC_BUILTIN_STVXL_V8HI:
14807 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14808 case ALTIVEC_BUILTIN_STVXL_V16QI:
14809 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14810
14811 case ALTIVEC_BUILTIN_STVLX:
14812 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14813 case ALTIVEC_BUILTIN_STVLXL:
14814 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14815 case ALTIVEC_BUILTIN_STVRX:
14816 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14817 case ALTIVEC_BUILTIN_STVRXL:
14818 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14819
14820 case P9V_BUILTIN_STXVL:
14821 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14822
14823 case P9V_BUILTIN_XST_LEN_R:
14824 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14825
14826 case VSX_BUILTIN_STXVD2X_V1TI:
14827 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14828 case VSX_BUILTIN_STXVD2X_V2DF:
14829 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14830 case VSX_BUILTIN_STXVD2X_V2DI:
14831 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14832 case VSX_BUILTIN_STXVW4X_V4SF:
14833 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14834 case VSX_BUILTIN_STXVW4X_V4SI:
14835 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14836 case VSX_BUILTIN_STXVW4X_V8HI:
14837 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14838 case VSX_BUILTIN_STXVW4X_V16QI:
14839 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14840
    /* For the following on big-endian, it's ok to use any appropriate
       unaligned-supporting store, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
14845 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14846 {
14847 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14848 : CODE_FOR_vsx_st_elemrev_v1ti);
14849 return altivec_expand_stv_builtin (code, exp);
14850 }
14851 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14852 {
14853 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14854 : CODE_FOR_vsx_st_elemrev_v2df);
14855 return altivec_expand_stv_builtin (code, exp);
14856 }
14857 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14858 {
14859 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14860 : CODE_FOR_vsx_st_elemrev_v2di);
14861 return altivec_expand_stv_builtin (code, exp);
14862 }
14863 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14864 {
14865 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14866 : CODE_FOR_vsx_st_elemrev_v4sf);
14867 return altivec_expand_stv_builtin (code, exp);
14868 }
14869 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14870 {
14871 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14872 : CODE_FOR_vsx_st_elemrev_v4si);
14873 return altivec_expand_stv_builtin (code, exp);
14874 }
14875 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14876 {
14877 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14878 : CODE_FOR_vsx_st_elemrev_v8hi);
14879 return altivec_expand_stv_builtin (code, exp);
14880 }
14881 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14882 {
14883 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14884 : CODE_FOR_vsx_st_elemrev_v16qi);
14885 return altivec_expand_stv_builtin (code, exp);
14886 }
14887
14888 case ALTIVEC_BUILTIN_MFVSCR:
14889 icode = CODE_FOR_altivec_mfvscr;
14890 tmode = insn_data[icode].operand[0].mode;
14891
14892 if (target == 0
14893 || GET_MODE (target) != tmode
14894 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14895 target = gen_reg_rtx (tmode);
14896
14897 pat = GEN_FCN (icode) (target);
14898 if (! pat)
14899 return 0;
14900 emit_insn (pat);
14901 return target;
14902
14903 case ALTIVEC_BUILTIN_MTVSCR:
14904 icode = CODE_FOR_altivec_mtvscr;
14905 arg0 = CALL_EXPR_ARG (exp, 0);
14906 op0 = expand_normal (arg0);
14907 mode0 = insn_data[icode].operand[0].mode;
14908
14909 /* If we got invalid arguments bail out before generating bad rtl. */
14910 if (arg0 == error_mark_node)
14911 return const0_rtx;
14912
14913 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14914 op0 = copy_to_mode_reg (mode0, op0);
14915
14916 pat = GEN_FCN (icode) (op0);
14917 if (pat)
14918 emit_insn (pat);
14919 return NULL_RTX;
14920
14921 case ALTIVEC_BUILTIN_DSSALL:
14922 emit_insn (gen_altivec_dssall ());
14923 return NULL_RTX;
14924
14925 case ALTIVEC_BUILTIN_DSS:
14926 icode = CODE_FOR_altivec_dss;
14927 arg0 = CALL_EXPR_ARG (exp, 0);
14928 STRIP_NOPS (arg0);
14929 op0 = expand_normal (arg0);
14930 mode0 = insn_data[icode].operand[0].mode;
14931
14932 /* If we got invalid arguments bail out before generating bad rtl. */
14933 if (arg0 == error_mark_node)
14934 return const0_rtx;
14935
14936 if (TREE_CODE (arg0) != INTEGER_CST
14937 || TREE_INT_CST_LOW (arg0) & ~0x3)
14938 {
14939 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14940 return const0_rtx;
14941 }
14942
14943 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14944 op0 = copy_to_mode_reg (mode0, op0);
14945
14946 emit_insn (gen_altivec_dss (op0));
14947 return NULL_RTX;
14948
14949 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14950 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14951 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14952 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14953 case VSX_BUILTIN_VEC_INIT_V2DF:
14954 case VSX_BUILTIN_VEC_INIT_V2DI:
14955 case VSX_BUILTIN_VEC_INIT_V1TI:
14956 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14957
14958 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14959 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14960 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14961 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14962 case VSX_BUILTIN_VEC_SET_V2DF:
14963 case VSX_BUILTIN_VEC_SET_V2DI:
14964 case VSX_BUILTIN_VEC_SET_V1TI:
14965 return altivec_expand_vec_set_builtin (exp);
14966
14967 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14968 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14969 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14970 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14971 case VSX_BUILTIN_VEC_EXT_V2DF:
14972 case VSX_BUILTIN_VEC_EXT_V2DI:
14973 case VSX_BUILTIN_VEC_EXT_V1TI:
14974 return altivec_expand_vec_ext_builtin (exp, target);
14975
14976 case P9V_BUILTIN_VEC_EXTRACT4B:
14977 arg1 = CALL_EXPR_ARG (exp, 1);
14978 STRIP_NOPS (arg1);
14979
14980 /* Generate a normal call if it is invalid. */
14981 if (arg1 == error_mark_node)
14982 return expand_call (exp, target, false);
14983
14984 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14985 {
14986 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14987 return expand_call (exp, target, false);
14988 }
14989 break;
14990
14991 case P9V_BUILTIN_VEC_INSERT4B:
14992 arg2 = CALL_EXPR_ARG (exp, 2);
14993 STRIP_NOPS (arg2);
14994
14995 /* Generate a normal call if it is invalid. */
14996 if (arg2 == error_mark_node)
14997 return expand_call (exp, target, false);
14998
14999 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15000 {
15001 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15002 return expand_call (exp, target, false);
15003 }
15004 break;
15005
    default:
      /* Fall through to the expanders below.  */
      break;
    }
15010
15011 /* Expand abs* operations. */
15012 d = bdesc_abs;
15013 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
15014 if (d->code == fcode)
15015 return altivec_expand_abs_builtin (d->icode, exp, target);
15016
15017 /* Expand the AltiVec predicates. */
15018 d = bdesc_altivec_preds;
15019 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
15020 if (d->code == fcode)
15021 return altivec_expand_predicate_builtin (d->icode, exp, target);
15022
15023 /* LV* are funky. We initialized them differently. */
15024 switch (fcode)
15025 {
15026 case ALTIVEC_BUILTIN_LVSL:
15027 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
15028 exp, target, false);
15029 case ALTIVEC_BUILTIN_LVSR:
15030 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
15031 exp, target, false);
15032 case ALTIVEC_BUILTIN_LVEBX:
15033 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
15034 exp, target, false);
15035 case ALTIVEC_BUILTIN_LVEHX:
15036 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
15037 exp, target, false);
15038 case ALTIVEC_BUILTIN_LVEWX:
15039 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
15040 exp, target, false);
15041 case ALTIVEC_BUILTIN_LVXL_V2DF:
15042 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
15043 exp, target, false);
15044 case ALTIVEC_BUILTIN_LVXL_V2DI:
15045 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
15046 exp, target, false);
15047 case ALTIVEC_BUILTIN_LVXL_V4SF:
15048 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
15049 exp, target, false);
15050 case ALTIVEC_BUILTIN_LVXL:
15051 case ALTIVEC_BUILTIN_LVXL_V4SI:
15052 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
15053 exp, target, false);
15054 case ALTIVEC_BUILTIN_LVXL_V8HI:
15055 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
15056 exp, target, false);
15057 case ALTIVEC_BUILTIN_LVXL_V16QI:
15058 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
15059 exp, target, false);
15060 case ALTIVEC_BUILTIN_LVX_V1TI:
15061 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
15062 exp, target, false);
15063 case ALTIVEC_BUILTIN_LVX_V2DF:
15064 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
15065 exp, target, false);
15066 case ALTIVEC_BUILTIN_LVX_V2DI:
15067 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
15068 exp, target, false);
15069 case ALTIVEC_BUILTIN_LVX_V4SF:
15070 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
15071 exp, target, false);
15072 case ALTIVEC_BUILTIN_LVX:
15073 case ALTIVEC_BUILTIN_LVX_V4SI:
15074 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
15075 exp, target, false);
15076 case ALTIVEC_BUILTIN_LVX_V8HI:
15077 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
15078 exp, target, false);
15079 case ALTIVEC_BUILTIN_LVX_V16QI:
15080 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
15081 exp, target, false);
15082 case ALTIVEC_BUILTIN_LVLX:
15083 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
15084 exp, target, true);
15085 case ALTIVEC_BUILTIN_LVLXL:
15086 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
15087 exp, target, true);
15088 case ALTIVEC_BUILTIN_LVRX:
15089 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
15090 exp, target, true);
15091 case ALTIVEC_BUILTIN_LVRXL:
15092 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
15093 exp, target, true);
15094 case VSX_BUILTIN_LXVD2X_V1TI:
15095 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
15096 exp, target, false);
15097 case VSX_BUILTIN_LXVD2X_V2DF:
15098 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
15099 exp, target, false);
15100 case VSX_BUILTIN_LXVD2X_V2DI:
15101 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
15102 exp, target, false);
15103 case VSX_BUILTIN_LXVW4X_V4SF:
15104 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
15105 exp, target, false);
15106 case VSX_BUILTIN_LXVW4X_V4SI:
15107 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
15108 exp, target, false);
15109 case VSX_BUILTIN_LXVW4X_V8HI:
15110 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
15111 exp, target, false);
15112 case VSX_BUILTIN_LXVW4X_V16QI:
15113 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
15114 exp, target, false);
    /* For the following on big-endian, it's ok to use any appropriate
       unaligned-supporting load, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
15119 case VSX_BUILTIN_LD_ELEMREV_V2DF:
15120 {
15121 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
15122 : CODE_FOR_vsx_ld_elemrev_v2df);
15123 return altivec_expand_lv_builtin (code, exp, target, false);
15124 }
15125 case VSX_BUILTIN_LD_ELEMREV_V1TI:
15126 {
15127 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
15128 : CODE_FOR_vsx_ld_elemrev_v1ti);
15129 return altivec_expand_lv_builtin (code, exp, target, false);
15130 }
15131 case VSX_BUILTIN_LD_ELEMREV_V2DI:
15132 {
15133 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
15134 : CODE_FOR_vsx_ld_elemrev_v2di);
15135 return altivec_expand_lv_builtin (code, exp, target, false);
15136 }
15137 case VSX_BUILTIN_LD_ELEMREV_V4SF:
15138 {
15139 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
15140 : CODE_FOR_vsx_ld_elemrev_v4sf);
15141 return altivec_expand_lv_builtin (code, exp, target, false);
15142 }
15143 case VSX_BUILTIN_LD_ELEMREV_V4SI:
15144 {
15145 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
15146 : CODE_FOR_vsx_ld_elemrev_v4si);
15147 return altivec_expand_lv_builtin (code, exp, target, false);
15148 }
15149 case VSX_BUILTIN_LD_ELEMREV_V8HI:
15150 {
15151 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
15152 : CODE_FOR_vsx_ld_elemrev_v8hi);
15153 return altivec_expand_lv_builtin (code, exp, target, false);
15154 }
15155 case VSX_BUILTIN_LD_ELEMREV_V16QI:
15156 {
15157 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
15158 : CODE_FOR_vsx_ld_elemrev_v16qi);
15159 return altivec_expand_lv_builtin (code, exp, target, false);
15160 }
    default:
      /* Fall through to the code below.  */
      break;
    }
15166
15167 *expandedp = false;
15168 return NULL_RTX;
15169 }
15170
15171 /* Check whether a builtin function is supported in this target
15172 configuration. */
15173 bool
15174 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
15175 {
15176 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
  return (fnmask & rs6000_builtin_mask) == fnmask;
15181 }
15182
15183 /* Raise an error message for a builtin function that is called without the
15184 appropriate target options being set. */
15185
15186 static void
15187 rs6000_invalid_builtin (enum rs6000_builtins fncode)
15188 {
15189 size_t uns_fncode = (size_t) fncode;
15190 const char *name = rs6000_builtin_info[uns_fncode].name;
15191 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
15192
15193 gcc_assert (name != NULL);
15194 if ((fnmask & RS6000_BTM_CELL) != 0)
15195 error ("builtin function %qs is only valid for the cell processor", name);
15196 else if ((fnmask & RS6000_BTM_VSX) != 0)
15197 error ("builtin function %qs requires the %qs option", name, "-mvsx");
15198 else if ((fnmask & RS6000_BTM_HTM) != 0)
15199 error ("builtin function %qs requires the %qs option", name, "-mhtm");
15200 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
15201 error ("builtin function %qs requires the %qs option", name, "-maltivec");
15202 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15203 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
15204 error ("builtin function %qs requires the %qs and %qs options",
15205 name, "-mhard-dfp", "-mpower8-vector");
15206 else if ((fnmask & RS6000_BTM_DFP) != 0)
15207 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
15208 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
15209 error ("builtin function %qs requires the %qs option", name,
15210 "-mpower8-vector");
15211 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15212 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
15213 error ("builtin function %qs requires the %qs and %qs options",
15214 name, "-mcpu=power9", "-m64");
15215 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
15216 error ("builtin function %qs requires the %qs option", name,
15217 "-mcpu=power9");
15218 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15219 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
15220 error ("builtin function %qs requires the %qs and %qs options",
15221 name, "-mcpu=power9", "-m64");
15222 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
15223 error ("builtin function %qs requires the %qs option", name,
15224 "-mcpu=power9");
15225 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
15226 {
15227 if (!TARGET_HARD_FLOAT)
15228 error ("builtin function %qs requires the %qs option", name,
15229 "-mhard-float");
15230 else
15231 error ("builtin function %qs requires the %qs option", name,
15232 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
15233 }
15234 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
15235 error ("builtin function %qs requires the %qs option", name,
15236 "-mhard-float");
15237 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
15238 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
15239 name);
15240 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
15241 error ("builtin function %qs requires the %qs option", name,
15242 "%<-mfloat128%>");
15243 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15244 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
15245 error ("builtin function %qs requires the %qs (or newer), and "
15246 "%qs or %qs options",
15247 name, "-mcpu=power7", "-m64", "-mpowerpc64");
15248 else
15249 error ("builtin function %qs is not supported with the current options",
15250 name);
15251 }
15252
15253 /* Target hook for early folding of built-ins, shamelessly stolen
15254 from ia64.c. */
15255
15256 static tree
15257 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
15258 int n_args ATTRIBUTE_UNUSED,
15259 tree *args ATTRIBUTE_UNUSED,
15260 bool ignore ATTRIBUTE_UNUSED)
15261 {
15262 #ifdef SUBTARGET_FOLD_BUILTIN
15263 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
15264 #else
15265 return NULL_TREE;
15266 #endif
15267 }
15268
15269 /* Helper function to sort out which built-ins may be valid without having
15270 a LHS. */
15271 static bool
15272 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
15273 {
15274 switch (fn_code)
15275 {
15276 case ALTIVEC_BUILTIN_STVX_V16QI:
15277 case ALTIVEC_BUILTIN_STVX_V8HI:
15278 case ALTIVEC_BUILTIN_STVX_V4SI:
15279 case ALTIVEC_BUILTIN_STVX_V4SF:
15280 case ALTIVEC_BUILTIN_STVX_V2DI:
15281 case ALTIVEC_BUILTIN_STVX_V2DF:
15282 case VSX_BUILTIN_STXVW4X_V16QI:
15283 case VSX_BUILTIN_STXVW4X_V8HI:
15284 case VSX_BUILTIN_STXVW4X_V4SF:
15285 case VSX_BUILTIN_STXVW4X_V4SI:
15286 case VSX_BUILTIN_STXVD2X_V2DF:
15287 case VSX_BUILTIN_STXVD2X_V2DI:
15288 return true;
15289 default:
15290 return false;
15291 }
15292 }
15293
15294 /* Helper function to handle the gimple folding of a vector compare
15295 operation. This sets up true/false vectors, and uses the
15296 VEC_COND_EXPR operation.
15297 CODE indicates which comparison is to be made. (EQ, GT, ...).
15298 TYPE indicates the type of the result. */
15299 static tree
15300 fold_build_vec_cmp (tree_code code, tree type,
15301 tree arg0, tree arg1)
15302 {
15303 tree cmp_type = build_same_sized_truth_vector_type (type);
15304 tree zero_vec = build_zero_cst (type);
15305 tree minus_one_vec = build_minus_one_cst (type);
15306 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15307 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15308 }
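/* For example, folding an equality compare of two V4SI operands
   yields gimple along the lines of (a sketch):

     _1 = arg0 == arg1;
     lhs = VEC_COND_EXPR <_1, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>;

   i.e. lanes that compare true become all-ones and the rest become
   all-zeros, matching the vcmp* result convention.  */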
15309
15310 /* Helper function to handle the in-between steps for the
15311 vector compare built-ins. */
15312 static void
15313 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15314 {
15315 tree arg0 = gimple_call_arg (stmt, 0);
15316 tree arg1 = gimple_call_arg (stmt, 1);
15317 tree lhs = gimple_call_lhs (stmt);
15318 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15319 gimple *g = gimple_build_assign (lhs, cmp);
15320 gimple_set_location (g, gimple_location (stmt));
15321 gsi_replace (gsi, g, true);
15322 }
15323
15324 /* Helper function to map V2DF and V4SF types to their
15325 integral equivalents (V2DI and V4SI). */
15326 tree map_to_integral_tree_type (tree input_tree_type)
15327 {
15328 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15329 return input_tree_type;
15330 else
15331 {
15332 if (types_compatible_p (TREE_TYPE (input_tree_type),
15333 TREE_TYPE (V2DF_type_node)))
15334 return V2DI_type_node;
15335 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15336 TREE_TYPE (V4SF_type_node)))
15337 return V4SI_type_node;
15338 else
15339 gcc_unreachable ();
15340 }
15341 }
15342
/* Helper function to handle the vector merge[hl] built-ins.  The
   implementation difference between the h and l versions of this code
   lies in the values used when building the permute vector for the
   high-word versus low-word merge; the variance is keyed off the
   use_high parameter.  */
15347 static void
15348 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15349 {
15350 tree arg0 = gimple_call_arg (stmt, 0);
15351 tree arg1 = gimple_call_arg (stmt, 1);
15352 tree lhs = gimple_call_lhs (stmt);
15353 tree lhs_type = TREE_TYPE (lhs);
15354 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15355 int midpoint = n_elts / 2;
15356 int offset = 0;
15357
15358 if (use_high == 1)
15359 offset = midpoint;
15360
15361 /* The permute_type will match the lhs for integral types. For double and
15362 float types, the permute type needs to map to the V2 or V4 type that
15363 matches size. */
15364 tree permute_type;
15365 permute_type = map_to_integral_tree_type (lhs_type);
15366 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15367
15368 for (int i = 0; i < midpoint; i++)
15369 {
15370 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15371 offset + i));
15372 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15373 offset + n_elts + i));
15374 }
15375
15376 tree permute = elts.build ();
15377
15378 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15379 gimple_set_location (g, gimple_location (stmt));
15380 gsi_replace (gsi, g, true);
15381 }
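/* Concretely, for a V4SI merge the loop above builds the permute
   selector { 0, 4, 1, 5 } when use_high is 0 and { 2, 6, 3, 7 } when
   use_high is 1, interleaving one half of each input vector via the
   VEC_PERM_EXPR.  */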
15382
15383 /* Helper function to handle the vector merge[eo] built-ins. */
15384 static void
15385 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15386 {
15387 tree arg0 = gimple_call_arg (stmt, 0);
15388 tree arg1 = gimple_call_arg (stmt, 1);
15389 tree lhs = gimple_call_lhs (stmt);
15390 tree lhs_type = TREE_TYPE (lhs);
15391 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15392
15393 /* The permute_type will match the lhs for integral types. For double and
15394 float types, the permute type needs to map to the V2 or V4 type that
15395 matches size. */
15396 tree permute_type;
15397 permute_type = map_to_integral_tree_type (lhs_type);
15398
15399 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15400
15401 /* Build the permute vector. */
15402 for (int i = 0; i < n_elts / 2; i++)
15403 {
15404 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15405 				     2 * i + use_odd));
15406 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15407 				     2 * i + use_odd + n_elts));
15408 }
15409
15410 tree permute = elts.build ();
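
  /* Illustrative note (example values, not from the original): for V4SI
     the selector is { 0, 4, 2, 6 } when use_odd is 0 and { 1, 5, 3, 7 }
     when use_odd is 1; the former picks the even-numbered elements of
     each input, giving {a0,b0,a2,b2}.  */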
15411
15412 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15413 gimple_set_location (g, gimple_location (stmt));
15414 gsi_replace (gsi, g, true);
15415 }
15416
15417 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15418 a constant, use rs6000_fold_builtin.) */
15419
15420 bool
15421 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15422 {
15423 gimple *stmt = gsi_stmt (*gsi);
15424 tree fndecl = gimple_call_fndecl (stmt);
15425 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15426 enum rs6000_builtins fn_code
15427 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15428 tree arg0, arg1, lhs, temp;
15429 enum tree_code bcode;
15430 gimple *g;
15431
15432 size_t uns_fncode = (size_t) fn_code;
15433 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15434 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15435 const char *fn_name2 = (icode != CODE_FOR_nothing)
15436 ? get_insn_name ((int) icode)
15437 : "nothing";
15438
15439 if (TARGET_DEBUG_BUILTIN)
15440 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15441 fn_code, fn_name1, fn_name2);
15442
15443 if (!rs6000_fold_gimple)
15444 return false;
15445
15446   /* Prevent gimple folding for code that does not have an LHS, unless it is
15447 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15448 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15449 return false;
15450
15451   /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them.  */
15452 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15453 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15454 if (!func_valid_p)
15455 return false;
15456
15457 switch (fn_code)
15458 {
15459 /* Flavors of vec_add. We deliberately don't expand
15460 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15461 TImode, resulting in much poorer code generation. */
15462 case ALTIVEC_BUILTIN_VADDUBM:
15463 case ALTIVEC_BUILTIN_VADDUHM:
15464 case ALTIVEC_BUILTIN_VADDUWM:
15465 case P8V_BUILTIN_VADDUDM:
15466 case ALTIVEC_BUILTIN_VADDFP:
15467 case VSX_BUILTIN_XVADDDP:
15468 bcode = PLUS_EXPR;
15469 do_binary:
15470 arg0 = gimple_call_arg (stmt, 0);
15471 arg1 = gimple_call_arg (stmt, 1);
15472 lhs = gimple_call_lhs (stmt);
15473 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15474 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15475 {
15476 	  /* Ensure the binary operation is performed in a type
15477 	     that wraps if it is an integral type.  */
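	  /* A sketch of the GIMPLE emitted below for vec_add on
	     vector signed int (illustrative; the SSA names are invented):

	       _1 = VIEW_CONVERT_EXPR<vector unsigned int>(arg0);
	       _2 = VIEW_CONVERT_EXPR<vector unsigned int>(arg1);
	       _3 = _1 + _2;   <-- unsigned, so overflow wraps
	       lhs = VIEW_CONVERT_EXPR<vector signed int>(_3);  */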
15478 gimple_seq stmts = NULL;
15479 tree type = unsigned_type_for (TREE_TYPE (lhs));
15480 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15481 type, arg0);
15482 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15483 type, arg1);
15484 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15485 type, uarg0, uarg1);
15486 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15487 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15488 build1 (VIEW_CONVERT_EXPR,
15489 TREE_TYPE (lhs), res));
15490 gsi_replace (gsi, g, true);
15491 return true;
15492 }
15493 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15494 gimple_set_location (g, gimple_location (stmt));
15495 gsi_replace (gsi, g, true);
15496 return true;
15497 /* Flavors of vec_sub. We deliberately don't expand
15498 P8V_BUILTIN_VSUBUQM. */
15499 case ALTIVEC_BUILTIN_VSUBUBM:
15500 case ALTIVEC_BUILTIN_VSUBUHM:
15501 case ALTIVEC_BUILTIN_VSUBUWM:
15502 case P8V_BUILTIN_VSUBUDM:
15503 case ALTIVEC_BUILTIN_VSUBFP:
15504 case VSX_BUILTIN_XVSUBDP:
15505 bcode = MINUS_EXPR;
15506 goto do_binary;
15507 case VSX_BUILTIN_XVMULSP:
15508 case VSX_BUILTIN_XVMULDP:
15509 arg0 = gimple_call_arg (stmt, 0);
15510 arg1 = gimple_call_arg (stmt, 1);
15511 lhs = gimple_call_lhs (stmt);
15512 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15513 gimple_set_location (g, gimple_location (stmt));
15514 gsi_replace (gsi, g, true);
15515 return true;
15516 /* Even element flavors of vec_mul (signed). */
15517 case ALTIVEC_BUILTIN_VMULESB:
15518 case ALTIVEC_BUILTIN_VMULESH:
15519 case P8V_BUILTIN_VMULESW:
15520 /* Even element flavors of vec_mul (unsigned). */
15521 case ALTIVEC_BUILTIN_VMULEUB:
15522 case ALTIVEC_BUILTIN_VMULEUH:
15523 case P8V_BUILTIN_VMULEUW:
15524 arg0 = gimple_call_arg (stmt, 0);
15525 arg1 = gimple_call_arg (stmt, 1);
15526 lhs = gimple_call_lhs (stmt);
15527 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15528 gimple_set_location (g, gimple_location (stmt));
15529 gsi_replace (gsi, g, true);
15530 return true;
15531 /* Odd element flavors of vec_mul (signed). */
15532 case ALTIVEC_BUILTIN_VMULOSB:
15533 case ALTIVEC_BUILTIN_VMULOSH:
15534 case P8V_BUILTIN_VMULOSW:
15535 /* Odd element flavors of vec_mul (unsigned). */
15536 case ALTIVEC_BUILTIN_VMULOUB:
15537 case ALTIVEC_BUILTIN_VMULOUH:
15538 case P8V_BUILTIN_VMULOUW:
15539 arg0 = gimple_call_arg (stmt, 0);
15540 arg1 = gimple_call_arg (stmt, 1);
15541 lhs = gimple_call_lhs (stmt);
15542 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15543 gimple_set_location (g, gimple_location (stmt));
15544 gsi_replace (gsi, g, true);
15545 return true;
15546 /* Flavors of vec_div (Integer). */
15547 case VSX_BUILTIN_DIV_V2DI:
15548 case VSX_BUILTIN_UDIV_V2DI:
15549 arg0 = gimple_call_arg (stmt, 0);
15550 arg1 = gimple_call_arg (stmt, 1);
15551 lhs = gimple_call_lhs (stmt);
15552 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15553 gimple_set_location (g, gimple_location (stmt));
15554 gsi_replace (gsi, g, true);
15555 return true;
15556 /* Flavors of vec_div (Float). */
15557 case VSX_BUILTIN_XVDIVSP:
15558 case VSX_BUILTIN_XVDIVDP:
15559 arg0 = gimple_call_arg (stmt, 0);
15560 arg1 = gimple_call_arg (stmt, 1);
15561 lhs = gimple_call_lhs (stmt);
15562 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15563 gimple_set_location (g, gimple_location (stmt));
15564 gsi_replace (gsi, g, true);
15565 return true;
15566 /* Flavors of vec_and. */
15567 case ALTIVEC_BUILTIN_VAND:
15568 arg0 = gimple_call_arg (stmt, 0);
15569 arg1 = gimple_call_arg (stmt, 1);
15570 lhs = gimple_call_lhs (stmt);
15571 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15572 gimple_set_location (g, gimple_location (stmt));
15573 gsi_replace (gsi, g, true);
15574 return true;
15575 /* Flavors of vec_andc. */
15576 case ALTIVEC_BUILTIN_VANDC:
15577 arg0 = gimple_call_arg (stmt, 0);
15578 arg1 = gimple_call_arg (stmt, 1);
15579 lhs = gimple_call_lhs (stmt);
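      /* vec_andc (a, b) computes a & ~b: build ~arg1 into a temporary,
	 then AND it with arg0.  */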
15580 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15581 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15582 gimple_set_location (g, gimple_location (stmt));
15583 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15584 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15585 gimple_set_location (g, gimple_location (stmt));
15586 gsi_replace (gsi, g, true);
15587 return true;
15588 /* Flavors of vec_nand. */
15589 case P8V_BUILTIN_VEC_NAND:
15590 case P8V_BUILTIN_NAND_V16QI:
15591 case P8V_BUILTIN_NAND_V8HI:
15592 case P8V_BUILTIN_NAND_V4SI:
15593 case P8V_BUILTIN_NAND_V4SF:
15594 case P8V_BUILTIN_NAND_V2DF:
15595 case P8V_BUILTIN_NAND_V2DI:
15596 arg0 = gimple_call_arg (stmt, 0);
15597 arg1 = gimple_call_arg (stmt, 1);
15598 lhs = gimple_call_lhs (stmt);
15599 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15600 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15601 gimple_set_location (g, gimple_location (stmt));
15602 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15603 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15604 gimple_set_location (g, gimple_location (stmt));
15605 gsi_replace (gsi, g, true);
15606 return true;
15607 /* Flavors of vec_or. */
15608 case ALTIVEC_BUILTIN_VOR:
15609 arg0 = gimple_call_arg (stmt, 0);
15610 arg1 = gimple_call_arg (stmt, 1);
15611 lhs = gimple_call_lhs (stmt);
15612 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15613 gimple_set_location (g, gimple_location (stmt));
15614 gsi_replace (gsi, g, true);
15615 return true;
15616     /* Flavors of vec_orc.  */
15617 case P8V_BUILTIN_ORC_V16QI:
15618 case P8V_BUILTIN_ORC_V8HI:
15619 case P8V_BUILTIN_ORC_V4SI:
15620 case P8V_BUILTIN_ORC_V4SF:
15621 case P8V_BUILTIN_ORC_V2DF:
15622 case P8V_BUILTIN_ORC_V2DI:
15623 arg0 = gimple_call_arg (stmt, 0);
15624 arg1 = gimple_call_arg (stmt, 1);
15625 lhs = gimple_call_lhs (stmt);
15626 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15627 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15628 gimple_set_location (g, gimple_location (stmt));
15629 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15630 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15631 gimple_set_location (g, gimple_location (stmt));
15632 gsi_replace (gsi, g, true);
15633 return true;
15634 /* Flavors of vec_xor. */
15635 case ALTIVEC_BUILTIN_VXOR:
15636 arg0 = gimple_call_arg (stmt, 0);
15637 arg1 = gimple_call_arg (stmt, 1);
15638 lhs = gimple_call_lhs (stmt);
15639 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15640 gimple_set_location (g, gimple_location (stmt));
15641 gsi_replace (gsi, g, true);
15642 return true;
15643 /* Flavors of vec_nor. */
15644 case ALTIVEC_BUILTIN_VNOR:
15645 arg0 = gimple_call_arg (stmt, 0);
15646 arg1 = gimple_call_arg (stmt, 1);
15647 lhs = gimple_call_lhs (stmt);
15648 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15649 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15650 gimple_set_location (g, gimple_location (stmt));
15651 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15652 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15653 gimple_set_location (g, gimple_location (stmt));
15654 gsi_replace (gsi, g, true);
15655 return true;
15656     /* Flavors of vec_abs.  */
15657 case ALTIVEC_BUILTIN_ABS_V16QI:
15658 case ALTIVEC_BUILTIN_ABS_V8HI:
15659 case ALTIVEC_BUILTIN_ABS_V4SI:
15660 case ALTIVEC_BUILTIN_ABS_V4SF:
15661 case P8V_BUILTIN_ABS_V2DI:
15662 case VSX_BUILTIN_XVABSDP:
15663 arg0 = gimple_call_arg (stmt, 0);
15664 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15665 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15666 return false;
15667 lhs = gimple_call_lhs (stmt);
15668 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15669 gimple_set_location (g, gimple_location (stmt));
15670 gsi_replace (gsi, g, true);
15671 return true;
15672     /* Flavors of vec_min.  */
15673 case VSX_BUILTIN_XVMINDP:
15674 case P8V_BUILTIN_VMINSD:
15675 case P8V_BUILTIN_VMINUD:
15676 case ALTIVEC_BUILTIN_VMINSB:
15677 case ALTIVEC_BUILTIN_VMINSH:
15678 case ALTIVEC_BUILTIN_VMINSW:
15679 case ALTIVEC_BUILTIN_VMINUB:
15680 case ALTIVEC_BUILTIN_VMINUH:
15681 case ALTIVEC_BUILTIN_VMINUW:
15682 case ALTIVEC_BUILTIN_VMINFP:
15683 arg0 = gimple_call_arg (stmt, 0);
15684 arg1 = gimple_call_arg (stmt, 1);
15685 lhs = gimple_call_lhs (stmt);
15686 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15687 gimple_set_location (g, gimple_location (stmt));
15688 gsi_replace (gsi, g, true);
15689 return true;
15690     /* Flavors of vec_max.  */
15691 case VSX_BUILTIN_XVMAXDP:
15692 case P8V_BUILTIN_VMAXSD:
15693 case P8V_BUILTIN_VMAXUD:
15694 case ALTIVEC_BUILTIN_VMAXSB:
15695 case ALTIVEC_BUILTIN_VMAXSH:
15696 case ALTIVEC_BUILTIN_VMAXSW:
15697 case ALTIVEC_BUILTIN_VMAXUB:
15698 case ALTIVEC_BUILTIN_VMAXUH:
15699 case ALTIVEC_BUILTIN_VMAXUW:
15700 case ALTIVEC_BUILTIN_VMAXFP:
15701 arg0 = gimple_call_arg (stmt, 0);
15702 arg1 = gimple_call_arg (stmt, 1);
15703 lhs = gimple_call_lhs (stmt);
15704 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15705 gimple_set_location (g, gimple_location (stmt));
15706 gsi_replace (gsi, g, true);
15707 return true;
15708 /* Flavors of vec_eqv. */
15709 case P8V_BUILTIN_EQV_V16QI:
15710 case P8V_BUILTIN_EQV_V8HI:
15711 case P8V_BUILTIN_EQV_V4SI:
15712 case P8V_BUILTIN_EQV_V4SF:
15713 case P8V_BUILTIN_EQV_V2DF:
15714 case P8V_BUILTIN_EQV_V2DI:
15715 arg0 = gimple_call_arg (stmt, 0);
15716 arg1 = gimple_call_arg (stmt, 1);
15717 lhs = gimple_call_lhs (stmt);
15718 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15719 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15720 gimple_set_location (g, gimple_location (stmt));
15721 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15722 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15723 gimple_set_location (g, gimple_location (stmt));
15724 gsi_replace (gsi, g, true);
15725 return true;
15726 /* Flavors of vec_rotate_left. */
15727 case ALTIVEC_BUILTIN_VRLB:
15728 case ALTIVEC_BUILTIN_VRLH:
15729 case ALTIVEC_BUILTIN_VRLW:
15730 case P8V_BUILTIN_VRLD:
15731 arg0 = gimple_call_arg (stmt, 0);
15732 arg1 = gimple_call_arg (stmt, 1);
15733 lhs = gimple_call_lhs (stmt);
15734 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15735 gimple_set_location (g, gimple_location (stmt));
15736 gsi_replace (gsi, g, true);
15737 return true;
15738 /* Flavors of vector shift right algebraic.
15739 vec_sra{b,h,w} -> vsra{b,h,w}. */
15740 case ALTIVEC_BUILTIN_VSRAB:
15741 case ALTIVEC_BUILTIN_VSRAH:
15742 case ALTIVEC_BUILTIN_VSRAW:
15743 case P8V_BUILTIN_VSRAD:
15744 {
15745 arg0 = gimple_call_arg (stmt, 0);
15746 arg1 = gimple_call_arg (stmt, 1);
15747 lhs = gimple_call_lhs (stmt);
15748 tree arg1_type = TREE_TYPE (arg1);
15749 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15750 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15751 location_t loc = gimple_location (stmt);
15752       /* Force arg1 into the valid shift range for the arg0 type.  */
15753 /* Build a vector consisting of the max valid bit-size values. */
15754 int n_elts = VECTOR_CST_NELTS (arg1);
15755 tree element_size = build_int_cst (unsigned_element_type,
15756 128 / n_elts);
15757 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15758 for (int i = 0; i < n_elts; i++)
15759 elts.safe_push (element_size);
15760 tree modulo_tree = elts.build ();
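      /* For example, for V4SI each element of modulo_tree is 128/4 = 32,
	 so a requested shift of 35 behaves as a shift of 35 % 32 = 3,
	 matching the modulo semantics of the hardware shifts.  */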
15761 /* Modulo the provided shift value against that vector. */
15762 gimple_seq stmts = NULL;
15763 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15764 unsigned_arg1_type, arg1);
15765 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15766 unsigned_arg1_type, unsigned_arg1,
15767 modulo_tree);
15768 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15769 /* And finally, do the shift. */
15770 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15771 gimple_set_location (g, loc);
15772 gsi_replace (gsi, g, true);
15773 return true;
15774 }
15775 /* Flavors of vector shift left.
15776 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15777 case ALTIVEC_BUILTIN_VSLB:
15778 case ALTIVEC_BUILTIN_VSLH:
15779 case ALTIVEC_BUILTIN_VSLW:
15780 case P8V_BUILTIN_VSLD:
15781 {
15782 location_t loc;
15783 gimple_seq stmts = NULL;
15784 arg0 = gimple_call_arg (stmt, 0);
15785 tree arg0_type = TREE_TYPE (arg0);
15786 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15787 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15788 return false;
15789 arg1 = gimple_call_arg (stmt, 1);
15790 tree arg1_type = TREE_TYPE (arg1);
15791 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15792 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15793 loc = gimple_location (stmt);
15794 lhs = gimple_call_lhs (stmt);
15795       /* Force arg1 into the valid shift range for the arg0 type.  */
15796 /* Build a vector consisting of the max valid bit-size values. */
15797 int n_elts = VECTOR_CST_NELTS (arg1);
15798 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15799 * BITS_PER_UNIT;
15800 tree element_size = build_int_cst (unsigned_element_type,
15801 tree_size_in_bits / n_elts);
15802 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15803 for (int i = 0; i < n_elts; i++)
15804 elts.safe_push (element_size);
15805 tree modulo_tree = elts.build ();
15806 /* Modulo the provided shift value against that vector. */
15807 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15808 unsigned_arg1_type, arg1);
15809 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15810 unsigned_arg1_type, unsigned_arg1,
15811 modulo_tree);
15812 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15813 /* And finally, do the shift. */
15814 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15815 gimple_set_location (g, gimple_location (stmt));
15816 gsi_replace (gsi, g, true);
15817 return true;
15818 }
15819 /* Flavors of vector shift right. */
15820 case ALTIVEC_BUILTIN_VSRB:
15821 case ALTIVEC_BUILTIN_VSRH:
15822 case ALTIVEC_BUILTIN_VSRW:
15823 case P8V_BUILTIN_VSRD:
15824 {
15825 arg0 = gimple_call_arg (stmt, 0);
15826 arg1 = gimple_call_arg (stmt, 1);
15827 lhs = gimple_call_lhs (stmt);
15828 tree arg1_type = TREE_TYPE (arg1);
15829 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15830 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15831 location_t loc = gimple_location (stmt);
15832 gimple_seq stmts = NULL;
15833 /* Convert arg0 to unsigned. */
15834 tree arg0_unsigned
15835 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15836 unsigned_type_for (TREE_TYPE (arg0)), arg0);
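	/* Doing the RSHIFT_EXPR in the unsigned copy of the type yields a
	   logical (zero-filling) shift; the result is view-converted back
	   to the lhs type below.  */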
15837 	/* Force arg1 into the valid shift range for the arg0 type.  */
15838 /* Build a vector consisting of the max valid bit-size values. */
15839 int n_elts = VECTOR_CST_NELTS (arg1);
15840 tree element_size = build_int_cst (unsigned_element_type,
15841 128 / n_elts);
15842 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15843 for (int i = 0; i < n_elts; i++)
15844 elts.safe_push (element_size);
15845 tree modulo_tree = elts.build ();
15846 /* Modulo the provided shift value against that vector. */
15847 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15848 unsigned_arg1_type, arg1);
15849 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15850 unsigned_arg1_type, unsigned_arg1,
15851 modulo_tree);
15852 /* Do the shift. */
15853 tree res
15854 = gimple_build (&stmts, RSHIFT_EXPR,
15855 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15856 /* Convert result back to the lhs type. */
15857 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15858 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15859 update_call_from_tree (gsi, res);
15860 return true;
15861 }
15862 /* Vector loads. */
15863 case ALTIVEC_BUILTIN_LVX_V16QI:
15864 case ALTIVEC_BUILTIN_LVX_V8HI:
15865 case ALTIVEC_BUILTIN_LVX_V4SI:
15866 case ALTIVEC_BUILTIN_LVX_V4SF:
15867 case ALTIVEC_BUILTIN_LVX_V2DI:
15868 case ALTIVEC_BUILTIN_LVX_V2DF:
15869 case ALTIVEC_BUILTIN_LVX_V1TI:
15870 {
15871 arg0 = gimple_call_arg (stmt, 0); // offset
15872 arg1 = gimple_call_arg (stmt, 1); // address
15873 lhs = gimple_call_lhs (stmt);
15874 location_t loc = gimple_location (stmt);
15875 /* Since arg1 may be cast to a different type, just use ptr_type_node
15876 here instead of trying to enforce TBAA on pointer types. */
15877 tree arg1_type = ptr_type_node;
15878 tree lhs_type = TREE_TYPE (lhs);
15879 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15880 the tree using the value from arg0. The resulting type will match
15881 the type of arg1. */
15882 gimple_seq stmts = NULL;
15883 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15884 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15885 arg1_type, arg1, temp_offset);
15886 /* Mask off any lower bits from the address. */
15887 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15888 arg1_type, temp_addr,
15889 build_int_cst (arg1_type, -16));
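	/* ANDing with -16 (all-ones except the low four bits) rounds the
	   address down to a 16-byte boundary, the same truncation lvx
	   itself applies.  */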
15890 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15891 if (!is_gimple_mem_ref_addr (aligned_addr))
15892 {
15893 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15894 gimple *g = gimple_build_assign (t, aligned_addr);
15895 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15896 aligned_addr = t;
15897 }
15898 	/* Use the build2 helper to set up the MEM_REF.  The MEM_REF could also
15899 take an offset, but since we've already incorporated the offset
15900 above, here we just pass in a zero. */
15901 gimple *g
15902 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15903 build_int_cst (arg1_type, 0)));
15904 gimple_set_location (g, loc);
15905 gsi_replace (gsi, g, true);
15906 return true;
15907 }
15908 /* Vector stores. */
15909 case ALTIVEC_BUILTIN_STVX_V16QI:
15910 case ALTIVEC_BUILTIN_STVX_V8HI:
15911 case ALTIVEC_BUILTIN_STVX_V4SI:
15912 case ALTIVEC_BUILTIN_STVX_V4SF:
15913 case ALTIVEC_BUILTIN_STVX_V2DI:
15914 case ALTIVEC_BUILTIN_STVX_V2DF:
15915 {
15916 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15917 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15918 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15919 location_t loc = gimple_location (stmt);
15920 tree arg0_type = TREE_TYPE (arg0);
15921 /* Use ptr_type_node (no TBAA) for the arg2_type.
15922 FIXME: (Richard) "A proper fix would be to transition this type as
15923 seen from the frontend to GIMPLE, for example in a similar way we
15924 do for MEM_REFs by piggy-backing that on an extra argument, a
15925 constant zero pointer of the alias pointer type to use (which would
15926 also serve as a type indicator of the store itself). I'd use a
15927 target specific internal function for this (not sure if we can have
15928 those target specific, but I guess if it's folded away then that's
15929 fine) and get away with the overload set." */
15930 tree arg2_type = ptr_type_node;
15931 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15932 	   the tree using the value from arg1.  The resulting type will match
15933 the type of arg2. */
15934 gimple_seq stmts = NULL;
15935 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15936 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15937 arg2_type, arg2, temp_offset);
15938 /* Mask off any lower bits from the address. */
15939 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15940 arg2_type, temp_addr,
15941 build_int_cst (arg2_type, -16));
15942 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15943 if (!is_gimple_mem_ref_addr (aligned_addr))
15944 {
15945 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15946 gimple *g = gimple_build_assign (t, aligned_addr);
15947 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15948 aligned_addr = t;
15949 }
15950 /* The desired gimple result should be similar to:
15951 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15952 gimple *g
15953 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15954 build_int_cst (arg2_type, 0)), arg0);
15955 gimple_set_location (g, loc);
15956 gsi_replace (gsi, g, true);
15957 return true;
15958 }
15959
15960     /* Unaligned vector loads.  */
15961 case VSX_BUILTIN_LXVW4X_V16QI:
15962 case VSX_BUILTIN_LXVW4X_V8HI:
15963 case VSX_BUILTIN_LXVW4X_V4SF:
15964 case VSX_BUILTIN_LXVW4X_V4SI:
15965 case VSX_BUILTIN_LXVD2X_V2DF:
15966 case VSX_BUILTIN_LXVD2X_V2DI:
15967 {
15968 arg0 = gimple_call_arg (stmt, 0); // offset
15969 arg1 = gimple_call_arg (stmt, 1); // address
15970 lhs = gimple_call_lhs (stmt);
15971 location_t loc = gimple_location (stmt);
15972 /* Since arg1 may be cast to a different type, just use ptr_type_node
15973 here instead of trying to enforce TBAA on pointer types. */
15974 tree arg1_type = ptr_type_node;
15975 tree lhs_type = TREE_TYPE (lhs);
15976 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15977 	   required alignment (on Power) is 4 bytes regardless of data type.  */
15978 tree align_ltype = build_aligned_type (lhs_type, 4);
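	/* Illustrative: lhs_type might be V4SF with its natural 16-byte
	   alignment; align_ltype is the same vector type with alignment
	   reduced to the 4 bytes noted above, so the expander need not
	   assume a 16-byte-aligned address.  */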
15979 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15980 the tree using the value from arg0. The resulting type will match
15981 the type of arg1. */
15982 gimple_seq stmts = NULL;
15983 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15984 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15985 arg1_type, arg1, temp_offset);
15986 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15987 if (!is_gimple_mem_ref_addr (temp_addr))
15988 {
15989 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15990 gimple *g = gimple_build_assign (t, temp_addr);
15991 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15992 temp_addr = t;
15993 }
15994 	/* Use the build2 helper to set up the MEM_REF.  The MEM_REF could also
15995 take an offset, but since we've already incorporated the offset
15996 above, here we just pass in a zero. */
15997 gimple *g;
15998 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15999 build_int_cst (arg1_type, 0)));
16000 gimple_set_location (g, loc);
16001 gsi_replace (gsi, g, true);
16002 return true;
16003 }
16004
16005     /* Unaligned vector stores.  */
16006 case VSX_BUILTIN_STXVW4X_V16QI:
16007 case VSX_BUILTIN_STXVW4X_V8HI:
16008 case VSX_BUILTIN_STXVW4X_V4SF:
16009 case VSX_BUILTIN_STXVW4X_V4SI:
16010 case VSX_BUILTIN_STXVD2X_V2DF:
16011 case VSX_BUILTIN_STXVD2X_V2DI:
16012 {
16013 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
16014 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
16015 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
16016 location_t loc = gimple_location (stmt);
16017 tree arg0_type = TREE_TYPE (arg0);
16018 /* Use ptr_type_node (no TBAA) for the arg2_type. */
16019 tree arg2_type = ptr_type_node;
16020 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
16021 	   required alignment (on Power) is 4 bytes regardless of data type.  */
16022 tree align_stype = build_aligned_type (arg0_type, 4);
16023 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
16024 the tree using the value from arg1. */
16025 gimple_seq stmts = NULL;
16026 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
16027 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
16028 arg2_type, arg2, temp_offset);
16029 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16030 if (!is_gimple_mem_ref_addr (temp_addr))
16031 {
16032 tree t = make_ssa_name (TREE_TYPE (temp_addr));
16033 gimple *g = gimple_build_assign (t, temp_addr);
16034 gsi_insert_before (gsi, g, GSI_SAME_STMT);
16035 temp_addr = t;
16036 }
16037 gimple *g;
16038 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
16039 build_int_cst (arg2_type, 0)), arg0);
16040 gimple_set_location (g, loc);
16041 gsi_replace (gsi, g, true);
16042 return true;
16043 }
16044
16045     /* Vector fused multiply-add (fma).  */
16046 case ALTIVEC_BUILTIN_VMADDFP:
16047 case VSX_BUILTIN_XVMADDDP:
16048 case ALTIVEC_BUILTIN_VMLADDUHM:
16049 {
16050 arg0 = gimple_call_arg (stmt, 0);
16051 arg1 = gimple_call_arg (stmt, 1);
16052 tree arg2 = gimple_call_arg (stmt, 2);
16053 lhs = gimple_call_lhs (stmt);
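	/* The replacement appears in GIMPLE dumps roughly as
	   lhs = .FMA (arg0, arg1, arg2), a multiply-add applied per
	   vector lane.  */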
16054 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
16055 gimple_call_set_lhs (g, lhs);
16056 gimple_call_set_nothrow (g, true);
16057 gimple_set_location (g, gimple_location (stmt));
16058 gsi_replace (gsi, g, true);
16059 return true;
16060 }
16061
16062 /* Vector compares; EQ, NE, GE, GT, LE. */
16063 case ALTIVEC_BUILTIN_VCMPEQUB:
16064 case ALTIVEC_BUILTIN_VCMPEQUH:
16065 case ALTIVEC_BUILTIN_VCMPEQUW:
16066 case P8V_BUILTIN_VCMPEQUD:
16067 fold_compare_helper (gsi, EQ_EXPR, stmt);
16068 return true;
16069
16070 case P9V_BUILTIN_CMPNEB:
16071 case P9V_BUILTIN_CMPNEH:
16072 case P9V_BUILTIN_CMPNEW:
16073 fold_compare_helper (gsi, NE_EXPR, stmt);
16074 return true;
16075
16076 case VSX_BUILTIN_CMPGE_16QI:
16077 case VSX_BUILTIN_CMPGE_U16QI:
16078 case VSX_BUILTIN_CMPGE_8HI:
16079 case VSX_BUILTIN_CMPGE_U8HI:
16080 case VSX_BUILTIN_CMPGE_4SI:
16081 case VSX_BUILTIN_CMPGE_U4SI:
16082 case VSX_BUILTIN_CMPGE_2DI:
16083 case VSX_BUILTIN_CMPGE_U2DI:
16084 fold_compare_helper (gsi, GE_EXPR, stmt);
16085 return true;
16086
16087 case ALTIVEC_BUILTIN_VCMPGTSB:
16088 case ALTIVEC_BUILTIN_VCMPGTUB:
16089 case ALTIVEC_BUILTIN_VCMPGTSH:
16090 case ALTIVEC_BUILTIN_VCMPGTUH:
16091 case ALTIVEC_BUILTIN_VCMPGTSW:
16092 case ALTIVEC_BUILTIN_VCMPGTUW:
16093 case P8V_BUILTIN_VCMPGTUD:
16094 case P8V_BUILTIN_VCMPGTSD:
16095 fold_compare_helper (gsi, GT_EXPR, stmt);
16096 return true;
16097
16098 case VSX_BUILTIN_CMPLE_16QI:
16099 case VSX_BUILTIN_CMPLE_U16QI:
16100 case VSX_BUILTIN_CMPLE_8HI:
16101 case VSX_BUILTIN_CMPLE_U8HI:
16102 case VSX_BUILTIN_CMPLE_4SI:
16103 case VSX_BUILTIN_CMPLE_U4SI:
16104 case VSX_BUILTIN_CMPLE_2DI:
16105 case VSX_BUILTIN_CMPLE_U2DI:
16106 fold_compare_helper (gsi, LE_EXPR, stmt);
16107 return true;
16108
16109     /* Flavors of vec_splat_[us]{8,16,32}.  */
16110 case ALTIVEC_BUILTIN_VSPLTISB:
16111 case ALTIVEC_BUILTIN_VSPLTISH:
16112 case ALTIVEC_BUILTIN_VSPLTISW:
16113 {
16114 arg0 = gimple_call_arg (stmt, 0);
16115 lhs = gimple_call_lhs (stmt);
16116
16117 	/* Only fold the vec_splat_*() if the low bits of arg0 form a
16118 	   5-bit signed constant in the range -16 to +15.  */
16119 if (TREE_CODE (arg0) != INTEGER_CST
16120 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
16121 return false;
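	/* For example, vec_splat_s32 (5) reaches here with arg0 == 5 and
	   folds to the constant vector { 5, 5, 5, 5 }.  */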
16122 gimple_seq stmts = NULL;
16123 location_t loc = gimple_location (stmt);
16124 tree splat_value = gimple_convert (&stmts, loc,
16125 TREE_TYPE (TREE_TYPE (lhs)), arg0);
16126 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16127 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
16128 g = gimple_build_assign (lhs, splat_tree);
16129 gimple_set_location (g, gimple_location (stmt));
16130 gsi_replace (gsi, g, true);
16131 return true;
16132 }
16133
16134 /* Flavors of vec_splat. */
16135 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
16136 case ALTIVEC_BUILTIN_VSPLTB:
16137 case ALTIVEC_BUILTIN_VSPLTH:
16138 case ALTIVEC_BUILTIN_VSPLTW:
16139 case VSX_BUILTIN_XXSPLTD_V2DI:
16140 case VSX_BUILTIN_XXSPLTD_V2DF:
16141 {
16142 	arg0 = gimple_call_arg (stmt, 0); /* Input vector.  */
16143 	arg1 = gimple_call_arg (stmt, 1); /* Index into arg0.  */
16144 	/* Only fold the vec_splat () if arg1 is both a constant value and
16145 	   a valid index into the arg0 vector.  */
16146 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
16147 if (TREE_CODE (arg1) != INTEGER_CST
16148 	    || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
16149 return false;
16150 lhs = gimple_call_lhs (stmt);
16151 tree lhs_type = TREE_TYPE (lhs);
16152 tree arg0_type = TREE_TYPE (arg0);
16153 tree splat;
16154 if (TREE_CODE (arg0) == VECTOR_CST)
16155 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
16156 else
16157 {
16158 /* Determine (in bits) the length and start location of the
16159 splat value for a call to the tree_vec_extract helper. */
16160 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
16161 * BITS_PER_UNIT / n_elts;
16162 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
16163 tree len = build_int_cst (bitsizetype, splat_elem_size);
16164 tree start = build_int_cst (bitsizetype, splat_start_bit);
16165 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
16166 len, start);
16167 }
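	/* E.g. for V8HI with arg1 == 3 (illustrative values):
	   splat_elem_size is 128/8 = 16 bits and splat_start_bit is 48,
	   so the extract pulls element 3 out of arg0.  */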
16168 /* And finally, build the new vector. */
16169 tree splat_tree = build_vector_from_val (lhs_type, splat);
16170 g = gimple_build_assign (lhs, splat_tree);
16171 gimple_set_location (g, gimple_location (stmt));
16172 gsi_replace (gsi, g, true);
16173 return true;
16174 }
16175
16176 /* vec_mergel (integrals). */
16177 case ALTIVEC_BUILTIN_VMRGLH:
16178 case ALTIVEC_BUILTIN_VMRGLW:
16179 case VSX_BUILTIN_XXMRGLW_4SI:
16180 case ALTIVEC_BUILTIN_VMRGLB:
16181 case VSX_BUILTIN_VEC_MERGEL_V2DI:
16182 case VSX_BUILTIN_XXMRGLW_4SF:
16183 case VSX_BUILTIN_VEC_MERGEL_V2DF:
16184 fold_mergehl_helper (gsi, stmt, 1);
16185 return true;
16186 /* vec_mergeh (integrals). */
16187 case ALTIVEC_BUILTIN_VMRGHH:
16188 case ALTIVEC_BUILTIN_VMRGHW:
16189 case VSX_BUILTIN_XXMRGHW_4SI:
16190 case ALTIVEC_BUILTIN_VMRGHB:
16191 case VSX_BUILTIN_VEC_MERGEH_V2DI:
16192 case VSX_BUILTIN_XXMRGHW_4SF:
16193 case VSX_BUILTIN_VEC_MERGEH_V2DF:
16194 fold_mergehl_helper (gsi, stmt, 0);
16195 return true;
16196
16197 /* Flavors of vec_mergee. */
16198 case P8V_BUILTIN_VMRGEW_V4SI:
16199 case P8V_BUILTIN_VMRGEW_V2DI:
16200 case P8V_BUILTIN_VMRGEW_V4SF:
16201 case P8V_BUILTIN_VMRGEW_V2DF:
16202 fold_mergeeo_helper (gsi, stmt, 0);
16203 return true;
16204 /* Flavors of vec_mergeo. */
16205 case P8V_BUILTIN_VMRGOW_V4SI:
16206 case P8V_BUILTIN_VMRGOW_V2DI:
16207 case P8V_BUILTIN_VMRGOW_V4SF:
16208 case P8V_BUILTIN_VMRGOW_V2DF:
16209 fold_mergeeo_helper (gsi, stmt, 1);
16210 return true;
16211
16212 /* d = vec_pack (a, b) */
16213 case P8V_BUILTIN_VPKUDUM:
16214 case ALTIVEC_BUILTIN_VPKUHUM:
16215 case ALTIVEC_BUILTIN_VPKUWUM:
16216 {
16217 arg0 = gimple_call_arg (stmt, 0);
16218 arg1 = gimple_call_arg (stmt, 1);
16219 lhs = gimple_call_lhs (stmt);
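	/* E.g. for ALTIVEC_BUILTIN_VPKUHUM (illustrative): arg0 and arg1
	   are V8HI and lhs is V16QI; VEC_PACK_TRUNC_EXPR narrows each
	   16-bit element to 8 bits and combines the two inputs into one
	   vector.  */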
16220 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
16221 gimple_set_location (g, gimple_location (stmt));
16222 gsi_replace (gsi, g, true);
16223 return true;
16224 }
16225
16226 /* d = vec_unpackh (a) */
16227 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
16228      in this code is endianness-sensitive, and needs to be swapped to
16229      handle both LE and BE targets.  */
16230 case ALTIVEC_BUILTIN_VUPKHSB:
16231 case ALTIVEC_BUILTIN_VUPKHSH:
16232 case P8V_BUILTIN_VUPKHSW:
16233 {
16234 arg0 = gimple_call_arg (stmt, 0);
16235 lhs = gimple_call_lhs (stmt);
16236 if (BYTES_BIG_ENDIAN)
16237 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16238 else
16239 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16240 gimple_set_location (g, gimple_location (stmt));
16241 gsi_replace (gsi, g, true);
16242 return true;
16243 }
16244 /* d = vec_unpackl (a) */
16245 case ALTIVEC_BUILTIN_VUPKLSB:
16246 case ALTIVEC_BUILTIN_VUPKLSH:
16247 case P8V_BUILTIN_VUPKLSW:
16248 {
16249 arg0 = gimple_call_arg (stmt, 0);
16250 lhs = gimple_call_lhs (stmt);
16251 if (BYTES_BIG_ENDIAN)
16252 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
16253 else
16254 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
16255 gimple_set_location (g, gimple_location (stmt));
16256 gsi_replace (gsi, g, true);
16257 return true;
16258 }
16259     /* There is no gimple type corresponding to pixel, so just return.  */
16260 case ALTIVEC_BUILTIN_VUPKHPX:
16261 case ALTIVEC_BUILTIN_VUPKLPX:
16262 return false;
16263
16264 /* vec_perm. */
16265 case ALTIVEC_BUILTIN_VPERM_16QI:
16266 case ALTIVEC_BUILTIN_VPERM_8HI:
16267 case ALTIVEC_BUILTIN_VPERM_4SI:
16268 case ALTIVEC_BUILTIN_VPERM_2DI:
16269 case ALTIVEC_BUILTIN_VPERM_4SF:
16270 case ALTIVEC_BUILTIN_VPERM_2DF:
16271 {
16272 arg0 = gimple_call_arg (stmt, 0);
16273 arg1 = gimple_call_arg (stmt, 1);
16274 tree permute = gimple_call_arg (stmt, 2);
16275 lhs = gimple_call_lhs (stmt);
16276 location_t loc = gimple_location (stmt);
16277 gimple_seq stmts = NULL;
16278 	// Convert arg0 and arg1 to match the type of the permute
16279 // for the VEC_PERM_EXPR operation.
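	// Illustrative: the permute operand of vec_perm is a vector of
	// unsigned chars, so e.g. V4SF data operands are first converted
	// to that 16-element shape, which VEC_PERM_EXPR requires of its
	// data and selector operands.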
16280 tree permute_type = (TREE_TYPE (permute));
16281 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16282 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16283 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16284 permute_type, arg0_ptype, arg1_ptype,
16285 permute);
16286 // Convert the result back to the desired lhs type upon completion.
16287 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16288 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16289 g = gimple_build_assign (lhs, temp);
16290 gimple_set_location (g, loc);
16291 gsi_replace (gsi, g, true);
16292 return true;
16293 }
16294
16295 default:
16296 if (TARGET_DEBUG_BUILTIN)
16297 	fprintf (stderr, "gimple builtin intrinsic not matched: %d %s %s\n",
16298 fn_code, fn_name1, fn_name2);
16299 break;
16300 }
16301
16302 return false;
16303 }
16304
16305 /* Expand an expression EXP that calls a built-in function,
16306 with result going to TARGET if that's convenient
16307 (and in mode MODE if that's convenient).
16308 SUBTARGET may be used as the target for computing one of EXP's operands.
16309 IGNORE is nonzero if the value is to be ignored. */
16310
16311 static rtx
16312 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16313 machine_mode mode ATTRIBUTE_UNUSED,
16314 int ignore ATTRIBUTE_UNUSED)
16315 {
16316 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16317 enum rs6000_builtins fcode
16318 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16319 size_t uns_fcode = (size_t)fcode;
16320 const struct builtin_description *d;
16321 size_t i;
16322 rtx ret;
16323 bool success;
16324 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16325 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16326 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16327
16328 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16329 floating point type, depending on whether long double is the IBM extended
16330 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16331 we only define one variant of the built-in function, and switch the code
16332 when defining it, rather than defining two built-ins and using the
16333 overload table in rs6000-c.c to switch between the two. If we don't have
16334 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16335 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16336 if (FLOAT128_IEEE_P (TFmode))
16337 switch (icode)
16338 {
16339 default:
16340 break;
16341
16342 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16343 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16344 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16345 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16346 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16347 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16348 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16349 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16350 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16351 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16352 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16353 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16354 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16355 }
16356
16357 if (TARGET_DEBUG_BUILTIN)
16358 {
16359 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16360 const char *name2 = (icode != CODE_FOR_nothing)
16361 ? get_insn_name ((int) icode)
16362 : "nothing";
16363 const char *name3;
16364
16365 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16366 {
16367 default: name3 = "unknown"; break;
16368 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16369 case RS6000_BTC_UNARY: name3 = "unary"; break;
16370 case RS6000_BTC_BINARY: name3 = "binary"; break;
16371 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16372 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16373 case RS6000_BTC_ABS: name3 = "abs"; break;
16374 case RS6000_BTC_DST: name3 = "dst"; break;
16375 }
16376
16377
16378 fprintf (stderr,
16379 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16380 (name1) ? name1 : "---", fcode,
16381 (name2) ? name2 : "---", (int) icode,
16382 name3,
16383 func_valid_p ? "" : ", not valid");
16384 }
16385
16386 if (!func_valid_p)
16387 {
16388 rs6000_invalid_builtin (fcode);
16389
16390 /* Given it is invalid, just generate a normal call. */
16391 return expand_call (exp, target, ignore);
16392 }
16393
16394 switch (fcode)
16395 {
16396 case RS6000_BUILTIN_RECIP:
16397 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16398
16399 case RS6000_BUILTIN_RECIPF:
16400 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16401
16402 case RS6000_BUILTIN_RSQRTF:
16403 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16404
16405 case RS6000_BUILTIN_RSQRT:
16406 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16407
16408 case POWER7_BUILTIN_BPERMD:
16409 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16410 ? CODE_FOR_bpermd_di
16411 : CODE_FOR_bpermd_si), exp, target);
16412
16413 case RS6000_BUILTIN_GET_TB:
16414 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16415 target);
16416
16417 case RS6000_BUILTIN_MFTB:
16418 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16419 ? CODE_FOR_rs6000_mftb_di
16420 : CODE_FOR_rs6000_mftb_si),
16421 target);
16422
16423 case RS6000_BUILTIN_MFFS:
16424 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16425
16426 case RS6000_BUILTIN_MTFSB0:
16427 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16428
16429 case RS6000_BUILTIN_MTFSB1:
16430 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16431
16432 case RS6000_BUILTIN_SET_FPSCR_RN:
16433 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16434 exp);
16435
16436 case RS6000_BUILTIN_SET_FPSCR_DRN:
16437 return
16438 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16439 exp);
16440
16441 case RS6000_BUILTIN_MFFSL:
16442 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16443
16444 case RS6000_BUILTIN_MTFSF:
16445 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16446
16447 case RS6000_BUILTIN_CPU_INIT:
16448 case RS6000_BUILTIN_CPU_IS:
16449 case RS6000_BUILTIN_CPU_SUPPORTS:
16450 return cpu_expand_builtin (fcode, exp, target);
16451
16452 case MISC_BUILTIN_SPEC_BARRIER:
16453 {
16454 emit_insn (gen_speculation_barrier ());
16455 return NULL_RTX;
16456 }
16457
16458 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16459 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16460 {
16461 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16462 : (int) CODE_FOR_altivec_lvsl_direct);
16463 machine_mode tmode = insn_data[icode2].operand[0].mode;
16464 machine_mode mode = insn_data[icode2].operand[1].mode;
16465 tree arg;
16466 rtx op, addr, pat;
16467
16468 gcc_assert (TARGET_ALTIVEC);
16469
16470 arg = CALL_EXPR_ARG (exp, 0);
16471 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16472 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16473 addr = memory_address (mode, op);
16474 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16475 op = addr;
16476 else
16477 {
16478 	  /* For the load case we need to negate the address.  */
16479 op = gen_reg_rtx (GET_MODE (addr));
16480 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16481 }
16482 op = gen_rtx_MEM (mode, op);
16483
16484 if (target == 0
16485 || GET_MODE (target) != tmode
16486 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16487 target = gen_reg_rtx (tmode);
16488
16489 pat = GEN_FCN (icode2) (target, op);
16490 if (!pat)
16491 return 0;
16492 emit_insn (pat);
16493
16494 return target;
16495 }
16496
16497 case ALTIVEC_BUILTIN_VCFUX:
16498 case ALTIVEC_BUILTIN_VCFSX:
16499 case ALTIVEC_BUILTIN_VCTUXS:
16500 case ALTIVEC_BUILTIN_VCTSXS:
16501 /* FIXME: There's got to be a nicer way to handle this case than
16502 constructing a new CALL_EXPR. */
16503 if (call_expr_nargs (exp) == 1)
16504 {
16505 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16506 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16507 }
16508 break;
16509
16510 /* For the pack and unpack int128 routines, fix up the builtin so it
16511 uses the correct IBM128 type. */
16512 case MISC_BUILTIN_PACK_IF:
16513 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16514 {
16515 icode = CODE_FOR_packtf;
16516 fcode = MISC_BUILTIN_PACK_TF;
16517 uns_fcode = (size_t)fcode;
16518 }
16519 break;
16520
16521 case MISC_BUILTIN_UNPACK_IF:
16522 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16523 {
16524 icode = CODE_FOR_unpacktf;
16525 fcode = MISC_BUILTIN_UNPACK_TF;
16526 uns_fcode = (size_t)fcode;
16527 }
16528 break;
16529
16530 default:
16531 break;
16532 }
16533
16534 if (TARGET_ALTIVEC)
16535 {
16536 ret = altivec_expand_builtin (exp, target, &success);
16537
16538 if (success)
16539 return ret;
16540 }
16541 if (TARGET_HTM)
16542 {
16543 ret = htm_expand_builtin (exp, target, &success);
16544
16545 if (success)
16546 return ret;
16547 }
16548
16549 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16550 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16551 gcc_assert (attr == RS6000_BTC_UNARY
16552 || attr == RS6000_BTC_BINARY
16553 || attr == RS6000_BTC_TERNARY
16554 || attr == RS6000_BTC_SPECIAL);
16555
16556 /* Handle simple unary operations. */
16557 d = bdesc_1arg;
16558 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16559 if (d->code == fcode)
16560 return rs6000_expand_unop_builtin (icode, exp, target);
16561
16562 /* Handle simple binary operations. */
16563 d = bdesc_2arg;
16564 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16565 if (d->code == fcode)
16566 return rs6000_expand_binop_builtin (icode, exp, target);
16567
16568 /* Handle simple ternary operations. */
16569 d = bdesc_3arg;
16570 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16571 if (d->code == fcode)
16572 return rs6000_expand_ternop_builtin (icode, exp, target);
16573
16574 /* Handle simple no-argument operations. */
16575 d = bdesc_0arg;
16576 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16577 if (d->code == fcode)
16578 return rs6000_expand_zeroop_builtin (icode, target);
16579
16580 gcc_unreachable ();
16581 }
16582
16583 /* Create a builtin vector type with a name, taking care not to give
16584    the canonical type a name.  */
16585
16586 static tree
16587 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16588 {
16589 tree result = build_vector_type (elt_type, num_elts);
16590
16591 /* Copy so we don't give the canonical type a name. */
16592 result = build_variant_type_copy (result);
16593
16594 add_builtin_type (name, result);
16595
16596 return result;
16597 }
16598
16599 static void
16600 rs6000_init_builtins (void)
16601 {
16602 tree tdecl;
16603 tree ftype;
16604 machine_mode mode;
16605
16606 if (TARGET_DEBUG_BUILTIN)
16607 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16608 (TARGET_ALTIVEC) ? ", altivec" : "",
16609 (TARGET_VSX) ? ", vsx" : "");
16610
16611 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16612 : "__vector long long",
16613 intDI_type_node, 2);
16614 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16615 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16616 intSI_type_node, 4);
16617 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16618 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16619 intHI_type_node, 8);
16620 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16621 intQI_type_node, 16);
16622
16623 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16624 unsigned_intQI_type_node, 16);
16625 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16626 unsigned_intHI_type_node, 8);
16627 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16628 unsigned_intSI_type_node, 4);
16629 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16630 ? "__vector unsigned long"
16631 : "__vector unsigned long long",
16632 unsigned_intDI_type_node, 2);
16633
16634 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16635
16636 const_str_type_node
16637 = build_pointer_type (build_qualified_type (char_type_node,
16638 TYPE_QUAL_CONST));
16639
16640 /* We use V1TI mode as a special container to hold __int128_t items that
16641 must live in VSX registers. */
16642 if (intTI_type_node)
16643 {
16644 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16645 intTI_type_node, 1);
16646 unsigned_V1TI_type_node
16647 = rs6000_vector_type ("__vector unsigned __int128",
16648 unsigned_intTI_type_node, 1);
16649 }
16650
16651 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16652 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16653 'vector unsigned short'. */
16654
16655 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16656 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16657 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16658 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16659 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16660
16661 long_integer_type_internal_node = long_integer_type_node;
16662 long_unsigned_type_internal_node = long_unsigned_type_node;
16663 long_long_integer_type_internal_node = long_long_integer_type_node;
16664 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16665 intQI_type_internal_node = intQI_type_node;
16666 uintQI_type_internal_node = unsigned_intQI_type_node;
16667 intHI_type_internal_node = intHI_type_node;
16668 uintHI_type_internal_node = unsigned_intHI_type_node;
16669 intSI_type_internal_node = intSI_type_node;
16670 uintSI_type_internal_node = unsigned_intSI_type_node;
16671 intDI_type_internal_node = intDI_type_node;
16672 uintDI_type_internal_node = unsigned_intDI_type_node;
16673 intTI_type_internal_node = intTI_type_node;
16674 uintTI_type_internal_node = unsigned_intTI_type_node;
16675 float_type_internal_node = float_type_node;
16676 double_type_internal_node = double_type_node;
16677 long_double_type_internal_node = long_double_type_node;
16678 dfloat64_type_internal_node = dfloat64_type_node;
16679 dfloat128_type_internal_node = dfloat128_type_node;
16680 void_type_internal_node = void_type_node;
16681
16682 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16683 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16684 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16685 format that uses a pair of doubles, depending on the switches and
16686 defaults.
16687
16688    If we don't support either 128-bit IBM double double or IEEE 128-bit
16689    floating point, we need to make sure the type is non-zero, or else the
16690    self-test fails during bootstrap.
16691
16692 Always create __ibm128 as a separate type, even if the current long double
16693 format is IBM extended double.
16694
16695 For IEEE 128-bit floating point, always create the type __ieee128. If the
16696 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16697 __ieee128. */
16698 if (TARGET_FLOAT128_TYPE)
16699 {
16700 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16701 ibm128_float_type_node = long_double_type_node;
16702 else
16703 {
16704 ibm128_float_type_node = make_node (REAL_TYPE);
16705 TYPE_PRECISION (ibm128_float_type_node) = 128;
16706 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16707 layout_type (ibm128_float_type_node);
16708 }
16709
16710 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16711 "__ibm128");
16712
16713 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16714 ieee128_float_type_node = long_double_type_node;
16715 else
16716 ieee128_float_type_node = float128_type_node;
16717
16718 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16719 "__ieee128");
16720 }
16721
16722 else
16723 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16724
16725 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16726 tree type node. */
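  /* E.g. (illustrative) builtin_function_type can look up
     builtin_mode_to_type[V4SImode][1] to obtain unsigned_V4SI_type_node
     when it needs an unsigned V4SI argument or return type.  */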
16727 builtin_mode_to_type[QImode][0] = integer_type_node;
16728 builtin_mode_to_type[HImode][0] = integer_type_node;
16729 builtin_mode_to_type[SImode][0] = intSI_type_node;
16730 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16731 builtin_mode_to_type[DImode][0] = intDI_type_node;
16732 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16733 builtin_mode_to_type[TImode][0] = intTI_type_node;
16734 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16735 builtin_mode_to_type[SFmode][0] = float_type_node;
16736 builtin_mode_to_type[DFmode][0] = double_type_node;
16737 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16738 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16739 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16740 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16741 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16742 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16743 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16744 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16745 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16746 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16747 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16748 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16749 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16750 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16751 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16752 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16753 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16754
16755 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16756 TYPE_NAME (bool_char_type_node) = tdecl;
16757
16758 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16759 TYPE_NAME (bool_short_type_node) = tdecl;
16760
16761 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16762 TYPE_NAME (bool_int_type_node) = tdecl;
16763
16764 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16765 TYPE_NAME (pixel_type_node) = tdecl;
16766
16767 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16768 bool_char_type_node, 16);
16769 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16770 bool_short_type_node, 8);
16771 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16772 bool_int_type_node, 4);
16773 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16774 ? "__vector __bool long"
16775 : "__vector __bool long long",
16776 bool_long_long_type_node, 2);
16777 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16778 pixel_type_node, 8);
16779
16780 /* Create Altivec and VSX builtins on machines with at least the
16781 general purpose extensions (970 and newer) to allow the use of
16782 the target attribute. */
16783 if (TARGET_EXTRA_BUILTINS)
16784 altivec_init_builtins ();
16785 if (TARGET_HTM)
16786 htm_init_builtins ();
16787
16788 if (TARGET_EXTRA_BUILTINS)
16789 rs6000_common_init_builtins ();
16790
16791 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16792 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16793 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16794
16795 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16796 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16797 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16798
16799 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16800 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16801 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16802
16803 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16804 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16805 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16806
16807 mode = (TARGET_64BIT) ? DImode : SImode;
16808 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16809 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16810 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16811
16812 ftype = build_function_type_list (unsigned_intDI_type_node,
16813 NULL_TREE);
16814 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16815
16816 if (TARGET_64BIT)
16817 ftype = build_function_type_list (unsigned_intDI_type_node,
16818 NULL_TREE);
16819 else
16820 ftype = build_function_type_list (unsigned_intSI_type_node,
16821 NULL_TREE);
16822 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16823
16824 ftype = build_function_type_list (double_type_node, NULL_TREE);
16825 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16826
16827 ftype = build_function_type_list (double_type_node, NULL_TREE);
16828 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16829
16830 ftype = build_function_type_list (void_type_node,
16831 intSI_type_node,
16832 NULL_TREE);
16833 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16834
16835 ftype = build_function_type_list (void_type_node,
16836 intSI_type_node,
16837 NULL_TREE);
16838 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16839
16840 ftype = build_function_type_list (void_type_node,
16841 intDI_type_node,
16842 NULL_TREE);
16843 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16844
16845 ftype = build_function_type_list (void_type_node,
16846 intDI_type_node,
16847 NULL_TREE);
16848 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16849
16850 ftype = build_function_type_list (void_type_node,
16851 intSI_type_node, double_type_node,
16852 NULL_TREE);
16853 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16854
16855 ftype = build_function_type_list (void_type_node, NULL_TREE);
16856 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16857 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16858 MISC_BUILTIN_SPEC_BARRIER);
16859
16860 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16861 NULL_TREE);
16862 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16863 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16864
16865 /* AIX libm provides clog as __clog. */
16866 if (TARGET_XCOFF &&
16867 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16868 set_user_assembler_name (tdecl, "__clog");
16869
16870 #ifdef SUBTARGET_INIT_BUILTINS
16871 SUBTARGET_INIT_BUILTINS;
16872 #endif
16873 }
16874
16875 /* Returns the rs6000 builtin decl for CODE. */
16876
16877 static tree
16878 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16879 {
16880 HOST_WIDE_INT fnmask;
16881
16882 if (code >= RS6000_BUILTIN_COUNT)
16883 return error_mark_node;
16884
16885 fnmask = rs6000_builtin_info[code].mask;
16886 if ((fnmask & rs6000_builtin_mask) != fnmask)
16887 {
16888 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16889 return error_mark_node;
16890 }
16891
16892 return rs6000_builtin_decls[code];
16893 }
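/* For example, if a builtin's info mask requires both
   RS6000_BTM_ALTIVEC and RS6000_BTM_VSX but the current target only
   enables Altivec, then

     fnmask               = RS6000_BTM_ALTIVEC | RS6000_BTM_VSX
     rs6000_builtin_mask  = RS6000_BTM_ALTIVEC
     fnmask & rs6000_builtin_mask = RS6000_BTM_ALTIVEC, which != fnmask

   so the subset test above fails, the builtin is diagnosed via
   rs6000_invalid_builtin, and error_mark_node is returned.  */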
16894
16895 static void
16896 altivec_init_builtins (void)
16897 {
16898 const struct builtin_description *d;
16899 size_t i;
16900 tree ftype;
16901 tree decl;
16902 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16903
16904 tree pvoid_type_node = build_pointer_type (void_type_node);
16905
16906 tree pcvoid_type_node
16907 = build_pointer_type (build_qualified_type (void_type_node,
16908 TYPE_QUAL_CONST));
16909
16910 tree int_ftype_opaque
16911 = build_function_type_list (integer_type_node,
16912 opaque_V4SI_type_node, NULL_TREE);
16913 tree opaque_ftype_opaque
16914 = build_function_type_list (integer_type_node, NULL_TREE);
16915 tree opaque_ftype_opaque_int
16916 = build_function_type_list (opaque_V4SI_type_node,
16917 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16918 tree opaque_ftype_opaque_opaque_int
16919 = build_function_type_list (opaque_V4SI_type_node,
16920 opaque_V4SI_type_node, opaque_V4SI_type_node,
16921 integer_type_node, NULL_TREE);
16922 tree opaque_ftype_opaque_opaque_opaque
16923 = build_function_type_list (opaque_V4SI_type_node,
16924 opaque_V4SI_type_node, opaque_V4SI_type_node,
16925 opaque_V4SI_type_node, NULL_TREE);
16926 tree opaque_ftype_opaque_opaque
16927 = build_function_type_list (opaque_V4SI_type_node,
16928 opaque_V4SI_type_node, opaque_V4SI_type_node,
16929 NULL_TREE);
16930 tree int_ftype_int_opaque_opaque
16931 = build_function_type_list (integer_type_node,
16932 integer_type_node, opaque_V4SI_type_node,
16933 opaque_V4SI_type_node, NULL_TREE);
16934 tree int_ftype_int_v4si_v4si
16935 = build_function_type_list (integer_type_node,
16936 integer_type_node, V4SI_type_node,
16937 V4SI_type_node, NULL_TREE);
16938 tree int_ftype_int_v2di_v2di
16939 = build_function_type_list (integer_type_node,
16940 integer_type_node, V2DI_type_node,
16941 V2DI_type_node, NULL_TREE);
16942 tree void_ftype_v4si
16943 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16944 tree v8hi_ftype_void
16945 = build_function_type_list (V8HI_type_node, NULL_TREE);
16946 tree void_ftype_void
16947 = build_function_type_list (void_type_node, NULL_TREE);
16948 tree void_ftype_int
16949 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16950
16951 tree opaque_ftype_long_pcvoid
16952 = build_function_type_list (opaque_V4SI_type_node,
16953 long_integer_type_node, pcvoid_type_node,
16954 NULL_TREE);
16955 tree v16qi_ftype_long_pcvoid
16956 = build_function_type_list (V16QI_type_node,
16957 long_integer_type_node, pcvoid_type_node,
16958 NULL_TREE);
16959 tree v8hi_ftype_long_pcvoid
16960 = build_function_type_list (V8HI_type_node,
16961 long_integer_type_node, pcvoid_type_node,
16962 NULL_TREE);
16963 tree v4si_ftype_long_pcvoid
16964 = build_function_type_list (V4SI_type_node,
16965 long_integer_type_node, pcvoid_type_node,
16966 NULL_TREE);
16967 tree v4sf_ftype_long_pcvoid
16968 = build_function_type_list (V4SF_type_node,
16969 long_integer_type_node, pcvoid_type_node,
16970 NULL_TREE);
16971 tree v2df_ftype_long_pcvoid
16972 = build_function_type_list (V2DF_type_node,
16973 long_integer_type_node, pcvoid_type_node,
16974 NULL_TREE);
16975 tree v2di_ftype_long_pcvoid
16976 = build_function_type_list (V2DI_type_node,
16977 long_integer_type_node, pcvoid_type_node,
16978 NULL_TREE);
16979 tree v1ti_ftype_long_pcvoid
16980 = build_function_type_list (V1TI_type_node,
16981 long_integer_type_node, pcvoid_type_node,
16982 NULL_TREE);
16983
16984 tree void_ftype_opaque_long_pvoid
16985 = build_function_type_list (void_type_node,
16986 opaque_V4SI_type_node, long_integer_type_node,
16987 pvoid_type_node, NULL_TREE);
16988 tree void_ftype_v4si_long_pvoid
16989 = build_function_type_list (void_type_node,
16990 V4SI_type_node, long_integer_type_node,
16991 pvoid_type_node, NULL_TREE);
16992 tree void_ftype_v16qi_long_pvoid
16993 = build_function_type_list (void_type_node,
16994 V16QI_type_node, long_integer_type_node,
16995 pvoid_type_node, NULL_TREE);
16996
16997 tree void_ftype_v16qi_pvoid_long
16998 = build_function_type_list (void_type_node,
16999 V16QI_type_node, pvoid_type_node,
17000 long_integer_type_node, NULL_TREE);
17001
17002 tree void_ftype_v8hi_long_pvoid
17003 = build_function_type_list (void_type_node,
17004 V8HI_type_node, long_integer_type_node,
17005 pvoid_type_node, NULL_TREE);
17006 tree void_ftype_v4sf_long_pvoid
17007 = build_function_type_list (void_type_node,
17008 V4SF_type_node, long_integer_type_node,
17009 pvoid_type_node, NULL_TREE);
17010 tree void_ftype_v2df_long_pvoid
17011 = build_function_type_list (void_type_node,
17012 V2DF_type_node, long_integer_type_node,
17013 pvoid_type_node, NULL_TREE);
17014 tree void_ftype_v1ti_long_pvoid
17015 = build_function_type_list (void_type_node,
17016 V1TI_type_node, long_integer_type_node,
17017 pvoid_type_node, NULL_TREE);
17018 tree void_ftype_v2di_long_pvoid
17019 = build_function_type_list (void_type_node,
17020 V2DI_type_node, long_integer_type_node,
17021 pvoid_type_node, NULL_TREE);
17022 tree int_ftype_int_v8hi_v8hi
17023 = build_function_type_list (integer_type_node,
17024 integer_type_node, V8HI_type_node,
17025 V8HI_type_node, NULL_TREE);
17026 tree int_ftype_int_v16qi_v16qi
17027 = build_function_type_list (integer_type_node,
17028 integer_type_node, V16QI_type_node,
17029 V16QI_type_node, NULL_TREE);
17030 tree int_ftype_int_v4sf_v4sf
17031 = build_function_type_list (integer_type_node,
17032 integer_type_node, V4SF_type_node,
17033 V4SF_type_node, NULL_TREE);
17034 tree int_ftype_int_v2df_v2df
17035 = build_function_type_list (integer_type_node,
17036 integer_type_node, V2DF_type_node,
17037 V2DF_type_node, NULL_TREE);
17038 tree v2di_ftype_v2di
17039 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17040 tree v4si_ftype_v4si
17041 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17042 tree v8hi_ftype_v8hi
17043 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17044 tree v16qi_ftype_v16qi
17045 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17046 tree v4sf_ftype_v4sf
17047 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17048 tree v2df_ftype_v2df
17049 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17050 tree void_ftype_pcvoid_int_int
17051 = build_function_type_list (void_type_node,
17052 pcvoid_type_node, integer_type_node,
17053 integer_type_node, NULL_TREE);
17054
17055 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17056 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17057 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17058 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17059 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17060 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17061 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17062 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17063 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17064 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17065 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17066 ALTIVEC_BUILTIN_LVXL_V2DF);
17067 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17068 ALTIVEC_BUILTIN_LVXL_V2DI);
17069 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17070 ALTIVEC_BUILTIN_LVXL_V4SF);
17071 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17072 ALTIVEC_BUILTIN_LVXL_V4SI);
17073 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17074 ALTIVEC_BUILTIN_LVXL_V8HI);
17075 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17076 ALTIVEC_BUILTIN_LVXL_V16QI);
17077 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17078 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
17079 ALTIVEC_BUILTIN_LVX_V1TI);
17080 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17081 ALTIVEC_BUILTIN_LVX_V2DF);
17082 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17083 ALTIVEC_BUILTIN_LVX_V2DI);
17084 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17085 ALTIVEC_BUILTIN_LVX_V4SF);
17086 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17087 ALTIVEC_BUILTIN_LVX_V4SI);
17088 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17089 ALTIVEC_BUILTIN_LVX_V8HI);
17090 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17091 ALTIVEC_BUILTIN_LVX_V16QI);
17092 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17093 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17094 ALTIVEC_BUILTIN_STVX_V2DF);
17095 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17096 ALTIVEC_BUILTIN_STVX_V2DI);
17097 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17098 ALTIVEC_BUILTIN_STVX_V4SF);
17099 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17100 ALTIVEC_BUILTIN_STVX_V4SI);
17101 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17102 ALTIVEC_BUILTIN_STVX_V8HI);
17103 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17104 ALTIVEC_BUILTIN_STVX_V16QI);
17105 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17106 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17107 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17108 ALTIVEC_BUILTIN_STVXL_V2DF);
17109 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17110 ALTIVEC_BUILTIN_STVXL_V2DI);
17111 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17112 ALTIVEC_BUILTIN_STVXL_V4SF);
17113 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17114 ALTIVEC_BUILTIN_STVXL_V4SI);
17115 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17116 ALTIVEC_BUILTIN_STVXL_V8HI);
17117 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17118 ALTIVEC_BUILTIN_STVXL_V16QI);
17119 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17120 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17121 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17122 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17123 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17124 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17125 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17126 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17127 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17128 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17129 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17130 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17131 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17132 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17133 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17134 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17135
17136 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17137 VSX_BUILTIN_LXVD2X_V2DF);
17138 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17139 VSX_BUILTIN_LXVD2X_V2DI);
17140 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17141 VSX_BUILTIN_LXVW4X_V4SF);
17142 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17143 VSX_BUILTIN_LXVW4X_V4SI);
17144 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17145 VSX_BUILTIN_LXVW4X_V8HI);
17146 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17147 VSX_BUILTIN_LXVW4X_V16QI);
17148 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17149 VSX_BUILTIN_STXVD2X_V2DF);
17150 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17151 VSX_BUILTIN_STXVD2X_V2DI);
17152 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17153 VSX_BUILTIN_STXVW4X_V4SF);
17154 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17155 VSX_BUILTIN_STXVW4X_V4SI);
17156 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17157 VSX_BUILTIN_STXVW4X_V8HI);
17158 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17159 VSX_BUILTIN_STXVW4X_V16QI);
17160
17161 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17162 VSX_BUILTIN_LD_ELEMREV_V2DF);
17163 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17164 VSX_BUILTIN_LD_ELEMREV_V2DI);
17165 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17166 VSX_BUILTIN_LD_ELEMREV_V4SF);
17167 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17168 VSX_BUILTIN_LD_ELEMREV_V4SI);
17169 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17170 VSX_BUILTIN_LD_ELEMREV_V8HI);
17171 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17172 VSX_BUILTIN_LD_ELEMREV_V16QI);
17173 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17174 VSX_BUILTIN_ST_ELEMREV_V2DF);
17175 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
17176 VSX_BUILTIN_ST_ELEMREV_V1TI);
17177 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17178 VSX_BUILTIN_ST_ELEMREV_V2DI);
17179 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17180 VSX_BUILTIN_ST_ELEMREV_V4SF);
17181 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17182 VSX_BUILTIN_ST_ELEMREV_V4SI);
17183 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
17184 VSX_BUILTIN_ST_ELEMREV_V8HI);
17185 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
17186 VSX_BUILTIN_ST_ELEMREV_V16QI);
17187
17188 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17189 VSX_BUILTIN_VEC_LD);
17190 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17191 VSX_BUILTIN_VEC_ST);
17192 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17193 VSX_BUILTIN_VEC_XL);
17194 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
17195 VSX_BUILTIN_VEC_XL_BE);
17196 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17197 VSX_BUILTIN_VEC_XST);
17198 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
17199 VSX_BUILTIN_VEC_XST_BE);
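/* Usage sketch (illustrative only, not part of this file): these
   overloads back the vec_xl/vec_xst intrinsics from altivec.h, e.g.

     #include <altivec.h>

     void
     copy_v4si (signed int *src, signed int *dst)
     {
       vector signed int v = vec_xl (0, src);
       vec_xst (v, 0, dst);
     }

   The opaque_V4SI signatures above are placeholders; overload
   resolution substitutes the concrete vector types.  */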
17200
17201 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17202 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17203 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17204
17205 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17206 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17207 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17208 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17209 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17210 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17211 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17212 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17213 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17214 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17215 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17216 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17217
17218 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17219 ALTIVEC_BUILTIN_VEC_ADDE);
17220 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17221 ALTIVEC_BUILTIN_VEC_ADDEC);
17222 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17223 ALTIVEC_BUILTIN_VEC_CMPNE);
17224 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17225 ALTIVEC_BUILTIN_VEC_MUL);
17226 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
17227 ALTIVEC_BUILTIN_VEC_SUBE);
17228 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
17229 ALTIVEC_BUILTIN_VEC_SUBEC);
17230
17231 /* Cell builtins. */
17232 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17233 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17234 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17235 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17236
17237 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17238 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17239 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17240 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17241
17242 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17243 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17244 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17245 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17246
17247 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17248 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17249 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17250 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17251
17252 if (TARGET_P9_VECTOR)
17253 {
17254 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17255 P9V_BUILTIN_STXVL);
17256 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
17257 P9V_BUILTIN_XST_LEN_R);
17258 }
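/* Usage sketch (illustrative only, not part of this file): on ISA
   3.0 the variable-length store is reachable through the overloaded
   vec_xst_len intrinsic, which is expected to resolve to the STXVL
   builtin defined above:

     #include <altivec.h>

     void
     store_prefix (vector unsigned char v, unsigned char *p, long n)
     {
       vec_xst_len (v, p, n);
     }

   which stores only the first n bytes of v.  */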
17259
17260 /* Add the DST variants. */
17261 d = bdesc_dst;
17262 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17263 {
17264 HOST_WIDE_INT mask = d->mask;
17265
17266 /* It is expected that these dst built-in functions may have
17267 d->icode equal to CODE_FOR_nothing. */
17268 if ((mask & builtin_mask) != mask)
17269 {
17270 if (TARGET_DEBUG_BUILTIN)
17271 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17272 d->name);
17273 continue;
17274 }
17275 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17276 }
17277
17278 /* Initialize the predicates. */
17279 d = bdesc_altivec_preds;
17280 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17281 {
17282 machine_mode mode1;
17283 tree type;
17284 HOST_WIDE_INT mask = d->mask;
17285
17286 if ((mask & builtin_mask) != mask)
17287 {
17288 if (TARGET_DEBUG_BUILTIN)
17289 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17290 d->name);
17291 continue;
17292 }
17293
17294 if (rs6000_overloaded_builtin_p (d->code))
17295 mode1 = VOIDmode;
17296 else
17297 {
17298 /* Cannot define builtin if the instruction is disabled. */
17299 gcc_assert (d->icode != CODE_FOR_nothing);
17300 mode1 = insn_data[d->icode].operand[1].mode;
17301 }
17302
17303 switch (mode1)
17304 {
17305 case E_VOIDmode:
17306 type = int_ftype_int_opaque_opaque;
17307 break;
17308 case E_V2DImode:
17309 type = int_ftype_int_v2di_v2di;
17310 break;
17311 case E_V4SImode:
17312 type = int_ftype_int_v4si_v4si;
17313 break;
17314 case E_V8HImode:
17315 type = int_ftype_int_v8hi_v8hi;
17316 break;
17317 case E_V16QImode:
17318 type = int_ftype_int_v16qi_v16qi;
17319 break;
17320 case E_V4SFmode:
17321 type = int_ftype_int_v4sf_v4sf;
17322 break;
17323 case E_V2DFmode:
17324 type = int_ftype_int_v2df_v2df;
17325 break;
17326 default:
17327 gcc_unreachable ();
17328 }
17329
17330 def_builtin (d->name, type, d->code);
17331 }
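/* Usage sketch (illustrative only, not part of this file): the
   predicate builtins defined by this loop take a CR6 selector as
   their first argument; altivec.h wraps them, e.g.

     #include <altivec.h>

     int
     all_equal (vector signed int a, vector signed int b)
     {
       return vec_all_eq (a, b);
     }

   which tests the "all elements true" bit of CR6 left by the
   record-form vector compare.  */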
17332
17333 /* Initialize the abs* operators. */
17334 d = bdesc_abs;
17335 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17336 {
17337 machine_mode mode0;
17338 tree type;
17339 HOST_WIDE_INT mask = d->mask;
17340
17341 if ((mask & builtin_mask) != mask)
17342 {
17343 if (TARGET_DEBUG_BUILTIN)
17344 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17345 d->name);
17346 continue;
17347 }
17348
17349 /* Cannot define builtin if the instruction is disabled. */
17350 gcc_assert (d->icode != CODE_FOR_nothing);
17351 mode0 = insn_data[d->icode].operand[0].mode;
17352
17353 switch (mode0)
17354 {
17355 case E_V2DImode:
17356 type = v2di_ftype_v2di;
17357 break;
17358 case E_V4SImode:
17359 type = v4si_ftype_v4si;
17360 break;
17361 case E_V8HImode:
17362 type = v8hi_ftype_v8hi;
17363 break;
17364 case E_V16QImode:
17365 type = v16qi_ftype_v16qi;
17366 break;
17367 case E_V4SFmode:
17368 type = v4sf_ftype_v4sf;
17369 break;
17370 case E_V2DFmode:
17371 type = v2df_ftype_v2df;
17372 break;
17373 default:
17374 gcc_unreachable ();
17375 }
17376
17377 def_builtin (d->name, type, d->code);
17378 }
17379
17380 /* Initialize target builtin that implements
17381 targetm.vectorize.builtin_mask_for_load. */
17382
17383 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17384 v16qi_ftype_long_pcvoid,
17385 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17386 BUILT_IN_MD, NULL, NULL_TREE);
17387 TREE_READONLY (decl) = 1;
17388 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17389 altivec_builtin_mask_for_load = decl;
17390
17391 /* Access to the vec_init patterns. */
17392 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17393 integer_type_node, integer_type_node,
17394 integer_type_node, NULL_TREE);
17395 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17396
17397 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17398 short_integer_type_node,
17399 short_integer_type_node,
17400 short_integer_type_node,
17401 short_integer_type_node,
17402 short_integer_type_node,
17403 short_integer_type_node,
17404 short_integer_type_node, NULL_TREE);
17405 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17406
17407 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17408 char_type_node, char_type_node,
17409 char_type_node, char_type_node,
17410 char_type_node, char_type_node,
17411 char_type_node, char_type_node,
17412 char_type_node, char_type_node,
17413 char_type_node, char_type_node,
17414 char_type_node, char_type_node,
17415 char_type_node, NULL_TREE);
17416 def_builtin ("__builtin_vec_init_v16qi", ftype,
17417 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17418
17419 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17420 float_type_node, float_type_node,
17421 float_type_node, NULL_TREE);
17422 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17423
17424 /* VSX builtins. */
17425 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17426 double_type_node, NULL_TREE);
17427 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17428
17429 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17430 intDI_type_node, NULL_TREE);
17431 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17432
17433 /* Access to the vec_set patterns. */
17434 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17435 intSI_type_node,
17436 integer_type_node, NULL_TREE);
17437 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17438
17439 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17440 intHI_type_node,
17441 integer_type_node, NULL_TREE);
17442 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17443
17444 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17445 intQI_type_node,
17446 integer_type_node, NULL_TREE);
17447 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17448
17449 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17450 float_type_node,
17451 integer_type_node, NULL_TREE);
17452 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17453
17454 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17455 double_type_node,
17456 integer_type_node, NULL_TREE);
17457 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17458
17459 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17460 intDI_type_node,
17461 integer_type_node, NULL_TREE);
17462 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17463
17464 /* Access to the vec_extract patterns. */
17465 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17466 integer_type_node, NULL_TREE);
17467 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17468
17469 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17470 integer_type_node, NULL_TREE);
17471 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17472
17473 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17474 integer_type_node, NULL_TREE);
17475 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17476
17477 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17478 integer_type_node, NULL_TREE);
17479 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17480
17481 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17482 integer_type_node, NULL_TREE);
17483 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17484
17485 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17486 integer_type_node, NULL_TREE);
17487 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17488
17489
17490 if (V1TI_type_node)
17491 {
17492 tree v1ti_ftype_long_pcvoid
17493 = build_function_type_list (V1TI_type_node,
17494 long_integer_type_node, pcvoid_type_node,
17495 NULL_TREE);
17496 tree void_ftype_v1ti_long_pvoid
17497 = build_function_type_list (void_type_node,
17498 V1TI_type_node, long_integer_type_node,
17499 pvoid_type_node, NULL_TREE);
17500 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17501 VSX_BUILTIN_LD_ELEMREV_V1TI);
17502 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17503 VSX_BUILTIN_LXVD2X_V1TI);
17504 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17505 VSX_BUILTIN_STXVD2X_V1TI);
17506 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17507 NULL_TREE);
17508 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17509 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17510 intTI_type_node,
17511 integer_type_node, NULL_TREE);
17512 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17513 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17514 integer_type_node, NULL_TREE);
17515 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17516 }
17517
17518 }
17519
17520 static void
17521 htm_init_builtins (void)
17522 {
17523 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17524 const struct builtin_description *d;
17525 size_t i;
17526
17527 d = bdesc_htm;
17528 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17529 {
17530 tree op[MAX_HTM_OPERANDS], type;
17531 HOST_WIDE_INT mask = d->mask;
17532 unsigned attr = rs6000_builtin_info[d->code].attr;
17533 bool void_func = (attr & RS6000_BTC_VOID);
17534 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17535 int nopnds = 0;
17536 tree gpr_type_node;
17537 tree rettype;
17538 tree argtype;
17539
17540 /* It is expected that these htm built-in functions may have
17541 d->icode equal to CODE_FOR_nothing. */
17542
17543 if (TARGET_32BIT && TARGET_POWERPC64)
17544 gpr_type_node = long_long_unsigned_type_node;
17545 else
17546 gpr_type_node = long_unsigned_type_node;
17547
17548 if (attr & RS6000_BTC_SPR)
17549 {
17550 rettype = gpr_type_node;
17551 argtype = gpr_type_node;
17552 }
17553 else if (d->code == HTM_BUILTIN_TABORTDC
17554 || d->code == HTM_BUILTIN_TABORTDCI)
17555 {
17556 rettype = unsigned_type_node;
17557 argtype = gpr_type_node;
17558 }
17559 else
17560 {
17561 rettype = unsigned_type_node;
17562 argtype = unsigned_type_node;
17563 }
17564
17565 if ((mask & builtin_mask) != mask)
17566 {
17567 if (TARGET_DEBUG_BUILTIN)
17568 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17569 continue;
17570 }
17571
17572 if (d->name == 0)
17573 {
17574 if (TARGET_DEBUG_BUILTIN)
17575 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17576 (long unsigned) i);
17577 continue;
17578 }
17579
17580 op[nopnds++] = (void_func) ? void_type_node : rettype;
17581
17582 if (attr_args == RS6000_BTC_UNARY)
17583 op[nopnds++] = argtype;
17584 else if (attr_args == RS6000_BTC_BINARY)
17585 {
17586 op[nopnds++] = argtype;
17587 op[nopnds++] = argtype;
17588 }
17589 else if (attr_args == RS6000_BTC_TERNARY)
17590 {
17591 op[nopnds++] = argtype;
17592 op[nopnds++] = argtype;
17593 op[nopnds++] = argtype;
17594 }
17595
17596 switch (nopnds)
17597 {
17598 case 1:
17599 type = build_function_type_list (op[0], NULL_TREE);
17600 break;
17601 case 2:
17602 type = build_function_type_list (op[0], op[1], NULL_TREE);
17603 break;
17604 case 3:
17605 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17606 break;
17607 case 4:
17608 type = build_function_type_list (op[0], op[1], op[2], op[3],
17609 NULL_TREE);
17610 break;
17611 default:
17612 gcc_unreachable ();
17613 }
17614
17615 def_builtin (d->name, type, d->code);
17616 }
17617 }
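/* Usage sketch (illustrative only, not part of this file): with
   -mhtm the builtins defined above support the usual transactional
   pattern:

     long
     txn_increment (long *p)
     {
       if (__builtin_tbegin (0))
         {
           long v = ++*p;
           __builtin_tend (0);
           return v;
         }
       return -1;
     }

   __builtin_tbegin returns nonzero when the transaction starts; the
   fall-through path runs when it fails or aborts.  */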
17618
17619 /* Hash function for builtin functions with up to 3 arguments and a return
17620 type. */
17621 hashval_t
17622 builtin_hasher::hash (builtin_hash_struct *bh)
17623 {
17624 unsigned ret = 0;
17625 int i;
17626
17627 for (i = 0; i < 4; i++)
17628 {
17629 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17630 ret = (ret * 2) + bh->uns_p[i];
17631 }
17632
17633 return ret;
17634 }
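/* For instance, a builtin returning DFmode with two DFmode arguments
   (__builtin_recipdiv above) hashes the digit sequence DF, DF, DF,
   VOID in base MAX_MACHINE_MODE, interleaving one signedness bit per
   position, so two prototypes share a slot only when every mode and
   every uns_p flag agree; that is exactly the test in the equal
   method below.  */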
17635
17636 /* Compare builtin hash entries H1 and H2 for equivalence. */
17637 bool
17638 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17639 {
17640 return ((p1->mode[0] == p2->mode[0])
17641 && (p1->mode[1] == p2->mode[1])
17642 && (p1->mode[2] == p2->mode[2])
17643 && (p1->mode[3] == p2->mode[3])
17644 && (p1->uns_p[0] == p2->uns_p[0])
17645 && (p1->uns_p[1] == p2->uns_p[1])
17646 && (p1->uns_p[2] == p2->uns_p[2])
17647 && (p1->uns_p[3] == p2->uns_p[3]));
17648 }
17649
17650 /* Map types for builtin functions with an explicit return type and up to 3
17651 arguments.  Functions with fewer than 3 arguments use VOIDmode as the
17652 mode of the unused trailing arguments.  */
17653 static tree
17654 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17655 machine_mode mode_arg1, machine_mode mode_arg2,
17656 enum rs6000_builtins builtin, const char *name)
17657 {
17658 struct builtin_hash_struct h;
17659 struct builtin_hash_struct *h2;
17660 int num_args = 3;
17661 int i;
17662 tree ret_type = NULL_TREE;
17663 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17664
17665 /* Create builtin_hash_table. */
17666 if (builtin_hash_table == NULL)
17667 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17668
17669 h.type = NULL_TREE;
17670 h.mode[0] = mode_ret;
17671 h.mode[1] = mode_arg0;
17672 h.mode[2] = mode_arg1;
17673 h.mode[3] = mode_arg2;
17674 h.uns_p[0] = 0;
17675 h.uns_p[1] = 0;
17676 h.uns_p[2] = 0;
17677 h.uns_p[3] = 0;
17678
17679 /* If the builtin produces unsigned results or takes unsigned arguments,
17680 and it is returned as a decl for the vectorizer (such as widening
17681 multiplies, permute), make sure the arguments and return value
17682 are type correct.  */
17683 switch (builtin)
17684 {
17685 /* unsigned 1 argument functions. */
17686 case CRYPTO_BUILTIN_VSBOX:
17687 case CRYPTO_BUILTIN_VSBOX_BE:
17688 case P8V_BUILTIN_VGBBD:
17689 case MISC_BUILTIN_CDTBCD:
17690 case MISC_BUILTIN_CBCDTD:
17691 h.uns_p[0] = 1;
17692 h.uns_p[1] = 1;
17693 break;
17694
17695 /* unsigned 2 argument functions. */
17696 case ALTIVEC_BUILTIN_VMULEUB:
17697 case ALTIVEC_BUILTIN_VMULEUH:
17698 case P8V_BUILTIN_VMULEUW:
17699 case ALTIVEC_BUILTIN_VMULOUB:
17700 case ALTIVEC_BUILTIN_VMULOUH:
17701 case P8V_BUILTIN_VMULOUW:
17702 case CRYPTO_BUILTIN_VCIPHER:
17703 case CRYPTO_BUILTIN_VCIPHER_BE:
17704 case CRYPTO_BUILTIN_VCIPHERLAST:
17705 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17706 case CRYPTO_BUILTIN_VNCIPHER:
17707 case CRYPTO_BUILTIN_VNCIPHER_BE:
17708 case CRYPTO_BUILTIN_VNCIPHERLAST:
17709 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17710 case CRYPTO_BUILTIN_VPMSUMB:
17711 case CRYPTO_BUILTIN_VPMSUMH:
17712 case CRYPTO_BUILTIN_VPMSUMW:
17713 case CRYPTO_BUILTIN_VPMSUMD:
17714 case CRYPTO_BUILTIN_VPMSUM:
17715 case MISC_BUILTIN_ADDG6S:
17716 case MISC_BUILTIN_DIVWEU:
17717 case MISC_BUILTIN_DIVDEU:
17718 case VSX_BUILTIN_UDIV_V2DI:
17719 case ALTIVEC_BUILTIN_VMAXUB:
17720 case ALTIVEC_BUILTIN_VMINUB:
17721 case ALTIVEC_BUILTIN_VMAXUH:
17722 case ALTIVEC_BUILTIN_VMINUH:
17723 case ALTIVEC_BUILTIN_VMAXUW:
17724 case ALTIVEC_BUILTIN_VMINUW:
17725 case P8V_BUILTIN_VMAXUD:
17726 case P8V_BUILTIN_VMINUD:
17727 h.uns_p[0] = 1;
17728 h.uns_p[1] = 1;
17729 h.uns_p[2] = 1;
17730 break;
17731
17732 /* unsigned 3 argument functions. */
17733 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17734 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17735 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17736 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17737 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17738 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17739 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17740 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17741 case VSX_BUILTIN_VPERM_16QI_UNS:
17742 case VSX_BUILTIN_VPERM_8HI_UNS:
17743 case VSX_BUILTIN_VPERM_4SI_UNS:
17744 case VSX_BUILTIN_VPERM_2DI_UNS:
17745 case VSX_BUILTIN_XXSEL_16QI_UNS:
17746 case VSX_BUILTIN_XXSEL_8HI_UNS:
17747 case VSX_BUILTIN_XXSEL_4SI_UNS:
17748 case VSX_BUILTIN_XXSEL_2DI_UNS:
17749 case CRYPTO_BUILTIN_VPERMXOR:
17750 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17751 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17752 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17753 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17754 case CRYPTO_BUILTIN_VSHASIGMAW:
17755 case CRYPTO_BUILTIN_VSHASIGMAD:
17756 case CRYPTO_BUILTIN_VSHASIGMA:
17757 h.uns_p[0] = 1;
17758 h.uns_p[1] = 1;
17759 h.uns_p[2] = 1;
17760 h.uns_p[3] = 1;
17761 break;
17762
17763 /* signed permute functions with unsigned char mask. */
17764 case ALTIVEC_BUILTIN_VPERM_16QI:
17765 case ALTIVEC_BUILTIN_VPERM_8HI:
17766 case ALTIVEC_BUILTIN_VPERM_4SI:
17767 case ALTIVEC_BUILTIN_VPERM_4SF:
17768 case ALTIVEC_BUILTIN_VPERM_2DI:
17769 case ALTIVEC_BUILTIN_VPERM_2DF:
17770 case VSX_BUILTIN_VPERM_16QI:
17771 case VSX_BUILTIN_VPERM_8HI:
17772 case VSX_BUILTIN_VPERM_4SI:
17773 case VSX_BUILTIN_VPERM_4SF:
17774 case VSX_BUILTIN_VPERM_2DI:
17775 case VSX_BUILTIN_VPERM_2DF:
17776 h.uns_p[3] = 1;
17777 break;
17778
17779 /* unsigned args, signed return. */
17780 case VSX_BUILTIN_XVCVUXDSP:
17781 case VSX_BUILTIN_XVCVUXDDP_UNS:
17782 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17783 h.uns_p[1] = 1;
17784 break;
17785
17786 /* signed args, unsigned return. */
17787 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17788 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17789 case MISC_BUILTIN_UNPACK_TD:
17790 case MISC_BUILTIN_UNPACK_V1TI:
17791 h.uns_p[0] = 1;
17792 break;
17793
17794 /* unsigned arguments, bool return (compares). */
17795 case ALTIVEC_BUILTIN_VCMPEQUB:
17796 case ALTIVEC_BUILTIN_VCMPEQUH:
17797 case ALTIVEC_BUILTIN_VCMPEQUW:
17798 case P8V_BUILTIN_VCMPEQUD:
17799 case VSX_BUILTIN_CMPGE_U16QI:
17800 case VSX_BUILTIN_CMPGE_U8HI:
17801 case VSX_BUILTIN_CMPGE_U4SI:
17802 case VSX_BUILTIN_CMPGE_U2DI:
17803 case ALTIVEC_BUILTIN_VCMPGTUB:
17804 case ALTIVEC_BUILTIN_VCMPGTUH:
17805 case ALTIVEC_BUILTIN_VCMPGTUW:
17806 case P8V_BUILTIN_VCMPGTUD:
17807 h.uns_p[1] = 1;
17808 h.uns_p[2] = 1;
17809 break;
17810
17811 /* unsigned arguments for 128-bit pack instructions. */
17812 case MISC_BUILTIN_PACK_TD:
17813 case MISC_BUILTIN_PACK_V1TI:
17814 h.uns_p[1] = 1;
17815 h.uns_p[2] = 1;
17816 break;
17817
17818 /* unsigned second arguments (vector shift right). */
17819 case ALTIVEC_BUILTIN_VSRB:
17820 case ALTIVEC_BUILTIN_VSRH:
17821 case ALTIVEC_BUILTIN_VSRW:
17822 case P8V_BUILTIN_VSRD:
17823 h.uns_p[2] = 1;
17824 break;
17825
17826 default:
17827 break;
17828 }
17829
17830 /* Figure out how many args are present. */
17831 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17832 num_args--;
17833
17834 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17835 if (!ret_type && h.uns_p[0])
17836 ret_type = builtin_mode_to_type[h.mode[0]][0];
17837
17838 if (!ret_type)
17839 fatal_error (input_location,
17840 "internal error: builtin function %qs had an unexpected "
17841 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17842
17843 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17844 arg_type[i] = NULL_TREE;
17845
17846 for (i = 0; i < num_args; i++)
17847 {
17848 int m = (int) h.mode[i+1];
17849 int uns_p = h.uns_p[i+1];
17850
17851 arg_type[i] = builtin_mode_to_type[m][uns_p];
17852 if (!arg_type[i] && uns_p)
17853 arg_type[i] = builtin_mode_to_type[m][0];
17854
17855 if (!arg_type[i])
17856 fatal_error (input_location,
17857 "internal error: builtin function %qs, argument %d "
17858 "had unexpected argument type %qs", name, i,
17859 GET_MODE_NAME (m));
17860 }
17861
17862 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17863 if (*found == NULL)
17864 {
17865 h2 = ggc_alloc<builtin_hash_struct> ();
17866 *h2 = h;
17867 *found = h2;
17868
17869 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17870 arg_type[2], NULL_TREE);
17871 }
17872
17873 return (*found)->type;
17874 }
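/* For example, the earlier call

     builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                            RS6000_BUILTIN_RECIP, "__builtin_recipdiv");

   finds two real arguments (the trailing VOIDmode stops the scan),
   maps DFmode through builtin_mode_to_type to double_type_node, and
   caches a double (*) (double, double) prototype in
   builtin_hash_table for reuse.  */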
17875
17876 static void
17877 rs6000_common_init_builtins (void)
17878 {
17879 const struct builtin_description *d;
17880 size_t i;
17881
17882 tree opaque_ftype_opaque = NULL_TREE;
17883 tree opaque_ftype_opaque_opaque = NULL_TREE;
17884 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17885 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17886
17887 /* Create Altivec and VSX builtins on machines with at least the
17888 general purpose extensions (970 and newer) to allow the use of
17889 the target attribute. */
17890
17891 if (TARGET_EXTRA_BUILTINS)
17892 builtin_mask |= RS6000_BTM_COMMON;
17893
17894 /* Add the ternary operators. */
17895 d = bdesc_3arg;
17896 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17897 {
17898 tree type;
17899 HOST_WIDE_INT mask = d->mask;
17900
17901 if ((mask & builtin_mask) != mask)
17902 {
17903 if (TARGET_DEBUG_BUILTIN)
17904 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17905 continue;
17906 }
17907
17908 if (rs6000_overloaded_builtin_p (d->code))
17909 {
17910 if (! (type = opaque_ftype_opaque_opaque_opaque))
17911 type = opaque_ftype_opaque_opaque_opaque
17912 = build_function_type_list (opaque_V4SI_type_node,
17913 opaque_V4SI_type_node,
17914 opaque_V4SI_type_node,
17915 opaque_V4SI_type_node,
17916 NULL_TREE);
17917 }
17918 else
17919 {
17920 enum insn_code icode = d->icode;
17921 if (d->name == 0)
17922 {
17923 if (TARGET_DEBUG_BUILTIN)
17924 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17925 (long unsigned)i);
17926
17927 continue;
17928 }
17929
17930 if (icode == CODE_FOR_nothing)
17931 {
17932 if (TARGET_DEBUG_BUILTIN)
17933 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17934 d->name);
17935
17936 continue;
17937 }
17938
17939 type = builtin_function_type (insn_data[icode].operand[0].mode,
17940 insn_data[icode].operand[1].mode,
17941 insn_data[icode].operand[2].mode,
17942 insn_data[icode].operand[3].mode,
17943 d->code, d->name);
17944 }
17945
17946 def_builtin (d->name, type, d->code);
17947 }
17948
17949 /* Add the binary operators. */
17950 d = bdesc_2arg;
17951 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17952 {
17953 machine_mode mode0, mode1, mode2;
17954 tree type;
17955 HOST_WIDE_INT mask = d->mask;
17956
17957 if ((mask & builtin_mask) != mask)
17958 {
17959 if (TARGET_DEBUG_BUILTIN)
17960 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17961 continue;
17962 }
17963
17964 if (rs6000_overloaded_builtin_p (d->code))
17965 {
17966 if (! (type = opaque_ftype_opaque_opaque))
17967 type = opaque_ftype_opaque_opaque
17968 = build_function_type_list (opaque_V4SI_type_node,
17969 opaque_V4SI_type_node,
17970 opaque_V4SI_type_node,
17971 NULL_TREE);
17972 }
17973 else
17974 {
17975 enum insn_code icode = d->icode;
17976 if (d->name == 0)
17977 {
17978 if (TARGET_DEBUG_BUILTIN)
17979 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17980 (long unsigned)i);
17981
17982 continue;
17983 }
17984
17985 if (icode == CODE_FOR_nothing)
17986 {
17987 if (TARGET_DEBUG_BUILTIN)
17988 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17989 d->name);
17990
17991 continue;
17992 }
17993
17994 mode0 = insn_data[icode].operand[0].mode;
17995 mode1 = insn_data[icode].operand[1].mode;
17996 mode2 = insn_data[icode].operand[2].mode;
17997
17998 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17999 d->code, d->name);
18000 }
18001
18002 def_builtin (d->name, type, d->code);
18003 }
18004
18005 /* Add the simple unary operators. */
18006 d = bdesc_1arg;
18007 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18008 {
18009 machine_mode mode0, mode1;
18010 tree type;
18011 HOST_WIDE_INT mask = d->mask;
18012
18013 if ((mask & builtin_mask) != mask)
18014 {
18015 if (TARGET_DEBUG_BUILTIN)
18016 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18017 continue;
18018 }
18019
18020 if (rs6000_overloaded_builtin_p (d->code))
18021 {
18022 if (! (type = opaque_ftype_opaque))
18023 type = opaque_ftype_opaque
18024 = build_function_type_list (opaque_V4SI_type_node,
18025 opaque_V4SI_type_node,
18026 NULL_TREE);
18027 }
18028 else
18029 {
18030 enum insn_code icode = d->icode;
18031 if (d->name == 0)
18032 {
18033 if (TARGET_DEBUG_BUILTIN)
18034 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
18035 (long unsigned)i);
18036
18037 continue;
18038 }
18039
18040 if (icode == CODE_FOR_nothing)
18041 {
18042 if (TARGET_DEBUG_BUILTIN)
18043 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18044 d->name);
18045
18046 continue;
18047 }
18048
18049 mode0 = insn_data[icode].operand[0].mode;
18050 mode1 = insn_data[icode].operand[1].mode;
18051
18052 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18053 d->code, d->name);
18054 }
18055
18056 def_builtin (d->name, type, d->code);
18057 }
18058
18059 /* Add the simple no-argument operators. */
18060 d = bdesc_0arg;
18061 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18062 {
18063 machine_mode mode0;
18064 tree type;
18065 HOST_WIDE_INT mask = d->mask;
18066
18067 if ((mask & builtin_mask) != mask)
18068 {
18069 if (TARGET_DEBUG_BUILTIN)
18070 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18071 continue;
18072 }
18073 if (rs6000_overloaded_builtin_p (d->code))
18074 {
18075 if (!opaque_ftype_opaque)
18076 opaque_ftype_opaque
18077 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18078 type = opaque_ftype_opaque;
18079 }
18080 else
18081 {
18082 enum insn_code icode = d->icode;
18083 if (d->name == 0)
18084 {
18085 if (TARGET_DEBUG_BUILTIN)
18086 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18087 (long unsigned) i);
18088 continue;
18089 }
18090 if (icode == CODE_FOR_nothing)
18091 {
18092 if (TARGET_DEBUG_BUILTIN)
18093 fprintf (stderr,
18094 "rs6000_builtin, skip no-argument %s (no code)\n",
18095 d->name);
18096 continue;
18097 }
18098 mode0 = insn_data[icode].operand[0].mode;
18099 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18100 d->code, d->name);
18101 }
18102 def_builtin (d->name, type, d->code);
18103 }
18104 }
18105
18106 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18107 static void
18108 init_float128_ibm (machine_mode mode)
18109 {
18110 if (!TARGET_XL_COMPAT)
18111 {
18112 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18113 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18114 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18115 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18116
18117 if (!TARGET_HARD_FLOAT)
18118 {
18119 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18120 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18121 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18122 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18123 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18124 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18125 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18126 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18127
18128 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18129 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18130 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18131 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18132 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18133 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18134 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18135 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18136 }
18137 }
18138 else
18139 {
18140 set_optab_libfunc (add_optab, mode, "_xlqadd");
18141 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18142 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18143 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18144 }
18145
18146 /* Add various conversions for IFmode to use the traditional TFmode
18147 names. */
18148 if (mode == IFmode)
18149 {
18150 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
18151 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
18152 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
18153 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
18154 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
18155 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
18156
18157 if (TARGET_POWERPC64)
18158 {
18159 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18160 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18161 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18162 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18163 }
18164 }
18165 }
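/* Usage sketch (illustrative only, not part of this file): with IBM
   extended double, a plain long double operation such as

     long double
     sum (long double a, long double b)
     {
       return a + b;
     }

   compiles to a call to __gcc_qadd (_xlqadd under -mxl-compat),
   using the optab entries registered above.  */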
18166
18167 /* Create a decl for either complex long double multiply or complex long double
18168 divide when long double is IEEE 128-bit floating point. We can't use
18169 __multc3 and __divtc3 because the original long double using IBM extended
18170 double used those names. The complex multiply/divide functions are encoded
18171 as builtin functions with a complex result and 4 scalar inputs. */
18172
18173 static void
18174 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
18175 {
18176 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
18177 name, NULL_TREE);
18178
18179 set_builtin_decl (fncode, fndecl, true);
18180
18181 if (TARGET_DEBUG_BUILTIN)
18182 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
18183
18184 return;
18185 }
18186
18187 /* Set up IEEE 128-bit floating point routines. Use different names if the
18188 arguments can be passed in a vector register. The historical PowerPC
18189 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18190 continue to use that if we aren't using vector registers to pass IEEE
18191 128-bit floating point. */
18192
18193 static void
18194 init_float128_ieee (machine_mode mode)
18195 {
18196 if (FLOAT128_VECTOR_P (mode))
18197 {
18198 static bool complex_muldiv_init_p = false;
18199
18200 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
18201 we have clone or target attributes, this will be called a second
18202 time. We want to create the built-in function only once. */
18203 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
18204 {
18205 complex_muldiv_init_p = true;
18206 built_in_function fncode_mul =
18207 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
18208 - MIN_MODE_COMPLEX_FLOAT);
18209 built_in_function fncode_div =
18210 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
18211 - MIN_MODE_COMPLEX_FLOAT);
18212
18213 tree fntype = build_function_type_list (complex_long_double_type_node,
18214 long_double_type_node,
18215 long_double_type_node,
18216 long_double_type_node,
18217 long_double_type_node,
18218 NULL_TREE);
18219
18220 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
18221 create_complex_muldiv ("__divkc3", fncode_div, fntype);
18222 }
18223
18224 set_optab_libfunc (add_optab, mode, "__addkf3");
18225 set_optab_libfunc (sub_optab, mode, "__subkf3");
18226 set_optab_libfunc (neg_optab, mode, "__negkf2");
18227 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18228 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18229 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18230 set_optab_libfunc (abs_optab, mode, "__abskf2");
18231 set_optab_libfunc (powi_optab, mode, "__powikf2");
18232
18233 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18234 set_optab_libfunc (ne_optab, mode, "__nekf2");
18235 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18236 set_optab_libfunc (ge_optab, mode, "__gekf2");
18237 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18238 set_optab_libfunc (le_optab, mode, "__lekf2");
18239 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18240
18241 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18242 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18243 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18244 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18245
18246 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
18247 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18248 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
18249
18250 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
18251 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18252 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
18253
18254 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
18255 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
18256 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
18257 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
18258 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
18259 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
18260
18261 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18262 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18263 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18264 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18265
18266 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18267 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18268 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18269 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18270
18271 if (TARGET_POWERPC64)
18272 {
18273 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18274 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18275 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18276 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18277 }
18278 }
18279
18280 else
18281 {
18282 set_optab_libfunc (add_optab, mode, "_q_add");
18283 set_optab_libfunc (sub_optab, mode, "_q_sub");
18284 set_optab_libfunc (neg_optab, mode, "_q_neg");
18285 set_optab_libfunc (smul_optab, mode, "_q_mul");
18286 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18287 if (TARGET_PPC_GPOPT)
18288 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18289
18290 set_optab_libfunc (eq_optab, mode, "_q_feq");
18291 set_optab_libfunc (ne_optab, mode, "_q_fne");
18292 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18293 set_optab_libfunc (ge_optab, mode, "_q_fge");
18294 set_optab_libfunc (lt_optab, mode, "_q_flt");
18295 set_optab_libfunc (le_optab, mode, "_q_fle");
18296
18297 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18298 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18299 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18300 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18301 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18302 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18303 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18304 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18305 }
18306 }
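/* Usage sketch (illustrative only, not part of this file): in the
   FLOAT128_VECTOR_P case above, a __float128 addition such as

     __float128
     sum (__float128 a, __float128 b)
     {
       return a + b;
     }

   becomes a call to __addkf3 unless the target provides the ISA 3.0
   quad-precision instructions; the legacy SVR4 path uses _q_add
   instead.  */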
18307
18308 static void
18309 rs6000_init_libfuncs (void)
18310 {
18311 /* __float128 support. */
18312 if (TARGET_FLOAT128_TYPE)
18313 {
18314 init_float128_ibm (IFmode);
18315 init_float128_ieee (KFmode);
18316 }
18317
18318 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18319 if (TARGET_LONG_DOUBLE_128)
18320 {
18321 if (!TARGET_IEEEQUAD)
18322 init_float128_ibm (TFmode);
18323
18324 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18325 else
18326 init_float128_ieee (TFmode);
18327 }
18328 }
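
/* As an illustration of the registrations above: with these mappings in
   place, a KFmode to DFmode narrowing emitted by the middle end becomes a
   call to __trunckfdf2 in libgcc, so user code such as

       __float128 x;
       double d = (double) x;

   links against that routine whenever no hardware IEEE 128-bit conversion
   instruction is used.  */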
18329
18330 /* Emit a potentially record-form instruction, setting DST from SRC.
18331 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18332 signed comparison of DST with zero. If DOT is 1, the generated RTL
18333 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18334 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18335 a separate COMPARE. */
18336
18337 void
18338 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18339 {
18340 if (dot == 0)
18341 {
18342 emit_move_insn (dst, src);
18343 return;
18344 }
18345
18346 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18347 {
18348 emit_move_insn (dst, src);
18349 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18350 return;
18351 }
18352
18353 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18354 if (dot == 1)
18355 {
18356 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18357 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18358 }
18359 else
18360 {
18361 rtx set = gen_rtx_SET (dst, src);
18362 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18363 }
18364 }
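
/* Concretely, for DOT == 1 the code above emits
     (parallel [(set ccreg (compare:CC src (const_int 0)))
                (clobber dst)])
   while DOT == 2 emits
     (parallel [(set ccreg (compare:CC src (const_int 0)))
                (set dst src)])
   which match the record-form (dot) insn patterns.  */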
18365
18366 \f
18367 /* A validation routine: say whether CODE, a condition code, and MODE
18368 match. The other alternatives either don't make sense or should
18369 never be generated. */
18370
18371 void
18372 validate_condition_mode (enum rtx_code code, machine_mode mode)
18373 {
18374 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18375 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18376 && GET_MODE_CLASS (mode) == MODE_CC);
18377
18378 /* These don't make sense. */
18379 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18380 || mode != CCUNSmode);
18381
18382 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18383 || mode == CCUNSmode);
18384
18385 gcc_assert (mode == CCFPmode
18386 || (code != ORDERED && code != UNORDERED
18387 && code != UNEQ && code != LTGT
18388 && code != UNGT && code != UNLT
18389 && code != UNGE && code != UNLE));
18390
18391 /* These should never be generated except for
18392 flag_finite_math_only. */
18393 gcc_assert (mode != CCFPmode
18394 || flag_finite_math_only
18395 || (code != LE && code != GE
18396 && code != UNEQ && code != LTGT
18397 && code != UNGT && code != UNLT));
18398
18399 /* These are invalid; the information is not there. */
18400 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18401 }
18402
18403 \f
18404 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18405 rldicl, rldicr, or rldic instruction in mode MODE. If so, and E is
18406 non-null, store there the bit offset (counted from the right) where
18407 the single stretch of 1 bits begins; and similarly for B, the bit
18408 offset where it ends.
18409
18410 bool
18411 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18412 {
18413 unsigned HOST_WIDE_INT val = INTVAL (mask);
18414 unsigned HOST_WIDE_INT bit;
18415 int nb, ne;
18416 int n = GET_MODE_PRECISION (mode);
18417
18418 if (mode != DImode && mode != SImode)
18419 return false;
18420
18421 if (INTVAL (mask) >= 0)
18422 {
18423 bit = val & -val;
18424 ne = exact_log2 (bit);
18425 nb = exact_log2 (val + bit);
18426 }
18427 else if (val + 1 == 0)
18428 {
18429 nb = n;
18430 ne = 0;
18431 }
18432 else if (val & 1)
18433 {
18434 val = ~val;
18435 bit = val & -val;
18436 nb = exact_log2 (bit);
18437 ne = exact_log2 (val + bit);
18438 }
18439 else
18440 {
18441 bit = val & -val;
18442 ne = exact_log2 (bit);
18443 if (val + bit == 0)
18444 nb = n;
18445 else
18446 nb = 0;
18447 }
18448
18449 nb--;
18450
18451 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18452 return false;
18453
18454 if (b)
18455 *b = nb;
18456 if (e)
18457 *e = ne;
18458
18459 return true;
18460 }
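
/* A standalone sketch of the central bit trick used above, with an
   invented helper name; it only recognizes the non-wrapping case (a
   single contiguous run of ones, or all ones), while the function above
   also accepts wrap-around masks via the complement branches.
   VAL & -VAL isolates the lowest set bit, and adding that bit to VAL
   clears the lowest run, so VAL is one run of ones exactly when the sum
   has at most one bit set.  */

static inline bool
example_is_single_run_of_ones (unsigned long long val)
{
  if (val == 0)
    return false;
  unsigned long long low = val & -val;	/* Lowest set bit.  */
  unsigned long long rest = val + low;	/* Clears the lowest run.  */
  return (rest & (rest - 1)) == 0;	/* Zero or a power of two.  */
}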
18461
18462 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18463 or rldicr instruction, to implement an AND with it in mode MODE. */
18464
18465 bool
18466 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18467 {
18468 int nb, ne;
18469
18470 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18471 return false;
18472
18473 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18474 does not wrap. */
18475 if (mode == DImode)
18476 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18477
18478 /* For SImode, rlwinm can do everything. */
18479 if (mode == SImode)
18480 return (nb < 32 && ne < 32);
18481
18482 return false;
18483 }
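
/* Some concrete DImode cases: 0x00000000000000ff (ne == 0) can be done
   with rldicl, 0xffff000000000000 (nb == 63) with rldicr, and
   0x0000000000ffff00 (nb == 23, ne == 8) with rlwinm; a run such as
   0x0000ffffffff0000 (ne == 16, nb == 47) fits none of the three, so a
   single AND cannot do it.  */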
18484
18485 /* Return the instruction template for an AND with mask in mode MODE, with
18486 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18487
18488 const char *
18489 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18490 {
18491 int nb, ne;
18492
18493 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18494 gcc_unreachable ();
18495
18496 if (mode == DImode && ne == 0)
18497 {
18498 operands[3] = GEN_INT (63 - nb);
18499 if (dot)
18500 return "rldicl. %0,%1,0,%3";
18501 return "rldicl %0,%1,0,%3";
18502 }
18503
18504 if (mode == DImode && nb == 63)
18505 {
18506 operands[3] = GEN_INT (63 - ne);
18507 if (dot)
18508 return "rldicr. %0,%1,0,%3";
18509 return "rldicr %0,%1,0,%3";
18510 }
18511
18512 if (nb < 32 && ne < 32)
18513 {
18514 operands[3] = GEN_INT (31 - nb);
18515 operands[4] = GEN_INT (31 - ne);
18516 if (dot)
18517 return "rlwinm. %0,%1,0,%3,%4";
18518 return "rlwinm %0,%1,0,%3,%4";
18519 }
18520
18521 gcc_unreachable ();
18522 }
18523
18524 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18525 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18526 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18527
18528 bool
18529 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18530 {
18531 int nb, ne;
18532
18533 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18534 return false;
18535
18536 int n = GET_MODE_PRECISION (mode);
18537 int sh = -1;
18538
18539 if (CONST_INT_P (XEXP (shift, 1)))
18540 {
18541 sh = INTVAL (XEXP (shift, 1));
18542 if (sh < 0 || sh >= n)
18543 return false;
18544 }
18545
18546 rtx_code code = GET_CODE (shift);
18547
18548 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18549 if (sh == 0)
18550 code = ROTATE;
18551
18552 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18553 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18554 code = ASHIFT;
18555 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18556 {
18557 code = LSHIFTRT;
18558 sh = n - sh;
18559 }
18560
18561 /* DImode rotates need rld*. */
18562 if (mode == DImode && code == ROTATE)
18563 return (nb == 63 || ne == 0 || ne == sh);
18564
18565 /* SImode rotates need rlw*. */
18566 if (mode == SImode && code == ROTATE)
18567 return (nb < 32 && ne < 32 && sh < 32);
18568
18569 /* Wrap-around masks are only okay for rotates. */
18570 if (ne > nb)
18571 return false;
18572
18573 /* Variable shifts are only okay for rotates. */
18574 if (sh < 0)
18575 return false;
18576
18577 /* Don't allow ASHIFT if the mask is wrong for that. */
18578 if (code == ASHIFT && ne < sh)
18579 return false;
18580
18581 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18582 if the mask is wrong for that. */
18583 if (nb < 32 && ne < 32 && sh < 32
18584 && !(code == LSHIFTRT && nb >= 32 - sh))
18585 return true;
18586
18587 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18588 if the mask is wrong for that. */
18589 if (code == LSHIFTRT)
18590 sh = 64 - sh;
18591 if (nb == 63 || ne == 0 || ne == sh)
18592 return !(code == LSHIFTRT && nb >= sh);
18593
18594 return false;
18595 }
18596
18597 /* Return the instruction template for a shift with mask in mode MODE, with
18598 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18599
18600 const char *
18601 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18602 {
18603 int nb, ne;
18604
18605 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18606 gcc_unreachable ();
18607
18608 if (mode == DImode && ne == 0)
18609 {
18610 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18611 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18612 operands[3] = GEN_INT (63 - nb);
18613 if (dot)
18614 return "rld%I2cl. %0,%1,%2,%3";
18615 return "rld%I2cl %0,%1,%2,%3";
18616 }
18617
18618 if (mode == DImode && nb == 63)
18619 {
18620 operands[3] = GEN_INT (63 - ne);
18621 if (dot)
18622 return "rld%I2cr. %0,%1,%2,%3";
18623 return "rld%I2cr %0,%1,%2,%3";
18624 }
18625
18626 if (mode == DImode
18627 && GET_CODE (operands[4]) != LSHIFTRT
18628 && CONST_INT_P (operands[2])
18629 && ne == INTVAL (operands[2]))
18630 {
18631 operands[3] = GEN_INT (63 - nb);
18632 if (dot)
18633 return "rld%I2c. %0,%1,%2,%3";
18634 return "rld%I2c %0,%1,%2,%3";
18635 }
18636
18637 if (nb < 32 && ne < 32)
18638 {
18639 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18640 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18641 operands[3] = GEN_INT (31 - nb);
18642 operands[4] = GEN_INT (31 - ne);
18643 /* This insn can also be a 64-bit rotate with mask that really makes
18644 it just a shift right (with mask); the %h below are to adjust for
18645 that situation (shift count is >= 32 in that case). */
18646 if (dot)
18647 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18648 return "rlw%I2nm %0,%1,%h2,%3,%4";
18649 }
18650
18651 gcc_unreachable ();
18652 }
18653
18654 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18655 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18656 ASHIFT, or LSHIFTRT) in mode MODE. */
18657
18658 bool
18659 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18660 {
18661 int nb, ne;
18662
18663 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18664 return false;
18665
18666 int n = GET_MODE_PRECISION (mode);
18667
18668 int sh = INTVAL (XEXP (shift, 1));
18669 if (sh < 0 || sh >= n)
18670 return false;
18671
18672 rtx_code code = GET_CODE (shift);
18673
18674 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18675 if (sh == 0)
18676 code = ROTATE;
18677
18678 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18679 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18680 code = ASHIFT;
18681 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18682 {
18683 code = LSHIFTRT;
18684 sh = n - sh;
18685 }
18686
18687 /* DImode rotates need rldimi. */
18688 if (mode == DImode && code == ROTATE)
18689 return (ne == sh);
18690
18691 /* SImode rotates need rlwimi. */
18692 if (mode == SImode && code == ROTATE)
18693 return (nb < 32 && ne < 32 && sh < 32);
18694
18695 /* Wrap-around masks are only okay for rotates. */
18696 if (ne > nb)
18697 return false;
18698
18699 /* Don't allow ASHIFT if the mask is wrong for that. */
18700 if (code == ASHIFT && ne < sh)
18701 return false;
18702
18703 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18704 if the mask is wrong for that. */
18705 if (nb < 32 && ne < 32 && sh < 32
18706 && !(code == LSHIFTRT && nb >= 32 - sh))
18707 return true;
18708
18709 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18710 if the mask is wrong for that. */
18711 if (code == LSHIFTRT)
18712 sh = 64 - sh;
18713 if (ne == sh)
18714 return !(code == LSHIFTRT && nb >= sh);
18715
18716 return false;
18717 }
18718
18719 /* Return the instruction template for an insert with mask in mode MODE, with
18720 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18721
18722 const char *
18723 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18724 {
18725 int nb, ne;
18726
18727 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18728 gcc_unreachable ();
18729
18730 /* Prefer rldimi because rlwimi is cracked. */
18731 if (TARGET_POWERPC64
18732 && (!dot || mode == DImode)
18733 && GET_CODE (operands[4]) != LSHIFTRT
18734 && ne == INTVAL (operands[2]))
18735 {
18736 operands[3] = GEN_INT (63 - nb);
18737 if (dot)
18738 return "rldimi. %0,%1,%2,%3";
18739 return "rldimi %0,%1,%2,%3";
18740 }
18741
18742 if (nb < 32 && ne < 32)
18743 {
18744 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18745 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18746 operands[3] = GEN_INT (31 - nb);
18747 operands[4] = GEN_INT (31 - ne);
18748 if (dot)
18749 return "rlwimi. %0,%1,%2,%3,%4";
18750 return "rlwimi %0,%1,%2,%3,%4";
18751 }
18752
18753 gcc_unreachable ();
18754 }
18755
18756 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18757 using two machine instructions. */
18758
18759 bool
18760 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18761 {
18762 /* There are two kinds of AND we can handle with two insns:
18763 1) those we can do with two rl* insns;
18764 2) ori[s];xori[s].
18765
18766 We do not handle that last case yet. */
18767
18768 /* If there is just one stretch of ones, we can do it. */
18769 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18770 return true;
18771
18772 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18773 one insn, we can do the whole thing with two. */
18774 unsigned HOST_WIDE_INT val = INTVAL (c);
18775 unsigned HOST_WIDE_INT bit1 = val & -val;
18776 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18777 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18778 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18779 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18780 }
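
/* A standalone sketch (helper name invented for illustration) of the
   hole-filling arithmetic above. For VAL == 0xe7 (binary 11100111):
   bit1 == 0x01, bit2 == 0x08 (bottom of the hole), bit3 == 0x20 (first
   bit past the hole), and the result is 0xe7 + 0x20 - 0x08 == 0xff,
   i.e. VAL with its lowest hole filled with ones.  */

static unsigned long long
example_fill_lowest_hole (unsigned long long val)
{
  unsigned long long bit1 = val & -val;		 /* Lowest set bit.  */
  unsigned long long bit2 = (val + bit1) & ~val; /* Lowest bit of the hole.  */
  unsigned long long val1 = (val + bit1) & val;	 /* The runs above the hole.  */
  unsigned long long bit3 = val1 & -val1;	 /* First bit past the hole.  */
  return val + bit3 - bit2;			 /* The hole, filled.  */
}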
18781
18782 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18783 If EXPAND is true, split rotate-and-mask instructions we generate to
18784 their constituent parts as well (this is used during expand); if DOT
18785 is 1, make the last insn a record-form instruction clobbering the
18786 destination GPR and setting the CC reg (from operands[3]); if 2, set
18787 that GPR as well as the CC reg. */
18788
18789 void
18790 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18791 {
18792 gcc_assert (!(expand && dot));
18793
18794 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18795
18796 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18797 shift right. This generates better code than doing the masks without
18798 shifts, or shifting first right and then left. */
18799 int nb, ne;
18800 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18801 {
18802 gcc_assert (mode == DImode);
18803
18804 int shift = 63 - nb;
18805 if (expand)
18806 {
18807 rtx tmp1 = gen_reg_rtx (DImode);
18808 rtx tmp2 = gen_reg_rtx (DImode);
18809 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18810 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18811 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18812 }
18813 else
18814 {
18815 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18816 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18817 emit_move_insn (operands[0], tmp);
18818 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18819 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18820 }
18821 return;
18822 }
18823
18824 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18825 that does the rest. */
18826 unsigned HOST_WIDE_INT bit1 = val & -val;
18827 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18828 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18829 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18830
18831 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18832 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18833
18834 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18835
18836 /* Two "no-rotate"-and-mask instructions, for SImode. */
18837 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18838 {
18839 gcc_assert (mode == SImode);
18840
18841 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18842 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18843 emit_move_insn (reg, tmp);
18844 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18845 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18846 return;
18847 }
18848
18849 gcc_assert (mode == DImode);
18850
18851 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18852 insns; we have to do the first in SImode, because it wraps. */
18853 if (mask2 <= 0xffffffff
18854 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18855 {
18856 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18857 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18858 GEN_INT (mask1));
18859 rtx reg_low = gen_lowpart (SImode, reg);
18860 emit_move_insn (reg_low, tmp);
18861 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18862 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18863 return;
18864 }
18865
18866 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18867 at the top end), rotate back and clear the other hole. */
18868 int right = exact_log2 (bit3);
18869 int left = 64 - right;
18870
18871 /* Rotate the mask too. */
18872 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18873
18874 if (expand)
18875 {
18876 rtx tmp1 = gen_reg_rtx (DImode);
18877 rtx tmp2 = gen_reg_rtx (DImode);
18878 rtx tmp3 = gen_reg_rtx (DImode);
18879 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18880 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18881 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18882 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18883 }
18884 else
18885 {
18886 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18887 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18888 emit_move_insn (operands[0], tmp);
18889 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18890 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18891 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18892 }
18893 }
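
/* A hand-checked example of the final rld*+rld* path: for
   VAL == 0xff00ff0000000000, mask2 == 0xffffff0000000000 (a valid
   rldicr mask) and mask1 == 0xff00ffffffffffff, which wraps and so fits
   no single rl* insn. Here bit3 == 1ULL << 56, giving right == 56 and
   left == 8; rotating mask1 left by 8 yields 0x00ffffffffffffff, a
   plain rldicl mask, and rotating back by 56 and ANDing with mask2
   recovers VAL.  */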
18894 \f
18895 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18896 for lfq and stfq insns, iff the registers are hard registers. */
18897
18898 int
18899 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18900 {
18901 /* We might have been passed a SUBREG. */
18902 if (!REG_P (reg1) || !REG_P (reg2))
18903 return 0;
18904
18905 /* We might have been passed non-floating-point registers. */
18906 if (!FP_REGNO_P (REGNO (reg1))
18907 || !FP_REGNO_P (REGNO (reg2)))
18908 return 0;
18909
18910 return (REGNO (reg1) == REGNO (reg2) - 1);
18911 }
18912
18913 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18914 addr1 and addr2 must be in consecutive memory locations
18915 (addr2 == addr1 + 8). */
18916
18917 int
18918 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18919 {
18920 rtx addr1, addr2;
18921 unsigned int reg1, reg2;
18922 int offset1, offset2;
18923
18924 /* The mems cannot be volatile. */
18925 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18926 return 0;
18927
18928 addr1 = XEXP (mem1, 0);
18929 addr2 = XEXP (mem2, 0);
18930
18931 /* Extract an offset (if used) from the first addr. */
18932 if (GET_CODE (addr1) == PLUS)
18933 {
18934 /* If not a REG, return zero. */
18935 if (!REG_P (XEXP (addr1, 0)))
18936 return 0;
18937 else
18938 {
18939 reg1 = REGNO (XEXP (addr1, 0));
18940 /* The offset must be constant! */
18941 if (!CONST_INT_P (XEXP (addr1, 1)))
18942 return 0;
18943 offset1 = INTVAL (XEXP (addr1, 1));
18944 }
18945 }
18946 else if (!REG_P (addr1))
18947 return 0;
18948 else
18949 {
18950 reg1 = REGNO (addr1);
18951 /* This was a simple (mem (reg)) expression. Offset is 0. */
18952 offset1 = 0;
18953 }
18954
18955 /* And now for the second addr. */
18956 if (GET_CODE (addr2) == PLUS)
18957 {
18958 /* If not a REG, return zero. */
18959 if (!REG_P (XEXP (addr2, 0)))
18960 return 0;
18961 else
18962 {
18963 reg2 = REGNO (XEXP (addr2, 0));
18964 /* The offset must be constant. */
18965 if (!CONST_INT_P (XEXP (addr2, 1)))
18966 return 0;
18967 offset2 = INTVAL (XEXP (addr2, 1));
18968 }
18969 }
18970 else if (!REG_P (addr2))
18971 return 0;
18972 else
18973 {
18974 reg2 = REGNO (addr2);
18975 /* This was a simple (mem (reg)) expression. Offset is 0. */
18976 offset2 = 0;
18977 }
18978
18979 /* Both of these must have the same base register. */
18980 if (reg1 != reg2)
18981 return 0;
18982
18983 /* The offset for the second addr must be 8 more than the first addr. */
18984 if (offset2 != offset1 + 8)
18985 return 0;
18986
18987 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18988 instructions. */
18989 return 1;
18990 }
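
/* For example, the pair (mem:DF (plus (reg 9) (const_int 16))) and
   (mem:DF (plus (reg 9) (const_int 24))) passes these checks: neither
   is volatile, both use base register 9, and the second offset is the
   first plus 8.  */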
18991 \f
18992 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18993 need to use DDmode; in all other cases we can use the same mode. */
18994 static machine_mode
18995 rs6000_secondary_memory_needed_mode (machine_mode mode)
18996 {
18997 if (lra_in_progress && mode == SDmode)
18998 return DDmode;
18999 return mode;
19000 }
19001
19002 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
19003 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
19004 only work on the traditional altivec registers, note if an altivec register
19005 was chosen. */
19006
19007 static enum rs6000_reg_type
19008 register_to_reg_type (rtx reg, bool *is_altivec)
19009 {
19010 HOST_WIDE_INT regno;
19011 enum reg_class rclass;
19012
19013 if (SUBREG_P (reg))
19014 reg = SUBREG_REG (reg);
19015
19016 if (!REG_P (reg))
19017 return NO_REG_TYPE;
19018
19019 regno = REGNO (reg);
19020 if (!HARD_REGISTER_NUM_P (regno))
19021 {
19022 if (!lra_in_progress && !reload_completed)
19023 return PSEUDO_REG_TYPE;
19024
19025 regno = true_regnum (reg);
19026 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
19027 return PSEUDO_REG_TYPE;
19028 }
19029
19030 gcc_assert (regno >= 0);
19031
19032 if (is_altivec && ALTIVEC_REGNO_P (regno))
19033 *is_altivec = true;
19034
19035 rclass = rs6000_regno_regclass[regno];
19036 return reg_class_to_reg_type[(int)rclass];
19037 }
19038
19039 /* Helper function to return the cost of adding a TOC entry address. */
19040
19041 static inline int
19042 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
19043 {
19044 int ret;
19045
19046 if (TARGET_CMODEL != CMODEL_SMALL)
19047 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
19048
19049 else
19050 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
19051
19052 return ret;
19053 }
19054
19055 /* Helper function for rs6000_secondary_reload to determine whether the memory
19056 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
19057 needs reloading. Return negative if the memory is not handled by the memory
19058 helper functions and a different reload method should be tried, 0 if no
19059 additional instructions are needed, and positive to give the extra cost of
19060 the memory. */
19061
19062 static int
19063 rs6000_secondary_reload_memory (rtx addr,
19064 enum reg_class rclass,
19065 machine_mode mode)
19066 {
19067 int extra_cost = 0;
19068 rtx reg, and_arg, plus_arg0, plus_arg1;
19069 addr_mask_type addr_mask;
19070 const char *type = NULL;
19071 const char *fail_msg = NULL;
19072
19073 if (GPR_REG_CLASS_P (rclass))
19074 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19075
19076 else if (rclass == FLOAT_REGS)
19077 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19078
19079 else if (rclass == ALTIVEC_REGS)
19080 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19081
19082 /* For the combined VSX_REGS, turn off Altivec AND -16. */
19083 else if (rclass == VSX_REGS)
19084 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
19085 & ~RELOAD_REG_AND_M16);
19086
19087 /* If the register allocator hasn't made up its mind yet on the register
19088 class to use, settle on defaults to use. */
19089 else if (rclass == NO_REGS)
19090 {
19091 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
19092 & ~RELOAD_REG_AND_M16);
19093
19094 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
19095 addr_mask &= ~(RELOAD_REG_INDEXED
19096 | RELOAD_REG_PRE_INCDEC
19097 | RELOAD_REG_PRE_MODIFY);
19098 }
19099
19100 else
19101 addr_mask = 0;
19102
19103 /* If the mode isn't valid in this register class, just return now. */
19104 if ((addr_mask & RELOAD_REG_VALID) == 0)
19105 {
19106 if (TARGET_DEBUG_ADDR)
19107 {
19108 fprintf (stderr,
19109 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19110 "not valid in class\n",
19111 GET_MODE_NAME (mode), reg_class_names[rclass]);
19112 debug_rtx (addr);
19113 }
19114
19115 return -1;
19116 }
19117
19118 switch (GET_CODE (addr))
19119 {
19120 /* Does the register class support auto update forms for this mode? We
19121 don't need a scratch register, since the powerpc only supports
19122 PRE_INC, PRE_DEC, and PRE_MODIFY. */
19123 case PRE_INC:
19124 case PRE_DEC:
19125 reg = XEXP (addr, 0);
19126 if (!base_reg_operand (addr, GET_MODE (reg)))
19127 {
19128 fail_msg = "no base register #1";
19129 extra_cost = -1;
19130 }
19131
19132 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19133 {
19134 extra_cost = 1;
19135 type = "update";
19136 }
19137 break;
19138
19139 case PRE_MODIFY:
19140 reg = XEXP (addr, 0);
19141 plus_arg1 = XEXP (addr, 1);
19142 if (!base_reg_operand (reg, GET_MODE (reg))
19143 || GET_CODE (plus_arg1) != PLUS
19144 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
19145 {
19146 fail_msg = "bad PRE_MODIFY";
19147 extra_cost = -1;
19148 }
19149
19150 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19151 {
19152 extra_cost = 1;
19153 type = "update";
19154 }
19155 break;
19156
19157 /* Do we need to simulate AND -16 to clear the bottom address bits used
19158 in VMX load/stores? Only allow the AND for vector sizes. */
19159 case AND:
19160 and_arg = XEXP (addr, 0);
19161 if (GET_MODE_SIZE (mode) != 16
19162 || !CONST_INT_P (XEXP (addr, 1))
19163 || INTVAL (XEXP (addr, 1)) != -16)
19164 {
19165 fail_msg = "bad Altivec AND #1";
19166 extra_cost = -1;
19167 }
19168
19169 if (rclass != ALTIVEC_REGS)
19170 {
19171 if (legitimate_indirect_address_p (and_arg, false))
19172 extra_cost = 1;
19173
19174 else if (legitimate_indexed_address_p (and_arg, false))
19175 extra_cost = 2;
19176
19177 else
19178 {
19179 fail_msg = "bad Altivec AND #2";
19180 extra_cost = -1;
19181 }
19182
19183 type = "and";
19184 }
19185 break;
19186
19187 /* If this is an indirect address, make sure it is a base register. */
19188 case REG:
19189 case SUBREG:
19190 if (!legitimate_indirect_address_p (addr, false))
19191 {
19192 extra_cost = 1;
19193 type = "move";
19194 }
19195 break;
19196
19197 /* If this is an indexed address, make sure the register class can handle
19198 indexed addresses for this mode. */
19199 case PLUS:
19200 plus_arg0 = XEXP (addr, 0);
19201 plus_arg1 = XEXP (addr, 1);
19202
19203 /* (plus (plus (reg) (constant)) (constant)) is generated during
19204 push_reload processing, so handle it now. */
19205 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
19206 {
19207 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19208 {
19209 extra_cost = 1;
19210 type = "offset";
19211 }
19212 }
19213
19214 /* (plus (plus (reg) (constant)) (reg)) is also generated during
19215 push_reload processing, so handle it now. */
19216 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
19217 {
19218 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19219 {
19220 extra_cost = 1;
19221 type = "indexed #2";
19222 }
19223 }
19224
19225 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
19226 {
19227 fail_msg = "no base register #2";
19228 extra_cost = -1;
19229 }
19230
19231 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
19232 {
19233 if ((addr_mask & RELOAD_REG_INDEXED) == 0
19234 || !legitimate_indexed_address_p (addr, false))
19235 {
19236 extra_cost = 1;
19237 type = "indexed";
19238 }
19239 }
19240
19241 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
19242 && CONST_INT_P (plus_arg1))
19243 {
19244 if (!quad_address_offset_p (INTVAL (plus_arg1)))
19245 {
19246 extra_cost = 1;
19247 type = "vector d-form offset";
19248 }
19249 }
19250
19251 /* Make sure the register class can handle offset addresses. */
19252 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19253 {
19254 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19255 {
19256 extra_cost = 1;
19257 type = "offset #2";
19258 }
19259 }
19260
19261 else
19262 {
19263 fail_msg = "bad PLUS";
19264 extra_cost = -1;
19265 }
19266
19267 break;
19268
19269 case LO_SUM:
19270 /* Quad offsets are restricted and can't handle normal addresses. */
19271 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19272 {
19273 extra_cost = -1;
19274 type = "vector d-form lo_sum";
19275 }
19276
19277 else if (!legitimate_lo_sum_address_p (mode, addr, false))
19278 {
19279 fail_msg = "bad LO_SUM";
19280 extra_cost = -1;
19281 }
19282
19283 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19284 {
19285 extra_cost = 1;
19286 type = "lo_sum";
19287 }
19288 break;
19289
19290 /* Static addresses need to create a TOC entry. */
19291 case CONST:
19292 case SYMBOL_REF:
19293 case LABEL_REF:
19294 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19295 {
19296 extra_cost = -1;
19297 type = "vector d-form lo_sum #2";
19298 }
19299
19300 else
19301 {
19302 type = "address";
19303 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19304 }
19305 break;
19306
19307 /* TOC references look like offsetable memory. */
19308 case UNSPEC:
19309 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19310 {
19311 fail_msg = "bad UNSPEC";
19312 extra_cost = -1;
19313 }
19314
19315 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19316 {
19317 extra_cost = -1;
19318 type = "vector d-form lo_sum #3";
19319 }
19320
19321 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19322 {
19323 extra_cost = 1;
19324 type = "toc reference";
19325 }
19326 break;
19327
19328 default:
19329 {
19330 fail_msg = "bad address";
19331 extra_cost = -1;
19332 }
19333 }
19334
19335 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19336 {
19337 if (extra_cost < 0)
19338 fprintf (stderr,
19339 "rs6000_secondary_reload_memory error: mode = %s, "
19340 "class = %s, addr_mask = '%s', %s\n",
19341 GET_MODE_NAME (mode),
19342 reg_class_names[rclass],
19343 rs6000_debug_addr_mask (addr_mask, false),
19344 (fail_msg != NULL) ? fail_msg : "<bad address>");
19345
19346 else
19347 fprintf (stderr,
19348 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19349 "addr_mask = '%s', extra cost = %d, %s\n",
19350 GET_MODE_NAME (mode),
19351 reg_class_names[rclass],
19352 rs6000_debug_addr_mask (addr_mask, false),
19353 extra_cost,
19354 (type) ? type : "<none>");
19355
19356 debug_rtx (addr);
19357 }
19358
19359 return extra_cost;
19360 }
19361
19362 /* Helper function for rs6000_secondary_reload to return true if a move to a
19363 different register class is really a simple move. */
19364
19365 static bool
19366 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19367 enum rs6000_reg_type from_type,
19368 machine_mode mode)
19369 {
19370 int size = GET_MODE_SIZE (mode);
19371
19372 /* Add support for various direct moves available. In this function, we only
19373 look at cases where we don't need any extra registers, and one or more
19374 simple move insns are issued. Originally, small integers were not allowed
19375 in FPR/VSX registers. A single precision binary floating point value is not
19376 a simple move because we need to convert to the single precision memory layout.
19377 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19378 need special direct move handling, which we do not support yet. */
19379 if (TARGET_DIRECT_MOVE
19380 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19381 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19382 {
19383 if (TARGET_POWERPC64)
19384 {
19385 /* ISA 2.07: MTVSRD or MFVSRD. */
19386 if (size == 8)
19387 return true;
19388
19389 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19390 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19391 return true;
19392 }
19393
19394 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19395 if (TARGET_P8_VECTOR)
19396 {
19397 if (mode == SImode)
19398 return true;
19399
19400 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19401 return true;
19402 }
19403
19404 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19405 if (mode == SDmode)
19406 return true;
19407 }
19408
19409 /* Power6+: MFTGPR or MFFGPR. */
19410 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19411 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19412 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19413 return true;
19414
19415 /* Move to/from SPR. */
19416 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19417 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19418 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19419 return true;
19420
19421 return false;
19422 }
19423
19424 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19425 special direct moves that involve allocating an extra register. Return true
19426 if such a helper exists, storing its insn code and extra cost in SRI;
19427 otherwise return false. */
19428
19429 static bool
19430 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19431 enum rs6000_reg_type from_type,
19432 machine_mode mode,
19433 secondary_reload_info *sri,
19434 bool altivec_p)
19435 {
19436 bool ret = false;
19437 enum insn_code icode = CODE_FOR_nothing;
19438 int cost = 0;
19439 int size = GET_MODE_SIZE (mode);
19440
19441 if (TARGET_POWERPC64 && size == 16)
19442 {
19443 /* Handle moving 128-bit values from GPRs to VSX registers on
19444 ISA 2.07 (power8, power9) when running in 64-bit mode using
19445 XXPERMDI to glue the two 64-bit values back together. */
19446 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19447 {
19448 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19449 icode = reg_addr[mode].reload_vsx_gpr;
19450 }
19451
19452 /* Handle moving 128-bit values from VSX registers to GPRs on
19453 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19454 bottom 64-bit value. */
19455 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19456 {
19457 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19458 icode = reg_addr[mode].reload_gpr_vsx;
19459 }
19460 }
19461
19462 else if (TARGET_POWERPC64 && mode == SFmode)
19463 {
19464 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19465 {
19466 cost = 3; /* xscvdpspn, mfvsrd, and. */
19467 icode = reg_addr[mode].reload_gpr_vsx;
19468 }
19469
19470 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19471 {
19472 cost = 2; /* mtvsrz, xscvspdpn. */
19473 icode = reg_addr[mode].reload_vsx_gpr;
19474 }
19475 }
19476
19477 else if (!TARGET_POWERPC64 && size == 8)
19478 {
19479 /* Handle moving 64-bit values from GPRs to floating point registers on
19480 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19481 32-bit values back together. Altivec register classes must be handled
19482 specially since a different instruction is used, and the secondary
19483 reload support requires a single instruction class in the scratch
19484 register constraint. However, right now TFmode is not allowed in
19485 Altivec registers, so the pattern will never match. */
19486 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19487 {
19488 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19489 icode = reg_addr[mode].reload_fpr_gpr;
19490 }
19491 }
19492
19493 if (icode != CODE_FOR_nothing)
19494 {
19495 ret = true;
19496 if (sri)
19497 {
19498 sri->icode = icode;
19499 sri->extra_cost = cost;
19500 }
19501 }
19502
19503 return ret;
19504 }
19505
19506 /* Return whether a move between two register classes can be done either
19507 directly (simple move) or via a pattern that uses a single extra temporary
19508 (using ISA 2.07's direct move in this case). */
19509
19510 static bool
19511 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19512 enum rs6000_reg_type from_type,
19513 machine_mode mode,
19514 secondary_reload_info *sri,
19515 bool altivec_p)
19516 {
19517 /* Fall back to load/store reloads if either type is not a register. */
19518 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19519 return false;
19520
19521 /* If we haven't allocated registers yet, assume the move can be done for the
19522 standard register types. */
19523 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19524 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19525 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19526 return true;
19527
19528 /* A move within the same set of registers is a simple move for
19529 non-specialized registers. */
19530 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19531 return true;
19532
19533 /* Check whether a simple move can be done directly. */
19534 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19535 {
19536 if (sri)
19537 {
19538 sri->icode = CODE_FOR_nothing;
19539 sri->extra_cost = 0;
19540 }
19541 return true;
19542 }
19543
19544 /* Now check if we can do it in a few steps. */
19545 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19546 altivec_p);
19547 }
19548
19549 /* Inform reload about cases where moving X with a mode MODE to a register in
19550 RCLASS requires an extra scratch or immediate register. Return the class
19551 needed for the immediate register.
19552
19553 For VSX and Altivec, we may need a register to convert sp+offset into
19554 reg+sp.
19555
19556 For misaligned 64-bit gpr loads and stores we need a register to
19557 convert an offset address to indirect. */
19558
19559 static reg_class_t
19560 rs6000_secondary_reload (bool in_p,
19561 rtx x,
19562 reg_class_t rclass_i,
19563 machine_mode mode,
19564 secondary_reload_info *sri)
19565 {
19566 enum reg_class rclass = (enum reg_class) rclass_i;
19567 reg_class_t ret = ALL_REGS;
19568 enum insn_code icode;
19569 bool default_p = false;
19570 bool done_p = false;
19571
19572 /* Allow subreg of memory before/during reload. */
19573 bool memory_p = (MEM_P (x)
19574 || (!reload_completed && SUBREG_P (x)
19575 && MEM_P (SUBREG_REG (x))));
19576
19577 sri->icode = CODE_FOR_nothing;
19578 sri->t_icode = CODE_FOR_nothing;
19579 sri->extra_cost = 0;
19580 icode = ((in_p)
19581 ? reg_addr[mode].reload_load
19582 : reg_addr[mode].reload_store);
19583
19584 if (REG_P (x) || register_operand (x, mode))
19585 {
19586 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19587 bool altivec_p = (rclass == ALTIVEC_REGS);
19588 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19589
19590 if (!in_p)
19591 std::swap (to_type, from_type);
19592
19593 /* Can we do a direct move of some sort? */
19594 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19595 altivec_p))
19596 {
19597 icode = (enum insn_code)sri->icode;
19598 default_p = false;
19599 done_p = true;
19600 ret = NO_REGS;
19601 }
19602 }
19603
19604 /* Make sure 0.0 is not reloaded or forced into memory. */
19605 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19606 {
19607 ret = NO_REGS;
19608 default_p = false;
19609 done_p = true;
19610 }
19611
19612 /* If this is a scalar floating point value and we want to load it into the
19613 traditional Altivec registers, route the move through a traditional floating
19614 point register, unless we have D-form addressing. Also make sure that
19615 non-zero constants use a FPR. */
19616 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19617 && !mode_supports_vmx_dform (mode)
19618 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19619 && (memory_p || CONST_DOUBLE_P (x)))
19620 {
19621 ret = FLOAT_REGS;
19622 default_p = false;
19623 done_p = true;
19624 }
19625
19626 /* Handle reload of load/stores if we have reload helper functions. */
19627 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19628 {
19629 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19630 mode);
19631
19632 if (extra_cost >= 0)
19633 {
19634 done_p = true;
19635 ret = NO_REGS;
19636 if (extra_cost > 0)
19637 {
19638 sri->extra_cost = extra_cost;
19639 sri->icode = icode;
19640 }
19641 }
19642 }
19643
19644 /* Handle unaligned loads and stores of integer registers. */
19645 if (!done_p && TARGET_POWERPC64
19646 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19647 && memory_p
19648 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19649 {
19650 rtx addr = XEXP (x, 0);
19651 rtx off = address_offset (addr);
19652
19653 if (off != NULL_RTX)
19654 {
19655 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19656 unsigned HOST_WIDE_INT offset = INTVAL (off);
19657
19658 /* We need a secondary reload when our legitimate_address_p
19659 says the address is good (as otherwise the entire address
19660 will be reloaded), and the offset is not a multiple of
19661 four or we have an address wrap. Address wrap will only
19662 occur for LO_SUMs since legitimate_offset_address_p
19663 rejects addresses for 16-byte mems that will wrap. */
19664 if (GET_CODE (addr) == LO_SUM
19665 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19666 && ((offset & 3) != 0
19667 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19668 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19669 && (offset & 3) != 0))
19670 {
19671 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19672 if (in_p)
19673 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19674 : CODE_FOR_reload_di_load);
19675 else
19676 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19677 : CODE_FOR_reload_di_store);
19678 sri->extra_cost = 2;
19679 ret = NO_REGS;
19680 done_p = true;
19681 }
19682 else
19683 default_p = true;
19684 }
19685 else
19686 default_p = true;
19687 }
19688
19689 if (!done_p && !TARGET_POWERPC64
19690 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19691 && memory_p
19692 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19693 {
19694 rtx addr = XEXP (x, 0);
19695 rtx off = address_offset (addr);
19696
19697 if (off != NULL_RTX)
19698 {
19699 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19700 unsigned HOST_WIDE_INT offset = INTVAL (off);
19701
19702 /* We need a secondary reload when our legitimate_address_p
19703 says the address is good (as otherwise the entire address
19704 will be reloaded), and we have a wrap.
19705
19706 legitimate_lo_sum_address_p allows LO_SUM addresses to
19707 have any offset so test for wrap in the low 16 bits.
19708
19709 legitimate_offset_address_p checks for the range
19710 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19711 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19712 [0x7ff4,0x7fff] respectively, so test for the
19713 intersection of these ranges, [0x7ffc,0x7fff] and
19714 [0x7ff4,0x7ff7] respectively.
19715
19716 Note that the address we see here may have been
19717 manipulated by legitimize_reload_address. */
19718 if (GET_CODE (addr) == LO_SUM
19719 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19720 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19721 {
19722 if (in_p)
19723 sri->icode = CODE_FOR_reload_si_load;
19724 else
19725 sri->icode = CODE_FOR_reload_si_store;
19726 sri->extra_cost = 2;
19727 ret = NO_REGS;
19728 done_p = true;
19729 }
19730 else
19731 default_p = true;
19732 }
19733 else
19734 default_p = true;
19735 }
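
  /* Worked example of the wrap tests above, for an 8-byte access in
     32-bit mode (extra == 4): an offset of 0x7ffc gives
     ((0x7ffc & 0xffff) ^ 0x8000) == 0xfffc >= 0x10000 - 4, and likewise
     0x7ffc - (0x8000 - 4) == 0 < UNITS_PER_WORD, so offsets 0x7ffc
     through 0x7fff are flagged as wrapping, while 0x7ff8 passes both
     tests and needs no scratch register.  */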
19736
19737 if (!done_p)
19738 default_p = true;
19739
19740 if (default_p)
19741 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19742
19743 gcc_assert (ret != ALL_REGS);
19744
19745 if (TARGET_DEBUG_ADDR)
19746 {
19747 fprintf (stderr,
19748 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19749 "mode = %s",
19750 reg_class_names[ret],
19751 in_p ? "true" : "false",
19752 reg_class_names[rclass],
19753 GET_MODE_NAME (mode));
19754
19755 if (reload_completed)
19756 fputs (", after reload", stderr);
19757
19758 if (!done_p)
19759 fputs (", done_p not set", stderr);
19760
19761 if (default_p)
19762 fputs (", default secondary reload", stderr);
19763
19764 if (sri->icode != CODE_FOR_nothing)
19765 fprintf (stderr, ", reload func = %s, extra cost = %d",
19766 insn_data[sri->icode].name, sri->extra_cost);
19767
19768 else if (sri->extra_cost > 0)
19769 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19770
19771 fputs ("\n", stderr);
19772 debug_rtx (x);
19773 }
19774
19775 return ret;
19776 }
19777
19778 /* Better tracing for rs6000_secondary_reload_inner. */
19779
19780 static void
19781 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19782 bool store_p)
19783 {
19784 rtx set, clobber;
19785
19786 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19787
19788 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19789 store_p ? "store" : "load");
19790
19791 if (store_p)
19792 set = gen_rtx_SET (mem, reg);
19793 else
19794 set = gen_rtx_SET (reg, mem);
19795
19796 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19797 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19798 }
19799
19800 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19801 ATTRIBUTE_NORETURN;
19802
19803 static void
19804 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19805 bool store_p)
19806 {
19807 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19808 gcc_unreachable ();
19809 }
19810
19811 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19812 reload helper functions. These were identified in
19813 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19814 reload, it calls the insns:
19815 reload_<RELOAD:mode>_<P:mptrsize>_store
19816 reload_<RELOAD:mode>_<P:mptrsize>_load
19817
19818 which in turn calls this function, to do whatever is necessary to create
19819 valid addresses. */
19820
19821 void
19822 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19823 {
19824 int regno = true_regnum (reg);
19825 machine_mode mode = GET_MODE (reg);
19826 addr_mask_type addr_mask;
19827 rtx addr;
19828 rtx new_addr;
19829 rtx op_reg, op0, op1;
19830 rtx and_op;
19831 rtx cc_clobber;
19832 rtvec rv;
19833
19834 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19835 || !base_reg_operand (scratch, GET_MODE (scratch)))
19836 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19837
19838 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19839 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19840
19841 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19842 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19843
19844 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19845 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19846
19847 else
19848 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19849
19850 /* Make sure the mode is valid in this register class. */
19851 if ((addr_mask & RELOAD_REG_VALID) == 0)
19852 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19853
19854 if (TARGET_DEBUG_ADDR)
19855 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19856
19857 new_addr = addr = XEXP (mem, 0);
19858 switch (GET_CODE (addr))
19859 {
19860 /* Does the register class support auto update forms for this mode? If
19861 not, do the update now. We don't need a scratch register, since the
19862 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19863 case PRE_INC:
19864 case PRE_DEC:
19865 op_reg = XEXP (addr, 0);
19866 if (!base_reg_operand (op_reg, Pmode))
19867 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19868
19869 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19870 {
19871 int delta = GET_MODE_SIZE (mode);
19872 if (GET_CODE (addr) == PRE_DEC)
19873 delta = -delta;
19874 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19875 new_addr = op_reg;
19876 }
19877 break;
19878
19879 case PRE_MODIFY:
19880 op0 = XEXP (addr, 0);
19881 op1 = XEXP (addr, 1);
19882 if (!base_reg_operand (op0, Pmode)
19883 || GET_CODE (op1) != PLUS
19884 || !rtx_equal_p (op0, XEXP (op1, 0)))
19885 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19886
19887 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19888 {
19889 emit_insn (gen_rtx_SET (op0, op1));
19890 new_addr = op0;
19891 }
19892 break;
19893
19894 /* Do we need to simulate AND -16 to clear the bottom address bits used
19895 in VMX load/stores? */
19896 case AND:
19897 op0 = XEXP (addr, 0);
19898 op1 = XEXP (addr, 1);
19899 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19900 {
19901 if (REG_P (op0) || SUBREG_P (op0))
19902 op_reg = op0;
19903
19904 else if (GET_CODE (op1) == PLUS)
19905 {
19906 emit_insn (gen_rtx_SET (scratch, op1));
19907 op_reg = scratch;
19908 }
19909
19910 else
19911 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19912
19913 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19914 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19915 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19916 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19917 new_addr = scratch;
19918 }
19919 break;
19920
19921 /* If this is an indirect address, make sure it is a base register. */
19922 case REG:
19923 case SUBREG:
19924 if (!base_reg_operand (addr, GET_MODE (addr)))
19925 {
19926 emit_insn (gen_rtx_SET (scratch, addr));
19927 new_addr = scratch;
19928 }
19929 break;
19930
19931 /* If this is an indexed address, make sure the register class can handle
19932 indexed addresses for this mode. */
19933 case PLUS:
19934 op0 = XEXP (addr, 0);
19935 op1 = XEXP (addr, 1);
19936 if (!base_reg_operand (op0, Pmode))
19937 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19938
19939 else if (int_reg_operand (op1, Pmode))
19940 {
19941 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19942 {
19943 emit_insn (gen_rtx_SET (scratch, addr));
19944 new_addr = scratch;
19945 }
19946 }
19947
19948 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19949 {
19950 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19951 || !quad_address_p (addr, mode, false))
19952 {
19953 emit_insn (gen_rtx_SET (scratch, addr));
19954 new_addr = scratch;
19955 }
19956 }
19957
19958 /* Make sure the register class can handle offset addresses. */
19959 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19960 {
19961 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19962 {
19963 emit_insn (gen_rtx_SET (scratch, addr));
19964 new_addr = scratch;
19965 }
19966 }
19967
19968 else
19969 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19970
19971 break;
19972
19973 case LO_SUM:
19974 op0 = XEXP (addr, 0);
19975 op1 = XEXP (addr, 1);
19976 if (!base_reg_operand (op0, Pmode))
19977 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19978
19979 else if (int_reg_operand (op1, Pmode))
19980 {
19981 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19982 {
19983 emit_insn (gen_rtx_SET (scratch, addr));
19984 new_addr = scratch;
19985 }
19986 }
19987
19988 /* Quad offsets are restricted and can't handle normal addresses. */
19989 else if (mode_supports_dq_form (mode))
19990 {
19991 emit_insn (gen_rtx_SET (scratch, addr));
19992 new_addr = scratch;
19993 }
19994
19995 /* Make sure the register class can handle offset addresses. */
19996 else if (legitimate_lo_sum_address_p (mode, addr, false))
19997 {
19998 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19999 {
20000 emit_insn (gen_rtx_SET (scratch, addr));
20001 new_addr = scratch;
20002 }
20003 }
20004
20005 else
20006 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20007
20008 break;
20009
20010 case SYMBOL_REF:
20011 case CONST:
20012 case LABEL_REF:
20013 rs6000_emit_move (scratch, addr, Pmode);
20014 new_addr = scratch;
20015 break;
20016
20017 default:
20018 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
20019 }
20020
20021 /* Adjust the address if it changed. */
20022 if (addr != new_addr)
20023 {
20024 mem = replace_equiv_address_nv (mem, new_addr);
20025 if (TARGET_DEBUG_ADDR)
20026 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
20027 }
20028
20029 /* Now create the move. */
20030 if (store_p)
20031 emit_insn (gen_rtx_SET (mem, reg));
20032 else
20033 emit_insn (gen_rtx_SET (reg, mem));
20034
20035 return;
20036 }
20037
20038 /* Convert reloads involving 64-bit gprs and misaligned offset
20039 addressing, or multiple 32-bit gprs and offsets that are too large,
20040 to use indirect addressing. */
20041
20042 void
20043 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
20044 {
20045 int regno = true_regnum (reg);
20046 enum reg_class rclass;
20047 rtx addr;
20048 rtx scratch_or_premodify = scratch;
20049
20050 if (TARGET_DEBUG_ADDR)
20051 {
20052 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
20053 store_p ? "store" : "load");
20054 fprintf (stderr, "reg:\n");
20055 debug_rtx (reg);
20056 fprintf (stderr, "mem:\n");
20057 debug_rtx (mem);
20058 fprintf (stderr, "scratch:\n");
20059 debug_rtx (scratch);
20060 }
20061
20062 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
20063 gcc_assert (MEM_P (mem));
20064 rclass = REGNO_REG_CLASS (regno);
20065 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
20066 addr = XEXP (mem, 0);
20067
20068 if (GET_CODE (addr) == PRE_MODIFY)
20069 {
20070 gcc_assert (REG_P (XEXP (addr, 0))
20071 && GET_CODE (XEXP (addr, 1)) == PLUS
20072 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
20073 scratch_or_premodify = XEXP (addr, 0);
20074 addr = XEXP (addr, 1);
20075 }
20076 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
20077
20078 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
20079
20080 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
20081
20082 /* Now create the move. */
20083 if (store_p)
20084 emit_insn (gen_rtx_SET (mem, reg));
20085 else
20086 emit_insn (gen_rtx_SET (reg, mem));
20087
20088 return;
20089 }
20090
20091 /* Given an rtx X being reloaded into a reg required to be
20092 in class CLASS, return the class of reg to actually use.
20093 In general this is just CLASS; but on some machines
20094 in some cases it is preferable to use a more restrictive class.
20095
20096 On the RS/6000, we have to return NO_REGS when we want to reload a
20097 floating-point CONST_DOUBLE to force it to be copied to memory.
20098
20099 We also don't want to reload integer values into floating-point
20100 registers if we can at all help it. In fact, this can
20101 cause reload to die, if it tries to generate a reload of CTR
20102 into a FP register and discovers it doesn't have the memory location
20103 required.
20104
20105 ??? Would it be a good idea to have reload do the converse, that is
20106 try to reload floating modes into FP registers if possible?
20107 */
20108
20109 static enum reg_class
20110 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
20111 {
20112 machine_mode mode = GET_MODE (x);
20113 bool is_constant = CONSTANT_P (x);
20114
20115 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
20116 reload class for it. */
20117 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20118 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
20119 return NO_REGS;
20120
20121 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
20122 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
20123 return NO_REGS;
20124
20125 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
20126 the reloading of address expressions using PLUS into floating point
20127 registers. */
20128 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
20129 {
20130 if (is_constant)
20131 {
20132 /* Zero is always allowed in all VSX registers. */
20133 if (x == CONST0_RTX (mode))
20134 return rclass;
20135
20136 /* If this is a vector constant that can be formed with a few Altivec
20137 instructions, we want altivec registers. */
20138 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
20139 return ALTIVEC_REGS;
20140
20141 /* If this is an integer constant that can easily be loaded into
20142 vector registers, allow it. */
20143 if (CONST_INT_P (x))
20144 {
20145 HOST_WIDE_INT value = INTVAL (x);
20146
20147 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
20148 2.06 can generate it in the Altivec registers with
20149 VSPLTI<x>. */
20150 if (value == -1)
20151 {
20152 if (TARGET_P8_VECTOR)
20153 return rclass;
20154 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
20155 return ALTIVEC_REGS;
20156 else
20157 return NO_REGS;
20158 }
20159
20160 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
20161 a sign extend in the Altivec registers. */
20162 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
20163 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
20164 return ALTIVEC_REGS;
20165 }
20166
20167 /* Force constant to memory. */
20168 return NO_REGS;
20169 }
20170
20171 /* D-form addressing can easily reload the value. */
20172 if (mode_supports_vmx_dform (mode)
20173 || mode_supports_dq_form (mode))
20174 return rclass;
20175
20176 /* If this is a scalar floating point value and we don't have D-form
20177 addressing, prefer the traditional floating point registers so that we
20178 can use D-form (register+offset) addressing. */
20179 if (rclass == VSX_REGS
20180 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
20181 return FLOAT_REGS;
20182
20183 /* Prefer the Altivec registers if Altivec is handling the vector
20184 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
20185 loads. */
20186 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
20187 || mode == V1TImode)
20188 return ALTIVEC_REGS;
20189
20190 return rclass;
20191 }
20192
20193 if (is_constant || GET_CODE (x) == PLUS)
20194 {
20195 if (reg_class_subset_p (GENERAL_REGS, rclass))
20196 return GENERAL_REGS;
20197 if (reg_class_subset_p (BASE_REGS, rclass))
20198 return BASE_REGS;
20199 return NO_REGS;
20200 }
20201
20202 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
20203 return GENERAL_REGS;
20204
20205 return rclass;
20206 }
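/* A worked example of the constant handling above (hedged; the exact
   classes depend on which ISA levels are enabled): reloading
   (const_int -1) for a vector mode with rclass == VSX_REGS stays in
   VSX_REGS on ISA 2.07, where xxlorc can form -1 in any VSX register,
   but narrows to ALTIVEC_REGS on ISA 2.06, where only the vsplti*
   instructions can build it.  With ISA 3.0, (const_int 5) yields
   ALTIVEC_REGS so xxspltib plus a sign extend can load it; otherwise
   the constant falls through to NO_REGS and is forced to memory.  */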
20207
20208 /* Debug version of rs6000_preferred_reload_class. */
20209 static enum reg_class
20210 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
20211 {
20212 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
20213
20214 fprintf (stderr,
20215 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
20216 "mode = %s, x:\n",
20217 reg_class_names[ret], reg_class_names[rclass],
20218 GET_MODE_NAME (GET_MODE (x)));
20219 debug_rtx (x);
20220
20221 return ret;
20222 }
20223
20224 /* If we are copying between FP or AltiVec registers and anything else, we need
20225 a memory location. The exception is when we are targeting ppc64 and the
20226 direct move instructions between FPRs and GPRs are available. Also, under VSX, you
20227 can copy vector registers from the FP register set to the Altivec register
20228 set and vice versa. */
20229
20230 static bool
20231 rs6000_secondary_memory_needed (machine_mode mode,
20232 reg_class_t from_class,
20233 reg_class_t to_class)
20234 {
20235 enum rs6000_reg_type from_type, to_type;
20236 bool altivec_p = ((from_class == ALTIVEC_REGS)
20237 || (to_class == ALTIVEC_REGS));
20238
20239 /* If a simple/direct move is available, we don't need secondary memory. */
20240 from_type = reg_class_to_reg_type[(int)from_class];
20241 to_type = reg_class_to_reg_type[(int)to_class];
20242
20243 if (rs6000_secondary_reload_move (to_type, from_type, mode,
20244 (secondary_reload_info *)0, altivec_p))
20245 return false;
20246
20247 /* If we have a floating point or vector register class, we need to use
20248 memory to transfer the data. */
20249 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
20250 return true;
20251
20252 return false;
20253 }
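/* Example, assuming a target without the direct move instructions:
   copying DImode between FLOAT_REGS and GENERAL_REGS has no
   register-to-register path, so this returns true and the copy goes
   through a stack slot.  On ISA 2.07 (power8) the mfvsrd/mtvsrd
   direct moves make rs6000_secondary_reload_move succeed and no
   memory is needed.  */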
20254
20255 /* Debug version of rs6000_secondary_memory_needed. */
20256 static bool
20257 rs6000_debug_secondary_memory_needed (machine_mode mode,
20258 reg_class_t from_class,
20259 reg_class_t to_class)
20260 {
20261 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
20262
20263 fprintf (stderr,
20264 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
20265 "to_class = %s, mode = %s\n",
20266 ret ? "true" : "false",
20267 reg_class_names[from_class],
20268 reg_class_names[to_class],
20269 GET_MODE_NAME (mode));
20270
20271 return ret;
20272 }
20273
20274 /* Return the register class of a scratch register needed to copy IN into
20275 or out of a register in RCLASS in MODE. If it can be done directly,
20276 NO_REGS is returned. */
20277
20278 static enum reg_class
20279 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20280 rtx in)
20281 {
20282 int regno;
20283
20284 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20285 #if TARGET_MACHO
20286 && MACHOPIC_INDIRECT
20287 #endif
20288 ))
20289 {
20290 /* We cannot copy a symbolic operand directly into anything
20291 other than BASE_REGS for TARGET_ELF. So indicate that a
20292 register from BASE_REGS is needed as an intermediate
20293 register.
20294
20295 On Darwin, pic addresses require a load from memory, which
20296 needs a base register. */
20297 if (rclass != BASE_REGS
20298 && (SYMBOL_REF_P (in)
20299 || GET_CODE (in) == HIGH
20300 || GET_CODE (in) == LABEL_REF
20301 || GET_CODE (in) == CONST))
20302 return BASE_REGS;
20303 }
20304
20305 if (REG_P (in))
20306 {
20307 regno = REGNO (in);
20308 if (!HARD_REGISTER_NUM_P (regno))
20309 {
20310 regno = true_regnum (in);
20311 if (!HARD_REGISTER_NUM_P (regno))
20312 regno = -1;
20313 }
20314 }
20315 else if (SUBREG_P (in))
20316 {
20317 regno = true_regnum (in);
20318 if (!HARD_REGISTER_NUM_P (regno))
20319 regno = -1;
20320 }
20321 else
20322 regno = -1;
20323
20324 /* If we have VSX register moves, prefer moving scalar values between
20325 Altivec registers and GPR by going via an FPR (and then via memory)
20326 instead of reloading the secondary memory address for Altivec moves. */
20327 if (TARGET_VSX
20328 && GET_MODE_SIZE (mode) < 16
20329 && !mode_supports_vmx_dform (mode)
20330 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20331 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20332 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20333 && (regno >= 0 && INT_REGNO_P (regno)))))
20334 return FLOAT_REGS;
20335
20336 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20337 into anything. */
20338 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20339 || (regno >= 0 && INT_REGNO_P (regno)))
20340 return NO_REGS;
20341
20342 /* Constants, memory, and VSX registers can go into VSX registers (both the
20343 traditional floating point and the altivec registers). */
20344 if (rclass == VSX_REGS
20345 && (regno == -1 || VSX_REGNO_P (regno)))
20346 return NO_REGS;
20347
20348 /* Constants, memory, and FP registers can go into FP registers. */
20349 if ((regno == -1 || FP_REGNO_P (regno))
20350 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
20351 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20352
20353 /* Memory, and AltiVec registers can go into AltiVec registers. */
20354 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20355 && rclass == ALTIVEC_REGS)
20356 return NO_REGS;
20357
20358 /* We can copy among the CR registers. */
20359 if ((rclass == CR_REGS || rclass == CR0_REGS)
20360 && regno >= 0 && CR_REGNO_P (regno))
20361 return NO_REGS;
20362
20363 /* Otherwise, we need GENERAL_REGS. */
20364 return GENERAL_REGS;
20365 }
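/* Example of the FPR bounce above: with VSX enabled, an SImode value
   sitting in an AltiVec register that must reach a GPR (assuming the
   mode has no D-form VMX addressing) gets FLOAT_REGS as the scratch
   class, so the value moves AltiVec register -> FPR -> memory -> GPR
   instead of reloading an AltiVec-style memory address.  */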
20366
20367 /* Debug version of rs6000_secondary_reload_class. */
20368 static enum reg_class
20369 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20370 machine_mode mode, rtx in)
20371 {
20372 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20373 fprintf (stderr,
20374 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20375 "mode = %s, input rtx:\n",
20376 reg_class_names[ret], reg_class_names[rclass],
20377 GET_MODE_NAME (mode));
20378 debug_rtx (in);
20379
20380 return ret;
20381 }
20382
20383 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20384
20385 static bool
20386 rs6000_can_change_mode_class (machine_mode from,
20387 machine_mode to,
20388 reg_class_t rclass)
20389 {
20390 unsigned from_size = GET_MODE_SIZE (from);
20391 unsigned to_size = GET_MODE_SIZE (to);
20392
20393 if (from_size != to_size)
20394 {
20395 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20396
20397 if (reg_classes_intersect_p (xclass, rclass))
20398 {
20399 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20400 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20401 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20402 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20403
20404 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20405 single register under VSX because the scalar part of the register
20406 is in the upper 64-bits, and not the lower 64-bits. Types like
20407 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20408 IEEE floating point can't overlap, and neither can small
20409 values. */
20410
20411 if (to_float128_vector_p && from_float128_vector_p)
20412 return true;
20413
20414 else if (to_float128_vector_p || from_float128_vector_p)
20415 return false;
20416
20417 /* TDmode in floating-mode registers must always go into a register
20418 pair with the most significant word in the even-numbered register
20419 to match ISA requirements. In little-endian mode, this does not
20420 match subreg numbering, so we cannot allow subregs. */
20421 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20422 return false;
20423
20424 if (from_size < 8 || to_size < 8)
20425 return false;
20426
20427 if (from_size == 8 && (8 * to_nregs) != to_size)
20428 return false;
20429
20430 if (to_size == 8 && (8 * from_nregs) != from_size)
20431 return false;
20432
20433 return true;
20434 }
20435 else
20436 return true;
20437 }
20438
20439 /* Since the VSX register set includes traditional floating point registers
20440 and altivec registers, just check for the size being different instead of
20441 trying to check whether the modes are vector modes. Otherwise it won't
20442 allow say DF and DI to change classes. For types like TFmode and TDmode
20443 that take 2 64-bit registers, rather than a single 128-bit register, don't
20444 allow subregs of those types to other 128 bit types. */
20445 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20446 {
20447 unsigned num_regs = (from_size + 15) / 16;
20448 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20449 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20450 return false;
20451
20452 return (from_size == 8 || from_size == 16);
20453 }
20454
20455 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20456 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20457 return false;
20458
20459 return true;
20460 }
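/* Worked example: (subreg:DI (reg:KF vsN) 0) is rejected by the
   float128-vector checks above, since the 64-bit scalar part of a VSX
   register lives in the upper half and subreg byte numbering would not
   describe that layout; a DFmode <-> DImode change (same 8-byte size,
   one register) is allowed.  */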
20461
20462 /* Debug version of rs6000_can_change_mode_class. */
20463 static bool
20464 rs6000_debug_can_change_mode_class (machine_mode from,
20465 machine_mode to,
20466 reg_class_t rclass)
20467 {
20468 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20469
20470 fprintf (stderr,
20471 "rs6000_can_change_mode_class, return %s, from = %s, "
20472 "to = %s, rclass = %s\n",
20473 ret ? "true" : "false",
20474 GET_MODE_NAME (from), GET_MODE_NAME (to),
20475 reg_class_names[rclass]);
20476
20477 return ret;
20478 }
20479 \f
20480 /* Return a string to do a move operation of 128 bits of data. */
20481
20482 const char *
20483 rs6000_output_move_128bit (rtx operands[])
20484 {
20485 rtx dest = operands[0];
20486 rtx src = operands[1];
20487 machine_mode mode = GET_MODE (dest);
20488 int dest_regno;
20489 int src_regno;
20490 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20491 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20492
20493 if (REG_P (dest))
20494 {
20495 dest_regno = REGNO (dest);
20496 dest_gpr_p = INT_REGNO_P (dest_regno);
20497 dest_fp_p = FP_REGNO_P (dest_regno);
20498 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20499 dest_vsx_p = dest_fp_p | dest_vmx_p;
20500 }
20501 else
20502 {
20503 dest_regno = -1;
20504 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20505 }
20506
20507 if (REG_P (src))
20508 {
20509 src_regno = REGNO (src);
20510 src_gpr_p = INT_REGNO_P (src_regno);
20511 src_fp_p = FP_REGNO_P (src_regno);
20512 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20513 src_vsx_p = src_fp_p | src_vmx_p;
20514 }
20515 else
20516 {
20517 src_regno = -1;
20518 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20519 }
20520
20521 /* Register moves. */
20522 if (dest_regno >= 0 && src_regno >= 0)
20523 {
20524 if (dest_gpr_p)
20525 {
20526 if (src_gpr_p)
20527 return "#";
20528
20529 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20530 return (WORDS_BIG_ENDIAN
20531 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20532 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20533
20534 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20535 return "#";
20536 }
20537
20538 else if (TARGET_VSX && dest_vsx_p)
20539 {
20540 if (src_vsx_p)
20541 return "xxlor %x0,%x1,%x1";
20542
20543 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20544 return (WORDS_BIG_ENDIAN
20545 ? "mtvsrdd %x0,%1,%L1"
20546 : "mtvsrdd %x0,%L1,%1");
20547
20548 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20549 return "#";
20550 }
20551
20552 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20553 return "vor %0,%1,%1";
20554
20555 else if (dest_fp_p && src_fp_p)
20556 return "#";
20557 }
20558
20559 /* Loads. */
20560 else if (dest_regno >= 0 && MEM_P (src))
20561 {
20562 if (dest_gpr_p)
20563 {
20564 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20565 return "lq %0,%1";
20566 else
20567 return "#";
20568 }
20569
20570 else if (TARGET_ALTIVEC && dest_vmx_p
20571 && altivec_indexed_or_indirect_operand (src, mode))
20572 return "lvx %0,%y1";
20573
20574 else if (TARGET_VSX && dest_vsx_p)
20575 {
20576 if (mode_supports_dq_form (mode)
20577 && quad_address_p (XEXP (src, 0), mode, true))
20578 return "lxv %x0,%1";
20579
20580 else if (TARGET_P9_VECTOR)
20581 return "lxvx %x0,%y1";
20582
20583 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20584 return "lxvw4x %x0,%y1";
20585
20586 else
20587 return "lxvd2x %x0,%y1";
20588 }
20589
20590 else if (TARGET_ALTIVEC && dest_vmx_p)
20591 return "lvx %0,%y1";
20592
20593 else if (dest_fp_p)
20594 return "#";
20595 }
20596
20597 /* Stores. */
20598 else if (src_regno >= 0 && MEM_P (dest))
20599 {
20600 if (src_gpr_p)
20601 {
20602 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20603 return "stq %1,%0";
20604 else
20605 return "#";
20606 }
20607
20608 else if (TARGET_ALTIVEC && src_vmx_p
20609 && altivec_indexed_or_indirect_operand (dest, mode))
20610 return "stvx %1,%y0";
20611
20612 else if (TARGET_VSX && src_vsx_p)
20613 {
20614 if (mode_supports_dq_form (mode)
20615 && quad_address_p (XEXP (dest, 0), mode, true))
20616 return "stxv %x1,%0";
20617
20618 else if (TARGET_P9_VECTOR)
20619 return "stxvx %x1,%y0";
20620
20621 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20622 return "stxvw4x %x1,%y0";
20623
20624 else
20625 return "stxvd2x %x1,%y0";
20626 }
20627
20628 else if (TARGET_ALTIVEC && src_vmx_p)
20629 return "stvx %1,%y0";
20630
20631 else if (src_fp_p)
20632 return "#";
20633 }
20634
20635 /* Constants. */
20636 else if (dest_regno >= 0
20637 && (CONST_INT_P (src)
20638 || CONST_WIDE_INT_P (src)
20639 || CONST_DOUBLE_P (src)
20640 || GET_CODE (src) == CONST_VECTOR))
20641 {
20642 if (dest_gpr_p)
20643 return "#";
20644
20645 else if ((dest_vmx_p && TARGET_ALTIVEC)
20646 || (dest_vsx_p && TARGET_VSX))
20647 return output_vec_const_move (operands);
20648 }
20649
20650 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20651 }
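/* Some sample expansions of the templates above, assuming a
   little-endian ISA 3.0 target: a GPR-to-VSX register move uses
   "mtvsrdd %x0,%L1,%1", a DQ-form vector load uses "lxv %x0,%1", and
   every "#" result is split into smaller moves after reload (see
   rs6000_split_128bit_ok_p below).  */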
20652
20653 /* Validate a 128-bit move. */
20654 bool
20655 rs6000_move_128bit_ok_p (rtx operands[])
20656 {
20657 machine_mode mode = GET_MODE (operands[0]);
20658 return (gpc_reg_operand (operands[0], mode)
20659 || gpc_reg_operand (operands[1], mode));
20660 }
20661
20662 /* Return true if a 128-bit move needs to be split. */
20663 bool
20664 rs6000_split_128bit_ok_p (rtx operands[])
20665 {
20666 if (!reload_completed)
20667 return false;
20668
20669 if (!gpr_or_gpr_p (operands[0], operands[1]))
20670 return false;
20671
20672 if (quad_load_store_p (operands[0], operands[1]))
20673 return false;
20674
20675 return true;
20676 }
20677
20678 \f
20679 /* Given a comparison operation, return the bit number in CCR to test. We
20680 know this is a valid comparison.
20681
20682 SCC_P is 1 if this is for an scc. That means that %D will have been
20683 used instead of %C, so the bits will be in different places.
20684
20685 Return -1 if OP isn't a valid comparison for some reason. */
20686
20687 int
20688 ccr_bit (rtx op, int scc_p)
20689 {
20690 enum rtx_code code = GET_CODE (op);
20691 machine_mode cc_mode;
20692 int cc_regnum;
20693 int base_bit;
20694 rtx reg;
20695
20696 if (!COMPARISON_P (op))
20697 return -1;
20698
20699 reg = XEXP (op, 0);
20700
20701 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20702 return -1;
20703
20704 cc_mode = GET_MODE (reg);
20705 cc_regnum = REGNO (reg);
20706 base_bit = 4 * (cc_regnum - CR0_REGNO);
20707
20708 validate_condition_mode (code, cc_mode);
20709
20710 /* When generating a sCOND operation, only positive conditions are
20711 allowed. */
20712 if (scc_p)
20713 switch (code)
20714 {
20715 case EQ:
20716 case GT:
20717 case LT:
20718 case UNORDERED:
20719 case GTU:
20720 case LTU:
20721 break;
20722 default:
20723 return -1;
20724 }
20725
20726 switch (code)
20727 {
20728 case NE:
20729 return scc_p ? base_bit + 3 : base_bit + 2;
20730 case EQ:
20731 return base_bit + 2;
20732 case GT: case GTU: case UNLE:
20733 return base_bit + 1;
20734 case LT: case LTU: case UNGE:
20735 return base_bit;
20736 case ORDERED: case UNORDERED:
20737 return base_bit + 3;
20738
20739 case GE: case GEU:
20740 /* If scc, we will have done a cror to put the bit in the
20741 unordered position. So test that bit. For integer, this is ! LT
20742 unless this is an scc insn. */
20743 return scc_p ? base_bit + 3 : base_bit;
20744
20745 case LE: case LEU:
20746 return scc_p ? base_bit + 3 : base_bit + 1;
20747
20748 default:
20749 return -1;
20750 }
20751 }
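/* Worked example: for (gt (reg:CC 69) (const_int 0)), taking CR0_REGNO
   to be 68 as on this port, the register is CR1, base_bit is
   4 * (69 - 68) = 4, and ccr_bit returns 4 + 1 = 5, the GT bit of CR1.
   With SCC_P set, GT still yields base_bit + 1, but the GE/LE/NE
   answers move to the unordered bit (base_bit + 3) because of the cror
   the scc sequence emits.  */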
20752 \f
20753 /* Return the GOT register. */
20754
20755 rtx
20756 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20757 {
20758 /* The second flow pass currently (June 1999) can't update
20759 regs_ever_live without disturbing other parts of the compiler, so
20760 update it here to make the prolog/epilogue code happy. */
20761 if (!can_create_pseudo_p ()
20762 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20763 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20764
20765 crtl->uses_pic_offset_table = 1;
20766
20767 return pic_offset_table_rtx;
20768 }
20769 \f
20770 static rs6000_stack_t stack_info;
20771
20772 /* Function to init struct machine_function.
20773 This will be called, via a pointer variable,
20774 from push_function_context. */
20775
20776 static struct machine_function *
20777 rs6000_init_machine_status (void)
20778 {
20779 stack_info.reload_completed = 0;
20780 return ggc_cleared_alloc<machine_function> ();
20781 }
20782 \f
20783 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20784
20785 /* Write out a function code label. */
20786
20787 void
20788 rs6000_output_function_entry (FILE *file, const char *fname)
20789 {
20790 if (fname[0] != '.')
20791 {
20792 switch (DEFAULT_ABI)
20793 {
20794 default:
20795 gcc_unreachable ();
20796
20797 case ABI_AIX:
20798 if (DOT_SYMBOLS)
20799 putc ('.', file);
20800 else
20801 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20802 break;
20803
20804 case ABI_ELFv2:
20805 case ABI_V4:
20806 case ABI_DARWIN:
20807 break;
20808 }
20809 }
20810
20811 RS6000_OUTPUT_BASENAME (file, fname);
20812 }
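/* E.g. on ABI_AIX with dot symbols, a function "foo" gets its text
   entry point written as ".foo"; without dot symbols an internal
   "L."-prefixed label is used instead, and the ELFv2, V4 and Darwin
   ABIs emit the basename unchanged.  */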
20813
20814 /* Print an operand. Recognize special options, documented below. */
20815
20816 #if TARGET_ELF
20817 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20818 only introduced by the linker, when applying the sda21
20819 relocation. */
20820 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20821 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20822 #else
20823 #define SMALL_DATA_RELOC "sda21"
20824 #define SMALL_DATA_REG 0
20825 #endif
20826
20827 void
20828 print_operand (FILE *file, rtx x, int code)
20829 {
20830 int i;
20831 unsigned HOST_WIDE_INT uval;
20832
20833 switch (code)
20834 {
20835 /* %a is output_address. */
20836
20837 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20838 output_operand. */
20839
20840 case 'D':
20841 /* Like 'J' but get to the GT bit only. */
20842 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20843 {
20844 output_operand_lossage ("invalid %%D value");
20845 return;
20846 }
20847
20848 /* Bit 1 is GT bit. */
20849 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20850
20851 /* Add one for shift count in rlinm for scc. */
20852 fprintf (file, "%d", i + 1);
20853 return;
20854
20855 case 'e':
20856 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20857 if (! INT_P (x))
20858 {
20859 output_operand_lossage ("invalid %%e value");
20860 return;
20861 }
20862
20863 uval = INTVAL (x);
20864 if ((uval & 0xffff) == 0 && uval != 0)
20865 putc ('s', file);
20866 return;
20867
20868 case 'E':
20869 /* X is a CR register. Print the number of the EQ bit of the CR. */
20870 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20871 output_operand_lossage ("invalid %%E value");
20872 else
20873 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20874 return;
20875
20876 case 'f':
20877 /* X is a CR register. Print the shift count needed to move it
20878 to the high-order four bits. */
20879 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20880 output_operand_lossage ("invalid %%f value");
20881 else
20882 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20883 return;
20884
20885 case 'F':
20886 /* Similar, but print the count for the rotate in the opposite
20887 direction. */
20888 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20889 output_operand_lossage ("invalid %%F value");
20890 else
20891 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20892 return;
20893
20894 case 'G':
20895 /* X is a constant integer. If it is negative, print "m",
20896 otherwise print "z". This is to make an aze or ame insn. */
20897 if (!CONST_INT_P (x))
20898 output_operand_lossage ("invalid %%G value");
20899 else if (INTVAL (x) >= 0)
20900 putc ('z', file);
20901 else
20902 putc ('m', file);
20903 return;
20904
20905 case 'h':
20906 /* If constant, output low-order five bits. Otherwise, write
20907 normally. */
20908 if (INT_P (x))
20909 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20910 else
20911 print_operand (file, x, 0);
20912 return;
20913
20914 case 'H':
20915 /* If constant, output low-order six bits. Otherwise, write
20916 normally. */
20917 if (INT_P (x))
20918 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20919 else
20920 print_operand (file, x, 0);
20921 return;
20922
20923 case 'I':
20924 /* Print `i' if this is a constant, else nothing. */
20925 if (INT_P (x))
20926 putc ('i', file);
20927 return;
20928
20929 case 'j':
20930 /* Write the bit number in CCR for jump. */
20931 i = ccr_bit (x, 0);
20932 if (i == -1)
20933 output_operand_lossage ("invalid %%j code");
20934 else
20935 fprintf (file, "%d", i);
20936 return;
20937
20938 case 'J':
20939 /* Similar, but add one for shift count in rlinm for scc and pass
20940 scc flag to `ccr_bit'. */
20941 i = ccr_bit (x, 1);
20942 if (i == -1)
20943 output_operand_lossage ("invalid %%J code");
20944 else
20945 /* If we want bit 31, write a shift count of zero, not 32. */
20946 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20947 return;
20948
20949 case 'k':
20950 /* X must be a constant. Write the 1's complement of the
20951 constant. */
20952 if (! INT_P (x))
20953 output_operand_lossage ("invalid %%k value");
20954 else
20955 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20956 return;
20957
20958 case 'K':
20959 /* X must be a symbolic constant on ELF. Write an
20960 expression suitable for an 'addi' that adds in the low 16
20961 bits of the MEM. */
20962 if (GET_CODE (x) == CONST)
20963 {
20964 if (GET_CODE (XEXP (x, 0)) != PLUS
20965 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20966 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20967 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20968 output_operand_lossage ("invalid %%K value");
20969 }
20970 print_operand_address (file, x);
20971 fputs ("@l", file);
20972 return;
20973
20974 /* %l is output_asm_label. */
20975
20976 case 'L':
20977 /* Write second word of DImode or DFmode reference. Works on register
20978 or non-indexed memory only. */
20979 if (REG_P (x))
20980 fputs (reg_names[REGNO (x) + 1], file);
20981 else if (MEM_P (x))
20982 {
20983 machine_mode mode = GET_MODE (x);
20984 /* Handle possible auto-increment. Since it is pre-increment and
20985 we have already done it, we can just use an offset of word. */
20986 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20987 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20988 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20989 UNITS_PER_WORD));
20990 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20991 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20992 UNITS_PER_WORD));
20993 else
20994 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20995 UNITS_PER_WORD),
20996 0));
20997
20998 if (small_data_operand (x, GET_MODE (x)))
20999 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21000 reg_names[SMALL_DATA_REG]);
21001 }
21002 return;
21003
21004 case 'N': /* Unused */
21005 /* Write the number of elements in the vector times 4. */
21006 if (GET_CODE (x) != PARALLEL)
21007 output_operand_lossage ("invalid %%N value");
21008 else
21009 fprintf (file, "%d", XVECLEN (x, 0) * 4);
21010 return;
21011
21012 case 'O': /* Unused */
21013 /* Similar, but subtract 1 first. */
21014 if (GET_CODE (x) != PARALLEL)
21015 output_operand_lossage ("invalid %%O value");
21016 else
21017 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
21018 return;
21019
21020 case 'p':
21021 /* X is a CONST_INT that is a power of two. Output the logarithm. */
21022 if (! INT_P (x)
21023 || INTVAL (x) < 0
21024 || (i = exact_log2 (INTVAL (x))) < 0)
21025 output_operand_lossage ("invalid %%p value");
21026 else
21027 fprintf (file, "%d", i);
21028 return;
21029
21030 case 'P':
21031 /* The operand must be an indirect memory reference. The result
21032 is the register name. */
21033 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
21034 || REGNO (XEXP (x, 0)) >= 32)
21035 output_operand_lossage ("invalid %%P value");
21036 else
21037 fputs (reg_names[REGNO (XEXP (x, 0))], file);
21038 return;
21039
21040 case 'q':
21041 /* This outputs the logical code corresponding to a boolean
21042 expression. The expression may have one or both operands
21043 negated (if one, only the first one). For condition register
21044 logical operations, it will also treat the negated
21045 CR codes as NOTs, but not handle NOTs of them. */
21046 {
21047 const char *const *t = 0;
21048 const char *s;
21049 enum rtx_code code = GET_CODE (x);
21050 static const char * const tbl[3][3] = {
21051 { "and", "andc", "nor" },
21052 { "or", "orc", "nand" },
21053 { "xor", "eqv", "xor" } };
21054
21055 if (code == AND)
21056 t = tbl[0];
21057 else if (code == IOR)
21058 t = tbl[1];
21059 else if (code == XOR)
21060 t = tbl[2];
21061 else
21062 output_operand_lossage ("invalid %%q value");
21063
21064 if (GET_CODE (XEXP (x, 0)) != NOT)
21065 s = t[0];
21066 else
21067 {
21068 if (GET_CODE (XEXP (x, 1)) == NOT)
21069 s = t[2];
21070 else
21071 s = t[1];
21072 }
21073
21074 fputs (s, file);
21075 }
21076 return;
21077
21078 case 'Q':
21079 if (! TARGET_MFCRF)
21080 return;
21081 fputc (',', file);
21082 /* FALLTHRU */
21083
21084 case 'R':
21085 /* X is a CR register. Print the mask for `mtcrf'. */
21086 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21087 output_operand_lossage ("invalid %%R value");
21088 else
21089 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
21090 return;
21091
21092 case 's':
21093 /* Low 5 bits of 32 - value. */
21094 if (! INT_P (x))
21095 output_operand_lossage ("invalid %%s value");
21096 else
21097 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
21098 return;
21099
21100 case 't':
21101 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
21102 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
21103 {
21104 output_operand_lossage ("invalid %%t value");
21105 return;
21106 }
21107
21108 /* Bit 3 is OV bit. */
21109 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
21110
21111 /* If we want bit 31, write a shift count of zero, not 32. */
21112 fprintf (file, "%d", i == 31 ? 0 : i + 1);
21113 return;
21114
21115 case 'T':
21116 /* Print the symbolic name of a branch target register. */
21117 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21118 x = XVECEXP (x, 0, 0);
21119 if (!REG_P (x) || (REGNO (x) != LR_REGNO
21120 && REGNO (x) != CTR_REGNO))
21121 output_operand_lossage ("invalid %%T value");
21122 else if (REGNO (x) == LR_REGNO)
21123 fputs ("lr", file);
21124 else
21125 fputs ("ctr", file);
21126 return;
21127
21128 case 'u':
21129 /* High-order or low-order 16 bits of constant, whichever is non-zero,
21130 for use in unsigned operand. */
21131 if (! INT_P (x))
21132 {
21133 output_operand_lossage ("invalid %%u value");
21134 return;
21135 }
21136
21137 uval = INTVAL (x);
21138 if ((uval & 0xffff) == 0)
21139 uval >>= 16;
21140
21141 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
21142 return;
21143
21144 case 'v':
21145 /* High-order 16 bits of constant for use in signed operand. */
21146 if (! INT_P (x))
21147 output_operand_lossage ("invalid %%v value");
21148 else
21149 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
21150 (INTVAL (x) >> 16) & 0xffff);
21151 return;
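/* Example for the two cases above: for (const_int 0x12345678),
   %v prints 0x1234 and %u prints 0x5678; for (const_int 0x12340000),
   %u also prints 0x1234, because the zero low half is shifted away
   first.  */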
21152
21153 case 'U':
21154 /* Print `u' if this has an auto-increment or auto-decrement. */
21155 if (MEM_P (x)
21156 && (GET_CODE (XEXP (x, 0)) == PRE_INC
21157 || GET_CODE (XEXP (x, 0)) == PRE_DEC
21158 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
21159 putc ('u', file);
21160 return;
21161
21162 case 'V':
21163 /* Print the trap code for this operand. */
21164 switch (GET_CODE (x))
21165 {
21166 case EQ:
21167 fputs ("eq", file); /* 4 */
21168 break;
21169 case NE:
21170 fputs ("ne", file); /* 24 */
21171 break;
21172 case LT:
21173 fputs ("lt", file); /* 16 */
21174 break;
21175 case LE:
21176 fputs ("le", file); /* 20 */
21177 break;
21178 case GT:
21179 fputs ("gt", file); /* 8 */
21180 break;
21181 case GE:
21182 fputs ("ge", file); /* 12 */
21183 break;
21184 case LTU:
21185 fputs ("llt", file); /* 2 */
21186 break;
21187 case LEU:
21188 fputs ("lle", file); /* 6 */
21189 break;
21190 case GTU:
21191 fputs ("lgt", file); /* 1 */
21192 break;
21193 case GEU:
21194 fputs ("lge", file); /* 5 */
21195 break;
21196 default:
21197 output_operand_lossage ("invalid %%V value");
21198 }
21199 break;
21200
21201 case 'w':
21202 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
21203 normally. */
21204 if (INT_P (x))
21205 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
21206 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
21207 else
21208 print_operand (file, x, 0);
21209 return;
21210
21211 case 'x':
21212 /* X is a FPR or Altivec register used in a VSX context. */
21213 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
21214 output_operand_lossage ("invalid %%x value");
21215 else
21216 {
21217 int reg = REGNO (x);
21218 int vsx_reg = (FP_REGNO_P (reg)
21219 ? reg - 32
21220 : reg - FIRST_ALTIVEC_REGNO + 32);
21221
21222 #ifdef TARGET_REGNAMES
21223 if (TARGET_REGNAMES)
21224 fprintf (file, "%%vs%d", vsx_reg);
21225 else
21226 #endif
21227 fprintf (file, "%d", vsx_reg);
21228 }
21229 return;
21230
21231 case 'X':
21232 if (MEM_P (x)
21233 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
21234 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
21235 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
21236 putc ('x', file);
21237 return;
21238
21239 case 'Y':
21240 /* Like 'L', for third word of TImode/PTImode. */
21241 if (REG_P (x))
21242 fputs (reg_names[REGNO (x) + 2], file);
21243 else if (MEM_P (x))
21244 {
21245 machine_mode mode = GET_MODE (x);
21246 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21247 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21248 output_address (mode, plus_constant (Pmode,
21249 XEXP (XEXP (x, 0), 0), 8));
21250 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21251 output_address (mode, plus_constant (Pmode,
21252 XEXP (XEXP (x, 0), 0), 8));
21253 else
21254 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
21255 if (small_data_operand (x, GET_MODE (x)))
21256 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21257 reg_names[SMALL_DATA_REG]);
21258 }
21259 return;
21260
21261 case 'z':
21262 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21263 x = XVECEXP (x, 0, 1);
21264 /* X is a SYMBOL_REF. Write out the name preceded by a
21265 period and without any trailing data in brackets. Used for function
21266 names. If we are configured for System V (or the embedded ABI) on
21267 the PowerPC, do not emit the period, since those systems do not use
21268 TOCs and the like. */
21269 if (!SYMBOL_REF_P (x))
21270 {
21271 output_operand_lossage ("invalid %%z value");
21272 return;
21273 }
21274
21275 /* For macho, check to see if we need a stub. */
21276 if (TARGET_MACHO)
21277 {
21278 const char *name = XSTR (x, 0);
21279 #if TARGET_MACHO
21280 if (darwin_emit_branch_islands
21281 && MACHOPIC_INDIRECT
21282 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21283 name = machopic_indirection_name (x, /*stub_p=*/true);
21284 #endif
21285 assemble_name (file, name);
21286 }
21287 else if (!DOT_SYMBOLS)
21288 assemble_name (file, XSTR (x, 0));
21289 else
21290 rs6000_output_function_entry (file, XSTR (x, 0));
21291 return;
21292
21293 case 'Z':
21294 /* Like 'L', for last word of TImode/PTImode. */
21295 if (REG_P (x))
21296 fputs (reg_names[REGNO (x) + 3], file);
21297 else if (MEM_P (x))
21298 {
21299 machine_mode mode = GET_MODE (x);
21300 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21301 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21302 output_address (mode, plus_constant (Pmode,
21303 XEXP (XEXP (x, 0), 0), 12));
21304 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21305 output_address (mode, plus_constant (Pmode,
21306 XEXP (XEXP (x, 0), 0), 12));
21307 else
21308 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21309 if (small_data_operand (x, GET_MODE (x)))
21310 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21311 reg_names[SMALL_DATA_REG]);
21312 }
21313 return;
21314
21315 /* Print AltiVec memory operand. */
21316 case 'y':
21317 {
21318 rtx tmp;
21319
21320 gcc_assert (MEM_P (x));
21321
21322 tmp = XEXP (x, 0);
21323
21324 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21325 && GET_CODE (tmp) == AND
21326 && CONST_INT_P (XEXP (tmp, 1))
21327 && INTVAL (XEXP (tmp, 1)) == -16)
21328 tmp = XEXP (tmp, 0);
21329 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21330 && GET_CODE (tmp) == PRE_MODIFY)
21331 tmp = XEXP (tmp, 1);
21332 if (REG_P (tmp))
21333 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21334 else
21335 {
21336 if (GET_CODE (tmp) != PLUS
21337 || !REG_P (XEXP (tmp, 0))
21338 || !REG_P (XEXP (tmp, 1)))
21339 {
21340 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21341 break;
21342 }
21343
21344 if (REGNO (XEXP (tmp, 0)) == 0)
21345 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21346 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21347 else
21348 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21349 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21350 }
21351 break;
21352 }
21353
21354 case 0:
21355 if (REG_P (x))
21356 fprintf (file, "%s", reg_names[REGNO (x)]);
21357 else if (MEM_P (x))
21358 {
21359 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21360 know the width from the mode. */
21361 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21362 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21363 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21364 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21365 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21366 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21367 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21368 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21369 else
21370 output_address (GET_MODE (x), XEXP (x, 0));
21371 }
21372 else if (toc_relative_expr_p (x, false,
21373 &tocrel_base_oac, &tocrel_offset_oac))
21374 /* This hack along with a corresponding hack in
21375 rs6000_output_addr_const_extra arranges to output addends
21376 where the assembler expects to find them. eg.
21377 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21378 without this hack would be output as "x@toc+4". We
21379 want "x+4@toc". */
21380 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21381 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21382 output_addr_const (file, XVECEXP (x, 0, 0));
21383 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21384 output_addr_const (file, XVECEXP (x, 0, 1));
21385 else
21386 output_addr_const (file, x);
21387 return;
21388
21389 case '&':
21390 if (const char *name = get_some_local_dynamic_name ())
21391 assemble_name (file, name);
21392 else
21393 output_operand_lossage ("'%%&' used without any "
21394 "local dynamic TLS references");
21395 return;
21396
21397 default:
21398 output_operand_lossage ("invalid %%xn code");
21399 }
21400 }
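/* Tying the modifiers above back to templates used earlier in this
   file: "xxlor %x0,%x1,%x1" prints VSX register numbers via %x,
   "lvx %0,%y1" prints an AltiVec indexed address via %y, and the
   indirect call strings use %T to print "lr" or "ctr".  */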
21401 \f
21402 /* Print the address of an operand. */
21403
21404 void
21405 print_operand_address (FILE *file, rtx x)
21406 {
21407 if (REG_P (x))
21408 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21409 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21410 || GET_CODE (x) == LABEL_REF)
21411 {
21412 output_addr_const (file, x);
21413 if (small_data_operand (x, GET_MODE (x)))
21414 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21415 reg_names[SMALL_DATA_REG]);
21416 else
21417 gcc_assert (!TARGET_TOC);
21418 }
21419 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21420 && REG_P (XEXP (x, 1)))
21421 {
21422 if (REGNO (XEXP (x, 0)) == 0)
21423 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21424 reg_names[ REGNO (XEXP (x, 0)) ]);
21425 else
21426 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21427 reg_names[ REGNO (XEXP (x, 1)) ]);
21428 }
21429 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21430 && CONST_INT_P (XEXP (x, 1)))
21431 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21432 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21433 #if TARGET_MACHO
21434 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21435 && CONSTANT_P (XEXP (x, 1)))
21436 {
21437 fprintf (file, "lo16(");
21438 output_addr_const (file, XEXP (x, 1));
21439 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21440 }
21441 #endif
21442 #if TARGET_ELF
21443 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21444 && CONSTANT_P (XEXP (x, 1)))
21445 {
21446 output_addr_const (file, XEXP (x, 1));
21447 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21448 }
21449 #endif
21450 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21451 {
21452 /* This hack along with a corresponding hack in
21453 rs6000_output_addr_const_extra arranges to output addends
21454 where the assembler expects to find them. eg.
21455 (lo_sum (reg 9)
21456 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21457 without this hack would be output as "x@toc+8@l(9)". We
21458 want "x+8@toc@l(9)". */
21459 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21460 if (GET_CODE (x) == LO_SUM)
21461 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21462 else
21463 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21464 }
21465 else
21466 output_addr_const (file, x);
21467 }
21468 \f
21469 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21470
21471 static bool
21472 rs6000_output_addr_const_extra (FILE *file, rtx x)
21473 {
21474 if (GET_CODE (x) == UNSPEC)
21475 switch (XINT (x, 1))
21476 {
21477 case UNSPEC_TOCREL:
21478 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21479 && REG_P (XVECEXP (x, 0, 1))
21480 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21481 output_addr_const (file, XVECEXP (x, 0, 0));
21482 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21483 {
21484 if (INTVAL (tocrel_offset_oac) >= 0)
21485 fprintf (file, "+");
21486 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21487 }
21488 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21489 {
21490 putc ('-', file);
21491 assemble_name (file, toc_label_name);
21492 need_toc_init = 1;
21493 }
21494 else if (TARGET_ELF)
21495 fputs ("@toc", file);
21496 return true;
21497
21498 #if TARGET_MACHO
21499 case UNSPEC_MACHOPIC_OFFSET:
21500 output_addr_const (file, XVECEXP (x, 0, 0));
21501 putc ('-', file);
21502 machopic_output_function_base_name (file);
21503 return true;
21504 #endif
21505 }
21506 return false;
21507 }
21508 \f
21509 /* Target hook for assembling integer objects. The PowerPC version has
21510 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21511 is defined. It also needs to handle DI-mode objects on 64-bit
21512 targets. */
21513
21514 static bool
21515 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21516 {
21517 #ifdef RELOCATABLE_NEEDS_FIXUP
21518 /* Special handling for SI values. */
21519 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21520 {
21521 static int recurse = 0;
21522
21523 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21524 the .fixup section. Since the TOC section is already relocated, we
21525 don't need to mark it here. We used to skip the text section, but it
21526 should never be valid for relocated addresses to be placed in the text
21527 section. */
21528 if (DEFAULT_ABI == ABI_V4
21529 && (TARGET_RELOCATABLE || flag_pic > 1)
21530 && in_section != toc_section
21531 && !recurse
21532 && !CONST_SCALAR_INT_P (x)
21533 && CONSTANT_P (x))
21534 {
21535 char buf[256];
21536
21537 recurse = 1;
21538 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21539 fixuplabelno++;
21540 ASM_OUTPUT_LABEL (asm_out_file, buf);
21541 fprintf (asm_out_file, "\t.long\t(");
21542 output_addr_const (asm_out_file, x);
21543 fprintf (asm_out_file, ")@fixup\n");
21544 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21545 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21546 fprintf (asm_out_file, "\t.long\t");
21547 assemble_name (asm_out_file, buf);
21548 fprintf (asm_out_file, "\n\t.previous\n");
21549 recurse = 0;
21550 return true;
21551 }
21552 /* Remove initial .'s to turn a -mcall-aixdesc function
21553 address into the address of the descriptor, not the function
21554 itself. */
21555 else if (SYMBOL_REF_P (x)
21556 && XSTR (x, 0)[0] == '.'
21557 && DEFAULT_ABI == ABI_AIX)
21558 {
21559 const char *name = XSTR (x, 0);
21560 while (*name == '.')
21561 name++;
21562
21563 fprintf (asm_out_file, "\t.long\t%s\n", name);
21564 return true;
21565 }
21566 }
21567 #endif /* RELOCATABLE_NEEDS_FIXUP */
21568 return default_assemble_integer (x, size, aligned_p);
21569 }
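/* A sketch of the -mrelocatable fixup output produced above (the label
   number and symbol are illustrative):

       .LCP0:
	       .long (sym)@fixup
	       .section ".fixup","aw"
	       .align 2
	       .long .LCP0
	       .previous
   */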
21570
21571 /* Return a template string for assembly to emit when making an
21572 external call. FUNOP is the call mem argument operand number. */
21573
21574 static const char *
21575 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21576 {
21577 /* -Wformat-overflow workaround, without which gcc thinks that %u
21578 might produce 10 digits. */
21579 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21580
21581 char arg[12];
21582 arg[0] = 0;
21583 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21584 {
21585 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21586 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21587 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21588 sprintf (arg, "(%%&@tlsld)");
21589 else
21590 gcc_unreachable ();
21591 }
21592
21593 /* The magic 32768 offset here corresponds to the offset of
21594 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21595 char z[11];
21596 sprintf (z, "%%z%u%s", funop,
21597 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21598 ? "+32768" : ""));
21599
21600 static char str[32]; /* 2 spare */
21601 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21602 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21603 sibcall ? "" : "\n\tnop");
21604 else if (DEFAULT_ABI == ABI_V4)
21605 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21606 flag_pic ? "@plt" : "");
21607 #if TARGET_MACHO
21608 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21609 else if (DEFAULT_ABI == ABI_DARWIN)
21610 {
21611 /* The cookie is in operand func+2. */
21612 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21613 int cookie = INTVAL (operands[funop + 2]);
21614 if (cookie & CALL_LONG)
21615 {
21616 tree funname = get_identifier (XSTR (operands[funop], 0));
21617 tree labelname = get_prev_label (funname);
21618 gcc_checking_assert (labelname && !sibcall);
21619
21620 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21621 instruction will reach 'foo', otherwise link as 'bl L42'".
21622 "L42" should be a 'branch island', that will do a far jump to
21623 'foo'. Branch islands are generated in
21624 macho_branch_islands(). */
21625 sprintf (str, "jbsr %%z%u,%.10s", funop,
21626 IDENTIFIER_POINTER (labelname));
21627 }
21628 else
21629 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21630 after the call. */
21631 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21632 }
21633 #endif
21634 else
21635 gcc_unreachable ();
21636 return str;
21637 }
21638
21639 const char *
21640 rs6000_call_template (rtx *operands, unsigned int funop)
21641 {
21642 return rs6000_call_template_1 (operands, funop, false);
21643 }
21644
21645 const char *
21646 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21647 {
21648 return rs6000_call_template_1 (operands, funop, true);
21649 }
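/* Sample strings the builders above return (taking funop == 0 and no
   TLS argument): ABI_AIX and ABI_ELFv2 calls come out as
   "bl %z0\n\tnop", an ABI_V4 call with flag_pic set (and without the
   secure-PLT +32768 addend) as "bl %z0@plt", and the corresponding V4
   sibcall as "b %z0@plt".  */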
21650
21651 /* As above, for indirect calls. */
21652
21653 static const char *
21654 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21655 bool sibcall)
21656 {
21657 /* -Wformat-overflow workaround, without which gcc thinks that %u
21658 might produce 10 digits. Note that -Wformat-overflow will not
21659 currently warn here for str[], so do not rely on a warning to
21660 ensure str[] is correctly sized. */
21661 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21662
21663 /* Currently, funop is either 0 or 1. The maximum string is always
21664 a !speculate 64-bit __tls_get_addr call.
21665
21666 ABI_AIX:
21667 . 9 ld 2,%3\n\t
21668 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21669 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21670 . 9 crset 2\n\t
21671 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21672 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21673 . 10 beq%T1l-\n\t
21674 . 10 ld 2,%4(1)
21675 .---
21676 .151
21677
21678 ABI_ELFv2:
21679 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21680 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21681 . 9 crset 2\n\t
21682 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21683 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21684 . 10 beq%T1l-\n\t
21685 . 10 ld 2,%3(1)
21686 .---
21687 .142
21688
21689 ABI_V4:
21690 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21691 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21692 . 9 crset 2\n\t
21693 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21694 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21695 . 8 beq%T1l-
21696 .---
21697 .141 */
21698 static char str[160]; /* 8 spare */
21699 char *s = str;
21700 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21701
21702 if (DEFAULT_ABI == ABI_AIX)
21703 s += sprintf (s,
21704 "l%s 2,%%%u\n\t",
21705 ptrload, funop + 2);
21706
21707 /* We don't need the extra code to stop indirect call speculation if
21708 calling via LR. */
21709 bool speculate = (TARGET_MACHO
21710 || rs6000_speculate_indirect_jumps
21711 || (REG_P (operands[funop])
21712 && REGNO (operands[funop]) == LR_REGNO));
21713
21714 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21715 {
21716 const char *rel64 = TARGET_64BIT ? "64" : "";
21717 char tls[29];
21718 tls[0] = 0;
21719 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21720 {
21721 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21722 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21723 rel64, funop + 1);
21724 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21725 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21726 rel64);
21727 else
21728 gcc_unreachable ();
21729 }
21730
21731 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21732 && flag_pic == 2 ? "+32768" : "");
21733 if (!speculate)
21734 {
21735 s += sprintf (s,
21736 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21737 tls, rel64, funop, addend);
21738 s += sprintf (s, "crset 2\n\t");
21739 }
21740 s += sprintf (s,
21741 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21742 tls, rel64, funop, addend);
21743 }
21744 else if (!speculate)
21745 s += sprintf (s, "crset 2\n\t");
21746
21747 if (DEFAULT_ABI == ABI_AIX)
21748 {
21749 if (speculate)
21750 sprintf (s,
21751 "b%%T%ul\n\t"
21752 "l%s 2,%%%u(1)",
21753 funop, ptrload, funop + 3);
21754 else
21755 sprintf (s,
21756 "beq%%T%ul-\n\t"
21757 "l%s 2,%%%u(1)",
21758 funop, ptrload, funop + 3);
21759 }
21760 else if (DEFAULT_ABI == ABI_ELFv2)
21761 {
21762 if (speculate)
21763 sprintf (s,
21764 "b%%T%ul\n\t"
21765 "l%s 2,%%%u(1)",
21766 funop, ptrload, funop + 2);
21767 else
21768 sprintf (s,
21769 "beq%%T%ul-\n\t"
21770 "l%s 2,%%%u(1)",
21771 funop, ptrload, funop + 2);
21772 }
21773 else
21774 {
21775 if (speculate)
21776 sprintf (s,
21777 "b%%T%u%s",
21778 funop, sibcall ? "" : "l");
21779 else
21780 sprintf (s,
21781 "beq%%T%u%s-%s",
21782 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21783 }
21784 return str;
21785 }
21786
21787 const char *
21788 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21789 {
21790 return rs6000_indirect_call_template_1 (operands, funop, false);
21791 }
21792
21793 const char *
21794 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21795 {
21796 return rs6000_indirect_call_template_1 (operands, funop, true);
21797 }
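/* For example, with indirect-branch speculation barriers enabled
   (-mno-speculate-indirect-jumps), a 64-bit ELFv2 indirect call through
   CTR comes out roughly as

	crset 2
	beqctrl-
	ld 2,24(1)

   where beqctrl- is the %T-expanded conditional branch-and-link and the
   final load restores the TOC pointer from its stack slot.  */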
21798
21799 #if HAVE_AS_PLTSEQ
21800 /* Output indirect call insns.
21801 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21802 const char *
21803 rs6000_pltseq_template (rtx *operands, int which)
21804 {
21805 const char *rel64 = TARGET_64BIT ? "64" : "";
21806 char tls[28];
21807 tls[0] = 0;
21808 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21809 {
21810 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21811 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21812 rel64);
21813 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21814 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21815 rel64);
21816 else
21817 gcc_unreachable ();
21818 }
21819
21820 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21821 static char str[96]; /* 15 spare */
21822 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21823 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21824 && flag_pic == 2 ? "+32768" : "");
21825 switch (which)
21826 {
21827 case 0:
21828 sprintf (str,
21829 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21830 "st%s",
21831 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21832 break;
21833 case 1:
21834 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21835 sprintf (str,
21836 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21837 "lis %%0,0",
21838 tls, off, rel64);
21839 else
21840 sprintf (str,
21841 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21842 "addis %%0,%%1,0",
21843 tls, off, rel64, addend);
21844 break;
21845 case 2:
21846 sprintf (str,
21847 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21848 "l%s %%0,0(%%1)",
21849 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21850 TARGET_64BIT ? "d" : "wz");
21851 break;
21852 case 3:
21853 sprintf (str,
21854 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21855 "mtctr %%1",
21856 tls, rel64, addend);
21857 break;
21858 default:
21859 gcc_unreachable ();
21860 }
21861 return str;
21862 }
21863 #endif
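/* Sketch of the inline PLT sequence the WHICH = 1, 2, 3 cases build on
   64-bit little-endian ELFv2 (big-endian adds +2 to the first two
   .reloc offsets; register choice is illustrative):

	.reloc .,R_PPC64_PLT16_HA,foo
	addis 9,2,0
	.reloc .,R_PPC64_PLT16_LO_DS,foo
	ld 9,0(9)
	.reloc .,R_PPC64_PLTSEQ,foo
	mtctr 9

   The relocations let the linker either keep the PLT load or edit the
   sequence when the final target is known.  */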
21864
21865 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21866 /* Emit an assembler directive to set symbol visibility for DECL to
21867 VISIBILITY_TYPE. */
21868
21869 static void
21870 rs6000_assemble_visibility (tree decl, int vis)
21871 {
21872 if (TARGET_XCOFF)
21873 return;
21874
21875 /* Functions need to have their entry point symbol visibility set as
21876 well as their descriptor symbol visibility. */
21877 if (DEFAULT_ABI == ABI_AIX
21878 && DOT_SYMBOLS
21879 && TREE_CODE (decl) == FUNCTION_DECL)
21880 {
21881 static const char * const visibility_types[] = {
21882 NULL, "protected", "hidden", "internal"
21883 };
21884
21885 const char *name, *type;
21886
21887 name = ((* targetm.strip_name_encoding)
21888 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21889 type = visibility_types[vis];
21890
21891 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21892 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21893 }
21894 else
21895 default_assemble_visibility (decl, vis);
21896 }
21897 #endif
21898 \f
21899 enum rtx_code
21900 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21901 {
21902 /* Reversal of FP compares needs care -- an ordered compare
21903 becomes an unordered compare and vice versa. */
21904 if (mode == CCFPmode
21905 && (!flag_finite_math_only
21906 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21907 || code == UNEQ || code == LTGT))
21908 return reverse_condition_maybe_unordered (code);
21909 else
21910 return reverse_condition (code);
21911 }
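/* Example: reversing (lt ...) of a CCFPmode compare without
   -ffinite-math-only yields UNGE, keeping the NaN case on the correct
   side, whereas plain reverse_condition would return GE.  */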
21912
21913 /* Generate a compare for CODE. Return a brand-new rtx that
21914 represents the result of the compare. */
21915
21916 static rtx
21917 rs6000_generate_compare (rtx cmp, machine_mode mode)
21918 {
21919 machine_mode comp_mode;
21920 rtx compare_result;
21921 enum rtx_code code = GET_CODE (cmp);
21922 rtx op0 = XEXP (cmp, 0);
21923 rtx op1 = XEXP (cmp, 1);
21924
21925 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21926 comp_mode = CCmode;
21927 else if (FLOAT_MODE_P (mode))
21928 comp_mode = CCFPmode;
21929 else if (code == GTU || code == LTU
21930 || code == GEU || code == LEU)
21931 comp_mode = CCUNSmode;
21932 else if ((code == EQ || code == NE)
21933 && unsigned_reg_p (op0)
21934 && (unsigned_reg_p (op1)
21935 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21936 /* These are unsigned values, perhaps there will be a later
21937 ordering compare that can be shared with this one. */
21938 comp_mode = CCUNSmode;
21939 else
21940 comp_mode = CCmode;
21941
21942 /* If we have an unsigned compare, make sure we don't have a signed value as
21943 an immediate. */
21944 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21945 && INTVAL (op1) < 0)
21946 {
21947 op0 = copy_rtx_if_shared (op0);
21948 op1 = force_reg (GET_MODE (op0), op1);
21949 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21950 }
21951
21952 /* First, the compare. */
21953 compare_result = gen_reg_rtx (comp_mode);
21954
21955 /* IEEE 128-bit support in VSX registers when we do not have hardware
21956 support. */
21957 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21958 {
21959 rtx libfunc = NULL_RTX;
21960 bool check_nan = false;
21961 rtx dest;
21962
21963 switch (code)
21964 {
21965 case EQ:
21966 case NE:
21967 libfunc = optab_libfunc (eq_optab, mode);
21968 break;
21969
21970 case GT:
21971 case GE:
21972 libfunc = optab_libfunc (ge_optab, mode);
21973 break;
21974
21975 case LT:
21976 case LE:
21977 libfunc = optab_libfunc (le_optab, mode);
21978 break;
21979
21980 case UNORDERED:
21981 case ORDERED:
21982 libfunc = optab_libfunc (unord_optab, mode);
21983 code = (code == UNORDERED) ? NE : EQ;
21984 break;
21985
21986 case UNGE:
21987 case UNGT:
21988 check_nan = true;
21989 libfunc = optab_libfunc (ge_optab, mode);
21990 code = (code == UNGE) ? GE : GT;
21991 break;
21992
21993 case UNLE:
21994 case UNLT:
21995 check_nan = true;
21996 libfunc = optab_libfunc (le_optab, mode);
21997 code = (code == UNLE) ? LE : LT;
21998 break;
21999
22000 case UNEQ:
22001 case LTGT:
22002 check_nan = true;
22003 libfunc = optab_libfunc (eq_optab, mode);
22004 code = (code == UNEQ) ? EQ : NE;
22005 break;
22006
22007 default:
22008 gcc_unreachable ();
22009 }
22010
22011 gcc_assert (libfunc);
22012
22013 if (!check_nan)
22014 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22015 SImode, op0, mode, op1, mode);
22016
22017 /* The library signals an exception for signalling NaNs, so we need to
22018 handle isgreater, etc. by first checking isordered. */
22019 else
22020 {
22021 rtx ne_rtx, normal_dest, unord_dest;
22022 rtx unord_func = optab_libfunc (unord_optab, mode);
22023 rtx join_label = gen_label_rtx ();
22024 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
22025 rtx unord_cmp = gen_reg_rtx (comp_mode);
22026
22028 /* Test for either value being a NaN. */
22029 gcc_assert (unord_func);
22030 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
22031 SImode, op0, mode, op1, mode);
22032
22033 /* Set the result to 1, and jump straight to the join label if
22034 either value is a NaN. */
22035 dest = gen_reg_rtx (SImode);
22036 emit_move_insn (dest, const1_rtx);
22037 emit_insn (gen_rtx_SET (unord_cmp,
22038 gen_rtx_COMPARE (comp_mode, unord_dest,
22039 const0_rtx)));
22040
22041 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
22042 emit_jump_insn (gen_rtx_SET (pc_rtx,
22043 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
22044 join_ref,
22045 pc_rtx)));
22046
22047 /* Do the normal comparison, knowing that the values are not
22048 NaNs. */
22049 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
22050 SImode, op0, mode, op1, mode);
22051
22052 emit_insn (gen_cstoresi4 (dest,
22053 gen_rtx_fmt_ee (code, SImode, normal_dest,
22054 const0_rtx),
22055 normal_dest, const0_rtx));
22056
22057 /* Join NaN and non-NaN paths. Compare dest against 0. */
22058 emit_label (join_label);
22059 code = NE;
22060 }
22061
22062 emit_insn (gen_rtx_SET (compare_result,
22063 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
22064 }
22065
22066 else
22067 {
22068 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
22069 CLOBBERs to match cmptf_internal2 pattern. */
22070 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
22071 && FLOAT128_IBM_P (GET_MODE (op0))
22072 && TARGET_HARD_FLOAT)
22073 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22074 gen_rtvec (10,
22075 gen_rtx_SET (compare_result,
22076 gen_rtx_COMPARE (comp_mode, op0, op1)),
22077 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22078 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22079 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22080 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22081 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22082 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22083 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22084 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
22085 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
22086 else if (GET_CODE (op1) == UNSPEC
22087 && XINT (op1, 1) == UNSPEC_SP_TEST)
22088 {
22089 rtx op1b = XVECEXP (op1, 0, 0);
22090 comp_mode = CCEQmode;
22091 compare_result = gen_reg_rtx (CCEQmode);
22092 if (TARGET_64BIT)
22093 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
22094 else
22095 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
22096 }
22097 else
22098 emit_insn (gen_rtx_SET (compare_result,
22099 gen_rtx_COMPARE (comp_mode, op0, op1)));
22100 }
22101
22102 /* Some kinds of FP comparisons need an OR operation;
22103 under flag_finite_math_only we don't bother. */
22104 if (FLOAT_MODE_P (mode)
22105 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
22106 && !flag_finite_math_only
22107 && (code == LE || code == GE
22108 || code == UNEQ || code == LTGT
22109 || code == UNGT || code == UNLT))
22110 {
22111 enum rtx_code or1, or2;
22112 rtx or1_rtx, or2_rtx, compare2_rtx;
22113 rtx or_result = gen_reg_rtx (CCEQmode);
22114
22115 switch (code)
22116 {
22117 case LE: or1 = LT; or2 = EQ; break;
22118 case GE: or1 = GT; or2 = EQ; break;
22119 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
22120 case LTGT: or1 = LT; or2 = GT; break;
22121 case UNGT: or1 = UNORDERED; or2 = GT; break;
22122 case UNLT: or1 = UNORDERED; or2 = LT; break;
22123 default: gcc_unreachable ();
22124 }
22125 validate_condition_mode (or1, comp_mode);
22126 validate_condition_mode (or2, comp_mode);
22127 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
22128 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
22129 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
22130 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
22131 const_true_rtx);
22132 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
22133
22134 compare_result = or_result;
22135 code = EQ;
22136 }
22137
22138 validate_condition_mode (code, GET_MODE (compare_result));
22139
22140 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
22141 }
22142
22143 \f
22144 /* Return the diagnostic message string if the binary operation OP is
22145 not permitted on TYPE1 and TYPE2, NULL otherwise. */
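/* For instance, without -mfloat128-convert a mix such as the following
   (a sketch of user code, not part of this file) is diagnosed:

     __float128 a;
     __ibm128 b;
     ... a + b ...   // "__float128 and __ibm128 cannot be used in the
                     //  same expression"  */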
22146
22147 static const char*
22148 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
22149 const_tree type1,
22150 const_tree type2)
22151 {
22152 machine_mode mode1 = TYPE_MODE (type1);
22153 machine_mode mode2 = TYPE_MODE (type2);
22154
22155 /* For complex modes, use the inner type. */
22156 if (COMPLEX_MODE_P (mode1))
22157 mode1 = GET_MODE_INNER (mode1);
22158
22159 if (COMPLEX_MODE_P (mode2))
22160 mode2 = GET_MODE_INNER (mode2);
22161
22162 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
22163 double to intermix unless -mfloat128-convert. */
22164 if (mode1 == mode2)
22165 return NULL;
22166
22167 if (!TARGET_FLOAT128_CVT)
22168 {
22169 if ((mode1 == KFmode && mode2 == IFmode)
22170 || (mode1 == IFmode && mode2 == KFmode))
22171 return N_("__float128 and __ibm128 cannot be used in the same "
22172 "expression");
22173
22174 if (TARGET_IEEEQUAD
22175 && ((mode1 == IFmode && mode2 == TFmode)
22176 || (mode1 == TFmode && mode2 == IFmode)))
22177 return N_("__ibm128 and long double cannot be used in the same "
22178 "expression");
22179
22180 if (!TARGET_IEEEQUAD
22181 && ((mode1 == KFmode && mode2 == TFmode)
22182 || (mode1 == TFmode && mode2 == KFmode)))
22183 return N_("__float128 and long double cannot be used in the same "
22184 "expression");
22185 }
22186
22187 return NULL;
22188 }
22189
22190 \f
22191 /* Expand floating point conversion to/from __float128 and __ibm128. */
22192
22193 void
22194 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
22195 {
22196 machine_mode dest_mode = GET_MODE (dest);
22197 machine_mode src_mode = GET_MODE (src);
22198 convert_optab cvt = unknown_optab;
22199 bool do_move = false;
22200 rtx libfunc = NULL_RTX;
22201 rtx dest2;
22202 typedef rtx (*rtx_2func_t) (rtx, rtx);
22203 rtx_2func_t hw_convert = (rtx_2func_t)0;
22204 size_t kf_or_tf;
22205
22206 struct hw_conv_t {
22207 rtx_2func_t from_df;
22208 rtx_2func_t from_sf;
22209 rtx_2func_t from_si_sign;
22210 rtx_2func_t from_si_uns;
22211 rtx_2func_t from_di_sign;
22212 rtx_2func_t from_di_uns;
22213 rtx_2func_t to_df;
22214 rtx_2func_t to_sf;
22215 rtx_2func_t to_si_sign;
22216 rtx_2func_t to_si_uns;
22217 rtx_2func_t to_di_sign;
22218 rtx_2func_t to_di_uns;
22219 } hw_conversions[2] = {
22220 /* conversions to/from KFmode */
22221 {
22222 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
22223 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
22224 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
22225 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
22226 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
22227 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
22228 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
22229 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
22230 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
22231 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
22232 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
22233 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
22234 },
22235
22236 /* conversions to/from TFmode */
22237 {
22238 gen_extenddftf2_hw, /* TFmode <- DFmode. */
22239 gen_extendsftf2_hw, /* TFmode <- SFmode. */
22240 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
22241 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
22242 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
22243 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
22244 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
22245 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
22246 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
22247 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
22248 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
22249 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
22250 },
22251 };
22252
22253 if (dest_mode == src_mode)
22254 gcc_unreachable ();
22255
22256 /* Eliminate memory operations. */
22257 if (MEM_P (src))
22258 src = force_reg (src_mode, src);
22259
22260 if (MEM_P (dest))
22261 {
22262 rtx tmp = gen_reg_rtx (dest_mode);
22263 rs6000_expand_float128_convert (tmp, src, unsigned_p);
22264 rs6000_emit_move (dest, tmp, dest_mode);
22265 return;
22266 }
22267
22268 /* Convert to IEEE 128-bit floating point. */
22269 if (FLOAT128_IEEE_P (dest_mode))
22270 {
22271 if (dest_mode == KFmode)
22272 kf_or_tf = 0;
22273 else if (dest_mode == TFmode)
22274 kf_or_tf = 1;
22275 else
22276 gcc_unreachable ();
22277
22278 switch (src_mode)
22279 {
22280 case E_DFmode:
22281 cvt = sext_optab;
22282 hw_convert = hw_conversions[kf_or_tf].from_df;
22283 break;
22284
22285 case E_SFmode:
22286 cvt = sext_optab;
22287 hw_convert = hw_conversions[kf_or_tf].from_sf;
22288 break;
22289
22290 case E_KFmode:
22291 case E_IFmode:
22292 case E_TFmode:
22293 if (FLOAT128_IBM_P (src_mode))
22294 cvt = sext_optab;
22295 else
22296 do_move = true;
22297 break;
22298
22299 case E_SImode:
22300 if (unsigned_p)
22301 {
22302 cvt = ufloat_optab;
22303 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22304 }
22305 else
22306 {
22307 cvt = sfloat_optab;
22308 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22309 }
22310 break;
22311
22312 case E_DImode:
22313 if (unsigned_p)
22314 {
22315 cvt = ufloat_optab;
22316 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22317 }
22318 else
22319 {
22320 cvt = sfloat_optab;
22321 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22322 }
22323 break;
22324
22325 default:
22326 gcc_unreachable ();
22327 }
22328 }
22329
22330 /* Convert from IEEE 128-bit floating point. */
22331 else if (FLOAT128_IEEE_P (src_mode))
22332 {
22333 if (src_mode == KFmode)
22334 kf_or_tf = 0;
22335 else if (src_mode == TFmode)
22336 kf_or_tf = 1;
22337 else
22338 gcc_unreachable ();
22339
22340 switch (dest_mode)
22341 {
22342 case E_DFmode:
22343 cvt = trunc_optab;
22344 hw_convert = hw_conversions[kf_or_tf].to_df;
22345 break;
22346
22347 case E_SFmode:
22348 cvt = trunc_optab;
22349 hw_convert = hw_conversions[kf_or_tf].to_sf;
22350 break;
22351
22352 case E_KFmode:
22353 case E_IFmode:
22354 case E_TFmode:
22355 if (FLOAT128_IBM_P (dest_mode))
22356 cvt = trunc_optab;
22357 else
22358 do_move = true;
22359 break;
22360
22361 case E_SImode:
22362 if (unsigned_p)
22363 {
22364 cvt = ufix_optab;
22365 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22366 }
22367 else
22368 {
22369 cvt = sfix_optab;
22370 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22371 }
22372 break;
22373
22374 case E_DImode:
22375 if (unsigned_p)
22376 {
22377 cvt = ufix_optab;
22378 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22379 }
22380 else
22381 {
22382 cvt = sfix_optab;
22383 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22384 }
22385 break;
22386
22387 default:
22388 gcc_unreachable ();
22389 }
22390 }
22391
22392 /* Both IBM format. */
22393 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22394 do_move = true;
22395
22396 else
22397 gcc_unreachable ();
22398
22399 /* Handle conversion between TFmode/KFmode/IFmode. */
22400 if (do_move)
22401 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22402
22403 /* Handle conversion if we have hardware support. */
22404 else if (TARGET_FLOAT128_HW && hw_convert)
22405 emit_insn ((hw_convert) (dest, src));
22406
22407 /* Call an external function to do the conversion. */
22408 else if (cvt != unknown_optab)
22409 {
22410 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22411 gcc_assert (libfunc != NULL_RTX);
22412
22413 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22414 src, src_mode);
22415
22416 gcc_assert (dest2 != NULL_RTX);
22417 if (!rtx_equal_p (dest, dest2))
22418 emit_move_insn (dest, dest2);
22419 }
22420
22421 else
22422 gcc_unreachable ();
22423
22424 return;
22425 }
22426
22427 \f
22428 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22429 can be used as that dest register. Return the dest register. */
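/* A sketch of the reduction performed here: an EQ/NE test of OP1
   against OP2 becomes a test of the returned register against zero,

     scratch = op1 ^ op2          // OP2 is a register or logical const
     scratch = op1 + (-op2)       // any other OP2

   and either way the result is zero exactly when OP1 == OP2.  */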
22430
22431 rtx
22432 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22433 {
22434 if (op2 == const0_rtx)
22435 return op1;
22436
22437 if (GET_CODE (scratch) == SCRATCH)
22438 scratch = gen_reg_rtx (mode);
22439
22440 if (logical_operand (op2, mode))
22441 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22442 else
22443 emit_insn (gen_rtx_SET (scratch,
22444 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22445
22446 return scratch;
22447 }
22448
22449 void
22450 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22451 {
22452 rtx condition_rtx;
22453 machine_mode op_mode;
22454 enum rtx_code cond_code;
22455 rtx result = operands[0];
22456
22457 condition_rtx = rs6000_generate_compare (operands[1], mode);
22458 cond_code = GET_CODE (condition_rtx);
22459
22460 if (cond_code == NE
22461 || cond_code == GE || cond_code == LE
22462 || cond_code == GEU || cond_code == LEU
22463 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22464 {
22465 rtx not_result = gen_reg_rtx (CCEQmode);
22466 rtx not_op, rev_cond_rtx;
22467 machine_mode cc_mode;
22468
22469 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22470
22471 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22472 SImode, XEXP (condition_rtx, 0), const0_rtx);
22473 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22474 emit_insn (gen_rtx_SET (not_result, not_op));
22475 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22476 }
22477
22478 op_mode = GET_MODE (XEXP (operands[1], 0));
22479 if (op_mode == VOIDmode)
22480 op_mode = GET_MODE (XEXP (operands[1], 1));
22481
22482 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22483 {
22484 PUT_MODE (condition_rtx, DImode);
22485 convert_move (result, condition_rtx, 0);
22486 }
22487 else
22488 {
22489 PUT_MODE (condition_rtx, SImode);
22490 emit_insn (gen_rtx_SET (result, condition_rtx));
22491 }
22492 }
22493
22494 /* Emit a conditional branch. OPERANDS[0] is the comparison to test and OPERANDS[3] is the label to branch to. */
22495
22496 void
22497 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22498 {
22499 rtx condition_rtx, loc_ref;
22500
22501 condition_rtx = rs6000_generate_compare (operands[0], mode);
22502 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22503 emit_jump_insn (gen_rtx_SET (pc_rtx,
22504 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22505 loc_ref, pc_rtx)));
22506 }
22507
22508 /* Return the string to output a conditional branch to LABEL, which is
22509 the operand template of the label, or NULL if the branch is really a
22510 conditional return.
22511
22512 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22513 condition code register and its mode specifies what kind of
22514 comparison we made.
22515
22516 REVERSED is nonzero if we should reverse the sense of the comparison.
22517
22518 INSN is the insn. */
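/* For example (a sketch; "L42" stands in for the real label operand),
   a likely-taken NE on CR0 with an in-range target prints as
   "bne+ 0,L42", while an out-of-range branch is reversed and
   lengthened to "beq 0,$+8\n\tb L42".  */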
22519
22520 char *
22521 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22522 {
22523 static char string[64];
22524 enum rtx_code code = GET_CODE (op);
22525 rtx cc_reg = XEXP (op, 0);
22526 machine_mode mode = GET_MODE (cc_reg);
22527 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22528 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22529 int really_reversed = reversed ^ need_longbranch;
22530 char *s = string;
22531 const char *ccode;
22532 const char *pred;
22533 rtx note;
22534
22535 validate_condition_mode (code, mode);
22536
22537 /* Work out which way this really branches. We could use
22538 reverse_condition_maybe_unordered here in all cases, but
22539 distinguishing the cases makes the resulting assembler clearer. */
22540 if (really_reversed)
22541 {
22542 /* Reversal of FP compares requires care -- an ordered compare
22543 becomes an unordered compare and vice versa. */
22544 if (mode == CCFPmode)
22545 code = reverse_condition_maybe_unordered (code);
22546 else
22547 code = reverse_condition (code);
22548 }
22549
22550 switch (code)
22551 {
22552 /* Not all of these are actually distinct opcodes, but
22553 we distinguish them for clarity of the resulting assembler. */
22554 case NE: case LTGT:
22555 ccode = "ne"; break;
22556 case EQ: case UNEQ:
22557 ccode = "eq"; break;
22558 case GE: case GEU:
22559 ccode = "ge"; break;
22560 case GT: case GTU: case UNGT:
22561 ccode = "gt"; break;
22562 case LE: case LEU:
22563 ccode = "le"; break;
22564 case LT: case LTU: case UNLT:
22565 ccode = "lt"; break;
22566 case UNORDERED: ccode = "un"; break;
22567 case ORDERED: ccode = "nu"; break;
22568 case UNGE: ccode = "nl"; break;
22569 case UNLE: ccode = "ng"; break;
22570 default:
22571 gcc_unreachable ();
22572 }
22573
22574 /* Maybe we have a guess as to how likely the branch is. */
22575 pred = "";
22576 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22577 if (note != NULL_RTX)
22578 {
22579 /* PROB is the difference from 50%. */
22580 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22581 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22582
22583 /* Only hint for highly probable/improbable branches on newer cpus when
22584 we have real profile data, as static prediction overrides processor
22585 dynamic prediction. For older cpus we may as well always hint, but
22586 assume not taken for branches that are very close to 50% as a
22587 mispredicted taken branch is more expensive than a
22588 mispredicted not-taken branch. */
22589 if (rs6000_always_hint
22590 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22591 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22592 && br_prob_note_reliable_p (note)))
22593 {
22594 if (abs (prob) > REG_BR_PROB_BASE / 20
22595 && ((prob > 0) ^ need_longbranch))
22596 pred = "+";
22597 else
22598 pred = "-";
22599 }
22600 }
22601
22602 if (label == NULL)
22603 s += sprintf (s, "b%slr%s ", ccode, pred);
22604 else
22605 s += sprintf (s, "b%s%s ", ccode, pred);
22606
22607 /* We need to escape any '%' characters in the reg_names string.
22608 Assume they'd only be the first character.... */
22609 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22610 *s++ = '%';
22611 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22612
22613 if (label != NULL)
22614 {
22615 /* If the branch distance was too far, we may have to use an
22616 unconditional branch to go the distance. */
22617 if (need_longbranch)
22618 s += sprintf (s, ",$+8\n\tb %s", label);
22619 else
22620 s += sprintf (s, ",%s", label);
22621 }
22622
22623 return string;
22624 }
22625
22626 /* Emit a VSX or Altivec compare and return the mask register, or NULL_RTX if the comparison has no direct instruction. */
22627
22628 static rtx
22629 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22630 {
22631 rtx mask;
22632 machine_mode mode = GET_MODE (op0);
22633
22634 switch (code)
22635 {
22636 default:
22637 break;
22638
22639 case GE:
22640 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22641 return NULL_RTX;
22642 /* FALLTHRU */
22643
22644 case EQ:
22645 case GT:
22646 case GTU:
22647 case ORDERED:
22648 case UNORDERED:
22649 case UNEQ:
22650 case LTGT:
22651 mask = gen_reg_rtx (mode);
22652 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22653 return mask;
22654 }
22655
22656 return NULL_RTX;
22657 }
22658
22659 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22660 DMODE is the expected destination mode. This is a recursive function. */
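/* A sketch of the rewrites tried below when the comparison has no
   direct vector instruction:

     LT (a, b)  ->  GT (b, a)                  swap the operands
     NE (a, b)  ->  NOT (EQ (a, b))            invert the reversed test
     GE (a, b)  ->  GT (a, b) IOR EQ (a, b)    compound test  */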
22661
22662 static rtx
22663 rs6000_emit_vector_compare (enum rtx_code rcode,
22664 rtx op0, rtx op1,
22665 machine_mode dmode)
22666 {
22667 rtx mask;
22668 bool swap_operands = false;
22669 bool try_again = false;
22670
22671 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22672 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22673
22674 /* See if the comparison works as is. */
22675 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22676 if (mask)
22677 return mask;
22678
22679 switch (rcode)
22680 {
22681 case LT:
22682 rcode = GT;
22683 swap_operands = true;
22684 try_again = true;
22685 break;
22686 case LTU:
22687 rcode = GTU;
22688 swap_operands = true;
22689 try_again = true;
22690 break;
22691 case NE:
22692 case UNLE:
22693 case UNLT:
22694 case UNGE:
22695 case UNGT:
22696 /* Invert condition and try again.
22697 e.g., A != B becomes ~(A==B). */
22698 {
22699 enum rtx_code rev_code;
22700 enum insn_code nor_code;
22701 rtx mask2;
22702
22703 rev_code = reverse_condition_maybe_unordered (rcode);
22704 if (rev_code == UNKNOWN)
22705 return NULL_RTX;
22706
22707 nor_code = optab_handler (one_cmpl_optab, dmode);
22708 if (nor_code == CODE_FOR_nothing)
22709 return NULL_RTX;
22710
22711 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22712 if (!mask2)
22713 return NULL_RTX;
22714
22715 mask = gen_reg_rtx (dmode);
22716 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22717 return mask;
22718 }
22719 break;
22720 case GE:
22721 case GEU:
22722 case LE:
22723 case LEU:
22724 /* Try GT/GTU/LT/LTU OR EQ */
22725 {
22726 rtx c_rtx, eq_rtx;
22727 enum insn_code ior_code;
22728 enum rtx_code new_code;
22729
22730 switch (rcode)
22731 {
22732 case GE:
22733 new_code = GT;
22734 break;
22735
22736 case GEU:
22737 new_code = GTU;
22738 break;
22739
22740 case LE:
22741 new_code = LT;
22742 break;
22743
22744 case LEU:
22745 new_code = LTU;
22746 break;
22747
22748 default:
22749 gcc_unreachable ();
22750 }
22751
22752 ior_code = optab_handler (ior_optab, dmode);
22753 if (ior_code == CODE_FOR_nothing)
22754 return NULL_RTX;
22755
22756 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22757 if (!c_rtx)
22758 return NULL_RTX;
22759
22760 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22761 if (!eq_rtx)
22762 return NULL_RTX;
22763
22764 mask = gen_reg_rtx (dmode);
22765 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22766 return mask;
22767 }
22768 break;
22769 default:
22770 return NULL_RTX;
22771 }
22772
22773 if (try_again)
22774 {
22775 if (swap_operands)
22776 std::swap (op0, op1);
22777
22778 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22779 if (mask)
22780 return mask;
22781 }
22782
22783 /* You only get two chances. */
22784 return NULL_RTX;
22785 }
22786
22787 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22788 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22789 operands for the relation operation COND. */
22790
22791 int
22792 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22793 rtx cond, rtx cc_op0, rtx cc_op1)
22794 {
22795 machine_mode dest_mode = GET_MODE (dest);
22796 machine_mode mask_mode = GET_MODE (cc_op0);
22797 enum rtx_code rcode = GET_CODE (cond);
22798 machine_mode cc_mode = CCmode;
22799 rtx mask;
22800 rtx cond2;
22801 bool invert_move = false;
22802
22803 if (VECTOR_UNIT_NONE_P (dest_mode))
22804 return 0;
22805
22806 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22807 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22808
22809 switch (rcode)
22810 {
22811 /* Swap operands if we can, or fall back to doing the operation as
22812 specified and emitting a NOR to invert the test.
22813 case NE:
22814 case UNLE:
22815 case UNLT:
22816 case UNGE:
22817 case UNGT:
22818 /* Invert condition and try again.
22819 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22820 invert_move = true;
22821 rcode = reverse_condition_maybe_unordered (rcode);
22822 if (rcode == UNKNOWN)
22823 return 0;
22824 break;
22825
22826 case GE:
22827 case LE:
22828 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22829 {
22830 /* Invert condition to avoid compound test. */
22831 invert_move = true;
22832 rcode = reverse_condition (rcode);
22833 }
22834 break;
22835
22836 case GTU:
22837 case GEU:
22838 case LTU:
22839 case LEU:
22840 /* Mark unsigned tests with CCUNSmode. */
22841 cc_mode = CCUNSmode;
22842
22843 /* Invert condition to avoid compound test if necessary. */
22844 if (rcode == GEU || rcode == LEU)
22845 {
22846 invert_move = true;
22847 rcode = reverse_condition (rcode);
22848 }
22849 break;
22850
22851 default:
22852 break;
22853 }
22854
22855 /* Get the vector mask for the given relational operations. */
22856 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22857
22858 if (!mask)
22859 return 0;
22860
22861 if (invert_move)
22862 std::swap (op_true, op_false);
22863
22864 /* The comparison mask is -1/0 in each element; use it directly when OP_TRUE/OP_FALSE are the constant vectors -1/0. */
22865 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22866 && (GET_CODE (op_true) == CONST_VECTOR
22867 || GET_CODE (op_false) == CONST_VECTOR))
22868 {
22869 rtx constant_0 = CONST0_RTX (dest_mode);
22870 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22871
22872 if (op_true == constant_m1 && op_false == constant_0)
22873 {
22874 emit_move_insn (dest, mask);
22875 return 1;
22876 }
22877
22878 else if (op_true == constant_0 && op_false == constant_m1)
22879 {
22880 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22881 return 1;
22882 }
22883
22884 /* If we can't use the vector comparison directly, perhaps we can use
22885 the mask for the true or false fields, instead of loading up a
22886 constant. */
22887 if (op_true == constant_m1)
22888 op_true = mask;
22889
22890 if (op_false == constant_0)
22891 op_false = mask;
22892 }
22893
22894 if (!REG_P (op_true) && !SUBREG_P (op_true))
22895 op_true = force_reg (dest_mode, op_true);
22896
22897 if (!REG_P (op_false) && !SUBREG_P (op_false))
22898 op_false = force_reg (dest_mode, op_false);
22899
22900 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22901 CONST0_RTX (dest_mode));
22902 emit_insn (gen_rtx_SET (dest,
22903 gen_rtx_IF_THEN_ELSE (dest_mode,
22904 cond2,
22905 op_true,
22906 op_false)));
22907 return 1;
22908 }
22909
22910 /* ISA 3.0 (power9) minmax subcase to emit an XSMAXCDP or XSMINCDP instruction
22911 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands
22912 of the last comparison is nonzero/true, FALSE_COND if it is zero/false.
22913 Return 0 if the hardware has no such operation. */
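/* For example (a sketch), "d = (a >= b) ? a : b" in DFmode becomes a
   single xsmaxcdp, while the swapped arms "d = (a >= b) ? b : a"
   become xsmincdp.  */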
22914
22915 static int
22916 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22917 {
22918 enum rtx_code code = GET_CODE (op);
22919 rtx op0 = XEXP (op, 0);
22920 rtx op1 = XEXP (op, 1);
22921 machine_mode compare_mode = GET_MODE (op0);
22922 machine_mode result_mode = GET_MODE (dest);
22923 bool max_p = false;
22924
22925 if (result_mode != compare_mode)
22926 return 0;
22927
22928 if (code == GE || code == GT)
22929 max_p = true;
22930 else if (code == LE || code == LT)
22931 max_p = false;
22932 else
22933 return 0;
22934
22935 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22936 ;
22937
22938 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22939 max_p = !max_p;
22940
22941 else
22942 return 0;
22943
22944 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22945 return 1;
22946 }
22947
22948 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22949 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22950 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22951 zero/false. Return 0 if the hardware has no such operation. */
22952
22953 static int
22954 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22955 {
22956 enum rtx_code code = GET_CODE (op);
22957 rtx op0 = XEXP (op, 0);
22958 rtx op1 = XEXP (op, 1);
22959 machine_mode result_mode = GET_MODE (dest);
22960 rtx compare_rtx;
22961 rtx cmove_rtx;
22962 rtx clobber_rtx;
22963
22964 if (!can_create_pseudo_p ())
22965 return 0;
22966
22967 switch (code)
22968 {
22969 case EQ:
22970 case GE:
22971 case GT:
22972 break;
22973
22974 case NE:
22975 case LT:
22976 case LE:
22977 code = swap_condition (code);
22978 std::swap (op0, op1);
22979 break;
22980
22981 default:
22982 return 0;
22983 }
22984
22985 /* Generate: [(parallel [(set (dest)
22986 (if_then_else (op (cmp1) (cmp2))
22987 (true)
22988 (false)))
22989 (clobber (scratch))])]. */
22990
22991 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22992 cmove_rtx = gen_rtx_SET (dest,
22993 gen_rtx_IF_THEN_ELSE (result_mode,
22994 compare_rtx,
22995 true_cond,
22996 false_cond));
22997
22998 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22999 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23000 gen_rtvec (2, cmove_rtx, clobber_rtx)));
23001
23002 return 1;
23003 }
23004
23005 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
23006 operands of the last comparison is nonzero/true, FALSE_COND if it
23007 is zero/false. Return 0 if the hardware has no such operation. */
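/* A sketch of the fsel-based lowering used below for floating-point
   compares: "d = (a >= b) ? t : f" is reduced to

     temp = a - b;              // skipped when B is already zero
     d = fsel (temp, t, f);     // T if TEMP >= 0.0, else F

   after the other comparison codes are first massaged into GE form.  */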
23008
23009 int
23010 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23011 {
23012 enum rtx_code code = GET_CODE (op);
23013 rtx op0 = XEXP (op, 0);
23014 rtx op1 = XEXP (op, 1);
23015 machine_mode compare_mode = GET_MODE (op0);
23016 machine_mode result_mode = GET_MODE (dest);
23017 rtx temp;
23018 bool is_against_zero;
23019
23020 /* These modes should always match. */
23021 if (GET_MODE (op1) != compare_mode
23022 /* In the isel case however, we can use a compare immediate, so
23023 op1 may be a small constant. */
23024 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
23025 return 0;
23026 if (GET_MODE (true_cond) != result_mode)
23027 return 0;
23028 if (GET_MODE (false_cond) != result_mode)
23029 return 0;
23030
23031 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
23032 if (TARGET_P9_MINMAX
23033 && (compare_mode == SFmode || compare_mode == DFmode)
23034 && (result_mode == SFmode || result_mode == DFmode))
23035 {
23036 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
23037 return 1;
23038
23039 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
23040 return 1;
23041 }
23042
23043 /* Don't allow using floating point comparisons for integer results for
23044 now. */
23045 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
23046 return 0;
23047
23048 /* First, work out if the hardware can do this at all, or
23049 if it's too slow.... */
23050 if (!FLOAT_MODE_P (compare_mode))
23051 {
23052 if (TARGET_ISEL)
23053 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
23054 return 0;
23055 }
23056
23057 is_against_zero = op1 == CONST0_RTX (compare_mode);
23058
23059 /* A floating-point subtract might overflow, underflow, or produce
23060 an inexact result, thus changing the floating-point flags, so it
23061 can't be generated if we care about that. It's safe if one side
23062 of the construct is zero, since then no subtract will be
23063 generated. */
23064 if (SCALAR_FLOAT_MODE_P (compare_mode)
23065 && flag_trapping_math && ! is_against_zero)
23066 return 0;
23067
23068 /* Eliminate half of the comparisons by switching operands; this
23069 makes the remaining code simpler. */
23070 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
23071 || code == LTGT || code == LT || code == UNLE)
23072 {
23073 code = reverse_condition_maybe_unordered (code);
23074 temp = true_cond;
23075 true_cond = false_cond;
23076 false_cond = temp;
23077 }
23078
23079 /* UNEQ and LTGT take four instructions for a comparison with zero,
23080 so it'll probably be faster to use a branch here too. */
23081 if (code == UNEQ && HONOR_NANS (compare_mode))
23082 return 0;
23083
23084 /* We're going to try to implement comparisons by performing
23085 a subtract, then comparing against zero. Unfortunately,
23086 Inf - Inf is NaN which is not zero, and so if we don't
23087 know that the operand is finite and the comparison
23088 would treat EQ differently from UNORDERED, we can't do it. */
23089 if (HONOR_INFINITIES (compare_mode)
23090 && code != GT && code != UNGE
23091 && (!CONST_DOUBLE_P (op1)
23092 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
23093 /* Constructs of the form (a OP b ? a : b) are safe. */
23094 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
23095 || (! rtx_equal_p (op0, true_cond)
23096 && ! rtx_equal_p (op1, true_cond))))
23097 return 0;
23098
23099 /* At this point we know we can use fsel. */
23100
23101 /* Reduce the comparison to a comparison against zero. */
23102 if (! is_against_zero)
23103 {
23104 temp = gen_reg_rtx (compare_mode);
23105 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
23106 op0 = temp;
23107 op1 = CONST0_RTX (compare_mode);
23108 }
23109
23110 /* If we don't care about NaNs we can reduce some of the comparisons
23111 down to faster ones. */
23112 if (! HONOR_NANS (compare_mode))
23113 switch (code)
23114 {
23115 case GT:
23116 code = LE;
23117 temp = true_cond;
23118 true_cond = false_cond;
23119 false_cond = temp;
23120 break;
23121 case UNGE:
23122 code = GE;
23123 break;
23124 case UNEQ:
23125 code = EQ;
23126 break;
23127 default:
23128 break;
23129 }
23130
23131 /* Now, reduce everything down to a GE. */
23132 switch (code)
23133 {
23134 case GE:
23135 break;
23136
23137 case LE:
23138 temp = gen_reg_rtx (compare_mode);
23139 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23140 op0 = temp;
23141 break;
23142
23143 case ORDERED:
23144 temp = gen_reg_rtx (compare_mode);
23145 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
23146 op0 = temp;
23147 break;
23148
23149 case EQ:
23150 temp = gen_reg_rtx (compare_mode);
23151 emit_insn (gen_rtx_SET (temp,
23152 gen_rtx_NEG (compare_mode,
23153 gen_rtx_ABS (compare_mode, op0))));
23154 op0 = temp;
23155 break;
23156
23157 case UNGE:
23158 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
23159 temp = gen_reg_rtx (result_mode);
23160 emit_insn (gen_rtx_SET (temp,
23161 gen_rtx_IF_THEN_ELSE (result_mode,
23162 gen_rtx_GE (VOIDmode,
23163 op0, op1),
23164 true_cond, false_cond)));
23165 false_cond = true_cond;
23166 true_cond = temp;
23167
23168 temp = gen_reg_rtx (compare_mode);
23169 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23170 op0 = temp;
23171 break;
23172
23173 case GT:
23174 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
23175 temp = gen_reg_rtx (result_mode);
23176 emit_insn (gen_rtx_SET (temp,
23177 gen_rtx_IF_THEN_ELSE (result_mode,
23178 gen_rtx_GE (VOIDmode,
23179 op0, op1),
23180 true_cond, false_cond)));
23181 true_cond = false_cond;
23182 false_cond = temp;
23183
23184 temp = gen_reg_rtx (compare_mode);
23185 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
23186 op0 = temp;
23187 break;
23188
23189 default:
23190 gcc_unreachable ();
23191 }
23192
23193 emit_insn (gen_rtx_SET (dest,
23194 gen_rtx_IF_THEN_ELSE (result_mode,
23195 gen_rtx_GE (VOIDmode,
23196 op0, op1),
23197 true_cond, false_cond)));
23198 return 1;
23199 }
23200
23201 /* Same as above, but for ints (isel). */
23202
23203 int
23204 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
23205 {
23206 rtx condition_rtx, cr;
23207 machine_mode mode = GET_MODE (dest);
23208 enum rtx_code cond_code;
23209 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
23210 bool signedp;
23211
23212 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
23213 return 0;
23214
23215 /* We still have to do the compare, because isel doesn't do a
23216 compare; it just looks at the CRx bits set by a previous compare
23217 instruction. */
23218 condition_rtx = rs6000_generate_compare (op, mode);
23219 cond_code = GET_CODE (condition_rtx);
23220 cr = XEXP (condition_rtx, 0);
23221 signedp = GET_MODE (cr) == CCmode;
23222
23223 isel_func = (mode == SImode
23224 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
23225 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
23226
23227 switch (cond_code)
23228 {
23229 case LT: case GT: case LTU: case GTU: case EQ:
23230 /* isel handles these directly. */
23231 break;
23232
23233 default:
23234 /* We need to swap the sense of the comparison. */
23235 {
23236 std::swap (false_cond, true_cond);
23237 PUT_CODE (condition_rtx, reverse_condition (cond_code));
23238 }
23239 break;
23240 }
23241
23242 false_cond = force_reg (mode, false_cond);
23243 if (true_cond != const0_rtx)
23244 true_cond = force_reg (mode, true_cond);
23245
23246 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
23247
23248 return 1;
23249 }
23250
23251 void
23252 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
23253 {
23254 machine_mode mode = GET_MODE (op0);
23255 enum rtx_code c;
23256 rtx target;
23257
23258 /* VSX/altivec have direct min/max insns. */
23259 if ((code == SMAX || code == SMIN)
23260 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
23261 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
23262 {
23263 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
23264 return;
23265 }
23266
23267 if (code == SMAX || code == SMIN)
23268 c = GE;
23269 else
23270 c = GEU;
23271
23272 if (code == SMAX || code == UMAX)
23273 target = emit_conditional_move (dest, c, op0, op1, mode,
23274 op0, op1, mode, 0);
23275 else
23276 target = emit_conditional_move (dest, c, op0, op1, mode,
23277 op1, op0, mode, 0);
23278 gcc_assert (target);
23279 if (target != dest)
23280 emit_move_insn (dest, target);
23281 }
23282
23283 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23284 COND is true. Mark the jump as unlikely to be taken. */
23285
23286 static void
23287 emit_unlikely_jump (rtx cond, rtx label)
23288 {
23289 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23290 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23291 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23292 }
23293
23294 /* A subroutine of the atomic operation splitters. Emit a load-locked
23295 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23296 the zero_extend operation. */
23297
23298 static void
23299 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23300 {
23301 rtx (*fn) (rtx, rtx) = NULL;
23302
23303 switch (mode)
23304 {
23305 case E_QImode:
23306 fn = gen_load_lockedqi;
23307 break;
23308 case E_HImode:
23309 fn = gen_load_lockedhi;
23310 break;
23311 case E_SImode:
23312 if (GET_MODE (mem) == QImode)
23313 fn = gen_load_lockedqi_si;
23314 else if (GET_MODE (mem) == HImode)
23315 fn = gen_load_lockedhi_si;
23316 else
23317 fn = gen_load_lockedsi;
23318 break;
23319 case E_DImode:
23320 fn = gen_load_lockeddi;
23321 break;
23322 case E_TImode:
23323 fn = gen_load_lockedti;
23324 break;
23325 default:
23326 gcc_unreachable ();
23327 }
23328 emit_insn (fn (reg, mem));
23329 }
23330
23331 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23332 instruction in MODE. */
23333
23334 static void
23335 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23336 {
23337 rtx (*fn) (rtx, rtx, rtx) = NULL;
23338
23339 switch (mode)
23340 {
23341 case E_QImode:
23342 fn = gen_store_conditionalqi;
23343 break;
23344 case E_HImode:
23345 fn = gen_store_conditionalhi;
23346 break;
23347 case E_SImode:
23348 fn = gen_store_conditionalsi;
23349 break;
23350 case E_DImode:
23351 fn = gen_store_conditionaldi;
23352 break;
23353 case E_TImode:
23354 fn = gen_store_conditionalti;
23355 break;
23356 default:
23357 gcc_unreachable ();
23358 }
23359
23360 /* Emit sync before stwcx. to address PPC405 Erratum. */
23361 if (PPC405_ERRATUM77)
23362 emit_insn (gen_hwsync ());
23363
23364 emit_insn (fn (res, mem, val));
23365 }
23366
23367 /* Expand barriers before and after a load_locked/store_cond sequence. */
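/* A sketch of the fences emitted around the sequence for each memory
   model:

     model     before ll/sc   after ll/sc
     relaxed   (none)         (none)
     consume   (none)         (none)
     acquire   (none)         isync
     release   lwsync         (none)
     acq_rel   lwsync         isync
     seq_cst   hwsync         isync  */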
23368
23369 static rtx
23370 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23371 {
23372 rtx addr = XEXP (mem, 0);
23373
23374 if (!legitimate_indirect_address_p (addr, reload_completed)
23375 && !legitimate_indexed_address_p (addr, reload_completed))
23376 {
23377 addr = force_reg (Pmode, addr);
23378 mem = replace_equiv_address_nv (mem, addr);
23379 }
23380
23381 switch (model)
23382 {
23383 case MEMMODEL_RELAXED:
23384 case MEMMODEL_CONSUME:
23385 case MEMMODEL_ACQUIRE:
23386 break;
23387 case MEMMODEL_RELEASE:
23388 case MEMMODEL_ACQ_REL:
23389 emit_insn (gen_lwsync ());
23390 break;
23391 case MEMMODEL_SEQ_CST:
23392 emit_insn (gen_hwsync ());
23393 break;
23394 default:
23395 gcc_unreachable ();
23396 }
23397 return mem;
23398 }
23399
23400 static void
23401 rs6000_post_atomic_barrier (enum memmodel model)
23402 {
23403 switch (model)
23404 {
23405 case MEMMODEL_RELAXED:
23406 case MEMMODEL_CONSUME:
23407 case MEMMODEL_RELEASE:
23408 break;
23409 case MEMMODEL_ACQUIRE:
23410 case MEMMODEL_ACQ_REL:
23411 case MEMMODEL_SEQ_CST:
23412 emit_insn (gen_isync ());
23413 break;
23414 default:
23415 gcc_unreachable ();
23416 }
23417 }
23418
23419 /* A subroutine of the various atomic expanders. For sub-word operations,
23420 we must adjust things to operate on SImode. Given the original MEM,
23421 return a new aligned memory. Also build and return the quantities by
23422 which to shift and mask. */
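/* As a worked example (a sketch for a big-endian QImode access at byte
   address A):

     align = A & -4;                // the word containing the byte
     shift = ((A & 3) * 8) ^ 24;    // bit offset of the byte within it
     mask  = 0xff << shift;         // insertion mask

   so the subword can then be updated inside an SImode lwarx/stwcx.
   loop.  */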
23423
23424 static rtx
23425 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23426 {
23427 rtx addr, align, shift, mask, mem;
23428 HOST_WIDE_INT shift_mask;
23429 machine_mode mode = GET_MODE (orig_mem);
23430
23431 /* For smaller modes, we have to implement this via SImode. */
23432 shift_mask = (mode == QImode ? 0x18 : 0x10);
23433
23434 addr = XEXP (orig_mem, 0);
23435 addr = force_reg (GET_MODE (addr), addr);
23436
23437 /* Aligned memory containing subword. Generate a new memory. We
23438 do not want any of the existing MEM_ATTR data, as we're now
23439 accessing memory outside the original object. */
23440 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23441 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23442 mem = gen_rtx_MEM (SImode, align);
23443 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23444 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23445 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23446
23447 /* Shift amount for subword relative to aligned word. */
23448 shift = gen_reg_rtx (SImode);
23449 addr = gen_lowpart (SImode, addr);
23450 rtx tmp = gen_reg_rtx (SImode);
23451 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23452 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23453 if (BYTES_BIG_ENDIAN)
23454 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23455 shift, 1, OPTAB_LIB_WIDEN);
23456 *pshift = shift;
23457
23458 /* Mask for insertion. */
23459 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23460 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23461 *pmask = mask;
23462
23463 return mem;
23464 }
23465
23466 /* A subroutine of the various atomic expanders. For sub-word operands,
23467 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23468
23469 static rtx
23470 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23471 {
23472 rtx x;
23473
23474 x = gen_reg_rtx (SImode);
23475 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23476 gen_rtx_NOT (SImode, mask),
23477 oldval)));
23478
23479 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23480
23481 return x;
23482 }
23483
23484 /* A subroutine of the various atomic expanders. For sub-word operands,
23485 extract WIDE to NARROW via SHIFT. */
23486
23487 static void
23488 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23489 {
23490 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23491 wide, 1, OPTAB_LIB_WIDEN);
23492 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23493 }
23494
23495 /* Expand an atomic compare and swap operation. */
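/* A sketch of the sequence generated for a strong SImode compare and
   swap (the fences implied by the memory model are omitted; CR0 is
   left EQ on success and NE on failure):

     loop:  lwarx   ret,0,mem
            cmpw    cr0,ret,oldval
            bne-    cr0,fail
            stwcx.  newval,0,mem
            bne-    cr0,loop
     fail:  */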
23496
23497 void
23498 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23499 {
23500 rtx boolval, retval, mem, oldval, newval, cond;
23501 rtx label1, label2, x, mask, shift;
23502 machine_mode mode, orig_mode;
23503 enum memmodel mod_s, mod_f;
23504 bool is_weak;
23505
23506 boolval = operands[0];
23507 retval = operands[1];
23508 mem = operands[2];
23509 oldval = operands[3];
23510 newval = operands[4];
23511 is_weak = (INTVAL (operands[5]) != 0);
23512 mod_s = memmodel_base (INTVAL (operands[6]));
23513 mod_f = memmodel_base (INTVAL (operands[7]));
23514 orig_mode = mode = GET_MODE (mem);
23515
23516 mask = shift = NULL_RTX;
23517 if (mode == QImode || mode == HImode)
23518 {
23519 /* Before power8, we didn't have access to lbarx/lharx, so we generate
23520 lwarx plus shift/mask operations. With power8, we need to do the
23521 comparison in SImode, but the store is still done in QI/HImode. */
23522 oldval = convert_modes (SImode, mode, oldval, 1);
23523
23524 if (!TARGET_SYNC_HI_QI)
23525 {
23526 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23527
23528 /* Shift and mask OLDVAL into position within the word. */
23529 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23530 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23531
23532 /* Shift and mask NEWVAL into position within the word. */
23533 newval = convert_modes (SImode, mode, newval, 1);
23534 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23535 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23536 }
23537
23538 /* Prepare to adjust the return value. */
23539 retval = gen_reg_rtx (SImode);
23540 mode = SImode;
23541 }
23542 else if (reg_overlap_mentioned_p (retval, oldval))
23543 oldval = copy_to_reg (oldval);
23544
23545 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23546 oldval = copy_to_mode_reg (mode, oldval);
23547
23548 if (reg_overlap_mentioned_p (retval, newval))
23549 newval = copy_to_reg (newval);
23550
23551 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23552
23553 label1 = NULL_RTX;
23554 if (!is_weak)
23555 {
23556 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23557 emit_label (XEXP (label1, 0));
23558 }
23559 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23560
23561 emit_load_locked (mode, retval, mem);
23562
23563 x = retval;
23564 if (mask)
23565 x = expand_simple_binop (SImode, AND, retval, mask,
23566 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23567
23568 cond = gen_reg_rtx (CCmode);
23569 /* If we have TImode, synthesize a comparison. */
23570 if (mode != TImode)
23571 x = gen_rtx_COMPARE (CCmode, x, oldval);
23572 else
23573 {
23574 rtx xor1_result = gen_reg_rtx (DImode);
23575 rtx xor2_result = gen_reg_rtx (DImode);
23576 rtx or_result = gen_reg_rtx (DImode);
23577 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23578 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23579 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23580 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23581
23582 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23583 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23584 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23585 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23586 }
23587
23588 emit_insn (gen_rtx_SET (cond, x));
23589
23590 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23591 emit_unlikely_jump (x, label2);
23592
23593 x = newval;
23594 if (mask)
23595 x = rs6000_mask_atomic_subword (retval, newval, mask);
23596
23597 emit_store_conditional (orig_mode, cond, mem, x);
23598
23599 if (!is_weak)
23600 {
23601 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23602 emit_unlikely_jump (x, label1);
23603 }
23604
23605 if (!is_mm_relaxed (mod_f))
23606 emit_label (XEXP (label2, 0));
23607
23608 rs6000_post_atomic_barrier (mod_s);
23609
23610 if (is_mm_relaxed (mod_f))
23611 emit_label (XEXP (label2, 0));
23612
23613 if (shift)
23614 rs6000_finish_atomic_subword (operands[1], retval, shift);
23615 else if (mode != GET_MODE (operands[1]))
23616 convert_move (operands[1], retval, 1);
23617
23618 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23619 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23620 emit_insn (gen_rtx_SET (boolval, x));
23621 }
23622
23623 /* Expand an atomic exchange operation. */
23624
23625 void
23626 rs6000_expand_atomic_exchange (rtx operands[])
23627 {
23628 rtx retval, mem, val, cond;
23629 machine_mode mode;
23630 enum memmodel model;
23631 rtx label, x, mask, shift;
23632
23633 retval = operands[0];
23634 mem = operands[1];
23635 val = operands[2];
23636 model = memmodel_base (INTVAL (operands[3]));
23637 mode = GET_MODE (mem);
23638
23639 mask = shift = NULL_RTX;
23640 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23641 {
23642 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23643
23644 /* Shift and mask VAL into position within the word. */
23645 val = convert_modes (SImode, mode, val, 1);
23646 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23647 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23648
23649 /* Prepare to adjust the return value. */
23650 retval = gen_reg_rtx (SImode);
23651 mode = SImode;
23652 }
23653
23654 mem = rs6000_pre_atomic_barrier (mem, model);
23655
23656 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23657 emit_label (XEXP (label, 0));
23658
23659 emit_load_locked (mode, retval, mem);
23660
23661 x = val;
23662 if (mask)
23663 x = rs6000_mask_atomic_subword (retval, val, mask);
23664
23665 cond = gen_reg_rtx (CCmode);
23666 emit_store_conditional (mode, cond, mem, x);
23667
23668 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23669 emit_unlikely_jump (x, label);
23670
23671 rs6000_post_atomic_barrier (model);
23672
23673 if (shift)
23674 rs6000_finish_atomic_subword (operands[0], retval, shift);
23675 }
23676
23677 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23678 to perform. MEM is the memory on which to operate. VAL is the second
23679 operand of the binary operator. BEFORE and AFTER are optional locations to
23680 return the value of MEM either before or after the operation. MODEL_RTX
23681 is a CONST_INT containing the memory model to use. */
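/* A sketch of the loop generated for, e.g., an SImode fetch-and-add
   (the fences implied by MODEL_RTX are omitted):

     loop:  lwarx   before,0,mem
            add     after,before,val
            stwcx.  after,0,mem
            bne-    cr0,loop  */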
23682
23683 void
23684 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23685 rtx orig_before, rtx orig_after, rtx model_rtx)
23686 {
23687 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23688 machine_mode mode = GET_MODE (mem);
23689 machine_mode store_mode = mode;
23690 rtx label, x, cond, mask, shift;
23691 rtx before = orig_before, after = orig_after;
23692
23693 mask = shift = NULL_RTX;
23694 /* On power8, we want to use SImode for the operation. On previous systems,
23695 do the operation on the containing SImode word and shift/mask to get
23696 the proper byte or halfword. */
23697 if (mode == QImode || mode == HImode)
23698 {
23699 if (TARGET_SYNC_HI_QI)
23700 {
23701 val = convert_modes (SImode, mode, val, 1);
23702
23703 /* Prepare to adjust the return value. */
23704 before = gen_reg_rtx (SImode);
23705 if (after)
23706 after = gen_reg_rtx (SImode);
23707 mode = SImode;
23708 }
23709 else
23710 {
23711 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23712
23713 /* Shift and mask VAL into position within the word. */
23714 val = convert_modes (SImode, mode, val, 1);
23715 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23716 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23717
23718 switch (code)
23719 {
23720 case IOR:
23721 case XOR:
23722 /* We've already zero-extended VAL. That is sufficient to
23723 make certain that it does not affect other bits. */
23724 mask = NULL;
23725 break;
23726
23727 case AND:
23728 /* If we make certain that all of the other bits in VAL are
23729 set, that will be sufficient to not affect other bits. */
23730 x = gen_rtx_NOT (SImode, mask);
23731 x = gen_rtx_IOR (SImode, x, val);
23732 emit_insn (gen_rtx_SET (val, x));
23733 mask = NULL;
23734 break;
23735
23736 case NOT:
23737 case PLUS:
23738 case MINUS:
23739 /* These will all affect bits outside the field and need
23740 adjustment via MASK within the loop. */
23741 break;
23742
23743 default:
23744 gcc_unreachable ();
23745 }
23746
23747 /* Prepare to adjust the return value. */
23748 before = gen_reg_rtx (SImode);
23749 if (after)
23750 after = gen_reg_rtx (SImode);
23751 store_mode = mode = SImode;
23752 }
23753 }
23754
23755 mem = rs6000_pre_atomic_barrier (mem, model);
23756
23757 label = gen_label_rtx ();
23758 emit_label (label);
23759 label = gen_rtx_LABEL_REF (VOIDmode, label);
23760
23761 if (before == NULL_RTX)
23762 before = gen_reg_rtx (mode);
23763
23764 emit_load_locked (mode, before, mem);
23765
23766 if (code == NOT)
23767 {
23768 x = expand_simple_binop (mode, AND, before, val,
23769 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23770 after = expand_simple_unop (mode, NOT, x, after, 1);
23771 }
23772 else
23773 {
23774 after = expand_simple_binop (mode, code, before, val,
23775 after, 1, OPTAB_LIB_WIDEN);
23776 }
23777
23778 x = after;
23779 if (mask)
23780 {
23781 x = expand_simple_binop (SImode, AND, after, mask,
23782 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23783 x = rs6000_mask_atomic_subword (before, x, mask);
23784 }
23785 else if (store_mode != mode)
23786 x = convert_modes (store_mode, mode, x, 1);
23787
23788 cond = gen_reg_rtx (CCmode);
23789 emit_store_conditional (store_mode, cond, mem, x);
23790
23791 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23792 emit_unlikely_jump (x, label);
23793
23794 rs6000_post_atomic_barrier (model);
23795
23796 if (shift)
23797 {
23798 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23799 then do the calculations in an SImode register. */
23800 if (orig_before)
23801 rs6000_finish_atomic_subword (orig_before, before, shift);
23802 if (orig_after)
23803 rs6000_finish_atomic_subword (orig_after, after, shift);
23804 }
23805 else if (store_mode != mode)
23806 {
23807 /* QImode/HImode on machines with lbarx/lharx where we do the native
23808 operation and then do the calculations in an SImode register. */
23809 if (orig_before)
23810 convert_move (orig_before, before, 1);
23811 if (orig_after)
23812 convert_move (orig_after, after, 1);
23813 }
23814 else if (orig_after && after != orig_after)
23815 emit_move_insn (orig_after, after);
23816 }
23817
23818 /* Emit instructions to move SRC to DST. Called by splitters for
23819 multi-register moves. It will emit at most one instruction for
23820 each register that is accessed; that is, it won't emit li/lis pairs
23821 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23822 register. */
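/* For example (a sketch), a TImode copy between 64-bit GPR pairs is
   split into two DImode moves; a destructive overlap such as
   r4:r5 <- r3:r4 is handled by moving the higher-numbered register
   first (r5 <- r4, then r4 <- r3).  */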
23823
23824 void
23825 rs6000_split_multireg_move (rtx dst, rtx src)
23826 {
23827 /* The register number of the first register being moved. */
23828 int reg;
23829 /* The mode that is to be moved. */
23830 machine_mode mode;
23831 /* The mode that the move is being done in, and its size. */
23832 machine_mode reg_mode;
23833 int reg_mode_size;
23834 /* The number of registers that will be moved. */
23835 int nregs;
23836
23837 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23838 mode = GET_MODE (dst);
23839 nregs = hard_regno_nregs (reg, mode);
23840 if (FP_REGNO_P (reg))
23841 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23842 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23843 else if (ALTIVEC_REGNO_P (reg))
23844 reg_mode = V16QImode;
23845 else
23846 reg_mode = word_mode;
23847 reg_mode_size = GET_MODE_SIZE (reg_mode);
23848
23849 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23850
23851 /* TDmode residing in FP registers is special, since the ISA requires that
23852 the lower-numbered word of a register pair is always the most significant
23853 word, even in little-endian mode. This does not match the usual subreg
23854 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23855 the appropriate constituent registers "by hand" in little-endian mode.
23856
23857 Note we do not need to check for destructive overlap here since TDmode
23858 can only reside in even/odd register pairs. */
23859 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23860 {
23861 rtx p_src, p_dst;
23862 int i;
23863
23864 for (i = 0; i < nregs; i++)
23865 {
23866 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23867 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23868 else
23869 p_src = simplify_gen_subreg (reg_mode, src, mode,
23870 i * reg_mode_size);
23871
23872 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23873 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23874 else
23875 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23876 i * reg_mode_size);
23877
23878 emit_insn (gen_rtx_SET (p_dst, p_src));
23879 }
23880
23881 return;
23882 }
23883
23884 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23885 {
23886 /* Move register range backwards, if we might have destructive
23887 overlap. */
23888 int i;
23889 for (i = nregs - 1; i >= 0; i--)
23890 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23891 i * reg_mode_size),
23892 simplify_gen_subreg (reg_mode, src, mode,
23893 i * reg_mode_size)));
23894 }
23895 else
23896 {
23897 int i;
23898 int j = -1;
23899 bool used_update = false;
23900 rtx restore_basereg = NULL_RTX;
23901
23902 if (MEM_P (src) && INT_REGNO_P (reg))
23903 {
23904 rtx breg;
23905
23906 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23907 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23908 {
23909 rtx delta_rtx;
23910 breg = XEXP (XEXP (src, 0), 0);
23911 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23912 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23913 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23914 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23915 src = replace_equiv_address (src, breg);
23916 }
23917 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23918 {
23919 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23920 {
23921 rtx basereg = XEXP (XEXP (src, 0), 0);
23922 if (TARGET_UPDATE)
23923 {
23924 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23925 emit_insn (gen_rtx_SET (ndst,
23926 gen_rtx_MEM (reg_mode,
23927 XEXP (src, 0))));
23928 used_update = true;
23929 }
23930 else
23931 emit_insn (gen_rtx_SET (basereg,
23932 XEXP (XEXP (src, 0), 1)));
23933 src = replace_equiv_address (src, basereg);
23934 }
23935 else
23936 {
23937 rtx basereg = gen_rtx_REG (Pmode, reg);
23938 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23939 src = replace_equiv_address (src, basereg);
23940 }
23941 }
23942
23943 breg = XEXP (src, 0);
23944 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23945 breg = XEXP (breg, 0);
23946
23947 /* If the base register we are using to address memory is
23948 also a destination reg, then change that register last. */
23949 if (REG_P (breg)
23950 && REGNO (breg) >= REGNO (dst)
23951 && REGNO (breg) < REGNO (dst) + nregs)
23952 j = REGNO (breg) - REGNO (dst);
23953 }
23954 else if (MEM_P (dst) && INT_REGNO_P (reg))
23955 {
23956 rtx breg;
23957
23958 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23959 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23960 {
23961 rtx delta_rtx;
23962 breg = XEXP (XEXP (dst, 0), 0);
23963 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23964 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23965 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23966
23967 /* We have to update the breg before doing the store.
23968 Use store with update, if available. */
23969
23970 if (TARGET_UPDATE)
23971 {
23972 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23973 emit_insn (TARGET_32BIT
23974 ? (TARGET_POWERPC64
23975 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23976 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
23977 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23978 used_update = true;
23979 }
23980 else
23981 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23982 dst = replace_equiv_address (dst, breg);
23983 }
23984 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23985 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23986 {
23987 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23988 {
23989 rtx basereg = XEXP (XEXP (dst, 0), 0);
23990 if (TARGET_UPDATE)
23991 {
23992 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23993 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23994 XEXP (dst, 0)),
23995 nsrc));
23996 used_update = true;
23997 }
23998 else
23999 emit_insn (gen_rtx_SET (basereg,
24000 XEXP (XEXP (dst, 0), 1)));
24001 dst = replace_equiv_address (dst, basereg);
24002 }
24003 else
24004 {
24005 rtx basereg = XEXP (XEXP (dst, 0), 0);
24006 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
24007 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
24008 && REG_P (basereg)
24009 && REG_P (offsetreg)
24010 && REGNO (basereg) != REGNO (offsetreg));
24011 if (REGNO (basereg) == 0)
24012 {
24013 rtx tmp = offsetreg;
24014 offsetreg = basereg;
24015 basereg = tmp;
24016 }
24017 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
24018 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
24019 dst = replace_equiv_address (dst, basereg);
24020 }
24021 }
24022 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
24023 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
24024 }
24025
24026 for (i = 0; i < nregs; i++)
24027 {
24028 /* Calculate index to next subword. */
24029 ++j;
24030 if (j == nregs)
24031 j = 0;
24032
24033 /* If compiler already emitted move of first word by
24034 store with update, no need to do anything. */
24035 if (j == 0 && used_update)
24036 continue;
24037
24038 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
24039 j * reg_mode_size),
24040 simplify_gen_subreg (reg_mode, src, mode,
24041 j * reg_mode_size)));
24042 }
24043 if (restore_basereg != NULL_RTX)
24044 emit_insn (restore_basereg);
24045 }
24046 }
24047
24048 \f
24049 /* This page contains routines that are used to determine what the
24050 function prologue and epilogue code will do and write them out. */
24051
24052 /* Determine whether the REG is really used. */
24053
24054 static bool
24055 save_reg_p (int reg)
24056 {
24057 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
24058 {
24059 /* When calling eh_return, we must return true for all the cases
24060 where conditional_register_usage marks the PIC offset reg
24061 call used or fixed. */
24062 if (crtl->calls_eh_return
24063 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24064 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24065 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24066 return true;
24067
24068 /* We need to mark the PIC offset register live for the same
24069 conditions as it is set up in rs6000_emit_prologue, or
24070 otherwise it won't be saved before we clobber it. */
24071 if (TARGET_TOC && TARGET_MINIMAL_TOC
24072 && !constant_pool_empty_p ())
24073 return true;
24074
24075 if (DEFAULT_ABI == ABI_V4
24076 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
24077 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
24078 return true;
24079
24080 if (DEFAULT_ABI == ABI_DARWIN
24081 && flag_pic && crtl->uses_pic_offset_table)
24082 return true;
24083 }
24084
24085 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
24086 }
24087
24088 /* Return the first fixed-point register that is required to be
24089 saved. 32 if none. */
24090
24091 int
24092 first_reg_to_save (void)
24093 {
24094 int first_reg;
24095
24096 /* Find lowest numbered live register. */
24097 for (first_reg = 13; first_reg <= 31; first_reg++)
24098 if (save_reg_p (first_reg))
24099 break;
24100
24101 return first_reg;
24102 }
24103
24104 /* Similar, for FP regs. */
24105
24106 int
24107 first_fp_reg_to_save (void)
24108 {
24109 int first_reg;
24110
24111 /* Find lowest numbered live register. */
24112 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
24113 if (save_reg_p (first_reg))
24114 break;
24115
24116 return first_reg;
24117 }
24118
24119 /* Similar, for AltiVec regs. */
24120
24121 static int
24122 first_altivec_reg_to_save (void)
24123 {
24124 int i;
24125
24126 /* Stack frame remains as is unless we are in the AltiVec ABI. */
24127 if (! TARGET_ALTIVEC_ABI)
24128 return LAST_ALTIVEC_REGNO + 1;
24129
24130 /* On Darwin, the unwind routines are compiled without
24131 TARGET_ALTIVEC, and use save_world to save/restore the
24132 altivec registers when necessary. */
24133 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24134 && ! TARGET_ALTIVEC)
24135 return FIRST_ALTIVEC_REGNO + 20;
24136
24137 /* Find lowest numbered live register. */
24138 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
24139 if (save_reg_p (i))
24140 break;
24141
24142 return i;
24143 }
24144
24145 /* Return a 32-bit mask of the AltiVec registers we need to set in
24146 VRSAVE. Bit n of the return value is 1 if Vn is live, with bits
24147 numbered from the most significant end (the MSB of the 32-bit word is bit 0). */
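/* For example, if only the call-saved vectors V20..V31 are live, the
computed mask is 0x00000FFF, matching the value hard-coded for the
Darwin save_world case below. */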
24148
24149 static unsigned int
24150 compute_vrsave_mask (void)
24151 {
24152 unsigned int i, mask = 0;
24153
24154 /* On Darwin, the unwind routines are compiled without
24155 TARGET_ALTIVEC, and use save_world to save/restore the
24156 call-saved altivec registers when necessary. */
24157 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
24158 && ! TARGET_ALTIVEC)
24159 mask |= 0xFFF;
24160
24161 /* First, find out if we use _any_ altivec registers. */
24162 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
24163 if (df_regs_ever_live_p (i))
24164 mask |= ALTIVEC_REG_BIT (i);
24165
24166 if (mask == 0)
24167 return mask;
24168
24169 /* Next, remove the argument registers from the set. These must
24170 be in the VRSAVE mask set by the caller, so we don't need to add
24171 them in again. More importantly, the mask we compute here is
24172 used to generate CLOBBERs in the set_vrsave insn, and we do not
24173 wish the argument registers to die. */
24174 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
24175 mask &= ~ALTIVEC_REG_BIT (i);
24176
24177 /* Similarly, remove the return value from the set. */
24178 {
24179 bool yes = false;
24180 diddle_return_value (is_altivec_return_reg, &yes);
24181 if (yes)
24182 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
24183 }
24184
24185 return mask;
24186 }
24187
24188 /* For a very restricted set of circumstances, we can cut down the
24189 size of prologues/epilogues by calling our own save/restore-the-world
24190 routines. */
24191
24192 static void
24193 compute_save_world_info (rs6000_stack_t *info)
24194 {
24195 info->world_save_p = 1;
24196 info->world_save_p
24197 = (WORLD_SAVE_P (info)
24198 && DEFAULT_ABI == ABI_DARWIN
24199 && !cfun->has_nonlocal_label
24200 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
24201 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
24202 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
24203 && info->cr_save_p);
24204
24205 /* This will not work in conjunction with sibcalls. Make sure there
24206 are none. (This check is expensive, but seldom executed.) */
24207 if (WORLD_SAVE_P (info))
24208 {
24209 rtx_insn *insn;
24210 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
24211 if (CALL_P (insn) && SIBLING_CALL_P (insn))
24212 {
24213 info->world_save_p = 0;
24214 break;
24215 }
24216 }
24217
24218 if (WORLD_SAVE_P (info))
24219 {
24220 /* Even if we're not touching VRsave, make sure there's room on the
24221 stack for it, if it looks like we're calling SAVE_WORLD, which
24222 will attempt to save it. */
24223 info->vrsave_size = 4;
24224
24225 /* If we are going to save the world, we need to save the link register too. */
24226 info->lr_save_p = 1;
24227
24228 /* "Save" the VRsave register too if we're saving the world. */
24229 if (info->vrsave_mask == 0)
24230 info->vrsave_mask = compute_vrsave_mask ();
24231
24232 /* Because the Darwin register save/restore routines only handle
24233 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
24234 check. */
24235 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
24236 && (info->first_altivec_reg_save
24237 >= FIRST_SAVED_ALTIVEC_REGNO));
24238 }
24239
24240 return;
24241 }
24242
24243
24244 static void
24245 is_altivec_return_reg (rtx reg, void *xyes)
24246 {
24247 bool *yes = (bool *) xyes;
24248 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
24249 *yes = true;
24250 }
24251
24252 \f
24253 /* Return whether REG is a global user reg or has been specified by
24254 -ffixed-REG. We should not restore these, and so cannot use
24255 lmw or out-of-line restore functions if there are any. We also
24256 can't save them (well, emit frame notes for them), because frame
24257 unwinding during exception handling will restore saved registers. */
24258
24259 static bool
24260 fixed_reg_p (int reg)
24261 {
24262 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
24263 backend sets it, overriding anything the user might have given. */
24264 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
24265 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
24266 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
24267 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
24268 return false;
24269
24270 return fixed_regs[reg];
24271 }
24272
24273 /* Determine the strategy for saving/restoring registers. */
24274
24275 enum {
24276 SAVE_MULTIPLE = 0x1,
24277 SAVE_INLINE_GPRS = 0x2,
24278 SAVE_INLINE_FPRS = 0x4,
24279 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24280 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24281 SAVE_INLINE_VRS = 0x20,
24282 REST_MULTIPLE = 0x100,
24283 REST_INLINE_GPRS = 0x200,
24284 REST_INLINE_FPRS = 0x400,
24285 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24286 REST_INLINE_VRS = 0x1000
24287 };
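/* The flags combine independently; for example, a strategy of
(SAVE_INLINE_GPRS | SAVE_MULTIPLE | REST_INLINE_GPRS | REST_MULTIPLE)
saves and restores the GPRs in line with single stmw/lmw instructions
rather than with the out-of-line routines. */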
24288
24289 static int
24290 rs6000_savres_strategy (rs6000_stack_t *info,
24291 bool using_static_chain_p)
24292 {
24293 int strategy = 0;
24294
24295 /* Select between in-line and out-of-line save and restore of regs.
24296 First, all the obvious cases where we don't use out-of-line. */
24297 if (crtl->calls_eh_return
24298 || cfun->machine->ra_need_lr)
24299 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24300 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24301 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24302
24303 if (info->first_gp_reg_save == 32)
24304 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24305
24306 if (info->first_fp_reg_save == 64)
24307 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24308
24309 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24310 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24311
24312 /* Define cutoff for using out-of-line functions to save registers. */
24313 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24314 {
24315 if (!optimize_size)
24316 {
24317 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24318 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24319 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24320 }
24321 else
24322 {
24323 /* Prefer out-of-line restore if it will exit. */
24324 if (info->first_fp_reg_save > 61)
24325 strategy |= SAVE_INLINE_FPRS;
24326 if (info->first_gp_reg_save > 29)
24327 {
24328 if (info->first_fp_reg_save == 64)
24329 strategy |= SAVE_INLINE_GPRS;
24330 else
24331 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24332 }
24333 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24334 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24335 }
24336 }
24337 else if (DEFAULT_ABI == ABI_DARWIN)
24338 {
24339 if (info->first_fp_reg_save > 60)
24340 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24341 if (info->first_gp_reg_save > 29)
24342 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24343 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24344 }
24345 else
24346 {
24347 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24348 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24349 || info->first_fp_reg_save > 61)
24350 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24351 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24352 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24353 }
24354
24355 /* Don't bother to try to save things out-of-line if r11 is occupied
24356 by the static chain. It would require too much fiddling and the
24357 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24358 pointer on Darwin, and AIX uses r1 or r12. */
24359 if (using_static_chain_p
24360 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24361 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24362 | SAVE_INLINE_GPRS
24363 | SAVE_INLINE_VRS);
24364
24365 /* Don't ever restore fixed regs. That means we can't use the
24366 out-of-line register restore functions if a fixed reg is in the
24367 range of regs restored. */
24368 if (!(strategy & REST_INLINE_FPRS))
24369 for (int i = info->first_fp_reg_save; i < 64; i++)
24370 if (fixed_regs[i])
24371 {
24372 strategy |= REST_INLINE_FPRS;
24373 break;
24374 }
24375
24376 /* We can only use the out-of-line routines to restore fprs if we've
24377 saved all the registers from first_fp_reg_save in the prologue.
24378 Otherwise, we risk loading garbage. Of course, if we have saved
24379 out-of-line then we know we haven't skipped any fprs. */
24380 if ((strategy & SAVE_INLINE_FPRS)
24381 && !(strategy & REST_INLINE_FPRS))
24382 for (int i = info->first_fp_reg_save; i < 64; i++)
24383 if (!save_reg_p (i))
24384 {
24385 strategy |= REST_INLINE_FPRS;
24386 break;
24387 }
24388
24389 /* Similarly, for altivec regs. */
24390 if (!(strategy & REST_INLINE_VRS))
24391 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24392 if (fixed_regs[i])
24393 {
24394 strategy |= REST_INLINE_VRS;
24395 break;
24396 }
24397
24398 if ((strategy & SAVE_INLINE_VRS)
24399 && !(strategy & REST_INLINE_VRS))
24400 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24401 if (!save_reg_p (i))
24402 {
24403 strategy |= REST_INLINE_VRS;
24404 break;
24405 }
24406
24407 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24408 saved is an out-of-line save or restore. Set up the value for
24409 the next test (excluding out-of-line gprs). */
24410 bool lr_save_p = (info->lr_save_p
24411 || !(strategy & SAVE_INLINE_FPRS)
24412 || !(strategy & SAVE_INLINE_VRS)
24413 || !(strategy & REST_INLINE_FPRS)
24414 || !(strategy & REST_INLINE_VRS));
24415
24416 if (TARGET_MULTIPLE
24417 && !TARGET_POWERPC64
24418 && info->first_gp_reg_save < 31
24419 && !(flag_shrink_wrap
24420 && flag_shrink_wrap_separate
24421 && optimize_function_for_speed_p (cfun)))
24422 {
24423 int count = 0;
24424 for (int i = info->first_gp_reg_save; i < 32; i++)
24425 if (save_reg_p (i))
24426 count++;
24427
24428 if (count <= 1)
24429 /* Don't use store multiple if only one reg needs to be
24430 saved. This can occur for example when the ABI_V4 pic reg
24431 (r30) needs to be saved to make calls, but r31 is not
24432 used. */
24433 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24434 else
24435 {
24436 /* Prefer store multiple for saves over out-of-line
24437 routines, since the store-multiple instruction will
24438 always be smaller. */
24439 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24440
24441 /* The situation is more complicated with load multiple.
24442 We'd prefer to use the out-of-line routines for restores,
24443 since the "exit" out-of-line routines can handle the
24444 restore of LR and the frame teardown. However, it doesn't
24445 make sense to use the out-of-line routine if that is the
24446 only reason we'd need to save LR, and we can't use the
24447 "exit" out-of-line gpr restore if we have saved some
24448 fprs; in those cases it is advantageous to use load
24449 multiple when available. */
24450 if (info->first_fp_reg_save != 64 || !lr_save_p)
24451 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24452 }
24453 }
24454
24455 /* Using the "exit" out-of-line routine does not improve code size
24456 if using it would require lr to be saved and if only saving one
24457 or two gprs. */
24458 else if (!lr_save_p && info->first_gp_reg_save > 29)
24459 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24460
24461 /* Don't ever restore fixed regs. */
24462 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24463 for (int i = info->first_gp_reg_save; i < 32; i++)
24464 if (fixed_reg_p (i))
24465 {
24466 strategy |= REST_INLINE_GPRS;
24467 strategy &= ~REST_MULTIPLE;
24468 break;
24469 }
24470
24471 /* We can only use load multiple or the out-of-line routines to
24472 restore gprs if we've saved all the registers from
24473 first_gp_reg_save. Otherwise, we risk loading garbage.
24474 Of course, if we have saved out-of-line or used stmw then we know
24475 we haven't skipped any gprs. */
24476 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24477 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24478 for (int i = info->first_gp_reg_save; i < 32; i++)
24479 if (!save_reg_p (i))
24480 {
24481 strategy |= REST_INLINE_GPRS;
24482 strategy &= ~REST_MULTIPLE;
24483 break;
24484 }
24485
24486 if (TARGET_ELF && TARGET_64BIT)
24487 {
24488 if (!(strategy & SAVE_INLINE_FPRS))
24489 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24490 else if (!(strategy & SAVE_INLINE_GPRS)
24491 && info->first_fp_reg_save == 64)
24492 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24493 }
24494 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24495 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24496
24497 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24498 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24499
24500 return strategy;
24501 }
24502
24503 /* Calculate the stack information for the current function. This is
24504 complicated by having two separate calling sequences, the AIX calling
24505 sequence and the V.4 calling sequence.
24506
24507 AIX (and Darwin/Mac OS X) stack frames look like:
24508 32-bit 64-bit
24509 SP----> +---------------------------------------+
24510 | back chain to caller | 0 0
24511 +---------------------------------------+
24512 | saved CR | 4 8 (8-11)
24513 +---------------------------------------+
24514 | saved LR | 8 16
24515 +---------------------------------------+
24516 | reserved for compilers | 12 24
24517 +---------------------------------------+
24518 | reserved for binders | 16 32
24519 +---------------------------------------+
24520 | saved TOC pointer | 20 40
24521 +---------------------------------------+
24522 | Parameter save area (+padding*) (P) | 24 48
24523 +---------------------------------------+
24524 | Alloca space (A) | 24+P etc.
24525 +---------------------------------------+
24526 | Local variable space (L) | 24+P+A
24527 +---------------------------------------+
24528 | Float/int conversion temporary (X) | 24+P+A+L
24529 +---------------------------------------+
24530 | Save area for AltiVec registers (W) | 24+P+A+L+X
24531 +---------------------------------------+
24532 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24533 +---------------------------------------+
24534 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24535 +---------------------------------------+
24536 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24537 +---------------------------------------+
24538 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24539 +---------------------------------------+
24540 old SP->| back chain to caller's caller |
24541 +---------------------------------------+
24542
24543 * If the alloca area is present, the parameter save area is
24544 padded so that the former starts 16-byte aligned.
24545
24546 The required alignment for AIX configurations is two words (i.e., 8
24547 or 16 bytes).
24548
24549 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24550
24551 SP----> +---------------------------------------+
24552 | Back chain to caller | 0
24553 +---------------------------------------+
24554 | Save area for CR | 8
24555 +---------------------------------------+
24556 | Saved LR | 16
24557 +---------------------------------------+
24558 | Saved TOC pointer | 24
24559 +---------------------------------------+
24560 | Parameter save area (+padding*) (P) | 32
24561 +---------------------------------------+
24562 | Alloca space (A) | 32+P
24563 +---------------------------------------+
24564 | Local variable space (L) | 32+P+A
24565 +---------------------------------------+
24566 | Save area for AltiVec registers (W) | 32+P+A+L
24567 +---------------------------------------+
24568 | AltiVec alignment padding (Y) | 32+P+A+L+W
24569 +---------------------------------------+
24570 | Save area for GP registers (G) | 32+P+A+L+W+Y
24571 +---------------------------------------+
24572 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24573 +---------------------------------------+
24574 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24575 +---------------------------------------+
24576
24577 * If the alloca area is present, the parameter save area is
24578 padded so that the former starts 16-byte aligned.
24579
24580 V.4 stack frames look like:
24581
24582 SP----> +---------------------------------------+
24583 | back chain to caller | 0
24584 +---------------------------------------+
24585 | caller's saved LR | 4
24586 +---------------------------------------+
24587 | Parameter save area (+padding*) (P) | 8
24588 +---------------------------------------+
24589 | Alloca space (A) | 8+P
24590 +---------------------------------------+
24591 | Varargs save area (V) | 8+P+A
24592 +---------------------------------------+
24593 | Local variable space (L) | 8+P+A+V
24594 +---------------------------------------+
24595 | Float/int conversion temporary (X) | 8+P+A+V+L
24596 +---------------------------------------+
24597 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24598 +---------------------------------------+
24599 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24600 +---------------------------------------+
24601 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24602 +---------------------------------------+
24603 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24604 +---------------------------------------+
24605 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24606 +---------------------------------------+
24607 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24608 +---------------------------------------+
24609 old SP->| back chain to caller's caller |
24610 +---------------------------------------+
24611
24612 * If the alloca area is present and the required alignment is
24613 16 bytes, the parameter save area is padded so that the
24614 alloca area starts 16-byte aligned.
24615
24616 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24617 given. (But note below and in sysv4.h that we require only 8 and
24618 may round up the size of our stack frame anyway. The historical
24619 reason is early versions of powerpc-linux which didn't properly
24620 align the stack at program startup. A happy side-effect is that
24621 -mno-eabi libraries can be used with -meabi programs.)
24622
24623 The EABI configuration defaults to the V.4 layout. However,
24624 the stack alignment requirements may differ. If -mno-eabi is not
24625 given, the required stack alignment is 8 bytes; if -mno-eabi is
24626 given, the required alignment is 16 bytes. (But see V.4 comment
24627 above.) */
24628
24629 #ifndef ABI_STACK_BOUNDARY
24630 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24631 #endif
24632
24633 static rs6000_stack_t *
24634 rs6000_stack_info (void)
24635 {
24636 /* We should never be called for thunks, we are not set up for that. */
24637 gcc_assert (!cfun->is_thunk);
24638
24639 rs6000_stack_t *info = &stack_info;
24640 int reg_size = TARGET_32BIT ? 4 : 8;
24641 int ehrd_size;
24642 int ehcr_size;
24643 int save_align;
24644 int first_gp;
24645 HOST_WIDE_INT non_fixed_size;
24646 bool using_static_chain_p;
24647
24648 if (reload_completed && info->reload_completed)
24649 return info;
24650
24651 memset (info, 0, sizeof (*info));
24652 info->reload_completed = reload_completed;
24653
24654 /* Select which calling sequence. */
24655 info->abi = DEFAULT_ABI;
24656
24657 /* Calculate which registers need to be saved & save area size. */
24658 info->first_gp_reg_save = first_reg_to_save ();
24659 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24660 even if it currently looks like we won't. Reload may need it to
24661 get at a constant; if so, it will have already created a constant
24662 pool entry for it. */
24663 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24664 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24665 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24666 && crtl->uses_const_pool
24667 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24668 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24669 else
24670 first_gp = info->first_gp_reg_save;
24671
24672 info->gp_size = reg_size * (32 - first_gp);
24673
24674 info->first_fp_reg_save = first_fp_reg_to_save ();
24675 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24676
24677 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24678 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24679 - info->first_altivec_reg_save);
24680
24681 /* Does this function call anything? */
24682 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24683
24684 /* Determine if we need to save the condition code registers. */
24685 if (save_reg_p (CR2_REGNO)
24686 || save_reg_p (CR3_REGNO)
24687 || save_reg_p (CR4_REGNO))
24688 {
24689 info->cr_save_p = 1;
24690 if (DEFAULT_ABI == ABI_V4)
24691 info->cr_size = reg_size;
24692 }
24693
24694 /* If the current function calls __builtin_eh_return, then we need
24695 to allocate stack space for registers that will hold data for
24696 the exception handler. */
24697 if (crtl->calls_eh_return)
24698 {
24699 unsigned int i;
24700 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24701 continue;
24702
24703 ehrd_size = i * UNITS_PER_WORD;
24704 }
24705 else
24706 ehrd_size = 0;
24707
24708 /* In the ELFv2 ABI, we also need to allocate space for separate
24709 CR field save areas if the function calls __builtin_eh_return. */
24710 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24711 {
24712 /* This hard-codes that we have three call-saved CR fields. */
24713 ehcr_size = 3 * reg_size;
24714 /* We do *not* use the regular CR save mechanism. */
24715 info->cr_save_p = 0;
24716 }
24717 else
24718 ehcr_size = 0;
24719
24720 /* Determine various sizes. */
24721 info->reg_size = reg_size;
24722 info->fixed_size = RS6000_SAVE_AREA;
24723 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24724 if (cfun->calls_alloca)
24725 info->parm_size =
24726 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24727 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24728 else
24729 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24730 TARGET_ALTIVEC ? 16 : 8);
24731 if (FRAME_GROWS_DOWNWARD)
24732 info->vars_size
24733 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24734 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24735 - (info->fixed_size + info->vars_size + info->parm_size);
24736
24737 if (TARGET_ALTIVEC_ABI)
24738 info->vrsave_mask = compute_vrsave_mask ();
24739
24740 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24741 info->vrsave_size = 4;
24742
24743 compute_save_world_info (info);
24744
24745 /* Calculate the offsets. */
24746 switch (DEFAULT_ABI)
24747 {
24748 case ABI_NONE:
24749 default:
24750 gcc_unreachable ();
24751
24752 case ABI_AIX:
24753 case ABI_ELFv2:
24754 case ABI_DARWIN:
24755 info->fp_save_offset = -info->fp_size;
24756 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24757
24758 if (TARGET_ALTIVEC_ABI)
24759 {
24760 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24761
24762 /* Align stack so vector save area is on a quadword boundary.
24763 The padding goes above the vectors. */
24764 if (info->altivec_size != 0)
24765 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24766
24767 info->altivec_save_offset = info->vrsave_save_offset
24768 - info->altivec_padding_size
24769 - info->altivec_size;
24770 gcc_assert (info->altivec_size == 0
24771 || info->altivec_save_offset % 16 == 0);
24772
24773 /* Adjust for AltiVec case. */
24774 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24775 }
24776 else
24777 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24778
24779 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24780 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24781 info->lr_save_offset = 2*reg_size;
24782 break;
24783
24784 case ABI_V4:
24785 info->fp_save_offset = -info->fp_size;
24786 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24787 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24788
24789 if (TARGET_ALTIVEC_ABI)
24790 {
24791 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24792
24793 /* Align stack so vector save area is on a quadword boundary. */
24794 if (info->altivec_size != 0)
24795 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24796
24797 info->altivec_save_offset = info->vrsave_save_offset
24798 - info->altivec_padding_size
24799 - info->altivec_size;
24800
24801 /* Adjust for AltiVec case. */
24802 info->ehrd_offset = info->altivec_save_offset;
24803 }
24804 else
24805 info->ehrd_offset = info->cr_save_offset;
24806
24807 info->ehrd_offset -= ehrd_size;
24808 info->lr_save_offset = reg_size;
24809 }
24810
24811 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24812 info->save_size = RS6000_ALIGN (info->fp_size
24813 + info->gp_size
24814 + info->altivec_size
24815 + info->altivec_padding_size
24816 + ehrd_size
24817 + ehcr_size
24818 + info->cr_size
24819 + info->vrsave_size,
24820 save_align);
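/* For example, a 32-bit ELF function that saves only r30, r31 and f31
has gp_size == 8 and fp_size == 8, giving
save_size == RS6000_ALIGN (16, 8) == 16 bytes. */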
24821
24822 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24823
24824 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24825 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24826
24827 /* Determine if we need to save the link register. */
24828 if (info->calls_p
24829 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24830 && crtl->profile
24831 && !TARGET_PROFILE_KERNEL)
24832 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24833 #ifdef TARGET_RELOCATABLE
24834 || (DEFAULT_ABI == ABI_V4
24835 && (TARGET_RELOCATABLE || flag_pic > 1)
24836 && !constant_pool_empty_p ())
24837 #endif
24838 || rs6000_ra_ever_killed ())
24839 info->lr_save_p = 1;
24840
24841 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24842 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24843 && call_used_regs[STATIC_CHAIN_REGNUM]);
24844 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24845
24846 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24847 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24848 || !(info->savres_strategy & SAVE_INLINE_VRS)
24849 || !(info->savres_strategy & REST_INLINE_GPRS)
24850 || !(info->savres_strategy & REST_INLINE_FPRS)
24851 || !(info->savres_strategy & REST_INLINE_VRS))
24852 info->lr_save_p = 1;
24853
24854 if (info->lr_save_p)
24855 df_set_regs_ever_live (LR_REGNO, true);
24856
24857 /* Determine if we need to allocate any stack frame:
24858
24859 For AIX we need to push the stack if a frame pointer is needed
24860 (because the stack might be dynamically adjusted), if we are
24861 debugging, if we make calls, or if the sum of fp_save, gp_save,
24862 and local variables is more than the space needed to save all
24863 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24864 + 18*8 = 288 (GPR13 reserved).
24865
24866 For V.4 we don't have the stack cushion that AIX uses, but assume
24867 that the debugger can handle stackless frames. */
24868
24869 if (info->calls_p)
24870 info->push_p = 1;
24871
24872 else if (DEFAULT_ABI == ABI_V4)
24873 info->push_p = non_fixed_size != 0;
24874
24875 else if (frame_pointer_needed)
24876 info->push_p = 1;
24877
24878 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24879 info->push_p = 1;
24880
24881 else
24882 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24883
24884 return info;
24885 }
24886
24887 static void
24888 debug_stack_info (rs6000_stack_t *info)
24889 {
24890 const char *abi_string;
24891
24892 if (! info)
24893 info = rs6000_stack_info ();
24894
24895 fprintf (stderr, "\nStack information for function %s:\n",
24896 ((current_function_decl && DECL_NAME (current_function_decl))
24897 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24898 : "<unknown>"));
24899
24900 switch (info->abi)
24901 {
24902 default: abi_string = "Unknown"; break;
24903 case ABI_NONE: abi_string = "NONE"; break;
24904 case ABI_AIX: abi_string = "AIX"; break;
24905 case ABI_ELFv2: abi_string = "ELFv2"; break;
24906 case ABI_DARWIN: abi_string = "Darwin"; break;
24907 case ABI_V4: abi_string = "V.4"; break;
24908 }
24909
24910 fprintf (stderr, "\tABI = %5s\n", abi_string);
24911
24912 if (TARGET_ALTIVEC_ABI)
24913 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24914
24915 if (info->first_gp_reg_save != 32)
24916 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24917
24918 if (info->first_fp_reg_save != 64)
24919 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24920
24921 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24922 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24923 info->first_altivec_reg_save);
24924
24925 if (info->lr_save_p)
24926 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24927
24928 if (info->cr_save_p)
24929 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24930
24931 if (info->vrsave_mask)
24932 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24933
24934 if (info->push_p)
24935 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24936
24937 if (info->calls_p)
24938 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24939
24940 if (info->gp_size)
24941 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24942
24943 if (info->fp_size)
24944 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24945
24946 if (info->altivec_size)
24947 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24948 info->altivec_save_offset);
24949
24950 if (info->vrsave_size)
24951 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24952 info->vrsave_save_offset);
24953
24954 if (info->lr_save_p)
24955 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24956
24957 if (info->cr_save_p)
24958 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24959
24960 if (info->varargs_save_offset)
24961 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24962
24963 if (info->total_size)
24964 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24965 info->total_size);
24966
24967 if (info->vars_size)
24968 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24969 info->vars_size);
24970
24971 if (info->parm_size)
24972 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24973
24974 if (info->fixed_size)
24975 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24976
24977 if (info->gp_size)
24978 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24979
24980 if (info->fp_size)
24981 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24982
24983 if (info->altivec_size)
24984 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24985
24986 if (info->vrsave_size)
24987 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24988
24989 if (info->altivec_padding_size)
24990 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24991 info->altivec_padding_size);
24992
24993 if (info->cr_size)
24994 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24995
24996 if (info->save_size)
24997 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24998
24999 if (info->reg_size != 4)
25000 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
25001
25002 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
25003
25004 fprintf (stderr, "\n");
25005 }
25006
25007 rtx
25008 rs6000_return_addr (int count, rtx frame)
25009 {
25010 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
25011 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
25012 if (count != 0
25013 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
25014 {
25015 cfun->machine->ra_needs_full_frame = 1;
25016
25017 if (count == 0)
25018 /* FRAME is set to frame_pointer_rtx by the generic code, but that
25019 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
25020 frame = stack_pointer_rtx;
25021 rtx prev_frame_addr = memory_address (Pmode, frame);
25022 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
25023 rtx lr_save_off = plus_constant (Pmode,
25024 prev_frame, RETURN_ADDRESS_OFFSET);
25025 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
25026 return gen_rtx_MEM (Pmode, lr_save_addr);
25027 }
25028
25029 cfun->machine->ra_need_lr = 1;
25030 return get_hard_reg_initial_val (Pmode, LR_REGNO);
25031 }
25032
25033 /* Say whether a function is a candidate for sibcall handling or not. */
25034
25035 static bool
25036 rs6000_function_ok_for_sibcall (tree decl, tree exp)
25037 {
25038 tree fntype;
25039
25040 /* The sibcall epilogue may clobber the static chain register.
25041 ??? We could work harder and avoid that, but it's probably
25042 not worth the hassle in practice. */
25043 if (CALL_EXPR_STATIC_CHAIN (exp))
25044 return false;
25045
25046 if (decl)
25047 fntype = TREE_TYPE (decl);
25048 else
25049 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
25050
25051 /* We can't do it if the called function has more vector parameters
25052 than the current function; there's nowhere to put the VRsave code. */
25053 if (TARGET_ALTIVEC_ABI
25054 && TARGET_ALTIVEC_VRSAVE
25055 && !(decl && decl == current_function_decl))
25056 {
25057 function_args_iterator args_iter;
25058 tree type;
25059 int nvreg = 0;
25060
25061 /* Functions with vector parameters are required to have a
25062 prototype, so the argument type info must be available
25063 here. */
25064 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
25065 if (TREE_CODE (type) == VECTOR_TYPE
25066 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25067 nvreg++;
25068
25069 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
25070 if (TREE_CODE (type) == VECTOR_TYPE
25071 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
25072 nvreg--;
25073
25074 if (nvreg > 0)
25075 return false;
25076 }
25077
25078 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
25079 functions, because the callee may have a different TOC pointer to
25080 the caller and there's no way to ensure we restore the TOC when
25081 we return. With the secure-plt SYSV ABI we can't make non-local
25082 calls when -fpic/PIC because the plt call stubs use r30. */
25083 if (DEFAULT_ABI == ABI_DARWIN
25084 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25085 && decl
25086 && !DECL_EXTERNAL (decl)
25087 && !DECL_WEAK (decl)
25088 && (*targetm.binds_local_p) (decl))
25089 || (DEFAULT_ABI == ABI_V4
25090 && (!TARGET_SECURE_PLT
25091 || !flag_pic
25092 || (decl
25093 && (*targetm.binds_local_p) (decl)))))
25094 {
25095 tree attr_list = TYPE_ATTRIBUTES (fntype);
25096
25097 if (!lookup_attribute ("longcall", attr_list)
25098 || lookup_attribute ("shortcall", attr_list))
25099 return true;
25100 }
25101
25102 return false;
25103 }
25104
25105 static int
25106 rs6000_ra_ever_killed (void)
25107 {
25108 rtx_insn *top;
25109 rtx reg;
25110 rtx_insn *insn;
25111
25112 if (cfun->is_thunk)
25113 return 0;
25114
25115 if (cfun->machine->lr_save_state)
25116 return cfun->machine->lr_save_state - 1;
25117
25118 /* regs_ever_live has LR marked as used if any sibcalls are present,
25119 but this should not force saving and restoring in the
25120 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
25121 clobbers LR, so that is inappropriate. */
25122
25123 /* Also, the prologue can generate a store into LR that
25124 doesn't really count, like this:
25125
25126 move LR->R0
25127 bcl to set PIC register
25128 move LR->R31
25129 move R0->LR
25130
25131 When we're called from the epilogue, we need to avoid counting
25132 this as a store. */
25133
25134 push_topmost_sequence ();
25135 top = get_insns ();
25136 pop_topmost_sequence ();
25137 reg = gen_rtx_REG (Pmode, LR_REGNO);
25138
25139 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
25140 {
25141 if (INSN_P (insn))
25142 {
25143 if (CALL_P (insn))
25144 {
25145 if (!SIBLING_CALL_P (insn))
25146 return 1;
25147 }
25148 else if (find_regno_note (insn, REG_INC, LR_REGNO))
25149 return 1;
25150 else if (set_of (reg, insn) != NULL_RTX
25151 && !prologue_epilogue_contains (insn))
25152 return 1;
25153 }
25154 }
25155 return 0;
25156 }
25157 \f
25158 /* Emit instructions needed to load the TOC register.
25159 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
25160 there is a constant pool; or for SVR4 -fpic. */
25161
25162 void
25163 rs6000_emit_load_toc_table (int fromprolog)
25164 {
25165 rtx dest;
25166 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
25167
25168 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
25169 {
25170 char buf[30];
25171 rtx lab, tmp1, tmp2, got;
25172
25173 lab = gen_label_rtx ();
25174 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
25175 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25176 if (flag_pic == 2)
25177 {
25178 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25179 need_toc_init = 1;
25180 }
25181 else
25182 got = rs6000_got_sym ();
25183 tmp1 = tmp2 = dest;
25184 if (!fromprolog)
25185 {
25186 tmp1 = gen_reg_rtx (Pmode);
25187 tmp2 = gen_reg_rtx (Pmode);
25188 }
25189 emit_insn (gen_load_toc_v4_PIC_1 (lab));
25190 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
25191 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
25192 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
25193 }
25194 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
25195 {
25196 emit_insn (gen_load_toc_v4_pic_si ());
25197 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25198 }
25199 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
25200 {
25201 char buf[30];
25202 rtx temp0 = (fromprolog
25203 ? gen_rtx_REG (Pmode, 0)
25204 : gen_reg_rtx (Pmode));
25205
25206 if (fromprolog)
25207 {
25208 rtx symF, symL;
25209
25210 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
25211 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25212
25213 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
25214 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
25215
25216 emit_insn (gen_load_toc_v4_PIC_1 (symF));
25217 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25218 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
25219 }
25220 else
25221 {
25222 rtx tocsym, lab;
25223
25224 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25225 need_toc_init = 1;
25226 lab = gen_label_rtx ();
25227 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
25228 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
25229 if (TARGET_LINK_STACK)
25230 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
25231 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
25232 }
25233 emit_insn (gen_addsi3 (dest, temp0, dest));
25234 }
25235 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
25236 {
25237 /* This is for AIX code running in non-PIC ELF32. */
25238 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
25239
25240 need_toc_init = 1;
25241 emit_insn (gen_elf_high (dest, realsym));
25242 emit_insn (gen_elf_low (dest, dest, realsym));
25243 }
25244 else
25245 {
25246 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
25247
25248 if (TARGET_32BIT)
25249 emit_insn (gen_load_toc_aix_si (dest));
25250 else
25251 emit_insn (gen_load_toc_aix_di (dest));
25252 }
25253 }
25254
25255 /* Emit instructions to restore the link register after determining where
25256 its value has been stored. */
25257
25258 void
25259 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
25260 {
25261 rs6000_stack_t *info = rs6000_stack_info ();
25262 rtx operands[2];
25263
25264 operands[0] = source;
25265 operands[1] = scratch;
25266
25267 if (info->lr_save_p)
25268 {
25269 rtx frame_rtx = stack_pointer_rtx;
25270 HOST_WIDE_INT sp_offset = 0;
25271 rtx tmp;
25272
25273 if (frame_pointer_needed
25274 || cfun->calls_alloca
25275 || info->total_size > 32767)
25276 {
25277 tmp = gen_frame_mem (Pmode, frame_rtx);
25278 emit_move_insn (operands[1], tmp);
25279 frame_rtx = operands[1];
25280 }
25281 else if (info->push_p)
25282 sp_offset = info->total_size;
25283
25284 tmp = plus_constant (Pmode, frame_rtx,
25285 info->lr_save_offset + sp_offset);
25286 tmp = gen_frame_mem (Pmode, tmp);
25287 emit_move_insn (tmp, operands[0]);
25288 }
25289 else
25290 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25291
25292 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25293 state of lr_save_p so any change from here on would be a bug. In
25294 particular, stop rs6000_ra_ever_killed from considering the SET
25295 of lr we may have added just above. */
25296 cfun->machine->lr_save_state = info->lr_save_p + 1;
25297 }
25298
25299 static GTY(()) alias_set_type set = -1;
25300
25301 alias_set_type
25302 get_TOC_alias_set (void)
25303 {
25304 if (set == -1)
25305 set = new_alias_set ();
25306 return set;
25307 }
25308
25309 /* This returns nonzero if the current function uses the TOC. This is
25310 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25311 is generated by the ABI_V4 load_toc_* patterns.
25312 Return 2 instead of 1 if the load_toc_* pattern is in the function
25313 partition that doesn't start the function. */
25314 #if TARGET_ELF
25315 static int
25316 uses_TOC (void)
25317 {
25318 rtx_insn *insn;
25319 int ret = 1;
25320
25321 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25322 {
25323 if (INSN_P (insn))
25324 {
25325 rtx pat = PATTERN (insn);
25326 int i;
25327
25328 if (GET_CODE (pat) == PARALLEL)
25329 for (i = 0; i < XVECLEN (pat, 0); i++)
25330 {
25331 rtx sub = XVECEXP (pat, 0, i);
25332 if (GET_CODE (sub) == USE)
25333 {
25334 sub = XEXP (sub, 0);
25335 if (GET_CODE (sub) == UNSPEC
25336 && XINT (sub, 1) == UNSPEC_TOC)
25337 return ret;
25338 }
25339 }
25340 }
25341 else if (crtl->has_bb_partition
25342 && NOTE_P (insn)
25343 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25344 ret = 2;
25345 }
25346 return 0;
25347 }
25348 #endif
25349
25350 rtx
25351 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25352 {
25353 rtx tocrel, tocreg, hi;
25354
25355 if (TARGET_DEBUG_ADDR)
25356 {
25357 if (SYMBOL_REF_P (symbol))
25358 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25359 XSTR (symbol, 0));
25360 else
25361 {
25362 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25363 GET_RTX_NAME (GET_CODE (symbol)));
25364 debug_rtx (symbol);
25365 }
25366 }
25367
25368 if (!can_create_pseudo_p ())
25369 df_set_regs_ever_live (TOC_REGISTER, true);
25370
25371 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25372 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25373 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25374 return tocrel;
25375
25376 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25377 if (largetoc_reg != NULL)
25378 {
25379 emit_move_insn (largetoc_reg, hi);
25380 hi = largetoc_reg;
25381 }
25382 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25383 }
25384
25385 /* Issue assembly directives that create a reference to the given DWARF
25386 FRAME_TABLE_LABEL from the current function section. */
25387 void
25388 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25389 {
25390 fprintf (asm_out_file, "\t.ref %s\n",
25391 (* targetm.strip_name_encoding) (frame_table_label));
25392 }
25393 \f
25394 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25395 and the change to the stack pointer. */
25396
25397 static void
25398 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25399 {
25400 rtvec p;
25401 int i;
25402 rtx regs[3];
25403
25404 i = 0;
25405 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25406 if (hard_frame_needed)
25407 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25408 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25409 || (hard_frame_needed
25410 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25411 regs[i++] = fp;
25412
25413 p = rtvec_alloc (i);
25414 while (--i >= 0)
25415 {
25416 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25417 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25418 }
25419
25420 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25421 }
25422
25423 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25424 and set the appropriate attributes for the generated insn. Return the
25425 first insn which adjusts the stack pointer or the last insn before
25426 the stack adjustment loop.
25427
25428 SIZE_INT is used to create the CFI note for the allocation.
25429
25430 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25431 since stacks grow to lower addresses, its runtime value is -SIZE_INT.
25432
25433 ORIG_SP contains the backchain value that must be stored at *sp. */
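/* For example, with ORIG_SP equal to the stack pointer, a 4096-byte
allocation on a 64-bit target is emitted as a single
"stdu r1,-4096(r1)", which decrements the stack pointer and stores the
backchain in one instruction ("stwu" when Pmode is SImode). */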
25434
25435 static rtx_insn *
25436 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25437 {
25438 rtx_insn *insn;
25439
25440 rtx size_rtx = GEN_INT (-size_int);
25441 if (size_int > 32767)
25442 {
25443 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25444 /* Need a note here so that try_split doesn't get confused. */
25445 if (get_last_insn () == NULL_RTX)
25446 emit_note (NOTE_INSN_DELETED);
25447 insn = emit_move_insn (tmp_reg, size_rtx);
25448 try_split (PATTERN (insn), insn, 0);
25449 size_rtx = tmp_reg;
25450 }
25451
25452 if (Pmode == SImode)
25453 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25454 stack_pointer_rtx,
25455 size_rtx,
25456 orig_sp));
25457 else
25458 insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
25459 stack_pointer_rtx,
25460 size_rtx,
25461 orig_sp));
25462 rtx par = PATTERN (insn);
25463 gcc_assert (GET_CODE (par) == PARALLEL);
25464 rtx set = XVECEXP (par, 0, 0);
25465 gcc_assert (GET_CODE (set) == SET);
25466 rtx mem = SET_DEST (set);
25467 gcc_assert (MEM_P (mem));
25468 MEM_NOTRAP_P (mem) = 1;
25469 set_mem_alias_set (mem, get_frame_alias_set ());
25470
25471 RTX_FRAME_RELATED_P (insn) = 1;
25472 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25473 gen_rtx_SET (stack_pointer_rtx,
25474 gen_rtx_PLUS (Pmode,
25475 stack_pointer_rtx,
25476 GEN_INT (-size_int))));
25477
25478 /* Emit a blockage to ensure the allocation/probing insns are
25479 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25480 note for similar reasons. */
25481 if (flag_stack_clash_protection)
25482 {
25483 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25484 emit_insn (gen_blockage ());
25485 }
25486
25487 return insn;
25488 }
25489
25490 static HOST_WIDE_INT
25491 get_stack_clash_protection_probe_interval (void)
25492 {
25493 return (HOST_WIDE_INT_1U
25494 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25495 }
25496
25497 static HOST_WIDE_INT
25498 get_stack_clash_protection_guard_size (void)
25499 {
25500 return (HOST_WIDE_INT_1U
25501 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25502 }
25503
25504 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25505 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25506
25507 COPY_REG, if non-null, should contain a copy of the original
25508 stack pointer at exit from this function.
25509
25510 This is subtly different from the Ada probing in that it tries hard to
25511 prevent attacks that jump the stack guard. Thus it is never allowed to
25512 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25513 space without a suitable probe. */
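/* As a sketch, assuming the default 4 KiB probe interval: a 16 KiB
allocation is emitted as four consecutive 4 KiB store-with-update
allocations, each of which implicitly probes its new page, so no
single step can jump past the guard. */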
25514 static rtx_insn *
25515 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25516 rtx copy_reg)
25517 {
25518 rtx orig_sp = copy_reg;
25519
25520 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25521
25522 /* Round the size down to a multiple of PROBE_INTERVAL. */
25523 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25524
25525 /* If a copy was explicitly requested,
25526 or the rounded size is not the same as the original size,
25527 or the rounded size is greater than a page,
25528 then we will need a copy of the original stack pointer. */
25529 if (rounded_size != orig_size
25530 || rounded_size > probe_interval
25531 || copy_reg)
25532 {
25533 /* If the caller did not request a copy of the incoming stack
25534 pointer, then we use r0 to hold the copy. */
25535 if (!copy_reg)
25536 orig_sp = gen_rtx_REG (Pmode, 0);
25537 emit_move_insn (orig_sp, stack_pointer_rtx);
25538 }
25539
25540 /* There are three cases here.
25541
25542 One is a single probe, which is the most common and the most efficiently
25543 implemented, as it does not need a copy of the original stack
25544 pointer if there are no residuals.
25545
25546 Second is unrolled allocation/probes, which we use if there are just
25547 a few of them. It needs to save the original stack pointer into a
25548 temporary for use as a source register in the allocation/probe.
25549
25550 Last is a loop. This is the most uncommon case and the least efficient. */
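/* Concretely: an allocation of exactly one probe interval takes the
single-probe arm below; up to eight intervals are open-coded, one
store-with-update per interval; anything larger falls through to the
probe_stack_range loop. */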
25551 rtx_insn *retval = NULL;
25552 if (rounded_size == probe_interval)
25553 {
25554 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25555
25556 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25557 }
25558 else if (rounded_size <= 8 * probe_interval)
25559 {
25560 /* The ABI requires using the store with update insns to allocate
25561 space and store the backchain into the stack
25562
25563 So we save the current stack pointer into a temporary, then
25564 emit the store-with-update insns to store the saved stack pointer
25565 into the right location in each new page. */
25566 for (int i = 0; i < rounded_size; i += probe_interval)
25567 {
25568 rtx_insn *insn
25569 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25570
25571 /* Save the first stack adjustment in RETVAL. */
25572 if (i == 0)
25573 retval = insn;
25574 }
25575
25576 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25577 }
25578 else
25579 {
25580 /* Compute the ending address. */
25581 rtx end_addr
25582 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25583 rtx rs = GEN_INT (-rounded_size);
25584 rtx_insn *insn;
25585 if (add_operand (rs, Pmode))
25586 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25587 else
25588 {
25589 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25590 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25591 stack_pointer_rtx));
25592 /* Describe the effect of INSN to the CFI engine. */
25593 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25594 gen_rtx_SET (end_addr,
25595 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25596 rs)));
25597 }
25598 RTX_FRAME_RELATED_P (insn) = 1;
25599
25600 /* Emit the loop. */
25601 if (TARGET_64BIT)
25602 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25603 stack_pointer_rtx, orig_sp,
25604 end_addr));
25605 else
25606 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25607 stack_pointer_rtx, orig_sp,
25608 end_addr));
25609 RTX_FRAME_RELATED_P (retval) = 1;
25610 /* Describe the effect of INSN to the CFI engine. */
25611 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25612 gen_rtx_SET (stack_pointer_rtx, end_addr));
25613
25614 /* Emit a blockage to ensure the allocation/probing insns are
25615 not optimized, combined, removed, etc. Other cases handle this
25616 within their call to rs6000_emit_allocate_stack_1. */
25617 emit_insn (gen_blockage ());
25618
25619 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25620 }
25621
25622 if (orig_size != rounded_size)
25623 {
25624 /* Allocate (and implicitly probe) any residual space. */
25625 HOST_WIDE_INT residual = orig_size - rounded_size;
25626
25627 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25628
25629 /* If the residual was the only allocation, then we can return the
25630 allocating insn. */
25631 if (!retval)
25632 retval = insn;
25633 }
25634
25635 return retval;
25636 }
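
/* As a worked example (assuming the default 4 KiB probe interval): an
   ORIG_SIZE of 20480 bytes rounds to itself, spans more than one but
   no more than eight intervals, and so takes the unrolled path above:
   five store-with-update allocations of 4096 bytes each, every one of
   which stores the backchain and thereby probes the new page.  */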
25637
25638 /* Emit the correct code for allocating stack space, as insns.
25639 If COPY_REG, make sure a copy of the old frame is left there.
25640 The generated code may use hard register 0 as a temporary. */
25641
25642 static rtx_insn *
25643 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25644 {
25645 rtx_insn *insn;
25646 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25647 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25648 rtx todec = gen_int_mode (-size, Pmode);
25649
25650 if (INTVAL (todec) != -size)
25651 {
25652 warning (0, "stack frame too large");
25653 emit_insn (gen_trap ());
25654 return 0;
25655 }
25656
25657 if (crtl->limit_stack)
25658 {
25659 if (REG_P (stack_limit_rtx)
25660 && REGNO (stack_limit_rtx) > 1
25661 && REGNO (stack_limit_rtx) <= 31)
25662 {
25663 rtx_insn *insn
25664 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25665 gcc_assert (insn);
25666 emit_insn (insn);
25667 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25668 }
25669 else if (SYMBOL_REF_P (stack_limit_rtx)
25670 && TARGET_32BIT
25671 && DEFAULT_ABI == ABI_V4
25672 && !flag_pic)
25673 {
25674 rtx toload = gen_rtx_CONST (VOIDmode,
25675 gen_rtx_PLUS (Pmode,
25676 stack_limit_rtx,
25677 GEN_INT (size)));
25678
25679 emit_insn (gen_elf_high (tmp_reg, toload));
25680 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25681 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25682 const0_rtx));
25683 }
25684 else
25685 warning (0, "stack limit expression is not supported");
25686 }
25687
25688 if (flag_stack_clash_protection)
25689 {
25690 if (size < get_stack_clash_protection_guard_size ())
25691 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25692 else
25693 {
25694 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25695 copy_reg);
25696
25697 /* If we asked for a copy with an offset, then we still need to add
25698 in the offset. */
25699 if (copy_reg && copy_off)
25700 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25701 return insn;
25702 }
25703 }
25704
25705 if (copy_reg)
25706 {
25707 if (copy_off != 0)
25708 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25709 else
25710 emit_move_insn (copy_reg, stack_reg);
25711 }
25712
25713 /* rs6000_emit_allocate_stack_1 does not use gen_frame_mem to
25714 generate the MEM; it grabs the MEM out of the PARALLEL created
25715 by the gen_*_update patterns (the MEM set is the first operation)
25716 and sets the alias set/attributes itself. */
25717 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25718 return insn;
25719 }
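
/* For a typical 64-bit function with a small frame and no stack
   checking enabled, the allocation above ends up as a single
   store-with-update of the stack pointer, e.g. "stdu 1,-144(1)"
   (the size here is illustrative), which both moves the stack
   pointer and stores the backchain.  */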
25720
25721 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25722
25723 #if PROBE_INTERVAL > 32768
25724 #error Cannot use indexed addressing mode for stack probing
25725 #endif
25726
25727 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25728 inclusive. These are offsets from the current stack pointer. */
25729
25730 static void
25731 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25732 {
25733 /* See if we have a constant small number of probes to generate. If so,
25734 that's the easy case. */
25735 if (first + size <= 32768)
25736 {
25737 HOST_WIDE_INT i;
25738
25739 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25740 it exceeds SIZE. If only one probe is needed, this will not
25741 generate any code. Then probe at FIRST + SIZE. */
25742 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25743 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25744 -(first + i)));
25745
25746 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25747 -(first + size)));
25748 }
25749
25750 /* Otherwise, do the same as above, but in a loop. Note that we must be
25751 extra careful with variables wrapping around because we might be at
25752 the very top (or the very bottom) of the address space and we have
25753 to be able to handle this case properly; in particular, we use an
25754 equality test for the loop condition. */
25755 else
25756 {
25757 HOST_WIDE_INT rounded_size;
25758 rtx r12 = gen_rtx_REG (Pmode, 12);
25759 rtx r0 = gen_rtx_REG (Pmode, 0);
25760
25761 /* Sanity check for the addressing mode we're going to use. */
25762 gcc_assert (first <= 32768);
25763
25764 /* Step 1: round SIZE to the previous multiple of the interval. */
25765
25766 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25767
25768
25769 /* Step 2: compute initial and final value of the loop counter. */
25770
25771 /* TEST_ADDR = SP + FIRST. */
25772 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25773 -first)));
25774
25775 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25776 if (rounded_size > 32768)
25777 {
25778 emit_move_insn (r0, GEN_INT (-rounded_size));
25779 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25780 }
25781 else
25782 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25783 -rounded_size)));
25784
25785
25786 /* Step 3: the loop
25787
25788 do
25789 {
25790 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25791 probe at TEST_ADDR
25792 }
25793 while (TEST_ADDR != LAST_ADDR)
25794
25795 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25796 until it is equal to ROUNDED_SIZE. */
25797
25798 if (TARGET_64BIT)
25799 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25800 else
25801 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25802
25803
25804 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25805 that SIZE is equal to ROUNDED_SIZE. */
25806
25807 if (size != rounded_size)
25808 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25809 }
25810 }
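
/* For example, with the default 4 KiB PROBE_INTERVAL, a call with
   FIRST = 16384 and SIZE = 8192 satisfies FIRST + SIZE <= 32768 and
   takes the inline path: one probe at sp-20480 from the loop, then
   the final probe at sp-24576. Larger requests fall through to the
   probe_stack_range loop using r12 and r0 as above.  */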
25811
25812 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25813 addresses, not offsets. */
25814
25815 static const char *
25816 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25817 {
25818 static int labelno = 0;
25819 char loop_lab[32];
25820 rtx xops[2];
25821
25822 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25823
25824 /* Loop. */
25825 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25826
25827 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25828 xops[0] = reg1;
25829 xops[1] = GEN_INT (-PROBE_INTERVAL);
25830 output_asm_insn ("addi %0,%0,%1", xops);
25831
25832 /* Probe at TEST_ADDR. */
25833 xops[1] = gen_rtx_REG (Pmode, 0);
25834 output_asm_insn ("stw %1,0(%0)", xops);
25835
25836 /* Test if TEST_ADDR == LAST_ADDR. */
25837 xops[1] = reg2;
25838 if (TARGET_64BIT)
25839 output_asm_insn ("cmpd 0,%0,%1", xops);
25840 else
25841 output_asm_insn ("cmpw 0,%0,%1", xops);
25842
25843 /* Branch. */
25844 fputs ("\tbne 0,", asm_out_file);
25845 assemble_name_raw (asm_out_file, loop_lab);
25846 fputc ('\n', asm_out_file);
25847
25848 return "";
25849 }
25850
25851 /* This function is called when rs6000_frame_related is processing
25852 SETs within a PARALLEL, and returns whether the REGNO save ought to
25853 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25854 for out-of-line register save functions, store multiple, and the
25855 Darwin world_save. They may contain registers that don't really
25856 need saving. */
25857
25858 static bool
25859 interesting_frame_related_regno (unsigned int regno)
25860 {
25861 /* Apparent saves of r0 are actually saving LR. It doesn't make
25862 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25863 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25864 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25865 as frame related. */
25866 if (regno == 0)
25867 return true;
25868 /* If we see CR2 then we are here on a Darwin world save. Saves of
25869 CR2 signify the whole CR is being saved. This is a long-standing
25870 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25871 that CR needs to be saved. */
25872 if (regno == CR2_REGNO)
25873 return true;
25874 /* Omit frame info for any user-defined global regs. If frame info
25875 is supplied for them, frame unwinding will restore a user reg.
25876 Also omit frame info for any reg we don't need to save, as that
25877 bloats frame info and can cause problems with shrink wrapping.
25878 Since global regs won't be seen as needing to be saved, both of
25879 these conditions are covered by save_reg_p. */
25880 return save_reg_p (regno);
25881 }
25882
25883 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25884 addresses, not offsets.
25885
25886 REG2 contains the backchain that must be stored into *sp at each allocation.
25887
25888 This is subtly different from the Ada probing above in that it tries hard
25889 to prevent attacks that jump the stack guard. Thus, it is never allowed
25890 to allocate more than PROBE_INTERVAL bytes of stack space without a
25891 suitable probe. */
25892
25893 static const char *
25894 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25895 {
25896 static int labelno = 0;
25897 char loop_lab[32];
25898 rtx xops[3];
25899
25900 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25901
25902 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25903
25904 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25905
25906 /* This allocates and probes. */
25907 xops[0] = reg1;
25908 xops[1] = reg2;
25909 xops[2] = GEN_INT (-probe_interval);
25910 if (TARGET_64BIT)
25911 output_asm_insn ("stdu %1,%2(%0)", xops);
25912 else
25913 output_asm_insn ("stwu %1,%2(%0)", xops);
25914
25915 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25916 xops[0] = reg1;
25917 xops[1] = reg3;
25918 if (TARGET_64BIT)
25919 output_asm_insn ("cmpd 0,%0,%1", xops);
25920 else
25921 output_asm_insn ("cmpw 0,%0,%1", xops);
25922
25923 fputs ("\tbne 0,", asm_out_file);
25924 assemble_name_raw (asm_out_file, loop_lab);
25925 fputc ('\n', asm_out_file);
25926
25927 return "";
25928 }
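
/* On a 64-bit target with a 4 KiB probe interval, the loop emitted
   above looks roughly like this (register operands are illustrative):

	.LPSRL0:
		stdu 0,-4096(1)    # allocate a page, store the backchain
		cmpd 0,1,12        # reached the end address yet?
		bne 0,.LPSRL0

   The store-with-update guarantees each page is written (probed) at
   the moment it is allocated.  */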
25929
25930 /* Wrapper around the output_probe_stack_range routines. */
25931 const char *
25932 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25933 {
25934 if (flag_stack_clash_protection)
25935 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25936 else
25937 return output_probe_stack_range_1 (reg1, reg3);
25938 }
25939
25940 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25941 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25942 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25943 deduce these equivalences by itself so it wasn't necessary to hold
25944 its hand so much. Don't be tempted to always supply d2_f_d_e with
25945 the actual cfa register, ie. r31 when we are using a hard frame
25946 pointer. That fails when saving regs off r1, and sched moves the
25947 r31 setup past the reg saves. */
25948
25949 static rtx_insn *
25950 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25951 rtx reg2, rtx repl2)
25952 {
25953 rtx repl;
25954
25955 if (REGNO (reg) == STACK_POINTER_REGNUM)
25956 {
25957 gcc_checking_assert (val == 0);
25958 repl = NULL_RTX;
25959 }
25960 else
25961 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25962 GEN_INT (val));
25963
25964 rtx pat = PATTERN (insn);
25965 if (!repl && !reg2)
25966 {
25967 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25968 if (GET_CODE (pat) == PARALLEL)
25969 for (int i = 0; i < XVECLEN (pat, 0); i++)
25970 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25971 {
25972 rtx set = XVECEXP (pat, 0, i);
25973
25974 if (!REG_P (SET_SRC (set))
25975 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25976 RTX_FRAME_RELATED_P (set) = 1;
25977 }
25978 RTX_FRAME_RELATED_P (insn) = 1;
25979 return insn;
25980 }
25981
25982 /* We expect that 'pat' is either a SET or a PARALLEL containing
25983 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25984 are important so they all have to be marked RTX_FRAME_RELATED_P.
25985 Call simplify_replace_rtx on the SETs rather than the whole insn
25986 so as to leave the other stuff alone (for example USE of r12). */
25987
25988 set_used_flags (pat);
25989 if (GET_CODE (pat) == SET)
25990 {
25991 if (repl)
25992 pat = simplify_replace_rtx (pat, reg, repl);
25993 if (reg2)
25994 pat = simplify_replace_rtx (pat, reg2, repl2);
25995 }
25996 else if (GET_CODE (pat) == PARALLEL)
25997 {
25998 pat = shallow_copy_rtx (pat);
25999 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
26000
26001 for (int i = 0; i < XVECLEN (pat, 0); i++)
26002 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
26003 {
26004 rtx set = XVECEXP (pat, 0, i);
26005
26006 if (repl)
26007 set = simplify_replace_rtx (set, reg, repl);
26008 if (reg2)
26009 set = simplify_replace_rtx (set, reg2, repl2);
26010 XVECEXP (pat, 0, i) = set;
26011
26012 if (!REG_P (SET_SRC (set))
26013 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
26014 RTX_FRAME_RELATED_P (set) = 1;
26015 }
26016 }
26017 else
26018 gcc_unreachable ();
26019
26020 RTX_FRAME_RELATED_P (insn) = 1;
26021 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
26022
26023 return insn;
26024 }
26025
26026 /* Returns an insn that has a vrsave set operation with the
26027 appropriate CLOBBERs. */
26028
26029 static rtx
26030 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
26031 {
26032 int nclobs, i;
26033 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
26034 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26035
26036 clobs[0]
26037 = gen_rtx_SET (vrsave,
26038 gen_rtx_UNSPEC_VOLATILE (SImode,
26039 gen_rtvec (2, reg, vrsave),
26040 UNSPECV_SET_VRSAVE));
26041
26042 nclobs = 1;
26043
26044 /* We need to clobber the registers in the mask so the scheduler
26045 does not move sets to VRSAVE before sets of AltiVec registers.
26046
26047 However, if the function receives nonlocal gotos, reload will set
26048 all call saved registers live. We will end up with:
26049
26050 (set (reg 999) (mem))
26051 (parallel [ (set (reg vrsave) (unspec blah))
26052 (clobber (reg 999))])
26053
26054 The clobber will cause the store into reg 999 to be dead, and
26055 flow will attempt to delete an epilogue insn. In this case, we
26056 need an unspec use/set of the register. */
26057
26058 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26059 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
26060 {
26061 if (!epiloguep || call_used_regs[i])
26062 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
26063 else
26064 {
26065 rtx reg = gen_rtx_REG (V4SImode, i);
26066
26067 clobs[nclobs++]
26068 = gen_rtx_SET (reg,
26069 gen_rtx_UNSPEC (V4SImode,
26070 gen_rtvec (1, reg), 27));
26071 }
26072 }
26073
26074 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
26075
26076 for (i = 0; i < nclobs; ++i)
26077 XVECEXP (insn, 0, i) = clobs[i];
26078
26079 return insn;
26080 }
26081
26082 static rtx
26083 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
26084 {
26085 rtx addr, mem;
26086
26087 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
26088 mem = gen_frame_mem (GET_MODE (reg), addr);
26089 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
26090 }
26091
26092 static rtx
26093 gen_frame_load (rtx reg, rtx frame_reg, int offset)
26094 {
26095 return gen_frame_set (reg, frame_reg, offset, false);
26096 }
26097
26098 static rtx
26099 gen_frame_store (rtx reg, rtx frame_reg, int offset)
26100 {
26101 return gen_frame_set (reg, frame_reg, offset, true);
26102 }
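
/* For example, on a 64-bit target gen_frame_store (r0, r1, 16) yields
   (set (mem:DI (plus:DI (reg:DI 1) (const_int 16))) (reg:DI 0)),
   with the MEM marked as frame memory by gen_frame_mem.  */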
26103
26104 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
26105 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
26106
26107 static rtx_insn *
26108 emit_frame_save (rtx frame_reg, machine_mode mode,
26109 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
26110 {
26111 rtx reg;
26112
26113 /* Some cases that need register indexed addressing. */
26114 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
26115 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
26116
26117 reg = gen_rtx_REG (mode, regno);
26118 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
26119 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
26120 NULL_RTX, NULL_RTX);
26121 }
26122
26123 /* Emit an offset memory reference suitable for a frame store, while
26124 converting to a valid addressing mode. */
26125
26126 static rtx
26127 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
26128 {
26129 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
26130 }
26131
26132 #ifndef TARGET_FIX_AND_CONTINUE
26133 #define TARGET_FIX_AND_CONTINUE 0
26134 #endif
26135
26136 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
26137 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
26138 #define LAST_SAVRES_REGISTER 31
26139 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
26140
26141 enum {
26142 SAVRES_LR = 0x1,
26143 SAVRES_SAVE = 0x2,
26144 SAVRES_REG = 0x0c,
26145 SAVRES_GPR = 0,
26146 SAVRES_FPR = 4,
26147 SAVRES_VR = 8
26148 };
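
/* For example, sel = SAVRES_SAVE | SAVRES_FPR | SAVRES_LR (0x7) names
   the FPR save routine that also saves LR, while sel = SAVRES_GPR
   alone (0x0) names a plain GPR restore. The SAVRES_REG field holds
   the register class; the low bits are independent flags.  */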
26149
26150 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
26151
26152 /* Temporary holding space for an out-of-line register save/restore
26153 routine name. */
26154 static char savres_routine_name[30];
26155
26156 /* Return the name for an out-of-line register save/restore routine.
26157 SEL selects the register class and the save/restore variant. */
26158
26159 static char *
26160 rs6000_savres_routine_name (int regno, int sel)
26161 {
26162 const char *prefix = "";
26163 const char *suffix = "";
26164
26165 /* Different targets are supposed to define
26166 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
26167 routine name could be defined with:
26168
26169 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
26170
26171 This is a nice idea in theory, but in reality, things are
26172 complicated in several ways:
26173
26174 - ELF targets have save/restore routines for GPRs.
26175
26176 - PPC64 ELF targets have routines for save/restore of GPRs that
26177 differ in what they do with the link register, so having a set
26178 prefix doesn't work. (We only use one of the save routines at
26179 the moment, though.)
26180
26181 - PPC32 ELF targets have "exit" versions of the restore routines
26182 that restore the link register and can save some extra space.
26183 These require an extra suffix. (There are also "tail" versions
26184 of the restore routines and "GOT" versions of the save routines,
26185 but we don't generate those at present. Same problems apply,
26186 though.)
26187
26188 We deal with all this by synthesizing our own prefix/suffix and
26189 using that for the simple sprintf call shown above. */
26190 if (DEFAULT_ABI == ABI_V4)
26191 {
26192 if (TARGET_64BIT)
26193 goto aix_names;
26194
26195 if ((sel & SAVRES_REG) == SAVRES_GPR)
26196 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
26197 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26198 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
26199 else if ((sel & SAVRES_REG) == SAVRES_VR)
26200 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26201 else
26202 abort ();
26203
26204 if ((sel & SAVRES_LR))
26205 suffix = "_x";
26206 }
26207 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26208 {
26209 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
26210 /* No out-of-line save/restore routines for GPRs on AIX. */
26211 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
26212 #endif
26213
26214 aix_names:
26215 if ((sel & SAVRES_REG) == SAVRES_GPR)
26216 prefix = ((sel & SAVRES_SAVE)
26217 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
26218 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
26219 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26220 {
26221 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
26222 if ((sel & SAVRES_LR))
26223 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
26224 else
26225 #endif
26226 {
26227 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
26228 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
26229 }
26230 }
26231 else if ((sel & SAVRES_REG) == SAVRES_VR)
26232 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
26233 else
26234 abort ();
26235 }
26236
26237 if (DEFAULT_ABI == ABI_DARWIN)
26238 {
26239 /* The Darwin approach is (slightly) different, in order to be
26240 compatible with code generated by the system toolchain. There is a
26241 single symbol for the start of the save sequence, and the code here
26242 embeds an offset into that code on the basis of the first register
26243 to be saved. */
26244 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
26245 if ((sel & SAVRES_REG) == SAVRES_GPR)
26246 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
26247 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
26248 (regno - 13) * 4, prefix, regno);
26249 else if ((sel & SAVRES_REG) == SAVRES_FPR)
26250 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
26251 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
26252 else if ((sel & SAVRES_REG) == SAVRES_VR)
26253 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
26254 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
26255 else
26256 abort ();
26257 }
26258 else
26259 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
26260
26261 return savres_routine_name;
26262 }
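
/* For example, for regno 29 with sel = SAVRES_GPR | SAVRES_LR the
   function above produces "_restgpr_29_x" under the 32-bit V.4 ABI
   and "_restgpr0_29" under ELFv2.  */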
26263
26264 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
26265 SEL selects the register class and the save/restore variant. */
26266
26267 static rtx
26268 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
26269 {
26270 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
26271 ? info->first_gp_reg_save
26272 : (sel & SAVRES_REG) == SAVRES_FPR
26273 ? info->first_fp_reg_save - 32
26274 : (sel & SAVRES_REG) == SAVRES_VR
26275 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
26276 : -1);
26277 rtx sym;
26278 int select = sel;
26279
26280 /* Don't generate bogus routine names. */
26281 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26282 && regno <= LAST_SAVRES_REGISTER
26283 && select >= 0 && select <= 12);
26284
26285 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26286
26287 if (sym == NULL)
26288 {
26289 char *name;
26290
26291 name = rs6000_savres_routine_name (regno, sel);
26292
26293 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26294 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26295 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26296 }
26297
26298 return sym;
26299 }
26300
26301 /* Emit a sequence of insns, including a stack tie if needed, for
26302 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26303 reset the stack pointer, but move the base of the frame into
26304 reg UPDT_REGNO for use by out-of-line register restore routines. */
26305
26306 static rtx
26307 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26308 unsigned updt_regno)
26309 {
26310 /* If there is nothing to do, don't do anything. */
26311 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26312 return NULL_RTX;
26313
26314 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26315
26316 /* This blockage is needed so that sched doesn't decide to move
26317 the sp change before the register restores. */
26318 if (DEFAULT_ABI == ABI_V4)
26319 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26320 GEN_INT (frame_off)));
26321
26322 /* If we are restoring registers out-of-line, we will be using the
26323 "exit" variants of the restore routines, which will reset the
26324 stack for us. But we do need to point updt_reg into the
26325 right place for those routines. */
26326 if (frame_off != 0)
26327 return emit_insn (gen_add3_insn (updt_reg_rtx,
26328 frame_reg_rtx, GEN_INT (frame_off)));
26329 else
26330 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26333 }
26334
26335 /* Return the register number used as a pointer by out-of-line
26336 save/restore functions. */
26337
26338 static inline unsigned
26339 ptr_regno_for_savres (int sel)
26340 {
26341 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26342 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26343 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26344 }
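
/* Concretely, the mapping above is: AIX and ELFv2 use r1 for FPR
   routines and for any routine that touches LR, r12 otherwise;
   Darwin uses r1 for FPR routines and r11 otherwise; V.4 always
   uses r11.  */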
26345
26346 /* Construct a parallel rtx describing the effect of a call to an
26347 out-of-line register save/restore routine, and emit the insn
26348 or jump_insn as appropriate. */
26349
26350 static rtx_insn *
26351 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26352 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26353 machine_mode reg_mode, int sel)
26354 {
26355 int i;
26356 int offset, start_reg, end_reg, n_regs, use_reg;
26357 int reg_size = GET_MODE_SIZE (reg_mode);
26358 rtx sym;
26359 rtvec p;
26360 rtx par;
26361 rtx_insn *insn;
26362
26363 offset = 0;
26364 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26365 ? info->first_gp_reg_save
26366 : (sel & SAVRES_REG) == SAVRES_FPR
26367 ? info->first_fp_reg_save
26368 : (sel & SAVRES_REG) == SAVRES_VR
26369 ? info->first_altivec_reg_save
26370 : -1);
26371 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26372 ? 32
26373 : (sel & SAVRES_REG) == SAVRES_FPR
26374 ? 64
26375 : (sel & SAVRES_REG) == SAVRES_VR
26376 ? LAST_ALTIVEC_REGNO + 1
26377 : -1);
26378 n_regs = end_reg - start_reg;
26379 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26380 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26381 + n_regs);
26382
26383 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26384 RTVEC_ELT (p, offset++) = ret_rtx;
26385
26386 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26387
26388 sym = rs6000_savres_routine_sym (info, sel);
26389 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26390
26391 use_reg = ptr_regno_for_savres (sel);
26392 if ((sel & SAVRES_REG) == SAVRES_VR)
26393 {
26394 /* Vector regs are saved/restored using [reg+reg] addressing. */
26395 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26396 RTVEC_ELT (p, offset++)
26397 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26398 }
26399 else
26400 RTVEC_ELT (p, offset++)
26401 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26402
26403 for (i = 0; i < end_reg - start_reg; i++)
26404 RTVEC_ELT (p, i + offset)
26405 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26406 frame_reg_rtx, save_area_offset + reg_size * i,
26407 (sel & SAVRES_SAVE) != 0);
26408
26409 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26410 RTVEC_ELT (p, i + offset)
26411 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26412
26413 par = gen_rtx_PARALLEL (VOIDmode, p);
26414
26415 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26416 {
26417 insn = emit_jump_insn (par);
26418 JUMP_LABEL (insn) = ret_rtx;
26419 }
26420 else
26421 insn = emit_insn (par);
26422 return insn;
26423 }
26424
26425 /* Emit prologue code to store CR fields that need to be saved into REG. This
26426 function should only be called when moving the non-volatile CRs to REG;
26427 it is not a general-purpose routine to move the entire set of CRs to REG.
26428 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26429 volatile CRs. */
26430
26431 static void
26432 rs6000_emit_prologue_move_from_cr (rtx reg)
26433 {
26434 /* Only the ELFv2 ABI allows storing only selected fields. */
26435 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26436 {
26437 int i, cr_reg[8], count = 0;
26438
26439 /* Collect CR fields that must be saved. */
26440 for (i = 0; i < 8; i++)
26441 if (save_reg_p (CR0_REGNO + i))
26442 cr_reg[count++] = i;
26443
26444 /* If it's just a single one, use mfcrf. */
26445 if (count == 1)
26446 {
26447 rtvec p = rtvec_alloc (1);
26448 rtvec r = rtvec_alloc (2);
26449 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26450 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26451 RTVEC_ELT (p, 0)
26452 = gen_rtx_SET (reg,
26453 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26454
26455 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26456 return;
26457 }
26458
26459 /* ??? It might be better to handle count == 2 / 3 cases here
26460 as well, using logical operations to combine the values. */
26461 }
26462
26463 emit_insn (gen_prologue_movesi_from_cr (reg));
26464 }
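
/* For example, if CR2 is the only field needing a save, the mask is
   1 << (7 - 2) = 0x20, so the single-field path above emits one
   mfocrf-style move restricted to that field rather than a full
   move-from-CR.  */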
26465
26466 /* Return whether the split-stack arg pointer (r12) is used. */
26467
26468 static bool
26469 split_stack_arg_pointer_used_p (void)
26470 {
26471 /* If the pseudo holding the arg pointer is no longer a pseudo,
26472 then the arg pointer is used. */
26473 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26474 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26475 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26476 return true;
26477
26478 /* Unfortunately we also need to do some code scanning, since
26479 r12 may have been substituted for the pseudo. */
26480 rtx_insn *insn;
26481 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26482 FOR_BB_INSNS (bb, insn)
26483 if (NONDEBUG_INSN_P (insn))
26484 {
26485 /* A call destroys r12. */
26486 if (CALL_P (insn))
26487 return false;
26488
26489 df_ref use;
26490 FOR_EACH_INSN_USE (use, insn)
26491 {
26492 rtx x = DF_REF_REG (use);
26493 if (REG_P (x) && REGNO (x) == 12)
26494 return true;
26495 }
26496 df_ref def;
26497 FOR_EACH_INSN_DEF (def, insn)
26498 {
26499 rtx x = DF_REF_REG (def);
26500 if (REG_P (x) && REGNO (x) == 12)
26501 return false;
26502 }
26503 }
26504 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26505 }
26506
26507 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26508
26509 static bool
26510 rs6000_global_entry_point_needed_p (void)
26511 {
26512 /* Only needed for the ELFv2 ABI. */
26513 if (DEFAULT_ABI != ABI_ELFv2)
26514 return false;
26515
26516 /* With -msingle-pic-base, we assume the whole program shares the same
26517 TOC, so no global entry point prologues are needed anywhere. */
26518 if (TARGET_SINGLE_PIC_BASE)
26519 return false;
26520
26521 /* Ensure we have a global entry point for thunks. ??? We could
26522 avoid that if the target routine doesn't need a global entry point,
26523 but we do not know whether this is the case at this point. */
26524 if (cfun->is_thunk)
26525 return true;
26526
26527 /* For regular functions, rs6000_emit_prologue sets this flag if the
26528 routine ever uses the TOC pointer. */
26529 return cfun->machine->r2_setup_needed;
26530 }
26531
26532 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26533 static sbitmap
26534 rs6000_get_separate_components (void)
26535 {
26536 rs6000_stack_t *info = rs6000_stack_info ();
26537
26538 if (WORLD_SAVE_P (info))
26539 return NULL;
26540
26541 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26542 && !(info->savres_strategy & REST_MULTIPLE));
26543
26544 /* Component 0 is the save/restore of LR (done via GPR0).
26545 Component 2 is the save of the TOC (GPR2).
26546 Components 13..31 are the save/restore of GPR13..GPR31.
26547 Components 46..63 are the save/restore of FPR14..FPR31. */
26548
26549 cfun->machine->n_components = 64;
26550
26551 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26552 bitmap_clear (components);
26553
26554 int reg_size = TARGET_32BIT ? 4 : 8;
26555 int fp_reg_size = 8;
26556
26557 /* The GPRs we need saved to the frame. */
26558 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26559 && (info->savres_strategy & REST_INLINE_GPRS))
26560 {
26561 int offset = info->gp_save_offset;
26562 if (info->push_p)
26563 offset += info->total_size;
26564
26565 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26566 {
26567 if (IN_RANGE (offset, -0x8000, 0x7fff)
26568 && save_reg_p (regno))
26569 bitmap_set_bit (components, regno);
26570
26571 offset += reg_size;
26572 }
26573 }
26574
26575 /* Don't mess with the hard frame pointer. */
26576 if (frame_pointer_needed)
26577 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26578
26579 /* Don't mess with the fixed TOC register. */
26580 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26581 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26582 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26583 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26584
26585 /* The FPRs we need saved to the frame. */
26586 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26587 && (info->savres_strategy & REST_INLINE_FPRS))
26588 {
26589 int offset = info->fp_save_offset;
26590 if (info->push_p)
26591 offset += info->total_size;
26592
26593 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26594 {
26595 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26596 bitmap_set_bit (components, regno);
26597
26598 offset += fp_reg_size;
26599 }
26600 }
26601
26602 /* Optimize LR save and restore if we can. This is component 0. Any
26603 out-of-line register save/restore routines need LR. */
26604 if (info->lr_save_p
26605 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26606 && (info->savres_strategy & SAVE_INLINE_GPRS)
26607 && (info->savres_strategy & REST_INLINE_GPRS)
26608 && (info->savres_strategy & SAVE_INLINE_FPRS)
26609 && (info->savres_strategy & REST_INLINE_FPRS)
26610 && (info->savres_strategy & SAVE_INLINE_VRS)
26611 && (info->savres_strategy & REST_INLINE_VRS))
26612 {
26613 int offset = info->lr_save_offset;
26614 if (info->push_p)
26615 offset += info->total_size;
26616 if (IN_RANGE (offset, -0x8000, 0x7fff))
26617 bitmap_set_bit (components, 0);
26618 }
26619
26620 /* Optimize saving the TOC. This is component 2. */
26621 if (cfun->machine->save_toc_in_prologue)
26622 bitmap_set_bit (components, 2);
26623
26624 return components;
26625 }
26626
26627 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26628 static sbitmap
26629 rs6000_components_for_bb (basic_block bb)
26630 {
26631 rs6000_stack_t *info = rs6000_stack_info ();
26632
26633 bitmap in = DF_LIVE_IN (bb);
26634 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26635 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26636
26637 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26638 bitmap_clear (components);
26639
26640 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26641
26642 /* GPRs. */
26643 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26644 if (bitmap_bit_p (in, regno)
26645 || bitmap_bit_p (gen, regno)
26646 || bitmap_bit_p (kill, regno))
26647 bitmap_set_bit (components, regno);
26648
26649 /* FPRs. */
26650 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26651 if (bitmap_bit_p (in, regno)
26652 || bitmap_bit_p (gen, regno)
26653 || bitmap_bit_p (kill, regno))
26654 bitmap_set_bit (components, regno);
26655
26656 /* The link register. */
26657 if (bitmap_bit_p (in, LR_REGNO)
26658 || bitmap_bit_p (gen, LR_REGNO)
26659 || bitmap_bit_p (kill, LR_REGNO))
26660 bitmap_set_bit (components, 0);
26661
26662 /* The TOC save. */
26663 if (bitmap_bit_p (in, TOC_REGNUM)
26664 || bitmap_bit_p (gen, TOC_REGNUM)
26665 || bitmap_bit_p (kill, TOC_REGNUM))
26666 bitmap_set_bit (components, 2);
26667
26668 return components;
26669 }
26670
26671 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26672 static void
26673 rs6000_disqualify_components (sbitmap components, edge e,
26674 sbitmap edge_components, bool /*is_prologue*/)
26675 {
26676 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26677 live where we want to place that code. */
26678 if (bitmap_bit_p (edge_components, 0)
26679 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26680 {
26681 if (dump_file)
26682 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26683 "on entry to bb %d\n", e->dest->index);
26684 bitmap_clear_bit (components, 0);
26685 }
26686 }
26687
26688 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26689 static void
26690 rs6000_emit_prologue_components (sbitmap components)
26691 {
26692 rs6000_stack_t *info = rs6000_stack_info ();
26693 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26694 ? HARD_FRAME_POINTER_REGNUM
26695 : STACK_POINTER_REGNUM);
26696
26697 machine_mode reg_mode = Pmode;
26698 int reg_size = TARGET_32BIT ? 4 : 8;
26699 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26700 int fp_reg_size = 8;
26701
26702 /* Prologue for LR. */
26703 if (bitmap_bit_p (components, 0))
26704 {
26705 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26706 rtx reg = gen_rtx_REG (reg_mode, 0);
26707 rtx_insn *insn = emit_move_insn (reg, lr);
26708 RTX_FRAME_RELATED_P (insn) = 1;
26709 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26710
26711 int offset = info->lr_save_offset;
26712 if (info->push_p)
26713 offset += info->total_size;
26714
26715 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26716 RTX_FRAME_RELATED_P (insn) = 1;
26717 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26718 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26719 }
26720
26721 /* Prologue for TOC. */
26722 if (bitmap_bit_p (components, 2))
26723 {
26724 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26725 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26726 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26727 }
26728
26729 /* Prologue for the GPRs. */
26730 int offset = info->gp_save_offset;
26731 if (info->push_p)
26732 offset += info->total_size;
26733
26734 for (int i = info->first_gp_reg_save; i < 32; i++)
26735 {
26736 if (bitmap_bit_p (components, i))
26737 {
26738 rtx reg = gen_rtx_REG (reg_mode, i);
26739 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26740 RTX_FRAME_RELATED_P (insn) = 1;
26741 rtx set = copy_rtx (single_set (insn));
26742 add_reg_note (insn, REG_CFA_OFFSET, set);
26743 }
26744
26745 offset += reg_size;
26746 }
26747
26748 /* Prologue for the FPRs. */
26749 offset = info->fp_save_offset;
26750 if (info->push_p)
26751 offset += info->total_size;
26752
26753 for (int i = info->first_fp_reg_save; i < 64; i++)
26754 {
26755 if (bitmap_bit_p (components, i))
26756 {
26757 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26758 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26759 RTX_FRAME_RELATED_P (insn) = 1;
26760 rtx set = copy_rtx (single_set (insn));
26761 add_reg_note (insn, REG_CFA_OFFSET, set);
26762 }
26763
26764 offset += fp_reg_size;
26765 }
26766 }
26767
26768 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26769 static void
26770 rs6000_emit_epilogue_components (sbitmap components)
26771 {
26772 rs6000_stack_t *info = rs6000_stack_info ();
26773 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26774 ? HARD_FRAME_POINTER_REGNUM
26775 : STACK_POINTER_REGNUM);
26776
26777 machine_mode reg_mode = Pmode;
26778 int reg_size = TARGET_32BIT ? 4 : 8;
26779
26780 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26781 int fp_reg_size = 8;
26782
26783 /* Epilogue for the FPRs. */
26784 int offset = info->fp_save_offset;
26785 if (info->push_p)
26786 offset += info->total_size;
26787
26788 for (int i = info->first_fp_reg_save; i < 64; i++)
26789 {
26790 if (bitmap_bit_p (components, i))
26791 {
26792 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26793 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26794 RTX_FRAME_RELATED_P (insn) = 1;
26795 add_reg_note (insn, REG_CFA_RESTORE, reg);
26796 }
26797
26798 offset += fp_reg_size;
26799 }
26800
26801 /* Epilogue for the GPRs. */
26802 offset = info->gp_save_offset;
26803 if (info->push_p)
26804 offset += info->total_size;
26805
26806 for (int i = info->first_gp_reg_save; i < 32; i++)
26807 {
26808 if (bitmap_bit_p (components, i))
26809 {
26810 rtx reg = gen_rtx_REG (reg_mode, i);
26811 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26812 RTX_FRAME_RELATED_P (insn) = 1;
26813 add_reg_note (insn, REG_CFA_RESTORE, reg);
26814 }
26815
26816 offset += reg_size;
26817 }
26818
26819 /* Epilogue for LR. */
26820 if (bitmap_bit_p (components, 0))
26821 {
26822 int offset = info->lr_save_offset;
26823 if (info->push_p)
26824 offset += info->total_size;
26825
26826 rtx reg = gen_rtx_REG (reg_mode, 0);
26827 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26828
26829 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26830 insn = emit_move_insn (lr, reg);
26831 RTX_FRAME_RELATED_P (insn) = 1;
26832 add_reg_note (insn, REG_CFA_RESTORE, lr);
26833 }
26834 }
26835
26836 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26837 static void
26838 rs6000_set_handled_components (sbitmap components)
26839 {
26840 rs6000_stack_t *info = rs6000_stack_info ();
26841
26842 for (int i = info->first_gp_reg_save; i < 32; i++)
26843 if (bitmap_bit_p (components, i))
26844 cfun->machine->gpr_is_wrapped_separately[i] = true;
26845
26846 for (int i = info->first_fp_reg_save; i < 64; i++)
26847 if (bitmap_bit_p (components, i))
26848 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26849
26850 if (bitmap_bit_p (components, 0))
26851 cfun->machine->lr_is_wrapped_separately = true;
26852
26853 if (bitmap_bit_p (components, 2))
26854 cfun->machine->toc_is_wrapped_separately = true;
26855 }
26856
26857 /* VRSAVE is a bit vector representing which AltiVec registers
26858 are used. The OS uses this to determine which vector
26859 registers to save on a context switch. We need to save
26860 VRSAVE on the stack frame, add whatever AltiVec registers we
26861 used in this function, and do the corresponding magic in the
26862 epilogue. */
26863 static void
26864 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26865 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26866 {
26867 /* Get VRSAVE into a GPR. */
26868 rtx reg = gen_rtx_REG (SImode, save_regno);
26869 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26870 if (TARGET_MACHO)
26871 emit_insn (gen_get_vrsave_internal (reg));
26872 else
26873 emit_insn (gen_rtx_SET (reg, vrsave));
26874
26875 /* Save VRSAVE. */
26876 int offset = info->vrsave_save_offset + frame_off;
26877 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26878
26879 /* Include the registers in the mask. */
26880 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26881
26882 emit_insn (generate_set_vrsave (reg, info, 0));
26883 }
26884
26885 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26886 called, it left the arg pointer to the old stack in r29. Otherwise, the
26887 arg pointer is the top of the current frame. */
26888 static void
26889 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26890 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26891 {
26892 cfun->machine->split_stack_argp_used = true;
26893
26894 if (sp_adjust)
26895 {
26896 rtx r12 = gen_rtx_REG (Pmode, 12);
26897 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26898 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26899 emit_insn_before (set_r12, sp_adjust);
26900 }
26901 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26902 {
26903 rtx r12 = gen_rtx_REG (Pmode, 12);
26904 if (frame_off == 0)
26905 emit_move_insn (r12, frame_reg_rtx);
26906 else
26907 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26908 }
26909
26910 if (info->push_p)
26911 {
26912 rtx r12 = gen_rtx_REG (Pmode, 12);
26913 rtx r29 = gen_rtx_REG (Pmode, 29);
26914 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26915 rtx not_more = gen_label_rtx ();
26916 rtx jump;
26917
26918 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26919 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26920 gen_rtx_LABEL_REF (VOIDmode, not_more),
26921 pc_rtx);
26922 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26923 JUMP_LABEL (jump) = not_more;
26924 LABEL_NUSES (not_more) += 1;
26925 emit_move_insn (r12, r29);
26926 emit_label (not_more);
26927 }
26928 }
26929
26930 /* Emit function prologue as insns. */
26931
26932 void
26933 rs6000_emit_prologue (void)
26934 {
26935 rs6000_stack_t *info = rs6000_stack_info ();
26936 machine_mode reg_mode = Pmode;
26937 int reg_size = TARGET_32BIT ? 4 : 8;
26938 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26939 int fp_reg_size = 8;
26940 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26941 rtx frame_reg_rtx = sp_reg_rtx;
26942 unsigned int cr_save_regno;
26943 rtx cr_save_rtx = NULL_RTX;
26944 rtx_insn *insn;
26945 int strategy;
26946 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26947 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26948 && call_used_regs[STATIC_CHAIN_REGNUM]);
26949 int using_split_stack = (flag_split_stack
26950 && (lookup_attribute ("no_split_stack",
26951 DECL_ATTRIBUTES (cfun->decl))
26952 == NULL));
26953
26954 /* Offset to top of frame for frame_reg and sp respectively. */
26955 HOST_WIDE_INT frame_off = 0;
26956 HOST_WIDE_INT sp_off = 0;
26957 /* sp_adjust is the stack adjusting instruction, tracked so that the
26958 insn setting up the split-stack arg pointer can be emitted just
26959 prior to it, when r12 is not used here for other purposes. */
26960 rtx_insn *sp_adjust = 0;
26961
26962 #if CHECKING_P
26963 /* Track and check usage of r0, r11, r12. */
26964 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26965 #define START_USE(R) do \
26966 { \
26967 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26968 reg_inuse |= 1 << (R); \
26969 } while (0)
26970 #define END_USE(R) do \
26971 { \
26972 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26973 reg_inuse &= ~(1 << (R)); \
26974 } while (0)
26975 #define NOT_INUSE(R) do \
26976 { \
26977 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26978 } while (0)
26979 #else
26980 #define START_USE(R) do {} while (0)
26981 #define END_USE(R) do {} while (0)
26982 #define NOT_INUSE(R) do {} while (0)
26983 #endif
26984
26985 if (DEFAULT_ABI == ABI_ELFv2
26986 && !TARGET_SINGLE_PIC_BASE)
26987 {
26988 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26989
26990 /* With -mminimal-toc we may generate an extra use of r2 below. */
26991 if (TARGET_TOC && TARGET_MINIMAL_TOC
26992 && !constant_pool_empty_p ())
26993 cfun->machine->r2_setup_needed = true;
26994 }
26995
26996
26997 if (flag_stack_usage_info)
26998 current_function_static_stack_size = info->total_size;
26999
27000 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
27001 {
27002 HOST_WIDE_INT size = info->total_size;
27003
27004 if (crtl->is_leaf && !cfun->calls_alloca)
27005 {
27006 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
27007 rs6000_emit_probe_stack_range (get_stack_check_protect (),
27008 size - get_stack_check_protect ());
27009 }
27010 else if (size > 0)
27011 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
27012 }
27013
27014 if (TARGET_FIX_AND_CONTINUE)
27015 {
27016 /* GDB on Darwin arranges to forward a function from the old
27017 address by modifying the first 5 instructions of the function
27018 to branch to the overriding function. This is necessary to
27019 permit function pointers that point to the old function to
27020 actually forward to the new function. */
27021 emit_insn (gen_nop ());
27022 emit_insn (gen_nop ());
27023 emit_insn (gen_nop ());
27024 emit_insn (gen_nop ());
27025 emit_insn (gen_nop ());
27026 }
27027
27028 /* Handle world saves specially here. */
27029 if (WORLD_SAVE_P (info))
27030 {
27031 int i, j, sz;
27032 rtx treg;
27033 rtvec p;
27034 rtx reg0;
27035
27036 /* save_world expects lr in r0. */
27037 reg0 = gen_rtx_REG (Pmode, 0);
27038 if (info->lr_save_p)
27039 {
27040 insn = emit_move_insn (reg0,
27041 gen_rtx_REG (Pmode, LR_REGNO));
27042 RTX_FRAME_RELATED_P (insn) = 1;
27043 }
27044
27045 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
27046 assumptions about the offsets of various bits of the stack
27047 frame. */
27048 gcc_assert (info->gp_save_offset == -220
27049 && info->fp_save_offset == -144
27050 && info->lr_save_offset == 8
27051 && info->cr_save_offset == 4
27052 && info->push_p
27053 && info->lr_save_p
27054 && (!crtl->calls_eh_return
27055 || info->ehrd_offset == -432)
27056 && info->vrsave_save_offset == -224
27057 && info->altivec_save_offset == -416);
27058
27059 treg = gen_rtx_REG (SImode, 11);
27060 emit_move_insn (treg, GEN_INT (-info->total_size));
27061
27062 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
27063 in R11. It also clobbers R12, so beware! */
27064
27065 /* Preserve CR2 for save_world prologues. */
27066 sz = 5;
27067 sz += 32 - info->first_gp_reg_save;
27068 sz += 64 - info->first_fp_reg_save;
27069 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
27070 p = rtvec_alloc (sz);
27071 j = 0;
27072 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
27073 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
27074 gen_rtx_SYMBOL_REF (Pmode,
27075 "*save_world"));
27076 /* We do floats first so that the instruction pattern matches
27077 properly. */
27078 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
27079 RTVEC_ELT (p, j++)
27080 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27081 info->first_fp_reg_save + i),
27082 frame_reg_rtx,
27083 info->fp_save_offset + frame_off + 8 * i);
27084 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27085 RTVEC_ELT (p, j++)
27086 = gen_frame_store (gen_rtx_REG (V4SImode,
27087 info->first_altivec_reg_save + i),
27088 frame_reg_rtx,
27089 info->altivec_save_offset + frame_off + 16 * i);
27090 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27091 RTVEC_ELT (p, j++)
27092 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27093 frame_reg_rtx,
27094 info->gp_save_offset + frame_off + reg_size * i);
27095
27096 /* CR register traditionally saved as CR2. */
27097 RTVEC_ELT (p, j++)
27098 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
27099 frame_reg_rtx, info->cr_save_offset + frame_off);
27100 /* Explain about use of R0. */
27101 if (info->lr_save_p)
27102 RTVEC_ELT (p, j++)
27103 = gen_frame_store (reg0,
27104 frame_reg_rtx, info->lr_save_offset + frame_off);
27105 /* Explain what happens to the stack pointer. */
27106 {
27107 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
27108 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
27109 }
27110
27111 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27112 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27113 treg, GEN_INT (-info->total_size));
27114 sp_off = frame_off = info->total_size;
27115 }
27116
27117 strategy = info->savres_strategy;
27118
27119 /* For V.4, update stack before we do any saving and set back pointer. */
27120 if (! WORLD_SAVE_P (info)
27121 && info->push_p
27122 && (DEFAULT_ABI == ABI_V4
27123 || crtl->calls_eh_return))
27124 {
27125 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
27126 || !(strategy & SAVE_INLINE_GPRS)
27127 || !(strategy & SAVE_INLINE_VRS));
27128 int ptr_regno = -1;
27129 rtx ptr_reg = NULL_RTX;
27130 int ptr_off = 0;
27131
27132 if (info->total_size < 32767)
27133 frame_off = info->total_size;
27134 else if (need_r11)
27135 ptr_regno = 11;
27136 else if (info->cr_save_p
27137 || info->lr_save_p
27138 || info->first_fp_reg_save < 64
27139 || info->first_gp_reg_save < 32
27140 || info->altivec_size != 0
27141 || info->vrsave_size != 0
27142 || crtl->calls_eh_return)
27143 ptr_regno = 12;
27144 else
27145 {
27146 /* The prologue won't be saving any regs so there is no need
27147 to set up a frame register to access any frame save area.
27148 We also won't be using frame_off anywhere below, but set
27149 the correct value anyway to protect against future
27150 changes to this function. */
27151 frame_off = info->total_size;
27152 }
27153 if (ptr_regno != -1)
27154 {
27155 /* Set up the frame offset to that needed by the first
27156 out-of-line save function. */
27157 START_USE (ptr_regno);
27158 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27159 frame_reg_rtx = ptr_reg;
27160 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
27161 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
27162 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
27163 ptr_off = info->gp_save_offset + info->gp_size;
27164 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
27165 ptr_off = info->altivec_save_offset + info->altivec_size;
27166 frame_off = -ptr_off;
27167 }
27168 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27169 ptr_reg, ptr_off);
27170 if (REGNO (frame_reg_rtx) == 12)
27171 sp_adjust = 0;
27172 sp_off = info->total_size;
27173 if (frame_reg_rtx != sp_reg_rtx)
27174 rs6000_emit_stack_tie (frame_reg_rtx, false);
27175 }
27176
27177 /* If we use the link register, get it into r0. */
27178 if (!WORLD_SAVE_P (info) && info->lr_save_p
27179 && !cfun->machine->lr_is_wrapped_separately)
27180 {
27181 rtx addr, reg, mem;
27182
27183 reg = gen_rtx_REG (Pmode, 0);
27184 START_USE (0);
27185 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
27186 RTX_FRAME_RELATED_P (insn) = 1;
27187
27188 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
27189 | SAVE_NOINLINE_FPRS_SAVES_LR)))
27190 {
27191 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27192 GEN_INT (info->lr_save_offset + frame_off));
27193 mem = gen_rtx_MEM (Pmode, addr);
27194 /* This should not be of rs6000_sr_alias_set, because of
27195 __builtin_return_address. */
27196
27197 insn = emit_move_insn (mem, reg);
27198 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27199 NULL_RTX, NULL_RTX);
27200 END_USE (0);
27201 }
27202 }
27203
27204 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
27205 r12 will be needed by the out-of-line GPR save. */
27206 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27207 && !(strategy & (SAVE_INLINE_GPRS
27208 | SAVE_NOINLINE_GPRS_SAVES_LR))
27209 ? 11 : 12);
27210 if (!WORLD_SAVE_P (info)
27211 && info->cr_save_p
27212 && REGNO (frame_reg_rtx) != cr_save_regno
27213 && !(using_static_chain_p && cr_save_regno == 11)
27214 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
27215 {
27216 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
27217 START_USE (cr_save_regno);
27218 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27219 }
27220
27221 /* Do any required saving of fpr's. If only one or two to save, do
27222 it ourselves. Otherwise, call a function. */
27223 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
27224 {
27225 int offset = info->fp_save_offset + frame_off;
27226 for (int i = info->first_fp_reg_save; i < 64; i++)
27227 {
27228 if (save_reg_p (i)
27229 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
27230 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
27231 sp_off - frame_off);
27232
27233 offset += fp_reg_size;
27234 }
27235 }
27236 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
27237 {
27238 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27239 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27240 unsigned ptr_regno = ptr_regno_for_savres (sel);
27241 rtx ptr_reg = frame_reg_rtx;
27242
27243 if (REGNO (frame_reg_rtx) == ptr_regno)
27244 gcc_checking_assert (frame_off == 0);
27245 else
27246 {
27247 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27248 NOT_INUSE (ptr_regno);
27249 emit_insn (gen_add3_insn (ptr_reg,
27250 frame_reg_rtx, GEN_INT (frame_off)));
27251 }
27252 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27253 info->fp_save_offset,
27254 info->lr_save_offset,
27255 DFmode, sel);
27256 rs6000_frame_related (insn, ptr_reg, sp_off,
27257 NULL_RTX, NULL_RTX);
27258 if (lr)
27259 END_USE (0);
27260 }
27261
27262 /* Save GPRs. This is done as a PARALLEL if we are using
27263 the store-multiple instructions. */
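/* An illustrative sketch, not emitted verbatim: on 32-bit targets the
   PARALLEL built in the SAVE_MULTIPLE arm below can match the
   store-multiple pattern and collapse into a single
	stmw 26,-24(1)
   storing r26..r31 (start register and offset illustrative).  */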
27264 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
27265 {
27266 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
27267 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
27268 unsigned ptr_regno = ptr_regno_for_savres (sel);
27269 rtx ptr_reg = frame_reg_rtx;
27270 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
27271 int end_save = info->gp_save_offset + info->gp_size;
27272 int ptr_off;
27273
27274 if (ptr_regno == 12)
27275 sp_adjust = 0;
27276 if (!ptr_set_up)
27277 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27278
27279 /* Need to adjust r11 (r12) if we saved any FPRs. */
27280 if (end_save + frame_off != 0)
27281 {
27282 rtx offset = GEN_INT (end_save + frame_off);
27283
27284 if (ptr_set_up)
27285 frame_off = -end_save;
27286 else
27287 NOT_INUSE (ptr_regno);
27288 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27289 }
27290 else if (!ptr_set_up)
27291 {
27292 NOT_INUSE (ptr_regno);
27293 emit_move_insn (ptr_reg, frame_reg_rtx);
27294 }
27295 ptr_off = -end_save;
27296 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27297 info->gp_save_offset + ptr_off,
27298 info->lr_save_offset + ptr_off,
27299 reg_mode, sel);
27300 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27301 NULL_RTX, NULL_RTX);
27302 if (lr)
27303 END_USE (0);
27304 }
27305 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27306 {
27307 rtvec p;
27308 int i;
27309 p = rtvec_alloc (32 - info->first_gp_reg_save);
27310 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27311 RTVEC_ELT (p, i)
27312 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27313 frame_reg_rtx,
27314 info->gp_save_offset + frame_off + reg_size * i);
27315 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27316 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27317 NULL_RTX, NULL_RTX);
27318 }
27319 else if (!WORLD_SAVE_P (info))
27320 {
27321 int offset = info->gp_save_offset + frame_off;
27322 for (int i = info->first_gp_reg_save; i < 32; i++)
27323 {
27324 if (save_reg_p (i)
27325 && !cfun->machine->gpr_is_wrapped_separately[i])
27326 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27327 sp_off - frame_off);
27328
27329 offset += reg_size;
27330 }
27331 }
27332
27333 if (crtl->calls_eh_return)
27334 {
27335 unsigned int i;
27336 rtvec p;
27337
27338 for (i = 0; ; ++i)
27339 {
27340 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27341 if (regno == INVALID_REGNUM)
27342 break;
27343 }
27344
27345 p = rtvec_alloc (i);
27346
27347 for (i = 0; ; ++i)
27348 {
27349 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27350 if (regno == INVALID_REGNUM)
27351 break;
27352
27353 rtx set
27354 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27355 sp_reg_rtx,
27356 info->ehrd_offset + sp_off + reg_size * (int) i);
27357 RTVEC_ELT (p, i) = set;
27358 RTX_FRAME_RELATED_P (set) = 1;
27359 }
27360
27361 insn = emit_insn (gen_blockage ());
27362 RTX_FRAME_RELATED_P (insn) = 1;
27363 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27364 }
27365
27366 /* In AIX ABI we need to make sure r2 is really saved. */
27367 if (TARGET_AIX && crtl->calls_eh_return)
27368 {
27369 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27370 rtx join_insn, note;
27371 rtx_insn *save_insn;
27372 long toc_restore_insn;
27373
27374 tmp_reg = gen_rtx_REG (Pmode, 11);
27375 tmp_reg_si = gen_rtx_REG (SImode, 11);
27376 if (using_static_chain_p)
27377 {
27378 START_USE (0);
27379 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27380 }
27381 else
27382 START_USE (11);
27383 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27384 /* Peek at the instruction to which this function returns. If it's
27385 restoring r2, then we know we've already saved r2. We can't
27386 unconditionally save r2 because the value we have will already
27387 be updated if we arrived at this function via a plt call or
27388 toc adjusting stub. */
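/* As an illustration (the slot value varies by ABI): with
   RS6000_TOC_SAVE_SLOT == 40, the 64-bit comparison value built below
   is 0xE8410028, the encoding of
	ld 2,40(1)
   while the 32-bit form 0x80410000 + slot encodes lwz 2,slot(1).  */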
27389 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27390 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27391 + RS6000_TOC_SAVE_SLOT);
27392 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27393 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27394 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27395 validate_condition_mode (EQ, CCUNSmode);
27396 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27397 emit_insn (gen_rtx_SET (compare_result,
27398 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27399 toc_save_done = gen_label_rtx ();
27400 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27401 gen_rtx_EQ (VOIDmode, compare_result,
27402 const0_rtx),
27403 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27404 pc_rtx);
27405 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27406 JUMP_LABEL (jump) = toc_save_done;
27407 LABEL_NUSES (toc_save_done) += 1;
27408
27409 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27410 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27411 sp_off - frame_off);
27412
27413 emit_label (toc_save_done);
27414
27415 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27416 have a CFG that has different saves along different paths.
27417 Move the note to a dummy blockage insn, which describes that
27418 R2 is unconditionally saved after the label. */
27419 /* ??? An alternate representation might be a special insn pattern
27420 containing both the branch and the store. That might give the
27421 code that minimizes the number of DW_CFA_advance opcodes more
27422 freedom in placing the annotations. */
27423 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27424 if (note)
27425 remove_note (save_insn, note);
27426 else
27427 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27428 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27429 RTX_FRAME_RELATED_P (save_insn) = 0;
27430
27431 join_insn = emit_insn (gen_blockage ());
27432 REG_NOTES (join_insn) = note;
27433 RTX_FRAME_RELATED_P (join_insn) = 1;
27434
27435 if (using_static_chain_p)
27436 {
27437 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27438 END_USE (0);
27439 }
27440 else
27441 END_USE (11);
27442 }
27443
27444 /* Save CR if we use any that must be preserved. */
27445 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27446 {
27447 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27448 GEN_INT (info->cr_save_offset + frame_off));
27449 rtx mem = gen_frame_mem (SImode, addr);
27450
27451 /* If we didn't copy cr before, do so now using r0. */
27452 if (cr_save_rtx == NULL_RTX)
27453 {
27454 START_USE (0);
27455 cr_save_rtx = gen_rtx_REG (SImode, 0);
27456 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27457 }
27458
27459 /* Saving CR requires a two-instruction sequence: one instruction
27460 to move the CR to a general-purpose register, and a second
27461 instruction that stores the GPR to memory.
27462
27463 We do not emit any DWARF CFI records for the first of these,
27464 because we cannot properly represent the fact that CR is saved in
27465 a register. One reason is that we cannot express that multiple
27466 CR fields are saved; another reason is that on 64-bit, the size
27467 of the CR register in DWARF (4 bytes) differs from the size of
27468 a general-purpose register.
27469
27470 This means if any intervening instruction were to clobber one of
27471 the call-saved CR fields, we'd have incorrect CFI. To prevent
27472 this from happening, we mark the store to memory as a use of
27473 those CR fields, which prevents any such instruction from being
27474 scheduled in between the two instructions. */
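/* A minimal sketch of the resulting sequence (GPR and offset
   illustrative):
	mfcr 0		# move all CR fields into r0
	stw 0,8(1)	# store the CR image in its save slot
   The USEs built below keep any insn that clobbers a saved CR field
   from being scheduled between the two.  */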
27475 rtx crsave_v[9];
27476 int n_crsave = 0;
27477 int i;
27478
27479 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27480 for (i = 0; i < 8; i++)
27481 if (save_reg_p (CR0_REGNO + i))
27482 crsave_v[n_crsave++]
27483 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27484
27485 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27486 gen_rtvec_v (n_crsave, crsave_v)));
27487 END_USE (REGNO (cr_save_rtx));
27488
27489 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27490 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27491 so we need to construct a frame expression manually. */
27492 RTX_FRAME_RELATED_P (insn) = 1;
27493
27494 /* Update address to be stack-pointer relative, like
27495 rs6000_frame_related would do. */
27496 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27497 GEN_INT (info->cr_save_offset + sp_off));
27498 mem = gen_frame_mem (SImode, addr);
27499
27500 if (DEFAULT_ABI == ABI_ELFv2)
27501 {
27502 /* In the ELFv2 ABI we generate separate CFI records for each
27503 CR field that was actually saved. They all point to the
27504 same 32-bit stack slot. */
27505 rtx crframe[8];
27506 int n_crframe = 0;
27507
27508 for (i = 0; i < 8; i++)
27509 if (save_reg_p (CR0_REGNO + i))
27510 {
27511 crframe[n_crframe]
27512 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27513
27514 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27515 n_crframe++;
27516 }
27517
27518 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27519 gen_rtx_PARALLEL (VOIDmode,
27520 gen_rtvec_v (n_crframe, crframe)));
27521 }
27522 else
27523 {
27524 /* In other ABIs, by convention, we use a single CR regnum to
27525 represent the fact that all call-saved CR fields are saved.
27526 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27527 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27528 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27529 }
27530 }
27531
27532 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27533 *separate* slots if the routine calls __builtin_eh_return, so
27534 that they can be independently restored by the unwinder. */
27535 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27536 {
27537 int i, cr_off = info->ehcr_offset;
27538 rtx crsave;
27539
27540 /* ??? We might get better performance by using multiple mfocrf
27541 instructions. */
27542 crsave = gen_rtx_REG (SImode, 0);
27543 emit_insn (gen_prologue_movesi_from_cr (crsave));
27544
27545 for (i = 0; i < 8; i++)
27546 if (!call_used_regs[CR0_REGNO + i])
27547 {
27548 rtvec p = rtvec_alloc (2);
27549 RTVEC_ELT (p, 0)
27550 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27551 RTVEC_ELT (p, 1)
27552 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27553
27554 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27555
27556 RTX_FRAME_RELATED_P (insn) = 1;
27557 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27558 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27559 sp_reg_rtx, cr_off + sp_off));
27560
27561 cr_off += reg_size;
27562 }
27563 }
27564
27565 /* If we are emitting stack probes, but allocate no stack, then
27566 just note that in the dump file. */
27567 if (flag_stack_clash_protection
27568 && dump_file
27569 && !info->push_p)
27570 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27571
27572 /* Update stack and set back pointer unless this is V.4,
27573 for which it was done previously. */
27574 if (!WORLD_SAVE_P (info) && info->push_p
27575 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27576 {
27577 rtx ptr_reg = NULL;
27578 int ptr_off = 0;
27579
27580 /* If saving altivec regs we need to be able to address all save
27581 locations using a 16-bit offset. */
27582 if ((strategy & SAVE_INLINE_VRS) == 0
27583 || (info->altivec_size != 0
27584 && (info->altivec_save_offset + info->altivec_size - 16
27585 + info->total_size - frame_off) > 32767)
27586 || (info->vrsave_size != 0
27587 && (info->vrsave_save_offset
27588 + info->total_size - frame_off) > 32767))
27589 {
27590 int sel = SAVRES_SAVE | SAVRES_VR;
27591 unsigned ptr_regno = ptr_regno_for_savres (sel);
27592
27593 if (using_static_chain_p
27594 && ptr_regno == STATIC_CHAIN_REGNUM)
27595 ptr_regno = 12;
27596 if (REGNO (frame_reg_rtx) != ptr_regno)
27597 START_USE (ptr_regno);
27598 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27599 frame_reg_rtx = ptr_reg;
27600 ptr_off = info->altivec_save_offset + info->altivec_size;
27601 frame_off = -ptr_off;
27602 }
27603 else if (REGNO (frame_reg_rtx) == 1)
27604 frame_off = info->total_size;
27605 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27606 ptr_reg, ptr_off);
27607 if (REGNO (frame_reg_rtx) == 12)
27608 sp_adjust = 0;
27609 sp_off = info->total_size;
27610 if (frame_reg_rtx != sp_reg_rtx)
27611 rs6000_emit_stack_tie (frame_reg_rtx, false);
27612 }
27613
27614 /* Set frame pointer, if needed. */
27615 if (frame_pointer_needed)
27616 {
27617 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27618 sp_reg_rtx);
27619 RTX_FRAME_RELATED_P (insn) = 1;
27620 }
27621
27622 /* Save AltiVec registers if needed. Save here because the red zone does
27623 not always include AltiVec registers. */
27624 if (!WORLD_SAVE_P (info)
27625 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27626 {
27627 int end_save = info->altivec_save_offset + info->altivec_size;
27628 int ptr_off;
27629 /* Oddly, the vector save/restore functions point r0 at the end
27630 of the save area, then use r11 or r12 to load offsets for
27631 [reg+reg] addressing. */
27632 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27633 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27634 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27635
27636 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27637 NOT_INUSE (0);
27638 if (scratch_regno == 12)
27639 sp_adjust = 0;
27640 if (end_save + frame_off != 0)
27641 {
27642 rtx offset = GEN_INT (end_save + frame_off);
27643
27644 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27645 }
27646 else
27647 emit_move_insn (ptr_reg, frame_reg_rtx);
27648
27649 ptr_off = -end_save;
27650 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27651 info->altivec_save_offset + ptr_off,
27652 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27653 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27654 NULL_RTX, NULL_RTX);
27655 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27656 {
27657 /* The oddity mentioned above clobbered our frame reg. */
27658 emit_move_insn (frame_reg_rtx, ptr_reg);
27659 frame_off = ptr_off;
27660 }
27661 }
27662 else if (!WORLD_SAVE_P (info)
27663 && info->altivec_size != 0)
27664 {
27665 int i;
27666
27667 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27668 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27669 {
27670 rtx areg, savereg, mem;
27671 HOST_WIDE_INT offset;
27672
27673 offset = (info->altivec_save_offset + frame_off
27674 + 16 * (i - info->first_altivec_reg_save));
27675
27676 savereg = gen_rtx_REG (V4SImode, i);
27677
27678 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27679 {
27680 mem = gen_frame_mem (V4SImode,
27681 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27682 GEN_INT (offset)));
27683 insn = emit_insn (gen_rtx_SET (mem, savereg));
27684 areg = NULL_RTX;
27685 }
27686 else
27687 {
27688 NOT_INUSE (0);
27689 areg = gen_rtx_REG (Pmode, 0);
27690 emit_move_insn (areg, GEN_INT (offset));
27691
27692 /* AltiVec addressing mode is [reg+reg]. */
27693 mem = gen_frame_mem (V4SImode,
27694 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27695
27696 /* Rather than emitting a generic move, force use of the stvx
27697 instruction, which we always want on ISA 2.07 (power8) systems.
27698 In particular we don't want xxpermdi/stxvd2x for little
27699 endian. */
27700 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27701 }
27702
27703 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27704 areg, GEN_INT (offset));
27705 }
27706 }
27707
27708 /* VRSAVE is a bit vector representing which AltiVec registers
27709 are used. The OS uses this to determine which vector
27710 registers to save on a context switch. We need to save
27711 VRSAVE on the stack frame, add whatever AltiVec registers we
27712 used in this function, and do the corresponding magic in the
27713 epilogue. */
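/* A rough sketch of what emit_vrsave_prologue arranges (registers,
   offset and mask illustrative; VRSAVE is SPR 256):
	mfspr 12,256		# read the old VRSAVE
	stw 12,off(1)		# save it in the frame
	oris 12,12,0xc000	# mark v0,v1 as live (illustrative)
	mtspr 256,12		# install the new value  */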
27714
27715 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27716 {
27717 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27718 be using r12 as frame_reg_rtx and r11 as the static chain
27719 pointer for nested functions. */
27720 int save_regno = 12;
27721 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27722 && !using_static_chain_p)
27723 save_regno = 11;
27724 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27725 {
27726 save_regno = 11;
27727 if (using_static_chain_p)
27728 save_regno = 0;
27729 }
27730 NOT_INUSE (save_regno);
27731
27732 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27733 }
27734
27735 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27736 if (!TARGET_SINGLE_PIC_BASE
27737 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27738 && !constant_pool_empty_p ())
27739 || (DEFAULT_ABI == ABI_V4
27740 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27741 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27742 {
27743 /* If emit_load_toc_table will use the link register, we need to save
27744 it. We use R12 for this purpose because emit_load_toc_table
27745 can use register 0. This allows us to use a plain 'blr' to return
27746 from the procedure more often. */
27747 int save_LR_around_toc_setup = (TARGET_ELF
27748 && DEFAULT_ABI == ABI_V4
27749 && flag_pic
27750 && ! info->lr_save_p
27751 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27752 if (save_LR_around_toc_setup)
27753 {
27754 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27755 rtx tmp = gen_rtx_REG (Pmode, 12);
27756
27757 sp_adjust = 0;
27758 insn = emit_move_insn (tmp, lr);
27759 RTX_FRAME_RELATED_P (insn) = 1;
27760
27761 rs6000_emit_load_toc_table (TRUE);
27762
27763 insn = emit_move_insn (lr, tmp);
27764 add_reg_note (insn, REG_CFA_RESTORE, lr);
27765 RTX_FRAME_RELATED_P (insn) = 1;
27766 }
27767 else
27768 rs6000_emit_load_toc_table (TRUE);
27769 }
27770
27771 #if TARGET_MACHO
27772 if (!TARGET_SINGLE_PIC_BASE
27773 && DEFAULT_ABI == ABI_DARWIN
27774 && flag_pic && crtl->uses_pic_offset_table)
27775 {
27776 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27777 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27778
27779 /* Save and restore LR locally around this call (in R0). */
27780 if (!info->lr_save_p)
27781 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27782
27783 emit_insn (gen_load_macho_picbase (src));
27784
27785 emit_move_insn (gen_rtx_REG (Pmode,
27786 RS6000_PIC_OFFSET_TABLE_REGNUM),
27787 lr);
27788
27789 if (!info->lr_save_p)
27790 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27791 }
27792 #endif
27793
27794 /* If we need to, save the TOC register after doing the stack setup.
27795 Do not emit eh frame info for this save. The unwinder wants info,
27796 conceptually attached to instructions in this function, about
27797 register values in the caller of this function. This R2 may have
27798 already been changed from the value in the caller.
27799 We don't attempt to write accurate DWARF EH frame info for R2
27800 because code emitted by gcc for a (non-pointer) function call
27801 doesn't save and restore R2. Instead, R2 is managed out-of-line
27802 by a linker generated plt call stub when the function resides in
27803 a shared library. This behavior is costly to describe in DWARF,
27804 both in terms of the size of DWARF info and the time taken in the
27805 unwinder to interpret it. R2 changes, apart from the
27806 calls_eh_return case earlier in this function, are handled by
27807 linux-unwind.h frob_update_context. */
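/* The store below amounts to a single std 2,RS6000_TOC_SAVE_SLOT(1),
   e.g. std 2,24(1) under ELFv2, deliberately without any CFI.  */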
27808 if (rs6000_save_toc_in_prologue_p ()
27809 && !cfun->machine->toc_is_wrapped_separately)
27810 {
27811 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27812 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27813 }
27814
27815 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27816 if (using_split_stack && split_stack_arg_pointer_used_p ())
27817 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27818 }
27819
27820 /* Output .extern statements for the save/restore routines we use. */
27821
27822 static void
27823 rs6000_output_savres_externs (FILE *file)
27824 {
27825 rs6000_stack_t *info = rs6000_stack_info ();
27826
27827 if (TARGET_DEBUG_STACK)
27828 debug_stack_info (info);
27829
27830 /* Write .extern for any function we will call to save and restore
27831 fp values. */
27832 if (info->first_fp_reg_save < 64
27833 && !TARGET_MACHO
27834 && !TARGET_ELF)
27835 {
27836 char *name;
27837 int regno = info->first_fp_reg_save - 32;
27838
27839 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27840 {
27841 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27842 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27843 name = rs6000_savres_routine_name (regno, sel);
27844 fprintf (file, "\t.extern %s\n", name);
27845 }
27846 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27847 {
27848 bool lr = (info->savres_strategy
27849 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27850 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27851 name = rs6000_savres_routine_name (regno, sel);
27852 fprintf (file, "\t.extern %s\n", name);
27853 }
27854 }
27855 }
27856
27857 /* Write function prologue. */
27858
27859 static void
27860 rs6000_output_function_prologue (FILE *file)
27861 {
27862 if (!cfun->is_thunk)
27863 rs6000_output_savres_externs (file);
27864
27865 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27866 immediately after the global entry point label. */
27867 if (rs6000_global_entry_point_needed_p ())
27868 {
27869 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27870
27871 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27872
27873 if (TARGET_CMODEL != CMODEL_LARGE)
27874 {
27875 /* In the small and medium code models, we assume the TOC is less
27876 than 2 GB away from the text section, so it can be computed via the
27877 following two-instruction sequence. */
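/* For example, with the internal label .LCF0 placed at the global
   entry point above, the output is
	0:	addis 2,12,.TOC.-.LCF0@ha
		addi 2,2,.TOC.-.LCF0@l
   (label spelling per ASM_GENERATE_INTERNAL_LABEL).  */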
27878 char buf[256];
27879
27880 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27881 fprintf (file, "0:\taddis 2,12,.TOC.-");
27882 assemble_name (file, buf);
27883 fprintf (file, "@ha\n");
27884 fprintf (file, "\taddi 2,2,.TOC.-");
27885 assemble_name (file, buf);
27886 fprintf (file, "@l\n");
27887 }
27888 else
27889 {
27890 /* In the large code model, we allow arbitrary offsets between the
27891 TOC and the text section, so we have to load the offset from
27892 memory. The data field is emitted directly before the global
27893 entry point in rs6000_elf_declare_function_name. */
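/* For example, with .LCL0 holding the TOC-base offset and .LCF0 at
   the global entry point, the output is
	ld 2,.LCL0-.LCF0(12)
	add 2,2,12  */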
27894 char buf[256];
27895
27896 #ifdef HAVE_AS_ENTRY_MARKERS
27897 /* If supported by the linker, emit a marker relocation. If the
27898 total code size of the final executable or shared library
27899 happens to fit into 2 GB after all, the linker will replace
27900 this code sequence with the sequence for the small or medium
27901 code model. */
27902 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27903 #endif
27904 fprintf (file, "\tld 2,");
27905 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27906 assemble_name (file, buf);
27907 fprintf (file, "-");
27908 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27909 assemble_name (file, buf);
27910 fprintf (file, "(12)\n");
27911 fprintf (file, "\tadd 2,2,12\n");
27912 }
27913
27914 fputs ("\t.localentry\t", file);
27915 assemble_name (file, name);
27916 fputs (",.-", file);
27917 assemble_name (file, name);
27918 fputs ("\n", file);
27919 }
27920
27921 /* Output -mprofile-kernel code. This needs to be done here instead of
27922 in output_function_profile since it must go after the ELFv2 ABI
27923 local entry point. */
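/* An illustrative sketch of the code emitted below when a static
   chain must be preserved (RS6000_MCOUNT spelled per target):
	mflr 0
	std 11,24(1)
	bl _mcount
	ld 11,24(1)  */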
27924 if (TARGET_PROFILE_KERNEL && crtl->profile)
27925 {
27926 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27927 gcc_assert (!TARGET_32BIT);
27928
27929 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27930
27931 /* In the ELFv2 ABI we have no compiler stack word. It must be
27932 the responsibility of _mcount to preserve the static chain
27933 register if required. */
27934 if (DEFAULT_ABI != ABI_ELFv2
27935 && cfun->static_chain_decl != NULL)
27936 {
27937 asm_fprintf (file, "\tstd %s,24(%s)\n",
27938 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27939 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27940 asm_fprintf (file, "\tld %s,24(%s)\n",
27941 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27942 }
27943 else
27944 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27945 }
27946
27947 rs6000_pic_labelno++;
27948 }
27949
27950 /* -mprofile-kernel code calls mcount before the function prologue,
27951 so a profiled leaf function should stay a leaf function. */
27952 static bool
27953 rs6000_keep_leaf_when_profiled ()
27954 {
27955 return TARGET_PROFILE_KERNEL;
27956 }
27957
27958 /* Non-zero if vmx regs are restored before the frame pop, zero if
27959 we restore after the pop when possible. */
27960 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27961
27962 /* Restoring cr is a two step process: loading a reg from the frame
27963 save, then moving the reg to cr. For ABI_V4 we must let the
27964 unwinder know that the stack location is no longer valid at or
27965 before the stack deallocation, but we can't emit a cfa_restore for
27966 cr at the stack deallocation like we do for other registers.
27967 The trouble is that it is possible for the move to cr to be
27968 scheduled after the stack deallocation. So say exactly where cr
27969 is located on each of the two insns. */
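/* An illustrative sketch (register, offset and mask vary):
	lwz 12,8(1)	# insn 1: load the saved CR image
	...		# the stack pop may be scheduled here
	mtcrf 0x38,12	# insn 2: move the image into cr2..cr4
   The REG_CFA_REGISTER note added below records that cr lives in r12
   between the two insns.  */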
27970
27971 static rtx
27972 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27973 {
27974 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27975 rtx reg = gen_rtx_REG (SImode, regno);
27976 rtx_insn *insn = emit_move_insn (reg, mem);
27977
27978 if (!exit_func && DEFAULT_ABI == ABI_V4)
27979 {
27980 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27981 rtx set = gen_rtx_SET (reg, cr);
27982
27983 add_reg_note (insn, REG_CFA_REGISTER, set);
27984 RTX_FRAME_RELATED_P (insn) = 1;
27985 }
27986 return reg;
27987 }
27988
27989 /* Reload CR from REG. */
27990
27991 static void
27992 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27993 {
27994 int count = 0;
27995 int i;
27996
27997 if (using_mfcr_multiple)
27998 {
27999 for (i = 0; i < 8; i++)
28000 if (save_reg_p (CR0_REGNO + i))
28001 count++;
28002 gcc_assert (count);
28003 }
28004
28005 if (using_mfcr_multiple && count > 1)
28006 {
28007 rtx_insn *insn;
28008 rtvec p;
28009 int ndx;
28010
28011 p = rtvec_alloc (count);
28012
28013 ndx = 0;
28014 for (i = 0; i < 8; i++)
28015 if (save_reg_p (CR0_REGNO + i))
28016 {
28017 rtvec r = rtvec_alloc (2);
28018 RTVEC_ELT (r, 0) = reg;
28019 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
28020 RTVEC_ELT (p, ndx) =
28021 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
28022 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
28023 ndx++;
28024 }
28025 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28026 gcc_assert (ndx == count);
28027
28028 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28029 CR field separately. */
28030 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28031 {
28032 for (i = 0; i < 8; i++)
28033 if (save_reg_p (CR0_REGNO + i))
28034 add_reg_note (insn, REG_CFA_RESTORE,
28035 gen_rtx_REG (SImode, CR0_REGNO + i));
28036
28037 RTX_FRAME_RELATED_P (insn) = 1;
28038 }
28039 }
28040 else
28041 for (i = 0; i < 8; i++)
28042 if (save_reg_p (CR0_REGNO + i))
28043 {
28044 rtx insn = emit_insn (gen_movsi_to_cr_one
28045 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28046
28047 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
28048 CR field separately, attached to the insn that in fact
28049 restores this particular CR field. */
28050 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
28051 {
28052 add_reg_note (insn, REG_CFA_RESTORE,
28053 gen_rtx_REG (SImode, CR0_REGNO + i));
28054
28055 RTX_FRAME_RELATED_P (insn) = 1;
28056 }
28057 }
28058
28059 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
28060 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
28061 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
28062 {
28063 rtx_insn *insn = get_last_insn ();
28064 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
28065
28066 add_reg_note (insn, REG_CFA_RESTORE, cr);
28067 RTX_FRAME_RELATED_P (insn) = 1;
28068 }
28069 }
28070
28071 /* Like cr, the move to lr instruction can be scheduled after the
28072 stack deallocation, but unlike cr, its stack frame save is still
28073 valid. So we only need to emit the cfa_restore on the correct
28074 instruction. */
28075
28076 static void
28077 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
28078 {
28079 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
28080 rtx reg = gen_rtx_REG (Pmode, regno);
28081
28082 emit_move_insn (reg, mem);
28083 }
28084
28085 static void
28086 restore_saved_lr (int regno, bool exit_func)
28087 {
28088 rtx reg = gen_rtx_REG (Pmode, regno);
28089 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28090 rtx_insn *insn = emit_move_insn (lr, reg);
28091
28092 if (!exit_func && flag_shrink_wrap)
28093 {
28094 add_reg_note (insn, REG_CFA_RESTORE, lr);
28095 RTX_FRAME_RELATED_P (insn) = 1;
28096 }
28097 }
28098
28099 static rtx
28100 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
28101 {
28102 if (DEFAULT_ABI == ABI_ELFv2)
28103 {
28104 int i;
28105 for (i = 0; i < 8; i++)
28106 if (save_reg_p (CR0_REGNO + i))
28107 {
28108 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
28109 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
28110 cfa_restores);
28111 }
28112 }
28113 else if (info->cr_save_p)
28114 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28115 gen_rtx_REG (SImode, CR2_REGNO),
28116 cfa_restores);
28117
28118 if (info->lr_save_p)
28119 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28120 gen_rtx_REG (Pmode, LR_REGNO),
28121 cfa_restores);
28122 return cfa_restores;
28123 }
28124
28125 /* Return true if the stack slot at OFFSET from the stack pointer can
28126 be clobbered by signals. V.4 has no stack cushion; the AIX ABIs have
28127 220 or 288 bytes below the stack pointer not clobbered by signals. */
28128
28129 static inline bool
28130 offset_below_red_zone_p (HOST_WIDE_INT offset)
28131 {
28132 return offset < (DEFAULT_ABI == ABI_V4
28133 ? 0
28134 : TARGET_32BIT ? -220 : -288);
28135 }
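/* For example, with TARGET_64BIT an AltiVec save slot at offset -304
   satisfies -304 < -288, so it lies below the red zone and must be
   reloaded before the stack pointer is restored.  */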
28136
28137 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
28138
28139 static void
28140 emit_cfa_restores (rtx cfa_restores)
28141 {
28142 rtx_insn *insn = get_last_insn ();
28143 rtx *loc = &REG_NOTES (insn);
28144
28145 while (*loc)
28146 loc = &XEXP (*loc, 1);
28147 *loc = cfa_restores;
28148 RTX_FRAME_RELATED_P (insn) = 1;
28149 }
28150
28151 /* Emit function epilogue as insns. */
28152
28153 void
28154 rs6000_emit_epilogue (int sibcall)
28155 {
28156 rs6000_stack_t *info;
28157 int restoring_GPRs_inline;
28158 int restoring_FPRs_inline;
28159 int using_load_multiple;
28160 int using_mtcr_multiple;
28161 int use_backchain_to_restore_sp;
28162 int restore_lr;
28163 int strategy;
28164 HOST_WIDE_INT frame_off = 0;
28165 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
28166 rtx frame_reg_rtx = sp_reg_rtx;
28167 rtx cfa_restores = NULL_RTX;
28168 rtx insn;
28169 rtx cr_save_reg = NULL_RTX;
28170 machine_mode reg_mode = Pmode;
28171 int reg_size = TARGET_32BIT ? 4 : 8;
28172 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
28173 int fp_reg_size = 8;
28174 int i;
28175 bool exit_func;
28176 unsigned ptr_regno;
28177
28178 info = rs6000_stack_info ();
28179
28180 strategy = info->savres_strategy;
28181 using_load_multiple = strategy & REST_MULTIPLE;
28182 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
28183 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
28184 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
28185 || rs6000_tune == PROCESSOR_PPC603
28186 || rs6000_tune == PROCESSOR_PPC750
28187 || optimize_size);
28188 /* Restore via the backchain when we have a large frame, since this
28189 is more efficient than an addis, addi pair. The second condition
28190 here will not trigger at the moment; we don't actually need a
28191 frame pointer for alloca, but the generic parts of the compiler
28192 give us one anyway. */
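/* For example, with a 40000-byte frame a single backchain load
	ld 1,0(1)
   replaces the pair
	addis 1,1,1
	addi 1,1,-25536
   (lwz on 32-bit targets).  */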
28193 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
28194 ? info->lr_save_offset
28195 : 0) > 32767
28196 || (cfun->calls_alloca
28197 && !frame_pointer_needed));
28198 restore_lr = (info->lr_save_p
28199 && (restoring_FPRs_inline
28200 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
28201 && (restoring_GPRs_inline
28202 || info->first_fp_reg_save < 64)
28203 && !cfun->machine->lr_is_wrapped_separately);
28204
28205
28206 if (WORLD_SAVE_P (info))
28207 {
28208 int i, j;
28209 char rname[30];
28210 const char *alloc_rname;
28211 rtvec p;
28212
28213 /* eh_rest_world_r10 will return to the location saved in the LR
28214 stack slot (which is not likely to be our caller).
28215 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
28216 rest_world is similar, except any R10 parameter is ignored.
28217 The exception-handling stuff that was here in 2.95 is no
28218 longer necessary. */
28219
28220 p = rtvec_alloc (9
28221 + 32 - info->first_gp_reg_save
28222 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
28223 + 63 + 1 - info->first_fp_reg_save);
28224
28225 strcpy (rname, ((crtl->calls_eh_return) ?
28226 "*eh_rest_world_r10" : "*rest_world"));
28227 alloc_rname = ggc_strdup (rname);
28228
28229 j = 0;
28230 RTVEC_ELT (p, j++) = ret_rtx;
28231 RTVEC_ELT (p, j++)
28232 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
28233 /* The instruction pattern requires a clobber here;
28234 it is shared with the restVEC helper. */
28235 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
28236
28237 {
28238 /* CR register traditionally saved as CR2. */
28239 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
28240 RTVEC_ELT (p, j++)
28241 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
28242 if (flag_shrink_wrap)
28243 {
28244 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
28245 gen_rtx_REG (Pmode, LR_REGNO),
28246 cfa_restores);
28247 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28248 }
28249 }
28250
28251 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28252 {
28253 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
28254 RTVEC_ELT (p, j++)
28255 = gen_frame_load (reg,
28256 frame_reg_rtx, info->gp_save_offset + reg_size * i);
28257 if (flag_shrink_wrap
28258 && save_reg_p (info->first_gp_reg_save + i))
28259 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28260 }
28261 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28262 {
28263 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
28264 RTVEC_ELT (p, j++)
28265 = gen_frame_load (reg,
28266 frame_reg_rtx, info->altivec_save_offset + 16 * i);
28267 if (flag_shrink_wrap
28268 && save_reg_p (info->first_altivec_reg_save + i))
28269 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28270 }
28271 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
28272 {
28273 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
28274 info->first_fp_reg_save + i);
28275 RTVEC_ELT (p, j++)
28276 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28277 if (flag_shrink_wrap
28278 && save_reg_p (info->first_fp_reg_save + i))
28279 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28280 }
28281 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
28282 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
28283 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
28284 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
28285 RTVEC_ELT (p, j++)
28286 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28287 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28288
28289 if (flag_shrink_wrap)
28290 {
28291 REG_NOTES (insn) = cfa_restores;
28292 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28293 RTX_FRAME_RELATED_P (insn) = 1;
28294 }
28295 return;
28296 }
28297
28298 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28299 if (info->push_p)
28300 frame_off = info->total_size;
28301
28302 /* Restore AltiVec registers if we must do so before adjusting the
28303 stack. */
28304 if (info->altivec_size != 0
28305 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28306 || (DEFAULT_ABI != ABI_V4
28307 && offset_below_red_zone_p (info->altivec_save_offset))))
28308 {
28309 int i;
28310 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28311
28312 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28313 if (use_backchain_to_restore_sp)
28314 {
28315 int frame_regno = 11;
28316
28317 if ((strategy & REST_INLINE_VRS) == 0)
28318 {
28319 /* Of r11 and r12, select the one not clobbered by an
28320 out-of-line restore function for the frame register. */
28321 frame_regno = 11 + 12 - scratch_regno;
28322 }
28323 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28324 emit_move_insn (frame_reg_rtx,
28325 gen_rtx_MEM (Pmode, sp_reg_rtx));
28326 frame_off = 0;
28327 }
28328 else if (frame_pointer_needed)
28329 frame_reg_rtx = hard_frame_pointer_rtx;
28330
28331 if ((strategy & REST_INLINE_VRS) == 0)
28332 {
28333 int end_save = info->altivec_save_offset + info->altivec_size;
28334 int ptr_off;
28335 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28336 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28337
28338 if (end_save + frame_off != 0)
28339 {
28340 rtx offset = GEN_INT (end_save + frame_off);
28341
28342 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28343 }
28344 else
28345 emit_move_insn (ptr_reg, frame_reg_rtx);
28346
28347 ptr_off = -end_save;
28348 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28349 info->altivec_save_offset + ptr_off,
28350 0, V4SImode, SAVRES_VR);
28351 }
28352 else
28353 {
28354 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28355 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28356 {
28357 rtx addr, areg, mem, insn;
28358 rtx reg = gen_rtx_REG (V4SImode, i);
28359 HOST_WIDE_INT offset
28360 = (info->altivec_save_offset + frame_off
28361 + 16 * (i - info->first_altivec_reg_save));
28362
28363 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28364 {
28365 mem = gen_frame_mem (V4SImode,
28366 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28367 GEN_INT (offset)));
28368 insn = gen_rtx_SET (reg, mem);
28369 }
28370 else
28371 {
28372 areg = gen_rtx_REG (Pmode, 0);
28373 emit_move_insn (areg, GEN_INT (offset));
28374
28375 /* AltiVec addressing mode is [reg+reg]. */
28376 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28377 mem = gen_frame_mem (V4SImode, addr);
28378
28379 /* Rather than emitting a generic move, force use of the
28380 lvx instruction, which we always want. In particular we
28381 don't want lxvd2x/xxpermdi for little endian. */
28382 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28383 }
28384
28385 (void) emit_insn (insn);
28386 }
28387 }
28388
28389 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28390 if (((strategy & REST_INLINE_VRS) == 0
28391 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28392 && (flag_shrink_wrap
28393 || (offset_below_red_zone_p
28394 (info->altivec_save_offset
28395 + 16 * (i - info->first_altivec_reg_save))))
28396 && save_reg_p (i))
28397 {
28398 rtx reg = gen_rtx_REG (V4SImode, i);
28399 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28400 }
28401 }
28402
28403 /* Restore VRSAVE if we must do so before adjusting the stack. */
28404 if (info->vrsave_size != 0
28405 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28406 || (DEFAULT_ABI != ABI_V4
28407 && offset_below_red_zone_p (info->vrsave_save_offset))))
28408 {
28409 rtx reg;
28410
28411 if (frame_reg_rtx == sp_reg_rtx)
28412 {
28413 if (use_backchain_to_restore_sp)
28414 {
28415 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28416 emit_move_insn (frame_reg_rtx,
28417 gen_rtx_MEM (Pmode, sp_reg_rtx));
28418 frame_off = 0;
28419 }
28420 else if (frame_pointer_needed)
28421 frame_reg_rtx = hard_frame_pointer_rtx;
28422 }
28423
28424 reg = gen_rtx_REG (SImode, 12);
28425 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28426 info->vrsave_save_offset + frame_off));
28427
28428 emit_insn (generate_set_vrsave (reg, info, 1));
28429 }
28430
28431 insn = NULL_RTX;
28432 /* If we have a large stack frame, restore the old stack pointer
28433 using the backchain. */
28434 if (use_backchain_to_restore_sp)
28435 {
28436 if (frame_reg_rtx == sp_reg_rtx)
28437 {
28438 /* Under V.4, don't reset the stack pointer until after we're done
28439 loading the saved registers. */
28440 if (DEFAULT_ABI == ABI_V4)
28441 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28442
28443 insn = emit_move_insn (frame_reg_rtx,
28444 gen_rtx_MEM (Pmode, sp_reg_rtx));
28445 frame_off = 0;
28446 }
28447 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28448 && DEFAULT_ABI == ABI_V4)
28449 /* frame_reg_rtx has been set up by the altivec restore. */
28450 ;
28451 else
28452 {
28453 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28454 frame_reg_rtx = sp_reg_rtx;
28455 }
28456 }
28457 /* If we have a frame pointer, we can restore the old stack pointer
28458 from it. */
28459 else if (frame_pointer_needed)
28460 {
28461 frame_reg_rtx = sp_reg_rtx;
28462 if (DEFAULT_ABI == ABI_V4)
28463 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28464 /* Prevent reordering memory accesses against stack pointer restore. */
28465 else if (cfun->calls_alloca
28466 || offset_below_red_zone_p (-info->total_size))
28467 rs6000_emit_stack_tie (frame_reg_rtx, true);
28468
28469 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28470 GEN_INT (info->total_size)));
28471 frame_off = 0;
28472 }
28473 else if (info->push_p
28474 && DEFAULT_ABI != ABI_V4
28475 && !crtl->calls_eh_return)
28476 {
28477 /* Prevent reordering memory accesses against stack pointer restore. */
28478 if (cfun->calls_alloca
28479 || offset_below_red_zone_p (-info->total_size))
28480 rs6000_emit_stack_tie (frame_reg_rtx, false);
28481 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28482 GEN_INT (info->total_size)));
28483 frame_off = 0;
28484 }
28485 if (insn && frame_reg_rtx == sp_reg_rtx)
28486 {
28487 if (cfa_restores)
28488 {
28489 REG_NOTES (insn) = cfa_restores;
28490 cfa_restores = NULL_RTX;
28491 }
28492 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28493 RTX_FRAME_RELATED_P (insn) = 1;
28494 }
28495
28496 /* Restore AltiVec registers if we have not done so already. */
28497 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28498 && info->altivec_size != 0
28499 && (DEFAULT_ABI == ABI_V4
28500 || !offset_below_red_zone_p (info->altivec_save_offset)))
28501 {
28502 int i;
28503
28504 if ((strategy & REST_INLINE_VRS) == 0)
28505 {
28506 int end_save = info->altivec_save_offset + info->altivec_size;
28507 int ptr_off;
28508 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28509 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28510 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28511
28512 if (end_save + frame_off != 0)
28513 {
28514 rtx offset = GEN_INT (end_save + frame_off);
28515
28516 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28517 }
28518 else
28519 emit_move_insn (ptr_reg, frame_reg_rtx);
28520
28521 ptr_off = -end_save;
28522 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28523 info->altivec_save_offset + ptr_off,
28524 0, V4SImode, SAVRES_VR);
28525 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28526 {
28527 /* Frame reg was clobbered by out-of-line save. Restore it
28528 from ptr_reg, and if we are calling out-of-line gpr or
28529 fpr restore set up the correct pointer and offset. */
28530 unsigned newptr_regno = 1;
28531 if (!restoring_GPRs_inline)
28532 {
28533 bool lr = info->gp_save_offset + info->gp_size == 0;
28534 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28535 newptr_regno = ptr_regno_for_savres (sel);
28536 end_save = info->gp_save_offset + info->gp_size;
28537 }
28538 else if (!restoring_FPRs_inline)
28539 {
28540 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28541 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28542 newptr_regno = ptr_regno_for_savres (sel);
28543 end_save = info->fp_save_offset + info->fp_size;
28544 }
28545
28546 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28547 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28548
28549 if (end_save + ptr_off != 0)
28550 {
28551 rtx offset = GEN_INT (end_save + ptr_off);
28552
28553 frame_off = -end_save;
28554 if (TARGET_32BIT)
28555 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28556 ptr_reg, offset));
28557 else
28558 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28559 ptr_reg, offset));
28560 }
28561 else
28562 {
28563 frame_off = ptr_off;
28564 emit_move_insn (frame_reg_rtx, ptr_reg);
28565 }
28566 }
28567 }
28568 else
28569 {
28570 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28571 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28572 {
28573 rtx addr, areg, mem, insn;
28574 rtx reg = gen_rtx_REG (V4SImode, i);
28575 HOST_WIDE_INT offset
28576 = (info->altivec_save_offset + frame_off
28577 + 16 * (i - info->first_altivec_reg_save));
28578
28579 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28580 {
28581 mem = gen_frame_mem (V4SImode,
28582 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28583 GEN_INT (offset)));
28584 insn = gen_rtx_SET (reg, mem);
28585 }
28586 else
28587 {
28588 areg = gen_rtx_REG (Pmode, 0);
28589 emit_move_insn (areg, GEN_INT (offset));
28590
28591 /* AltiVec addressing mode is [reg+reg]. */
28592 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28593 mem = gen_frame_mem (V4SImode, addr);
28594
28595 /* Rather than emitting a generic move, force use of the
28596 lvx instruction, which we always want. In particular we
28597 don't want lxvd2x/xxpermdi for little endian. */
28598 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28599 }
28600
28601 (void) emit_insn (insn);
28602 }
28603 }
28604
28605 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28606 if (((strategy & REST_INLINE_VRS) == 0
28607 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28608 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28609 && save_reg_p (i))
28610 {
28611 rtx reg = gen_rtx_REG (V4SImode, i);
28612 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28613 }
28614 }
28615
28616 /* Restore VRSAVE if we have not done so already. */
28617 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28618 && info->vrsave_size != 0
28619 && (DEFAULT_ABI == ABI_V4
28620 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28621 {
28622 rtx reg;
28623
28624 reg = gen_rtx_REG (SImode, 12);
28625 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28626 info->vrsave_save_offset + frame_off));
28627
28628 emit_insn (generate_set_vrsave (reg, info, 1));
28629 }
28630
28631 /* If we exit by an out-of-line restore function on ABI_V4 then that
28632 function will deallocate the stack, so we don't need to worry
28633 about the unwinder restoring cr from an invalid stack frame
28634 location. */
28635 exit_func = (!restoring_FPRs_inline
28636 || (!restoring_GPRs_inline
28637 && info->first_fp_reg_save == 64));
28638
28639 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28640 *separate* slots if the routine calls __builtin_eh_return, so
28641 that they can be independently restored by the unwinder. */
28642 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28643 {
28644 int i, cr_off = info->ehcr_offset;
28645
28646 for (i = 0; i < 8; i++)
28647 if (!call_used_regs[CR0_REGNO + i])
28648 {
28649 rtx reg = gen_rtx_REG (SImode, 0);
28650 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28651 cr_off + frame_off));
28652
28653 insn = emit_insn (gen_movsi_to_cr_one
28654 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28655
28656 if (!exit_func && flag_shrink_wrap)
28657 {
28658 add_reg_note (insn, REG_CFA_RESTORE,
28659 gen_rtx_REG (SImode, CR0_REGNO + i));
28660
28661 RTX_FRAME_RELATED_P (insn) = 1;
28662 }
28663
28664 cr_off += reg_size;
28665 }
28666 }
28667
28668 /* Get the old lr if we saved it. If we are restoring registers
28669 out-of-line, then the out-of-line routines can do this for us. */
28670 if (restore_lr && restoring_GPRs_inline)
28671 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28672
28673 /* Get the old cr if we saved it. */
28674 if (info->cr_save_p)
28675 {
28676 unsigned cr_save_regno = 12;
28677
28678 if (!restoring_GPRs_inline)
28679 {
28680 /* Ensure we don't use the register used by the out-of-line
28681 gpr register restore below. */
28682 bool lr = info->gp_save_offset + info->gp_size == 0;
28683 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28684 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28685
28686 if (gpr_ptr_regno == 12)
28687 cr_save_regno = 11;
28688 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28689 }
28690 else if (REGNO (frame_reg_rtx) == 12)
28691 cr_save_regno = 11;
28692
28693 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28694 info->cr_save_offset + frame_off,
28695 exit_func);
28696 }
28697
28698 /* Set LR here to try to overlap restores below. */
28699 if (restore_lr && restoring_GPRs_inline)
28700 restore_saved_lr (0, exit_func);
28701
28702 /* Load exception handler data registers, if needed. */
28703 if (crtl->calls_eh_return)
28704 {
28705 unsigned int i, regno;
28706
28707 if (TARGET_AIX)
28708 {
28709 rtx reg = gen_rtx_REG (reg_mode, 2);
28710 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28711 frame_off + RS6000_TOC_SAVE_SLOT));
28712 }
28713
28714 for (i = 0; ; ++i)
28715 {
28716 rtx mem;
28717
28718 regno = EH_RETURN_DATA_REGNO (i);
28719 if (regno == INVALID_REGNUM)
28720 break;
28721
28722 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28723 info->ehrd_offset + frame_off
28724 + reg_size * (int) i);
28725
28726 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28727 }
28728 }
28729
28730 /* Restore GPRs. This is done as a PARALLEL if we are using
28731 the load-multiple instructions. */
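/* An illustrative sketch: on 32-bit targets the PARALLEL in the
   using_load_multiple arm below can match the load-multiple pattern
   and become a single
	lmw 26,-24(1)
   reloading r26..r31 (start register and offset illustrative).  */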
28732 if (!restoring_GPRs_inline)
28733 {
28734 /* We are jumping to an out-of-line function. */
28735 rtx ptr_reg;
28736 int end_save = info->gp_save_offset + info->gp_size;
28737 bool can_use_exit = end_save == 0;
28738 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28739 int ptr_off;
28740
28741 /* Emit stack reset code if we need it. */
28742 ptr_regno = ptr_regno_for_savres (sel);
28743 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28744 if (can_use_exit)
28745 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28746 else if (end_save + frame_off != 0)
28747 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28748 GEN_INT (end_save + frame_off)));
28749 else if (REGNO (frame_reg_rtx) != ptr_regno)
28750 emit_move_insn (ptr_reg, frame_reg_rtx);
28751 if (REGNO (frame_reg_rtx) == ptr_regno)
28752 frame_off = -end_save;
28753
28754 if (can_use_exit && info->cr_save_p)
28755 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28756
28757 ptr_off = -end_save;
28758 rs6000_emit_savres_rtx (info, ptr_reg,
28759 info->gp_save_offset + ptr_off,
28760 info->lr_save_offset + ptr_off,
28761 reg_mode, sel);
28762 }
28763 else if (using_load_multiple)
28764 {
28765 rtvec p;
28766 p = rtvec_alloc (32 - info->first_gp_reg_save);
28767 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28768 RTVEC_ELT (p, i)
28769 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28770 frame_reg_rtx,
28771 info->gp_save_offset + frame_off + reg_size * i);
28772 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28773 }
28774 else
28775 {
28776 int offset = info->gp_save_offset + frame_off;
28777 for (i = info->first_gp_reg_save; i < 32; i++)
28778 {
28779 if (save_reg_p (i)
28780 && !cfun->machine->gpr_is_wrapped_separately[i])
28781 {
28782 rtx reg = gen_rtx_REG (reg_mode, i);
28783 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28784 }
28785
28786 offset += reg_size;
28787 }
28788 }
28789
28790 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28791 {
28792 /* If the frame pointer was used then we can't delay emitting
28793 a REG_CFA_DEF_CFA note. This must happen on the insn that
28794 restores the frame pointer, r31. We may have already emitted
28795 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28796 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28797 be harmless if emitted. */
28798 if (frame_pointer_needed)
28799 {
28800 insn = get_last_insn ();
28801 add_reg_note (insn, REG_CFA_DEF_CFA,
28802 plus_constant (Pmode, frame_reg_rtx, frame_off));
28803 RTX_FRAME_RELATED_P (insn) = 1;
28804 }
28805
28806 /* Set up cfa_restores. We always need these when
28807 shrink-wrapping. If not shrink-wrapping then we only need
28808 the cfa_restore when the stack location is no longer valid.
28809 The cfa_restores must be emitted on or before the insn that
28810 invalidates the stack, and of course must not be emitted
28811 before the insn that actually does the restore. The latter
28812 is why it is a bad idea to emit the cfa_restores as a group
28813 on the last instruction here that actually does a restore:
28814 That insn may be reordered with respect to others doing
28815 restores. */
28816 if (flag_shrink_wrap
28817 && !restoring_GPRs_inline
28818 && info->first_fp_reg_save == 64)
28819 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28820
28821 for (i = info->first_gp_reg_save; i < 32; i++)
28822 if (save_reg_p (i)
28823 && !cfun->machine->gpr_is_wrapped_separately[i])
28824 {
28825 rtx reg = gen_rtx_REG (reg_mode, i);
28826 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28827 }
28828 }
28829
28830 if (!restoring_GPRs_inline
28831 && info->first_fp_reg_save == 64)
28832 {
28833 /* We are jumping to an out-of-line function. */
28834 if (cfa_restores)
28835 emit_cfa_restores (cfa_restores);
28836 return;
28837 }
28838
28839 if (restore_lr && !restoring_GPRs_inline)
28840 {
28841 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28842 restore_saved_lr (0, exit_func);
28843 }
28844
28845 /* Restore fpr's if we need to do it without calling a function. */
28846 if (restoring_FPRs_inline)
28847 {
28848 int offset = info->fp_save_offset + frame_off;
28849 for (i = info->first_fp_reg_save; i < 64; i++)
28850 {
28851 if (save_reg_p (i)
28852 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28853 {
28854 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28855 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28856 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28857 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28858 cfa_restores);
28859 }
28860
28861 offset += fp_reg_size;
28862 }
28863 }
28864
28865 /* If we saved cr, restore it here. Just those that were used. */
28866 if (info->cr_save_p)
28867 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28868
28869 /* If this is V.4, unwind the stack pointer after all of the loads
28870 have been done, or set up r11 if we are restoring fp out of line. */
28871 ptr_regno = 1;
28872 if (!restoring_FPRs_inline)
28873 {
28874 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28875 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28876 ptr_regno = ptr_regno_for_savres (sel);
28877 }
28878
28879 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28880 if (REGNO (frame_reg_rtx) == ptr_regno)
28881 frame_off = 0;
28882
28883 if (insn && restoring_FPRs_inline)
28884 {
28885 if (cfa_restores)
28886 {
28887 REG_NOTES (insn) = cfa_restores;
28888 cfa_restores = NULL_RTX;
28889 }
28890 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28891 RTX_FRAME_RELATED_P (insn) = 1;
28892 }
28893
28894 if (crtl->calls_eh_return)
28895 {
28896 rtx sa = EH_RETURN_STACKADJ_RTX;
28897 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28898 }
28899
28900 if (!sibcall && restoring_FPRs_inline)
28901 {
28902 if (cfa_restores)
28903 {
28904 /* We can't hang the cfa_restores off a simple return,
28905 since the shrink-wrap code sometimes uses an existing
28906 return. This means there might be a path from
28907 pre-prologue code to this return, and dwarf2cfi code
28908 wants the eh_frame unwinder state to be the same on
28909 all paths to any point. So we need to emit the
28910 cfa_restores before the return. For -m64 we really
28911 don't need epilogue cfa_restores at all, except for
28912 this irritating dwarf2cfi-with-shrink-wrap
28913 requirement; the stack red-zone means eh_frame info
28914 from the prologue telling the unwinder to restore
28915 from the stack is perfectly good right to the end of
28916 the function. */
28917 emit_insn (gen_blockage ());
28918 emit_cfa_restores (cfa_restores);
28919 cfa_restores = NULL_RTX;
28920 }
28921
28922 emit_jump_insn (targetm.gen_simple_return ());
28923 }
28924
28925 if (!sibcall && !restoring_FPRs_inline)
28926 {
28927 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28928 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28929 int elt = 0;
28930 RTVEC_ELT (p, elt++) = ret_rtx;
28931 if (lr)
28932 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28933
28934 /* We have to restore more than two FP registers, so branch to the
28935 restore function. It will return to our caller. */
28936 int i;
28937 int reg;
28938 rtx sym;
28939
28940 if (flag_shrink_wrap)
28941 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28942
28943 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28944 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28945 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
28946 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28947
28948 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28949 {
28950 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28951
28952 RTVEC_ELT (p, elt++)
28953 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28954 if (flag_shrink_wrap
28955 && save_reg_p (info->first_fp_reg_save + i))
28956 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28957 }
28958
28959 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28960 }
28961
28962 if (cfa_restores)
28963 {
28964 if (sibcall)
28965 /* Ensure the cfa_restores are hung off an insn that won't
28966 be reordered above other restores. */
28967 emit_insn (gen_blockage ());
28968
28969 emit_cfa_restores (cfa_restores);
28970 }
28971 }
28972
28973 /* Write function epilogue. */
28974
28975 static void
28976 rs6000_output_function_epilogue (FILE *file)
28977 {
28978 #if TARGET_MACHO
28979 macho_branch_islands ();
28980
28981 {
28982 rtx_insn *insn = get_last_insn ();
28983 rtx_insn *deleted_debug_label = NULL;
28984
28985 /* Mach-O doesn't support labels at the end of objects, so if
28986 it looks like we might want one, take special action.
28987
28988 First, collect any sequence of deleted debug labels. */
28989 while (insn
28990 && NOTE_P (insn)
28991 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28992 {
28993 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28994 notes only, instead set their CODE_LABEL_NUMBER to -1,
28995 otherwise there would be code generation differences
28996 in between -g and -g0. */
28997 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28998 deleted_debug_label = insn;
28999 insn = PREV_INSN (insn);
29000 }
29001
29002 /* Second, if we have:
29003 label:
29004 barrier
29005 then this needs to be detected, so skip past the barrier. */
29006
29007 if (insn && BARRIER_P (insn))
29008 insn = PREV_INSN (insn);
29009
29010 /* Up to now we've only seen notes or barriers. */
29011 if (insn)
29012 {
29013 if (LABEL_P (insn)
29014 || (NOTE_P (insn)
29015 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
29016 /* Trailing label: <barrier>. */
29017 fputs ("\tnop\n", file);
29018 else
29019 {
29020 /* Lastly, see if we have a completely empty function body. */
29021 while (insn && ! INSN_P (insn))
29022 insn = PREV_INSN (insn);
29023 /* If we don't find any insns, we've got an empty function body;
29024 i.e. completely empty, without a return or branch. This is
29025 taken as the case where a function body has been removed
29026 because it contains an inline __builtin_unreachable(). GCC
29027 states that reaching __builtin_unreachable() means UB, so we're
29028 not obliged to do anything special; however, we want
29029 non-zero-sized function bodies. To meet this, and help the
29030 user out, let's trap the case. */
29031 if (insn == NULL)
29032 fputs ("\ttrap\n", file);
29033 }
29034 }
29035 else if (deleted_debug_label)
29036 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
29037 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
29038 CODE_LABEL_NUMBER (insn) = -1;
29039 }
29040 #endif
29041
29042 /* Output a traceback table here. See /usr/include/sys/debug.h for info
29043 on its format.
29044
29045 We don't output a traceback table if -finhibit-size-directive was
29046 used. The documentation for -finhibit-size-directive reads
29047 ``don't output a @code{.size} assembler directive, or anything
29048 else that would cause trouble if the function is split in the
29049 middle, and the two halves are placed at locations far apart in
29050 memory.'' The traceback table has this property, since it
29051 includes the offset from the start of the function to the
29052 traceback table itself.
29053
29054 The System V.4 PowerPC ABI (and the embedded ABI derived from it) uses a
29055 different traceback table. */
29056 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29057 && ! flag_inhibit_size_directive
29058 && rs6000_traceback != traceback_none && !cfun->is_thunk)
29059 {
29060 const char *fname = NULL;
29061 const char *language_string = lang_hooks.name;
29062 int fixed_parms = 0, float_parms = 0, parm_info = 0;
29063 int i;
29064 int optional_tbtab;
29065 rs6000_stack_t *info = rs6000_stack_info ();
29066
29067 if (rs6000_traceback == traceback_full)
29068 optional_tbtab = 1;
29069 else if (rs6000_traceback == traceback_part)
29070 optional_tbtab = 0;
29071 else
29072 optional_tbtab = !optimize_size && !TARGET_ELF;
29073
29074 if (optional_tbtab)
29075 {
29076 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29077 while (*fname == '.') /* V.4 encodes . in the name */
29078 fname++;
29079
29080 /* Need label immediately before tbtab, so we can compute
29081 its offset from the function start. */
29082 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29083 ASM_OUTPUT_LABEL (file, fname);
29084 }
29085
29086 /* The .tbtab pseudo-op can only be used for the first eight
29087 expressions, since it can't handle the possibly variable
29088 length fields that follow. However, if you omit the optional
29089 fields, the assembler outputs zeros for all optional fields
29090 anyway, giving each variable length field its minimum length
29091 (as defined in sys/debug.h). Thus we cannot use the .tbtab
29092 pseudo-op at all. */
29093
29094 /* An all-zero word flags the start of the tbtab, for debuggers
29095 that have to find it by searching forward from the entry
29096 point or from the current pc. */
29097 fputs ("\t.long 0\n", file);
29098
29099 /* Tbtab format type. Use format type 0. */
29100 fputs ("\t.byte 0,", file);
29101
29102 /* Language type. Unfortunately, there does not seem to be any
29103 official way to discover the language being compiled, so we
29104 use language_string.
29105 C is 0. Fortran is 1. Ada is 3. C++ is 9.
29106 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
29107 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
29108 numbers either, so for now use 0. */
29109 if (lang_GNU_C ()
29110 || ! strcmp (language_string, "GNU GIMPLE")
29111 || ! strcmp (language_string, "GNU Go")
29112 || ! strcmp (language_string, "GNU D")
29113 || ! strcmp (language_string, "libgccjit"))
29114 i = 0;
29115 else if (! strcmp (language_string, "GNU F77")
29116 || lang_GNU_Fortran ())
29117 i = 1;
29118 else if (! strcmp (language_string, "GNU Ada"))
29119 i = 3;
29120 else if (lang_GNU_CXX ()
29121 || ! strcmp (language_string, "GNU Objective-C++"))
29122 i = 9;
29123 else if (! strcmp (language_string, "GNU Java"))
29124 i = 13;
29125 else if (! strcmp (language_string, "GNU Objective-C"))
29126 i = 14;
29127 else
29128 gcc_unreachable ();
29129 fprintf (file, "%d,", i);
29130
29131 /* 8 single bit fields: global linkage (not set for C extern linkage,
29132 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
29133 from start of procedure stored in tbtab, internal function, function
29134 has controlled storage, function has no toc, function uses fp,
29135 function logs/aborts fp operations. */
29136 /* Assume that fp operations are used if any fp reg must be saved. */
29137 fprintf (file, "%d,",
29138 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
29139
29140 /* 6 bitfields: function is interrupt handler, name present in
29141 proc table, function calls alloca, on condition directives
29142 (controls stack walks, 3 bits), saves condition reg, saves
29143 link reg. */
29144 /* The `function calls alloca' bit seems to be set whenever reg 31 is
29145 set up as a frame pointer, even when there is no alloca call. */
29146 fprintf (file, "%d,",
29147 ((optional_tbtab << 6)
29148 | ((optional_tbtab & frame_pointer_needed) << 5)
29149 | (info->cr_save_p << 1)
29150 | (info->lr_save_p)));
29151
29152 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
29153 (6 bits). */
29154 fprintf (file, "%d,",
29155 (info->push_p << 7) | (64 - info->first_fp_reg_save));
29156
29157 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
29158 fprintf (file, "%d,", (32 - first_reg_to_save ()));
29159
29160 if (optional_tbtab)
29161 {
29162 /* Compute the parameter info from the function decl argument
29163 list. */
29164 tree decl;
29165 int next_parm_info_bit = 31;
29166
29167 for (decl = DECL_ARGUMENTS (current_function_decl);
29168 decl; decl = DECL_CHAIN (decl))
29169 {
29170 rtx parameter = DECL_INCOMING_RTL (decl);
29171 machine_mode mode = GET_MODE (parameter);
29172
29173 if (REG_P (parameter))
29174 {
29175 if (SCALAR_FLOAT_MODE_P (mode))
29176 {
29177 int bits;
29178
29179 float_parms++;
29180
29181 switch (mode)
29182 {
29183 case E_SFmode:
29184 case E_SDmode:
29185 bits = 0x2;
29186 break;
29187
29188 case E_DFmode:
29189 case E_DDmode:
29190 case E_TFmode:
29191 case E_TDmode:
29192 case E_IFmode:
29193 case E_KFmode:
29194 bits = 0x3;
29195 break;
29196
29197 default:
29198 gcc_unreachable ();
29199 }
29200
29201 /* If only one bit will fit, don't OR in this entry. */
29202 if (next_parm_info_bit > 0)
29203 parm_info |= (bits << (next_parm_info_bit - 1));
29204 next_parm_info_bit -= 2;
29205 }
29206 else
29207 {
29208 fixed_parms += ((GET_MODE_SIZE (mode)
29209 + (UNITS_PER_WORD - 1))
29210 / UNITS_PER_WORD);
29211 next_parm_info_bit -= 1;
29212 }
29213 }
29214 }
29215 }
29216
29217 /* Number of fixed point parameters. */
29218 /* This is actually the number of words of fixed point parameters; thus
29219 an 8-byte struct counts as 2, and the maximum value is 8. */
29220 fprintf (file, "%d,", fixed_parms);
29221
29222 /* 2 bitfields: number of floating point parameters (7 bits), parameters
29223 all on stack. */
29224 /* This is actually the number of fp registers that hold parameters,
29225 and thus the maximum value is 13. */
29226 /* Set parameters on stack bit if parameters are not in their original
29227 registers, regardless of whether they are on the stack? Xlc
29228 seems to set the bit when not optimizing. */
29229 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
29230
29231 if (optional_tbtab)
29232 {
29233 /* Optional fields follow. Some are variable length. */
29234
29235 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
29236 float, 11 double float. */
29237 /* There is an entry for each parameter in a register, in the order
29238 that they occur in the parameter list. Any intervening arguments
29239 on the stack are ignored. If the list overflows a long (max
29240 possible length 34 bits) then completely leave off all elements
29241 that don't fit. */
29242 /* Only emit this long if there was at least one parameter. */
29243 if (fixed_parms || float_parms)
29244 fprintf (file, "\t.long %d\n", parm_info);
29245
29246 /* Offset from start of code to tb table. */
29247 fputs ("\t.long ", file);
29248 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
29249 RS6000_OUTPUT_BASENAME (file, fname);
29250 putc ('-', file);
29251 rs6000_output_function_entry (file, fname);
29252 putc ('\n', file);
29253
29254 /* Interrupt handler mask. */
29255 /* Omit this long, since we never set the interrupt handler bit
29256 above. */
29257
29258 /* Number of CTL (controlled storage) anchors. */
29259 /* Omit this long, since the has_ctl bit is never set above. */
29260
29261 /* Displacement into stack of each CTL anchor. */
29262 /* Omit this list of longs, because there are no CTL anchors. */
29263
29264 /* Length of function name. */
29265 if (*fname == '*')
29266 ++fname;
29267 fprintf (file, "\t.short %d\n", (int) strlen (fname));
29268
29269 /* Function name. */
29270 assemble_string (fname, strlen (fname));
29271
29272 /* Register for alloca automatic storage; this is always reg 31.
29273 Only emit this if the alloca bit was set above. */
29274 if (frame_pointer_needed)
29275 fputs ("\t.byte 31\n", file);
29276
29277 fputs ("\t.align 2\n", file);
29278 }
29279 }
29280
29281 /* Arrange to define .LCTOC1 label, if not already done. */
29282 if (need_toc_init)
29283 {
29284 need_toc_init = 0;
29285 if (!toc_initialized)
29286 {
29287 switch_to_section (toc_section);
29288 switch_to_section (current_function_section ());
29289 }
29290 }
29291 }
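
/* As a hand-worked sketch (the byte values are hypothetical and depend
   entirely on the function being compiled, not taken from any real
   build): for an optimized C function "foo" that saves only LR, the
   full-traceback case above prints roughly

	LT..foo:
		.long 0				# tbtab start marker
		.byte 0,0,32,65,128,0,0,0	# format, lang, bit fields
		.long LT..foo-.foo		# offset from entry to tbtab
		.short 3
		.byte "foo"
		.align 2

   with the optional parameter-info word omitted since there are no
   register parameters.  */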
29292
29293 /* -fsplit-stack support. */
29294
29295 /* A SYMBOL_REF for __morestack. */
29296 static GTY(()) rtx morestack_ref;
29297
29298 static rtx
29299 gen_add3_const (rtx rt, rtx ra, long c)
29300 {
29301 if (TARGET_64BIT)
29302 return gen_adddi3 (rt, ra, GEN_INT (c));
29303 else
29304 return gen_addsi3 (rt, ra, GEN_INT (c));
29305 }
29306
29307 /* Emit -fsplit-stack prologue, which goes before the regular function
29308 prologue (at local entry point in the case of ELFv2). */
29309
29310 void
29311 rs6000_expand_split_stack_prologue (void)
29312 {
29313 rs6000_stack_t *info = rs6000_stack_info ();
29314 unsigned HOST_WIDE_INT allocate;
29315 long alloc_hi, alloc_lo;
29316 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29317 rtx_insn *insn;
29318
29319 gcc_assert (flag_split_stack && reload_completed);
29320
29321 if (!info->push_p)
29322 return;
29323
29324 if (global_regs[29])
29325 {
29326 error ("%qs uses register r29", "%<-fsplit-stack%>");
29327 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29328 "conflicts with %qD", global_regs_decl[29]);
29329 }
29330
29331 allocate = info->total_size;
29332 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29333 {
29334 sorry ("Stack frame larger than 2G is not supported for "
29335 "%<-fsplit-stack%>");
29336 return;
29337 }
29338 if (morestack_ref == NULL_RTX)
29339 {
29340 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29341 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29342 | SYMBOL_FLAG_FUNCTION);
29343 }
29344
29345 r0 = gen_rtx_REG (Pmode, 0);
29346 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29347 r12 = gen_rtx_REG (Pmode, 12);
29348 emit_insn (gen_load_split_stack_limit (r0));
29349 /* Always emit two insns here to calculate the requested stack,
29350 so that the linker can edit them when adjusting size for calling
29351 non-split-stack code. */
29352 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29353 alloc_lo = -allocate - alloc_hi;
29354 if (alloc_hi != 0)
29355 {
29356 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29357 if (alloc_lo != 0)
29358 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29359 else
29360 emit_insn (gen_nop ());
29361 }
29362 else
29363 {
29364 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29365 emit_insn (gen_nop ());
29366 }
29367
29368 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29369 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29370 ok_label = gen_label_rtx ();
29371 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29372 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29373 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29374 pc_rtx);
29375 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29376 JUMP_LABEL (insn) = ok_label;
29377 /* Mark the jump as very likely to be taken. */
29378 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29379
29380 lr = gen_rtx_REG (Pmode, LR_REGNO);
29381 insn = emit_move_insn (r0, lr);
29382 RTX_FRAME_RELATED_P (insn) = 1;
29383 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29384 RTX_FRAME_RELATED_P (insn) = 1;
29385
29386 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29387 const0_rtx, const0_rtx));
29388 call_fusage = NULL_RTX;
29389 use_reg (&call_fusage, r12);
29390 /* Say the call uses r0, even though it doesn't, to stop regrename
29391 from twiddling with the insns saving lr, trashing args for cfun.
29392 The insns restoring lr are similarly protected by making
29393 split_stack_return use r0. */
29394 use_reg (&call_fusage, r0);
29395 add_function_usage_to (insn, call_fusage);
29396 /* Indicate that this function can't jump to non-local gotos. */
29397 make_reg_eh_region_note_nothrow_nononlocal (insn);
29398 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29399 insn = emit_move_insn (lr, r0);
29400 add_reg_note (insn, REG_CFA_RESTORE, lr);
29401 RTX_FRAME_RELATED_P (insn) = 1;
29402 emit_insn (gen_split_stack_return ());
29403
29404 emit_label (ok_label);
29405 LABEL_NUSES (ok_label) = 1;
29406 }
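
/* Rough shape of the code emitted above for the 64-bit case
   (illustrative mnemonics and offsets, not captured output):

	ld    r0,<limit-offset>(r13)	# load_split_stack_limit
	addis r12,r1,-FRAME@ha		# always two add insns, so the
	addi  r12,r12,-FRAME@l		#  linker can rewrite them
	cmpld cr7,r12,r0
	bge   cr7,.Lok			# likely: enough stack
	mflr  r0
	std   r0,<lr-offset>(r1)	# save LR around the call
	bl    __morestack		# run body on a fresh segment
	ld    r0,<lr-offset>(r1)	# back here afterwards: reload
	mtlr  r0			#  the saved LR and return to
	blr				#  the original caller
  .Lok:
   */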
29407
29408 /* Return the internal arg pointer used for function incoming
29409 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29410 to copy it to a pseudo in order for it to be preserved over calls
29411 and suchlike. We'd really like to use a pseudo here for the
29412 internal arg pointer but data-flow analysis is not prepared to
29413 accept pseudos as live at the beginning of a function. */
29414
29415 static rtx
29416 rs6000_internal_arg_pointer (void)
29417 {
29418 if (flag_split_stack
29419 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29420 == NULL))
29422 {
29423 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29424 {
29425 rtx pat;
29426
29427 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29428 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29429
29430 /* Put the pseudo initialization right after the note at the
29431 beginning of the function. */
29432 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29433 gen_rtx_REG (Pmode, 12));
29434 push_topmost_sequence ();
29435 emit_insn_after (pat, get_insns ());
29436 pop_topmost_sequence ();
29437 }
29438 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29439 FIRST_PARM_OFFSET (current_function_decl));
29440 return copy_to_reg (ret);
29441 }
29442 return virtual_incoming_args_rtx;
29443 }
29444
29445 /* We may have to tell the dataflow pass that the split stack prologue
29446 is initializing a register. */
29447
29448 static void
29449 rs6000_live_on_entry (bitmap regs)
29450 {
29451 if (flag_split_stack)
29452 bitmap_set_bit (regs, 12);
29453 }
29454
29455 /* Emit -fsplit-stack dynamic stack allocation space check. */
29456
29457 void
29458 rs6000_split_stack_space_check (rtx size, rtx label)
29459 {
29460 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29461 rtx limit = gen_reg_rtx (Pmode);
29462 rtx requested = gen_reg_rtx (Pmode);
29463 rtx cmp = gen_reg_rtx (CCUNSmode);
29464 rtx jump;
29465
29466 emit_insn (gen_load_split_stack_limit (limit));
29467 if (CONST_INT_P (size))
29468 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29469 else
29470 {
29471 size = force_reg (Pmode, size);
29472 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29473 }
29474 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29475 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29476 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29477 gen_rtx_LABEL_REF (VOIDmode, label),
29478 pc_rtx);
29479 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29480 JUMP_LABEL (jump) = label;
29481 }
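
/* Illustrative shape of the dynamic-allocation check (rA/rB stand for
   the LIMIT and REQUESTED temporaries above; registers are chosen by
   the allocator):

	ld    rA,<limit-offset>(r13)	# split-stack limit
	subf  rB,size,r1		# requested = sp - size
	cmpld rB,rA
	bge   LABEL			# enough space on this segment

   The generic dynamic stack allocation code branches to LABEL on
   success and otherwise obtains space through the libgcc split-stack
   allocation path.  */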
29482 \f
29483 /* A C compound statement that outputs the assembler code for a thunk
29484 function, used to implement C++ virtual function calls with
29485 multiple inheritance. The thunk acts as a wrapper around a virtual
29486 function, adjusting the implicit object parameter before handing
29487 control off to the real function.
29488
29489 First, emit code to add the integer DELTA to the location that
29490 contains the incoming first argument. Assume that this argument
29491 contains a pointer, and is the one used to pass the `this' pointer
29492 in C++. This is the incoming argument *before* the function
29493 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29494 values of all other incoming arguments.
29495
29496 After the addition, emit code to jump to FUNCTION, which is a
29497 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29498 not touch the return address. Hence returning from FUNCTION will
29499 return to whoever called the current `thunk'.
29500
29501 The effect must be as if FUNCTION had been called directly with the
29502 adjusted first argument. This macro is responsible for emitting
29503 all of the code for a thunk function; output_function_prologue()
29504 and output_function_epilogue() are not invoked.
29505
29506 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29507 been extracted from it.) It might possibly be useful on some
29508 targets, but probably not.
29509
29510 If you do not define this macro, the target-independent code in the
29511 C++ frontend will generate a less efficient heavyweight thunk that
29512 calls FUNCTION instead of jumping to it. The generic approach does
29513 not support varargs. */
29514
29515 static void
29516 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29517 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29518 tree function)
29519 {
29520 rtx this_rtx, funexp;
29521 rtx_insn *insn;
29522
29523 reload_completed = 1;
29524 epilogue_completed = 1;
29525
29526 /* Mark the end of the (empty) prologue. */
29527 emit_note (NOTE_INSN_PROLOGUE_END);
29528
29529 /* Find the "this" pointer. If the function returns a structure,
29530 the structure return pointer is in r3. */
29531 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29532 this_rtx = gen_rtx_REG (Pmode, 4);
29533 else
29534 this_rtx = gen_rtx_REG (Pmode, 3);
29535
29536 /* Apply the constant offset, if required. */
29537 if (delta)
29538 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29539
29540 /* Apply the offset from the vtable, if required. */
29541 if (vcall_offset)
29542 {
29543 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29544 rtx tmp = gen_rtx_REG (Pmode, 12);
29545
29546 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29547 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29548 {
29549 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29550 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29551 }
29552 else
29553 {
29554 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29555
29556 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29557 }
29558 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29559 }
29560
29561 /* Generate a tail call to the target function. */
29562 if (!TREE_USED (function))
29563 {
29564 assemble_external (function);
29565 TREE_USED (function) = 1;
29566 }
29567 funexp = XEXP (DECL_RTL (function), 0);
29568 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29569
29570 #if TARGET_MACHO
29571 if (MACHOPIC_INDIRECT)
29572 funexp = machopic_indirect_call_target (funexp);
29573 #endif
29574
29575 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29576 generate sibcall RTL explicitly. */
29577 insn = emit_call_insn (
29578 gen_rtx_PARALLEL (VOIDmode,
29579 gen_rtvec (3,
29580 gen_rtx_CALL (VOIDmode,
29581 funexp, const0_rtx),
29582 gen_rtx_USE (VOIDmode, const0_rtx),
29583 simple_return_rtx)));
29584 SIBLING_CALL_P (insn) = 1;
29585 emit_barrier ();
29586
29587 /* Run just enough of rest_of_compilation to get the insns emitted.
29588 There's not really enough bulk here to make other passes such as
29589 instruction scheduling worth while. Note that use_thunk calls
29590 assemble_start_function and assemble_end_function. */
29591 insn = get_insns ();
29592 shorten_branches (insn);
29593 final_start_function (insn, file, 1);
29594 final (insn, file, 1);
29595 final_end_function ();
29596
29597 reload_completed = 0;
29598 epilogue_completed = 0;
29599 }
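
/* For illustration with hypothetical constants: DELTA == 16 and
   VCALL_OFFSET == 0 reduce the thunk to

	addi 3,3,16
	b    <target>

   while a small nonzero VCALL_OFFSET inserts a vtable load through r12
   between the addi and the branch:

	ld   12,0(3)
	ld   12,<vcall_offset>(12)
	add  3,3,12
   */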
29600 \f
29601 /* A quick summary of the various types of 'constant-pool tables'
29602 under PowerPC:
29603
29604 Target Flags Name One table per
29605 AIX (none) AIX TOC object file
29606 AIX -mfull-toc AIX TOC object file
29607 AIX -mminimal-toc AIX minimal TOC translation unit
29608 SVR4/EABI (none) SVR4 SDATA object file
29609 SVR4/EABI -fpic SVR4 pic object file
29610 SVR4/EABI -fPIC SVR4 PIC translation unit
29611 SVR4/EABI -mrelocatable EABI TOC function
29612 SVR4/EABI -maix AIX TOC object file
29613 SVR4/EABI -maix -mminimal-toc
29614 AIX minimal TOC translation unit
29615
29616 Name Reg. Set by entries contains:
29617 made by addrs? fp? sum?
29618
29619 AIX TOC 2 crt0 as Y option option
29620 AIX minimal TOC 30 prolog gcc Y Y option
29621 SVR4 SDATA 13 crt0 gcc N Y N
29622 SVR4 pic 30 prolog ld Y not yet N
29623 SVR4 PIC 30 prolog gcc Y option option
29624 EABI TOC 30 prolog gcc Y option option
29625
29626 */
29627
29628 /* Hash functions for the hash table. */
29629
29630 static unsigned
29631 rs6000_hash_constant (rtx k)
29632 {
29633 enum rtx_code code = GET_CODE (k);
29634 machine_mode mode = GET_MODE (k);
29635 unsigned result = (code << 3) ^ mode;
29636 const char *format;
29637 int flen, fidx;
29638
29639 format = GET_RTX_FORMAT (code);
29640 flen = strlen (format);
29641 fidx = 0;
29642
29643 switch (code)
29644 {
29645 case LABEL_REF:
29646 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29647
29648 case CONST_WIDE_INT:
29649 {
29650 int i;
29651 flen = CONST_WIDE_INT_NUNITS (k);
29652 for (i = 0; i < flen; i++)
29653 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29654 return result;
29655 }
29656
29657 case CONST_DOUBLE:
29658 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29659
29660 case CODE_LABEL:
29661 fidx = 3;
29662 break;
29663
29664 default:
29665 break;
29666 }
29667
29668 for (; fidx < flen; fidx++)
29669 switch (format[fidx])
29670 {
29671 case 's':
29672 {
29673 unsigned i, len;
29674 const char *str = XSTR (k, fidx);
29675 len = strlen (str);
29676 result = result * 613 + len;
29677 for (i = 0; i < len; i++)
29678 result = result * 613 + (unsigned) str[i];
29679 break;
29680 }
29681 case 'u':
29682 case 'e':
29683 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29684 break;
29685 case 'i':
29686 case 'n':
29687 result = result * 613 + (unsigned) XINT (k, fidx);
29688 break;
29689 case 'w':
29690 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29691 result = result * 613 + (unsigned) XWINT (k, fidx);
29692 else
29693 {
29694 size_t i;
29695 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29696 result = result * 613 + (unsigned) (XWINT (k, fidx)
29697 >> CHAR_BIT * i);
29698 }
29699 break;
29700 case '0':
29701 break;
29702 default:
29703 gcc_unreachable ();
29704 }
29705
29706 return result;
29707 }
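
/* Hand-worked example of the mixing above: for (const_int 42) the
   initial value is (CONST_INT << 3) ^ VOIDmode, and the single 'w'
   field folds in as result = result * 613 + 42 (chunked 32 bits at a
   time when HOST_WIDE_INT is wider than unsigned).  The multipliers
   613 and 1231 are just odd constants used to spread bits; nothing
   depends on their exact values.  */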
29708
29709 hashval_t
29710 toc_hasher::hash (toc_hash_struct *thc)
29711 {
29712 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29713 }
29714
29715 /* Compare H1 and H2 for equivalence. */
29716
29717 bool
29718 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29719 {
29720 rtx r1 = h1->key;
29721 rtx r2 = h2->key;
29722
29723 if (h1->key_mode != h2->key_mode)
29724 return 0;
29725
29726 return rtx_equal_p (r1, r2);
29727 }
29728
29729 /* These are the names given by the C++ front-end to vtables, and
29730 vtable-like objects. Ideally, this logic should not be here;
29731 instead, there should be some programmatic way of inquiring as
29732 to whether or not an object is a vtable. */
29733
29734 #define VTABLE_NAME_P(NAME) \
29735 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29736 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29737 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29738 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29739 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29740
29741 #ifdef NO_DOLLAR_IN_LABEL
29742 /* Return a GGC-allocated character string translating dollar signs in
29743 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
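/* For example (hypothetical symbol): "rs6000$tmp$1" becomes
   "rs6000_tmp_1", while a name without '$', or one whose only '$' is
   the first character, is returned unchanged.  */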
29744
29745 const char *
29746 rs6000_xcoff_strip_dollar (const char *name)
29747 {
29748 char *strip, *p;
29749 const char *q;
29750 size_t len;
29751
29752 q = (const char *) strchr (name, '$');
29753
29754 if (q == 0 || q == name)
29755 return name;
29756
29757 len = strlen (name);
29758 strip = XALLOCAVEC (char, len + 1);
29759 strcpy (strip, name);
29760 p = strip + (q - name);
29761 while (p)
29762 {
29763 *p = '_';
29764 p = strchr (p + 1, '$');
29765 }
29766
29767 return ggc_alloc_string (strip, len);
29768 }
29769 #endif
29770
29771 void
29772 rs6000_output_symbol_ref (FILE *file, rtx x)
29773 {
29774 const char *name = XSTR (x, 0);
29775
29776 /* Currently C++ toc references to vtables can be emitted before it
29777 is decided whether the vtable is public or private. If this is
29778 the case, then the linker will eventually complain that there is
29779 a reference to an unknown section. Thus, for vtables only,
29780 we emit the TOC reference to reference the identifier and not the
29781 symbol. */
29782 if (VTABLE_NAME_P (name))
29783 {
29784 RS6000_OUTPUT_BASENAME (file, name);
29785 }
29786 else
29787 assemble_name (file, name);
29788 }
29789
29790 /* Output a TOC entry. We derive the entry name from what is being
29791 written. */
29792
29793 void
29794 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29795 {
29796 char buf[256];
29797 const char *name = buf;
29798 rtx base = x;
29799 HOST_WIDE_INT offset = 0;
29800
29801 gcc_assert (!TARGET_NO_TOC);
29802
29803 /* When the linker won't eliminate them, don't output duplicate
29804 TOC entries (this happens on AIX if there is any kind of TOC,
29805 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29806 CODE_LABELs. */
29807 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29808 {
29809 struct toc_hash_struct *h;
29810
29811 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29812 time because GGC is not initialized at that point. */
29813 if (toc_hash_table == NULL)
29814 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29815
29816 h = ggc_alloc<toc_hash_struct> ();
29817 h->key = x;
29818 h->key_mode = mode;
29819 h->labelno = labelno;
29820
29821 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29822 if (*found == NULL)
29823 *found = h;
29824 else /* This is indeed a duplicate.
29825 Set this label equal to that label. */
29826 {
29827 fputs ("\t.set ", file);
29828 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29829 fprintf (file, "%d,", labelno);
29830 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29831 fprintf (file, "%d\n", ((*found)->labelno));
29832
29833 #ifdef HAVE_AS_TLS
29834 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29835 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29836 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29837 {
29838 fputs ("\t.set ", file);
29839 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29840 fprintf (file, "%d,", labelno);
29841 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29842 fprintf (file, "%d\n", ((*found)->labelno));
29843 }
29844 #endif
29845 return;
29846 }
29847 }
29848
29849 /* If we're going to put a double constant in the TOC, make sure it's
29850 aligned properly when strict alignment is on. */
29851 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29852 && STRICT_ALIGNMENT
29853 && GET_MODE_BITSIZE (mode) >= 64
29854 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29855 ASM_OUTPUT_ALIGN (file, 3);
29856 }
29857
29858 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29859
29860 /* Handle FP constants specially. Note that if we have a minimal
29861 TOC, things we put here aren't actually in the TOC, so we can allow
29862 FP constants. */
29863 if (CONST_DOUBLE_P (x)
29864 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29865 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29866 {
29867 long k[4];
29868
29869 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29870 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29871 else
29872 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29873
29874 if (TARGET_64BIT)
29875 {
29876 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29877 fputs (DOUBLE_INT_ASM_OP, file);
29878 else
29879 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29880 k[0] & 0xffffffff, k[1] & 0xffffffff,
29881 k[2] & 0xffffffff, k[3] & 0xffffffff);
29882 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29883 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29884 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29885 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29886 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29887 return;
29888 }
29889 else
29890 {
29891 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29892 fputs ("\t.long ", file);
29893 else
29894 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29895 k[0] & 0xffffffff, k[1] & 0xffffffff,
29896 k[2] & 0xffffffff, k[3] & 0xffffffff);
29897 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29898 k[0] & 0xffffffff, k[1] & 0xffffffff,
29899 k[2] & 0xffffffff, k[3] & 0xffffffff);
29900 return;
29901 }
29902 }
29903 else if (CONST_DOUBLE_P (x)
29904 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29905 {
29906 long k[2];
29907
29908 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29909 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29910 else
29911 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29912
29913 if (TARGET_64BIT)
29914 {
29915 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29916 fputs (DOUBLE_INT_ASM_OP, file);
29917 else
29918 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29919 k[0] & 0xffffffff, k[1] & 0xffffffff);
29920 fprintf (file, "0x%lx%08lx\n",
29921 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29922 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29923 return;
29924 }
29925 else
29926 {
29927 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29928 fputs ("\t.long ", file);
29929 else
29930 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29931 k[0] & 0xffffffff, k[1] & 0xffffffff);
29932 fprintf (file, "0x%lx,0x%lx\n",
29933 k[0] & 0xffffffff, k[1] & 0xffffffff);
29934 return;
29935 }
29936 }
29937 else if (CONST_DOUBLE_P (x)
29938 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29939 {
29940 long l;
29941
29942 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29943 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29944 else
29945 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29946
29947 if (TARGET_64BIT)
29948 {
29949 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29950 fputs (DOUBLE_INT_ASM_OP, file);
29951 else
29952 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29953 if (WORDS_BIG_ENDIAN)
29954 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29955 else
29956 fprintf (file, "0x%lx\n", l & 0xffffffff);
29957 return;
29958 }
29959 else
29960 {
29961 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29962 fputs ("\t.long ", file);
29963 else
29964 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29965 fprintf (file, "0x%lx\n", l & 0xffffffff);
29966 return;
29967 }
29968 }
29969 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
29970 {
29971 unsigned HOST_WIDE_INT low;
29972 HOST_WIDE_INT high;
29973
29974 low = INTVAL (x) & 0xffffffff;
29975 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29976
29977 /* TOC entries are always Pmode-sized, so when big-endian
29978 smaller integer constants in the TOC need to be padded.
29979 (This is still a win over putting the constants in
29980 a separate constant pool, because then we'd have
29981 to have both a TOC entry _and_ the actual constant.)
29982
29983 For a 32-bit target, CONST_INT values are loaded and shifted
29984 entirely within `low' and can be stored in one TOC entry. */
29985
29986 /* It would be easy to make this work, but it doesn't now. */
29987 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29988
29989 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29990 {
29991 low |= high << 32;
29992 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29993 high = (HOST_WIDE_INT) low >> 32;
29994 low &= 0xffffffff;
29995 }
29996
29997 if (TARGET_64BIT)
29998 {
29999 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30000 fputs (DOUBLE_INT_ASM_OP, file);
30001 else
30002 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30003 (long) high & 0xffffffff, (long) low & 0xffffffff);
30004 fprintf (file, "0x%lx%08lx\n",
30005 (long) high & 0xffffffff, (long) low & 0xffffffff);
30006 return;
30007 }
30008 else
30009 {
30010 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
30011 {
30012 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30013 fputs ("\t.long ", file);
30014 else
30015 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
30016 (long) high & 0xffffffff, (long) low & 0xffffffff);
30017 fprintf (file, "0x%lx,0x%lx\n",
30018 (long) high & 0xffffffff, (long) low & 0xffffffff);
30019 }
30020 else
30021 {
30022 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30023 fputs ("\t.long ", file);
30024 else
30025 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
30026 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
30027 }
30028 return;
30029 }
30030 }
30031
30032 if (GET_CODE (x) == CONST)
30033 {
30034 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
30035 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
30036
30037 base = XEXP (XEXP (x, 0), 0);
30038 offset = INTVAL (XEXP (XEXP (x, 0), 1));
30039 }
30040
30041 switch (GET_CODE (base))
30042 {
30043 case SYMBOL_REF:
30044 name = XSTR (base, 0);
30045 break;
30046
30047 case LABEL_REF:
30048 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
30049 CODE_LABEL_NUMBER (XEXP (base, 0)));
30050 break;
30051
30052 case CODE_LABEL:
30053 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
30054 break;
30055
30056 default:
30057 gcc_unreachable ();
30058 }
30059
30060 if (TARGET_ELF || TARGET_MINIMAL_TOC)
30061 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
30062 else
30063 {
30064 fputs ("\t.tc ", file);
30065 RS6000_OUTPUT_BASENAME (file, name);
30066
30067 if (offset < 0)
30068 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
30069 else if (offset)
30070 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
30071
30072 /* Mark large TOC symbols on AIX with [TE] so they are mapped
30073 after other TOC symbols, reducing overflow of small TOC access
30074 to [TC] symbols. */
30075 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
30076 ? "[TE]," : "[TC],", file);
30077 }
30078
30079 /* Currently C++ toc references to vtables can be emitted before it
30080 is decided whether the vtable is public or private. If this is
30081 the case, then the linker will eventually complain that there is
30082 a TOC reference to an unknown section. Thus, for vtables only,
30083 we emit the TOC reference to reference the symbol and not the
30084 section. */
30085 if (VTABLE_NAME_P (name))
30086 {
30087 RS6000_OUTPUT_BASENAME (file, name);
30088 if (offset < 0)
30089 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
30090 else if (offset > 0)
30091 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
30092 }
30093 else
30094 output_addr_const (file, x);
30095
30096 #if HAVE_AS_TLS
30097 if (TARGET_XCOFF && SYMBOL_REF_P (base))
30098 {
30099 switch (SYMBOL_REF_TLS_MODEL (base))
30100 {
30101 case 0:
30102 break;
30103 case TLS_MODEL_LOCAL_EXEC:
30104 fputs ("@le", file);
30105 break;
30106 case TLS_MODEL_INITIAL_EXEC:
30107 fputs ("@ie", file);
30108 break;
30109 /* Use global-dynamic for local-dynamic. */
30110 case TLS_MODEL_GLOBAL_DYNAMIC:
30111 case TLS_MODEL_LOCAL_DYNAMIC:
30112 putc ('\n', file);
30113 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
30114 fputs ("\t.tc .", file);
30115 RS6000_OUTPUT_BASENAME (file, name);
30116 fputs ("[TC],", file);
30117 output_addr_const (file, x);
30118 fputs ("@m", file);
30119 break;
30120 default:
30121 gcc_unreachable ();
30122 }
30123 }
30124 #endif
30125
30126 putc ('\n', file);
30127 }
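
/* Worked example (hand-computed, not captured from a build): on 64-bit
   AIX without -mminimal-toc, the DFmode constant 1.0 has target words
   0x3ff00000 and 0x00000000, so the code above prints

	LC..7:
		.tc FD_3ff00000_0[TC],0x3ff0000000000000

   where the label number 7 is arbitrary.  Under ELF or -mminimal-toc
   the same constant gets only the label and a DOUBLE_INT_ASM_OP
   (.quad style) line.  */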
30128 \f
30129 /* Output an assembler pseudo-op to write an ASCII string of N characters
30130 starting at P to FILE.
30131
30132 On the RS/6000, we have to do this using the .byte operation and
30133 write out special characters outside the quoted string.
30134 Also, the assembler is broken; very long strings are truncated,
30135 so we must artificially break them up early. */
30136
30137 void
30138 output_ascii (FILE *file, const char *p, int n)
30139 {
30140 char c;
30141 int i, count_string;
30142 const char *for_string = "\t.byte \"";
30143 const char *for_decimal = "\t.byte ";
30144 const char *to_close = NULL;
30145
30146 count_string = 0;
30147 for (i = 0; i < n; i++)
30148 {
30149 c = *p++;
30150 if (c >= ' ' && c < 0177)
30151 {
30152 if (for_string)
30153 fputs (for_string, file);
30154 putc (c, file);
30155
30156 /* Write two quotes to get one. */
30157 if (c == '"')
30158 {
30159 putc (c, file);
30160 ++count_string;
30161 }
30162
30163 for_string = NULL;
30164 for_decimal = "\"\n\t.byte ";
30165 to_close = "\"\n";
30166 ++count_string;
30167
30168 if (count_string >= 512)
30169 {
30170 fputs (to_close, file);
30171
30172 for_string = "\t.byte \"";
30173 for_decimal = "\t.byte ";
30174 to_close = NULL;
30175 count_string = 0;
30176 }
30177 }
30178 else
30179 {
30180 if (for_decimal)
30181 fputs (for_decimal, file);
30182 fprintf (file, "%d", c);
30183
30184 for_string = "\n\t.byte \"";
30185 for_decimal = ", ";
30186 to_close = "\n";
30187 count_string = 0;
30188 }
30189 }
30190
30191 /* Now close the string if we have written one. Then end the line. */
30192 if (to_close)
30193 fputs (to_close, file);
30194 }
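
/* Hand-traced example: output_ascii (file, "hi\n", 3) writes

	.byte "hi"
	.byte 10

   printable runs are accumulated into one quoted string (flushed every
   512 characters), while other bytes are emitted as decimals.  */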
30195 \f
30196 /* Generate a unique section name for FILENAME for a section type
30197 represented by SECTION_DESC. Output goes into BUF.
30198
30199 SECTION_DESC can be any string, as long as it is different for each
30200 possible section type.
30201
30202 We name the section in the same manner as xlc. The name begins with an
30203 underscore followed by the filename (after stripping any leading directory
30204 names) with the last period replaced by the string SECTION_DESC. If
30205 FILENAME does not contain a period, SECTION_DESC is appended to the end of
30206 the name. */
30207
30208 void
30209 rs6000_gen_section_name (char **buf, const char *filename,
30210 const char *section_desc)
30211 {
30212 const char *q, *after_last_slash, *last_period = 0;
30213 char *p;
30214 int len;
30215
30216 after_last_slash = filename;
30217 for (q = filename; *q; q++)
30218 {
30219 if (*q == '/')
30220 after_last_slash = q + 1;
30221 else if (*q == '.')
30222 last_period = q;
30223 }
30224
30225 len = strlen (after_last_slash) + strlen (section_desc) + 2;
30226 *buf = (char *) xmalloc (len);
30227
30228 p = *buf;
30229 *p++ = '_';
30230
30231 for (q = after_last_slash; *q; q++)
30232 {
30233 if (q == last_period)
30234 {
30235 strcpy (p, section_desc);
30236 p += strlen (section_desc);
30237 break;
30238 }
30239
30240 else if (ISALNUM (*q))
30241 *p++ = *q;
30242 }
30243
30244 if (last_period == 0)
30245 strcpy (p, section_desc);
30246 else
30247 *p = '\0';
30248 }
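
/* For example (hypothetical inputs): FILENAME "src/foo.c" with
   SECTION_DESC "bss_" yields "_foobss_"; plain "foo" with no period
   also yields "_foobss_", since SECTION_DESC is then appended at the
   end.  */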
30249 \f
30250 /* Emit profile function. */
30251
30252 void
30253 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
30254 {
30255 /* Non-standard profiling for kernels, which just saves LR then calls
30256 _mcount without worrying about arg saves. The idea is to change
30257 the function prologue as little as possible as it isn't easy to
30258 account for arg save/restore code added just for _mcount. */
30259 if (TARGET_PROFILE_KERNEL)
30260 return;
30261
30262 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
30263 {
30264 #ifndef NO_PROFILE_COUNTERS
30265 # define NO_PROFILE_COUNTERS 0
30266 #endif
30267 if (NO_PROFILE_COUNTERS)
30268 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30269 LCT_NORMAL, VOIDmode);
30270 else
30271 {
30272 char buf[30];
30273 const char *label_name;
30274 rtx fun;
30275
30276 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30277 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30278 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30279
30280 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30281 LCT_NORMAL, VOIDmode, fun, Pmode);
30282 }
30283 }
30284 else if (DEFAULT_ABI == ABI_DARWIN)
30285 {
30286 const char *mcount_name = RS6000_MCOUNT;
30287 int caller_addr_regno = LR_REGNO;
30288
30289 /* Be conservative and always set this, at least for now. */
30290 crtl->uses_pic_offset_table = 1;
30291
30292 #if TARGET_MACHO
30293 /* For PIC code, set up a stub and collect the caller's address
30294 from r0, which is where the prologue puts it. */
30295 if (MACHOPIC_INDIRECT
30296 && crtl->uses_pic_offset_table)
30297 caller_addr_regno = 0;
30298 #endif
30299 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30300 LCT_NORMAL, VOIDmode,
30301 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30302 }
30303 }
30304
30305 /* Write function profiler code. */
30306
30307 void
30308 output_function_profiler (FILE *file, int labelno)
30309 {
30310 char buf[100];
30311
30312 switch (DEFAULT_ABI)
30313 {
30314 default:
30315 gcc_unreachable ();
30316
30317 case ABI_V4:
30318 if (!TARGET_32BIT)
30319 {
30320 warning (0, "no profiling of 64-bit code for this ABI");
30321 return;
30322 }
30323 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30324 fprintf (file, "\tmflr %s\n", reg_names[0]);
30325 if (NO_PROFILE_COUNTERS)
30326 {
30327 asm_fprintf (file, "\tstw %s,4(%s)\n",
30328 reg_names[0], reg_names[1]);
30329 }
30330 else if (TARGET_SECURE_PLT && flag_pic)
30331 {
30332 if (TARGET_LINK_STACK)
30333 {
30334 char name[32];
30335 get_ppc476_thunk_name (name);
30336 asm_fprintf (file, "\tbl %s\n", name);
30337 }
30338 else
30339 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30340 asm_fprintf (file, "\tstw %s,4(%s)\n",
30341 reg_names[0], reg_names[1]);
30342 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30343 asm_fprintf (file, "\taddis %s,%s,",
30344 reg_names[12], reg_names[12]);
30345 assemble_name (file, buf);
30346 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30347 assemble_name (file, buf);
30348 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30349 }
30350 else if (flag_pic == 1)
30351 {
30352 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30353 asm_fprintf (file, "\tstw %s,4(%s)\n",
30354 reg_names[0], reg_names[1]);
30355 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30356 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30357 assemble_name (file, buf);
30358 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30359 }
30360 else if (flag_pic > 1)
30361 {
30362 asm_fprintf (file, "\tstw %s,4(%s)\n",
30363 reg_names[0], reg_names[1]);
30364 /* Now, we need to get the address of the label. */
30365 if (TARGET_LINK_STACK)
30366 {
30367 char name[32];
30368 get_ppc476_thunk_name (name);
30369 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30370 assemble_name (file, buf);
30371 fputs ("-.\n1:", file);
30372 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30373 asm_fprintf (file, "\taddi %s,%s,4\n",
30374 reg_names[11], reg_names[11]);
30375 }
30376 else
30377 {
30378 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30379 assemble_name (file, buf);
30380 fputs ("-.\n1:", file);
30381 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30382 }
30383 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30384 reg_names[0], reg_names[11]);
30385 asm_fprintf (file, "\tadd %s,%s,%s\n",
30386 reg_names[0], reg_names[0], reg_names[11]);
30387 }
30388 else
30389 {
30390 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30391 assemble_name (file, buf);
30392 fputs ("@ha\n", file);
30393 asm_fprintf (file, "\tstw %s,4(%s)\n",
30394 reg_names[0], reg_names[1]);
30395 asm_fprintf (file, "\tla %s,", reg_names[0]);
30396 assemble_name (file, buf);
30397 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30398 }
30399
30400 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30401 fprintf (file, "\tbl %s%s\n",
30402 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30403 break;
30404
30405 case ABI_AIX:
30406 case ABI_ELFv2:
30407 case ABI_DARWIN:
30408 /* Don't do anything, done in output_profile_hook (). */
30409 break;
30410 }
30411 }
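
/* Sketch of the ABI_V4 case with no PIC and profile counters enabled
   (the LP label number is illustrative):

	mflr 0
	lis  12,.LP3@ha
	stw  0,4(1)
	la   0,.LP3@l(12)
	bl   _mcount

   i.e. LR is saved at 4(r1) and the address of this call site's
   counter label is handed to _mcount in r0.  */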
30412
30413 \f
30414
30415 /* The following variable holds the last issued insn. */
30416
30417 static rtx_insn *last_scheduled_insn;
30418
30419 /* The following variable helps to balance issuing of load and
30420 store instructions */
30421
30422 static int load_store_pendulum;
30423
30424 /* The following variable helps pair divide insns during scheduling. */
30425 static int divide_cnt;
30426 /* The following variable helps pair and alternate vector and vector load
30427 insns during scheduling. */
30428 static int vec_pairing;
30429
30430
30431 /* Power4 load update and store update instructions are cracked into a
30432 load or store and an integer insn which are executed in the same cycle.
30433 Branches have their own dispatch slot which does not count against the
30434 GCC issue rate, but it changes the program flow so there are no other
30435 instructions to issue in this cycle. */
30436
30437 static int
30438 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30439 {
30440 last_scheduled_insn = insn;
30441 if (GET_CODE (PATTERN (insn)) == USE
30442 || GET_CODE (PATTERN (insn)) == CLOBBER)
30443 {
30444 cached_can_issue_more = more;
30445 return cached_can_issue_more;
30446 }
30447
30448 if (insn_terminates_group_p (insn, current_group))
30449 {
30450 cached_can_issue_more = 0;
30451 return cached_can_issue_more;
30452 }
30453
30454 /* If the insn has no reservation but we reach here, leave MORE unchanged. */
30455 if (recog_memoized (insn) < 0)
30456 return more;
30457
30458 if (rs6000_sched_groups)
30459 {
30460 if (is_microcoded_insn (insn))
30461 cached_can_issue_more = 0;
30462 else if (is_cracked_insn (insn))
30463 cached_can_issue_more = more > 2 ? more - 2 : 0;
30464 else
30465 cached_can_issue_more = more - 1;
30466
30467 return cached_can_issue_more;
30468 }
30469
30470 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30471 return 0;
30472
30473 cached_can_issue_more = more - 1;
30474 return cached_can_issue_more;
30475 }
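
/* Worked example of the accounting above: on a dispatch-group target
   (rs6000_sched_groups), a cracked insn takes two issue slots, so with
   more == 4 this caches and returns 2; a microcoded insn closes the
   group and returns 0.  */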
30476
30477 static int
30478 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30479 {
30480 int r = rs6000_variable_issue_1 (insn, more);
30481 if (verbose)
30482 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30483 return r;
30484 }
30485
30486 /* Adjust the cost of a scheduling dependency. Return the new cost of
30487 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30488
30489 static int
30490 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30491 unsigned int)
30492 {
30493 enum attr_type attr_type;
30494
30495 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30496 return cost;
30497
30498 switch (dep_type)
30499 {
30500 case REG_DEP_TRUE:
30501 {
30502 /* Data dependency; DEP_INSN writes a register that INSN reads
30503 some cycles later. */
30504
30505 /* Separate a load from a narrower, dependent store. */
30506 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30507 && GET_CODE (PATTERN (insn)) == SET
30508 && GET_CODE (PATTERN (dep_insn)) == SET
30509 && MEM_P (XEXP (PATTERN (insn), 1))
30510 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30511 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30512 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30513 return cost + 14;
30514
30515 attr_type = get_attr_type (insn);
30516
30517 switch (attr_type)
30518 {
30519 case TYPE_JMPREG:
30520 /* Tell the first scheduling pass about the latency between
30521 a mtctr and bctr (and mtlr and br/blr). The first
30522 scheduling pass will not know about this latency since
30523 the mtctr instruction, which has the latency associated
30524 to it, will be generated by reload. */
30525 return 4;
30526 case TYPE_BRANCH:
30527 /* Leave some extra cycles between a compare and its
30528 dependent branch, to inhibit expensive mispredicts. */
30529 if ((rs6000_tune == PROCESSOR_PPC603
30530 || rs6000_tune == PROCESSOR_PPC604
30531 || rs6000_tune == PROCESSOR_PPC604e
30532 || rs6000_tune == PROCESSOR_PPC620
30533 || rs6000_tune == PROCESSOR_PPC630
30534 || rs6000_tune == PROCESSOR_PPC750
30535 || rs6000_tune == PROCESSOR_PPC7400
30536 || rs6000_tune == PROCESSOR_PPC7450
30537 || rs6000_tune == PROCESSOR_PPCE5500
30538 || rs6000_tune == PROCESSOR_PPCE6500
30539 || rs6000_tune == PROCESSOR_POWER4
30540 || rs6000_tune == PROCESSOR_POWER5
30541 || rs6000_tune == PROCESSOR_POWER7
30542 || rs6000_tune == PROCESSOR_POWER8
30543 || rs6000_tune == PROCESSOR_POWER9
30544 || rs6000_tune == PROCESSOR_CELL)
30545 && recog_memoized (dep_insn)
30546 && (INSN_CODE (dep_insn) >= 0))
30547
30548 switch (get_attr_type (dep_insn))
30549 {
30550 case TYPE_CMP:
30551 case TYPE_FPCOMPARE:
30552 case TYPE_CR_LOGICAL:
30553 return cost + 2;
30554 case TYPE_EXTS:
30555 case TYPE_MUL:
30556 if (get_attr_dot (dep_insn) == DOT_YES)
30557 return cost + 2;
30558 else
30559 break;
30560 case TYPE_SHIFT:
30561 if (get_attr_dot (dep_insn) == DOT_YES
30562 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30563 return cost + 2;
30564 else
30565 break;
30566 default:
30567 break;
30568 }
30569 break;
30570
30571 case TYPE_STORE:
30572 case TYPE_FPSTORE:
30573 if ((rs6000_tune == PROCESSOR_POWER6)
30574 && recog_memoized (dep_insn)
30575 && (INSN_CODE (dep_insn) >= 0))
30576 {
30577
30578 if (GET_CODE (PATTERN (insn)) != SET)
30579 /* If this happens, we have to extend this to schedule
30580 optimally. Return default for now. */
30581 return cost;
30582
30583 /* Adjust the cost for the case where the value written
30584 by a fixed point operation is used as the address
30585 gen value on a store. */
30586 switch (get_attr_type (dep_insn))
30587 {
30588 case TYPE_LOAD:
30589 case TYPE_CNTLZ:
30590 {
30591 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30592 return get_attr_sign_extend (dep_insn)
30593 == SIGN_EXTEND_YES ? 6 : 4;
30594 break;
30595 }
30596 case TYPE_SHIFT:
30597 {
30598 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30599 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30600 6 : 3;
30601 break;
30602 }
30603 case TYPE_INTEGER:
30604 case TYPE_ADD:
30605 case TYPE_LOGICAL:
30606 case TYPE_EXTS:
30607 case TYPE_INSERT:
30608 {
30609 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30610 return 3;
30611 break;
30612 }
30613 case TYPE_STORE:
30614 case TYPE_FPLOAD:
30615 case TYPE_FPSTORE:
30616 {
30617 if (get_attr_update (dep_insn) == UPDATE_YES
30618 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30619 return 3;
30620 break;
30621 }
30622 case TYPE_MUL:
30623 {
30624 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30625 return 17;
30626 break;
30627 }
30628 case TYPE_DIV:
30629 {
30630 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30631 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30632 break;
30633 }
30634 default:
30635 break;
30636 }
30637 }
30638 break;
30639
30640 case TYPE_LOAD:
30641 if ((rs6000_tune == PROCESSOR_POWER6)
30642 && recog_memoized (dep_insn)
30643 && (INSN_CODE (dep_insn) >= 0))
30644 {
30645
30646 /* Adjust the cost for the case where the value written
30647 by a fixed point instruction is used within the address
30648 gen portion of a subsequent load(u)(x) */
30649 switch (get_attr_type (dep_insn))
30650 {
30651 case TYPE_LOAD:
30652 case TYPE_CNTLZ:
30653 {
30654 if (set_to_load_agen (dep_insn, insn))
30655 return get_attr_sign_extend (dep_insn)
30656 == SIGN_EXTEND_YES ? 6 : 4;
30657 break;
30658 }
30659 case TYPE_SHIFT:
30660 {
30661 if (set_to_load_agen (dep_insn, insn))
30662 		  return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES
30663 			 ? 6 : 3;
30664 break;
30665 }
30666 case TYPE_INTEGER:
30667 case TYPE_ADD:
30668 case TYPE_LOGICAL:
30669 case TYPE_EXTS:
30670 case TYPE_INSERT:
30671 {
30672 if (set_to_load_agen (dep_insn, insn))
30673 return 3;
30674 break;
30675 }
30676 case TYPE_STORE:
30677 case TYPE_FPLOAD:
30678 case TYPE_FPSTORE:
30679 {
30680 if (get_attr_update (dep_insn) == UPDATE_YES
30681 && set_to_load_agen (dep_insn, insn))
30682 return 3;
30683 break;
30684 }
30685 case TYPE_MUL:
30686 {
30687 if (set_to_load_agen (dep_insn, insn))
30688 return 17;
30689 break;
30690 }
30691 case TYPE_DIV:
30692 {
30693 if (set_to_load_agen (dep_insn, insn))
30694 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30695 break;
30696 }
30697 default:
30698 break;
30699 }
30700 }
30701 break;
30702
30703 case TYPE_FPLOAD:
30704 if ((rs6000_tune == PROCESSOR_POWER6)
30705 && get_attr_update (insn) == UPDATE_NO
30706 && recog_memoized (dep_insn)
30707 && (INSN_CODE (dep_insn) >= 0)
30708 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30709 	      return 2;
	    break;
30710
30711 default:
30712 break;
30713 }
30714
30715 /* Fall out to return default cost. */
30716 }
30717 break;
30718
30719 case REG_DEP_OUTPUT:
30720 /* Output dependency; DEP_INSN writes a register that INSN writes some
30721 cycles later. */
30722 if ((rs6000_tune == PROCESSOR_POWER6)
30723 && recog_memoized (dep_insn)
30724 && (INSN_CODE (dep_insn) >= 0))
30725 {
30726 attr_type = get_attr_type (insn);
30727
30728 switch (attr_type)
30729 {
30730 case TYPE_FP:
30731 case TYPE_FPSIMPLE:
30732 if (get_attr_type (dep_insn) == TYPE_FP
30733 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30734 return 1;
30735 break;
30736 case TYPE_FPLOAD:
30737 if (get_attr_update (insn) == UPDATE_NO
30738 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30739 return 2;
30740 break;
30741 default:
30742 break;
30743 }
30744 }
30745 /* Fall through, no cost for output dependency. */
30746 /* FALLTHRU */
30747
30748 case REG_DEP_ANTI:
30749 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30750 cycles later. */
30751 return 0;
30752
30753 default:
30754 gcc_unreachable ();
30755 }
30756
30757 return cost;
30758 }
30759
30760 /* Debug version of rs6000_adjust_cost. */
30761
30762 static int
30763 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30764 int cost, unsigned int dw)
30765 {
30766 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30767
30768 if (ret != cost)
30769 {
30770 const char *dep;
30771
30772 switch (dep_type)
30773 {
30774 	default: dep = "unknown dependency"; break;
30775 case REG_DEP_TRUE: dep = "data dependency"; break;
30776 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30777 	case REG_DEP_ANTI: dep = "anti dependency"; break;
30778 }
30779
30780 fprintf (stderr,
30781 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30782 "%s, insn:\n", ret, cost, dep);
30783
30784 debug_rtx (insn);
30785 }
30786
30787 return ret;
30788 }
30789
30790 /* The function returns true if INSN is microcoded.
30791    Return false otherwise.  */
30792
30793 static bool
30794 is_microcoded_insn (rtx_insn *insn)
30795 {
30796 if (!insn || !NONDEBUG_INSN_P (insn)
30797 || GET_CODE (PATTERN (insn)) == USE
30798 || GET_CODE (PATTERN (insn)) == CLOBBER)
30799 return false;
30800
30801 if (rs6000_tune == PROCESSOR_CELL)
30802 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30803
30804 if (rs6000_sched_groups
30805 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30806 {
30807 enum attr_type type = get_attr_type (insn);
30808 if ((type == TYPE_LOAD
30809 && get_attr_update (insn) == UPDATE_YES
30810 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30811 || ((type == TYPE_LOAD || type == TYPE_STORE)
30812 && get_attr_update (insn) == UPDATE_YES
30813 && get_attr_indexed (insn) == INDEXED_YES)
30814 || type == TYPE_MFCR)
30815 return true;
30816 }
30817
30818 return false;
30819 }
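
/* Editorial note (illustrative, not from the original source): on
   POWER4/5 the tests above match, e.g., "lhau" (load halfword algebraic
   with update: TYPE_LOAD, UPDATE_YES, SIGN_EXTEND_YES), "lwzux"/"stwux"
   (load/store with update indexed: UPDATE_YES, INDEXED_YES), and "mfcr".
   The mnemonics are examples only; the attribute tests are what the
   scheduler actually relies on.  */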
30820
30821 /* The function returns true if INSN is cracked into 2 instructions
30822 by the processor (and therefore occupies 2 issue slots). */
30823
30824 static bool
30825 is_cracked_insn (rtx_insn *insn)
30826 {
30827 if (!insn || !NONDEBUG_INSN_P (insn)
30828 || GET_CODE (PATTERN (insn)) == USE
30829 || GET_CODE (PATTERN (insn)) == CLOBBER)
30830 return false;
30831
30832 if (rs6000_sched_groups
30833 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30834 {
30835 enum attr_type type = get_attr_type (insn);
30836 if ((type == TYPE_LOAD
30837 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30838 && get_attr_update (insn) == UPDATE_NO)
30839 || (type == TYPE_LOAD
30840 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30841 && get_attr_update (insn) == UPDATE_YES
30842 && get_attr_indexed (insn) == INDEXED_NO)
30843 || (type == TYPE_STORE
30844 && get_attr_update (insn) == UPDATE_YES
30845 && get_attr_indexed (insn) == INDEXED_NO)
30846 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30847 && get_attr_update (insn) == UPDATE_YES)
30848 || (type == TYPE_CR_LOGICAL
30849 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30850 || (type == TYPE_EXTS
30851 && get_attr_dot (insn) == DOT_YES)
30852 || (type == TYPE_SHIFT
30853 && get_attr_dot (insn) == DOT_YES
30854 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30855 || (type == TYPE_MUL
30856 && get_attr_dot (insn) == DOT_YES)
30857 || type == TYPE_DIV
30858 || (type == TYPE_INSERT
30859 && get_attr_size (insn) == SIZE_32))
30860 return true;
30861 }
30862
30863 return false;
30864 }
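
/* Editorial note (illustrative, not from the original source): a cracked
   insn splits into two internal operations at dispatch.  For example, a
   non-indexed load with update such as "lwzu r3,4(r9)" matches the
   TYPE_LOAD/UPDATE_YES/INDEXED_NO clause above and is cracked into the
   load plus the add that updates r9, so it consumes two issue slots.
   Dot-form (recording) instructions similarly split off the CR-setting
   compare.  Mnemonics are given for illustration only.  */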
30865
30866 /* The function returns true if INSN can be issued only from
30867 the branch slot. */
30868
30869 static bool
30870 is_branch_slot_insn (rtx_insn *insn)
30871 {
30872 if (!insn || !NONDEBUG_INSN_P (insn)
30873 || GET_CODE (PATTERN (insn)) == USE
30874 || GET_CODE (PATTERN (insn)) == CLOBBER)
30875 return false;
30876
30877 if (rs6000_sched_groups)
30878 {
30879 enum attr_type type = get_attr_type (insn);
30880 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30881 return true;
30882 return false;
30883 }
30884
30885 return false;
30886 }
30887
30888 /* The function returns true if OUT_INSN sets a value that is
30889    used in the address generation computation of IN_INSN.  */
30890 static bool
30891 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30892 {
30893 rtx out_set, in_set;
30894
30895 /* For performance reasons, only handle the simple case where
30896 both loads are a single_set. */
30897 out_set = single_set (out_insn);
30898 if (out_set)
30899 {
30900 in_set = single_set (in_insn);
30901 if (in_set)
30902 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30903 }
30904
30905 return false;
30906 }
30907
30908 /* Try to determine base/offset/size parts of the given MEM.
30909 Return true if successful, false if all the values couldn't
30910 be determined.
30911
30912 This function only looks for REG or REG+CONST address forms.
30913 REG+REG address form will return false. */
30914
30915 static bool
30916 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30917 HOST_WIDE_INT *size)
30918 {
30919 rtx addr_rtx;
30920   if (MEM_SIZE_KNOWN_P (mem))
30921 *size = MEM_SIZE (mem);
30922 else
30923 return false;
30924
30925 addr_rtx = (XEXP (mem, 0));
30926 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30927 addr_rtx = XEXP (addr_rtx, 1);
30928
30929 *offset = 0;
30930 while (GET_CODE (addr_rtx) == PLUS
30931 && CONST_INT_P (XEXP (addr_rtx, 1)))
30932 {
30933 *offset += INTVAL (XEXP (addr_rtx, 1));
30934 addr_rtx = XEXP (addr_rtx, 0);
30935 }
30936 if (!REG_P (addr_rtx))
30937 return false;
30938
30939 *base = addr_rtx;
30940 return true;
30941 }
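
/* Editorial example (illustrative, not from the original source): for
     (mem:DI (plus:DI (reg:DI 3) (const_int 8)))
   with a known 8-byte size, this returns base = r3, offset = 8,
   size = 8.  A PRE_MODIFY address is examined through its new-value
   expression, so
     (mem:DI (pre_modify:DI (reg:DI 9)
			    (plus:DI (reg:DI 9) (const_int 16))))
   yields base = r9, offset = 16.  An indexed REG+REG address fails the
   final REG_P test and the function returns false.  */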
30942
30943 /* The function returns true if the target storage location of
30944    MEM1 is adjacent to the target storage location of MEM2.  */
30946
30947 static bool
30948 adjacent_mem_locations (rtx mem1, rtx mem2)
30949 {
30950 rtx reg1, reg2;
30951 HOST_WIDE_INT off1, size1, off2, size2;
30952
30953 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30954 && get_memref_parts (mem2, &reg2, &off2, &size2))
30955 return ((REGNO (reg1) == REGNO (reg2))
30956 && ((off1 + size1 == off2)
30957 || (off2 + size2 == off1)));
30958
30959 return false;
30960 }
30961
30962 /* This function returns true if it can be determined that the two MEM
30963 locations overlap by at least 1 byte based on base reg/offset/size. */
30964
30965 static bool
30966 mem_locations_overlap (rtx mem1, rtx mem2)
30967 {
30968 rtx reg1, reg2;
30969 HOST_WIDE_INT off1, size1, off2, size2;
30970
30971 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30972 && get_memref_parts (mem2, &reg2, &off2, &size2))
30973 return ((REGNO (reg1) == REGNO (reg2))
30974 && (((off1 <= off2) && (off1 + size1 > off2))
30975 || ((off2 <= off1) && (off2 + size2 > off1))));
30976
30977 return false;
30978 }
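
/* Editorial worked example (illustrative, not from the original source):
   given a common base register, two 8-byte accesses at offsets 0 and 8
   satisfy off1 + size1 == off2, so adjacent_mem_locations returns true.
   An 8-byte access at offset 4 against one at offset 0 satisfies
   off1 <= off2 && off1 + size1 > off2, so mem_locations_overlap returns
   true.  Accesses based on different registers never compare equal here,
   even if the registers alias at run time.  */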
30979
30980 /* A C statement (sans semicolon) to update the integer scheduling
30981 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30982 INSN earlier, reduce the priority to execute INSN later. Do not
30983 define this macro if you do not need to adjust the scheduling
30984 priorities of insns. */
30985
30986 static int
30987 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30988 {
30989 rtx load_mem, str_mem;
30990 /* On machines (like the 750) which have asymmetric integer units,
30991 where one integer unit can do multiply and divides and the other
30992 can't, reduce the priority of multiply/divide so it is scheduled
30993 before other integer operations. */
30994
30995 #if 0
30996 if (! INSN_P (insn))
30997 return priority;
30998
30999 if (GET_CODE (PATTERN (insn)) == USE)
31000 return priority;
31001
31002 switch (rs6000_tune) {
31003 case PROCESSOR_PPC750:
31004 switch (get_attr_type (insn))
31005 {
31006 default:
31007 break;
31008
31009 case TYPE_MUL:
31010 case TYPE_DIV:
31011 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
31012 priority, priority);
31013 if (priority >= 0 && priority < 0x01000000)
31014 priority >>= 3;
31015 break;
31016 }
31017 }
31018 #endif
31019
31020 if (insn_must_be_first_in_group (insn)
31021 && reload_completed
31022 && current_sched_info->sched_max_insns_priority
31023 && rs6000_sched_restricted_insns_priority)
31024 {
31025
31026 /* Prioritize insns that can be dispatched only in the first
31027 dispatch slot. */
31028 if (rs6000_sched_restricted_insns_priority == 1)
31029 /* Attach highest priority to insn. This means that in
31030 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
31031 precede 'priority' (critical path) considerations. */
31032 return current_sched_info->sched_max_insns_priority;
31033 else if (rs6000_sched_restricted_insns_priority == 2)
31034 /* Increase priority of insn by a minimal amount. This means that in
31035 haifa-sched.c:ready_sort(), only 'priority' (critical path)
31036 considerations precede dispatch-slot restriction considerations. */
31037 return (priority + 1);
31038 }
31039
31040 if (rs6000_tune == PROCESSOR_POWER6
31041 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
31042 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
31043 /* Attach highest priority to insn if the scheduler has just issued two
31044 stores and this instruction is a load, or two loads and this instruction
31045 is a store. Power6 wants loads and stores scheduled alternately
31046      when possible.  */
31047 return current_sched_info->sched_max_insns_priority;
31048
31049 return priority;
31050 }
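
/* Editorial example (illustrative, not from the original source):
   assuming the option spelling -mprioritize-restricted-insns=N, with
   N == 1 a dispatch-slot-restricted insn is raised to
   sched_max_insns_priority, so slot restrictions dominate critical-path
   priority in ready_sort; with N == 2 its priority is bumped by just 1,
   so critical-path ordering still dominates and the bump only breaks
   ties.  */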
31051
31052 /* Return true if the instruction is nonpipelined on the Cell. */
31053 static bool
31054 is_nonpipeline_insn (rtx_insn *insn)
31055 {
31056 enum attr_type type;
31057 if (!insn || !NONDEBUG_INSN_P (insn)
31058 || GET_CODE (PATTERN (insn)) == USE
31059 || GET_CODE (PATTERN (insn)) == CLOBBER)
31060 return false;
31061
31062 type = get_attr_type (insn);
31063 if (type == TYPE_MUL
31064 || type == TYPE_DIV
31065 || type == TYPE_SDIV
31066 || type == TYPE_DDIV
31067 || type == TYPE_SSQRT
31068 || type == TYPE_DSQRT
31069 || type == TYPE_MFCR
31070 || type == TYPE_MFCRF
31071 || type == TYPE_MFJMPR)
31072 {
31073 return true;
31074 }
31075 return false;
31076 }
31077
31078
31079 /* Return how many instructions the machine can issue per cycle. */
31080
31081 static int
31082 rs6000_issue_rate (void)
31083 {
31084   /* Unless scheduling for register pressure, use an issue rate of 1 for
31085      the first scheduling pass to decrease degradation.  */
31086 if (!reload_completed && !flag_sched_pressure)
31087 return 1;
31088
31089 switch (rs6000_tune) {
31090 case PROCESSOR_RS64A:
31091 case PROCESSOR_PPC601: /* ? */
31092 case PROCESSOR_PPC7450:
31093 return 3;
31094 case PROCESSOR_PPC440:
31095 case PROCESSOR_PPC603:
31096 case PROCESSOR_PPC750:
31097 case PROCESSOR_PPC7400:
31098 case PROCESSOR_PPC8540:
31099 case PROCESSOR_PPC8548:
31100 case PROCESSOR_CELL:
31101 case PROCESSOR_PPCE300C2:
31102 case PROCESSOR_PPCE300C3:
31103 case PROCESSOR_PPCE500MC:
31104 case PROCESSOR_PPCE500MC64:
31105 case PROCESSOR_PPCE5500:
31106 case PROCESSOR_PPCE6500:
31107 case PROCESSOR_TITAN:
31108 return 2;
31109 case PROCESSOR_PPC476:
31110 case PROCESSOR_PPC604:
31111 case PROCESSOR_PPC604e:
31112 case PROCESSOR_PPC620:
31113 case PROCESSOR_PPC630:
31114 return 4;
31115 case PROCESSOR_POWER4:
31116 case PROCESSOR_POWER5:
31117 case PROCESSOR_POWER6:
31118 case PROCESSOR_POWER7:
31119 return 5;
31120 case PROCESSOR_POWER8:
31121 return 7;
31122 case PROCESSOR_POWER9:
31123 return 6;
31124 default:
31125 return 1;
31126 }
31127 }
31128
31129 /* Return how many instructions to look ahead for better insn
31130 scheduling. */
31131
31132 static int
31133 rs6000_use_sched_lookahead (void)
31134 {
31135 switch (rs6000_tune)
31136 {
31137 case PROCESSOR_PPC8540:
31138 case PROCESSOR_PPC8548:
31139 return 4;
31140
31141 case PROCESSOR_CELL:
31142 return (reload_completed ? 8 : 0);
31143
31144 default:
31145 return 0;
31146 }
31147 }
31148
31149 /* We are choosing insn from the ready queue. Return zero if INSN can be
31150 chosen. */
31151 static int
31152 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
31153 {
31154 if (ready_index == 0)
31155 return 0;
31156
31157 if (rs6000_tune != PROCESSOR_CELL)
31158 return 0;
31159
31160 gcc_assert (insn != NULL_RTX && INSN_P (insn));
31161
31162 if (!reload_completed
31163 || is_nonpipeline_insn (insn)
31164 || is_microcoded_insn (insn))
31165 return 1;
31166
31167 return 0;
31168 }
31169
31170 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
31171 and return true. */
31172
31173 static bool
31174 find_mem_ref (rtx pat, rtx *mem_ref)
31175 {
31176 const char * fmt;
31177 int i, j;
31178
31179 /* stack_tie does not produce any real memory traffic. */
31180 if (tie_operand (pat, VOIDmode))
31181 return false;
31182
31183 if (MEM_P (pat))
31184 {
31185 *mem_ref = pat;
31186 return true;
31187 }
31188
31189 /* Recursively process the pattern. */
31190 fmt = GET_RTX_FORMAT (GET_CODE (pat));
31191
31192 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
31193 {
31194 if (fmt[i] == 'e')
31195 {
31196 if (find_mem_ref (XEXP (pat, i), mem_ref))
31197 return true;
31198 }
31199 else if (fmt[i] == 'E')
31200 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
31201 {
31202 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
31203 return true;
31204 }
31205 }
31206
31207 return false;
31208 }
31209
31210 /* Determine if PAT is a PATTERN of a load insn. */
31211
31212 static bool
31213 is_load_insn1 (rtx pat, rtx *load_mem)
31214 {
31215   if (!pat)
31216 return false;
31217
31218 if (GET_CODE (pat) == SET)
31219 return find_mem_ref (SET_SRC (pat), load_mem);
31220
31221 if (GET_CODE (pat) == PARALLEL)
31222 {
31223 int i;
31224
31225 for (i = 0; i < XVECLEN (pat, 0); i++)
31226 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
31227 return true;
31228 }
31229
31230 return false;
31231 }
31232
31233 /* Determine if INSN loads from memory. */
31234
31235 static bool
31236 is_load_insn (rtx insn, rtx *load_mem)
31237 {
31238 if (!insn || !INSN_P (insn))
31239 return false;
31240
31241 if (CALL_P (insn))
31242 return false;
31243
31244 return is_load_insn1 (PATTERN (insn), load_mem);
31245 }
31246
31247 /* Determine if PAT is a PATTERN of a store insn. */
31248
31249 static bool
31250 is_store_insn1 (rtx pat, rtx *str_mem)
31251 {
31252   if (!pat)
31253 return false;
31254
31255 if (GET_CODE (pat) == SET)
31256 return find_mem_ref (SET_DEST (pat), str_mem);
31257
31258 if (GET_CODE (pat) == PARALLEL)
31259 {
31260 int i;
31261
31262 for (i = 0; i < XVECLEN (pat, 0); i++)
31263 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
31264 return true;
31265 }
31266
31267 return false;
31268 }
31269
31270 /* Determine if INSN stores to memory. */
31271
31272 static bool
31273 is_store_insn (rtx insn, rtx *str_mem)
31274 {
31275 if (!insn || !INSN_P (insn))
31276 return false;
31277
31278 return is_store_insn1 (PATTERN (insn), str_mem);
31279 }
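
/* Editorial example (illustrative, not from the original source): for a
   store pattern
     (set (mem:SI (reg:SI 9)) (reg:SI 3))
   is_store_insn1 hands the SET_DEST to find_mem_ref, which returns the
   (mem:SI ...) rtx through STR_MEM.  For a sign-extending load the walk
   recurses through the (sign_extend ...) wrapper in the SET_SRC before
   reaching the MEM.  */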
31280
31281 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31282
31283 static bool
31284 is_power9_pairable_vec_type (enum attr_type type)
31285 {
31286 switch (type)
31287 {
31288 case TYPE_VECSIMPLE:
31289 case TYPE_VECCOMPLEX:
31290 case TYPE_VECDIV:
31291 case TYPE_VECCMP:
31292 case TYPE_VECPERM:
31293 case TYPE_VECFLOAT:
31294 case TYPE_VECFDIV:
31295 case TYPE_VECDOUBLE:
31296 return true;
31297 default:
31298 break;
31299 }
31300 return false;
31301 }
31302
31303 /* Returns whether the dependence between INSN and NEXT is considered
31304 costly by the given target. */
31305
31306 static bool
31307 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31308 {
31309 rtx insn;
31310 rtx next;
31311 rtx load_mem, str_mem;
31312
31313   /* If the flag is not enabled, no dependence is considered costly;
31314 allow all dependent insns in the same group.
31315 This is the most aggressive option. */
31316 if (rs6000_sched_costly_dep == no_dep_costly)
31317 return false;
31318
31319   /* If the flag is set to 1, a dependence is always considered costly;
31320 do not allow dependent instructions in the same group.
31321 This is the most conservative option. */
31322 if (rs6000_sched_costly_dep == all_deps_costly)
31323 return true;
31324
31325 insn = DEP_PRO (dep);
31326 next = DEP_CON (dep);
31327
31328 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31329 && is_load_insn (next, &load_mem)
31330 && is_store_insn (insn, &str_mem))
31331 /* Prevent load after store in the same group. */
31332 return true;
31333
31334 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31335 && is_load_insn (next, &load_mem)
31336 && is_store_insn (insn, &str_mem)
31337 && DEP_TYPE (dep) == REG_DEP_TRUE
31338       && mem_locations_overlap (str_mem, load_mem))
31339 /* Prevent load after store in the same group if it is a true
31340 dependence. */
31341 return true;
31342
31343 /* The flag is set to X; dependences with latency >= X are considered costly,
31344 and will not be scheduled in the same group. */
31345 if (rs6000_sched_costly_dep <= max_dep_latency
31346 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31347 return true;
31348
31349 return false;
31350 }
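
/* Editorial worked example (illustrative, not from the original source):
   with -msched-costly-dep=3, a dependence of latency (cost) 4 whose
   consumer would be placed in the same cycle (distance 0) gives
   4 - 0 >= 3, so the two insns are kept in separate groups; if the
   consumer is already 2 cycles away, 4 - 2 = 2 < 3 and the dependence
   is not treated as costly.  */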
31351
31352 /* Return the next insn after INSN that is found before TAIL is reached,
31353 skipping any "non-active" insns - insns that will not actually occupy
31354 an issue slot. Return NULL_RTX if such an insn is not found. */
31355
31356 static rtx_insn *
31357 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31358 {
31359 if (insn == NULL_RTX || insn == tail)
31360 return NULL;
31361
31362 while (1)
31363 {
31364 insn = NEXT_INSN (insn);
31365 if (insn == NULL_RTX || insn == tail)
31366 return NULL;
31367
31368 if (CALL_P (insn)
31369 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31370 || (NONJUMP_INSN_P (insn)
31371 && GET_CODE (PATTERN (insn)) != USE
31372 && GET_CODE (PATTERN (insn)) != CLOBBER
31373 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31374 break;
31375 }
31376 return insn;
31377 }
31378
31379 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31380
31381 static int
31382 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31383 {
31384 int pos;
31385 int i;
31386 rtx_insn *tmp;
31387 enum attr_type type, type2;
31388
31389 type = get_attr_type (last_scheduled_insn);
31390
31391 /* Try to issue fixed point divides back-to-back in pairs so they will be
31392 routed to separate execution units and execute in parallel. */
31393 if (type == TYPE_DIV && divide_cnt == 0)
31394 {
31395 /* First divide has been scheduled. */
31396 divide_cnt = 1;
31397
31398 /* Scan the ready list looking for another divide, if found move it
31399 to the end of the list so it is chosen next. */
31400 pos = lastpos;
31401 while (pos >= 0)
31402 {
31403 if (recog_memoized (ready[pos]) >= 0
31404 && get_attr_type (ready[pos]) == TYPE_DIV)
31405 {
31406 tmp = ready[pos];
31407 for (i = pos; i < lastpos; i++)
31408 ready[i] = ready[i + 1];
31409 ready[lastpos] = tmp;
31410 break;
31411 }
31412 pos--;
31413 }
31414 }
31415 else
31416 {
31417 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31418 divide_cnt = 0;
31419
31420 /* The best dispatch throughput for vector and vector load insns can be
31421 	 achieved by interleaving a vector insn and a vector load such that they'll
31422 dispatch to the same superslice. If this pairing cannot be achieved
31423 then it is best to pair vector insns together and vector load insns
31424 together.
31425
31426 To aid in this pairing, vec_pairing maintains the current state with
31427 the following values:
31428
31429 0 : Initial state, no vecload/vector pairing has been started.
31430
31431 1 : A vecload or vector insn has been issued and a candidate for
31432 pairing has been found and moved to the end of the ready
31433 list. */
31434 if (type == TYPE_VECLOAD)
31435 {
31436 /* Issued a vecload. */
31437 if (vec_pairing == 0)
31438 {
31439 int vecload_pos = -1;
31440 /* We issued a single vecload, look for a vector insn to pair it
31441 with. If one isn't found, try to pair another vecload. */
31442 pos = lastpos;
31443 while (pos >= 0)
31444 {
31445 if (recog_memoized (ready[pos]) >= 0)
31446 {
31447 type2 = get_attr_type (ready[pos]);
31448 if (is_power9_pairable_vec_type (type2))
31449 {
31450 /* Found a vector insn to pair with, move it to the
31451 end of the ready list so it is scheduled next. */
31452 tmp = ready[pos];
31453 for (i = pos; i < lastpos; i++)
31454 ready[i] = ready[i + 1];
31455 ready[lastpos] = tmp;
31456 vec_pairing = 1;
31457 return cached_can_issue_more;
31458 }
31459 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31460 /* Remember position of first vecload seen. */
31461 vecload_pos = pos;
31462 }
31463 pos--;
31464 }
31465 if (vecload_pos >= 0)
31466 {
31467 /* Didn't find a vector to pair with but did find a vecload,
31468 move it to the end of the ready list. */
31469 tmp = ready[vecload_pos];
31470 for (i = vecload_pos; i < lastpos; i++)
31471 ready[i] = ready[i + 1];
31472 ready[lastpos] = tmp;
31473 vec_pairing = 1;
31474 return cached_can_issue_more;
31475 }
31476 }
31477 }
31478 else if (is_power9_pairable_vec_type (type))
31479 {
31480 /* Issued a vector operation. */
31481 if (vec_pairing == 0)
31482 {
31483 int vec_pos = -1;
31484 /* We issued a single vector insn, look for a vecload to pair it
31485 with. If one isn't found, try to pair another vector. */
31486 pos = lastpos;
31487 while (pos >= 0)
31488 {
31489 if (recog_memoized (ready[pos]) >= 0)
31490 {
31491 type2 = get_attr_type (ready[pos]);
31492 if (type2 == TYPE_VECLOAD)
31493 {
31494 /* Found a vecload insn to pair with, move it to the
31495 end of the ready list so it is scheduled next. */
31496 tmp = ready[pos];
31497 for (i = pos; i < lastpos; i++)
31498 ready[i] = ready[i + 1];
31499 ready[lastpos] = tmp;
31500 vec_pairing = 1;
31501 return cached_can_issue_more;
31502 }
31503 else if (is_power9_pairable_vec_type (type2)
31504 && vec_pos == -1)
31505 /* Remember position of first vector insn seen. */
31506 vec_pos = pos;
31507 }
31508 pos--;
31509 }
31510 if (vec_pos >= 0)
31511 {
31512 /* Didn't find a vecload to pair with but did find a vector
31513 insn, move it to the end of the ready list. */
31514 tmp = ready[vec_pos];
31515 for (i = vec_pos; i < lastpos; i++)
31516 ready[i] = ready[i + 1];
31517 ready[lastpos] = tmp;
31518 vec_pairing = 1;
31519 return cached_can_issue_more;
31520 }
31521 }
31522 }
31523
31524 /* We've either finished a vec/vecload pair, couldn't find an insn to
31525 continue the current pair, or the last insn had nothing to do with
31526 	 pairing.  In any case, reset the state.  */
31527 vec_pairing = 0;
31528 }
31529
31530 return cached_can_issue_more;
31531 }
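
/* Editorial trace (illustrative, not from the original source): after a
   vecload issues with vec_pairing == 0, the loop above scans the ready
   list from the head (ready[lastpos]) backwards; when it finds, say, a
   VECSIMPLE insn at position 2 it rotates that insn up to ready[lastpos]
   so it is dispatched next into the same superslice, and sets
   vec_pairing to 1.  The following call sees the pair completed and
   falls through to reset vec_pairing to 0.  */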
31532
31533 /* We are about to begin issuing insns for this clock cycle. */
31534
31535 static int
31536 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31537 rtx_insn **ready ATTRIBUTE_UNUSED,
31538 int *pn_ready ATTRIBUTE_UNUSED,
31539 int clock_var ATTRIBUTE_UNUSED)
31540 {
31541 int n_ready = *pn_ready;
31542
31543 if (sched_verbose)
31544 fprintf (dump, "// rs6000_sched_reorder :\n");
31545
31546   /* Reorder the ready list, if the second to last ready insn
31547      is a nonpipelined insn.  */
31548 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31549 {
31550 if (is_nonpipeline_insn (ready[n_ready - 1])
31551 && (recog_memoized (ready[n_ready - 2]) > 0))
31552 /* Simply swap first two insns. */
31553 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31554 }
31555
31556 if (rs6000_tune == PROCESSOR_POWER6)
31557 load_store_pendulum = 0;
31558
31559 return rs6000_issue_rate ();
31560 }
31561
31562 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31563
31564 static int
31565 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31566 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31567 {
31568 if (sched_verbose)
31569 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31570
31571 /* For Power6, we need to handle some special cases to try and keep the
31572 store queue from overflowing and triggering expensive flushes.
31573
31574 This code monitors how load and store instructions are being issued
31575 and skews the ready list one way or the other to increase the likelihood
31576 that a desired instruction is issued at the proper time.
31577
31578 A couple of things are done. First, we maintain a "load_store_pendulum"
31579 to track the current state of load/store issue.
31580
31581 - If the pendulum is at zero, then no loads or stores have been
31582 issued in the current cycle so we do nothing.
31583
31584 - If the pendulum is 1, then a single load has been issued in this
31585 cycle and we attempt to locate another load in the ready list to
31586 issue with it.
31587
31588 - If the pendulum is -2, then two stores have already been
31589 issued in this cycle, so we increase the priority of the first load
31590 	 in the ready list to increase its likelihood of being chosen first
31591 in the next cycle.
31592
31593 - If the pendulum is -1, then a single store has been issued in this
31594 cycle and we attempt to locate another store in the ready list to
31595 issue with it, preferring a store to an adjacent memory location to
31596 facilitate store pairing in the store queue.
31597
31598 - If the pendulum is 2, then two loads have already been
31599 issued in this cycle, so we increase the priority of the first store
31600 	 in the ready list to increase its likelihood of being chosen first
31601 in the next cycle.
31602
31603 - If the pendulum < -2 or > 2, then do nothing.
31604
31605 	 Note: This code covers the most common scenarios.  There exist non-
31606 load/store instructions which make use of the LSU and which
31607 would need to be accounted for to strictly model the behavior
31608 of the machine. Those instructions are currently unaccounted
31609 for to help minimize compile time overhead of this code.
31610 */
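  /* Editorial trace (illustrative, not from the original source):
     issuing two stores in one cycle moves the pendulum to -1 and then
     -2.  At -1 the ready list is scanned for a second, preferably
     adjacent, store; at -2 the first ready load gets a priority bump and
     the pendulum is pushed on to -3 so that only one load is boosted.  */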
31611 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31612 {
31613 int pos;
31614 int i;
31615 rtx_insn *tmp;
31616 rtx load_mem, str_mem;
31617
31618 if (is_store_insn (last_scheduled_insn, &str_mem))
31619 /* Issuing a store, swing the load_store_pendulum to the left */
31620 load_store_pendulum--;
31621 else if (is_load_insn (last_scheduled_insn, &load_mem))
31622 /* Issuing a load, swing the load_store_pendulum to the right */
31623 load_store_pendulum++;
31624 else
31625 return cached_can_issue_more;
31626
31627 /* If the pendulum is balanced, or there is only one instruction on
31628 the ready list, then all is well, so return. */
31629 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31630 return cached_can_issue_more;
31631
31632 if (load_store_pendulum == 1)
31633 {
31634 /* A load has been issued in this cycle. Scan the ready list
31635 for another load to issue with it */
31636 	  pos = *pn_ready - 1;
31637
31638 while (pos >= 0)
31639 {
31640 if (is_load_insn (ready[pos], &load_mem))
31641 {
31642 /* Found a load. Move it to the head of the ready list,
31643 		 and adjust its priority so that it is more likely to
31644 		 stay there.  */
31645 tmp = ready[pos];
31646 	          for (i = pos; i < *pn_ready - 1; i++)
31647 	            ready[i] = ready[i + 1];
31648 	          ready[*pn_ready - 1] = tmp;
31649
31650 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31651 INSN_PRIORITY (tmp)++;
31652 break;
31653 }
31654 pos--;
31655 }
31656 }
31657 else if (load_store_pendulum == -2)
31658 {
31659 /* Two stores have been issued in this cycle. Increase the
31660 priority of the first load in the ready list to favor it for
31661 issuing in the next cycle. */
31662 	  pos = *pn_ready - 1;
31663
31664 while (pos >= 0)
31665 {
31666 if (is_load_insn (ready[pos], &load_mem)
31667 && !sel_sched_p ()
31668 && INSN_PRIORITY_KNOWN (ready[pos]))
31669 {
31670 INSN_PRIORITY (ready[pos])++;
31671
31672 /* Adjust the pendulum to account for the fact that a load
31673 was found and increased in priority. This is to prevent
31674 increasing the priority of multiple loads */
31675 load_store_pendulum--;
31676
31677 break;
31678 }
31679 pos--;
31680 }
31681 }
31682 else if (load_store_pendulum == -1)
31683 {
31684 /* A store has been issued in this cycle. Scan the ready list for
31685 another store to issue with it, preferring a store to an adjacent
31686 memory location */
31687 int first_store_pos = -1;
31688
31689 	  pos = *pn_ready - 1;
31690
31691 while (pos >= 0)
31692 {
31693 if (is_store_insn (ready[pos], &str_mem))
31694 {
31695 rtx str_mem2;
31696 /* Maintain the index of the first store found on the
31697 list */
31698 if (first_store_pos == -1)
31699 first_store_pos = pos;
31700
31701 if (is_store_insn (last_scheduled_insn, &str_mem2)
31702 && adjacent_mem_locations (str_mem, str_mem2))
31703 {
31704 /* Found an adjacent store. Move it to the head of the
31705 		     ready list, and adjust its priority so that it is
31706 		     more likely to stay there.  */
31707 tmp = ready[pos];
31708 		  for (i = pos; i < *pn_ready - 1; i++)
31709 		    ready[i] = ready[i + 1];
31710 		  ready[*pn_ready - 1] = tmp;
31711
31712 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31713 INSN_PRIORITY (tmp)++;
31714
31715 first_store_pos = -1;
31716
31717 break;
31718 		}
31719 }
31720 pos--;
31721 }
31722
31723 if (first_store_pos >= 0)
31724 {
31725 /* An adjacent store wasn't found, but a non-adjacent store was,
31726 so move the non-adjacent store to the front of the ready
31727 list, and adjust its priority so that it is more likely to
31728 stay there. */
31729 tmp = ready[first_store_pos];
31730 	    for (i = first_store_pos; i < *pn_ready - 1; i++)
31731 	      ready[i] = ready[i + 1];
31732 	    ready[*pn_ready - 1] = tmp;
31733 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31734 INSN_PRIORITY (tmp)++;
31735 }
31736 }
31737 else if (load_store_pendulum == 2)
31738 {
31739 /* Two loads have been issued in this cycle. Increase the priority
31740 of the first store in the ready list to favor it for issuing in
31741 the next cycle. */
31742 	  pos = *pn_ready - 1;
31743
31744 while (pos >= 0)
31745 {
31746 if (is_store_insn (ready[pos], &str_mem)
31747 && !sel_sched_p ()
31748 && INSN_PRIORITY_KNOWN (ready[pos]))
31749 {
31750 INSN_PRIORITY (ready[pos])++;
31751
31752 /* Adjust the pendulum to account for the fact that a store
31753 was found and increased in priority. This is to prevent
31754 increasing the priority of multiple stores */
31755 load_store_pendulum++;
31756
31757 break;
31758 }
31759 pos--;
31760 }
31761 }
31762 }
31763
31764 /* Do Power9 dependent reordering if necessary. */
31765 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31766 && recog_memoized (last_scheduled_insn) >= 0)
31767 return power9_sched_reorder2 (ready, *pn_ready - 1);
31768
31769 return cached_can_issue_more;
31770 }
31771
31772 /* Return whether the presence of INSN causes a dispatch group termination
31773 of group WHICH_GROUP.
31774
31775 If WHICH_GROUP == current_group, this function will return true if INSN
31776    causes the termination of the current group (i.e., the dispatch group to
31777 which INSN belongs). This means that INSN will be the last insn in the
31778 group it belongs to.
31779
31780 If WHICH_GROUP == previous_group, this function will return true if INSN
31781    causes the termination of the previous group (i.e., the dispatch group that
31782    precedes the group to which INSN belongs).  This means that INSN will be
31783    the first insn in the group it belongs to.  */
31784
31785 static bool
31786 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31787 {
31788 bool first, last;
31789
31790 if (! insn)
31791 return false;
31792
31793 first = insn_must_be_first_in_group (insn);
31794 last = insn_must_be_last_in_group (insn);
31795
31796 if (first && last)
31797 return true;
31798
31799 if (which_group == current_group)
31800 return last;
31801 else if (which_group == previous_group)
31802 return first;
31803
31804 return false;
31805 }
31806
31807
31808 static bool
31809 insn_must_be_first_in_group (rtx_insn *insn)
31810 {
31811 enum attr_type type;
31812
31813 if (!insn
31814 || NOTE_P (insn)
31815 || DEBUG_INSN_P (insn)
31816 || GET_CODE (PATTERN (insn)) == USE
31817 || GET_CODE (PATTERN (insn)) == CLOBBER)
31818 return false;
31819
31820 switch (rs6000_tune)
31821 {
31822 case PROCESSOR_POWER5:
31823 if (is_cracked_insn (insn))
31824 return true;
31825 /* FALLTHRU */
31826 case PROCESSOR_POWER4:
31827 if (is_microcoded_insn (insn))
31828 return true;
31829
31830 if (!rs6000_sched_groups)
31831 return false;
31832
31833 type = get_attr_type (insn);
31834
31835 switch (type)
31836 {
31837 case TYPE_MFCR:
31838 case TYPE_MFCRF:
31839 case TYPE_MTCR:
31840 case TYPE_CR_LOGICAL:
31841 case TYPE_MTJMPR:
31842 case TYPE_MFJMPR:
31843 case TYPE_DIV:
31844 case TYPE_LOAD_L:
31845 case TYPE_STORE_C:
31846 case TYPE_ISYNC:
31847 case TYPE_SYNC:
31848 return true;
31849 default:
31850 break;
31851 }
31852 break;
31853 case PROCESSOR_POWER6:
31854 type = get_attr_type (insn);
31855
31856 switch (type)
31857 {
31858 case TYPE_EXTS:
31859 case TYPE_CNTLZ:
31860 case TYPE_TRAP:
31861 case TYPE_MUL:
31862 case TYPE_INSERT:
31863 case TYPE_FPCOMPARE:
31864 case TYPE_MFCR:
31865 case TYPE_MTCR:
31866 case TYPE_MFJMPR:
31867 case TYPE_MTJMPR:
31868 case TYPE_ISYNC:
31869 case TYPE_SYNC:
31870 case TYPE_LOAD_L:
31871 case TYPE_STORE_C:
31872 return true;
31873 case TYPE_SHIFT:
31874 if (get_attr_dot (insn) == DOT_NO
31875 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31876 return true;
31877 else
31878 break;
31879 case TYPE_DIV:
31880 if (get_attr_size (insn) == SIZE_32)
31881 return true;
31882 else
31883 break;
31884 case TYPE_LOAD:
31885 case TYPE_STORE:
31886 case TYPE_FPLOAD:
31887 case TYPE_FPSTORE:
31888 if (get_attr_update (insn) == UPDATE_YES)
31889 return true;
31890 else
31891 break;
31892 default:
31893 break;
31894 }
31895 break;
31896 case PROCESSOR_POWER7:
31897 type = get_attr_type (insn);
31898
31899 switch (type)
31900 {
31901 case TYPE_CR_LOGICAL:
31902 case TYPE_MFCR:
31903 case TYPE_MFCRF:
31904 case TYPE_MTCR:
31905 case TYPE_DIV:
31906 case TYPE_ISYNC:
31907 case TYPE_LOAD_L:
31908 case TYPE_STORE_C:
31909 case TYPE_MFJMPR:
31910 case TYPE_MTJMPR:
31911 return true;
31912 case TYPE_MUL:
31913 case TYPE_SHIFT:
31914 case TYPE_EXTS:
31915 if (get_attr_dot (insn) == DOT_YES)
31916 return true;
31917 else
31918 break;
31919 case TYPE_LOAD:
31920 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31921 || get_attr_update (insn) == UPDATE_YES)
31922 return true;
31923 else
31924 break;
31925 case TYPE_STORE:
31926 case TYPE_FPLOAD:
31927 case TYPE_FPSTORE:
31928 if (get_attr_update (insn) == UPDATE_YES)
31929 return true;
31930 else
31931 break;
31932 default:
31933 break;
31934 }
31935 break;
31936 case PROCESSOR_POWER8:
31937 type = get_attr_type (insn);
31938
31939 switch (type)
31940 {
31941 case TYPE_CR_LOGICAL:
31942 case TYPE_MFCR:
31943 case TYPE_MFCRF:
31944 case TYPE_MTCR:
31945 case TYPE_SYNC:
31946 case TYPE_ISYNC:
31947 case TYPE_LOAD_L:
31948 case TYPE_STORE_C:
31949 case TYPE_VECSTORE:
31950 case TYPE_MFJMPR:
31951 case TYPE_MTJMPR:
31952 return true;
31953 case TYPE_SHIFT:
31954 case TYPE_EXTS:
31955 case TYPE_MUL:
31956 if (get_attr_dot (insn) == DOT_YES)
31957 return true;
31958 else
31959 break;
31960 case TYPE_LOAD:
31961 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31962 || get_attr_update (insn) == UPDATE_YES)
31963 return true;
31964 else
31965 break;
31966 case TYPE_STORE:
31967 if (get_attr_update (insn) == UPDATE_YES
31968 && get_attr_indexed (insn) == INDEXED_YES)
31969 return true;
31970 else
31971 break;
31972 default:
31973 break;
31974 }
31975 break;
31976 default:
31977 break;
31978 }
31979
31980 return false;
31981 }
31982
31983 static bool
31984 insn_must_be_last_in_group (rtx_insn *insn)
31985 {
31986 enum attr_type type;
31987
31988 if (!insn
31989 || NOTE_P (insn)
31990 || DEBUG_INSN_P (insn)
31991 || GET_CODE (PATTERN (insn)) == USE
31992 || GET_CODE (PATTERN (insn)) == CLOBBER)
31993 return false;
31994
31995 switch (rs6000_tune) {
31996 case PROCESSOR_POWER4:
31997 case PROCESSOR_POWER5:
31998 if (is_microcoded_insn (insn))
31999 return true;
32000
32001 if (is_branch_slot_insn (insn))
32002 return true;
32003
32004 break;
32005 case PROCESSOR_POWER6:
32006 type = get_attr_type (insn);
32007
32008 switch (type)
32009 {
32010 case TYPE_EXTS:
32011 case TYPE_CNTLZ:
32012 case TYPE_TRAP:
32013 case TYPE_MUL:
32014 case TYPE_FPCOMPARE:
32015 case TYPE_MFCR:
32016 case TYPE_MTCR:
32017 case TYPE_MFJMPR:
32018 case TYPE_MTJMPR:
32019 case TYPE_ISYNC:
32020 case TYPE_SYNC:
32021 case TYPE_LOAD_L:
32022 case TYPE_STORE_C:
32023 return true;
32024 case TYPE_SHIFT:
32025 if (get_attr_dot (insn) == DOT_NO
32026 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
32027 return true;
32028 else
32029 break;
32030 case TYPE_DIV:
32031 if (get_attr_size (insn) == SIZE_32)
32032 return true;
32033 else
32034 break;
32035 default:
32036 break;
32037 }
32038 break;
32039 case PROCESSOR_POWER7:
32040 type = get_attr_type (insn);
32041
32042 switch (type)
32043 {
32044 case TYPE_ISYNC:
32045 case TYPE_SYNC:
32046 case TYPE_LOAD_L:
32047 case TYPE_STORE_C:
32048 return true;
32049 case TYPE_LOAD:
32050 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32051 && get_attr_update (insn) == UPDATE_YES)
32052 return true;
32053 else
32054 break;
32055 case TYPE_STORE:
32056 if (get_attr_update (insn) == UPDATE_YES
32057 && get_attr_indexed (insn) == INDEXED_YES)
32058 return true;
32059 else
32060 break;
32061 default:
32062 break;
32063 }
32064 break;
32065 case PROCESSOR_POWER8:
32066 type = get_attr_type (insn);
32067
32068 switch (type)
32069 {
32070 case TYPE_MFCR:
32071 case TYPE_MTCR:
32072 case TYPE_ISYNC:
32073 case TYPE_SYNC:
32074 case TYPE_LOAD_L:
32075 case TYPE_STORE_C:
32076 return true;
32077 case TYPE_LOAD:
32078 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32079 && get_attr_update (insn) == UPDATE_YES)
32080 return true;
32081 else
32082 break;
32083 case TYPE_STORE:
32084 if (get_attr_update (insn) == UPDATE_YES
32085 && get_attr_indexed (insn) == INDEXED_YES)
32086 return true;
32087 else
32088 break;
32089 default:
32090 break;
32091 }
32092 break;
32093 default:
32094 break;
32095 }
32096
32097 return false;
32098 }
32099
32100 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
32101 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
32102
32103 static bool
32104 is_costly_group (rtx *group_insns, rtx next_insn)
32105 {
32106 int i;
32107 int issue_rate = rs6000_issue_rate ();
32108
32109 for (i = 0; i < issue_rate; i++)
32110 {
32111 sd_iterator_def sd_it;
32112 dep_t dep;
32113 rtx insn = group_insns[i];
32114
32115 if (!insn)
32116 continue;
32117
32118 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
32119 {
32120 rtx next = DEP_CON (dep);
32121
32122 if (next == next_insn
32123 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
32124 return true;
32125 }
32126 }
32127
32128 return false;
32129 }
32130
32131 /* Utility of the function redefine_groups.
32132 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
32133 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
32134 to keep it "far" (in a separate group) from GROUP_INSNS, following
32135 one of the following schemes, depending on the value of the flag
32136    -minsert-sched-nops = X:
32137 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
32138 in order to force NEXT_INSN into a separate group.
32139 (2) X < sched_finish_regroup_exact: insert exactly X nops.
32140 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
32141 insertion (has a group just ended, how many vacant issue slots remain in the
32142 last group, and how many dispatch groups were encountered so far). */
32143
32144 static int
32145 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
32146 rtx_insn *next_insn, bool *group_end, int can_issue_more,
32147 int *group_count)
32148 {
32149 rtx nop;
32150 bool force;
32151 int issue_rate = rs6000_issue_rate ();
32152 bool end = *group_end;
32153 int i;
32154
32155 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
32156 return can_issue_more;
32157
32158 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
32159 return can_issue_more;
32160
32161 force = is_costly_group (group_insns, next_insn);
32162 if (!force)
32163 return can_issue_more;
32164
32165 if (sched_verbose > 6)
32166     fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
32167 	     *group_count, can_issue_more);
32168
32169 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
32170 {
32171 if (*group_end)
32172 can_issue_more = 0;
32173
32174 /* Since only a branch can be issued in the last issue_slot, it is
32175 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
32176 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
32177 in this case the last nop will start a new group and the branch
32178 will be forced to the new group. */
32179 if (can_issue_more && !is_branch_slot_insn (next_insn))
32180 can_issue_more--;
32181
32182 /* Do we have a special group ending nop? */
32183 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
32184 || rs6000_tune == PROCESSOR_POWER8)
32185 {
32186 nop = gen_group_ending_nop ();
32187 emit_insn_before (nop, next_insn);
32188 can_issue_more = 0;
32189 }
32190 else
32191 while (can_issue_more > 0)
32192 {
32193 nop = gen_nop ();
32194 emit_insn_before (nop, next_insn);
32195 can_issue_more--;
32196 }
32197
32198 *group_end = true;
32199 return 0;
32200 }
32201
32202 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
32203 {
32204 int n_nops = rs6000_sched_insert_nops;
32205
32206 /* Nops can't be issued from the branch slot, so the effective
32207 issue_rate for nops is 'issue_rate - 1'. */
32208 if (can_issue_more == 0)
32209 can_issue_more = issue_rate;
32210 can_issue_more--;
32211 if (can_issue_more == 0)
32212 {
32213 can_issue_more = issue_rate - 1;
32214 (*group_count)++;
32215 end = true;
32216 for (i = 0; i < issue_rate; i++)
32217 {
32218 group_insns[i] = 0;
32219 }
32220 }
32221
32222 while (n_nops > 0)
32223 {
32224 nop = gen_nop ();
32225 emit_insn_before (nop, next_insn);
32226 if (can_issue_more == issue_rate - 1) /* new group begins */
32227 end = false;
32228 can_issue_more--;
32229 if (can_issue_more == 0)
32230 {
32231 can_issue_more = issue_rate - 1;
32232 (*group_count)++;
32233 end = true;
32234 for (i = 0; i < issue_rate; i++)
32235 {
32236 group_insns[i] = 0;
32237 }
32238 }
32239 n_nops--;
32240 }
32241
32242 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
32243 can_issue_more++;
32244
32245 /* Is next_insn going to start a new group? */
32246 *group_end
32247 = (end
32248 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32249 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32250 || (can_issue_more < issue_rate &&
32251 insn_terminates_group_p (next_insn, previous_group)));
32252 if (*group_end && end)
32253 (*group_count)--;
32254
32255 if (sched_verbose > 6)
32256 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
32257 *group_count, can_issue_more);
32258 return can_issue_more;
32259 }
32260
32261 return can_issue_more;
32262 }
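
/* Editorial example (illustrative, not from the original source): on a
   5-issue POWER5-class machine with -minsert-sched-nops=regroup_exact,
   if a costly dependence must push NEXT_INSN out of a group that still
   has 3 vacant slots, 2 nops are emitted when NEXT_INSN is not a branch
   (the branch slot need not be filled) and 3 when it is; with
   -minsert-sched-nops=2 exactly two nops are emitted regardless.  */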
32263
32264 /* This function tries to synch the dispatch groups that the compiler "sees"
32265 with the dispatch groups that the processor dispatcher is expected to
32266 form in practice. It tries to achieve this synchronization by forcing the
32267 estimated processor grouping on the compiler (as opposed to the function
32268    'pad_groups' which tries to force the scheduler's grouping on the processor).
32269
32270 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
32271 examines the (estimated) dispatch groups that will be formed by the processor
32272 dispatcher. It marks these group boundaries to reflect the estimated
32273 processor grouping, overriding the grouping that the scheduler had marked.
32274 Depending on the value of the flag '-minsert-sched-nops' this function can
32275 force certain insns into separate groups or force a certain distance between
32276 them by inserting nops, for example, if there exists a "costly dependence"
32277 between the insns.
32278
32279 The function estimates the group boundaries that the processor will form as
32280 follows: It keeps track of how many vacant issue slots are available after
32281 each insn. A subsequent insn will start a new group if one of the following
32282 4 cases applies:
32283 - no more vacant issue slots remain in the current dispatch group.
32284 - only the last issue slot, which is the branch slot, is vacant, but the next
32285 insn is not a branch.
32286    - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32287 which means that a cracked insn (which occupies two issue slots) can't be
32288 issued in this group.
32289    - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32290 start a new group. */
32291
32292 static int
32293 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32294 rtx_insn *tail)
32295 {
32296 rtx_insn *insn, *next_insn;
32297 int issue_rate;
32298 int can_issue_more;
32299 int slot, i;
32300 bool group_end;
32301 int group_count = 0;
32302 rtx *group_insns;
32303
32304 /* Initialize. */
32305 issue_rate = rs6000_issue_rate ();
32306 group_insns = XALLOCAVEC (rtx, issue_rate);
32307 for (i = 0; i < issue_rate; i++)
32308 {
32309 group_insns[i] = 0;
32310 }
32311 can_issue_more = issue_rate;
32312 slot = 0;
32313 insn = get_next_active_insn (prev_head_insn, tail);
32314 group_end = false;
32315
32316 while (insn != NULL_RTX)
32317 {
32318 slot = (issue_rate - can_issue_more);
32319 group_insns[slot] = insn;
32320 can_issue_more =
32321 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32322 if (insn_terminates_group_p (insn, current_group))
32323 can_issue_more = 0;
32324
32325 next_insn = get_next_active_insn (insn, tail);
32326 if (next_insn == NULL_RTX)
32327 return group_count + 1;
32328
32329 /* Is next_insn going to start a new group? */
32330 group_end
32331 = (can_issue_more == 0
32332 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32333 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32334 || (can_issue_more < issue_rate &&
32335 insn_terminates_group_p (next_insn, previous_group)));
32336
32337 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32338 next_insn, &group_end, can_issue_more,
32339 &group_count);
32340
32341 if (group_end)
32342 {
32343 group_count++;
32344 can_issue_more = 0;
32345 for (i = 0; i < issue_rate; i++)
32346 {
32347 group_insns[i] = 0;
32348 }
32349 }
32350
32351 if (GET_MODE (next_insn) == TImode && can_issue_more)
32352 PUT_MODE (next_insn, VOIDmode);
32353 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32354 PUT_MODE (next_insn, TImode);
32355
32356 insn = next_insn;
32357 if (can_issue_more == 0)
32358 can_issue_more = issue_rate;
32359 } /* while */
32360
32361 return group_count;
32362 }
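
/* Editorial example (illustrative, not from the original source): with
   issue_rate == 5, a group already holding three insns leaves
   can_issue_more == 2 (the branch slot plus one regular slot).  A
   cracked insn needs two non-branch slots, so by the cracked-insn rule
   above it starts a new group, and the boundary is recorded by giving
   that insn TImode.  */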
32363
32364 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32365 dispatch group boundaries that the scheduler had marked. Pad with nops
32366 any dispatch groups which have vacant issue slots, in order to force the
32367 scheduler's grouping on the processor dispatcher. The function
32368 returns the number of dispatch groups found. */
32369
32370 static int
32371 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32372 rtx_insn *tail)
32373 {
32374 rtx_insn *insn, *next_insn;
32375 rtx nop;
32376 int issue_rate;
32377 int can_issue_more;
32378 int group_end;
32379 int group_count = 0;
32380
32381 /* Initialize issue_rate. */
32382 issue_rate = rs6000_issue_rate ();
32383 can_issue_more = issue_rate;
32384
32385 insn = get_next_active_insn (prev_head_insn, tail);
32386 next_insn = get_next_active_insn (insn, tail);
32387
32388 while (insn != NULL_RTX)
32389 {
32390 can_issue_more =
32391 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32392
32393 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32394
32395 if (next_insn == NULL_RTX)
32396 break;
32397
32398 if (group_end)
32399 {
32400 /* If the scheduler had marked group termination at this location
32401 (between insn and next_insn), and neither insn nor next_insn will
32402 force group termination, pad the group with nops to force group
32403 termination. */
32404 if (can_issue_more
32405 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32406 && !insn_terminates_group_p (insn, current_group)
32407 && !insn_terminates_group_p (next_insn, previous_group))
32408 {
32409 if (!is_branch_slot_insn (next_insn))
32410 can_issue_more--;
32411
32412 while (can_issue_more)
32413 {
32414 nop = gen_nop ();
32415 emit_insn_before (nop, next_insn);
32416 can_issue_more--;
32417 }
32418 }
32419
32420 can_issue_more = issue_rate;
32421 group_count++;
32422 }
32423
32424 insn = next_insn;
32425 next_insn = get_next_active_insn (insn, tail);
32426 }
32427
32428 return group_count;
32429 }
32430
32431 /* We're beginning a new block. Initialize data structures as necessary. */
32432
32433 static void
32434 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32435 int sched_verbose ATTRIBUTE_UNUSED,
32436 int max_ready ATTRIBUTE_UNUSED)
32437 {
32438 last_scheduled_insn = NULL;
32439 load_store_pendulum = 0;
32440 divide_cnt = 0;
32441 vec_pairing = 0;
32442 }
32443
32444 /* The following function is called at the end of scheduling BB.
32445    After reload, it inserts nops to enforce insn group bundling.  */
32446
32447 static void
32448 rs6000_sched_finish (FILE *dump, int sched_verbose)
32449 {
32450 int n_groups;
32451
32452 if (sched_verbose)
32453 fprintf (dump, "=== Finishing schedule.\n");
32454
32455 if (reload_completed && rs6000_sched_groups)
32456 {
32457       /* Do not run the sched_finish hook when selective scheduling is enabled.  */
32458 if (sel_sched_p ())
32459 return;
32460
32461 if (rs6000_sched_insert_nops == sched_finish_none)
32462 return;
32463
32464 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32465 n_groups = pad_groups (dump, sched_verbose,
32466 current_sched_info->prev_head,
32467 current_sched_info->next_tail);
32468 else
32469 n_groups = redefine_groups (dump, sched_verbose,
32470 current_sched_info->prev_head,
32471 current_sched_info->next_tail);
32472
32473 if (sched_verbose >= 6)
32474 {
32475 fprintf (dump, "ngroups = %d\n", n_groups);
32476 print_rtl (dump, current_sched_info->prev_head);
32477 fprintf (dump, "Done finish_sched\n");
32478 }
32479 }
32480 }
32481
32482 struct rs6000_sched_context
32483 {
32484 short cached_can_issue_more;
32485 rtx_insn *last_scheduled_insn;
32486 int load_store_pendulum;
32487 int divide_cnt;
32488 int vec_pairing;
32489 };
32490
32491 typedef struct rs6000_sched_context rs6000_sched_context_def;
32492 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32493
32494 /* Allocate storage for a new scheduling context.  */
32495 static void *
32496 rs6000_alloc_sched_context (void)
32497 {
32498 return xmalloc (sizeof (rs6000_sched_context_def));
32499 }
32500
32501 /* If CLEAN_P is true, initialize _SC with clean data;
32502    otherwise initialize it from the global context.  */
32503 static void
32504 rs6000_init_sched_context (void *_sc, bool clean_p)
32505 {
32506 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32507
32508 if (clean_p)
32509 {
32510 sc->cached_can_issue_more = 0;
32511 sc->last_scheduled_insn = NULL;
32512 sc->load_store_pendulum = 0;
32513 sc->divide_cnt = 0;
32514 sc->vec_pairing = 0;
32515 }
32516 else
32517 {
32518 sc->cached_can_issue_more = cached_can_issue_more;
32519 sc->last_scheduled_insn = last_scheduled_insn;
32520 sc->load_store_pendulum = load_store_pendulum;
32521 sc->divide_cnt = divide_cnt;
32522 sc->vec_pairing = vec_pairing;
32523 }
32524 }
32525
32526 /* Sets the global scheduling context to the one pointed to by _SC. */
32527 static void
32528 rs6000_set_sched_context (void *_sc)
32529 {
32530 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32531
32532 gcc_assert (sc != NULL);
32533
32534 cached_can_issue_more = sc->cached_can_issue_more;
32535 last_scheduled_insn = sc->last_scheduled_insn;
32536 load_store_pendulum = sc->load_store_pendulum;
32537 divide_cnt = sc->divide_cnt;
32538 vec_pairing = sc->vec_pairing;
32539 }
32540
32541 /* Free _SC. */
32542 static void
32543 rs6000_free_sched_context (void *_sc)
32544 {
32545 gcc_assert (_sc != NULL);
32546
32547 free (_sc);
32548 }
32549
32550 static bool
32551 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32552 {
32553 switch (get_attr_type (insn))
32554 {
32555 case TYPE_DIV:
32556 case TYPE_SDIV:
32557 case TYPE_DDIV:
32558 case TYPE_VECDIV:
32559 case TYPE_SSQRT:
32560 case TYPE_DSQRT:
32561 return false;
32562
32563 default:
32564 return true;
32565 }
32566 }
32567 \f
32568 /* Length in units of the trampoline for entering a nested function. */
32569
32570 int
32571 rs6000_trampoline_size (void)
32572 {
32573 int ret = 0;
32574
32575 switch (DEFAULT_ABI)
32576 {
32577 default:
32578 gcc_unreachable ();
32579
32580 case ABI_AIX:
32581 ret = (TARGET_32BIT) ? 12 : 24;
32582 break;
32583
32584 case ABI_ELFv2:
32585 gcc_assert (!TARGET_32BIT);
32586 ret = 32;
32587 break;
32588
32589 case ABI_DARWIN:
32590 case ABI_V4:
32591 ret = (TARGET_32BIT) ? 40 : 48;
32592 break;
32593 }
32594
32595 return ret;
32596 }
32597
32598 /* Emit RTL insns to initialize the variable parts of a trampoline.
32599 FNADDR is an RTX for the address of the function's pure code.
32600 CXT is an RTX for the static chain value for the function. */
32601
32602 static void
32603 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32604 {
32605 int regsize = (TARGET_32BIT) ? 4 : 8;
32606 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32607 rtx ctx_reg = force_reg (Pmode, cxt);
32608 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32609
32610 switch (DEFAULT_ABI)
32611 {
32612 default:
32613 gcc_unreachable ();
32614
32615 /* Under AIX, just build the 3 word function descriptor */
32616 case ABI_AIX:
32617 {
32618 rtx fnmem, fn_reg, toc_reg;
32619
32620 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32621 error ("you cannot take the address of a nested function if you use "
32622 "the %qs option", "-mno-pointers-to-nested-functions");
32623
32624 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32625 fn_reg = gen_reg_rtx (Pmode);
32626 toc_reg = gen_reg_rtx (Pmode);
32627
32628 /* Macro to shorten the code expansions below. */
32629 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32630
32631 m_tramp = replace_equiv_address (m_tramp, addr);
32632
32633 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32634 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32635 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32636 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32637 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32638
32639 # undef MEM_PLUS
32640 }
32641 break;
32642
32643 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32644 case ABI_ELFv2:
32645 case ABI_DARWIN:
32646 case ABI_V4:
32647 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32648 LCT_NORMAL, VOIDmode,
32649 addr, Pmode,
32650 GEN_INT (rs6000_trampoline_size ()), SImode,
32651 fnaddr, Pmode,
32652 ctx_reg, Pmode);
32653 break;
32654 }
32655 }
32656
32657 \f
32658 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32659 identifier as an argument, so the front end shouldn't look it up. */
32660
32661 static bool
32662 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32663 {
32664 return is_attribute_p ("altivec", attr_id);
32665 }
32666
32667 /* Handle the "altivec" attribute. The attribute may have
32668 arguments as follows:
32669
32670 __attribute__((altivec(vector__)))
32671 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32672 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32673
32674 and may appear more than once (e.g., 'vector bool char') in a
32675 given declaration. */
32676
32677 static tree
32678 rs6000_handle_altivec_attribute (tree *node,
32679 tree name ATTRIBUTE_UNUSED,
32680 tree args,
32681 int flags ATTRIBUTE_UNUSED,
32682 bool *no_add_attrs)
32683 {
32684 tree type = *node, result = NULL_TREE;
32685 machine_mode mode;
32686 int unsigned_p;
32687 char altivec_type
32688 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32689 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32690 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32691 : '?');
32692
32693 while (POINTER_TYPE_P (type)
32694 || TREE_CODE (type) == FUNCTION_TYPE
32695 || TREE_CODE (type) == METHOD_TYPE
32696 || TREE_CODE (type) == ARRAY_TYPE)
32697 type = TREE_TYPE (type);
32698
32699 mode = TYPE_MODE (type);
32700
32701 /* Check for invalid AltiVec type qualifiers. */
32702 if (type == long_double_type_node)
32703 error ("use of %<long double%> in AltiVec types is invalid");
32704 else if (type == boolean_type_node)
32705 error ("use of boolean types in AltiVec types is invalid");
32706 else if (TREE_CODE (type) == COMPLEX_TYPE)
32707 error ("use of %<complex%> in AltiVec types is invalid");
32708 else if (DECIMAL_FLOAT_MODE_P (mode))
32709 error ("use of decimal floating point types in AltiVec types is invalid");
32710 else if (!TARGET_VSX)
32711 {
32712 if (type == long_unsigned_type_node || type == long_integer_type_node)
32713 {
32714 if (TARGET_64BIT)
32715 error ("use of %<long%> in AltiVec types is invalid for "
32716 "64-bit code without %qs", "-mvsx");
32717 else if (rs6000_warn_altivec_long)
32718 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32719 "use %<int%>");
32720 }
32721 else if (type == long_long_unsigned_type_node
32722 || type == long_long_integer_type_node)
32723 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32724 "-mvsx");
32725 else if (type == double_type_node)
32726 error ("use of %<double%> in AltiVec types is invalid without %qs",
32727 "-mvsx");
32728 }
32729
32730 switch (altivec_type)
32731 {
32732 case 'v':
32733 unsigned_p = TYPE_UNSIGNED (type);
32734 switch (mode)
32735 {
32736 case E_TImode:
32737 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32738 break;
32739 case E_DImode:
32740 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32741 break;
32742 case E_SImode:
32743 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32744 break;
32745 case E_HImode:
32746 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32747 break;
32748 case E_QImode:
32749 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32750 break;
32751 case E_SFmode: result = V4SF_type_node; break;
32752 case E_DFmode: result = V2DF_type_node; break;
32753 /* If the user says 'vector int bool', we may be handed the 'bool'
32754 attribute _before_ the 'vector' attribute, and so select the
32755 proper type in the 'b' case below. */
32756 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32757 case E_V2DImode: case E_V2DFmode:
32758 result = type;
32759 default: break;
32760 }
32761 break;
32762 case 'b':
32763 switch (mode)
32764 {
32765 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32766 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32767 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32768 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32769 default: break;
32770 }
32771 break;
32772 case 'p':
32773 switch (mode)
32774 {
32775 case E_V8HImode: result = pixel_V8HI_type_node;
32776 default: break;
32777 }
32778 default: break;
32779 }
32780
32781 /* Propagate qualifiers attached to the element type
32782 onto the vector type. */
32783 if (result && result != type && TYPE_QUALS (type))
32784 result = build_qualified_type (result, TYPE_QUALS (type));
32785
32786 *no_add_attrs = true; /* No need to hang on to the attribute. */
32787
32788 if (result)
32789 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32790
32791 return NULL_TREE;
32792 }
32793
32794 /* AltiVec defines five built-in scalar types that serve as vector
32795 elements; we must teach the compiler how to mangle them. The 128-bit
32796 floating point mangling is target-specific as well. */
32797
32798 static const char *
32799 rs6000_mangle_type (const_tree type)
32800 {
32801 type = TYPE_MAIN_VARIANT (type);
32802
32803 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32804 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32805 return NULL;
32806
32807 if (type == bool_char_type_node) return "U6__boolc";
32808 if (type == bool_short_type_node) return "U6__bools";
32809 if (type == pixel_type_node) return "u7__pixel";
32810 if (type == bool_int_type_node) return "U6__booli";
32811 if (type == bool_long_long_type_node) return "U6__boolx";
32812
32813 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32814 return "g";
32815 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32816 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32817
32818 /* For all other types, use the default mangling. */
32819 return NULL;
32820 }
32821
32822 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32823 struct attribute_spec.handler. */
32824
32825 static tree
32826 rs6000_handle_longcall_attribute (tree *node, tree name,
32827 tree args ATTRIBUTE_UNUSED,
32828 int flags ATTRIBUTE_UNUSED,
32829 bool *no_add_attrs)
32830 {
32831 if (TREE_CODE (*node) != FUNCTION_TYPE
32832 && TREE_CODE (*node) != FIELD_DECL
32833 && TREE_CODE (*node) != TYPE_DECL)
32834 {
32835 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32836 name);
32837 *no_add_attrs = true;
32838 }
32839
32840 return NULL_TREE;
32841 }
32842
32843 /* Set longcall attributes on all functions declared when
32844 rs6000_default_long_calls is true. */
32845 static void
32846 rs6000_set_default_type_attributes (tree type)
32847 {
32848 if (rs6000_default_long_calls
32849 && (TREE_CODE (type) == FUNCTION_TYPE
32850 || TREE_CODE (type) == METHOD_TYPE))
32851 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32852 NULL_TREE,
32853 TYPE_ATTRIBUTES (type));
32854
32855 #if TARGET_MACHO
32856 darwin_set_default_type_attributes (type);
32857 #endif
32858 }
32859
32860 /* Return a reference suitable for calling a function with the
32861 longcall attribute. */
32862
32863 static rtx
32864 rs6000_longcall_ref (rtx call_ref, rtx arg)
32865 {
32866 /* System V adds '.' to the internal name, so skip them. */
32867 const char *call_name = XSTR (call_ref, 0);
32868 if (*call_name == '.')
32869 {
32870 while (*call_name == '.')
32871 call_name++;
32872
32873 tree node = get_identifier (call_name);
32874 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32875 }
32876
32877 if (TARGET_PLTSEQ)
32878 {
32879 rtx base = const0_rtx;
32880 int regno;
32881 if (DEFAULT_ABI == ABI_ELFv2)
32882 {
32883 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32884 regno = 12;
32885 }
32886 else
32887 {
32888 if (flag_pic)
32889 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32890 regno = 11;
32891 }
32892 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32893 may be used by a function global entry point. For SysV4, r11
32894 is used by __glink_PLTresolve lazy resolver entry. */
32895 rtx reg = gen_rtx_REG (Pmode, regno);
32896 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32897 UNSPEC_PLT16_HA);
32898 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32899 UNSPEC_PLT16_LO);
32900 emit_insn (gen_rtx_SET (reg, hi));
32901 emit_insn (gen_rtx_SET (reg, lo));
32902 return reg;
32903 }
32904
32905 return force_reg (Pmode, call_ref);
32906 }
32907 \f
32908 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32909 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32910 #endif
32911
32912 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32913 struct attribute_spec.handler. */
32914 static tree
32915 rs6000_handle_struct_attribute (tree *node, tree name,
32916 tree args ATTRIBUTE_UNUSED,
32917 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32918 {
32919 tree *type = NULL;
32920 if (DECL_P (*node))
32921 {
32922 if (TREE_CODE (*node) == TYPE_DECL)
32923 type = &TREE_TYPE (*node);
32924 }
32925 else
32926 type = node;
32927
32928 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32929 || TREE_CODE (*type) == UNION_TYPE)))
32930 {
32931 warning (OPT_Wattributes, "%qE attribute ignored", name);
32932 *no_add_attrs = true;
32933 }
32934
32935 else if ((is_attribute_p ("ms_struct", name)
32936 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32937 || ((is_attribute_p ("gcc_struct", name)
32938 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32939 {
32940 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32941 name);
32942 *no_add_attrs = true;
32943 }
32944
32945 return NULL_TREE;
32946 }
32947
32948 static bool
32949 rs6000_ms_bitfield_layout_p (const_tree record_type)
32950 {
32951 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
32952 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32953 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32954 }
32955 \f
32956 #ifdef USING_ELFOS_H
32957
32958 /* A get_unnamed_section callback, used for switching to toc_section. */
32959
32960 static void
32961 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32962 {
32963 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32964 && TARGET_MINIMAL_TOC)
32965 {
32966 if (!toc_initialized)
32967 {
32968 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32969 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32970 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32971 fprintf (asm_out_file, "\t.tc ");
32972 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32973 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32974 fprintf (asm_out_file, "\n");
32975
32976 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32977 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32978 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32979 fprintf (asm_out_file, " = .+32768\n");
32980 toc_initialized = 1;
32981 }
32982 else
32983 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32984 }
32985 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32986 {
32987 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32988 if (!toc_initialized)
32989 {
32990 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32991 toc_initialized = 1;
32992 }
32993 }
32994 else
32995 {
32996 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32997 if (!toc_initialized)
32998 {
32999 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
33000 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
33001 fprintf (asm_out_file, " = .+32768\n");
33002 toc_initialized = 1;
33003 }
33004 }
33005 }
33006
33007 /* Implement TARGET_ASM_INIT_SECTIONS. */
33008
33009 static void
33010 rs6000_elf_asm_init_sections (void)
33011 {
33012 toc_section
33013 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
33014
33015 sdata2_section
33016 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
33017 SDATA2_SECTION_ASM_OP);
33018 }
33019
33020 /* Implement TARGET_SELECT_RTX_SECTION. */
33021
33022 static section *
33023 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
33024 unsigned HOST_WIDE_INT align)
33025 {
33026 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33027 return toc_section;
33028 else
33029 return default_elf_select_rtx_section (mode, x, align);
33030 }
33031 \f
33032 /* For a SYMBOL_REF, set generic flags and then perform some
33033 target-specific processing.
33034
33035 When the AIX ABI is requested on a non-AIX system, replace the
33036 function name with the real name (with a leading .) rather than the
33037 function descriptor name. This saves a lot of overriding code to
33038 read the prefixes. */
33039
33040 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
33041 static void
33042 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
33043 {
33044 default_encode_section_info (decl, rtl, first);
33045
33046 if (first
33047 && TREE_CODE (decl) == FUNCTION_DECL
33048 && !TARGET_AIX
33049 && DEFAULT_ABI == ABI_AIX)
33050 {
33051 rtx sym_ref = XEXP (rtl, 0);
33052 size_t len = strlen (XSTR (sym_ref, 0));
33053 char *str = XALLOCAVEC (char, len + 2);
33054 str[0] = '.';
33055 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
33056 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
33057 }
33058 }
33059
33060 static inline bool
33061 compare_section_name (const char *section, const char *templ)
33062 {
33063 int len;
33064
33065 len = strlen (templ);
33066 return (strncmp (section, templ, len) == 0
33067 && (section[len] == 0 || section[len] == '.'));
33068 }
33069
33070 bool
33071 rs6000_elf_in_small_data_p (const_tree decl)
33072 {
33073 if (rs6000_sdata == SDATA_NONE)
33074 return false;
33075
33076 /* We want to merge strings, so we never consider them small data. */
33077 if (TREE_CODE (decl) == STRING_CST)
33078 return false;
33079
33080 /* Functions are never in the small data area. */
33081 if (TREE_CODE (decl) == FUNCTION_DECL)
33082 return false;
33083
33084 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
33085 {
33086 const char *section = DECL_SECTION_NAME (decl);
33087 if (compare_section_name (section, ".sdata")
33088 || compare_section_name (section, ".sdata2")
33089 || compare_section_name (section, ".gnu.linkonce.s")
33090 || compare_section_name (section, ".sbss")
33091 || compare_section_name (section, ".sbss2")
33092 || compare_section_name (section, ".gnu.linkonce.sb")
33093 || strcmp (section, ".PPC.EMB.sdata0") == 0
33094 || strcmp (section, ".PPC.EMB.sbss0") == 0)
33095 return true;
33096 }
33097 else
33098 {
33099 /* If we are told not to put readonly data in sdata, then don't. */
33100 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
33101 && !rs6000_readonly_in_sdata)
33102 return false;
33103
33104 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
33105
33106 if (size > 0
33107 && size <= g_switch_value
33108 /* If it's not public, and we're not going to reference it there,
33109 there's no need to put it in the small data section. */
33110 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
33111 return true;
33112 }
33113
33114 return false;
33115 }
33116
33117 #endif /* USING_ELFOS_H */
33118 \f
33119 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
33120
33121 static bool
33122 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
33123 {
33124 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
33125 }
33126
33127 /* Do not place thread-local symbols refs in the object blocks. */
33128
33129 static bool
33130 rs6000_use_blocks_for_decl_p (const_tree decl)
33131 {
33132 return !DECL_THREAD_LOCAL_P (decl);
33133 }
33134 \f
33135 /* Return a REG that occurs in ADDR with coefficient 1.
33136 ADDR can be effectively incremented by incrementing REG.
33137
33138 r0 is special and we must not select it as an address
33139 register by this routine since our caller will try to
33140 increment the returned register via an "la" instruction. */
33141
33142 rtx
33143 find_addr_reg (rtx addr)
33144 {
33145 while (GET_CODE (addr) == PLUS)
33146 {
33147 if (REG_P (XEXP (addr, 0))
33148 && REGNO (XEXP (addr, 0)) != 0)
33149 addr = XEXP (addr, 0);
33150 else if (REG_P (XEXP (addr, 1))
33151 && REGNO (XEXP (addr, 1)) != 0)
33152 addr = XEXP (addr, 1);
33153 else if (CONSTANT_P (XEXP (addr, 0)))
33154 addr = XEXP (addr, 1);
33155 else if (CONSTANT_P (XEXP (addr, 1)))
33156 addr = XEXP (addr, 0);
33157 else
33158 gcc_unreachable ();
33159 }
33160 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
33161 return addr;
33162 }
33163
33164 void
33165 rs6000_fatal_bad_address (rtx op)
33166 {
33167 fatal_insn ("bad address", op);
33168 }
33169
33170 #if TARGET_MACHO
33171
33172 typedef struct branch_island_d {
33173 tree function_name;
33174 tree label_name;
33175 int line_number;
33176 } branch_island;
33177
33178
33179 static vec<branch_island, va_gc> *branch_islands;
33180
33181 /* Remember to generate a branch island for far calls to the given
33182 function. */
33183
33184 static void
33185 add_compiler_branch_island (tree label_name, tree function_name,
33186 int line_number)
33187 {
33188 branch_island bi = {function_name, label_name, line_number};
33189 vec_safe_push (branch_islands, bi);
33190 }
33191
33192 /* Generate far-jump branch islands for everything recorded in
33193 branch_islands. Invoked immediately after the last instruction of
33194 the epilogue has been emitted; the branch islands must be appended
33195 to, and contiguous with, the function body. Mach-O stubs are
33196 generated in machopic_output_stub(). */
33197
33198 static void
33199 macho_branch_islands (void)
33200 {
33201 char tmp_buf[512];
33202
33203 while (!vec_safe_is_empty (branch_islands))
33204 {
33205 branch_island *bi = &branch_islands->last ();
33206 const char *label = IDENTIFIER_POINTER (bi->label_name);
33207 const char *name = IDENTIFIER_POINTER (bi->function_name);
33208 char name_buf[512];
33209 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
33210 if (name[0] == '*' || name[0] == '&')
33211 strcpy (name_buf, name+1);
33212 else
33213 {
33214 name_buf[0] = '_';
33215 strcpy (name_buf+1, name);
33216 }
33217 strcpy (tmp_buf, "\n");
33218 strcat (tmp_buf, label);
33219 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33220 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33221 dbxout_stabd (N_SLINE, bi->line_number);
33222 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33223 if (flag_pic)
33224 {
33225 if (TARGET_LINK_STACK)
33226 {
33227 char name[32];
33228 get_ppc476_thunk_name (name);
33229 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
33230 strcat (tmp_buf, name);
33231 strcat (tmp_buf, "\n");
33232 strcat (tmp_buf, label);
33233 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33234 }
33235 else
33236 {
33237 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
33238 strcat (tmp_buf, label);
33239 strcat (tmp_buf, "_pic\n");
33240 strcat (tmp_buf, label);
33241 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
33242 }
33243
33244 strcat (tmp_buf, "\taddis r11,r11,ha16(");
33245 strcat (tmp_buf, name_buf);
33246 strcat (tmp_buf, " - ");
33247 strcat (tmp_buf, label);
33248 strcat (tmp_buf, "_pic)\n");
33249
33250 strcat (tmp_buf, "\tmtlr r0\n");
33251
33252 strcat (tmp_buf, "\taddi r12,r11,lo16(");
33253 strcat (tmp_buf, name_buf);
33254 strcat (tmp_buf, " - ");
33255 strcat (tmp_buf, label);
33256 strcat (tmp_buf, "_pic)\n");
33257
33258 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
33259 }
33260 else
33261 {
33262 strcat (tmp_buf, ":\nlis r12,hi16(");
33263 strcat (tmp_buf, name_buf);
33264 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
33265 strcat (tmp_buf, name_buf);
33266 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
33267 }
33268 output_asm_insn (tmp_buf, 0);
33269 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
33270 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33271 dbxout_stabd (N_SLINE, bi->line_number);
33272 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
33273 branch_islands->pop ();
33274 }
33275 }
33276
33277 /* NO_PREVIOUS_DEF checks in the link list whether the function name is
33278 already there or not. */
33279
33280 static int
33281 no_previous_def (tree function_name)
33282 {
33283 branch_island *bi;
33284 unsigned ix;
33285
33286 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33287 if (function_name == bi->function_name)
33288 return 0;
33289 return 1;
33290 }
33291
33292 /* GET_PREV_LABEL gets the label name from the previous definition of
33293 the function. */
33294
33295 static tree
33296 get_prev_label (tree function_name)
33297 {
33298 branch_island *bi;
33299 unsigned ix;
33300
33301 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33302 if (function_name == bi->function_name)
33303 return bi->label_name;
33304 return NULL_TREE;
33305 }
33306
33307 /* Generate PIC and indirect symbol stubs. */
33308
33309 void
33310 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33311 {
33312 unsigned int length;
33313 char *symbol_name, *lazy_ptr_name;
33314 char *local_label_0;
33315 static int label = 0;
33316
33317 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33318 symb = (*targetm.strip_name_encoding) (symb);
33319
33320
33321 length = strlen (symb);
33322 symbol_name = XALLOCAVEC (char, length + 32);
33323 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33324
33325 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33326 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33327
33328 if (flag_pic == 2)
33329 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33330 else
33331 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33332
33333 if (flag_pic == 2)
33334 {
33335 fprintf (file, "\t.align 5\n");
33336
33337 fprintf (file, "%s:\n", stub);
33338 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33339
33340 label++;
33341 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33342 sprintf (local_label_0, "\"L%011d$spb\"", label);
33343
33344 fprintf (file, "\tmflr r0\n");
33345 if (TARGET_LINK_STACK)
33346 {
33347 char name[32];
33348 get_ppc476_thunk_name (name);
33349 fprintf (file, "\tbl %s\n", name);
33350 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33351 }
33352 else
33353 {
33354 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33355 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33356 }
33357 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33358 lazy_ptr_name, local_label_0);
33359 fprintf (file, "\tmtlr r0\n");
33360 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33361 (TARGET_64BIT ? "ldu" : "lwzu"),
33362 lazy_ptr_name, local_label_0);
33363 fprintf (file, "\tmtctr r12\n");
33364 fprintf (file, "\tbctr\n");
33365 }
33366 else
33367 {
33368 fprintf (file, "\t.align 4\n");
33369
33370 fprintf (file, "%s:\n", stub);
33371 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33372
33373 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33374 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33375 (TARGET_64BIT ? "ldu" : "lwzu"),
33376 lazy_ptr_name);
33377 fprintf (file, "\tmtctr r12\n");
33378 fprintf (file, "\tbctr\n");
33379 }
33380
33381 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33382 fprintf (file, "%s:\n", lazy_ptr_name);
33383 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33384 fprintf (file, "%sdyld_stub_binding_helper\n",
33385 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33386 }
33387
33388 /* Legitimize PIC addresses. If the address is already
33389 position-independent, we return ORIG. Newly generated
33390 position-independent addresses go into a reg. This is REG if non
33391 zero, otherwise we allocate register(s) as necessary. */
33392
33393 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33394
33395 rtx
33396 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33397 rtx reg)
33398 {
33399 rtx base, offset;
33400
33401 if (reg == NULL && !reload_completed)
33402 reg = gen_reg_rtx (Pmode);
33403
33404 if (GET_CODE (orig) == CONST)
33405 {
33406 rtx reg_temp;
33407
33408 if (GET_CODE (XEXP (orig, 0)) == PLUS
33409 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33410 return orig;
33411
33412 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33413
33414 /* Use a different reg for the intermediate value, as
33415 it will be marked UNCHANGING. */
33416 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33417 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33418 Pmode, reg_temp);
33419 offset =
33420 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33421 Pmode, reg);
33422
33423 if (CONST_INT_P (offset))
33424 {
33425 if (SMALL_INT (offset))
33426 return plus_constant (Pmode, base, INTVAL (offset));
33427 else if (!reload_completed)
33428 offset = force_reg (Pmode, offset);
33429 else
33430 {
33431 rtx mem = force_const_mem (Pmode, orig);
33432 return machopic_legitimize_pic_address (mem, Pmode, reg);
33433 }
33434 }
33435 return gen_rtx_PLUS (Pmode, base, offset);
33436 }
33437
33438 /* Fall back on generic machopic code. */
33439 return machopic_legitimize_pic_address (orig, mode, reg);
33440 }
33441
33442 /* Output a .machine directive for the Darwin assembler, and call
33443 the generic start_file routine. */
33444
33445 static void
33446 rs6000_darwin_file_start (void)
33447 {
33448 static const struct
33449 {
33450 const char *arg;
33451 const char *name;
33452 HOST_WIDE_INT if_set;
33453 } mapping[] = {
33454 { "ppc64", "ppc64", MASK_64BIT },
33455 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33456 { "power4", "ppc970", 0 },
33457 { "G5", "ppc970", 0 },
33458 { "7450", "ppc7450", 0 },
33459 { "7400", "ppc7400", MASK_ALTIVEC },
33460 { "G4", "ppc7400", 0 },
33461 { "750", "ppc750", 0 },
33462 { "740", "ppc750", 0 },
33463 { "G3", "ppc750", 0 },
33464 { "604e", "ppc604e", 0 },
33465 { "604", "ppc604", 0 },
33466 { "603e", "ppc603", 0 },
33467 { "603", "ppc603", 0 },
33468 { "601", "ppc601", 0 },
33469 { NULL, "ppc", 0 } };
33470 const char *cpu_id = "";
33471 size_t i;
33472
33473 rs6000_file_start ();
33474 darwin_file_start ();
33475
33476 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33477
33478 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33479 cpu_id = rs6000_default_cpu;
33480
33481 if (global_options_set.x_rs6000_cpu_index)
33482 cpu_id = processor_target_table[rs6000_cpu_index].name;
33483
33484 /* Look through the mapping array. Pick the first name that either
33485 matches the argument, has a bit set in IF_SET that is also set
33486 in the target flags, or has a NULL name. */
33487
33488 i = 0;
33489 while (mapping[i].arg != NULL
33490 && strcmp (mapping[i].arg, cpu_id) != 0
33491 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33492 i++;
33493
33494 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33495 }
33496
33497 #endif /* TARGET_MACHO */
33498
33499 #if TARGET_ELF
33500 static int
33501 rs6000_elf_reloc_rw_mask (void)
33502 {
33503 if (flag_pic)
33504 return 3;
33505 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33506 return 2;
33507 else
33508 return 0;
33509 }
33510
33511 /* Record an element in the table of global constructors. SYMBOL is
33512 a SYMBOL_REF of the function to be called; PRIORITY is a number
33513 between 0 and MAX_INIT_PRIORITY.
33514
33515 This differs from default_named_section_asm_out_constructor in
33516 that we have special handling for -mrelocatable. */
33517
33518 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33519 static void
33520 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33521 {
33522 const char *section = ".ctors";
33523 char buf[18];
33524
33525 if (priority != DEFAULT_INIT_PRIORITY)
33526 {
33527 sprintf (buf, ".ctors.%.5u",
33528 /* Invert the numbering so the linker puts us in the proper
33529 order; constructors are run from right to left, and the
33530 linker sorts in increasing order. */
33531 MAX_INIT_PRIORITY - priority);
33532 section = buf;
33533 }
33534
33535 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33536 assemble_align (POINTER_SIZE);
33537
33538 if (DEFAULT_ABI == ABI_V4
33539 && (TARGET_RELOCATABLE || flag_pic > 1))
33540 {
33541 fputs ("\t.long (", asm_out_file);
33542 output_addr_const (asm_out_file, symbol);
33543 fputs (")@fixup\n", asm_out_file);
33544 }
33545 else
33546 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33547 }
33548
33549 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33550 static void
33551 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33552 {
33553 const char *section = ".dtors";
33554 char buf[18];
33555
33556 if (priority != DEFAULT_INIT_PRIORITY)
33557 {
33558 sprintf (buf, ".dtors.%.5u",
33559 /* Invert the numbering so the linker puts us in the proper
33560 order; constructors are run from right to left, and the
33561 linker sorts in increasing order. */
33562 MAX_INIT_PRIORITY - priority);
33563 section = buf;
33564 }
33565
33566 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33567 assemble_align (POINTER_SIZE);
33568
33569 if (DEFAULT_ABI == ABI_V4
33570 && (TARGET_RELOCATABLE || flag_pic > 1))
33571 {
33572 fputs ("\t.long (", asm_out_file);
33573 output_addr_const (asm_out_file, symbol);
33574 fputs (")@fixup\n", asm_out_file);
33575 }
33576 else
33577 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33578 }
33579
33580 void
33581 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33582 {
33583 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33584 {
33585 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33586 ASM_OUTPUT_LABEL (file, name);
33587 fputs (DOUBLE_INT_ASM_OP, file);
33588 rs6000_output_function_entry (file, name);
33589 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33590 if (DOT_SYMBOLS)
33591 {
33592 fputs ("\t.size\t", file);
33593 assemble_name (file, name);
33594 fputs (",24\n\t.type\t.", file);
33595 assemble_name (file, name);
33596 fputs (",@function\n", file);
33597 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33598 {
33599 fputs ("\t.globl\t.", file);
33600 assemble_name (file, name);
33601 putc ('\n', file);
33602 }
33603 }
33604 else
33605 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33606 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33607 rs6000_output_function_entry (file, name);
33608 fputs (":\n", file);
33609 return;
33610 }
33611
33612 int uses_toc;
33613 if (DEFAULT_ABI == ABI_V4
33614 && (TARGET_RELOCATABLE || flag_pic > 1)
33615 && !TARGET_SECURE_PLT
33616 && (!constant_pool_empty_p () || crtl->profile)
33617 && (uses_toc = uses_TOC ()))
33618 {
33619 char buf[256];
33620
33621 if (uses_toc == 2)
33622 switch_to_other_text_partition ();
33623 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33624
33625 fprintf (file, "\t.long ");
33626 assemble_name (file, toc_label_name);
33627 need_toc_init = 1;
33628 putc ('-', file);
33629 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33630 assemble_name (file, buf);
33631 putc ('\n', file);
33632 if (uses_toc == 2)
33633 switch_to_other_text_partition ();
33634 }
33635
33636 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33637 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33638
33639 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33640 {
33641 char buf[256];
33642
33643 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33644
33645 fprintf (file, "\t.quad .TOC.-");
33646 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33647 assemble_name (file, buf);
33648 putc ('\n', file);
33649 }
33650
33651 if (DEFAULT_ABI == ABI_AIX)
33652 {
33653 const char *desc_name, *orig_name;
33654
33655 orig_name = (*targetm.strip_name_encoding) (name);
33656 desc_name = orig_name;
33657 while (*desc_name == '.')
33658 desc_name++;
33659
33660 if (TREE_PUBLIC (decl))
33661 fprintf (file, "\t.globl %s\n", desc_name);
33662
33663 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33664 fprintf (file, "%s:\n", desc_name);
33665 fprintf (file, "\t.long %s\n", orig_name);
33666 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33667 fputs ("\t.long 0\n", file);
33668 fprintf (file, "\t.previous\n");
33669 }
33670 ASM_OUTPUT_LABEL (file, name);
33671 }
33672
33673 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33674 static void
33675 rs6000_elf_file_end (void)
33676 {
33677 #ifdef HAVE_AS_GNU_ATTRIBUTE
33678 /* ??? The value emitted depends on options active at file end.
33679 Assume anyone using #pragma or attributes that might change
33680 options knows what they are doing. */
33681 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33682 && rs6000_passes_float)
33683 {
33684 int fp;
33685
33686 if (TARGET_HARD_FLOAT)
33687 fp = 1;
33688 else
33689 fp = 2;
33690 if (rs6000_passes_long_double)
33691 {
33692 if (!TARGET_LONG_DOUBLE_128)
33693 fp |= 2 * 4;
33694 else if (TARGET_IEEEQUAD)
33695 fp |= 3 * 4;
33696 else
33697 fp |= 1 * 4;
33698 }
33699 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33700 }
33701 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33702 {
33703 if (rs6000_passes_vector)
33704 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33705 (TARGET_ALTIVEC_ABI ? 2 : 1));
33706 if (rs6000_returns_struct)
33707 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33708 aix_struct_return ? 2 : 1);
33709 }
33710 #endif
33711 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33712 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33713 file_end_indicate_exec_stack ();
33714 #endif
33715
33716 if (flag_split_stack)
33717 file_end_indicate_split_stack ();
33718
33719 if (cpu_builtin_p)
33720 {
33721 /* We have expanded a CPU builtin, so we need to emit a reference to
33722 the special symbol that LIBC uses to declare it supports the
33723 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33724 switch_to_section (data_section);
33725 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33726 fprintf (asm_out_file, "\t%s %s\n",
33727 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33728 }
33729 }
33730 #endif
33731
33732 #if TARGET_XCOFF
33733
33734 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33735 #define HAVE_XCOFF_DWARF_EXTRAS 0
33736 #endif
33737
33738 static enum unwind_info_type
33739 rs6000_xcoff_debug_unwind_info (void)
33740 {
33741 return UI_NONE;
33742 }
33743
33744 static void
33745 rs6000_xcoff_asm_output_anchor (rtx symbol)
33746 {
33747 char buffer[100];
33748
33749 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33750 SYMBOL_REF_BLOCK_OFFSET (symbol));
33751 fprintf (asm_out_file, "%s", SET_ASM_OP);
33752 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33753 fprintf (asm_out_file, ",");
33754 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33755 fprintf (asm_out_file, "\n");
33756 }
33757
33758 static void
33759 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33760 {
33761 fputs (GLOBAL_ASM_OP, stream);
33762 RS6000_OUTPUT_BASENAME (stream, name);
33763 putc ('\n', stream);
33764 }
33765
33766 /* A get_unnamed_decl callback, used for read-only sections. PTR
33767 points to the section string variable. */
33768
33769 static void
33770 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33771 {
33772 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33773 *(const char *const *) directive,
33774 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33775 }
33776
33777 /* Likewise for read-write sections. */
33778
33779 static void
33780 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33781 {
33782 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33783 *(const char *const *) directive,
33784 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33785 }
33786
33787 static void
33788 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33789 {
33790 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33791 *(const char *const *) directive,
33792 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33793 }
33794
33795 /* A get_unnamed_section callback, used for switching to toc_section. */
33796
33797 static void
33798 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33799 {
33800 if (TARGET_MINIMAL_TOC)
33801 {
33802 /* toc_section is always selected at least once from
33803 rs6000_xcoff_file_start, so this is guaranteed to
33804 always be defined once and only once in each file. */
33805 if (!toc_initialized)
33806 {
33807 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33808 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33809 toc_initialized = 1;
33810 }
33811 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33812 (TARGET_32BIT ? "" : ",3"));
33813 }
33814 else
33815 fputs ("\t.toc\n", asm_out_file);
33816 }
33817
33818 /* Implement TARGET_ASM_INIT_SECTIONS. */
33819
33820 static void
33821 rs6000_xcoff_asm_init_sections (void)
33822 {
33823 read_only_data_section
33824 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33825 &xcoff_read_only_section_name);
33826
33827 private_data_section
33828 = get_unnamed_section (SECTION_WRITE,
33829 rs6000_xcoff_output_readwrite_section_asm_op,
33830 &xcoff_private_data_section_name);
33831
33832 tls_data_section
33833 = get_unnamed_section (SECTION_TLS,
33834 rs6000_xcoff_output_tls_section_asm_op,
33835 &xcoff_tls_data_section_name);
33836
33837 tls_private_data_section
33838 = get_unnamed_section (SECTION_TLS,
33839 rs6000_xcoff_output_tls_section_asm_op,
33840 &xcoff_private_data_section_name);
33841
33842 read_only_private_data_section
33843 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33844 &xcoff_private_data_section_name);
33845
33846 toc_section
33847 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33848
33849 readonly_data_section = read_only_data_section;
33850 }
33851
33852 static int
33853 rs6000_xcoff_reloc_rw_mask (void)
33854 {
33855 return 3;
33856 }
33857
33858 static void
33859 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33860 tree decl ATTRIBUTE_UNUSED)
33861 {
33862 int smclass;
33863 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33864
33865 if (flags & SECTION_EXCLUDE)
33866 smclass = 4;
33867 else if (flags & SECTION_DEBUG)
33868 {
33869 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33870 return;
33871 }
33872 else if (flags & SECTION_CODE)
33873 smclass = 0;
33874 else if (flags & SECTION_TLS)
33875 smclass = 3;
33876 else if (flags & SECTION_WRITE)
33877 smclass = 2;
33878 else
33879 smclass = 1;
33880
33881 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33882 (flags & SECTION_CODE) ? "." : "",
33883 name, suffix[smclass], flags & SECTION_ENTSIZE);
33884 }
33885
33886 #define IN_NAMED_SECTION(DECL) \
33887 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33888 && DECL_SECTION_NAME (DECL) != NULL)
33889
33890 static section *
33891 rs6000_xcoff_select_section (tree decl, int reloc,
33892 unsigned HOST_WIDE_INT align)
33893 {
33894 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33895 named section. */
33896 if (align > BIGGEST_ALIGNMENT)
33897 {
33898 resolve_unique_section (decl, reloc, true);
33899 if (IN_NAMED_SECTION (decl))
33900 return get_named_section (decl, NULL, reloc);
33901 }
33902
33903 if (decl_readonly_section (decl, reloc))
33904 {
33905 if (TREE_PUBLIC (decl))
33906 return read_only_data_section;
33907 else
33908 return read_only_private_data_section;
33909 }
33910 else
33911 {
33912 #if HAVE_AS_TLS
33913 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33914 {
33915 if (TREE_PUBLIC (decl))
33916 return tls_data_section;
33917 else if (bss_initializer_p (decl))
33918 {
33919 /* Convert to COMMON to emit in BSS. */
33920 DECL_COMMON (decl) = 1;
33921 return tls_comm_section;
33922 }
33923 else
33924 return tls_private_data_section;
33925 }
33926 else
33927 #endif
33928 if (TREE_PUBLIC (decl))
33929 return data_section;
33930 else
33931 return private_data_section;
33932 }
33933 }
33934
33935 static void
33936 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33937 {
33938 const char *name;
33939
33940 /* Use select_section for private data and uninitialized data with
33941 alignment <= BIGGEST_ALIGNMENT. */
33942 if (!TREE_PUBLIC (decl)
33943 || DECL_COMMON (decl)
33944 || (DECL_INITIAL (decl) == NULL_TREE
33945 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33946 || DECL_INITIAL (decl) == error_mark_node
33947 || (flag_zero_initialized_in_bss
33948 && initializer_zerop (DECL_INITIAL (decl))))
33949 return;
33950
33951 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33952 name = (*targetm.strip_name_encoding) (name);
33953 set_decl_section_name (decl, name);
33954 }
33955
33956 /* Select section for constant in constant pool.
33957
33958 On RS/6000, all constants are in the private read-only data area.
33959 However, if this is being placed in the TOC it must be output as a
33960 toc entry. */
33961
33962 static section *
33963 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33964 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33965 {
33966 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33967 return toc_section;
33968 else
33969 return read_only_private_data_section;
33970 }
33971
33972 /* Remove any trailing [DS] or the like from the symbol name. */
33973
33974 static const char *
33975 rs6000_xcoff_strip_name_encoding (const char *name)
33976 {
33977 size_t len;
33978 if (*name == '*')
33979 name++;
33980 len = strlen (name);
33981 if (name[len - 1] == ']')
33982 return ggc_alloc_string (name, len - 4);
33983 else
33984 return name;
33985 }
33986
33987 /* Section attributes. AIX is always PIC. */
33988
33989 static unsigned int
33990 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33991 {
33992 unsigned int align;
33993 unsigned int flags = default_section_type_flags (decl, name, reloc);
33994
33995 /* Align to at least UNIT size. */
33996 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33997 align = MIN_UNITS_PER_WORD;
33998 else
33999 /* Increase alignment of large objects if not already stricter. */
34000 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
34001 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
34002 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
34003
34004 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
34005 }
34006
34007 /* Output at beginning of assembler file.
34008
34009 Initialize the section names for the RS/6000 at this point.
34010
34011 Specify filename, including full path, to assembler.
34012
34013 We want to go into the TOC section so at least one .toc will be emitted.
34014 Also, in order to output proper .bs/.es pairs, we need at least one static
34015 [RW] section emitted.
34016
34017 Finally, declare mcount when profiling to make the assembler happy. */
34018
34019 static void
34020 rs6000_xcoff_file_start (void)
34021 {
34022 rs6000_gen_section_name (&xcoff_bss_section_name,
34023 main_input_filename, ".bss_");
34024 rs6000_gen_section_name (&xcoff_private_data_section_name,
34025 main_input_filename, ".rw_");
34026 rs6000_gen_section_name (&xcoff_read_only_section_name,
34027 main_input_filename, ".ro_");
34028 rs6000_gen_section_name (&xcoff_tls_data_section_name,
34029 main_input_filename, ".tls_");
34030 rs6000_gen_section_name (&xcoff_tbss_section_name,
34031 main_input_filename, ".tbss_[UL]");
34032
34033 fputs ("\t.file\t", asm_out_file);
34034 output_quoted_string (asm_out_file, main_input_filename);
34035 fputc ('\n', asm_out_file);
34036 if (write_symbols != NO_DEBUG)
34037 switch_to_section (private_data_section);
34038 switch_to_section (toc_section);
34039 switch_to_section (text_section);
34040 if (profile_flag)
34041 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
34042 rs6000_file_start ();
34043 }
34044
34045 /* Output at end of assembler file.
34046 On the RS/6000, referencing data should automatically pull in text. */
34047
34048 static void
34049 rs6000_xcoff_file_end (void)
34050 {
34051 switch_to_section (text_section);
34052 fputs ("_section_.text:\n", asm_out_file);
34053 switch_to_section (data_section);
34054 fputs (TARGET_32BIT
34055 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
34056 asm_out_file);
34057 }
34058
34059 struct declare_alias_data
34060 {
34061 FILE *file;
34062 bool function_descriptor;
34063 };
34064
34065 /* Declare alias N. A helper function for for_node_and_aliases. */
34066
34067 static bool
34068 rs6000_declare_alias (struct symtab_node *n, void *d)
34069 {
34070 struct declare_alias_data *data = (struct declare_alias_data *)d;
34071 /* Main symbol is output specially, because varasm machinery does part of
34072 the job for us - we do not need to declare .globl/lglobs and such. */
34073 if (!n->alias || n->weakref)
34074 return false;
34075
34076 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
34077 return false;
34078
34079 /* Prevent assemble_alias from trying to use .set pseudo operation
34080 that does not behave as expected by the middle-end. */
34081 TREE_ASM_WRITTEN (n->decl) = true;
34082
34083 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
34084 char *buffer = (char *) alloca (strlen (name) + 2);
34085 char *p;
34086 int dollar_inside = 0;
34087
34088 strcpy (buffer, name);
34089 p = strchr (buffer, '$');
34090 while (p) {
34091 *p = '_';
34092 dollar_inside++;
34093 p = strchr (p + 1, '$');
34094 }
34095 if (TREE_PUBLIC (n->decl))
34096 {
34097 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
34098 {
34099 if (dollar_inside) {
34100 if (data->function_descriptor)
34101 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34102 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34103 }
34104 if (data->function_descriptor)
34105 {
34106 fputs ("\t.globl .", data->file);
34107 RS6000_OUTPUT_BASENAME (data->file, buffer);
34108 putc ('\n', data->file);
34109 }
34110 fputs ("\t.globl ", data->file);
34111 RS6000_OUTPUT_BASENAME (data->file, buffer);
34112 putc ('\n', data->file);
34113 }
34114 #ifdef ASM_WEAKEN_DECL
34115 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
34116 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
34117 #endif
34118 }
34119 else
34120 {
34121 if (dollar_inside)
34122 {
34123 if (data->function_descriptor)
34124 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
34125 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
34126 }
34127 if (data->function_descriptor)
34128 {
34129 fputs ("\t.lglobl .", data->file);
34130 RS6000_OUTPUT_BASENAME (data->file, buffer);
34131 putc ('\n', data->file);
34132 }
34133 fputs ("\t.lglobl ", data->file);
34134 RS6000_OUTPUT_BASENAME (data->file, buffer);
34135 putc ('\n', data->file);
34136 }
34137 if (data->function_descriptor)
34138 fputs (".", data->file);
34139 RS6000_OUTPUT_BASENAME (data->file, buffer);
34140 fputs (":\n", data->file);
34141 return false;
34142 }
34143
34144
34145 #ifdef HAVE_GAS_HIDDEN
34146 /* Helper function to calculate visibility of a DECL
34147 and return the value as a const string. */
34148
34149 static const char *
34150 rs6000_xcoff_visibility (tree decl)
34151 {
34152 static const char * const visibility_types[] = {
34153 "", ",protected", ",hidden", ",internal"
34154 };
34155
34156 enum symbol_visibility vis = DECL_VISIBILITY (decl);
34157 return visibility_types[vis];
34158 }
34159 #endif
34160
34161
34162 /* This macro produces the initial definition of a function name.
34163 On the RS/6000, we need to place an extra '.' in the function name and
34164 output the function descriptor.
34165 Dollar signs are converted to underscores.
34166
34167 The csect for the function will have already been created when
34168 text_section was selected. We do have to go back to that csect, however.
34169
34170 The third and fourth parameters to the .function pseudo-op (16 and 044)
34171 are placeholders which no longer have any use.
34172
34173 Because AIX assembler's .set command has unexpected semantics, we output
34174 all aliases as alternative labels in front of the definition. */
34175
34176 void
34177 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
34178 {
34179 char *buffer = (char *) alloca (strlen (name) + 1);
34180 char *p;
34181 int dollar_inside = 0;
34182 struct declare_alias_data data = {file, false};
34183
34184 strcpy (buffer, name);
34185 p = strchr (buffer, '$');
34186 while (p) {
34187 *p = '_';
34188 dollar_inside++;
34189 p = strchr (p + 1, '$');
34190 }
34191 if (TREE_PUBLIC (decl))
34192 {
34193 if (!RS6000_WEAK || !DECL_WEAK (decl))
34194 {
34195 if (dollar_inside) {
34196 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34197 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34198 }
34199 fputs ("\t.globl .", file);
34200 RS6000_OUTPUT_BASENAME (file, buffer);
34201 #ifdef HAVE_GAS_HIDDEN
34202 fputs (rs6000_xcoff_visibility (decl), file);
34203 #endif
34204 putc ('\n', file);
34205 }
34206 }
34207 else
34208 {
34209 if (dollar_inside) {
34210 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
34211 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
34212 }
34213 fputs ("\t.lglobl .", file);
34214 RS6000_OUTPUT_BASENAME (file, buffer);
34215 putc ('\n', file);
34216 }
34217 fputs ("\t.csect ", file);
34218 RS6000_OUTPUT_BASENAME (file, buffer);
34219 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
34220 RS6000_OUTPUT_BASENAME (file, buffer);
34221 fputs (":\n", file);
34222 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34223 &data, true);
34224 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
34225 RS6000_OUTPUT_BASENAME (file, buffer);
34226 fputs (", TOC[tc0], 0\n", file);
34227 in_section = NULL;
34228 switch_to_section (function_section (decl));
34229 putc ('.', file);
34230 RS6000_OUTPUT_BASENAME (file, buffer);
34231 fputs (":\n", file);
34232 data.function_descriptor = true;
34233 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34234 &data, true);
34235 if (!DECL_IGNORED_P (decl))
34236 {
34237 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
34238 xcoffout_declare_function (file, decl, buffer);
34239 else if (write_symbols == DWARF2_DEBUG)
34240 {
34241 name = (*targetm.strip_name_encoding) (name);
34242 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
34243 }
34244 }
34245 return;
34246 }
34247
34248
34249 /* Output assembly language to globalize a symbol from a DECL,
34250 possibly with visibility. */
34251
34252 void
34253 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
34254 {
34255 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
34256 fputs (GLOBAL_ASM_OP, stream);
34257 RS6000_OUTPUT_BASENAME (stream, name);
34258 #ifdef HAVE_GAS_HIDDEN
34259 fputs (rs6000_xcoff_visibility (decl), stream);
34260 #endif
34261 putc ('\n', stream);
34262 }
34263
34264 /* Output assembly language to define a symbol as COMMON from a DECL,
34265 possibly with visibility. */
34266
34267 void
34268 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
34269 tree decl ATTRIBUTE_UNUSED,
34270 const char *name,
34271 unsigned HOST_WIDE_INT size,
34272 unsigned HOST_WIDE_INT align)
34273 {
34274 unsigned HOST_WIDE_INT align2 = 2;
34275
34276 if (align > 32)
34277 align2 = floor_log2 (align / BITS_PER_UNIT);
34278 else if (size > 4)
34279 align2 = 3;
34280
34281 fputs (COMMON_ASM_OP, stream);
34282 RS6000_OUTPUT_BASENAME (stream, name);
34283
34284 fprintf (stream,
34285 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34286 size, align2);
34287
34288 #ifdef HAVE_GAS_HIDDEN
34289 if (decl != NULL)
34290 fputs (rs6000_xcoff_visibility (decl), stream);
34291 #endif
34292 putc ('\n', stream);
34293 }
34294
34295 /* This macro produces the initial definition of a object (variable) name.
34296 Because AIX assembler's .set command has unexpected semantics, we output
34297 all aliases as alternative labels in front of the definition. */
34298
34299 void
34300 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34301 {
34302 struct declare_alias_data data = {file, false};
34303 RS6000_OUTPUT_BASENAME (file, name);
34304 fputs (":\n", file);
34305 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34306 &data, true);
34307 }
34308
34309 /* Overide the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'. */
34310
34311 void
34312 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34313 {
34314 fputs (integer_asm_op (size, FALSE), file);
34315 assemble_name (file, label);
34316 fputs ("-$", file);
34317 }
34318
34319 /* Output a symbol offset relative to the dbase for the current object.
34320 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34321 signed offsets.
34322
34323 __gcc_unwind_dbase is embedded in all executables/libraries through
34324 libgcc/config/rs6000/crtdbase.S. */
34325
34326 void
34327 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34328 {
34329 fputs (integer_asm_op (size, FALSE), file);
34330 assemble_name (file, label);
34331 fputs("-__gcc_unwind_dbase", file);
34332 }
34333
34334 #ifdef HAVE_AS_TLS
34335 static void
34336 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34337 {
34338 rtx symbol;
34339 int flags;
34340 const char *symname;
34341
34342 default_encode_section_info (decl, rtl, first);
34343
34344 /* Careful not to prod global register variables. */
34345 if (!MEM_P (rtl))
34346 return;
34347 symbol = XEXP (rtl, 0);
34348 if (!SYMBOL_REF_P (symbol))
34349 return;
34350
34351 flags = SYMBOL_REF_FLAGS (symbol);
34352
34353 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34354 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34355
34356 SYMBOL_REF_FLAGS (symbol) = flags;
34357
34358 /* Append mapping class to extern decls. */
34359 symname = XSTR (symbol, 0);
34360 if (decl /* sync condition with assemble_external () */
34361 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34362 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34363 || TREE_CODE (decl) == FUNCTION_DECL)
34364 && symname[strlen (symname) - 1] != ']')
34365 {
34366 char *newname = (char *) alloca (strlen (symname) + 5);
34367 strcpy (newname, symname);
34368 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34369 ? "[DS]" : "[UA]"));
34370 XSTR (symbol, 0) = ggc_strdup (newname);
34371 }
34372 }
34373 #endif /* HAVE_AS_TLS */
34374 #endif /* TARGET_XCOFF */
34375
34376 void
34377 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34378 const char *name, const char *val)
34379 {
34380 fputs ("\t.weak\t", stream);
34381 RS6000_OUTPUT_BASENAME (stream, name);
34382 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34383 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34384 {
34385 if (TARGET_XCOFF)
34386 fputs ("[DS]", stream);
34387 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34388 if (TARGET_XCOFF)
34389 fputs (rs6000_xcoff_visibility (decl), stream);
34390 #endif
34391 fputs ("\n\t.weak\t.", stream);
34392 RS6000_OUTPUT_BASENAME (stream, name);
34393 }
34394 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34395 if (TARGET_XCOFF)
34396 fputs (rs6000_xcoff_visibility (decl), stream);
34397 #endif
34398 fputc ('\n', stream);
34399 if (val)
34400 {
34401 #ifdef ASM_OUTPUT_DEF
34402 ASM_OUTPUT_DEF (stream, name, val);
34403 #endif
34404 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34405 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34406 {
34407 fputs ("\t.set\t.", stream);
34408 RS6000_OUTPUT_BASENAME (stream, name);
34409 fputs (",.", stream);
34410 RS6000_OUTPUT_BASENAME (stream, val);
34411 fputc ('\n', stream);
34412 }
34413 }
34414 }
34415
34416
34417 /* Return true if INSN should not be copied. */
34418
34419 static bool
34420 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34421 {
34422 return recog_memoized (insn) >= 0
34423 && get_attr_cannot_copy (insn);
34424 }
34425
34426 /* Compute a (partial) cost for rtx X. Return true if the complete
34427 cost has been computed, and false if subexpressions should be
34428 scanned. In either case, *TOTAL contains the cost result. */
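/* As an illustrative example of the CONST_INT case below: when costing the
   (const_int 8) operand of (plus:SI (reg:SI) (const_int 8)) with
   outer_code == PLUS, *TOTAL is set to 0, because the value satisfies the
   "I" constraint (a signed 16-bit immediate) and so rides along in the
   addi instruction for free.  */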
34429
34430 static bool
34431 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34432 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34433 {
34434 int code = GET_CODE (x);
34435
34436 switch (code)
34437 {
34438 /* On the RS/6000, if it is valid in the insn, it is free. */
34439 case CONST_INT:
34440 if (((outer_code == SET
34441 || outer_code == PLUS
34442 || outer_code == MINUS)
34443 && (satisfies_constraint_I (x)
34444 || satisfies_constraint_L (x)))
34445 || (outer_code == AND
34446 && (satisfies_constraint_K (x)
34447 || (mode == SImode
34448 ? satisfies_constraint_L (x)
34449 : satisfies_constraint_J (x))))
34450 || ((outer_code == IOR || outer_code == XOR)
34451 && (satisfies_constraint_K (x)
34452 || (mode == SImode
34453 ? satisfies_constraint_L (x)
34454 : satisfies_constraint_J (x))))
34455 || outer_code == ASHIFT
34456 || outer_code == ASHIFTRT
34457 || outer_code == LSHIFTRT
34458 || outer_code == ROTATE
34459 || outer_code == ROTATERT
34460 || outer_code == ZERO_EXTRACT
34461 || (outer_code == MULT
34462 && satisfies_constraint_I (x))
34463 || ((outer_code == DIV || outer_code == UDIV
34464 || outer_code == MOD || outer_code == UMOD)
34465 && exact_log2 (INTVAL (x)) >= 0)
34466 || (outer_code == COMPARE
34467 && (satisfies_constraint_I (x)
34468 || satisfies_constraint_K (x)))
34469 || ((outer_code == EQ || outer_code == NE)
34470 && (satisfies_constraint_I (x)
34471 || satisfies_constraint_K (x)
34472 || (mode == SImode
34473 ? satisfies_constraint_L (x)
34474 : satisfies_constraint_J (x))))
34475 || (outer_code == GTU
34476 && satisfies_constraint_I (x))
34477 || (outer_code == LTU
34478 && satisfies_constraint_P (x)))
34479 {
34480 *total = 0;
34481 return true;
34482 }
34483 else if ((outer_code == PLUS
34484 && reg_or_add_cint_operand (x, VOIDmode))
34485 || (outer_code == MINUS
34486 && reg_or_sub_cint_operand (x, VOIDmode))
34487 || ((outer_code == SET
34488 || outer_code == IOR
34489 || outer_code == XOR)
34490 && (INTVAL (x)
34491 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34492 {
34493 *total = COSTS_N_INSNS (1);
34494 return true;
34495 }
34496 /* FALLTHRU */
34497
34498 case CONST_DOUBLE:
34499 case CONST_WIDE_INT:
34500 case CONST:
34501 case HIGH:
34502 case SYMBOL_REF:
34503 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34504 return true;
34505
34506 case MEM:
34507 /* When optimizing for size, MEM should be slightly more expensive
34508 	 than generating an address, e.g., (plus (reg) (const)).
34509 L1 cache latency is about two instructions. */
34510 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34511 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34512 *total += COSTS_N_INSNS (100);
34513 return true;
34514
34515 case LABEL_REF:
34516 *total = 0;
34517 return true;
34518
34519 case PLUS:
34520 case MINUS:
34521 if (FLOAT_MODE_P (mode))
34522 *total = rs6000_cost->fp;
34523 else
34524 *total = COSTS_N_INSNS (1);
34525 return false;
34526
34527 case MULT:
34528 if (CONST_INT_P (XEXP (x, 1))
34529 && satisfies_constraint_I (XEXP (x, 1)))
34530 {
34531 if (INTVAL (XEXP (x, 1)) >= -256
34532 && INTVAL (XEXP (x, 1)) <= 255)
34533 *total = rs6000_cost->mulsi_const9;
34534 else
34535 *total = rs6000_cost->mulsi_const;
34536 }
34537 else if (mode == SFmode)
34538 *total = rs6000_cost->fp;
34539 else if (FLOAT_MODE_P (mode))
34540 *total = rs6000_cost->dmul;
34541 else if (mode == DImode)
34542 *total = rs6000_cost->muldi;
34543 else
34544 *total = rs6000_cost->mulsi;
34545 return false;
34546
34547 case FMA:
34548 if (mode == SFmode)
34549 *total = rs6000_cost->fp;
34550 else
34551 *total = rs6000_cost->dmul;
34552 break;
34553
34554 case DIV:
34555 case MOD:
34556 if (FLOAT_MODE_P (mode))
34557 {
34558 *total = mode == DFmode ? rs6000_cost->ddiv
34559 : rs6000_cost->sdiv;
34560 return false;
34561 }
34562 /* FALLTHRU */
34563
34564 case UDIV:
34565 case UMOD:
34566 if (CONST_INT_P (XEXP (x, 1))
34567 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34568 {
34569 if (code == DIV || code == MOD)
34570 /* Shift, addze */
34571 *total = COSTS_N_INSNS (2);
34572 else
34573 /* Shift */
34574 *total = COSTS_N_INSNS (1);
34575 }
34576 else
34577 {
34578 if (GET_MODE (XEXP (x, 1)) == DImode)
34579 *total = rs6000_cost->divdi;
34580 else
34581 *total = rs6000_cost->divsi;
34582 }
34583 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34584 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34585 *total += COSTS_N_INSNS (2);
34586 return false;
34587
34588 case CTZ:
34589 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34590 return false;
34591
34592 case FFS:
34593 *total = COSTS_N_INSNS (4);
34594 return false;
34595
34596 case POPCOUNT:
34597 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34598 return false;
34599
34600 case PARITY:
34601 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34602 return false;
34603
34604 case NOT:
34605 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34606 *total = 0;
34607 else
34608 *total = COSTS_N_INSNS (1);
34609 return false;
34610
34611 case AND:
34612 if (CONST_INT_P (XEXP (x, 1)))
34613 {
34614 rtx left = XEXP (x, 0);
34615 rtx_code left_code = GET_CODE (left);
34616
34617 /* rotate-and-mask: 1 insn. */
34618 if ((left_code == ROTATE
34619 || left_code == ASHIFT
34620 || left_code == LSHIFTRT)
34621 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34622 {
34623 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34624 if (!CONST_INT_P (XEXP (left, 1)))
34625 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34626 *total += COSTS_N_INSNS (1);
34627 return true;
34628 }
34629
34630 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34631 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34632 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34633 || (val & 0xffff) == val
34634 || (val & 0xffff0000) == val
34635 || ((val & 0xffff) == 0 && mode == SImode))
34636 {
34637 *total = rtx_cost (left, mode, AND, 0, speed);
34638 *total += COSTS_N_INSNS (1);
34639 return true;
34640 }
34641
34642 /* 2 insns. */
34643 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34644 {
34645 *total = rtx_cost (left, mode, AND, 0, speed);
34646 *total += COSTS_N_INSNS (2);
34647 return true;
34648 }
34649 }
34650
34651 *total = COSTS_N_INSNS (1);
34652 return false;
34653
34654 case IOR:
34655 /* FIXME */
34656 *total = COSTS_N_INSNS (1);
34657 return true;
34658
34659 case CLZ:
34660 case XOR:
34661 case ZERO_EXTRACT:
34662 *total = COSTS_N_INSNS (1);
34663 return false;
34664
34665 case ASHIFT:
34666 /* The EXTSWSLI instruction is a combined instruction. Don't count both
34667 the sign extend and shift separately within the insn. */
34668 if (TARGET_EXTSWSLI && mode == DImode
34669 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34670 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34671 {
34672 *total = 0;
34673 return false;
34674 }
34675 /* fall through */
34676
34677 case ASHIFTRT:
34678 case LSHIFTRT:
34679 case ROTATE:
34680 case ROTATERT:
34681 /* Handle mul_highpart. */
34682 if (outer_code == TRUNCATE
34683 && GET_CODE (XEXP (x, 0)) == MULT)
34684 {
34685 if (mode == DImode)
34686 *total = rs6000_cost->muldi;
34687 else
34688 *total = rs6000_cost->mulsi;
34689 return true;
34690 }
34691 else if (outer_code == AND)
34692 *total = 0;
34693 else
34694 *total = COSTS_N_INSNS (1);
34695 return false;
34696
34697 case SIGN_EXTEND:
34698 case ZERO_EXTEND:
34699 if (MEM_P (XEXP (x, 0)))
34700 *total = 0;
34701 else
34702 *total = COSTS_N_INSNS (1);
34703 return false;
34704
34705 case COMPARE:
34706 case NEG:
34707 case ABS:
34708 if (!FLOAT_MODE_P (mode))
34709 {
34710 *total = COSTS_N_INSNS (1);
34711 return false;
34712 }
34713 /* FALLTHRU */
34714
34715 case FLOAT:
34716 case UNSIGNED_FLOAT:
34717 case FIX:
34718 case UNSIGNED_FIX:
34719 case FLOAT_TRUNCATE:
34720 *total = rs6000_cost->fp;
34721 return false;
34722
34723 case FLOAT_EXTEND:
34724 if (mode == DFmode)
34725 *total = rs6000_cost->sfdf_convert;
34726 else
34727 *total = rs6000_cost->fp;
34728 return false;
34729
34730 case UNSPEC:
34731 switch (XINT (x, 1))
34732 {
34733 case UNSPEC_FRSP:
34734 *total = rs6000_cost->fp;
34735 return true;
34736
34737 default:
34738 break;
34739 }
34740 break;
34741
34742 case CALL:
34743 case IF_THEN_ELSE:
34744 if (!speed)
34745 {
34746 *total = COSTS_N_INSNS (1);
34747 return true;
34748 }
34749 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34750 {
34751 *total = rs6000_cost->fp;
34752 return false;
34753 }
34754 break;
34755
34756 case NE:
34757 case EQ:
34758 case GTU:
34759 case LTU:
34760 /* Carry bit requires mode == Pmode.
34761 NEG or PLUS already counted so only add one. */
34762 if (mode == Pmode
34763 && (outer_code == NEG || outer_code == PLUS))
34764 {
34765 *total = COSTS_N_INSNS (1);
34766 return true;
34767 }
34768 /* FALLTHRU */
34769
34770 case GT:
34771 case LT:
34772 case UNORDERED:
34773 if (outer_code == SET)
34774 {
34775 if (XEXP (x, 1) == const0_rtx)
34776 {
34777 *total = COSTS_N_INSNS (2);
34778 return true;
34779 }
34780 else
34781 {
34782 *total = COSTS_N_INSNS (3);
34783 return false;
34784 }
34785 }
34786 /* CC COMPARE. */
34787 if (outer_code == COMPARE)
34788 {
34789 *total = 0;
34790 return true;
34791 }
34792 break;
34793
34794 default:
34795 break;
34796 }
34797
34798 return false;
34799 }
34800
34801 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34802
34803 static bool
34804 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34805 int opno, int *total, bool speed)
34806 {
34807 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34808
34809 fprintf (stderr,
34810 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34811 "opno = %d, total = %d, speed = %s, x:\n",
34812 ret ? "complete" : "scan inner",
34813 GET_MODE_NAME (mode),
34814 GET_RTX_NAME (outer_code),
34815 opno,
34816 *total,
34817 speed ? "true" : "false");
34818
34819 debug_rtx (x);
34820
34821 return ret;
34822 }
34823
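/* Compute a cost for INSN.  When optimizing for size, the cost is the
   encoded length of the insn.  Otherwise it is taken from the insn's
   "cost" attribute when that is set, and is otherwise derived from the
   insn's type and length and the current tuning's cost table.  */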
34824 static int
34825 rs6000_insn_cost (rtx_insn *insn, bool speed)
34826 {
34827 if (recog_memoized (insn) < 0)
34828 return 0;
34829
34830 if (!speed)
34831 return get_attr_length (insn);
34832
34833 int cost = get_attr_cost (insn);
34834 if (cost > 0)
34835 return cost;
34836
34837 int n = get_attr_length (insn) / 4;
34838 enum attr_type type = get_attr_type (insn);
34839
34840 switch (type)
34841 {
34842 case TYPE_LOAD:
34843 case TYPE_FPLOAD:
34844 case TYPE_VECLOAD:
34845 cost = COSTS_N_INSNS (n + 1);
34846 break;
34847
34848 case TYPE_MUL:
34849 switch (get_attr_size (insn))
34850 {
34851 case SIZE_8:
34852 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34853 break;
34854 case SIZE_16:
34855 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34856 break;
34857 case SIZE_32:
34858 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34859 break;
34860 case SIZE_64:
34861 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34862 break;
34863 default:
34864 gcc_unreachable ();
34865 }
34866 break;
34867 case TYPE_DIV:
34868 switch (get_attr_size (insn))
34869 {
34870 case SIZE_32:
34871 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34872 break;
34873 case SIZE_64:
34874 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34875 break;
34876 default:
34877 gcc_unreachable ();
34878 }
34879 break;
34880
34881 case TYPE_FP:
34882 cost = n * rs6000_cost->fp;
34883 break;
34884 case TYPE_DMUL:
34885 cost = n * rs6000_cost->dmul;
34886 break;
34887 case TYPE_SDIV:
34888 cost = n * rs6000_cost->sdiv;
34889 break;
34890 case TYPE_DDIV:
34891 cost = n * rs6000_cost->ddiv;
34892 break;
34893
34894 case TYPE_SYNC:
34895 case TYPE_LOAD_L:
34896 case TYPE_MFCR:
34897 case TYPE_MFCRF:
34898 cost = COSTS_N_INSNS (n + 2);
34899 break;
34900
34901 default:
34902 cost = COSTS_N_INSNS (n);
34903 }
34904
34905 return cost;
34906 }
34907
34908 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34909
34910 static int
34911 rs6000_debug_address_cost (rtx x, machine_mode mode,
34912 addr_space_t as, bool speed)
34913 {
34914 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34915
34916 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34917 ret, speed ? "true" : "false");
34918 debug_rtx (x);
34919
34920 return ret;
34921 }
34922
34923
34924 /* A C expression returning the cost of moving data from a register of class
34925 CLASS1 to one of CLASS2. */
34926
34927 static int
34928 rs6000_register_move_cost (machine_mode mode,
34929 reg_class_t from, reg_class_t to)
34930 {
34931 int ret;
34932
34933 if (TARGET_DEBUG_COST)
34934 dbg_cost_ctrl++;
34935
34936 /* Moves from/to GENERAL_REGS. */
34937 if (reg_classes_intersect_p (to, GENERAL_REGS)
34938 || reg_classes_intersect_p (from, GENERAL_REGS))
34939 {
34940 reg_class_t rclass = from;
34941
34942 if (! reg_classes_intersect_p (to, GENERAL_REGS))
34943 rclass = to;
34944
34945 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34946 ret = (rs6000_memory_move_cost (mode, rclass, false)
34947 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34948
34949 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34950 shift. */
34951 else if (rclass == CR_REGS)
34952 ret = 4;
34953
34954 /* For those processors that have slow LR/CTR moves, make them more
34955 	 expensive than memory in order to bias spills to memory. */
34956 else if ((rs6000_tune == PROCESSOR_POWER6
34957 || rs6000_tune == PROCESSOR_POWER7
34958 || rs6000_tune == PROCESSOR_POWER8
34959 || rs6000_tune == PROCESSOR_POWER9)
34960 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
34961 ret = 6 * hard_regno_nregs (0, mode);
34962
34963 else
34964 /* A move will cost one instruction per GPR moved. */
34965 ret = 2 * hard_regno_nregs (0, mode);
34966 }
34967
34968 /* If we have VSX, we can easily move between FPR or Altivec registers. */
34969 else if (VECTOR_MEM_VSX_P (mode)
34970 && reg_classes_intersect_p (to, VSX_REGS)
34971 && reg_classes_intersect_p (from, VSX_REGS))
34972 ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34973
34974 /* Moving between two similar registers is just one instruction. */
34975 else if (reg_classes_intersect_p (to, from))
34976 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
34977
34978 /* Everything else has to go through GENERAL_REGS. */
34979 else
34980 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34981 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34982
34983 if (TARGET_DEBUG_COST)
34984 {
34985 if (dbg_cost_ctrl == 1)
34986 fprintf (stderr,
34987 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
34988 ret, GET_MODE_NAME (mode), reg_class_names[from],
34989 reg_class_names[to]);
34990 dbg_cost_ctrl--;
34991 }
34992
34993 return ret;
34994 }
34995
34996 /* A C expression returning the cost of moving data of MODE from a register to
34997 or from memory. */
34998
34999 static int
35000 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
35001 bool in ATTRIBUTE_UNUSED)
35002 {
35003 int ret;
35004
35005 if (TARGET_DEBUG_COST)
35006 dbg_cost_ctrl++;
35007
35008 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
35009 ret = 4 * hard_regno_nregs (0, mode);
35010 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
35011 || reg_classes_intersect_p (rclass, VSX_REGS)))
35012 ret = 4 * hard_regno_nregs (32, mode);
35013 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
35014 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
35015 else
35016 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
35017
35018 if (TARGET_DEBUG_COST)
35019 {
35020 if (dbg_cost_ctrl == 1)
35021 fprintf (stderr,
35022 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
35023 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
35024 dbg_cost_ctrl--;
35025 }
35026
35027 return ret;
35028 }
35029
35030 /* Returns a code for a target-specific builtin that implements
35031    the reciprocal of the function, or NULL_TREE if not available. */
35032
35033 static tree
35034 rs6000_builtin_reciprocal (tree fndecl)
35035 {
35036 switch (DECL_FUNCTION_CODE (fndecl))
35037 {
35038 case VSX_BUILTIN_XVSQRTDP:
35039 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
35040 return NULL_TREE;
35041
35042 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
35043
35044 case VSX_BUILTIN_XVSQRTSP:
35045 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
35046 return NULL_TREE;
35047
35048 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
35049
35050 default:
35051 return NULL_TREE;
35052 }
35053 }
35054
35055 /* Load up a constant. If the mode is a vector mode, splat the value across
35056 all of the vector elements. */
35057
35058 static rtx
35059 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
35060 {
35061 rtx reg;
35062
35063 if (mode == SFmode || mode == DFmode)
35064 {
35065 rtx d = const_double_from_real_value (dconst, mode);
35066 reg = force_reg (mode, d);
35067 }
35068 else if (mode == V4SFmode)
35069 {
35070 rtx d = const_double_from_real_value (dconst, SFmode);
35071 rtvec v = gen_rtvec (4, d, d, d, d);
35072 reg = gen_reg_rtx (mode);
35073 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35074 }
35075 else if (mode == V2DFmode)
35076 {
35077 rtx d = const_double_from_real_value (dconst, DFmode);
35078 rtvec v = gen_rtvec (2, d, d);
35079 reg = gen_reg_rtx (mode);
35080 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
35081 }
35082 else
35083 gcc_unreachable ();
35084
35085 return reg;
35086 }
35087
35088 /* Generate an FMA instruction. */
35089
35090 static void
35091 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
35092 {
35093 machine_mode mode = GET_MODE (target);
35094 rtx dst;
35095
35096 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
35097 gcc_assert (dst != NULL);
35098
35099 if (dst != target)
35100 emit_move_insn (target, dst);
35101 }
35102
35103 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
35104
35105 static void
35106 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
35107 {
35108 machine_mode mode = GET_MODE (dst);
35109 rtx r;
35110
35111 /* This is a tad more complicated, since the fnma_optab is for
35112 a different expression: fma(-m1, m2, a), which is the same
35113 thing except in the case of signed zeros.
35114
35115      Fortunately we know that if FMA is supported, then FNMSUB is
35116      also supported in the ISA.  Just expand it directly. */
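/* Concretely, -fma(m1, m2, -a) and fma(-m1, m2, a) both equal a - m1*m2,
   but when m1*m2 == a exactly, the first yields -(+0.0) = -0.0 while the
   second yields +0.0 (in round-to-nearest), so the two forms are not
   interchangeable in the presence of signed zeros.  */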
35117
35118 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
35119
35120 r = gen_rtx_NEG (mode, a);
35121 r = gen_rtx_FMA (mode, m1, m2, r);
35122 r = gen_rtx_NEG (mode, r);
35123 emit_insn (gen_rtx_SET (dst, r));
35124 }
35125
35126 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
35127 add a reg_note saying that this was a division. Support both scalar and
35128 vector divide. Assumes no trapping math and finite arguments. */
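/* A sketch of the recurrence implemented below: starting from the hardware
   reciprocal estimate x0 ~= 1/d (UNSPEC_FRES), each refinement step computes

       e  = 1 - d * x      (the current relative error)
       x' = x + e * x      (one Newton-Raphson step)
       e' = e * e          (the next error, since 1 - d*x' = e^2)

   and the final step folds in the numerator:

       u = n * x,  v = n - d * u,  dst = v * x + u  ~=  n/d.  */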
35129
35130 void
35131 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
35132 {
35133 machine_mode mode = GET_MODE (dst);
35134 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
35135 int i;
35136
35137 /* Low precision estimates guarantee 5 bits of accuracy. High
35138 precision estimates guarantee 14 bits of accuracy. SFmode
35139 requires 23 bits of accuracy. DFmode requires 52 bits of
35140 accuracy. Each pass at least doubles the accuracy, leading
35141 to the following. */
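  /* Concretely: a 5-bit estimate needs 5 -> 10 -> 20 -> 40 bits, i.e. three
     passes, to cover SFmode's 23 bits, plus one more pass (-> 80 bits) for
     DFmode's 52 bits; a 14-bit estimate needs only one pass (14 -> 28) for
     SFmode and two (14 -> 28 -> 56) for DFmode.  */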
35142 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35143 if (mode == DFmode || mode == V2DFmode)
35144 passes++;
35145
35146 enum insn_code code = optab_handler (smul_optab, mode);
35147 insn_gen_fn gen_mul = GEN_FCN (code);
35148
35149 gcc_assert (code != CODE_FOR_nothing);
35150
35151 one = rs6000_load_constant_and_splat (mode, dconst1);
35152
35153 /* x0 = 1./d estimate */
35154 x0 = gen_reg_rtx (mode);
35155 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
35156 UNSPEC_FRES)));
35157
35158 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
35159 if (passes > 1) {
35160
35161 /* e0 = 1. - d * x0 */
35162 e0 = gen_reg_rtx (mode);
35163 rs6000_emit_nmsub (e0, d, x0, one);
35164
35165 /* x1 = x0 + e0 * x0 */
35166 x1 = gen_reg_rtx (mode);
35167 rs6000_emit_madd (x1, e0, x0, x0);
35168
35169 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
35170 ++i, xprev = xnext, eprev = enext) {
35171
35172 /* enext = eprev * eprev */
35173 enext = gen_reg_rtx (mode);
35174 emit_insn (gen_mul (enext, eprev, eprev));
35175
35176 /* xnext = xprev + enext * xprev */
35177 xnext = gen_reg_rtx (mode);
35178 rs6000_emit_madd (xnext, enext, xprev, xprev);
35179 }
35180
35181 } else
35182 xprev = x0;
35183
35184 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
35185
35186 /* u = n * xprev */
35187 u = gen_reg_rtx (mode);
35188 emit_insn (gen_mul (u, n, xprev));
35189
35190 /* v = n - (d * u) */
35191 v = gen_reg_rtx (mode);
35192 rs6000_emit_nmsub (v, d, u, n);
35193
35194 /* dst = (v * xprev) + u */
35195 rs6000_emit_madd (dst, v, xprev, u);
35196
35197 if (note_p)
35198 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35199 }
35200
35201 /* Goldschmidt's Algorithm for single/double-precision floating point
35202 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
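/* A sketch of the iteration used below: from the hardware estimate
   e ~= 1/sqrt(src) (UNSPEC_RSQRT) we form

       g = e * src   ~=  sqrt(src)
       h = e * 1/2   ~=  1/(2*sqrt(src))

   and each Goldschmidt step computes the residual t = 1/2 - g*h (which
   tends to zero as g*h -> 1/2) and then updates

       g' = g + g*t,  h' = h + h*t.

   The sqrt result is the final g; for rsqrt the result is 2*h.  */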
35203
35204 void
35205 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35206 {
35207 machine_mode mode = GET_MODE (src);
35208 rtx e = gen_reg_rtx (mode);
35209 rtx g = gen_reg_rtx (mode);
35210 rtx h = gen_reg_rtx (mode);
35211
35212 /* Low precision estimates guarantee 5 bits of accuracy. High
35213 precision estimates guarantee 14 bits of accuracy. SFmode
35214 requires 23 bits of accuracy. DFmode requires 52 bits of
35215 accuracy. Each pass at least doubles the accuracy, leading
35216 to the following. */
35217 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35218 if (mode == DFmode || mode == V2DFmode)
35219 passes++;
35220
35221 int i;
35222 rtx mhalf;
35223 enum insn_code code = optab_handler (smul_optab, mode);
35224 insn_gen_fn gen_mul = GEN_FCN (code);
35225
35226 gcc_assert (code != CODE_FOR_nothing);
35227
35228 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35229
35230 /* e = rsqrt estimate */
35231 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35232 UNSPEC_RSQRT)));
35233
35234 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35235 if (!recip)
35236 {
35237 rtx zero = force_reg (mode, CONST0_RTX (mode));
35238
35239 if (mode == SFmode)
35240 {
35241 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35242 e, zero, mode, 0);
35243 if (target != e)
35244 emit_move_insn (e, target);
35245 }
35246 else
35247 {
35248 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35249 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35250 }
35251 }
35252
35253 /* g = sqrt estimate. */
35254 emit_insn (gen_mul (g, e, src));
35255 /* h = 1/(2*sqrt) estimate. */
35256 emit_insn (gen_mul (h, e, mhalf));
35257
35258 if (recip)
35259 {
35260 if (passes == 1)
35261 {
35262 rtx t = gen_reg_rtx (mode);
35263 rs6000_emit_nmsub (t, g, h, mhalf);
35264 /* Apply correction directly to 1/rsqrt estimate. */
35265 rs6000_emit_madd (dst, e, t, e);
35266 }
35267 else
35268 {
35269 for (i = 0; i < passes; i++)
35270 {
35271 rtx t1 = gen_reg_rtx (mode);
35272 rtx g1 = gen_reg_rtx (mode);
35273 rtx h1 = gen_reg_rtx (mode);
35274
35275 rs6000_emit_nmsub (t1, g, h, mhalf);
35276 rs6000_emit_madd (g1, g, t1, g);
35277 rs6000_emit_madd (h1, h, t1, h);
35278
35279 g = g1;
35280 h = h1;
35281 }
35282 /* Multiply by 2 for 1/rsqrt. */
35283 emit_insn (gen_add3_insn (dst, h, h));
35284 }
35285 }
35286 else
35287 {
35288 rtx t = gen_reg_rtx (mode);
35289 rs6000_emit_nmsub (t, g, h, mhalf);
35290 rs6000_emit_madd (dst, g, t, g);
35291 }
35292
35293 return;
35294 }
35295
35296 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35297 (Power7) targets. DST is the target, and SRC is the argument operand. */
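/* On the popcntb fallback path below, the per-byte population counts are
   summed with a multiply.  For example, for SRC = 0x000f00ff, popcntb
   gives 0x00040008; since each byte count is at most 8, multiplying by
   0x01010101 accumulates the byte sums without carries into the high
   byte (here 0x0c = 12), which the final right shift by 24 extracts.  */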
35298
35299 void
35300 rs6000_emit_popcount (rtx dst, rtx src)
35301 {
35302 machine_mode mode = GET_MODE (dst);
35303 rtx tmp1, tmp2;
35304
35305 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35306 if (TARGET_POPCNTD)
35307 {
35308 if (mode == SImode)
35309 emit_insn (gen_popcntdsi2 (dst, src));
35310 else
35311 emit_insn (gen_popcntddi2 (dst, src));
35312 return;
35313 }
35314
35315 tmp1 = gen_reg_rtx (mode);
35316
35317 if (mode == SImode)
35318 {
35319 emit_insn (gen_popcntbsi2 (tmp1, src));
35320 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35321 NULL_RTX, 0);
35322 tmp2 = force_reg (SImode, tmp2);
35323 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35324 }
35325 else
35326 {
35327 emit_insn (gen_popcntbdi2 (tmp1, src));
35328 tmp2 = expand_mult (DImode, tmp1,
35329 GEN_INT ((HOST_WIDE_INT)
35330 0x01010101 << 32 | 0x01010101),
35331 NULL_RTX, 0);
35332 tmp2 = force_reg (DImode, tmp2);
35333 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35334 }
35335 }
35336
35337
35338 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35339 target, and SRC is the argument operand. */
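/* Without prtyw/prtyd, the parity is derived from popcntb: the parity of a
   word is the low bit of its population count, and when the multiply is
   slow the per-byte counts are instead XOR-folded together (for SImode,
   x ^= x >> 16; x ^= x >> 8) before masking with 1.  */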
35340
35341 void
35342 rs6000_emit_parity (rtx dst, rtx src)
35343 {
35344 machine_mode mode = GET_MODE (dst);
35345 rtx tmp;
35346
35347 tmp = gen_reg_rtx (mode);
35348
35349 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35350 if (TARGET_CMPB)
35351 {
35352 if (mode == SImode)
35353 {
35354 emit_insn (gen_popcntbsi2 (tmp, src));
35355 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35356 }
35357 else
35358 {
35359 emit_insn (gen_popcntbdi2 (tmp, src));
35360 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35361 }
35362 return;
35363 }
35364
35365 if (mode == SImode)
35366 {
35367 /* Is mult+shift >= shift+xor+shift+xor? */
35368 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35369 {
35370 rtx tmp1, tmp2, tmp3, tmp4;
35371
35372 tmp1 = gen_reg_rtx (SImode);
35373 emit_insn (gen_popcntbsi2 (tmp1, src));
35374
35375 tmp2 = gen_reg_rtx (SImode);
35376 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35377 tmp3 = gen_reg_rtx (SImode);
35378 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35379
35380 tmp4 = gen_reg_rtx (SImode);
35381 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35382 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35383 }
35384 else
35385 rs6000_emit_popcount (tmp, src);
35386 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35387 }
35388 else
35389 {
35390 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35391 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35392 {
35393 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35394
35395 tmp1 = gen_reg_rtx (DImode);
35396 emit_insn (gen_popcntbdi2 (tmp1, src));
35397
35398 tmp2 = gen_reg_rtx (DImode);
35399 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35400 tmp3 = gen_reg_rtx (DImode);
35401 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35402
35403 tmp4 = gen_reg_rtx (DImode);
35404 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35405 tmp5 = gen_reg_rtx (DImode);
35406 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35407
35408 tmp6 = gen_reg_rtx (DImode);
35409 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35410 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35411 }
35412 else
35413 rs6000_emit_popcount (tmp, src);
35414 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35415 }
35416 }
35417
35418 /* Expand an Altivec constant permutation for little endian mode.
35419 OP0 and OP1 are the input vectors and TARGET is the output vector.
35420 SEL specifies the constant permutation vector.
35421
35422 There are two issues: First, the two input operands must be
35423 swapped so that together they form a double-wide array in LE
35424 order. Second, the vperm instruction has surprising behavior
35425 in LE mode: it interprets the elements of the source vectors
35426 in BE mode ("left to right") and interprets the elements of
35427 the destination vector in LE mode ("right to left"). To
35428 correct for this, we must subtract each element of the permute
35429 control vector from 31.
35430
35431 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35432 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35433 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35434 serve as the permute control vector. Then, in BE mode,
35435
35436 vperm 9,10,11,12
35437
35438 places the desired result in vr9. However, in LE mode the
35439 vector contents will be
35440
35441 vr10 = 00000003 00000002 00000001 00000000
35442 vr11 = 00000007 00000006 00000005 00000004
35443
35444 The result of the vperm using the same permute control vector is
35445
35446 vr9 = 05000000 07000000 01000000 03000000
35447
35448 That is, the leftmost 4 bytes of vr10 are interpreted as the
35449 source for the rightmost 4 bytes of vr9, and so on.
35450
35451 If we change the permute control vector to
35452
35453    vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35454
35455 and issue
35456
35457 vperm 9,11,10,12
35458
35459 we get the desired
35460
35461 vr9 = 00000006 00000004 00000002 00000000. */
35462
35463 static void
35464 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35465 const vec_perm_indices &sel)
35466 {
35467 unsigned int i;
35468 rtx perm[16];
35469 rtx constv, unspec;
35470
35471 /* Unpack and adjust the constant selector. */
35472 for (i = 0; i < 16; ++i)
35473 {
35474 unsigned int elt = 31 - (sel[i] & 31);
35475 perm[i] = GEN_INT (elt);
35476 }
35477
35478 /* Expand to a permute, swapping the inputs and using the
35479 adjusted selector. */
35480 if (!REG_P (op0))
35481 op0 = force_reg (V16QImode, op0);
35482 if (!REG_P (op1))
35483 op1 = force_reg (V16QImode, op1);
35484
35485 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35486 constv = force_reg (V16QImode, constv);
35487 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35488 UNSPEC_VPERM);
35489 if (!REG_P (target))
35490 {
35491 rtx tmp = gen_reg_rtx (V16QImode);
35492 emit_move_insn (tmp, unspec);
35493 unspec = tmp;
35494 }
35495
35496 emit_move_insn (target, unspec);
35497 }
35498
35499 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35500 permute control vector. But here it's not a constant, so we must
35501 generate a vector NAND or NOR to do the adjustment. */
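/* This works because vperm only examines the low five bits of each
   selector byte: ~elt = 255 - elt, and (255 - elt) mod 32 == 31 - (elt
   mod 32), so a bytewise complement performs the same 31 - elt
   adjustment that the constant case applies directly.  */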
35502
35503 void
35504 altivec_expand_vec_perm_le (rtx operands[4])
35505 {
35506 rtx notx, iorx, unspec;
35507 rtx target = operands[0];
35508 rtx op0 = operands[1];
35509 rtx op1 = operands[2];
35510 rtx sel = operands[3];
35511 rtx tmp = target;
35512 rtx norreg = gen_reg_rtx (V16QImode);
35513 machine_mode mode = GET_MODE (target);
35514
35515 /* Get everything in regs so the pattern matches. */
35516 if (!REG_P (op0))
35517 op0 = force_reg (mode, op0);
35518 if (!REG_P (op1))
35519 op1 = force_reg (mode, op1);
35520 if (!REG_P (sel))
35521 sel = force_reg (V16QImode, sel);
35522 if (!REG_P (target))
35523 tmp = gen_reg_rtx (mode);
35524
35525 if (TARGET_P9_VECTOR)
35526 {
35527 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35528 UNSPEC_VPERMR);
35529 }
35530 else
35531 {
35532 /* Invert the selector with a VNAND if available, else a VNOR.
35533 The VNAND is preferred for future fusion opportunities. */
35534 notx = gen_rtx_NOT (V16QImode, sel);
35535 iorx = (TARGET_P8_VECTOR
35536 ? gen_rtx_IOR (V16QImode, notx, notx)
35537 : gen_rtx_AND (V16QImode, notx, notx));
35538 emit_insn (gen_rtx_SET (norreg, iorx));
35539
35540 /* Permute with operands reversed and adjusted selector. */
35541 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35542 UNSPEC_VPERM);
35543 }
35544
35545 /* Copy into target, possibly by way of a register. */
35546 if (!REG_P (target))
35547 {
35548 emit_move_insn (tmp, unspec);
35549 unspec = tmp;
35550 }
35551
35552 emit_move_insn (target, unspec);
35553 }
35554
35555 /* Expand an Altivec constant permutation. Return true if we match
35556 an efficient implementation; false to fall back to VPERM.
35557
35558 OP0 and OP1 are the input vectors and TARGET is the output vector.
35559 SEL specifies the constant permutation vector. */
35560
35561 static bool
35562 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35563 const vec_perm_indices &sel)
35564 {
35565 struct altivec_perm_insn {
35566 HOST_WIDE_INT mask;
35567 enum insn_code impl;
35568 unsigned char perm[16];
35569 };
35570 static const struct altivec_perm_insn patterns[] = {
35571 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35572 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35573 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35574 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35575 { OPTION_MASK_ALTIVEC,
35576 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35577 : CODE_FOR_altivec_vmrglb_direct),
35578 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35579 { OPTION_MASK_ALTIVEC,
35580 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35581 : CODE_FOR_altivec_vmrglh_direct),
35582 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35583 { OPTION_MASK_ALTIVEC,
35584 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35585 : CODE_FOR_altivec_vmrglw_direct),
35586 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35587 { OPTION_MASK_ALTIVEC,
35588 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35589 : CODE_FOR_altivec_vmrghb_direct),
35590 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35591 { OPTION_MASK_ALTIVEC,
35592 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35593 : CODE_FOR_altivec_vmrghh_direct),
35594 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35595 { OPTION_MASK_ALTIVEC,
35596 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35597 : CODE_FOR_altivec_vmrghw_direct),
35598 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35599 { OPTION_MASK_P8_VECTOR,
35600 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35601 : CODE_FOR_p8_vmrgow_v4sf_direct),
35602 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35603 { OPTION_MASK_P8_VECTOR,
35604 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35605 : CODE_FOR_p8_vmrgew_v4sf_direct),
35606 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35607 };
35608
35609 unsigned int i, j, elt, which;
35610 unsigned char perm[16];
35611 rtx x;
35612 bool one_vec;
35613
35614 /* Unpack the constant selector. */
35615 for (i = which = 0; i < 16; ++i)
35616 {
35617 elt = sel[i] & 31;
35618 which |= (elt < 16 ? 1 : 2);
35619 perm[i] = elt;
35620 }
35621
35622 /* Simplify the constant selector based on operands. */
35623 switch (which)
35624 {
35625 default:
35626 gcc_unreachable ();
35627
35628 case 3:
35629 one_vec = false;
35630 if (!rtx_equal_p (op0, op1))
35631 break;
35632 /* FALLTHRU */
35633
35634 case 2:
35635 for (i = 0; i < 16; ++i)
35636 perm[i] &= 15;
35637 op0 = op1;
35638 one_vec = true;
35639 break;
35640
35641 case 1:
35642 op1 = op0;
35643 one_vec = true;
35644 break;
35645 }
35646
35647 /* Look for splat patterns. */
35648 if (one_vec)
35649 {
35650 elt = perm[0];
35651
35652 for (i = 0; i < 16; ++i)
35653 if (perm[i] != elt)
35654 break;
35655 if (i == 16)
35656 {
35657 if (!BYTES_BIG_ENDIAN)
35658 elt = 15 - elt;
35659 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35660 return true;
35661 }
35662
35663 if (elt % 2 == 0)
35664 {
35665 for (i = 0; i < 16; i += 2)
35666 if (perm[i] != elt || perm[i + 1] != elt + 1)
35667 break;
35668 if (i == 16)
35669 {
35670 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35671 x = gen_reg_rtx (V8HImode);
35672 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35673 GEN_INT (field)));
35674 emit_move_insn (target, gen_lowpart (V16QImode, x));
35675 return true;
35676 }
35677 }
35678
35679 if (elt % 4 == 0)
35680 {
35681 for (i = 0; i < 16; i += 4)
35682 if (perm[i] != elt
35683 || perm[i + 1] != elt + 1
35684 || perm[i + 2] != elt + 2
35685 || perm[i + 3] != elt + 3)
35686 break;
35687 if (i == 16)
35688 {
35689 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35690 x = gen_reg_rtx (V4SImode);
35691 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35692 GEN_INT (field)));
35693 emit_move_insn (target, gen_lowpart (V16QImode, x));
35694 return true;
35695 }
35696 }
35697 }
35698
35699 /* Look for merge and pack patterns. */
35700 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35701 {
35702 bool swapped;
35703
35704 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35705 continue;
35706
35707 elt = patterns[j].perm[0];
35708 if (perm[0] == elt)
35709 swapped = false;
35710 else if (perm[0] == elt + 16)
35711 swapped = true;
35712 else
35713 continue;
35714 for (i = 1; i < 16; ++i)
35715 {
35716 elt = patterns[j].perm[i];
35717 if (swapped)
35718 elt = (elt >= 16 ? elt - 16 : elt + 16);
35719 else if (one_vec && elt >= 16)
35720 elt -= 16;
35721 if (perm[i] != elt)
35722 break;
35723 }
35724 if (i == 16)
35725 {
35726 enum insn_code icode = patterns[j].impl;
35727 machine_mode omode = insn_data[icode].operand[0].mode;
35728 machine_mode imode = insn_data[icode].operand[1].mode;
35729
35730 /* For little-endian, don't use vpkuwum and vpkuhum if the
35731 underlying vector type is not V4SI and V8HI, respectively.
35732 For example, using vpkuwum with a V8HI picks up the even
35733 halfwords (BE numbering) when the even halfwords (LE
35734 numbering) are what we need. */
35735 if (!BYTES_BIG_ENDIAN
35736 && icode == CODE_FOR_altivec_vpkuwum_direct
35737 && ((REG_P (op0)
35738 && GET_MODE (op0) != V4SImode)
35739 || (SUBREG_P (op0)
35740 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35741 continue;
35742 if (!BYTES_BIG_ENDIAN
35743 && icode == CODE_FOR_altivec_vpkuhum_direct
35744 && ((REG_P (op0)
35745 && GET_MODE (op0) != V8HImode)
35746 || (SUBREG_P (op0)
35747 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35748 continue;
35749
35750 /* For little-endian, the two input operands must be swapped
35751 (or swapped back) to ensure proper right-to-left numbering
35752 from 0 to 2N-1. */
35753 if (swapped ^ !BYTES_BIG_ENDIAN)
35754 std::swap (op0, op1);
35755 if (imode != V16QImode)
35756 {
35757 op0 = gen_lowpart (imode, op0);
35758 op1 = gen_lowpart (imode, op1);
35759 }
35760 if (omode == V16QImode)
35761 x = target;
35762 else
35763 x = gen_reg_rtx (omode);
35764 emit_insn (GEN_FCN (icode) (x, op0, op1));
35765 if (omode != V16QImode)
35766 emit_move_insn (target, gen_lowpart (V16QImode, x));
35767 return true;
35768 }
35769 }
35770
35771 if (!BYTES_BIG_ENDIAN)
35772 {
35773 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35774 return true;
35775 }
35776
35777 return false;
35778 }
35779
35780 /* Expand a VSX Permute Doubleword constant permutation.
35781 Return true if we match an efficient implementation. */
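/* Each two-bit selector indexes the four doublewords of the op0:op1
   concatenation: bit 1 picks the operand and bit 0 picks the doubleword
   within it.  For example, perm0 = 0, perm1 = 3 selects the first
   doubleword of op0 and the second doubleword of op1, which maps onto a
   single doubleword-permute insn.  */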
35782
35783 static bool
35784 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35785 unsigned char perm0, unsigned char perm1)
35786 {
35787 rtx x;
35788
35789 /* If both selectors come from the same operand, fold to single op. */
35790 if ((perm0 & 2) == (perm1 & 2))
35791 {
35792 if (perm0 & 2)
35793 op0 = op1;
35794 else
35795 op1 = op0;
35796 }
35797 /* If both operands are equal, fold to simpler permutation. */
35798 if (rtx_equal_p (op0, op1))
35799 {
35800 perm0 = perm0 & 1;
35801 perm1 = (perm1 & 1) + 2;
35802 }
35803 /* If the first selector comes from the second operand, swap. */
35804 else if (perm0 & 2)
35805 {
35806 if (perm1 & 2)
35807 return false;
35808 perm0 -= 2;
35809 perm1 += 2;
35810 std::swap (op0, op1);
35811 }
35812 /* If the second selector does not come from the second operand, fail. */
35813 else if ((perm1 & 2) == 0)
35814 return false;
35815
35816 /* Success! */
35817 if (target != NULL)
35818 {
35819 machine_mode vmode, dmode;
35820 rtvec v;
35821
35822 vmode = GET_MODE (target);
35823 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35824 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35825 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35826 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35827 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35828 emit_insn (gen_rtx_SET (target, x));
35829 }
35830 return true;
35831 }
35832
35833 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35834
35835 static bool
35836 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35837 rtx op1, const vec_perm_indices &sel)
35838 {
35839 bool testing_p = !target;
35840
35841 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35842 if (TARGET_ALTIVEC && testing_p)
35843 return true;
35844
35845 /* Check for ps_merge* or xxpermdi insns. */
35846 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35847 {
35848 if (testing_p)
35849 {
35850 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35851 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35852 }
35853 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35854 return true;
35855 }
35856
35857 if (TARGET_ALTIVEC)
35858 {
35859 /* Force the target-independent code to lower to V16QImode. */
35860 if (vmode != V16QImode)
35861 return false;
35862 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35863 return true;
35864 }
35865
35866 return false;
35867 }
35868
35869 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35870 OP0 and OP1 are the input vectors and TARGET is the output vector.
35871 PERM specifies the constant permutation vector. */
35872
35873 static void
35874 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35875 machine_mode vmode, const vec_perm_builder &perm)
35876 {
35877 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35878 if (x != target)
35879 emit_move_insn (target, x);
35880 }
35881
35882 /* Expand an extract even operation. */
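/* E.g. for V4SImode the permute built below is {0, 2, 4, 6}: the
   even-numbered elements of the double-wide op0:op1 concatenation.  */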
35883
35884 void
35885 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35886 {
35887 machine_mode vmode = GET_MODE (target);
35888 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35889 vec_perm_builder perm (nelt, nelt, 1);
35890
35891 for (i = 0; i < nelt; i++)
35892 perm.quick_push (i * 2);
35893
35894 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35895 }
35896
35897 /* Expand a vector interleave operation. */
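/* E.g. for V4SImode, HIGHP selects {0, 4, 1, 5} (interleaving the first
   halves of op0 and op1) and !HIGHP selects {2, 6, 3, 7} (the second
   halves).  */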
35898
35899 void
35900 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35901 {
35902 machine_mode vmode = GET_MODE (target);
35903 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35904 vec_perm_builder perm (nelt, nelt, 1);
35905
35906 high = (highp ? 0 : nelt / 2);
35907 for (i = 0; i < nelt / 2; i++)
35908 {
35909 perm.quick_push (i + high);
35910 perm.quick_push (i + nelt + high);
35911 }
35912
35913 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35914 }
35915
35916 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35917 void
35918 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35919 {
35920 HOST_WIDE_INT hwi_scale (scale);
35921 REAL_VALUE_TYPE r_pow;
35922 rtvec v = rtvec_alloc (2);
35923 rtx elt;
35924 rtx scale_vec = gen_reg_rtx (V2DFmode);
35925 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35926 elt = const_double_from_real_value (r_pow, DFmode);
35927 RTVEC_ELT (v, 0) = elt;
35928 RTVEC_ELT (v, 1) = elt;
35929 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35930 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35931 }
35932
35933 /* Return an RTX representing where to find the function value of a
35934 function returning MODE. */
35935 static rtx
35936 rs6000_complex_function_value (machine_mode mode)
35937 {
35938 unsigned int regno;
35939 rtx r1, r2;
35940 machine_mode inner = GET_MODE_INNER (mode);
35941 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35942
35943 if (TARGET_FLOAT128_TYPE
35944 && (mode == KCmode
35945 || (mode == TCmode && TARGET_IEEEQUAD)))
35946 regno = ALTIVEC_ARG_RETURN;
35947
35948 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35949 regno = FP_ARG_RETURN;
35950
35951 else
35952 {
35953 regno = GP_ARG_RETURN;
35954
35955 /* 32-bit is OK since it'll go in r3/r4. */
35956 if (TARGET_32BIT && inner_bytes >= 4)
35957 return gen_rtx_REG (mode, regno);
35958 }
35959
35960 if (inner_bytes >= 8)
35961 return gen_rtx_REG (mode, regno);
35962
35963 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35964 const0_rtx);
35965 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35966 GEN_INT (inner_bytes));
35967 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35968 }
35969
35970 /* Return an rtx describing a return value of MODE as a PARALLEL
35971 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35972 stride REG_STRIDE. */
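/* For example, splitting a DImode value across r3/r4 for the 32-bit ABI
   uses rs6000_parallel_return (DImode, 2, SImode, GP_ARG_RETURN, 1),
   producing

       (parallel:DI [(expr_list (reg:SI 3) (const_int 0))
                     (expr_list (reg:SI 4) (const_int 4))]).  */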
35973
35974 static rtx
35975 rs6000_parallel_return (machine_mode mode,
35976 int n_elts, machine_mode elt_mode,
35977 unsigned int regno, unsigned int reg_stride)
35978 {
35979 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35980
35981 int i;
35982 for (i = 0; i < n_elts; i++)
35983 {
35984 rtx r = gen_rtx_REG (elt_mode, regno);
35985 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35986 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35987 regno += reg_stride;
35988 }
35989
35990 return par;
35991 }
35992
35993 /* Target hook for TARGET_FUNCTION_VALUE.
35994
35995 An integer value is in r3 and a floating-point value is in fp1,
35996 unless -msoft-float. */
35997
35998 static rtx
35999 rs6000_function_value (const_tree valtype,
36000 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
36001 bool outgoing ATTRIBUTE_UNUSED)
36002 {
36003 machine_mode mode;
36004 unsigned int regno;
36005 machine_mode elt_mode;
36006 int n_elts;
36007
36008 /* Special handling for structs in darwin64. */
36009 if (TARGET_MACHO
36010 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
36011 {
36012 CUMULATIVE_ARGS valcum;
36013 rtx valret;
36014
36015 valcum.words = 0;
36016 valcum.fregno = FP_ARG_MIN_REG;
36017 valcum.vregno = ALTIVEC_ARG_MIN_REG;
36018 /* Do a trial code generation as if this were going to be passed as
36019 an argument; if any part goes in memory, we return NULL. */
36020 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
36021 if (valret)
36022 return valret;
36023 /* Otherwise fall through to standard ABI rules. */
36024 }
36025
36026 mode = TYPE_MODE (valtype);
36027
36028 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
36029 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
36030 {
36031 int first_reg, n_regs;
36032
36033 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
36034 {
36035 /* _Decimal128 must use even/odd register pairs. */
36036 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36037 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
36038 }
36039 else
36040 {
36041 first_reg = ALTIVEC_ARG_RETURN;
36042 n_regs = 1;
36043 }
36044
36045 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
36046 }
36047
36048   /* Some return value types need to be split for the 32-bit ABI with -mpowerpc64. */
36049 if (TARGET_32BIT && TARGET_POWERPC64)
36050 switch (mode)
36051 {
36052 default:
36053 break;
36054 case E_DImode:
36055 case E_SCmode:
36056 case E_DCmode:
36057 case E_TCmode:
36058 int count = GET_MODE_SIZE (mode) / 4;
36059 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
36060 }
36061
36062 if ((INTEGRAL_TYPE_P (valtype)
36063 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
36064 || POINTER_TYPE_P (valtype))
36065 mode = TARGET_32BIT ? SImode : DImode;
36066
36067 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36068 /* _Decimal128 must use an even/odd register pair. */
36069 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36070 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
36071 && !FLOAT128_VECTOR_P (mode))
36072 regno = FP_ARG_RETURN;
36073 else if (TREE_CODE (valtype) == COMPLEX_TYPE
36074 && targetm.calls.split_complex_arg)
36075 return rs6000_complex_function_value (mode);
36076 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36077 return register is used in both cases, and we won't see V2DImode/V2DFmode
36078 for pure altivec, combine the two cases. */
36079 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
36080 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
36081 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
36082 regno = ALTIVEC_ARG_RETURN;
36083 else
36084 regno = GP_ARG_RETURN;
36085
36086 return gen_rtx_REG (mode, regno);
36087 }
36088
36089 /* Define how to find the value returned by a library function
36090 assuming the value has mode MODE. */
36091 rtx
36092 rs6000_libcall_value (machine_mode mode)
36093 {
36094 unsigned int regno;
36095
36096   /* Long long return values need to be split for the 32-bit ABI with -mpowerpc64. */
36097 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
36098 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
36099
36100 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
36101 /* _Decimal128 must use an even/odd register pair. */
36102 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
36103 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
36104 regno = FP_ARG_RETURN;
36105 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
36106 return register is used in both cases, and we won't see V2DImode/V2DFmode
36107 for pure altivec, combine the two cases. */
36108 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
36109 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
36110 regno = ALTIVEC_ARG_RETURN;
36111 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
36112 return rs6000_complex_function_value (mode);
36113 else
36114 regno = GP_ARG_RETURN;
36115
36116 return gen_rtx_REG (mode, regno);
36117 }
36118
36119 /* Compute register pressure classes. We implement the target hook to avoid
36120 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
36121    lead to incorrect estimates of the number of available registers and therefore
36122    increased register pressure and spilling. */
36123 static int
36124 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
36125 {
36126 int n;
36127
36128 n = 0;
36129 pressure_classes[n++] = GENERAL_REGS;
36130 if (TARGET_VSX)
36131 pressure_classes[n++] = VSX_REGS;
36132 else
36133 {
36134 if (TARGET_ALTIVEC)
36135 pressure_classes[n++] = ALTIVEC_REGS;
36136 if (TARGET_HARD_FLOAT)
36137 pressure_classes[n++] = FLOAT_REGS;
36138 }
36139 pressure_classes[n++] = CR_REGS;
36140 pressure_classes[n++] = SPECIAL_REGS;
36141
36142 return n;
36143 }
36144
36145 /* Given FROM and TO register numbers, say whether this elimination is allowed.
36146 Frame pointer elimination is automatically handled.
36147
36148 For the RS/6000, if frame pointer elimination is being done, we would like
36149 to convert ap into fp, not sp.
36150
36151 We need r30 if -mminimal-toc was specified, and there are constant pool
36152 references. */
36153
36154 static bool
36155 rs6000_can_eliminate (const int from, const int to)
36156 {
36157 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
36158 ? ! frame_pointer_needed
36159 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
36160 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
36161 || constant_pool_empty_p ()
36162 : true);
36163 }
36164
36165 /* Define the offset between two registers, FROM to be eliminated and its
36166 replacement TO, at the start of a routine. */
36167 HOST_WIDE_INT
36168 rs6000_initial_elimination_offset (int from, int to)
36169 {
36170 rs6000_stack_t *info = rs6000_stack_info ();
36171 HOST_WIDE_INT offset;
36172
36173 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36174 offset = info->push_p ? 0 : -info->total_size;
36175 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36176 {
36177 offset = info->push_p ? 0 : -info->total_size;
36178 if (FRAME_GROWS_DOWNWARD)
36179 offset += info->fixed_size + info->vars_size + info->parm_size;
36180 }
36181 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36182 offset = FRAME_GROWS_DOWNWARD
36183 ? info->fixed_size + info->vars_size + info->parm_size
36184 : 0;
36185 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
36186 offset = info->total_size;
36187 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
36188 offset = info->push_p ? info->total_size : 0;
36189 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
36190 offset = 0;
36191 else
36192 gcc_unreachable ();
36193
36194 return offset;
36195 }
36196
36197 /* Fill in the sizes of the registers used by the unwinder. */
36198
36199 static void
36200 rs6000_init_dwarf_reg_sizes_extra (tree address)
36201 {
36202 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36203 {
36204 int i;
36205 machine_mode mode = TYPE_MODE (char_type_node);
36206 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36207 rtx mem = gen_rtx_MEM (BLKmode, addr);
36208 rtx value = gen_int_mode (16, mode);
36209
36210 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36211 The unwinder still needs to know the size of Altivec registers. */
36212
36213 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36214 {
36215 int column = DWARF_REG_TO_UNWIND_COLUMN
36216 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36217 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36218
36219 emit_move_insn (adjust_address (mem, mode, offset), value);
36220 }
36221 }
36222 }
36223
36224 /* Map internal gcc register numbers to debug format register numbers.
36225 FORMAT specifies the type of debug register number to use:
36226 0 -- debug information, except for frame-related sections
36227 1 -- DWARF .debug_frame section
36228 2 -- DWARF .eh_frame section */
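/* For example, on a platform defining RS6000_USE_DWARF_NUMBERING, the link
   register maps to 108 in .debug_info and .debug_frame, while .eh_frame
   (format 2) keeps the internal register number.  */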
36229
36230 unsigned int
36231 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36232 {
36233 /* Except for the above, we use the internal number for non-DWARF
36234 debug information, and also for .eh_frame. */
36235 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
36236 return regno;
36237
36238 /* On some platforms, we use the standard DWARF register
36239 numbering for .debug_info and .debug_frame. */
36240 #ifdef RS6000_USE_DWARF_NUMBERING
36241 if (regno <= 63)
36242 return regno;
36243 if (regno == LR_REGNO)
36244 return 108;
36245 if (regno == CTR_REGNO)
36246 return 109;
36247 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36248 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36249 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36250 to the DWARF reg for CR. */
36251 if (format == 1 && regno == CR2_REGNO)
36252 return 64;
36253 if (CR_REGNO_P (regno))
36254 return regno - CR0_REGNO + 86;
36255 if (regno == CA_REGNO)
36256 return 101; /* XER */
36257 if (ALTIVEC_REGNO_P (regno))
36258 return regno - FIRST_ALTIVEC_REGNO + 1124;
36259 if (regno == VRSAVE_REGNO)
36260 return 356;
36261 if (regno == VSCR_REGNO)
36262 return 67;
36263 #endif
36264 return regno;
36265 }
36266
36267 /* Target hook for eh_return_filter_mode. */
36268 static scalar_int_mode
36269 rs6000_eh_return_filter_mode (void)
36270 {
36271 return TARGET_32BIT ? SImode : word_mode;
36272 }
36273
36274 /* Target hook for translate_mode_attribute. */
36275 static machine_mode
36276 rs6000_translate_mode_attribute (machine_mode mode)
36277 {
36278 if ((FLOAT128_IEEE_P (mode)
36279 && ieee128_float_type_node == long_double_type_node)
36280 || (FLOAT128_IBM_P (mode)
36281 && ibm128_float_type_node == long_double_type_node))
36282 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36283 return mode;
36284 }
36285
36286 /* Target hook for scalar_mode_supported_p. */
36287 static bool
36288 rs6000_scalar_mode_supported_p (scalar_mode mode)
36289 {
36290 /* -m32 does not support TImode. This is the default, from
36291 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36292 same ABI as for -m32. But default_scalar_mode_supported_p allows
36293 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36294 for -mpowerpc64. */
36295 if (TARGET_32BIT && mode == TImode)
36296 return false;
36297
36298 if (DECIMAL_FLOAT_MODE_P (mode))
36299 return default_decimal_float_supported_p ();
36300 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36301 return true;
36302 else
36303 return default_scalar_mode_supported_p (mode);
36304 }
36305
36306 /* Target hook for vector_mode_supported_p. */
36307 static bool
36308 rs6000_vector_mode_supported_p (machine_mode mode)
36309 {
36310 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36311 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36312 double-double. */
36313 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36314 return true;
36315
36316 else
36317 return false;
36318 }
36319
36320 /* Target hook for floatn_mode. */
36321 static opt_scalar_float_mode
36322 rs6000_floatn_mode (int n, bool extended)
36323 {
36324 if (extended)
36325 {
36326 switch (n)
36327 {
36328 case 32:
36329 return DFmode;
36330
36331 case 64:
36332 if (TARGET_FLOAT128_TYPE)
36333 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36334 else
36335 return opt_scalar_float_mode ();
36336
36337 case 128:
36338 return opt_scalar_float_mode ();
36339
36340 default:
36341 /* Those are the only valid _FloatNx types. */
36342 gcc_unreachable ();
36343 }
36344 }
36345 else
36346 {
36347 switch (n)
36348 {
36349 case 32:
36350 return SFmode;
36351
36352 case 64:
36353 return DFmode;
36354
36355 case 128:
36356 if (TARGET_FLOAT128_TYPE)
36357 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36358 else
36359 return opt_scalar_float_mode ();
36360
36361 default:
36362 return opt_scalar_float_mode ();
36363 }
36364 }
36366 }
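
/* A worked example of the mapping above (a sketch, assuming -mfloat128 on a
   target where long double is IBM double-double, so FLOAT128_IEEE_P (TFmode)
   is false):

     _Float32  -> SFmode      _Float32x  -> DFmode
     _Float64  -> DFmode      _Float64x  -> KFmode
     _Float128 -> KFmode      _Float128x -> (none)
*/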
36367
36368 /* Target hook for c_mode_for_suffix. */
36369 static machine_mode
36370 rs6000_c_mode_for_suffix (char suffix)
36371 {
36372 if (TARGET_FLOAT128_TYPE)
36373 {
36374 if (suffix == 'q' || suffix == 'Q')
36375 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36376
36377 /* At the moment, we are not defining a suffix for IBM extended double.
36378 If/when the default for -mabi=ieeelongdouble is changed, and we want
36379 to support __ibm128 constants in legacy library code, we may need to
36380 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36381 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36382 __float80 constants. */
36383 }
36384
36385 return VOIDmode;
36386 }
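
/* Usage sketch for the suffix above: with -mfloat128 enabled, a constant
   such as

     __float128 x = 1.0q;

   gets KFmode (or TFmode when long double is IEEE 128-bit); without
   -mfloat128 the front end rejects the suffix, since VOIDmode is returned.  */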
36387
36388 /* Target hook for invalid_arg_for_unprototyped_fn. */
36389 static const char *
36390 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36391 {
36392 return (!rs6000_darwin64_abi
36393 && typelist == 0
36394 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36395 && (funcdecl == NULL_TREE
36396 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36397 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36398 ? N_("AltiVec argument passed to unprototyped function")
36399 : NULL;
36400 }
36401
36402 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36403 setup by using __stack_chk_fail_local hidden function instead of
36404 calling __stack_chk_fail directly. Otherwise it is better to call
36405 __stack_chk_fail directly. */
36406
36407 static tree ATTRIBUTE_UNUSED
36408 rs6000_stack_protect_fail (void)
36409 {
36410 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36411 ? default_hidden_stack_protect_fail ()
36412 : default_external_stack_protect_fail ();
36413 }
36414
36415 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36416
36417 #if TARGET_ELF
36418 static unsigned HOST_WIDE_INT
36419 rs6000_asan_shadow_offset (void)
36420 {
36421 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36422 }
36423 #endif
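
/* For reference, AddressSanitizer applies the offset above with the generic
   8-to-1 shadow mapping (nothing specific to this file):

     shadow_address = (address >> 3) + rs6000_asan_shadow_offset ()

   so 64-bit uses an offset of 1 << 41 and 32-bit uses 1 << 29.  */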
36424 \f
36425 /* Mask options that we want to support inside of attribute((target)) and
36426 #pragma GCC target operations. Note, we do not include things like
36427 64/32-bit, endianness, hard/soft floating point, etc. that would have
36428 different calling sequences. */
36429
36430 struct rs6000_opt_mask {
36431 const char *name; /* option name */
36432 HOST_WIDE_INT mask; /* mask to set */
36433 bool invert; /* invert sense of mask */
36434 bool valid_target; /* option is a target option */
36435 };
36436
36437 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36438 {
36439 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36440 { "cmpb", OPTION_MASK_CMPB, false, true },
36441 { "crypto", OPTION_MASK_CRYPTO, false, true },
36442 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36443 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36444 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36445 false, true },
36446 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36447 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36448 { "fprnd", OPTION_MASK_FPRND, false, true },
36449 { "hard-dfp", OPTION_MASK_DFP, false, true },
36450 { "htm", OPTION_MASK_HTM, false, true },
36451 { "isel", OPTION_MASK_ISEL, false, true },
36452 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36453 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36454 { "modulo", OPTION_MASK_MODULO, false, true },
36455 { "mulhw", OPTION_MASK_MULHW, false, true },
36456 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36457 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36458 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36459 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36460 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36461 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36462 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36463 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36464 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36465 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36466 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36467 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36468 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36469 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36470 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36471 { "string", 0, false, true },
36472 { "update", OPTION_MASK_NO_UPDATE, true , true },
36473 { "vsx", OPTION_MASK_VSX, false, true },
36474 #ifdef OPTION_MASK_64BIT
36475 #if TARGET_AIX_OS
36476 { "aix64", OPTION_MASK_64BIT, false, false },
36477 { "aix32", OPTION_MASK_64BIT, true, false },
36478 #else
36479 { "64", OPTION_MASK_64BIT, false, false },
36480 { "32", OPTION_MASK_64BIT, true, false },
36481 #endif
36482 #endif
36483 #ifdef OPTION_MASK_EABI
36484 { "eabi", OPTION_MASK_EABI, false, false },
36485 #endif
36486 #ifdef OPTION_MASK_LITTLE_ENDIAN
36487 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36488 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36489 #endif
36490 #ifdef OPTION_MASK_RELOCATABLE
36491 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36492 #endif
36493 #ifdef OPTION_MASK_STRICT_ALIGN
36494 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36495 #endif
36496 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36497 { "string", 0, false, false },
36498 };
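
/* The names in the table above are what users write inside the target
   attribute or pragma, e.g. (illustrative):

     #pragma GCC target ("vsx,no-multiple")
     __attribute__ ((__target__ ("power9-vector"))) void foo (void);
*/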
36499
36500 /* Builtin mask mapping for printing the flags. */
36501 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36502 {
36503 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36504 { "vsx", RS6000_BTM_VSX, false, false },
36505 { "fre", RS6000_BTM_FRE, false, false },
36506 { "fres", RS6000_BTM_FRES, false, false },
36507 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36508 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36509 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36510 { "cell", RS6000_BTM_CELL, false, false },
36511 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36512 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36513 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36514 { "crypto", RS6000_BTM_CRYPTO, false, false },
36515 { "htm", RS6000_BTM_HTM, false, false },
36516 { "hard-dfp", RS6000_BTM_DFP, false, false },
36517 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36518 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36519 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36520 { "float128", RS6000_BTM_FLOAT128, false, false },
36521 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36522 };
36523
36524 /* Option variables that we want to support inside attribute((target)) and
36525 #pragma GCC target operations. */
36526
36527 struct rs6000_opt_var {
36528 const char *name; /* option name */
36529 size_t global_offset; /* offset of the option in global_options. */
36530 size_t target_offset; /* offset of the option in target options. */
36531 };
36532
36533 static struct rs6000_opt_var const rs6000_opt_vars[] =
36534 {
36535 { "friz",
36536 offsetof (struct gcc_options, x_TARGET_FRIZ),
36537 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36538 { "avoid-indexed-addresses",
36539 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36540 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36541 { "longcall",
36542 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36543 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36544 { "optimize-swaps",
36545 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36546 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36547 { "allow-movmisalign",
36548 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36549 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36550 { "sched-groups",
36551 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36552 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36553 { "always-hint",
36554 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36555 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36556 { "align-branch-targets",
36557 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36558 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36559 { "tls-markers",
36560 offsetof (struct gcc_options, x_tls_markers),
36561 offsetof (struct cl_target_option, x_tls_markers), },
36562 { "sched-prolog",
36563 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36564 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36565 { "sched-epilog",
36566 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36567 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36568 { "speculate-indirect-jumps",
36569 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36570 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36571 };
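
/* These variables accept the same "no-" prefix handling as the mask options
   above, e.g. __attribute__ ((target ("no-avoid-indexed-addresses"))).
   (Illustrative.)  */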
36572
36573 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36574 parsing. Return true if there were no errors. */
36575
36576 static bool
36577 rs6000_inner_target_options (tree args, bool attr_p)
36578 {
36579 bool ret = true;
36580
36581 if (args == NULL_TREE)
36582 ;
36583
36584 else if (TREE_CODE (args) == STRING_CST)
36585 {
36586 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36587 char *q;
36588
36589 while ((q = strtok (p, ",")) != NULL)
36590 {
36591 bool error_p = false;
36592 bool not_valid_p = false;
36593 const char *cpu_opt = NULL;
36594
36595 p = NULL;
36596 if (strncmp (q, "cpu=", 4) == 0)
36597 {
36598 int cpu_index = rs6000_cpu_name_lookup (q+4);
36599 if (cpu_index >= 0)
36600 rs6000_cpu_index = cpu_index;
36601 else
36602 {
36603 error_p = true;
36604 cpu_opt = q+4;
36605 }
36606 }
36607 else if (strncmp (q, "tune=", 5) == 0)
36608 {
36609 int tune_index = rs6000_cpu_name_lookup (q+5);
36610 if (tune_index >= 0)
36611 rs6000_tune_index = tune_index;
36612 else
36613 {
36614 error_p = true;
36615 cpu_opt = q+5;
36616 }
36617 }
36618 else
36619 {
36620 size_t i;
36621 bool invert = false;
36622 char *r = q;
36623
36624 error_p = true;
36625 if (strncmp (r, "no-", 3) == 0)
36626 {
36627 invert = true;
36628 r += 3;
36629 }
36630
36631 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36632 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36633 {
36634 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36635
36636 if (!rs6000_opt_masks[i].valid_target)
36637 not_valid_p = true;
36638 else
36639 {
36640 error_p = false;
36641 rs6000_isa_flags_explicit |= mask;
36642
36643 /* VSX needs altivec, so -mvsx automagically sets
36644 altivec and disables -mavoid-indexed-addresses. */
36645 if (!invert)
36646 {
36647 if (mask == OPTION_MASK_VSX)
36648 {
36649 mask |= OPTION_MASK_ALTIVEC;
36650 TARGET_AVOID_XFORM = 0;
36651 }
36652 }
36653
36654 if (rs6000_opt_masks[i].invert)
36655 invert = !invert;
36656
36657 if (invert)
36658 rs6000_isa_flags &= ~mask;
36659 else
36660 rs6000_isa_flags |= mask;
36661 }
36662 break;
36663 }
36664
36665 if (error_p && !not_valid_p)
36666 {
36667 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36668 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36669 {
36670 size_t j = rs6000_opt_vars[i].global_offset;
36671 *((int *) ((char *)&global_options + j)) = !invert;
36672 error_p = false;
36673 not_valid_p = false;
36674 break;
36675 }
36676 }
36677 }
36678
36679 if (error_p)
36680 {
36681 const char *eprefix, *esuffix;
36682
36683 ret = false;
36684 if (attr_p)
36685 {
36686 eprefix = "__attribute__((__target__(";
36687 esuffix = ")))";
36688 }
36689 else
36690 {
36691 eprefix = "#pragma GCC target ";
36692 esuffix = "";
36693 }
36694
36695 if (cpu_opt)
36696 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36697 q, esuffix);
36698 else if (not_valid_p)
36699 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36700 else
36701 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36702 }
36703 }
36704 }
36705
36706 else if (TREE_CODE (args) == TREE_LIST)
36707 {
36708 do
36709 {
36710 tree value = TREE_VALUE (args);
36711 if (value)
36712 {
36713 bool ret2 = rs6000_inner_target_options (value, attr_p);
36714 if (!ret2)
36715 ret = false;
36716 }
36717 args = TREE_CHAIN (args);
36718 }
36719 while (args != NULL_TREE);
36720 }
36721
36722 else
36723 {
36724 error ("attribute %<target%> argument not a string");
36725 return false;
36726 }
36727
36728 return ret;
36729 }
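
/* Examples of strings the parser above accepts or rejects (illustrative):

     "cpu=power9,htm"   sets rs6000_cpu_index and turns on OPTION_MASK_HTM
     "no-vsx"           clears OPTION_MASK_VSX
     "cpu=bogus"        error: invalid cpu
     "64"               rejected: its valid_target field is false  */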
36730
36731 /* Print out the target options as a list for -mdebug=target. */
36732
36733 static void
36734 rs6000_debug_target_options (tree args, const char *prefix)
36735 {
36736 if (args == NULL_TREE)
36737 fprintf (stderr, "%s<NULL>", prefix);
36738
36739 else if (TREE_CODE (args) == STRING_CST)
36740 {
36741 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36742 char *q;
36743
36744 while ((q = strtok (p, ",")) != NULL)
36745 {
36746 p = NULL;
36747 fprintf (stderr, "%s\"%s\"", prefix, q);
36748 prefix = ", ";
36749 }
36750 }
36751
36752 else if (TREE_CODE (args) == TREE_LIST)
36753 {
36754 do
36755 {
36756 tree value = TREE_VALUE (args);
36757 if (value)
36758 {
36759 rs6000_debug_target_options (value, prefix);
36760 prefix = ", ";
36761 }
36762 args = TREE_CHAIN (args);
36763 }
36764 while (args != NULL_TREE);
36765 }
36766
36767 else
36768 gcc_unreachable ();
36769
36770 return;
36771 }
36772
36773 \f
36774 /* Hook to validate attribute((target("..."))). */
36775
36776 static bool
36777 rs6000_valid_attribute_p (tree fndecl,
36778 tree ARG_UNUSED (name),
36779 tree args,
36780 int flags)
36781 {
36782 struct cl_target_option cur_target;
36783 bool ret;
36784 tree old_optimize;
36785 tree new_target, new_optimize;
36786 tree func_optimize;
36787
36788 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36789
36790 if (TARGET_DEBUG_TARGET)
36791 {
36792 tree tname = DECL_NAME (fndecl);
36793 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36794 if (tname)
36795 fprintf (stderr, "function: %.*s\n",
36796 (int) IDENTIFIER_LENGTH (tname),
36797 IDENTIFIER_POINTER (tname));
36798 else
36799 fprintf (stderr, "function: unknown\n");
36800
36801 fprintf (stderr, "args:");
36802 rs6000_debug_target_options (args, " ");
36803 fprintf (stderr, "\n");
36804
36805 if (flags)
36806 fprintf (stderr, "flags: 0x%x\n", flags);
36807
36808 fprintf (stderr, "--------------------\n");
36809 }
36810
36811 /* attribute((target("default"))) does nothing, beyond
36812 affecting multi-versioning. */
36813 if (TREE_VALUE (args)
36814 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36815 && TREE_CHAIN (args) == NULL_TREE
36816 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36817 return true;
36818
36819 old_optimize = build_optimization_node (&global_options);
36820 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36821
36822 /* If the function changed the optimization levels as well as setting target
36823 options, start with the optimizations specified. */
36824 if (func_optimize && func_optimize != old_optimize)
36825 cl_optimization_restore (&global_options,
36826 TREE_OPTIMIZATION (func_optimize));
36827
36828 /* The target attributes may also change some optimization flags, so update
36829 the optimization options if necessary. */
36830 cl_target_option_save (&cur_target, &global_options);
36831 rs6000_cpu_index = rs6000_tune_index = -1;
36832 ret = rs6000_inner_target_options (args, true);
36833
36834 /* Set up any additional state. */
36835 if (ret)
36836 {
36837 ret = rs6000_option_override_internal (false);
36838 new_target = build_target_option_node (&global_options);
36839 }
36840 else
36841 new_target = NULL;
36842
36843 new_optimize = build_optimization_node (&global_options);
36844
36845 if (!new_target)
36846 ret = false;
36847
36848 else if (fndecl)
36849 {
36850 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36851
36852 if (old_optimize != new_optimize)
36853 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36854 }
36855
36856 cl_target_option_restore (&global_options, &cur_target);
36857
36858 if (old_optimize != new_optimize)
36859 cl_optimization_restore (&global_options,
36860 TREE_OPTIMIZATION (old_optimize));
36861
36862 return ret;
36863 }
36864
36865 \f
36866 /* Hook to validate the current #pragma GCC target and set the state, and
36867 update the macros based on what was changed. If ARGS is NULL, then
36868 POP_TARGET is used to reset the options. */
36869
36870 bool
36871 rs6000_pragma_target_parse (tree args, tree pop_target)
36872 {
36873 tree prev_tree = build_target_option_node (&global_options);
36874 tree cur_tree;
36875 struct cl_target_option *prev_opt, *cur_opt;
36876 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36877 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36878
36879 if (TARGET_DEBUG_TARGET)
36880 {
36881 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36882 fprintf (stderr, "args:");
36883 rs6000_debug_target_options (args, " ");
36884 fprintf (stderr, "\n");
36885
36886 if (pop_target)
36887 {
36888 fprintf (stderr, "pop_target:\n");
36889 debug_tree (pop_target);
36890 }
36891 else
36892 fprintf (stderr, "pop_target: <NULL>\n");
36893
36894 fprintf (stderr, "--------------------\n");
36895 }
36896
36897 if (! args)
36898 {
36899 cur_tree = ((pop_target)
36900 ? pop_target
36901 : target_option_default_node);
36902 cl_target_option_restore (&global_options,
36903 TREE_TARGET_OPTION (cur_tree));
36904 }
36905 else
36906 {
36907 rs6000_cpu_index = rs6000_tune_index = -1;
36908 if (!rs6000_inner_target_options (args, false)
36909 || !rs6000_option_override_internal (false)
36910 || (cur_tree = build_target_option_node (&global_options))
36911 == NULL_TREE)
36912 {
36913 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36914 fprintf (stderr, "invalid pragma\n");
36915
36916 return false;
36917 }
36918 }
36919
36920 target_option_current_node = cur_tree;
36921 rs6000_activate_target_options (target_option_current_node);
36922
36923 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36924 change the macros that are defined. */
36925 if (rs6000_target_modify_macros_ptr)
36926 {
36927 prev_opt = TREE_TARGET_OPTION (prev_tree);
36928 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36929 prev_flags = prev_opt->x_rs6000_isa_flags;
36930
36931 cur_opt = TREE_TARGET_OPTION (cur_tree);
36932 cur_flags = cur_opt->x_rs6000_isa_flags;
36933 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36934
36935 diff_bumask = (prev_bumask ^ cur_bumask);
36936 diff_flags = (prev_flags ^ cur_flags);
36937
36938 if ((diff_flags != 0) || (diff_bumask != 0))
36939 {
36940 /* Delete old macros. */
36941 rs6000_target_modify_macros_ptr (false,
36942 prev_flags & diff_flags,
36943 prev_bumask & diff_bumask);
36944
36945 /* Define new macros. */
36946 rs6000_target_modify_macros_ptr (true,
36947 cur_flags & diff_flags,
36948 cur_bumask & diff_bumask);
36949 }
36950 }
36951
36952 return true;
36953 }
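
/* A usage sketch of the pragma handled above:

     #pragma GCC push_options
     #pragma GCC target ("altivec")
     vector int f (vector int a, vector int b) { return a + b; }
     #pragma GCC pop_options

   The pop arrives here with ARGS == NULL and POP_TARGET set to the saved
   options node.  */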
36954
36955 \f
36956 /* Remember the last target of rs6000_set_current_function. */
36957 static GTY(()) tree rs6000_previous_fndecl;
36958
36959 /* Restore target's globals from NEW_TREE and invalidate the
36960 rs6000_previous_fndecl cache. */
36961
36962 void
36963 rs6000_activate_target_options (tree new_tree)
36964 {
36965 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36966 if (TREE_TARGET_GLOBALS (new_tree))
36967 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36968 else if (new_tree == target_option_default_node)
36969 restore_target_globals (&default_target_globals);
36970 else
36971 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36972 rs6000_previous_fndecl = NULL_TREE;
36973 }
36974
36975 /* Establish appropriate back-end context for processing the function
36976 FNDECL. The argument might be NULL to indicate processing at top
36977 level, outside of any function scope. */
36978 static void
36979 rs6000_set_current_function (tree fndecl)
36980 {
36981 if (TARGET_DEBUG_TARGET)
36982 {
36983 fprintf (stderr, "\n==================== rs6000_set_current_function");
36984
36985 if (fndecl)
36986 fprintf (stderr, ", fndecl %s (%p)",
36987 (DECL_NAME (fndecl)
36988 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36989 : "<unknown>"), (void *)fndecl);
36990
36991 if (rs6000_previous_fndecl)
36992 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36993
36994 fprintf (stderr, "\n");
36995 }
36996
36997 /* Only change the context if the function changes. This hook is called
36998 several times in the course of compiling a function, and we don't want to
36999 slow things down too much or call target_reinit when it isn't safe. */
37000 if (fndecl == rs6000_previous_fndecl)
37001 return;
37002
37003 tree old_tree;
37004 if (rs6000_previous_fndecl == NULL_TREE)
37005 old_tree = target_option_current_node;
37006 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
37007 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
37008 else
37009 old_tree = target_option_default_node;
37010
37011 tree new_tree;
37012 if (fndecl == NULL_TREE)
37013 {
37014 if (old_tree != target_option_current_node)
37015 new_tree = target_option_current_node;
37016 else
37017 new_tree = NULL_TREE;
37018 }
37019 else
37020 {
37021 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37022 if (new_tree == NULL_TREE)
37023 new_tree = target_option_default_node;
37024 }
37025
37026 if (TARGET_DEBUG_TARGET)
37027 {
37028 if (new_tree)
37029 {
37030 fprintf (stderr, "\nnew fndecl target specific options:\n");
37031 debug_tree (new_tree);
37032 }
37033
37034 if (old_tree)
37035 {
37036 fprintf (stderr, "\nold fndecl target specific options:\n");
37037 debug_tree (old_tree);
37038 }
37039
37040 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
37041 fprintf (stderr, "--------------------\n");
37042 }
37043
37044 if (new_tree && old_tree != new_tree)
37045 rs6000_activate_target_options (new_tree);
37046
37047 if (fndecl)
37048 rs6000_previous_fndecl = fndecl;
37049 }
37050
37051 \f
37052 /* Save the current options */
37053
37054 static void
37055 rs6000_function_specific_save (struct cl_target_option *ptr,
37056 struct gcc_options *opts)
37057 {
37058 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
37059 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
37060 }
37061
37062 /* Restore the current options */
37063
37064 static void
37065 rs6000_function_specific_restore (struct gcc_options *opts,
37066 struct cl_target_option *ptr)
37067
37068 {
37069 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
37070 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
37071 (void) rs6000_option_override_internal (false);
37072 }
37073
37074 /* Print the current options */
37075
37076 static void
37077 rs6000_function_specific_print (FILE *file, int indent,
37078 struct cl_target_option *ptr)
37079 {
37080 rs6000_print_isa_options (file, indent, "Isa options set",
37081 ptr->x_rs6000_isa_flags);
37082
37083 rs6000_print_isa_options (file, indent, "Isa options explicit",
37084 ptr->x_rs6000_isa_flags_explicit);
37085 }
37086
37087 /* Helper function to print the current isa or misc options on a line. */
37088
37089 static void
37090 rs6000_print_options_internal (FILE *file,
37091 int indent,
37092 const char *string,
37093 HOST_WIDE_INT flags,
37094 const char *prefix,
37095 const struct rs6000_opt_mask *opts,
37096 size_t num_elements)
37097 {
37098 size_t i;
37099 size_t start_column = 0;
37100 size_t cur_column;
37101 size_t max_column = 120;
37102 size_t prefix_len = strlen (prefix);
37103 size_t comma_len = 0;
37104 const char *comma = "";
37105
37106 if (indent)
37107 start_column += fprintf (file, "%*s", indent, "");
37108
37109 if (!flags)
37110 {
37111 fprintf (file, DEBUG_FMT_S, string, "<none>");
37112 return;
37113 }
37114
37115 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
37116
37117 /* Print the various mask options. */
37118 cur_column = start_column;
37119 for (i = 0; i < num_elements; i++)
37120 {
37121 bool invert = opts[i].invert;
37122 const char *name = opts[i].name;
37123 const char *no_str = "";
37124 HOST_WIDE_INT mask = opts[i].mask;
37125 size_t len = comma_len + prefix_len + strlen (name);
37126
37127 if (!invert)
37128 {
37129 if ((flags & mask) == 0)
37130 {
37131 no_str = "no-";
37132 len += sizeof ("no-") - 1;
37133 }
37134
37135 flags &= ~mask;
37136 }
37137
37138 else
37139 {
37140 if ((flags & mask) != 0)
37141 {
37142 no_str = "no-";
37143 len += sizeof ("no-") - 1;
37144 }
37145
37146 flags |= mask;
37147 }
37148
37149 cur_column += len;
37150 if (cur_column > max_column)
37151 {
37152 fprintf (file, ", \\\n%*s", (int) start_column, "");
37153 cur_column = start_column + len;
37154 comma = "";
37155 }
37156
37157 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37158 comma = ", ";
37159 comma_len = sizeof (", ") - 1;
37160 }
37161
37162 fputs ("\n", file);
37163 }
37164
37165 /* Helper function to print the current isa options on a line. */
37166
37167 static void
37168 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37169 HOST_WIDE_INT flags)
37170 {
37171 rs6000_print_options_internal (file, indent, string, flags, "-m",
37172 &rs6000_opt_masks[0],
37173 ARRAY_SIZE (rs6000_opt_masks));
37174 }
37175
37176 static void
37177 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37178 HOST_WIDE_INT flags)
37179 {
37180 rs6000_print_options_internal (file, indent, string, flags, "",
37181 &rs6000_builtin_mask_names[0],
37182 ARRAY_SIZE (rs6000_builtin_mask_names));
37183 }
37184
37185 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37186 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37187 -mupper-regs-df, etc.).
37188
37189 If the user used -mno-power8-vector, we need to turn off all of the implicit
37190 ISA 2.07 and 3.0 options that relate to the vector unit.
37191
37192 If the user used -mno-power9-vector, we need to turn off all of the implicit
37193 ISA 3.0 options that relate to the vector unit.
37194
37195 This function does not handle explicit options such as the user specifying
37196 -mdirect-move. These are handled in rs6000_option_override_internal, and
37197 the appropriate error is given if needed.
37198
37199 We return a mask of all of the implicit options that should not be enabled
37200 by default. */
37201
37202 static HOST_WIDE_INT
37203 rs6000_disable_incompatible_switches (void)
37204 {
37205 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37206 size_t i, j;
37207
37208 static const struct {
37209 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37210 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37211 const char *const name; /* name of the switch. */
37212 } flags[] = {
37213 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37214 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37215 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37216 };
37217
37218 for (i = 0; i < ARRAY_SIZE (flags); i++)
37219 {
37220 HOST_WIDE_INT no_flag = flags[i].no_flag;
37221
37222 if ((rs6000_isa_flags & no_flag) == 0
37223 && (rs6000_isa_flags_explicit & no_flag) != 0)
37224 {
37225 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37226 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37227 & rs6000_isa_flags
37228 & dep_flags);
37229
37230 if (set_flags)
37231 {
37232 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37233 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37234 {
37235 set_flags &= ~rs6000_opt_masks[j].mask;
37236 error ("%<-mno-%s%> turns off %<-m%s%>",
37237 flags[i].name,
37238 rs6000_opt_masks[j].name);
37239 }
37240
37241 gcc_assert (!set_flags);
37242 }
37243
37244 rs6000_isa_flags &= ~dep_flags;
37245 ignore_masks |= no_flag | dep_flags;
37246 }
37247 }
37248
37249 return ignore_masks;
37250 }
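
/* Example of the interaction handled above (illustrative): compiling with
   -mno-vsx -mpower9-vector reports

     error: '-mno-vsx' turns off '-mpower9-vector'

   because OPTION_MASK_P9_VECTOR is one of the OTHER_VSX_VECTOR_MASKS
   dependents of VSX.  */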
37251
37252 \f
37253 /* Helper function for printing the function name when debugging. */
37254
37255 static const char *
37256 get_decl_name (tree fn)
37257 {
37258 tree name;
37259
37260 if (!fn)
37261 return "<null>";
37262
37263 name = DECL_NAME (fn);
37264 if (!name)
37265 return "<no-name>";
37266
37267 return IDENTIFIER_POINTER (name);
37268 }
37269
37270 /* Return the clone id of the target we are compiling code for in a target
37271 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37272 the priority list for the target clones (ordered from lowest to
37273 highest). */
37274
37275 static int
37276 rs6000_clone_priority (tree fndecl)
37277 {
37278 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37279 HOST_WIDE_INT isa_masks;
37280 int ret = CLONE_DEFAULT;
37281 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37282 const char *attrs_str = NULL;
37283
37284 attrs = TREE_VALUE (TREE_VALUE (attrs));
37285 attrs_str = TREE_STRING_POINTER (attrs);
37286
37287 /* Return priority zero for default function. Return the ISA needed for the
37288 function if it is not the default. */
37289 if (strcmp (attrs_str, "default") != 0)
37290 {
37291 if (fn_opts == NULL_TREE)
37292 fn_opts = target_option_default_node;
37293
37294 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37295 isa_masks = rs6000_isa_flags;
37296 else
37297 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37298
37299 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37300 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37301 break;
37302 }
37303
37304 if (TARGET_DEBUG_TARGET)
37305 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37306 get_decl_name (fndecl), ret);
37307
37308 return ret;
37309 }
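
/* The priorities above drive function multiversioning; a typical use is
   (a sketch):

     __attribute__ ((target_clones ("cpu=power9", "cpu=power8", "default")))
     long foo (long a) { return a * 3; }

   Each clone gets the highest CLONE_* index whose ISA mask its target
   options include.  */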
37310
37311 /* This compares the priority of target features in function DECL1 and DECL2.
37312 It returns positive value if DECL1 is higher priority, negative value if
37313 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37314 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37315
37316 static int
37317 rs6000_compare_version_priority (tree decl1, tree decl2)
37318 {
37319 int priority1 = rs6000_clone_priority (decl1);
37320 int priority2 = rs6000_clone_priority (decl2);
37321 int ret = priority1 - priority2;
37322
37323 if (TARGET_DEBUG_TARGET)
37324 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37325 get_decl_name (decl1), get_decl_name (decl2), ret);
37326
37327 return ret;
37328 }
37329
37330 /* Make a dispatcher declaration for the multi-versioned function DECL.
37331 Calls to DECL function will be replaced with calls to the dispatcher
37332 by the front-end. Returns the decl of the dispatcher function. */
37333
37334 static tree
37335 rs6000_get_function_versions_dispatcher (void *decl)
37336 {
37337 tree fn = (tree) decl;
37338 struct cgraph_node *node = NULL;
37339 struct cgraph_node *default_node = NULL;
37340 struct cgraph_function_version_info *node_v = NULL;
37341 struct cgraph_function_version_info *first_v = NULL;
37342
37343 tree dispatch_decl = NULL;
37344
37345 struct cgraph_function_version_info *default_version_info = NULL;
37346 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37347
37348 if (TARGET_DEBUG_TARGET)
37349 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37350 get_decl_name (fn));
37351
37352 node = cgraph_node::get (fn);
37353 gcc_assert (node != NULL);
37354
37355 node_v = node->function_version ();
37356 gcc_assert (node_v != NULL);
37357
37358 if (node_v->dispatcher_resolver != NULL)
37359 return node_v->dispatcher_resolver;
37360
37361 /* Find the default version and make it the first node. */
37362 first_v = node_v;
37363 /* Go to the beginning of the chain. */
37364 while (first_v->prev != NULL)
37365 first_v = first_v->prev;
37366
37367 default_version_info = first_v;
37368 while (default_version_info != NULL)
37369 {
37370 const tree decl2 = default_version_info->this_node->decl;
37371 if (is_function_default_version (decl2))
37372 break;
37373 default_version_info = default_version_info->next;
37374 }
37375
37376 /* If there is no default node, just return NULL. */
37377 if (default_version_info == NULL)
37378 return NULL;
37379
37380 /* Make default info the first node. */
37381 if (first_v != default_version_info)
37382 {
37383 default_version_info->prev->next = default_version_info->next;
37384 if (default_version_info->next)
37385 default_version_info->next->prev = default_version_info->prev;
37386 first_v->prev = default_version_info;
37387 default_version_info->next = first_v;
37388 default_version_info->prev = NULL;
37389 }
37390
37391 default_node = default_version_info->this_node;
37392
37393 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37394 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37395 "target_clones attribute needs GLIBC (2.23 and newer) that "
37396 "exports hardware capability bits");
37397 #else
37398
37399 if (targetm.has_ifunc_p ())
37400 {
37401 struct cgraph_function_version_info *it_v = NULL;
37402 struct cgraph_node *dispatcher_node = NULL;
37403 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37404
37405 /* Right now, the dispatching is done via ifunc. */
37406 dispatch_decl = make_dispatcher_decl (default_node->decl);
37407
37408 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37409 gcc_assert (dispatcher_node != NULL);
37410 dispatcher_node->dispatcher_function = 1;
37411 dispatcher_version_info
37412 = dispatcher_node->insert_new_function_version ();
37413 dispatcher_version_info->next = default_version_info;
37414 dispatcher_node->definition = 1;
37415
37416 /* Set the dispatcher for all the versions. */
37417 it_v = default_version_info;
37418 while (it_v != NULL)
37419 {
37420 it_v->dispatcher_resolver = dispatch_decl;
37421 it_v = it_v->next;
37422 }
37423 }
37424 else
37425 {
37426 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37427 "multiversioning needs ifunc which is not supported "
37428 "on this target");
37429 }
37430 #endif
37431
37432 return dispatch_decl;
37433 }
37434
37435 /* Make the resolver function decl to dispatch the versions of a multi-
37436 versioned function, DEFAULT_DECL. Create an empty basic block in the
37437 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37438 function. */
37439
37440 static tree
37441 make_resolver_func (const tree default_decl,
37442 const tree dispatch_decl,
37443 basic_block *empty_bb)
37444 {
37445 /* Make the resolver function static. The resolver function returns
37446 void *. */
37447 tree decl_name = clone_function_name (default_decl, "resolver");
37448 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37449 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37450 tree decl = build_fn_decl (resolver_name, type);
37451 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37452
37453 DECL_NAME (decl) = decl_name;
37454 TREE_USED (decl) = 1;
37455 DECL_ARTIFICIAL (decl) = 1;
37456 DECL_IGNORED_P (decl) = 0;
37457 TREE_PUBLIC (decl) = 0;
37458 DECL_UNINLINABLE (decl) = 1;
37459
37460 /* Resolver is not external, body is generated. */
37461 DECL_EXTERNAL (decl) = 0;
37462 DECL_EXTERNAL (dispatch_decl) = 0;
37463
37464 DECL_CONTEXT (decl) = NULL_TREE;
37465 DECL_INITIAL (decl) = make_node (BLOCK);
37466 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37467
37468 /* Build result decl and add to function_decl. */
37469 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37470 DECL_CONTEXT (t) = decl;
37471 DECL_ARTIFICIAL (t) = 1;
37472 DECL_IGNORED_P (t) = 1;
37473 DECL_RESULT (decl) = t;
37474
37475 gimplify_function_tree (decl);
37476 push_cfun (DECL_STRUCT_FUNCTION (decl));
37477 *empty_bb = init_lowered_empty_function (decl, false,
37478 profile_count::uninitialized ());
37479
37480 cgraph_node::add_new_function (decl, true);
37481 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37482
37483 pop_cfun ();
37484
37485 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37486 DECL_ATTRIBUTES (dispatch_decl)
37487 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37488
37489 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37490
37491 return decl;
37492 }
37493
37494 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37495 return a pointer to VERSION_DECL if we are running on a machine that
37496 supports the hardware architecture bits indexed by CLONE_ISA. This function will
37497 be called during version dispatch to decide which function version to
37498 execute. It returns the basic block at the end, to which more conditions
37499 can be added. */
37500
37501 static basic_block
37502 add_condition_to_bb (tree function_decl, tree version_decl,
37503 int clone_isa, basic_block new_bb)
37504 {
37505 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37506
37507 gcc_assert (new_bb != NULL);
37508 gimple_seq gseq = bb_seq (new_bb);
37509
37510
37511 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37512 build_fold_addr_expr (version_decl));
37513 tree result_var = create_tmp_var (ptr_type_node);
37514 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37515 gimple *return_stmt = gimple_build_return (result_var);
37516
37517 if (clone_isa == CLONE_DEFAULT)
37518 {
37519 gimple_seq_add_stmt (&gseq, convert_stmt);
37520 gimple_seq_add_stmt (&gseq, return_stmt);
37521 set_bb_seq (new_bb, gseq);
37522 gimple_set_bb (convert_stmt, new_bb);
37523 gimple_set_bb (return_stmt, new_bb);
37524 pop_cfun ();
37525 return new_bb;
37526 }
37527
37528 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37529 tree cond_var = create_tmp_var (bool_int_type_node);
37530 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37531 const char *arg_str = rs6000_clone_map[clone_isa].name;
37532 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37533 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37534 gimple_call_set_lhs (call_cond_stmt, cond_var);
37535
37536 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37537 gimple_set_bb (call_cond_stmt, new_bb);
37538 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37539
37540 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37541 NULL_TREE, NULL_TREE);
37542 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37543 gimple_set_bb (if_else_stmt, new_bb);
37544 gimple_seq_add_stmt (&gseq, if_else_stmt);
37545
37546 gimple_seq_add_stmt (&gseq, convert_stmt);
37547 gimple_seq_add_stmt (&gseq, return_stmt);
37548 set_bb_seq (new_bb, gseq);
37549
37550 basic_block bb1 = new_bb;
37551 edge e12 = split_block (bb1, if_else_stmt);
37552 basic_block bb2 = e12->dest;
37553 e12->flags &= ~EDGE_FALLTHRU;
37554 e12->flags |= EDGE_TRUE_VALUE;
37555
37556 edge e23 = split_block (bb2, return_stmt);
37557 gimple_set_bb (convert_stmt, bb2);
37558 gimple_set_bb (return_stmt, bb2);
37559
37560 basic_block bb3 = e23->dest;
37561 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37562
37563 remove_edge (e23);
37564 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37565
37566 pop_cfun ();
37567 return bb3;
37568 }
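
/* Stacking calls to the function above yields a resolver of roughly this
   shape (pseudo-C; foo_power9 etc. are hypothetical clone names):

     void *foo.resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))
         return (void *) foo_power9;
       if (__builtin_cpu_supports ("arch_2_07"))
         return (void *) foo_power8;
       return (void *) foo_default;
     }
*/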
37569
37570 /* This function generates the dispatch function for multi-versioned functions.
37571 DISPATCH_DECL is the function which will contain the dispatch logic.
37572 FNDECLS is the set of function choices for dispatch, passed as a vector.
37573 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37574 code is generated. */
37575
37576 static int
37577 dispatch_function_versions (tree dispatch_decl,
37578 void *fndecls_p,
37579 basic_block *empty_bb)
37580 {
37581 int ix;
37582 tree ele;
37583 vec<tree> *fndecls;
37584 tree clones[CLONE_MAX];
37585
37586 if (TARGET_DEBUG_TARGET)
37587 fputs ("dispatch_function_versions, top\n", stderr);
37588
37589 gcc_assert (dispatch_decl != NULL
37590 && fndecls_p != NULL
37591 && empty_bb != NULL);
37592
37593 /* fndecls_p is actually a vector. */
37594 fndecls = static_cast<vec<tree> *> (fndecls_p);
37595
37596 /* At least one more version other than the default. */
37597 gcc_assert (fndecls->length () >= 2);
37598
37599 /* The first version in the vector is the default decl. */
37600 memset ((void *) clones, '\0', sizeof (clones));
37601 clones[CLONE_DEFAULT] = (*fndecls)[0];
37602
37603 /* On the PowerPC, we do not need to call __builtin_cpu_init, since it is a
37604 NOP here (unlike on the x86_64, where it is not). The builtin function
37605 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37606 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37607 to insert the code here to do the call. */
37608
37609 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37610 {
37611 int priority = rs6000_clone_priority (ele);
37612 if (!clones[priority])
37613 clones[priority] = ele;
37614 }
37615
37616 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37617 if (clones[ix])
37618 {
37619 if (TARGET_DEBUG_TARGET)
37620 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37621 ix, get_decl_name (clones[ix]));
37622
37623 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37624 *empty_bb);
37625 }
37626
37627 return 0;
37628 }
37629
37630 /* Generate the dispatching code body to dispatch multi-versioned function
37631 DECL. The target hook is called to process the "target" attributes and
37632 provide the code to dispatch the right function at run-time. NODE points
37633 to the dispatcher decl whose body will be created. */
37634
37635 static tree
37636 rs6000_generate_version_dispatcher_body (void *node_p)
37637 {
37638 tree resolver;
37639 basic_block empty_bb;
37640 struct cgraph_node *node = (cgraph_node *) node_p;
37641 struct cgraph_function_version_info *ninfo = node->function_version ();
37642
37643 if (ninfo->dispatcher_resolver)
37644 return ninfo->dispatcher_resolver;
37645
37646 /* node is going to be an alias, so remove the finalized bit. */
37647 node->definition = false;
37648
37649 /* The first version in the chain corresponds to the default version. */
37650 ninfo->dispatcher_resolver = resolver
37651 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37652
37653 if (TARGET_DEBUG_TARGET)
37654 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37655 get_decl_name (resolver));
37656
37657 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37658 auto_vec<tree, 2> fn_ver_vec;
37659
37660 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37661 vinfo;
37662 vinfo = vinfo->next)
37663 {
37664 struct cgraph_node *version = vinfo->this_node;
37665 /* Check for virtual functions here again, as by this time it should
37666 have been determined if this function needs a vtable index or
37667 not. This happens for methods in derived classes that override
37668 virtual methods in base classes but are not explicitly marked as
37669 virtual. */
37670 if (DECL_VINDEX (version->decl))
37671 sorry ("virtual function multiversioning not supported");
37672
37673 fn_ver_vec.safe_push (version->decl);
37674 }
37675
37676 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37677 cgraph_edge::rebuild_edges ();
37678 pop_cfun ();
37679 return resolver;
37680 }
37681
37682 \f
37683 /* Hook to determine if one function can safely inline another. */
37684
37685 static bool
37686 rs6000_can_inline_p (tree caller, tree callee)
37687 {
37688 bool ret = false;
37689 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37690 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37691
37692 /* If callee has no option attributes, then it is ok to inline. */
37693 if (!callee_tree)
37694 ret = true;
37695
37696 /* If caller has no option attributes, but callee does then it is not ok to
37697 inline. */
37698 else if (!caller_tree)
37699 ret = false;
37700
37701 else
37702 {
37703 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37704 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37705
37706 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37707 can inline an altivec function but a non-vsx function can't inline a
37708 vsx function. */
37709 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37710 == callee_opts->x_rs6000_isa_flags)
37711 ret = true;
37712 }
37713
37714 if (TARGET_DEBUG_TARGET)
37715 fprintf (stderr, "rs6000_can_inline_p, caller %s, callee %s, %s inline\n",
37716 get_decl_name (caller), get_decl_name (callee),
37717 (ret ? "can" : "cannot"));
37718
37719 return ret;
37720 }
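
/* Example of the subset rule above (illustrative): a target("vsx") caller may
   inline a target("altivec") callee, because enabling VSX also sets the
   Altivec bit; the reverse is refused, since the altivec-only caller lacks
   the callee's VSX flag.  */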
37721 \f
37722 /* Allocate a stack temp and fix up the address so it meets the particular
37723 memory requirements (either offsettable or REG+REG addressing). */
37724
37725 rtx
37726 rs6000_allocate_stack_temp (machine_mode mode,
37727 bool offsettable_p,
37728 bool reg_reg_p)
37729 {
37730 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37731 rtx addr = XEXP (stack, 0);
37732 int strict_p = reload_completed;
37733
37734 if (!legitimate_indirect_address_p (addr, strict_p))
37735 {
37736 if (offsettable_p
37737 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37738 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37739
37740 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37741 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37742 }
37743
37744 return stack;
37745 }
37746
37747 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37748 convert to such a form to deal with memory reference instructions
37749 like STFIWX and LDBRX that only take reg+reg addressing. */
37750
37751 rtx
37752 rs6000_force_indexed_or_indirect_mem (rtx x)
37753 {
37754 machine_mode mode = GET_MODE (x);
37755
37756 gcc_assert (MEM_P (x));
37757 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37758 {
37759 rtx addr = XEXP (x, 0);
37760 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37761 {
37762 rtx reg = XEXP (addr, 0);
37763 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37764 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37765 gcc_assert (REG_P (reg));
37766 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37767 addr = reg;
37768 }
37769 else if (GET_CODE (addr) == PRE_MODIFY)
37770 {
37771 rtx reg = XEXP (addr, 0);
37772 rtx expr = XEXP (addr, 1);
37773 gcc_assert (REG_P (reg));
37774 gcc_assert (GET_CODE (expr) == PLUS);
37775 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37776 addr = reg;
37777 }
37778
37779 x = replace_equiv_address (x, force_reg (Pmode, addr));
37780 }
37781
37782 return x;
37783 }
37784
37785 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37786
37787 On the RS/6000, all integer constants are acceptable, though most won't
37788 be valid for particular insns. Only easy FP constants are acceptable. */
37789
37790 static bool
37791 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37792 {
37793 if (TARGET_ELF && tls_referenced_p (x))
37794 return false;
37795
37796 if (CONST_DOUBLE_P (x))
37797 return easy_fp_constant (x, mode);
37798
37799 if (GET_CODE (x) == CONST_VECTOR)
37800 return easy_vector_constant (x, mode);
37801
37802 return true;
37803 }
37804
37805 \f
37806 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37807
37808 static bool
37809 chain_already_loaded (rtx_insn *last)
37810 {
37811 for (; last != NULL; last = PREV_INSN (last))
37812 {
37813 if (NONJUMP_INSN_P (last))
37814 {
37815 rtx patt = PATTERN (last);
37816
37817 if (GET_CODE (patt) == SET)
37818 {
37819 rtx lhs = XEXP (patt, 0);
37820
37821 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37822 return true;
37823 }
37824 }
37825 }
37826 return false;
37827 }
37828
37829 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37830
37831 void
37832 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37833 {
37834 rtx func = func_desc;
37835 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37836 rtx toc_load = NULL_RTX;
37837 rtx toc_restore = NULL_RTX;
37838 rtx func_addr;
37839 rtx abi_reg = NULL_RTX;
37840 rtx call[4];
37841 int n_call;
37842 rtx insn;
37843 bool is_pltseq_longcall;
37844
37845 if (global_tlsarg)
37846 tlsarg = global_tlsarg;
37847
37848 /* Handle longcall attributes. */
37849 is_pltseq_longcall = false;
37850 if ((INTVAL (cookie) & CALL_LONG) != 0
37851 && GET_CODE (func_desc) == SYMBOL_REF)
37852 {
37853 func = rs6000_longcall_ref (func_desc, tlsarg);
37854 if (TARGET_PLTSEQ)
37855 is_pltseq_longcall = true;
37856 }
37857
37858 /* Handle indirect calls. */
37859 if (!SYMBOL_REF_P (func)
37860 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37861 {
37862 /* Save the TOC into its reserved slot before the call,
37863 and prepare to restore it after the call. */
37864 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37865 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37866 gen_rtvec (1, stack_toc_offset),
37867 UNSPEC_TOCSLOT);
37868 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37869
37870 /* Can we optimize saving the TOC in the prologue or
37871 do we need to do it at every call? */
37872 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37873 cfun->machine->save_toc_in_prologue = true;
37874 else
37875 {
37876 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37877 rtx stack_toc_mem = gen_frame_mem (Pmode,
37878 gen_rtx_PLUS (Pmode, stack_ptr,
37879 stack_toc_offset));
37880 MEM_VOLATILE_P (stack_toc_mem) = 1;
37881 if (is_pltseq_longcall)
37882 {
37883 /* Use UNSPEC_PLTSEQ here to emit every instruction in an
37884 inline PLT call sequence with a reloc, enabling the
37885 linker to edit the sequence back to a direct call
37886 when that makes sense. */
37887 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37888 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37889 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37890 }
37891 else
37892 emit_move_insn (stack_toc_mem, toc_reg);
37893 }
37894
37895 if (DEFAULT_ABI == ABI_ELFv2)
37896 {
37897 /* A function pointer in the ELFv2 ABI is just a plain address, but
37898 the ABI requires it to be loaded into r12 before the call. */
37899 func_addr = gen_rtx_REG (Pmode, 12);
37900 if (!rtx_equal_p (func_addr, func))
37901 emit_move_insn (func_addr, func);
37902 abi_reg = func_addr;
37903 /* Indirect calls via CTR are strongly preferred over indirect
37904 calls via LR, so move the address there. Needed to mark
37905 this insn for linker plt sequence editing too. */
37906 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37907 if (is_pltseq_longcall)
37908 {
37909 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37910 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37911 emit_insn (gen_rtx_SET (func_addr, mark_func));
37912 v = gen_rtvec (2, func_addr, func_desc);
37913 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37914 }
37915 else
37916 emit_move_insn (func_addr, abi_reg);
37917 }
37918 else
37919 {
37920 /* A function pointer under AIX is a pointer to a data area whose
37921 first word contains the actual address of the function, whose
37922 second word contains a pointer to its TOC, and whose third word
37923 contains a value to place in the static chain register (r11).
37924 Note that if we load the static chain, our "trampoline" need
37925 not have any executable code. */
37926
37927 /* Load up address of the actual function. */
37928 func = force_reg (Pmode, func);
37929 func_addr = gen_reg_rtx (Pmode);
37930 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37931
37932 /* Indirect calls via CTR are strongly preferred over indirect
37933 calls via LR, so move the address there. */
37934 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37935 emit_move_insn (ctr_reg, func_addr);
37936 func_addr = ctr_reg;
37937
37938 /* Prepare to load the TOC of the called function. Note that the
37939 TOC load must happen immediately before the actual call so
37940 that unwinding the TOC registers works correctly. See the
37941 comment in frob_update_context. */
37942 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37943 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37944 gen_rtx_PLUS (Pmode, func,
37945 func_toc_offset));
37946 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37947
37948 /* If we have a static chain, load it up. But, if the call was
37949 originally direct, the 3rd word has not been written since no
37950 trampoline has been built, so we ought not to load it, lest we
37951 override a static chain value. */
37952 if (!(GET_CODE (func_desc) == SYMBOL_REF
37953 && SYMBOL_REF_FUNCTION_P (func_desc))
37954 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37955 && !chain_already_loaded (get_current_sequence ()->next->last))
37956 {
37957 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37958 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37959 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37960 gen_rtx_PLUS (Pmode, func,
37961 func_sc_offset));
37962 emit_move_insn (sc_reg, func_sc_mem);
37963 abi_reg = sc_reg;
37964 }
37965 }
37966 }
37967 else
37968 {
37969 /* Direct calls use the TOC: for local calls, the callee will
37970 assume the TOC register is set; for non-local calls, the
37971 PLT stub needs the TOC register. */
37972 abi_reg = toc_reg;
37973 func_addr = func;
37974 }
37975
37976 /* Create the call. */
37977 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37978 if (value != NULL_RTX)
37979 call[0] = gen_rtx_SET (value, call[0]);
37980 n_call = 1;
37981
37982 if (toc_load)
37983 call[n_call++] = toc_load;
37984 if (toc_restore)
37985 call[n_call++] = toc_restore;
37986
37987 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37988
37989 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37990 insn = emit_call_insn (insn);
37991
37992 /* Mention all registers defined by the ABI to hold information
37993 as uses in CALL_INSN_FUNCTION_USAGE. */
37994 if (abi_reg)
37995 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37996 }
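
/* For an indirect call, the sequence emitted above corresponds to roughly
   (a sketch; actual register choices are made by the register allocator,
   and offsets assume the 64-bit AIX ABI with the descriptor address in r9):

     std 2,40(1)      # save TOC to its reserved stack slot
     ld 12,0(9)       # actual function address from the descriptor
     mtctr 12
     ld 2,8(9)        # callee's TOC
     bctrl
     ld 2,40(1)       # restore TOC (the toc_restore SET)
*/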
37997
37998 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37999
38000 void
38001 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38002 {
38003 rtx call[2];
38004 rtx insn;
38005
38006 gcc_assert (INTVAL (cookie) == 0);
38007
38008 if (global_tlsarg)
38009 tlsarg = global_tlsarg;
38010
38011 /* Create the call. */
38012 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
38013 if (value != NULL_RTX)
38014 call[0] = gen_rtx_SET (value, call[0]);
38015
38016 call[1] = simple_return_rtx;
38017
38018 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
38019 insn = emit_call_insn (insn);
38020
38021 /* Note use of the TOC register. */
38022 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
38023 }
38024
38025 /* Expand code to perform a call under the SYSV4 ABI. */
38026
38027 void
38028 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38029 {
38030 rtx func = func_desc;
38031 rtx func_addr;
38032 rtx call[4];
38033 rtx insn;
38034 rtx abi_reg = NULL_RTX;
38035 int n;
38036
38037 if (global_tlsarg)
38038 tlsarg = global_tlsarg;
38039
38040 /* Handle longcall attributes. */
38041 if ((INTVAL (cookie) & CALL_LONG) != 0
38042 && GET_CODE (func_desc) == SYMBOL_REF)
38043 {
38044 func = rs6000_longcall_ref (func_desc, tlsarg);
38045 /* If the longcall was implemented as an inline PLT call using
38046 PLT unspecs then func will be REG:r11. If not, func will be
38047 a pseudo reg. The inline PLT call sequence supports lazy
38048 linking (and longcalls to functions in dlopen'd libraries).
38049 The other style of longcalls don't. The lazy linking entry
38050 to the dynamic symbol resolver requires r11 be the function
38051 address (as it is for linker generated PLT stubs). Ensure
38052 r11 stays valid to the bctrl by marking r11 used by the call. */
38053 if (TARGET_PLTSEQ)
38054 abi_reg = func;
38055 }
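/* For reference: when TARGET_PLTSEQ is in effect, the code below builds,
   in outline (a sketch; register numbers are illustrative),

     r11 <- address of sym's PLT entry    marked with UNSPEC_PLTSEQ
     ctr <- r11                           marked with UNSPEC_PLTSEQ
     bctrl

   where every insn carries a reloc so the linker can edit the sequence
   back into a plain "bl sym" when the target turns out to be local.  */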
38056
38057 /* Handle indirect calls. */
38058 if (GET_CODE (func) != SYMBOL_REF)
38059 {
38060 func = force_reg (Pmode, func);
38061
38062 /* Indirect calls via CTR are strongly preferred over indirect
38063 calls via LR, so move the address there. That can't be left
38064 to reload because we want to mark every instruction in an
38065 inline PLT call sequence with a reloc, enabling the linker to
38066 edit the sequence back to a direct call when that makes sense. */
38067 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38068 if (abi_reg)
38069 {
38070 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38071 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38072 emit_insn (gen_rtx_SET (func_addr, mark_func));
38073 v = gen_rtvec (2, func_addr, func_desc);
38074 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38075 }
38076 else
38077 emit_move_insn (func_addr, func);
38078 }
38079 else
38080 func_addr = func;
38081
38082 /* Create the call. */
38083 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38084 if (value != NULL_RTX)
38085 call[0] = gen_rtx_SET (value, call[0]);
38086
38087 call[1] = gen_rtx_USE (VOIDmode, cookie);
38088 n = 2;
38089 if (TARGET_SECURE_PLT
38090 && flag_pic
38091 && GET_CODE (func_addr) == SYMBOL_REF
38092 && !SYMBOL_REF_LOCAL_P (func_addr))
38093 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
38094
38095 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38096
38097 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
38098 insn = emit_call_insn (insn);
38099 if (abi_reg)
38100 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38101 }
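/* A sketch of the PARALLEL emitted above for an indirect call that
   returns a value (modes and operands elided):

     (parallel [(set (reg <value>)
                     (call (mem:SI (reg ctr)) <tlsarg>))
                (use (const_int <cookie>))
                (clobber (reg lr))])

   with an additional (use) of the PIC offset table register for a
   secure-plt PIC call to a non-local symbol.  */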
38102
38103 /* Expand code to perform a sibling call under the SysV4 ABI. */
38104
38105 void
38106 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
38107 {
38108 rtx func = func_desc;
38109 rtx func_addr;
38110 rtx call[3];
38111 rtx insn;
38112 rtx abi_reg = NULL_RTX;
38113
38114 if (global_tlsarg)
38115 tlsarg = global_tlsarg;
38116
38117 /* Handle longcall attributes. */
38118 if ((INTVAL (cookie) & CALL_LONG) != 0
38119 && GET_CODE (func_desc) == SYMBOL_REF)
38120 {
38121 func = rs6000_longcall_ref (func_desc, tlsarg);
38122 /* If the longcall was implemented as an inline PLT call using
38123 PLT unspecs then func will be REG:r11. If not, func will be
38124 a pseudo reg. The inline PLT call sequence supports lazy
38125 linking (and longcalls to functions in dlopen'd libraries).
38126 The other style of longcalls don't. The lazy linking entry
38127 to the dynamic symbol resolver requires r11 be the function
38128 address (as it is for linker generated PLT stubs). Ensure
38129 r11 stays valid to the bctr by marking r11 used by the call. */
38130 if (TARGET_PLTSEQ)
38131 abi_reg = func;
38132 }
38133
38134 /* Handle indirect calls. */
38135 if (GET_CODE (func) != SYMBOL_REF)
38136 {
38137 func = force_reg (Pmode, func);
38138
38139 /* Indirect sibcalls must go via CTR. That can't be left to
38140 reload because we want to mark every instruction in an inline
38141 PLT call sequence with a reloc, enabling the linker to edit
38142 the sequence back to a direct call when that makes sense. */
38143 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38144 if (abi_reg)
38145 {
38146 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
38147 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38148 emit_insn (gen_rtx_SET (func_addr, mark_func));
38149 v = gen_rtvec (2, func_addr, func_desc);
38150 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
38151 }
38152 else
38153 emit_move_insn (func_addr, func);
38154 }
38155 else
38156 func_addr = func;
38157
38158 /* Create the call. */
38159 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38160 if (value != NULL_RTX)
38161 call[0] = gen_rtx_SET (value, call[0]);
38162
38163 call[1] = gen_rtx_USE (VOIDmode, cookie);
38164 call[2] = simple_return_rtx;
38165
38166 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38167 insn = emit_call_insn (insn);
38168 if (abi_reg)
38169 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38170 }
38171
38172 #if TARGET_MACHO
38173
38174 /* Expand code to perform a call under the Darwin ABI.
38175 Modulo handling of mlongcall, this is much the same as sysv.
38176 If/when the longcall optimisation is removed, we could drop this
38177 code and use the sysv case (taking care to avoid the tls stuff).
38178
38179 We can use this for sibcalls too, if needed. */
38180
38181 void
38182 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38183 rtx cookie, bool sibcall)
38184 {
38185 rtx func = func_desc;
38186 rtx func_addr;
38187 rtx call[3];
38188 rtx insn;
38189 int cookie_val = INTVAL (cookie);
38190 bool make_island = false;
38191
38192 /* Handle longcall attributes; there are two cases for Darwin:
38193 1) Newer linkers are capable of synthesising any branch islands needed.
38194 2) We need a helper branch island synthesised by the compiler.
38195 The second case has mostly been retired and we don't use it for m64.
38196 In fact, it is an optimisation; we could just indirect as sysv does,
38197 but we keep it for backwards compatibility for now.
38198 If we're going to use this, then we need to keep the CALL_LONG bit set,
38199 so that we can pick up the special insn form later. */
38200 if ((cookie_val & CALL_LONG) != 0
38201 && GET_CODE (func_desc) == SYMBOL_REF)
38202 {
38203 if (darwin_emit_branch_islands && TARGET_32BIT)
38204 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38205 else
38206 {
38207 /* The linker is capable of doing this, but the user explicitly
38208 asked for -mlongcall, so we'll do the 'normal' version. */
38209 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38210 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38211 }
38212 }
38213
38214 /* Handle indirect calls. */
38215 if (GET_CODE (func) != SYMBOL_REF)
38216 {
38217 func = force_reg (Pmode, func);
38218
38219 /* Indirect calls via CTR are strongly preferred over indirect
38220 calls via LR, and are required for indirect sibcalls, so move
38221 the address there. */
38222 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38223 emit_move_insn (func_addr, func);
38224 }
38225 else
38226 func_addr = func;
38227
38228 /* Create the call. */
38229 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38230 if (value != NULL_RTX)
38231 call[0] = gen_rtx_SET (value, call[0]);
38232
38233 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38234
38235 if (sibcall)
38236 call[2] = simple_return_rtx;
38237 else
38238 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38239
38240 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38241 insn = emit_call_insn (insn);
38242 /* Now that we have the debug info in the insn, we can set up the branch
38243 island if we're using one. */
38244 if (make_island)
38245 {
38246 tree funname = get_identifier (XSTR (func_desc, 0));
38247
38248 if (no_previous_def (funname))
38249 {
38250 rtx label_rtx = gen_label_rtx ();
38251 char *label_buf, temp_buf[256];
38252 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38253 CODE_LABEL_NUMBER (label_rtx));
38254 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38255 tree labelname = get_identifier (label_buf);
38256 add_compiler_branch_island (labelname, funname,
38257 insn_line ((const rtx_insn*)insn));
38258 }
38259 }
38260 }
38261 #endif
38262
38263 void
38264 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38265 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38266 {
38267 #if TARGET_MACHO
38268 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38269 #else
38270 gcc_unreachable ();
38271 #endif
38272 }
38273
38274
38275 void
38276 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38277 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38278 {
38279 #if TARGET_MACHO
38280 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38281 #else
38282 gcc_unreachable ();
38283 #endif
38284 }
38285
38286
38287 /* Return whether we need to always update the saved TOC pointer when we update
38288 the stack pointer. */
38289
38290 static bool
38291 rs6000_save_toc_in_prologue_p (void)
38292 {
38293 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38294 }
38295
38296 #ifdef HAVE_GAS_HIDDEN
38297 # define USE_HIDDEN_LINKONCE 1
38298 #else
38299 # define USE_HIDDEN_LINKONCE 0
38300 #endif
38301
38302 /* Fills in the label name that should be used for a 476 link stack thunk. */
38303
38304 void
38305 get_ppc476_thunk_name (char name[32])
38306 {
38307 gcc_assert (TARGET_LINK_STACK);
38308
38309 if (USE_HIDDEN_LINKONCE)
38310 sprintf (name, "__ppc476.get_thunk");
38311 else
38312 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38313 }
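/* For example, the USE_HIDDEN_LINKONCE path always yields the fixed
   name "__ppc476.get_thunk", while ASM_GENERATE_INTERNAL_LABEL yields
   an assembler-local label such as "*LPPC476_0" (the exact spelling
   depends on the target's local label prefix).  */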
38314
38315 /* This function emits the simple thunk routine that is used to preserve
38316 the link stack on the 476 cpu. */
38317
38318 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38319 static void
38320 rs6000_code_end (void)
38321 {
38322 char name[32];
38323 tree decl;
38324
38325 if (!TARGET_LINK_STACK)
38326 return;
38327
38328 get_ppc476_thunk_name (name);
38329
38330 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38331 build_function_type_list (void_type_node, NULL_TREE));
38332 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38333 NULL_TREE, void_type_node);
38334 TREE_PUBLIC (decl) = 1;
38335 TREE_STATIC (decl) = 1;
38336
38337 #if RS6000_WEAK
38338 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38339 {
38340 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38341 targetm.asm_out.unique_section (decl, 0);
38342 switch_to_section (get_named_section (decl, NULL, 0));
38343 DECL_WEAK (decl) = 1;
38344 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38345 targetm.asm_out.globalize_label (asm_out_file, name);
38346 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38347 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38348 }
38349 else
38350 #endif
38351 {
38352 switch_to_section (text_section);
38353 ASM_OUTPUT_LABEL (asm_out_file, name);
38354 }
38355
38356 DECL_INITIAL (decl) = make_node (BLOCK);
38357 current_function_decl = decl;
38358 allocate_struct_function (decl, false);
38359 init_function_start (decl);
38360 first_function_block_is_cold = false;
38361 /* Make sure unwind info is emitted for the thunk if needed. */
38362 final_start_function (emit_barrier (), asm_out_file, 1);
38363
38364 fputs ("\tblr\n", asm_out_file);
38365
38366 final_end_function ();
38367 init_insn_lengths ();
38368 free_after_compilation (cfun);
38369 set_cfun (NULL);
38370 current_function_decl = NULL;
38371 }
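/* The thunk body emitted above is deliberately minimal, roughly:

     __ppc476.get_thunk:
             blr

   A caller reaches it with "bl", which pushes an entry on the 476's
   hardware link stack; the immediate "blr" pops it again, so the
   caller can read LR without leaving the link stack unbalanced.  */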
38372
38373 /* Add r30 to hard reg set if the prologue sets it up and it is not
38374 pic_offset_table_rtx. */
38375
38376 static void
38377 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38378 {
38379 if (!TARGET_SINGLE_PIC_BASE
38380 && TARGET_TOC
38381 && TARGET_MINIMAL_TOC
38382 && !constant_pool_empty_p ())
38383 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38384 if (cfun->machine->split_stack_argp_used)
38385 add_to_hard_reg_set (&set->set, Pmode, 12);
38386
38387 /* Make sure the hard reg set doesn't include r2, which was possibly added
38388 via PIC_OFFSET_TABLE_REGNUM. */
38389 if (TARGET_TOC)
38390 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38391 }
38392
38393 \f
38394 /* Helper function for rs6000_split_logical to emit a logical instruction after
38395 splitting the operation into single GPR registers.
38396
38397 DEST is the destination register.
38398 OP1 and OP2 are the input source registers.
38399 CODE is the base operation (AND, IOR, XOR, NOT).
38400 MODE is the machine mode.
38401 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38402 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38403 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38404
38405 static void
38406 rs6000_split_logical_inner (rtx dest,
38407 rtx op1,
38408 rtx op2,
38409 enum rtx_code code,
38410 machine_mode mode,
38411 bool complement_final_p,
38412 bool complement_op1_p,
38413 bool complement_op2_p)
38414 {
38415 rtx bool_rtx;
38416
38417 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38418 if (op2 && CONST_INT_P (op2)
38419 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38420 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38421 {
38422 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38423 HOST_WIDE_INT value = INTVAL (op2) & mask;
38424
38425 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38426 if (code == AND)
38427 {
38428 if (value == 0)
38429 {
38430 emit_insn (gen_rtx_SET (dest, const0_rtx));
38431 return;
38432 }
38433
38434 else if (value == mask)
38435 {
38436 if (!rtx_equal_p (dest, op1))
38437 emit_insn (gen_rtx_SET (dest, op1));
38438 return;
38439 }
38440 }
38441
38442 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38443 into separate ORI/ORIS or XORI/XORIS instructions. */
38444 else if (code == IOR || code == XOR)
38445 {
38446 if (value == 0)
38447 {
38448 if (!rtx_equal_p (dest, op1))
38449 emit_insn (gen_rtx_SET (dest, op1));
38450 return;
38451 }
38452 }
38453 }
38454
38455 if (code == AND && mode == SImode
38456 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38457 {
38458 emit_insn (gen_andsi3 (dest, op1, op2));
38459 return;
38460 }
38461
38462 if (complement_op1_p)
38463 op1 = gen_rtx_NOT (mode, op1);
38464
38465 if (complement_op2_p)
38466 op2 = gen_rtx_NOT (mode, op2);
38467
38468 /* For canonical RTL, if only one arm is inverted it is the first. */
38469 if (!complement_op1_p && complement_op2_p)
38470 std::swap (op1, op2);
38471
38472 bool_rtx = ((code == NOT)
38473 ? gen_rtx_NOT (mode, op1)
38474 : gen_rtx_fmt_ee (code, mode, op1, op2));
38475
38476 if (complement_final_p)
38477 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38478
38479 emit_insn (gen_rtx_SET (dest, bool_rtx));
38480 }
38481
38482 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38483 operations are split immediately during RTL generation to allow for more
38484 optimizations of the AND/IOR/XOR.
38485
38486 OPERANDS is an array containing the destination and two input operands.
38487 CODE is the base operation (AND, IOR, XOR, NOT).
38489 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38490 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38491 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38494
38495 static void
38496 rs6000_split_logical_di (rtx operands[3],
38497 enum rtx_code code,
38498 bool complement_final_p,
38499 bool complement_op1_p,
38500 bool complement_op2_p)
38501 {
38502 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C (0xffffffff);
38503 const HOST_WIDE_INT upper_32bits = ~lower_32bits;
38504 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C (0x80000000);
38505 enum hi_lo { hi = 0, lo = 1 };
38506 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38507 size_t i;
38508
38509 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38510 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38511 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38512 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38513
38514 if (code == NOT)
38515 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38516 else
38517 {
38518 if (!CONST_INT_P (operands[2]))
38519 {
38520 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38521 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38522 }
38523 else
38524 {
38525 HOST_WIDE_INT value = INTVAL (operands[2]);
38526 HOST_WIDE_INT value_hi_lo[2];
38527
38528 gcc_assert (!complement_final_p);
38529 gcc_assert (!complement_op1_p);
38530 gcc_assert (!complement_op2_p);
38531
38532 value_hi_lo[hi] = value >> 32;
38533 value_hi_lo[lo] = value & lower_32bits;
38534
38535 for (i = 0; i < 2; i++)
38536 {
38537 HOST_WIDE_INT sub_value = value_hi_lo[i];
38538
38539 if (sub_value & sign_bit)
38540 sub_value |= upper_32bits;
38541
38542 op2_hi_lo[i] = GEN_INT (sub_value);
38543
38544 /* If this is an AND instruction, check to see if we need to load
38545 the value into a register. */
38546 if (code == AND && sub_value != -1 && sub_value != 0
38547 && !and_operand (op2_hi_lo[i], SImode))
38548 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38549 }
38550 }
38551 }
38552
38553 for (i = 0; i < 2; i++)
38554 {
38555 /* Split large IOR/XOR operations. */
38556 if ((code == IOR || code == XOR)
38557 && CONST_INT_P (op2_hi_lo[i])
38558 && !complement_final_p
38559 && !complement_op1_p
38560 && !complement_op2_p
38561 && !logical_const_operand (op2_hi_lo[i], SImode))
38562 {
38563 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38564 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C (0xffff0000);
38565 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C (0x0000ffff);
38566 rtx tmp = gen_reg_rtx (SImode);
38567
38568 /* Make sure the constant is sign extended. */
38569 if ((hi_16bits & sign_bit) != 0)
38570 hi_16bits |= upper_32bits;
38571
38572 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38573 code, SImode, false, false, false);
38574
38575 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38576 code, SImode, false, false, false);
38577 }
38578 else
38579 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38580 code, SImode, complement_final_p,
38581 complement_op1_p, complement_op2_p);
38582 }
38583
38584 return;
38585 }
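/* A worked example of the splitting above (a sketch): on a 32-bit
   target, a DImode XOR with the constant 0x1234567890abcdef has two
   halves that are not logical_const_operand, so each SImode half is
   emitted as an xoris/xori pair, roughly

     xoris dh,sh,0x1234    xori dh,dh,0x5678      high word
     xoris dl,sl,0x90ab    xori dl,dl,0xcdef      low word  */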
38586
38587 /* Split the insns that make up boolean operations operating on multiple GPR
38588 registers. The boolean MD patterns ensure that the inputs either are
38589 exactly the same as the output registers, or there is no overlap.
38590
38591 OPERANDS is an array containing the destination and two input operands.
38592 CODE is the base operation (AND, IOR, XOR, NOT).
38593 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38594 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38595 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38596
38597 void
38598 rs6000_split_logical (rtx operands[3],
38599 enum rtx_code code,
38600 bool complement_final_p,
38601 bool complement_op1_p,
38602 bool complement_op2_p)
38603 {
38604 machine_mode mode = GET_MODE (operands[0]);
38605 machine_mode sub_mode;
38606 rtx op0, op1, op2;
38607 int sub_size, regno0, regno1, nregs, i;
38608
38609 /* If this is DImode, use the specialized version that can run before
38610 register allocation. */
38611 if (mode == DImode && !TARGET_POWERPC64)
38612 {
38613 rs6000_split_logical_di (operands, code, complement_final_p,
38614 complement_op1_p, complement_op2_p);
38615 return;
38616 }
38617
38618 op0 = operands[0];
38619 op1 = operands[1];
38620 op2 = (code == NOT) ? NULL_RTX : operands[2];
38621 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38622 sub_size = GET_MODE_SIZE (sub_mode);
38623 regno0 = REGNO (op0);
38624 regno1 = REGNO (op1);
38625
38626 gcc_assert (reload_completed);
38627 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38628 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38629
38630 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38631 gcc_assert (nregs > 1);
38632
38633 if (op2 && REG_P (op2))
38634 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38635
38636 for (i = 0; i < nregs; i++)
38637 {
38638 int offset = i * sub_size;
38639 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38640 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38641 rtx sub_op2 = ((code == NOT)
38642 ? NULL_RTX
38643 : simplify_subreg (sub_mode, op2, mode, offset));
38644
38645 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38646 complement_final_p, complement_op1_p,
38647 complement_op2_p);
38648 }
38649
38650 return;
38651 }
38652
38653 \f
38654 /* Return true if the peephole2 can combine an addis instruction and a
38655 load with an offset into a pair that can be fused together on a
38656 power8. */
38657
38658 bool
38659 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38660 rtx addis_value, /* addis value. */
38661 rtx target, /* target register that is loaded. */
38662 rtx mem) /* bottom part of the memory addr. */
38663 {
38664 rtx addr;
38665 rtx base_reg;
38666
38667 /* Validate arguments. */
38668 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38669 return false;
38670
38671 if (!base_reg_operand (target, GET_MODE (target)))
38672 return false;
38673
38674 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38675 return false;
38676
38677 /* Allow sign/zero extension. */
38678 if (GET_CODE (mem) == ZERO_EXTEND
38679 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38680 mem = XEXP (mem, 0);
38681
38682 if (!MEM_P (mem))
38683 return false;
38684
38685 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38686 return false;
38687
38688 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38689 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38690 return false;
38691
38692 /* Validate that the register used to load the high value is either the
38693 register being loaded, or we can safely replace its use.
38694
38695 This function is only called from the peephole2 pass and we assume that
38696 there are 2 instructions in the peephole (addis and load), so we want to
38697 check that the target register is not used in the memory address and that
38698 the register holding the addis result is dead after the peephole. */
38699 if (REGNO (addis_reg) != REGNO (target))
38700 {
38701 if (reg_mentioned_p (target, mem))
38702 return false;
38703
38704 if (!peep2_reg_dead_p (2, addis_reg))
38705 return false;
38706
38707 /* If the target register being loaded is the stack pointer, we must
38708 avoid loading any other value into it, even temporarily. */
38709 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38710 return false;
38711 }
38712
38713 base_reg = XEXP (addr, 0);
38714 return REGNO (addis_reg) == REGNO (base_reg);
38715 }
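/* So the peephole2 using this predicate is matching, in outline:

     (set (reg addis_reg) <addis_value>)                       addis
     (set (reg target) (mem (plus (reg addis_reg) <offset>)))  load

   where the load may additionally be wrapped in a zero or sign
   extension, as handled above.  */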
38716
38717 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38718 sequence. We adjust the addis register to use the target register. If the
38719 load sign extends, we adjust the code to do a zero extending load followed
38720 by an explicit sign extension, since the fusion only covers zero extending
38721 loads.
38722
38723 The operands are:
38724 operands[0] register set with addis (to be replaced with target)
38725 operands[1] value set via addis
38726 operands[2] target register being loaded
38727 operands[3] D-form memory reference using operands[0]. */
38728
38729 void
38730 expand_fusion_gpr_load (rtx *operands)
38731 {
38732 rtx addis_value = operands[1];
38733 rtx target = operands[2];
38734 rtx orig_mem = operands[3];
38735 rtx new_addr, new_mem, orig_addr, offset;
38736 enum rtx_code plus_or_lo_sum;
38737 machine_mode target_mode = GET_MODE (target);
38738 machine_mode extend_mode = target_mode;
38739 machine_mode ptr_mode = Pmode;
38740 enum rtx_code extend = UNKNOWN;
38741
38742 if (GET_CODE (orig_mem) == ZERO_EXTEND
38743 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38744 {
38745 extend = GET_CODE (orig_mem);
38746 orig_mem = XEXP (orig_mem, 0);
38747 target_mode = GET_MODE (orig_mem);
38748 }
38749
38750 gcc_assert (MEM_P (orig_mem));
38751
38752 orig_addr = XEXP (orig_mem, 0);
38753 plus_or_lo_sum = GET_CODE (orig_addr);
38754 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38755
38756 offset = XEXP (orig_addr, 1);
38757 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38758 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38759
38760 if (extend != UNKNOWN)
38761 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38762
38763 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38764 UNSPEC_FUSION_GPR);
38765 emit_insn (gen_rtx_SET (target, new_mem));
38766
38767 if (extend == SIGN_EXTEND)
38768 {
38769 int sub_off = ((BYTES_BIG_ENDIAN)
38770 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38771 : 0);
38772 rtx sign_reg
38773 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38774
38775 emit_insn (gen_rtx_SET (target,
38776 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38777 }
38778
38779 return;
38780 }
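/* For instance (a sketch with illustrative register numbers), the pair

     addis 10,2,sym@toc@ha
     lwz 9,sym@toc@l(10)

   is rewritten so that both instructions use r9, and a sign-extending
   load such as lwa becomes the fused zero-extending form followed by
   the explicit sign extension emitted just above.  */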
38781
38782 /* Emit the addis instruction that will be part of a fused instruction
38783 sequence. */
38784
38785 void
38786 emit_fusion_addis (rtx target, rtx addis_value)
38787 {
38788 rtx fuse_ops[10];
38789 const char *addis_str = NULL;
38790
38791 /* Emit the addis instruction. */
38792 fuse_ops[0] = target;
38793 if (satisfies_constraint_L (addis_value))
38794 {
38795 fuse_ops[1] = addis_value;
38796 addis_str = "lis %0,%v1";
38797 }
38798
38799 else if (GET_CODE (addis_value) == PLUS)
38800 {
38801 rtx op0 = XEXP (addis_value, 0);
38802 rtx op1 = XEXP (addis_value, 1);
38803
38804 if (REG_P (op0) && CONST_INT_P (op1)
38805 && satisfies_constraint_L (op1))
38806 {
38807 fuse_ops[1] = op0;
38808 fuse_ops[2] = op1;
38809 addis_str = "addis %0,%1,%v2";
38810 }
38811 }
38812
38813 else if (GET_CODE (addis_value) == HIGH)
38814 {
38815 rtx value = XEXP (addis_value, 0);
38816 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38817 {
38818 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38819 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38820 if (TARGET_ELF)
38821 addis_str = "addis %0,%2,%1@toc@ha";
38822
38823 else if (TARGET_XCOFF)
38824 addis_str = "addis %0,%1@u(%2)";
38825
38826 else
38827 gcc_unreachable ();
38828 }
38829
38830 else if (GET_CODE (value) == PLUS)
38831 {
38832 rtx op0 = XEXP (value, 0);
38833 rtx op1 = XEXP (value, 1);
38834
38835 if (GET_CODE (op0) == UNSPEC
38836 && XINT (op0, 1) == UNSPEC_TOCREL
38837 && CONST_INT_P (op1))
38838 {
38839 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38840 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38841 fuse_ops[3] = op1;
38842 if (TARGET_ELF)
38843 addis_str = "addis %0,%2,%1+%3@toc@ha";
38844
38845 else if (TARGET_XCOFF)
38846 addis_str = "addis %0,%1+%3@u(%2)";
38847
38848 else
38849 gcc_unreachable ();
38850 }
38851 }
38852
38853 else if (satisfies_constraint_L (value))
38854 {
38855 fuse_ops[1] = value;
38856 addis_str = "lis %0,%v1";
38857 }
38858
38859 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38860 {
38861 fuse_ops[1] = value;
38862 addis_str = "lis %0,%1@ha";
38863 }
38864 }
38865
38866 if (!addis_str)
38867 fatal_insn ("Could not generate addis value for fusion", addis_value);
38868
38869 output_asm_insn (addis_str, fuse_ops);
38870 }
38871
38872 /* Emit a D-form load or store instruction that is the second instruction
38873 of a fusion sequence. */
38874
38875 static void
38876 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38877 {
38878 rtx fuse_ops[10];
38879 char insn_template[80];
38880
38881 fuse_ops[0] = load_reg;
38882 fuse_ops[1] = addis_reg;
38883
38884 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38885 {
38886 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38887 fuse_ops[2] = offset;
38888 output_asm_insn (insn_template, fuse_ops);
38889 }
38890
38891 else if (GET_CODE (offset) == UNSPEC
38892 && XINT (offset, 1) == UNSPEC_TOCREL)
38893 {
38894 if (TARGET_ELF)
38895 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38896
38897 else if (TARGET_XCOFF)
38898 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38899
38900 else
38901 gcc_unreachable ();
38902
38903 fuse_ops[2] = XVECEXP (offset, 0, 0);
38904 output_asm_insn (insn_template, fuse_ops);
38905 }
38906
38907 else if (GET_CODE (offset) == PLUS
38908 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38909 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38910 && CONST_INT_P (XEXP (offset, 1)))
38911 {
38912 rtx tocrel_unspec = XEXP (offset, 0);
38913 if (TARGET_ELF)
38914 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38915
38916 else if (TARGET_XCOFF)
38917 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38918
38919 else
38920 gcc_unreachable ();
38921
38922 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38923 fuse_ops[3] = XEXP (offset, 1);
38924 output_asm_insn (insn_template, fuse_ops);
38925 }
38926
38927 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38928 {
38929 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38930
38931 fuse_ops[2] = offset;
38932 output_asm_insn (insn_template, fuse_ops);
38933 }
38934
38935 else
38936 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38937
38938 return;
38939 }
38940
38941 /* Given an address, convert it into the addis and load offset parts. Addresses
38942 created during the peephole2 process look like:
38943 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38944 (unspec [(...)] UNSPEC_TOCREL)) */
38945
38946 static void
38947 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38948 {
38949 rtx hi, lo;
38950
38951 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38952 {
38953 hi = XEXP (addr, 0);
38954 lo = XEXP (addr, 1);
38955 }
38956 else
38957 gcc_unreachable ();
38958
38959 *p_hi = hi;
38960 *p_lo = lo;
38961 }
38962
38963 /* Return a string to fuse an addis instruction with a gpr load to the same
38964 register that the addis instruction set up. The address that is used
38965 is the logical address that was formed during peephole2:
38966 (lo_sum (high) (low-part))
38967
38968 The code is complicated, so we call output_asm_insn directly, and just
38969 return "". */
38970
38971 const char *
38972 emit_fusion_gpr_load (rtx target, rtx mem)
38973 {
38974 rtx addis_value;
38975 rtx addr;
38976 rtx load_offset;
38977 const char *load_str = NULL;
38978 machine_mode mode;
38979
38980 if (GET_CODE (mem) == ZERO_EXTEND)
38981 mem = XEXP (mem, 0);
38982
38983 gcc_assert (REG_P (target) && MEM_P (mem));
38984
38985 addr = XEXP (mem, 0);
38986 fusion_split_address (addr, &addis_value, &load_offset);
38987
38988 /* Now emit the load instruction to the same register. */
38989 mode = GET_MODE (mem);
38990 switch (mode)
38991 {
38992 case E_QImode:
38993 load_str = "lbz";
38994 break;
38995
38996 case E_HImode:
38997 load_str = "lhz";
38998 break;
38999
39000 case E_SImode:
39001 case E_SFmode:
39002 load_str = "lwz";
39003 break;
39004
39005 case E_DImode:
39006 case E_DFmode:
39007 gcc_assert (TARGET_POWERPC64);
39008 load_str = "ld";
39009 break;
39010
39011 default:
39012 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
39013 }
39014
39015 /* Emit the addis instruction. */
39016 emit_fusion_addis (target, addis_value);
39017
39018 /* Emit the D-form load instruction. */
39019 emit_fusion_load (target, target, load_offset, load_str);
39020
39021 return "";
39022 }
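/* Putting the two helpers together: a QImode TOC-relative load on ELF
   comes out as, e.g. (illustrative register numbers),

     addis 9,2,sym@toc@ha
     lbz 9,sym@toc@l(9)

   using the templates selected in emit_fusion_addis and
   emit_fusion_load.  */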
39023 \f
39024
39025 #ifdef RS6000_GLIBC_ATOMIC_FENV
39026 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
39027 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
39028 #endif
39029
39030 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
39031
39032 static void
39033 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
39034 {
39035 if (!TARGET_HARD_FLOAT)
39036 {
39037 #ifdef RS6000_GLIBC_ATOMIC_FENV
39038 if (atomic_hold_decl == NULL_TREE)
39039 {
39040 atomic_hold_decl
39041 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39042 get_identifier ("__atomic_feholdexcept"),
39043 build_function_type_list (void_type_node,
39044 double_ptr_type_node,
39045 NULL_TREE));
39046 TREE_PUBLIC (atomic_hold_decl) = 1;
39047 DECL_EXTERNAL (atomic_hold_decl) = 1;
39048 }
39049
39050 if (atomic_clear_decl == NULL_TREE)
39051 {
39052 atomic_clear_decl
39053 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39054 get_identifier ("__atomic_feclearexcept"),
39055 build_function_type_list (void_type_node,
39056 NULL_TREE));
39057 TREE_PUBLIC (atomic_clear_decl) = 1;
39058 DECL_EXTERNAL (atomic_clear_decl) = 1;
39059 }
39060
39061 tree const_double = build_qualified_type (double_type_node,
39062 TYPE_QUAL_CONST);
39063 tree const_double_ptr = build_pointer_type (const_double);
39064 if (atomic_update_decl == NULL_TREE)
39065 {
39066 atomic_update_decl
39067 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
39068 get_identifier ("__atomic_feupdateenv"),
39069 build_function_type_list (void_type_node,
39070 const_double_ptr,
39071 NULL_TREE));
39072 TREE_PUBLIC (atomic_update_decl) = 1;
39073 DECL_EXTERNAL (atomic_update_decl) = 1;
39074 }
39075
39076 tree fenv_var = create_tmp_var_raw (double_type_node);
39077 TREE_ADDRESSABLE (fenv_var) = 1;
39078 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
39079
39080 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
39081 *clear = build_call_expr (atomic_clear_decl, 0);
39082 *update = build_call_expr (atomic_update_decl, 1,
39083 fold_convert (const_double_ptr, fenv_addr));
39084 #endif
39085 return;
39086 }
39087
39088 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
39089 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
39090 tree call_mffs = build_call_expr (mffs, 0);
39091
39092 /* Generates the equivalent of feholdexcept (&fenv_var)
39093
39094 *fenv_var = __builtin_mffs ();
39095 double fenv_hold;
39096 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
39097 __builtin_mtfsf (0xff, fenv_hold); */
39098
39099 /* Mask to clear everything except for the rounding modes and non-IEEE
39100 arithmetic flag. */
39101 const unsigned HOST_WIDE_INT hold_exception_mask =
39102 HOST_WIDE_INT_C (0xffffffff00000007);
39103
39104 tree fenv_var = create_tmp_var_raw (double_type_node);
39105
39106 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
39107
39108 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
39109 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39110 build_int_cst (uint64_type_node,
39111 hold_exception_mask));
39112
39113 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39114 fenv_llu_and);
39115
39116 tree hold_mtfsf = build_call_expr (mtfsf, 2,
39117 build_int_cst (unsigned_type_node, 0xff),
39118 fenv_hold_mtfsf);
39119
39120 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
39121
39122 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
39123
39124 double fenv_clear = __builtin_mffs ();
39125 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
39126 __builtin_mtfsf (0xff, fenv_clear); */
39127
39128 /* Mask to clear the exception and status bits, zeroing the entire
39129 lower word of the FPSCR. */
39130 const unsigned HOST_WIDE_INT clear_exception_mask =
39131 HOST_WIDE_INT_C (0xffffffff00000000);
39132
39133 tree fenv_clear = create_tmp_var_raw (double_type_node);
39134
39135 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
39136
39137 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
39138 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
39139 fenv_clean_llu,
39140 build_int_cst (uint64_type_node,
39141 clear_exception_mask));
39142
39143 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39144 fenv_clear_llu_and);
39145
39146 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39147 build_int_cst (unsigned_type_node, 0xff),
39148 fenv_clear_mtfsf);
39149
39150 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39151
39152 /* Generates the equivalent of feupdateenv (&fenv_var)
39153
39154 double old_fenv = __builtin_mffs ();
39155 double fenv_update;
39156 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39157 (*(uint64_t*)fenv_var & 0x1ff80fff);
39158 __builtin_mtfsf (0xff, fenv_update); */
39159
39160 const unsigned HOST_WIDE_INT update_exception_mask =
39161 HOST_WIDE_INT_C (0xffffffff1fffff00);
39162 const unsigned HOST_WIDE_INT new_exception_mask =
39163 HOST_WIDE_INT_C (0x1ff80fff);
39164
39165 tree old_fenv = create_tmp_var_raw (double_type_node);
39166 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39167
39168 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39169 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39170 build_int_cst (uint64_type_node,
39171 update_exception_mask));
39172
39173 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39174 build_int_cst (uint64_type_node,
39175 new_exception_mask));
39176
39177 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39178 old_llu_and, new_llu_and);
39179
39180 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39181 new_llu_mask);
39182
39183 tree update_mtfsf = build_call_expr (mtfsf, 2,
39184 build_int_cst (unsigned_type_node, 0xff),
39185 fenv_update_mtfsf);
39186
39187 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39188 }
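/* The HOLD, CLEAR and UPDATE trees built above are consumed by the
   generic C11 atomics expansion: roughly, HOLD runs before the
   compare-and-exchange loop of an atomic floating-point compound
   assignment, CLEAR runs when an iteration fails and must retry, and
   UPDATE runs once the exchange succeeds (a sketch of the generic
   usage, not target-specific code).  */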
39189
39190 void
39191 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39192 {
39193 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39194
39195 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39196 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39197
39198 /* The destination of the vmrgew instruction layout is:
39199 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39200 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39201 vmrgew instruction will be correct. */
39202 if (BYTES_BIG_ENDIAN)
39203 {
39204 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39205 GEN_INT (0)));
39206 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39207 GEN_INT (3)));
39208 }
39209 else
39210 {
39211 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39212 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39213 }
39214
39215 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39216 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39217
39218 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39219 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39220
39221 if (BYTES_BIG_ENDIAN)
39222 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39223 else
39224 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39225 }
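/* Element flow for the big-endian path above (a sketch): with
   src1 = {a0,a1} and src2 = {b0,b1}, the xxpermdi insns give
   rtx_tmp0 = {a0,b0} and rtx_tmp1 = {a1,b1}; xvcvdpsp leaves the
   converted singles in words 0 and 2 of rtx_tmp2/rtx_tmp3; and the
   final vmrgew interleaves those words into dst = {a0',a1',b0',b1'},
   where the primes denote conversion to single precision.  */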
39226
39227 void
39228 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39229 {
39230 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39231
39232 rtx_tmp0 = gen_reg_rtx (V2DImode);
39233 rtx_tmp1 = gen_reg_rtx (V2DImode);
39234
39235 /* The destination of the vmrgew instruction layout is:
39236 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39237 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39238 vmrgew instruction will be correct. */
39239 if (BYTES_BIG_ENDIAN)
39240 {
39241 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39242 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39243 }
39244 else
39245 {
39246 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39247 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39248 }
39249
39250 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39251 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39252
39253 if (signed_convert)
39254 {
39255 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39256 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39257 }
39258 else
39259 {
39260 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39261 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39262 }
39263
39264 if (BYTES_BIG_ENDIAN)
39265 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39266 else
39267 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39268 }
39269
39270 void
39271 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39272 rtx src2)
39273 {
39274 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39275
39276 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39277 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39278
39279 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39280 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39281
39282 rtx_tmp2 = gen_reg_rtx (V4SImode);
39283 rtx_tmp3 = gen_reg_rtx (V4SImode);
39284
39285 if (signed_convert)
39286 {
39287 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39288 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39289 }
39290 else
39291 {
39292 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39293 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39294 }
39295
39296 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39297 }
39298
39299 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39300
39301 static bool
39302 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39303 optimization_type opt_type)
39304 {
39305 switch (op)
39306 {
39307 case rsqrt_optab:
39308 return (opt_type == OPTIMIZE_FOR_SPEED
39309 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39310
39311 default:
39312 return true;
39313 }
39314 }
39315
39316 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39317
39318 static HOST_WIDE_INT
39319 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39320 {
39321 if (TREE_CODE (exp) == STRING_CST
39322 && (STRICT_ALIGNMENT || !optimize_size))
39323 return MAX (align, BITS_PER_WORD);
39324 return align;
39325 }
39326
39327 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39328
39329 static HOST_WIDE_INT
39330 rs6000_starting_frame_offset (void)
39331 {
39332 if (FRAME_GROWS_DOWNWARD)
39333 return 0;
39334 return RS6000_STARTING_FRAME_OFFSET;
39335 }
39336 \f
39337
39338 /* Create an alias for a mangled name where we have changed the mangling (in
39339 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39340 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39341
39342 #if TARGET_ELF && RS6000_WEAK
39343 static void
39344 rs6000_globalize_decl_name (FILE * stream, tree decl)
39345 {
39346 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39347
39348 targetm.asm_out.globalize_label (stream, name);
39349
39350 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39351 {
39352 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39353 const char *old_name;
39354
39355 ieee128_mangling_gcc_8_1 = true;
39356 lang_hooks.set_decl_assembler_name (decl);
39357 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39358 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39359 ieee128_mangling_gcc_8_1 = false;
39360
39361 if (strcmp (name, old_name) != 0)
39362 {
39363 fprintf (stream, "\t.weak %s\n", old_name);
39364 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39365 }
39366 }
39367 }
39368 #endif
39369
39370 \f
39371 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39372 function names from <foo>l to <foo>f128 if the default long double type is
39373 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39374 include file switches the names on systems that support long double as IEEE
39375 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39376 In the future, glibc will export names like __ieee128_sinf128 and we can
39377 switch to using those instead of using sinf128, which pollutes the user's
39378 namespace.
39379
39380 This will switch the names for Fortran math functions as well (which doesn't
39381 use math.h). However, Fortran needs other changes to the compiler and
39382 library before you can switch the real*16 type at compile time.
39383
39384 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39385 only do this if the default is that long double is IBM extended double, and
39386 the user asked for IEEE 128-bit. */
39387
39388 static tree
39389 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39390 {
39391 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39392 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39393 {
39394 size_t len = IDENTIFIER_LENGTH (id);
39395 const char *name = IDENTIFIER_POINTER (id);
39396
39397 if (name[len - 1] == 'l')
39398 {
39399 bool uses_ieee128_p = false;
39400 tree type = TREE_TYPE (decl);
39401 machine_mode ret_mode = TYPE_MODE (type);
39402
39403 /* See if the function returns an IEEE 128-bit floating point type or
39404 complex type. */
39405 if (ret_mode == TFmode || ret_mode == TCmode)
39406 uses_ieee128_p = true;
39407 else
39408 {
39409 function_args_iterator args_iter;
39410 tree arg;
39411
39412 /* See if the function passes an IEEE 128-bit floating point type
39413 or complex type. */
39414 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39415 {
39416 machine_mode arg_mode = TYPE_MODE (arg);
39417 if (arg_mode == TFmode || arg_mode == TCmode)
39418 {
39419 uses_ieee128_p = true;
39420 break;
39421 }
39422 }
39423 }
39424
39425 /* If we passed or returned an IEEE 128-bit floating point type,
39426 change the name. */
39427 if (uses_ieee128_p)
39428 {
39429 char *name2 = (char *) alloca (len + 4);
39430 memcpy (name2, name, len - 1);
39431 strcpy (name2 + len - 1, "f128");
39432 id = get_identifier (name2);
39433 }
39434 }
39435 }
39436
39437 return id;
39438 }
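/* For example, the identifier "sinl" (len == 4) is rewritten to
   "sinf128": the len - 1 leading bytes plus "f128" and its NUL fill
   exactly the len + 4 bytes allocated above.  */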
39439
39440 \f
39441 struct gcc_target targetm = TARGET_INITIALIZER;
39442
39443 #include "gt-rs6000.h"