/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"		/* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"		/* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable; we call it so we
   can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};

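/* An illustrative composition of the table above (everything here follows
   directly from the entries): -mrecip=divf,rsqrtd would OR together
   "divf" = (RECIP_SF_DIV | RECIP_V4SF_DIV) and
   "rsqrtd" = (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT), enabling only the
   single-precision divide estimates and the double-precision reciprocal
   square root estimates.  */
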
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};

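/* An illustrative (hypothetical) use of the mapping above via the
   documented builtin:

     if (__builtin_cpu_is ("power9"))
       ... take a POWER9-tuned path ...

   The builtin tests the AT_PLATFORM value that a new-enough glibc caches
   in the TCB against PPC_PLATFORM_POWER9.  */
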
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};

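/* An illustrative use of the mapping above:

     if (__builtin_cpu_supports ("vsx"))
       ... take a VSX path ...

   The id field selects which auxiliary vector word is tested: 0 for
   AT_HWCAP, 1 for AT_HWCAP2.  */
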
/* On PowerPC, we have a limited number of target clones that we care about,
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,		/* default clone.  */
  CLONE_ISA_2_05,		/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,		/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,		/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,		/* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};

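/* A sketch of the mechanism this table drives (the function and its body
   are hypothetical; the attribute itself is the documented target_clones
   interface for function multi-versioning):

     __attribute__ ((target_clones ("cpu=power9,cpu=power8,default")))
     long mod3 (long a) { return a % 3; }

   One clone is emitted per option, and the resolver selects the
   highest-ISA clone whose rs6000_clone_map name satisfies
   __builtin_cpu_supports.  */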

/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when checking for
   a legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */

/* Per-mode reload information, including the masks of valid addressing
   modes for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	/* INSN to reload for loading.  */
  enum insn_code reload_store;	/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG];  /* Valid address masks.  */
  bool scalar_in_vmx_p;		/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

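/* For example (the values here are illustrative, not a statement about any
   particular configuration): if reg_addr[DFmode].addr_mask[RELOAD_REG_ANY]
   has RELOAD_REG_PRE_MODIFY set, then mode_supports_pre_modify_p (DFmode)
   is true and an address such as
   (mem:DF (pre_modify (reg 1) (plus (reg 1) (const_int -8)))) is valid.  */
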
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;
		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  return store_data_bypass_p (out_insn, in_insn);
}

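/* This predicate is intended as the guard function of define_bypass
   entries in the pipeline descriptions; schematically (the reservation
   names below are placeholders, not actual entries):

     (define_bypass 2 "fp-insn" "store-insn" "rs6000_store_data_bypass_p")  */
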
\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

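/* A note on reading the cost tables below: COSTS_N_INSNS (N) is the
   standard rtl.h macro expressing a cost of N instructions relative to a
   single add, so e.g. COSTS_N_INSNS (18) for divsi models an integer
   divide as roughly eighteen adds.  */
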
/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),	    /* mulsi_const */
  COSTS_N_INSNS (6/2),	    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),	    /* divsi */
  COSTS_N_INSNS (70/2),	    /* divdi */
  COSTS_N_INSNS (10/2),	    /* fp */
  COSTS_N_INSNS (10/2),	    /* dmul */
  COSTS_N_INSNS (74/2),	    /* sdiv */
  COSTS_N_INSNS (74/2),	    /* ddiv */
  128,			    /* cache line size */
  32,			    /* l1 cache */
  512,			    /* l2 cache */
  6,			    /* streams */
  0,			    /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

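/* To make the expansion above concrete: a hypothetical rs6000-builtin.def
   entry of the form

     RS6000_BUILTIN_2 (VADDUBM, "__builtin_altivec_vaddubm",
		       RS6000_BTM_ALTIVEC, RS6000_BTC_BINARY,
		       CODE_FOR_addv16qi3)

   would expand, under the RS6000_BUILTIN_2 definition above, to the
   initializer

     { "__builtin_altivec_vaddubm", CODE_FOR_addv16qi3,
       RS6000_BTM_ALTIVEC, RS6000_BTC_BINARY },  */
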
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *,rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  /* GPRs */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
  /* FPRs */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
  /* VRs */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
  /* lr ctr ca ap */
     "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
  /* vrsave vscr sfp */
      "vrsave", "vscr", "sfp",
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  /* GPRs */
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  /* FPRs */
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  /* VRs */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* lr ctr ca ap */
    "lr",  "ctr",   "ca",   "ap",
  /* cr0..cr7 */
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",   1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, false, NULL, NULL }
};
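/* Illustrative uses of two of the attributes above (the declarations
   themselves are hypothetical):

     int far_call (void) __attribute__ ((longcall));
     struct layout { char c; int i; } __attribute__ ((ms_struct));  */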
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
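/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0)
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31).  */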
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1674 rs6000_builtin_support_vector_misalignment
1675 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1676 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1677 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1678 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1679 rs6000_builtin_vectorization_cost
1680 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1681 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1682 rs6000_preferred_simd_mode
1683 #undef TARGET_VECTORIZE_INIT_COST
1684 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1685 #undef TARGET_VECTORIZE_ADD_STMT_COST
1686 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1687 #undef TARGET_VECTORIZE_FINISH_COST
1688 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1689 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1690 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1691
1692 #undef TARGET_INIT_BUILTINS
1693 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1694 #undef TARGET_BUILTIN_DECL
1695 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1696
1697 #undef TARGET_FOLD_BUILTIN
1698 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1699 #undef TARGET_GIMPLE_FOLD_BUILTIN
1700 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1701
1702 #undef TARGET_EXPAND_BUILTIN
1703 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1704
1705 #undef TARGET_MANGLE_TYPE
1706 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1707
1708 #undef TARGET_INIT_LIBFUNCS
1709 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1710
1711 #if TARGET_MACHO
1712 #undef TARGET_BINDS_LOCAL_P
1713 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1714 #endif
1715
1716 #undef TARGET_MS_BITFIELD_LAYOUT_P
1717 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1718
1719 #undef TARGET_ASM_OUTPUT_MI_THUNK
1720 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1721
1722 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1723 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1724
1725 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1726 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1727
1728 #undef TARGET_REGISTER_MOVE_COST
1729 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1730 #undef TARGET_MEMORY_MOVE_COST
1731 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1732 #undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
1733 #define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
1734 rs6000_ira_change_pseudo_allocno_class
1735 #undef TARGET_CANNOT_COPY_INSN_P
1736 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1737 #undef TARGET_RTX_COSTS
1738 #define TARGET_RTX_COSTS rs6000_rtx_costs
1739 #undef TARGET_ADDRESS_COST
1740 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1741 #undef TARGET_INSN_COST
1742 #define TARGET_INSN_COST rs6000_insn_cost
1743
1744 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1745 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1746
1747 #undef TARGET_PROMOTE_FUNCTION_MODE
1748 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1749
1750 #undef TARGET_RETURN_IN_MEMORY
1751 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1752
1753 #undef TARGET_RETURN_IN_MSB
1754 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1755
1756 #undef TARGET_SETUP_INCOMING_VARARGS
1757 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1758
1759 /* Always strict argument naming on rs6000. */
1760 #undef TARGET_STRICT_ARGUMENT_NAMING
1761 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1762 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1763 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_SPLIT_COMPLEX_ARG
1765 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1766 #undef TARGET_MUST_PASS_IN_STACK
1767 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1768 #undef TARGET_PASS_BY_REFERENCE
1769 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1770 #undef TARGET_ARG_PARTIAL_BYTES
1771 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1772 #undef TARGET_FUNCTION_ARG_ADVANCE
1773 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1774 #undef TARGET_FUNCTION_ARG
1775 #define TARGET_FUNCTION_ARG rs6000_function_arg
1776 #undef TARGET_FUNCTION_ARG_PADDING
1777 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1778 #undef TARGET_FUNCTION_ARG_BOUNDARY
1779 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1780
1781 #undef TARGET_BUILD_BUILTIN_VA_LIST
1782 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1783
1784 #undef TARGET_EXPAND_BUILTIN_VA_START
1785 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1786
1787 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1788 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1789
1790 #undef TARGET_EH_RETURN_FILTER_MODE
1791 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1792
1793 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1794 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1795
1796 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1797 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1798
1799 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1800 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1801
1802 #undef TARGET_FLOATN_MODE
1803 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1804
1805 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1806 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1807
1808 #undef TARGET_MD_ASM_ADJUST
1809 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1810
1811 #undef TARGET_OPTION_OVERRIDE
1812 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1813
1814 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1815 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1816 rs6000_builtin_vectorized_function
1817
1818 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1819 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1820 rs6000_builtin_md_vectorized_function
1821
1822 #undef TARGET_STACK_PROTECT_GUARD
1823 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1824
1825 #if !TARGET_MACHO
1826 #undef TARGET_STACK_PROTECT_FAIL
1827 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1828 #endif
1829
1830 #ifdef HAVE_AS_TLS
1831 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1832 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1833 #endif
1834
1835 /* Use a 32-bit anchor range. This leads to sequences like:
1836
1837 addis tmp,anchor,high
1838 add dest,tmp,low
1839
1840 where tmp itself acts as an anchor, and can be shared between
1841 accesses to the same 64k page. */
1842 #undef TARGET_MIN_ANCHOR_OFFSET
1843 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1844 #undef TARGET_MAX_ANCHOR_OFFSET
1845 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1846 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1847 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1848 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1849 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1850
1851 #undef TARGET_BUILTIN_RECIPROCAL
1852 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1853
1854 #undef TARGET_SECONDARY_RELOAD
1855 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1856 #undef TARGET_SECONDARY_MEMORY_NEEDED
1857 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1859 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1860
1861 #undef TARGET_LEGITIMATE_ADDRESS_P
1862 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1863
1864 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1865 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1866
1867 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1868 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1869
1870 #undef TARGET_CAN_ELIMINATE
1871 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1872
1873 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1874 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1875
1876 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1877 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1878
1879 #undef TARGET_TRAMPOLINE_INIT
1880 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1881
1882 #undef TARGET_FUNCTION_VALUE
1883 #define TARGET_FUNCTION_VALUE rs6000_function_value
1884
1885 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1886 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1887
1888 #undef TARGET_OPTION_SAVE
1889 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1890
1891 #undef TARGET_OPTION_RESTORE
1892 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1893
1894 #undef TARGET_OPTION_PRINT
1895 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1896
1897 #undef TARGET_CAN_INLINE_P
1898 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1899
1900 #undef TARGET_SET_CURRENT_FUNCTION
1901 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1902
1903 #undef TARGET_LEGITIMATE_CONSTANT_P
1904 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1905
1906 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1907 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1908
1909 #undef TARGET_CAN_USE_DOLOOP_P
1910 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1911
1912 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1913 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1914
1915 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1916 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1917 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1918 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1919 #undef TARGET_UNWIND_WORD_MODE
1920 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1921
1922 #undef TARGET_OFFLOAD_OPTIONS
1923 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1924
1925 #undef TARGET_C_MODE_FOR_SUFFIX
1926 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1927
1928 #undef TARGET_INVALID_BINARY_OP
1929 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1930
1931 #undef TARGET_OPTAB_SUPPORTED_P
1932 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1933
1934 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1935 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1936
1937 #undef TARGET_COMPARE_VERSION_PRIORITY
1938 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1939
1940 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1941 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1942 rs6000_generate_version_dispatcher_body
1943
1944 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1945 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1946 rs6000_get_function_versions_dispatcher
1947
1948 #undef TARGET_OPTION_FUNCTION_VERSIONS
1949 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1950
1951 #undef TARGET_HARD_REGNO_NREGS
1952 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1953 #undef TARGET_HARD_REGNO_MODE_OK
1954 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1955
1956 #undef TARGET_MODES_TIEABLE_P
1957 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1958
1959 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1960 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1961 rs6000_hard_regno_call_part_clobbered
1962
1963 #undef TARGET_SLOW_UNALIGNED_ACCESS
1964 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1965
1966 #undef TARGET_CAN_CHANGE_MODE_CLASS
1967 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1968
1969 #undef TARGET_CONSTANT_ALIGNMENT
1970 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1971
1972 #undef TARGET_STARTING_FRAME_OFFSET
1973 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1974
1975 #if TARGET_ELF && RS6000_WEAK
1976 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1977 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1978 #endif
1979
1980 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1981 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1982
1983 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1984 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1985 \f
1986
1987 /* Processor table. */
1988 struct rs6000_ptt
1989 {
1990 const char *const name; /* Canonical processor name. */
1991 const enum processor_type processor; /* Processor type enum value. */
1992 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1993 };
1994
1995 static struct rs6000_ptt const processor_target_table[] =
1996 {
1997 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1998 #include "rs6000-cpus.def"
1999 #undef RS6000_CPU
2000 };
2001
2002 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2003 name is invalid. */
2004
2005 static int
2006 rs6000_cpu_name_lookup (const char *name)
2007 {
2008 size_t i;
2009
2010 if (name != NULL)
2011 {
2012 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2013 if (! strcmp (name, processor_target_table[i].name))
2014 return (int)i;
2015 }
2016
2017 return -1;
2018 }
2019
2020 \f
2021 /* Return number of consecutive hard regs needed starting at reg REGNO
2022 to hold something of mode MODE.
2023 This is ordinarily the length in words of a value of mode MODE
2024 but can be less for certain modes in special long registers.
2025
2026 POWER and PowerPC GPRs hold 32 bits worth;
2027 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2028
2029 static int
2030 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2031 {
2032 unsigned HOST_WIDE_INT reg_size;
2033
2034 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2035 128-bit floating point that can go in vector registers, which use VSX
2036 memory addressing. */
2037 if (FP_REGNO_P (regno))
2038 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2039 ? UNITS_PER_VSX_WORD
2040 : UNITS_PER_FP_WORD);
2041
2042 else if (ALTIVEC_REGNO_P (regno))
2043 reg_size = UNITS_PER_ALTIVEC_WORD;
2044
2045 else
2046 reg_size = UNITS_PER_WORD;
2047
2048 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2049 }
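/* The division above rounds up.  For example, assuming UNITS_PER_FP_WORD
   is 8 and UNITS_PER_VSX_WORD is 16, a 16-byte IEEE 128-bit value needs
   (16 + 8 - 1) / 8 = 2 FPRs when it cannot use VSX addressing, but only
   (16 + 16 - 1) / 16 = 1 register when it can.  */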
2050
2051 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2052 MODE. */
2053 static int
2054 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2055 {
2056 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2057
2058 if (COMPLEX_MODE_P (mode))
2059 mode = GET_MODE_INNER (mode);
2060
2061 /* PTImode can only go in GPRs.  We use PTImode for quad word memory
2062 operations, which require even/odd register combinations.  Don't allow
2063 quad words in the argument or frame pointer registers, just registers
2064 0..31. */
2065 if (mode == PTImode)
2066 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2067 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2068 && ((regno & 1) == 0));
2069
2070 /* VSX registers that overlap the FPR registers are larger than for non-VSX
2071 implementations. Don't allow an item to be split between a FP register
2072 and an Altivec register. Allow TImode in all VSX registers if the user
2073 asked for it. */
2074 if (TARGET_VSX && VSX_REGNO_P (regno)
2075 && (VECTOR_MEM_VSX_P (mode)
2076 || FLOAT128_VECTOR_P (mode)
2077 || reg_addr[mode].scalar_in_vmx_p
2078 || mode == TImode
2079 || (TARGET_VADDUQM && mode == V1TImode)))
2080 {
2081 if (FP_REGNO_P (regno))
2082 return FP_REGNO_P (last_regno);
2083
2084 if (ALTIVEC_REGNO_P (regno))
2085 {
2086 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2087 return 0;
2088
2089 return ALTIVEC_REGNO_P (last_regno);
2090 }
2091 }
2092
2093 /* The GPRs can hold any mode, but values bigger than one register
2094 cannot go past R31. */
2095 if (INT_REGNO_P (regno))
2096 return INT_REGNO_P (last_regno);
2097
2098 /* The float registers (except for VSX vector modes) can only hold floating
2099 modes and DImode. */
2100 if (FP_REGNO_P (regno))
2101 {
2102 if (FLOAT128_VECTOR_P (mode))
2103 return false;
2104
2105 if (SCALAR_FLOAT_MODE_P (mode)
2106 && (mode != TDmode || (regno % 2) == 0)
2107 && FP_REGNO_P (last_regno))
2108 return 1;
2109
2110 if (GET_MODE_CLASS (mode) == MODE_INT)
2111 {
2112 	  if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2113 return 1;
2114
2115 if (TARGET_P8_VECTOR && (mode == SImode))
2116 return 1;
2117
2118 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2119 return 1;
2120 }
2121
2122 return 0;
2123 }
2124
2125 /* The CR register can only hold CC modes. */
2126 if (CR_REGNO_P (regno))
2127 return GET_MODE_CLASS (mode) == MODE_CC;
2128
2129 if (CA_REGNO_P (regno))
2130 return mode == Pmode || mode == SImode;
2131
2132 /* AltiVec vector modes can only go in AltiVec registers. */
2133 if (ALTIVEC_REGNO_P (regno))
2134 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2135 || mode == V1TImode);
2136
2137 /* We cannot put non-VSX TImode or PTImode anywhere except in general
2138 registers, and the value must fit within the register set. */
2139
2140 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2141 }
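/* Some concrete consequences of the tests above: TDmode must start on an
   even-numbered FPR because of the "(regno % 2) == 0" check, SImode is
   allowed in an FPR only with TARGET_P8_VECTOR, and QImode/HImode are
   allowed there only with TARGET_P9_VECTOR.  */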
2142
2143 /* Implement TARGET_HARD_REGNO_NREGS. */
2144
2145 static unsigned int
2146 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2147 {
2148 return rs6000_hard_regno_nregs[mode][regno];
2149 }
2150
2151 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2152
2153 static bool
2154 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2155 {
2156 return rs6000_hard_regno_mode_ok_p[mode][regno];
2157 }
2158
2159 /* Implement TARGET_MODES_TIEABLE_P.
2160
2161 PTImode cannot tie with other modes because PTImode is restricted to even
2162 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2163 57744).
2164
2165 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2166 128-bit floating point on VSX systems ties with other vectors. */
2167
2168 static bool
2169 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2170 {
2171 if (mode1 == PTImode)
2172 return mode2 == PTImode;
2173 if (mode2 == PTImode)
2174 return false;
2175
2176 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2177 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2179 return false;
2180
2181 if (SCALAR_FLOAT_MODE_P (mode1))
2182 return SCALAR_FLOAT_MODE_P (mode2);
2183 if (SCALAR_FLOAT_MODE_P (mode2))
2184 return false;
2185
2186 if (GET_MODE_CLASS (mode1) == MODE_CC)
2187 return GET_MODE_CLASS (mode2) == MODE_CC;
2188 if (GET_MODE_CLASS (mode2) == MODE_CC)
2189 return false;
2190
2191 return true;
2192 }
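/* For instance, SFmode and DFmode tie (both are scalar float modes), and
   V4SImode and V2DFmode tie (both are Altivec/VSX vector modes), but
   TImode never ties with PTImode, and CCmode ties only with the other
   CC modes.  */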
2193
2194 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2195
2196 static bool
2197 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2198 unsigned int regno, machine_mode mode)
2199 {
2200 if (TARGET_32BIT
2201 && TARGET_POWERPC64
2202 && GET_MODE_SIZE (mode) > 4
2203 && INT_REGNO_P (regno))
2204 return true;
2205
2206 if (TARGET_VSX
2207 && FP_REGNO_P (regno)
2208 && GET_MODE_SIZE (mode) > 8
2209 && !FLOAT128_2REG_P (mode))
2210 return true;
2211
2212 return false;
2213 }
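/* For example, with -m32 -mpowerpc64, a DImode value lives in a single
   64-bit GPR, but the 32-bit ABI only preserves the low 32 bits across
   calls, so the first test above reports it as part-clobbered.  Likewise
   with VSX: only the 8-byte FPR portion of a 16-byte value in a float
   register is preserved across a call.  */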
2214
2215 /* Print interesting facts about registers. */
2216 static void
2217 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2218 {
2219 int r, m;
2220
2221 for (r = first_regno; r <= last_regno; ++r)
2222 {
2223 const char *comma = "";
2224 int len;
2225
2226 if (first_regno == last_regno)
2227 fprintf (stderr, "%s:\t", reg_name);
2228 else
2229 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2230
2231 len = 8;
2232 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2233 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2234 {
2235 if (len > 70)
2236 {
2237 fprintf (stderr, ",\n\t");
2238 len = 8;
2239 comma = "";
2240 }
2241
2242 if (rs6000_hard_regno_nregs[m][r] > 1)
2243 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2244 rs6000_hard_regno_nregs[m][r]);
2245 else
2246 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2247
2248 comma = ", ";
2249 }
2250
2251 if (call_used_regs[r])
2252 {
2253 if (len > 70)
2254 {
2255 fprintf (stderr, ",\n\t");
2256 len = 8;
2257 comma = "";
2258 }
2259
2260 len += fprintf (stderr, "%s%s", comma, "call-used");
2261 comma = ", ";
2262 }
2263
2264 if (fixed_regs[r])
2265 {
2266 if (len > 70)
2267 {
2268 fprintf (stderr, ",\n\t");
2269 len = 8;
2270 comma = "";
2271 }
2272
2273 len += fprintf (stderr, "%s%s", comma, "fixed");
2274 comma = ", ";
2275 }
2276
2277 if (len > 70)
2278 {
2279 fprintf (stderr, ",\n\t");
2280 comma = "";
2281 }
2282
2283 len += fprintf (stderr, "%sreg-class = %s", comma,
2284 reg_class_names[(int)rs6000_regno_regclass[r]]);
2285 comma = ", ";
2286
2287 if (len > 70)
2288 {
2289 fprintf (stderr, ",\n\t");
2290 comma = "";
2291 }
2292
2293 fprintf (stderr, "%sregno = %d\n", comma, r);
2294 }
2295 }
2296
2297 static const char *
2298 rs6000_debug_vector_unit (enum rs6000_vector v)
2299 {
2300 const char *ret;
2301
2302 switch (v)
2303 {
2304 case VECTOR_NONE: ret = "none"; break;
2305 case VECTOR_ALTIVEC: ret = "altivec"; break;
2306 case VECTOR_VSX: ret = "vsx"; break;
2307 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2308 default: ret = "unknown"; break;
2309 }
2310
2311 return ret;
2312 }
2313
2314 /* Inner function printing just the address mask for a particular reload
2315 register class. */
2316 DEBUG_FUNCTION char *
2317 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2318 {
2319 static char ret[8];
2320 char *p = ret;
2321
2322 if ((mask & RELOAD_REG_VALID) != 0)
2323 *p++ = 'v';
2324 else if (keep_spaces)
2325 *p++ = ' ';
2326
2327 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2328 *p++ = 'm';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_INDEXED) != 0)
2333 *p++ = 'i';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2338 *p++ = 'O';
2339 else if ((mask & RELOAD_REG_OFFSET) != 0)
2340 *p++ = 'o';
2341 else if (keep_spaces)
2342 *p++ = ' ';
2343
2344 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2345 *p++ = '+';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_AND_M16) != 0)
2355 *p++ = '&';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 *p = '\0';
2360
2361 return ret;
2362 }
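/* For example, a mask of RELOAD_REG_VALID | RELOAD_REG_INDEXED
   | RELOAD_REG_OFFSET prints as "vio", or as "v io   " when KEEP_SPACES
   pads each unset flag so that the columns stay aligned.  */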
2363
2364 /* Print the address masks in a human readable fashion. */
2365 DEBUG_FUNCTION void
2366 rs6000_debug_print_mode (ssize_t m)
2367 {
2368 ssize_t rc;
2369 int spaces = 0;
2370
2371 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2372 for (rc = 0; rc < N_RELOAD_REG; rc++)
2373 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2374 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2375
2376 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2377 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2378 {
2379 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2380 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2381 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2382 spaces = 0;
2383 }
2384 else
2385 spaces += sizeof (" Reload=sl") - 1;
2386
2387 if (reg_addr[m].scalar_in_vmx_p)
2388 {
2389 fprintf (stderr, "%*s Upper=y", spaces, "");
2390 spaces = 0;
2391 }
2392 else
2393 spaces += sizeof (" Upper=y") - 1;
2394
2395 if (rs6000_vector_unit[m] != VECTOR_NONE
2396 || rs6000_vector_mem[m] != VECTOR_NONE)
2397 {
2398 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2399 spaces, "",
2400 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2401 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2402 }
2403
2404 fputs ("\n", stderr);
2405 }
2406
2407 #define DEBUG_FMT_ID "%-32s= "
2408 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2409 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2410 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
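/* For example, DEBUG_FMT_D expands to "%-32s= %d\n": a left-justified
   32-column key, "= ", and the decimal value.  */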
2411
2412 /* Print various interesting information with -mdebug=reg. */
2413 static void
2414 rs6000_debug_reg_global (void)
2415 {
2416 static const char *const tf[2] = { "false", "true" };
2417 const char *nl = (const char *)0;
2418 int m;
2419 size_t m1, m2, v;
2420 char costly_num[20];
2421 char nop_num[20];
2422 char flags_buffer[40];
2423 const char *costly_str;
2424 const char *nop_str;
2425 const char *trace_str;
2426 const char *abi_str;
2427 const char *cmodel_str;
2428 struct cl_target_option cl_opts;
2429
2430 /* Modes we want tieable information on. */
2431 static const machine_mode print_tieable_modes[] = {
2432 QImode,
2433 HImode,
2434 SImode,
2435 DImode,
2436 TImode,
2437 PTImode,
2438 SFmode,
2439 DFmode,
2440 TFmode,
2441 IFmode,
2442 KFmode,
2443 SDmode,
2444 DDmode,
2445 TDmode,
2446 V16QImode,
2447 V8HImode,
2448 V4SImode,
2449 V2DImode,
2450 V1TImode,
2451 V32QImode,
2452 V16HImode,
2453 V8SImode,
2454 V4DImode,
2455 V2TImode,
2456 V4SFmode,
2457 V2DFmode,
2458 V8SFmode,
2459 V4DFmode,
2460 CCmode,
2461 CCUNSmode,
2462 CCEQmode,
2463 };
2464
2465 /* Virtual regs we are interested in. */
2466 static const struct {
2467 int regno; /* register number. */
2468 const char *name; /* register name. */
2469 } virtual_regs[] = {
2470 { STACK_POINTER_REGNUM, "stack pointer:" },
2471 { TOC_REGNUM, "toc: " },
2472 { STATIC_CHAIN_REGNUM, "static chain: " },
2473 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2474 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2475 { ARG_POINTER_REGNUM, "arg pointer: " },
2476 { FRAME_POINTER_REGNUM, "frame pointer:" },
2477 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2478 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2479 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2480 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2481 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2482 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2483 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2484 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2485 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2486 };
2487
2488 fputs ("\nHard register information:\n", stderr);
2489 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2490 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2491 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2492 LAST_ALTIVEC_REGNO,
2493 "vs");
2494 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2495 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2496 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2497 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2498 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2499 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2500
2501 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2502 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2503 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2504
2505 fprintf (stderr,
2506 "\n"
2507 "d reg_class = %s\n"
2508 "f reg_class = %s\n"
2509 "v reg_class = %s\n"
2510 "wa reg_class = %s\n"
2511 "wd reg_class = %s\n"
2512 "we reg_class = %s\n"
2513 "wf reg_class = %s\n"
2514 "wg reg_class = %s\n"
2515 "wi reg_class = %s\n"
2516 "wp reg_class = %s\n"
2517 "wq reg_class = %s\n"
2518 "wr reg_class = %s\n"
2519 "ws reg_class = %s\n"
2520 "wt reg_class = %s\n"
2521 "wv reg_class = %s\n"
2522 "ww reg_class = %s\n"
2523 "wx reg_class = %s\n"
2524 "wA reg_class = %s\n"
2525 "\n",
2526 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2527 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2528 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2529 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2530 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2531 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2532 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2533 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2534 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2535 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2536 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2537 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2538 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2539 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2540 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]]);
2544
2545 nl = "\n";
2546 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2547 rs6000_debug_print_mode (m);
2548
2549 fputs ("\n", stderr);
2550
2551 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2552 {
2553 machine_mode mode1 = print_tieable_modes[m1];
2554 bool first_time = true;
2555
2556 nl = (const char *)0;
2557 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2558 {
2559 machine_mode mode2 = print_tieable_modes[m2];
2560 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2561 {
2562 if (first_time)
2563 {
2564 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2565 nl = "\n";
2566 first_time = false;
2567 }
2568
2569 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2570 }
2571 }
2572
2573 if (!first_time)
2574 fputs ("\n", stderr);
2575 }
2576
2577 if (nl)
2578 fputs (nl, stderr);
2579
2580 if (rs6000_recip_control)
2581 {
2582 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2583
2584 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2585 if (rs6000_recip_bits[m])
2586 {
2587 fprintf (stderr,
2588 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2589 GET_MODE_NAME (m),
2590 (RS6000_RECIP_AUTO_RE_P (m)
2591 ? "auto"
2592 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2593 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2594 ? "auto"
2595 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2596 }
2597
2598 fputs ("\n", stderr);
2599 }
2600
2601 if (rs6000_cpu_index >= 0)
2602 {
2603 const char *name = processor_target_table[rs6000_cpu_index].name;
2604 HOST_WIDE_INT flags
2605 = processor_target_table[rs6000_cpu_index].target_enable;
2606
2607 sprintf (flags_buffer, "-mcpu=%s flags", name);
2608 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2609 }
2610 else
2611 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2612
2613 if (rs6000_tune_index >= 0)
2614 {
2615 const char *name = processor_target_table[rs6000_tune_index].name;
2616 HOST_WIDE_INT flags
2617 = processor_target_table[rs6000_tune_index].target_enable;
2618
2619 sprintf (flags_buffer, "-mtune=%s flags", name);
2620 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2621 }
2622 else
2623 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2624
2625 cl_target_option_save (&cl_opts, &global_options);
2626 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2627 rs6000_isa_flags);
2628
2629 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2630 rs6000_isa_flags_explicit);
2631
2632 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2633 rs6000_builtin_mask);
2634
2635 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2636
2637 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2638 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2639
2640 switch (rs6000_sched_costly_dep)
2641 {
2642 case max_dep_latency:
2643 costly_str = "max_dep_latency";
2644 break;
2645
2646 case no_dep_costly:
2647 costly_str = "no_dep_costly";
2648 break;
2649
2650 case all_deps_costly:
2651 costly_str = "all_deps_costly";
2652 break;
2653
2654 case true_store_to_load_dep_costly:
2655 costly_str = "true_store_to_load_dep_costly";
2656 break;
2657
2658 case store_to_load_dep_costly:
2659 costly_str = "store_to_load_dep_costly";
2660 break;
2661
2662 default:
2663 costly_str = costly_num;
2664 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2665 break;
2666 }
2667
2668 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2669
2670 switch (rs6000_sched_insert_nops)
2671 {
2672 case sched_finish_regroup_exact:
2673 nop_str = "sched_finish_regroup_exact";
2674 break;
2675
2676 case sched_finish_pad_groups:
2677 nop_str = "sched_finish_pad_groups";
2678 break;
2679
2680 case sched_finish_none:
2681 nop_str = "sched_finish_none";
2682 break;
2683
2684 default:
2685 nop_str = nop_num;
2686 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2687 break;
2688 }
2689
2690 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2691
2692 switch (rs6000_sdata)
2693 {
2694 default:
2695 case SDATA_NONE:
2696 break;
2697
2698 case SDATA_DATA:
2699 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2700 break;
2701
2702 case SDATA_SYSV:
2703 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2704 break;
2705
2706 case SDATA_EABI:
2707 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2708 break;
2709
2710 }
2711
2712 switch (rs6000_traceback)
2713 {
2714 case traceback_default: trace_str = "default"; break;
2715 case traceback_none: trace_str = "none"; break;
2716 case traceback_part: trace_str = "part"; break;
2717 case traceback_full: trace_str = "full"; break;
2718 default: trace_str = "unknown"; break;
2719 }
2720
2721 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2722
2723 switch (rs6000_current_cmodel)
2724 {
2725 case CMODEL_SMALL: cmodel_str = "small"; break;
2726 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2727 case CMODEL_LARGE: cmodel_str = "large"; break;
2728 default: cmodel_str = "unknown"; break;
2729 }
2730
2731 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2732
2733 switch (rs6000_current_abi)
2734 {
2735 case ABI_NONE: abi_str = "none"; break;
2736 case ABI_AIX: abi_str = "aix"; break;
2737 case ABI_ELFv2: abi_str = "ELFv2"; break;
2738 case ABI_V4: abi_str = "V4"; break;
2739 case ABI_DARWIN: abi_str = "darwin"; break;
2740 default: abi_str = "unknown"; break;
2741 }
2742
2743 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2744
2745 if (rs6000_altivec_abi)
2746 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2747
2748 if (rs6000_darwin64_abi)
2749 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2750
2751 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2752 (TARGET_SOFT_FLOAT ? "true" : "false"));
2753
2754 if (TARGET_LINK_STACK)
2755 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2756
2757 if (TARGET_P8_FUSION)
2758 {
2759 char options[80];
2760
2761 strcpy (options, "power8");
2762 if (TARGET_P8_FUSION_SIGN)
2763 strcat (options, ", sign");
2764
2765 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2766 }
2767
2768 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2769 TARGET_SECURE_PLT ? "secure" : "bss");
2770 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2771 aix_struct_return ? "aix" : "sysv");
2772 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2773 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2774 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2775 tf[!!rs6000_align_branch_targets]);
2776 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2777 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2778 rs6000_long_double_type_size);
2779 if (rs6000_long_double_type_size > 64)
2780 {
2781 fprintf (stderr, DEBUG_FMT_S, "long double type",
2782 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2783 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2784 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2785 }
2786 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2787 (int)rs6000_sched_restricted_insns_priority);
2788 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2789 (int)END_BUILTINS);
2790 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2791 (int)RS6000_BUILTIN_COUNT);
2792
2793 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2794 (int)TARGET_FLOAT128_ENABLE_TYPE);
2795
2796 if (TARGET_VSX)
2797 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2798 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2799
2800 if (TARGET_DIRECT_MOVE_128)
2801 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2802 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2803 }
2804
2805 \f
2806 /* Update the addr mask bits in reg_addr to help secondary reload and
2807 legitimate address support figure out the appropriate addressing to
2808 use. */
2809
2810 static void
2811 rs6000_setup_reg_addr_masks (void)
2812 {
2813 ssize_t rc, reg, m, nregs;
2814 addr_mask_type any_addr_mask, addr_mask;
2815
2816 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2817 {
2818 machine_mode m2 = (machine_mode) m;
2819 bool complex_p = false;
2820 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2821 size_t msize;
2822
2823 if (COMPLEX_MODE_P (m2))
2824 {
2825 complex_p = true;
2826 m2 = GET_MODE_INNER (m2);
2827 }
2828
2829 msize = GET_MODE_SIZE (m2);
2830
2831 /* SDmode is special in that we want to access it only via REG+REG
2832 addressing on power7 and above, since we want to use the LFIWZX and
2833 STFIWZX instructions to load it. */
2834 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2835
2836 any_addr_mask = 0;
2837 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2838 {
2839 addr_mask = 0;
2840 reg = reload_reg_map[rc].reg;
2841
2842 /* Can mode values go in the GPR/FPR/Altivec registers? */
2843 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2844 {
2845 bool small_int_vsx_p = (small_int_p
2846 && (rc == RELOAD_REG_FPR
2847 || rc == RELOAD_REG_VMX));
2848
2849 nregs = rs6000_hard_regno_nregs[m][reg];
2850 addr_mask |= RELOAD_REG_VALID;
2851
2852 /* Indicate if the mode takes more than 1 physical register. If
2853 it takes a single register, indicate it can do REG+REG
2854 addressing. Small integers in VSX registers can only do
2855 REG+REG addressing. */
2856 if (small_int_vsx_p)
2857 addr_mask |= RELOAD_REG_INDEXED;
2858 else if (nregs > 1 || m == BLKmode || complex_p)
2859 addr_mask |= RELOAD_REG_MULTIPLE;
2860 else
2861 addr_mask |= RELOAD_REG_INDEXED;
2862
2863 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2864 addressing. If we allow scalars into Altivec registers,
2865 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2866
2867 For VSX systems, we don't allow update addressing for
2868 DFmode/SFmode if those registers can go in both the
2869 traditional floating point registers and Altivec registers.
2870 The load/store instructions for the Altivec registers do not
2871 have update forms. If we allowed update addressing, it seems
2872 to break IV-OPT code using floating point if the index type is
2873 int instead of long (PR target/81550 and target/84042). */
2874
2875 if (TARGET_UPDATE
2876 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2877 && msize <= 8
2878 && !VECTOR_MODE_P (m2)
2879 && !FLOAT128_VECTOR_P (m2)
2880 && !complex_p
2881 && (m != E_DFmode || !TARGET_VSX)
2882 && (m != E_SFmode || !TARGET_P8_VECTOR)
2883 && !small_int_vsx_p)
2884 {
2885 addr_mask |= RELOAD_REG_PRE_INCDEC;
2886
2887 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2888 we don't allow PRE_MODIFY for some multi-register
2889 operations. */
2890 switch (m)
2891 {
2892 default:
2893 addr_mask |= RELOAD_REG_PRE_MODIFY;
2894 break;
2895
2896 case E_DImode:
2897 if (TARGET_POWERPC64)
2898 addr_mask |= RELOAD_REG_PRE_MODIFY;
2899 break;
2900
2901 case E_DFmode:
2902 case E_DDmode:
2903 if (TARGET_HARD_FLOAT)
2904 addr_mask |= RELOAD_REG_PRE_MODIFY;
2905 break;
2906 }
2907 }
2908 }
2909
2910 /* GPR and FPR registers can do REG+OFFSET addressing, except
2911 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2912 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2913 if ((addr_mask != 0) && !indexed_only_p
2914 && msize <= 8
2915 && (rc == RELOAD_REG_GPR
2916 || ((msize == 8 || m2 == SFmode)
2917 && (rc == RELOAD_REG_FPR
2918 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2919 addr_mask |= RELOAD_REG_OFFSET;
2920
2921 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2922 instructions are enabled.  The offset field for 128-bit VSX
2923 registers is only 12 bits.  While GPRs can handle the full offset
2924 range, VSX registers can only handle the restricted range. */
2925 else if ((addr_mask != 0) && !indexed_only_p
2926 && msize == 16 && TARGET_P9_VECTOR
2927 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2928 || (m2 == TImode && TARGET_VSX)))
2929 {
2930 addr_mask |= RELOAD_REG_OFFSET;
2931 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2932 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2933 }
2934
2935 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2936 addressing on 128-bit types. */
2937 if (rc == RELOAD_REG_VMX && msize == 16
2938 && (addr_mask & RELOAD_REG_VALID) != 0)
2939 addr_mask |= RELOAD_REG_AND_M16;
2940
2941 reg_addr[m].addr_mask[rc] = addr_mask;
2942 any_addr_mask |= addr_mask;
2943 }
2944
2945 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2946 }
2947 }
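/* As a worked example, on a 64-bit target with TARGET_UPDATE set, DImode
   in the GPR reload class ends up with RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET | RELOAD_REG_PRE_INCDEC
   | RELOAD_REG_PRE_MODIFY: it fits in one register, GPRs allow
   REG+OFFSET addressing, and the E_DImode case above adds PRE_MODIFY
   when TARGET_POWERPC64.  */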
2948
2949 \f
2950 /* Initialize the various global tables that are based on register size. */
2951 static void
2952 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2953 {
2954 ssize_t r, m, c;
2955 int align64;
2956 int align32;
2957
2958 /* Precalculate REGNO_REG_CLASS. */
2959 rs6000_regno_regclass[0] = GENERAL_REGS;
2960 for (r = 1; r < 32; ++r)
2961 rs6000_regno_regclass[r] = BASE_REGS;
2962
2963 for (r = 32; r < 64; ++r)
2964 rs6000_regno_regclass[r] = FLOAT_REGS;
2965
2966 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
2967 rs6000_regno_regclass[r] = NO_REGS;
2968
2969 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2970 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2971
2972 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2973 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2974 rs6000_regno_regclass[r] = CR_REGS;
2975
2976 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2977 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2978 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2979 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2980 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2981 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2982 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2983
2984 /* Precalculate register class to simpler reload register class. We don't
2985 need all of the register classes that are combinations of different
2986 classes, just the simple ones that have constraint letters. */
2987 for (c = 0; c < N_REG_CLASSES; c++)
2988 reg_class_to_reg_type[c] = NO_REG_TYPE;
2989
2990 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2991 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2992 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2993 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2994 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2995 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2996 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2997 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2998 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2999 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3000
3001 if (TARGET_VSX)
3002 {
3003 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3004 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3005 }
3006 else
3007 {
3008 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3009 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3010 }
3011
3012 /* Precalculate the valid memory formats as well as the vector information;
3013 this must be set up before the rs6000_hard_regno_nregs_internal calls
3014 below. */
3015 gcc_assert ((int)VECTOR_NONE == 0);
3016 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3017 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3018
3019 gcc_assert ((int)CODE_FOR_nothing == 0);
3020 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3021
3022 gcc_assert ((int)NO_REGS == 0);
3023 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3024
3025 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
3026 controls whether the compiler uses native alignment or still uses 128-bit alignment. */
3027 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3028 {
3029 align64 = 64;
3030 align32 = 32;
3031 }
3032 else
3033 {
3034 align64 = 128;
3035 align32 = 128;
3036 }
3037
3038 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3039 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3040 if (TARGET_FLOAT128_TYPE)
3041 {
3042 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3043 rs6000_vector_align[KFmode] = 128;
3044
3045 if (FLOAT128_IEEE_P (TFmode))
3046 {
3047 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3048 rs6000_vector_align[TFmode] = 128;
3049 }
3050 }
3051
3052 /* V2DF mode, VSX only. */
3053 if (TARGET_VSX)
3054 {
3055 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3056 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3057 rs6000_vector_align[V2DFmode] = align64;
3058 }
3059
3060 /* V4SF mode, either VSX or Altivec. */
3061 if (TARGET_VSX)
3062 {
3063 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3064 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3065 rs6000_vector_align[V4SFmode] = align32;
3066 }
3067 else if (TARGET_ALTIVEC)
3068 {
3069 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3070 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3071 rs6000_vector_align[V4SFmode] = align32;
3072 }
3073
3074 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3075 and stores. */
3076 if (TARGET_ALTIVEC)
3077 {
3078 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3079 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3080 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3081 rs6000_vector_align[V4SImode] = align32;
3082 rs6000_vector_align[V8HImode] = align32;
3083 rs6000_vector_align[V16QImode] = align32;
3084
3085 if (TARGET_VSX)
3086 {
3087 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3088 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3089 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3090 }
3091 else
3092 {
3093 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3094 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3095 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3096 }
3097 }
3098
3099 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3100 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3101 if (TARGET_VSX)
3102 {
3103 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3104 rs6000_vector_unit[V2DImode]
3105 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3106 rs6000_vector_align[V2DImode] = align64;
3107
3108 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3109 rs6000_vector_unit[V1TImode]
3110 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3111 rs6000_vector_align[V1TImode] = 128;
3112 }
3113
3114 /* DFmode, see if we want to use the VSX unit. Memory is handled
3115 differently, so don't set rs6000_vector_mem. */
3116 if (TARGET_VSX)
3117 {
3118 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3119 rs6000_vector_align[DFmode] = 64;
3120 }
3121
3122 /* SFmode, see if we want to use the VSX unit. */
3123 if (TARGET_P8_VECTOR)
3124 {
3125 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3126 rs6000_vector_align[SFmode] = 32;
3127 }
3128
3129 /* Allow TImode in VSX registers and set the VSX memory macros. */
3130 if (TARGET_VSX)
3131 {
3132 rs6000_vector_mem[TImode] = VECTOR_VSX;
3133 rs6000_vector_align[TImode] = align64;
3134 }
3135
3136 /* Register class constraints for the constraints that depend on compile
3137 switches. When the VSX code was added, different constraints were added
3138 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3139 of the VSX registers are used. The register classes for scalar floating
3140 point types are set based on whether we allow that type into the upper
3141 (Altivec) registers. GCC has register classes to target the Altivec
3142 registers for load/store operations, to select using a VSX memory
3143 operation instead of the traditional floating point operation. The
3144 constraints are:
3145
3146 d - Register class to use with traditional DFmode instructions.
3147 f - Register class to use with traditional SFmode instructions.
3148 v - Altivec register.
3149 wa - Any VSX register.
3150 wc - Reserved to represent individual CR bits (used in LLVM).
3151 wd - Preferred register class for V2DFmode.
3152 wf - Preferred register class for V4SFmode.
3153 wg - Float register for power6x move insns.
3154 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3155 wn - always NO_REGS.
3156 wr - GPR if 64-bit mode is permitted.
3157 ws - Register class to do ISA 2.06 DF operations.
3158 wt - VSX register for TImode in VSX registers.
3159 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3160 ww - Register class to do SF conversions in with VSX operations.
3161 wx - Float register if we can do 32-bit int stores. */
3162
3163 if (TARGET_HARD_FLOAT)
3164 {
3165 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3166 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3167 }
3168
3169 if (TARGET_VSX)
3170 {
3171 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3172 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3173 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3174 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3175 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3176 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3177 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3178 }
3179
3180 /* Add conditional constraints based on various options, to allow us to
3181 collapse multiple insn patterns. */
3182 if (TARGET_ALTIVEC)
3183 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3184
3185 if (TARGET_MFPGPR) /* DFmode */
3186 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3187
3188 if (TARGET_POWERPC64)
3189 {
3190 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3191 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3192 }
3193
3194 if (TARGET_P8_VECTOR) /* SFmode */
3195 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3196 else if (TARGET_VSX)
3197 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3198
3199 if (TARGET_STFIWX)
3200 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3201
3202 if (TARGET_FLOAT128_TYPE)
3203 {
3204 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3205 if (FLOAT128_IEEE_P (TFmode))
3206 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3207 }
3208
3209 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3210 if (TARGET_DIRECT_MOVE_128)
3211 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3212
3213 /* Set up the reload helper and direct move functions. */
3214 if (TARGET_VSX || TARGET_ALTIVEC)
3215 {
3216 if (TARGET_64BIT)
3217 {
3218 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3219 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3220 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3221 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3222 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3223 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3224 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3225 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3226 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3227 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3228 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3229 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3230 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3231 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3232 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3233 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3234 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3235 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3236 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3237 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3238
3239 if (FLOAT128_VECTOR_P (KFmode))
3240 {
3241 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3242 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3243 }
3244
3245 if (FLOAT128_VECTOR_P (TFmode))
3246 {
3247 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3248 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3249 }
3250
3251 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3252 available. */
3253 if (TARGET_NO_SDMODE_STACK)
3254 {
3255 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3256 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3257 }
3258
3259 if (TARGET_VSX)
3260 {
3261 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3262 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3263 }
3264
3265 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3266 {
3267 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3268 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3269 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3270 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3271 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3272 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3273 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3274 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3275 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3276
3277 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3278 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3279 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3280 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3281 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3282 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3283 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3284 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3285 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3286
3287 if (FLOAT128_VECTOR_P (KFmode))
3288 {
3289 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3290 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3291 }
3292
3293 if (FLOAT128_VECTOR_P (TFmode))
3294 {
3295 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3296 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3297 }
3298 }
3299 }
3300 else
3301 {
3302 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3303 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3304 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3305 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3306 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3307 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3308 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3309 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3310 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3311 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3312 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3313 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3314 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3315 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3316 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3317 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3318 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3319 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3320 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3321 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3322
3323 if (FLOAT128_VECTOR_P (KFmode))
3324 {
3325 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3326 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3327 }
3328
3329 if (FLOAT128_IEEE_P (TFmode))
3330 {
3331 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3332 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3333 }
3334
3335 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3336 available. */
3337 if (TARGET_NO_SDMODE_STACK)
3338 {
3339 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3340 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3341 }
3342
3343 if (TARGET_VSX)
3344 {
3345 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3346 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3347 }
3348
3349 if (TARGET_DIRECT_MOVE)
3350 {
3351 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3352 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3353 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3354 }
3355 }
3356
3357 reg_addr[DFmode].scalar_in_vmx_p = true;
3358 reg_addr[DImode].scalar_in_vmx_p = true;
3359
3360 if (TARGET_P8_VECTOR)
3361 {
3362 reg_addr[SFmode].scalar_in_vmx_p = true;
3363 reg_addr[SImode].scalar_in_vmx_p = true;
3364
3365 if (TARGET_P9_VECTOR)
3366 {
3367 reg_addr[HImode].scalar_in_vmx_p = true;
3368 reg_addr[QImode].scalar_in_vmx_p = true;
3369 }
3370 }
3371 }
3372
3373 /* Precalculate HARD_REGNO_NREGS. */
3374 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3375 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3376 rs6000_hard_regno_nregs[m][r]
3377 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
3378
3379 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3380 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3381 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3382 rs6000_hard_regno_mode_ok_p[m][r]
3383 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3384
3385 /* Precalculate CLASS_MAX_NREGS sizes. */
3386 for (c = 0; c < LIM_REG_CLASSES; ++c)
3387 {
3388 int reg_size;
3389
3390 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3391 reg_size = UNITS_PER_VSX_WORD;
3392
3393 else if (c == ALTIVEC_REGS)
3394 reg_size = UNITS_PER_ALTIVEC_WORD;
3395
3396 else if (c == FLOAT_REGS)
3397 reg_size = UNITS_PER_FP_WORD;
3398
3399 else
3400 reg_size = UNITS_PER_WORD;
3401
3402 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3403 {
3404 machine_mode m2 = (machine_mode)m;
3405 int reg_size2 = reg_size;
3406
3407 /* TDmode & IBM 128-bit floating point always takes 2 registers, even
3408 in VSX. */
3409 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3410 reg_size2 = UNITS_PER_FP_WORD;
3411
3412 rs6000_class_max_nregs[m][c]
3413 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3414 }
3415 }
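
  /* Worked example of the ceiling division above (illustrative, not from
     the original source): a 16-byte V4SImode value held in FLOAT_REGS
     (reg_size == UNITS_PER_FP_WORD == 8 on 64-bit) needs
     (16 + 8 - 1) / 8 == 2 registers, while the same value held in a
     16-byte VSX register needs (16 + 16 - 1) / 16 == 1.  */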
3416
3417   /* Calculate which modes to automatically generate code to use the
3418      reciprocal divide and square root instructions.  In the future, possibly
3419      automatically generate the instructions even if the user did not specify
3420      -mrecip.  The older machines' double-precision reciprocal sqrt estimate
3421      is not accurate enough.  */
3422 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3423 if (TARGET_FRES)
3424 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3425 if (TARGET_FRE)
3426 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3427 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3428 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3429 if (VECTOR_UNIT_VSX_P (V2DFmode))
3430 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3431
3432 if (TARGET_FRSQRTES)
3433 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3434 if (TARGET_FRSQRTE)
3435 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3436 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3437 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3438 if (VECTOR_UNIT_VSX_P (V2DFmode))
3439 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3440
3441 if (rs6000_recip_control)
3442 {
3443 if (!flag_finite_math_only)
3444 	warning (0, "%qs requires %qs or %qs", "-mrecip",
3445 		 "-ffinite-math-only", "-ffast-math");
3446 if (flag_trapping_math)
3447 warning (0, "%qs requires %qs or %qs", "-mrecip",
3448 "-fno-trapping-math", "-ffast-math");
3449 if (!flag_reciprocal_math)
3450 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3451 "-ffast-math");
3452 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3453 {
3454 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3455 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3456 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3457
3458 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3459 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3460 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3461
3462 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3463 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3464 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3465
3466 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3467 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3468 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3469
3470 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3471 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3472 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3473
3474 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3475 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3476 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3477
3478 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3479 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3480 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3481
3482 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3483 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3484 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3485 }
3486 }
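
  /* Sketch of the refinement these estimate bits enable (illustration
     only; the actual expansion lives elsewhere): given a hardware
     estimate x0 ~= 1/d from fre/fres, one Newton-Raphson step

       x1 = x0 * (2.0 - d * x0)

     roughly doubles the number of correct bits, so a/d can be computed
     as a * xN without a divide.  The rsqrte estimates are refined the
     same way for square roots.  */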
3487
3488   /* Update the addr mask bits in reg_addr to help the secondary reload and
3489      legitimate-address support figure out the appropriate addressing to
3490      use.  */
3491 rs6000_setup_reg_addr_masks ();
3492
3493 if (global_init_p || TARGET_DEBUG_TARGET)
3494 {
3495 if (TARGET_DEBUG_REG)
3496 rs6000_debug_reg_global ();
3497
3498 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3499 fprintf (stderr,
3500 "SImode variable mult cost = %d\n"
3501 "SImode constant mult cost = %d\n"
3502 "SImode short constant mult cost = %d\n"
3503 "DImode multipliciation cost = %d\n"
3504 "SImode division cost = %d\n"
3505 "DImode division cost = %d\n"
3506 "Simple fp operation cost = %d\n"
3507 "DFmode multiplication cost = %d\n"
3508 "SFmode division cost = %d\n"
3509 "DFmode division cost = %d\n"
3510 "cache line size = %d\n"
3511 "l1 cache size = %d\n"
3512 "l2 cache size = %d\n"
3513 "simultaneous prefetches = %d\n"
3514 "\n",
3515 rs6000_cost->mulsi,
3516 rs6000_cost->mulsi_const,
3517 rs6000_cost->mulsi_const9,
3518 rs6000_cost->muldi,
3519 rs6000_cost->divsi,
3520 rs6000_cost->divdi,
3521 rs6000_cost->fp,
3522 rs6000_cost->dmul,
3523 rs6000_cost->sdiv,
3524 rs6000_cost->ddiv,
3525 rs6000_cost->cache_line_size,
3526 rs6000_cost->l1_cache_size,
3527 rs6000_cost->l2_cache_size,
3528 rs6000_cost->simultaneous_prefetches);
3529 }
3530 }
3531
3532 #if TARGET_MACHO
3533 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3534
3535 static void
3536 darwin_rs6000_override_options (void)
3537 {
3538   /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3539      off.  */
3540 rs6000_altivec_abi = 1;
3541 TARGET_ALTIVEC_VRSAVE = 1;
3542 rs6000_current_abi = ABI_DARWIN;
3543
3544 if (DEFAULT_ABI == ABI_DARWIN
3545 && TARGET_64BIT)
3546 darwin_one_byte_bool = 1;
3547
3548 if (TARGET_64BIT && ! TARGET_POWERPC64)
3549 {
3550 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3551 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3552 }
3553 if (flag_mkernel)
3554 {
3555 rs6000_default_long_calls = 1;
3556 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3557 }
3558
3559 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3560 Altivec. */
3561 if (!flag_mkernel && !flag_apple_kext
3562 && TARGET_64BIT
3563 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3564 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3565
3566   /* Unless the user (not the configurer) has explicitly overridden
3567      it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3568      G4 unless targeting the kernel.  */
3569 if (!flag_mkernel
3570 && !flag_apple_kext
3571 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3572 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3573 && ! global_options_set.x_rs6000_cpu_index)
3574 {
3575 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3576 }
3577 }
3578 #endif
3579
3580 /* If not otherwise specified by a target, make 'long double' equivalent to
3581 'double'. */
3582
3583 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3584 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3585 #endif
3586
3587 /* Return the builtin mask for the various options that could affect which
3588    builtins are available.  In the past we used target_flags, but we've run
3589    out of bits, and some options are no longer in target_flags.  */
3590
3591 HOST_WIDE_INT
3592 rs6000_builtin_mask_calculate (void)
3593 {
3594 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3595 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3596 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3597 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3598 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3599 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3600 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3601 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3602 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3603 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3604 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3605 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3606 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3607 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3608 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3609 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3610 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3611 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3612 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3613 | ((TARGET_LONG_DOUBLE_128
3614 && TARGET_HARD_FLOAT
3615 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3616 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3617 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3618 }
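
/* Hypothetical use of the mask returned above (illustrative only):
   callers test individual RS6000_BTM_* bits to decide whether a builtin
   may be expanded, e.g.

     HOST_WIDE_INT bt_mask = rs6000_builtin_mask_calculate ();
     bool have_altivec_builtins = (bt_mask & RS6000_BTM_ALTIVEC) != 0;

   bt_mask and have_altivec_builtins are made-up names for this
   sketch.  */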
3619
3620 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3621 to clobber the XER[CA] bit because clobbering that bit without telling
3622 the compiler worked just fine with versions of GCC before GCC 5, and
3623 breaking a lot of older code in ways that are hard to track down is
3624 not such a great idea. */
3625
3626 static rtx_insn *
3627 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3628 vec<const char *> &/*constraints*/,
3629 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3630 {
3631 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3632 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3633 return NULL;
3634 }
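
/* Example of the kind of pre-GCC 5 code the blanket clobber above keeps
   working (illustrative user code, not part of this file):

     asm ("addic %0,%1,-1\n\taddze %0,%0" : "=r" (x) : "r" (y));

   addic writes XER[CA] without declaring it; the implicit clobber keeps
   the compiler from assuming the carry bit survives the asm.  */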
3635
3636 /* Override command line options.
3637
3638 Combine build-specific configuration information with options
3639 specified on the command line to set various state variables which
3640 influence code generation, optimization, and expansion of built-in
3641 functions. Assure that command-line configuration preferences are
3642 compatible with each other and with the build configuration; issue
3643 warnings while adjusting configuration or error messages while
3644 rejecting configuration.
3645
3646 Upon entry to this function:
3647
3648 This function is called once at the beginning of
3649 compilation, and then again at the start and end of compiling
3650 each section of code that has a different configuration, as
3651 indicated, for example, by adding the
3652
3653 __attribute__((__target__("cpu=power9")))
3654
3655 qualifier to a function definition or, for example, by bracketing
3656 code between
3657
3658 #pragma GCC target("altivec")
3659
3660 and
3661
3662 #pragma GCC reset_options
3663
3664 directives. Parameter global_init_p is true for the initial
3665 invocation, which initializes global variables, and false for all
3666 subsequent invocations.
3667
3668
3669 Various global state information is assumed to be valid. This
3670 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3671 default CPU specified at build configure time, TARGET_DEFAULT,
3672 representing the default set of option flags for the default
3673 target, and global_options_set.x_rs6000_isa_flags, representing
3674 which options were requested on the command line.
3675
3676 Upon return from this function:
3677
3678 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3679 was set by name on the command line. Additionally, if certain
3680 attributes are automatically enabled or disabled by this function
3681 in order to assure compatibility between options and
3682 configuration, the flags associated with those attributes are
3683 also set. By setting these "explicit bits", we avoid the risk
3684 that other code might accidentally overwrite these particular
3685 attributes with "default values".
3686
3687 The various bits of rs6000_isa_flags are set to indicate the
3688 target options that have been selected for the most current
3689 compilation efforts. This has the effect of also turning on the
3690 associated TARGET_XXX values since these are macros which are
3691 generally defined to test the corresponding bit of the
3692 rs6000_isa_flags variable.
3693
3694 The variable rs6000_builtin_mask is set to represent the target
3695 options for the most current compilation efforts, consistent with
3696 the current contents of rs6000_isa_flags. This variable controls
3697 expansion of built-in functions.
3698
3699 Various other global variables and fields of global structures
3700 (over 50 in all) are initialized to reflect the desired options
3701 for the most current compilation efforts. */
3702
3703 static bool
3704 rs6000_option_override_internal (bool global_init_p)
3705 {
3706 bool ret = true;
3707
3708 HOST_WIDE_INT set_masks;
3709 HOST_WIDE_INT ignore_masks;
3710 int cpu_index = -1;
3711 int tune_index;
3712 struct cl_target_option *main_target_opt
3713 = ((global_init_p || target_option_default_node == NULL)
3714 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3715
3716 /* Print defaults. */
3717 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3718 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3719
3720 /* Remember the explicit arguments. */
3721 if (global_init_p)
3722 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3723
3724 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3725 library functions, so warn about it. The flag may be useful for
3726 performance studies from time to time though, so don't disable it
3727 entirely. */
3728 if (global_options_set.x_rs6000_alignment_flags
3729 && rs6000_alignment_flags == MASK_ALIGN_POWER
3730 && DEFAULT_ABI == ABI_DARWIN
3731 && TARGET_64BIT)
3732 warning (0, "%qs is not supported for 64-bit Darwin;"
3733 " it is incompatible with the installed C and C++ libraries",
3734 "-malign-power");
3735
3736   /* Numerous experiments show that IRA-based loop pressure
3737      calculation works better for RTL loop-invariant motion on targets
3738      with enough (>= 32) registers.  It is an expensive optimization,
3739      so it is enabled only when optimizing for peak performance.  */
3740 if (optimize >= 3 && global_init_p
3741 && !global_options_set.x_flag_ira_loop_pressure)
3742 flag_ira_loop_pressure = 1;
3743
3744 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3745 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3746 options were already specified. */
3747 if (flag_sanitize & SANITIZE_USER_ADDRESS
3748 && !global_options_set.x_flag_asynchronous_unwind_tables)
3749 flag_asynchronous_unwind_tables = 1;
3750
3751 /* Set the pointer size. */
3752 if (TARGET_64BIT)
3753 {
3754 rs6000_pmode = DImode;
3755 rs6000_pointer_size = 64;
3756 }
3757 else
3758 {
3759 rs6000_pmode = SImode;
3760 rs6000_pointer_size = 32;
3761 }
3762
3763 /* Some OSs don't support saving the high part of 64-bit registers on context
3764 switch. Other OSs don't support saving Altivec registers. On those OSs,
3765 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3766 if the user wants either, the user must explicitly specify them and we
3767 won't interfere with the user's specification. */
3768
3769 set_masks = POWERPC_MASKS;
3770 #ifdef OS_MISSING_POWERPC64
3771 if (OS_MISSING_POWERPC64)
3772 set_masks &= ~OPTION_MASK_POWERPC64;
3773 #endif
3774 #ifdef OS_MISSING_ALTIVEC
3775 if (OS_MISSING_ALTIVEC)
3776 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3777 | OTHER_VSX_VECTOR_MASKS);
3778 #endif
3779
3780 /* Don't override by the processor default if given explicitly. */
3781 set_masks &= ~rs6000_isa_flags_explicit;
3782
3783 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3784 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3785
3786 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
3787 the cpu in a target attribute or pragma, but did not specify a tuning
3788 option, use the cpu for the tuning option rather than the option specified
3789 with -mtune on the command line. Process a '--with-cpu' configuration
3790 request as an implicit --cpu. */
3791 if (rs6000_cpu_index >= 0)
3792 cpu_index = rs6000_cpu_index;
3793 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3794 cpu_index = main_target_opt->x_rs6000_cpu_index;
3795 else if (OPTION_TARGET_CPU_DEFAULT)
3796 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3797
3798 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3799 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3800 with those from the cpu, except for options that were explicitly set. If
3801 we don't have a cpu, do not override the target bits set in
3802 TARGET_DEFAULT. */
3803 if (cpu_index >= 0)
3804 {
3805 rs6000_cpu_index = cpu_index;
3806 rs6000_isa_flags &= ~set_masks;
3807 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3808 & set_masks);
3809 }
3810 else
3811 {
3812 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3813 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3814 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3815 to using rs6000_isa_flags, we need to do the initialization here.
3816
3817 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3818 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3819 HOST_WIDE_INT flags;
3820 if (TARGET_DEFAULT)
3821 flags = TARGET_DEFAULT;
3822 else
3823 {
3824 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3825 const char *default_cpu = (!TARGET_POWERPC64
3826 ? "powerpc"
3827 : (BYTES_BIG_ENDIAN
3828 ? "powerpc64"
3829 : "powerpc64le"));
3830 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3831 flags = processor_target_table[default_cpu_index].target_enable;
3832 }
3833 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3834 }
3835
3836 if (rs6000_tune_index >= 0)
3837 tune_index = rs6000_tune_index;
3838 else if (cpu_index >= 0)
3839 rs6000_tune_index = tune_index = cpu_index;
3840 else
3841 {
3842 size_t i;
3843 enum processor_type tune_proc
3844 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3845
3846 tune_index = -1;
3847 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3848 if (processor_target_table[i].processor == tune_proc)
3849 {
3850 tune_index = i;
3851 break;
3852 }
3853 }
3854
3855 if (cpu_index >= 0)
3856 rs6000_cpu = processor_target_table[cpu_index].processor;
3857 else
3858 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3859
3860 gcc_assert (tune_index >= 0);
3861 rs6000_tune = processor_target_table[tune_index].processor;
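
  /* Worked example of the resolution above (illustrative): "-mcpu=power9"
     alone sets both rs6000_cpu and rs6000_tune from the power9 entry of
     processor_target_table; "-mcpu=power9 -mtune=power8" keeps the power9
     ISA flags but takes scheduling parameters from the power8 entry; with
     neither option, the configure-time default (or PROCESSOR_DEFAULT) is
     used.  */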
3862
3863 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3864 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3865 || rs6000_cpu == PROCESSOR_PPCE5500)
3866 {
3867 if (TARGET_ALTIVEC)
3868 error ("AltiVec not supported in this target");
3869 }
3870
3871 /* If we are optimizing big endian systems for space, use the load/store
3872 multiple instructions. */
3873 if (BYTES_BIG_ENDIAN && optimize_size)
3874 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3875
3876   /* Don't allow -mmultiple on little-endian systems unless the cpu is a 750,
3877      because the hardware doesn't support the instructions used in
3878      little-endian mode and they cause an alignment trap.  The 750 does not
3879      cause an alignment trap (except when the target is unaligned).  */
3880
3881 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3882 {
3883 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3884 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3885 warning (0, "%qs is not supported on little endian systems",
3886 "-mmultiple");
3887 }
3888
3889 /* If little-endian, default to -mstrict-align on older processors.
3890 Testing for htm matches power8 and later. */
3891 if (!BYTES_BIG_ENDIAN
3892 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3893 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3894
3895 if (!rs6000_fold_gimple)
3896 fprintf (stderr,
3897 "gimple folding of rs6000 builtins has been disabled.\n");
3898
3899 /* Add some warnings for VSX. */
3900 if (TARGET_VSX)
3901 {
3902 const char *msg = NULL;
3903 if (!TARGET_HARD_FLOAT)
3904 {
3905 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3906 msg = N_("%<-mvsx%> requires hardware floating point");
3907 else
3908 {
3909 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3910 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3911 }
3912 }
3913 else if (TARGET_AVOID_XFORM > 0)
3914 msg = N_("%<-mvsx%> needs indexed addressing");
3915 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3916 & OPTION_MASK_ALTIVEC))
3917 {
3918 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3919 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
3920 else
3921 msg = N_("%<-mno-altivec%> disables vsx");
3922 }
3923
3924 if (msg)
3925 {
3926 warning (0, msg);
3927 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3928 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3929 }
3930 }
3931
3932 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3933 the -mcpu setting to enable options that conflict. */
3934 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3935 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3936 | OPTION_MASK_ALTIVEC
3937 | OPTION_MASK_VSX)) != 0)
3938 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3939 | OPTION_MASK_DIRECT_MOVE)
3940 & ~rs6000_isa_flags_explicit);
3941
3942 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3943 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3944
3945 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
3946 off all of the options that depend on those flags. */
3947 ignore_masks = rs6000_disable_incompatible_switches ();
3948
3949   /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3950      unless the user explicitly used -mno-<option> to disable the code.  */
3951 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
3952 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3953 else if (TARGET_P9_MINMAX)
3954 {
3955 if (cpu_index >= 0)
3956 {
3957 if (cpu_index == PROCESSOR_POWER9)
3958 {
3959 /* legacy behavior: allow -mcpu=power9 with certain
3960 capabilities explicitly disabled. */
3961 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3962 }
3963 else
3964 error ("power9 target option is incompatible with %<%s=<xxx>%> "
3965 "for <xxx> less than power9", "-mcpu");
3966 }
3967 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
3968 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
3969 & rs6000_isa_flags_explicit))
3970 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
3971 were explicitly cleared. */
3972 error ("%qs incompatible with explicitly disabled options",
3973 "-mpower9-minmax");
3974 else
3975 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
3976 }
3977 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3978 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
3979 else if (TARGET_VSX)
3980 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
3981 else if (TARGET_POPCNTD)
3982 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
3983 else if (TARGET_DFP)
3984 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
3985 else if (TARGET_CMPB)
3986 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
3987 else if (TARGET_FPRND)
3988 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
3989 else if (TARGET_POPCNTB)
3990 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
3991 else if (TARGET_ALTIVEC)
3992 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
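
  /* Illustration of the cascade above: plain "-mvsx" pulls in the ISA 2.06
     server defaults (ISA_2_6_MASKS_SERVER), which themselves build on the
     older ISA 2.5/2.4/2.2 masks, except for any bit the user explicitly
     disabled (those stay off via ignore_masks).  */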
3993
3994 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3995 {
3996 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3997 error ("%qs requires %qs", "-mcrypto", "-maltivec");
3998 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3999 }
4000
4001 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4002 {
4003 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4004 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4005 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4006 }
4007
4008 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4009 {
4010 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4011 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4012 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4013 }
4014
4015 if (TARGET_P8_VECTOR && !TARGET_VSX)
4016 {
4017 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4018 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4019 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4020 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4021 {
4022 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4023 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4024 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4025 }
4026 else
4027 {
4028 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4029 not explicit. */
4030 rs6000_isa_flags |= OPTION_MASK_VSX;
4031 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4032 }
4033 }
4034
4035 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4036 {
4037 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4038 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4039 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4040 }
4041
4042   /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
4043      silently turn off quad memory mode.  */
4044 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4045 {
4046 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4047 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
4048
4049 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4050 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
4051
4052 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4053 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4054 }
4055
4056   /* Non-atomic quad memory load/store instructions are disabled for little
4057      endian, since the words are reversed, but atomic operations can still be
4058      done by swapping the words.  */
4059 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4060 {
4061 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4062 warning (0, N_("%<-mquad-memory%> is not available in little endian "
4063 "mode"));
4064
4065 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4066 }
4067
4068   /* Assume that if the user asked for normal quad memory instructions, they
4069      want the atomic versions as well, unless they explicitly told us not to
4070      use quad word atomic instructions.  */
4071 if (TARGET_QUAD_MEMORY
4072 && !TARGET_QUAD_MEMORY_ATOMIC
4073 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4074 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4075
4076 /* If we can shrink-wrap the TOC register save separately, then use
4077 -msave-toc-indirect unless explicitly disabled. */
4078 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4079 && flag_shrink_wrap_separate
4080 && optimize_function_for_speed_p (cfun))
4081 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4082
4083 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4084 generating power8 instructions. Power9 does not optimize power8 fusion
4085 cases. */
4086 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4087 {
4088 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4089 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4090 else
4091 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4092 }
4093
4094 /* Setting additional fusion flags turns on base fusion. */
4095 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4096 {
4097 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4098 {
4099 if (TARGET_P8_FUSION_SIGN)
4100 error ("%qs requires %qs", "-mpower8-fusion-sign",
4101 "-mpower8-fusion");
4102
4103 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4104 }
4105 else
4106 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4107 }
4108
4109 /* Power8 does not fuse sign extended loads with the addis. If we are
4110 optimizing at high levels for speed, convert a sign extended load into a
4111 zero extending load, and an explicit sign extension. */
4112 if (TARGET_P8_FUSION
4113 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4114 && optimize_function_for_speed_p (cfun)
4115 && optimize >= 3)
4116 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
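
  /* Sketch of the resulting transformation (assumed code sequence, for
     illustration only): a sign-extending load that would block fusion,

       lwa r3,0(r9)

     is emitted instead as a fusible zero-extending load plus an explicit
     sign extension:

       lwz   r3,0(r9)
       extsw r3,r3
  */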
4117
4118 /* ISA 3.0 vector instructions include ISA 2.07. */
4119 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4120 {
4121       /* We prefer not to mention undocumented options in
4122 	 error messages.  However, if users have managed to select
4123 	 power9-vector without selecting power8-vector, they
4124 	 already know about undocumented flags.  */
4125       if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4126 	  && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4127 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4128 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4129 {
4130 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4131 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4132 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4133 }
4134 else
4135 {
4136 /* OPTION_MASK_P9_VECTOR is explicit and
4137 OPTION_MASK_P8_VECTOR is not explicit. */
4138 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4139 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4140 }
4141 }
4142
4143   /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4144      support.  If we only have ISA 2.06 support, and the user did not specify
4145      the switch, leave it set to -1 so the movmisalign patterns are enabled,
4146      but we don't enable the full vectorization support.  */
4147 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4148 TARGET_ALLOW_MOVMISALIGN = 1;
4149
4150 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4151 {
4152 if (TARGET_ALLOW_MOVMISALIGN > 0
4153 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4154 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4155
4156 TARGET_ALLOW_MOVMISALIGN = 0;
4157 }
4158
4159 /* Determine when unaligned vector accesses are permitted, and when
4160 they are preferred over masked Altivec loads. Note that if
4161 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4162 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4163 not true. */
4164 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4165 {
4166 if (!TARGET_VSX)
4167 {
4168 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4169 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4170
4171 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4172 }
4173
4174 else if (!TARGET_ALLOW_MOVMISALIGN)
4175 {
4176 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4177 	    error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4178 "-mallow-movmisalign");
4179
4180 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4181 }
4182 }
4183
4184 /* Use long double size to select the appropriate long double. We use
4185 TYPE_PRECISION to differentiate the 3 different long double types. We map
4186 128 into the precision used for TFmode. */
4187 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4188 ? 64
4189 : FLOAT_PRECISION_TFmode);
4190
4191 /* Set long double size before the IEEE 128-bit tests. */
4192 if (!global_options_set.x_rs6000_long_double_type_size)
4193 {
4194 if (main_target_opt != NULL
4195 && (main_target_opt->x_rs6000_long_double_type_size
4196 != default_long_double_size))
4197 error ("target attribute or pragma changes %<long double%> size");
4198 else
4199 rs6000_long_double_type_size = default_long_double_size;
4200 }
4201 else if (rs6000_long_double_type_size == 128)
4202 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4203 else if (global_options_set.x_rs6000_ieeequad)
4204 {
4205 if (global_options.x_rs6000_ieeequad)
4206 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4207 else
4208 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4209 }
4210
4211 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4212 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4213 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4214 those systems will not pick up this default. Warn if the user changes the
4215 default unless -Wno-psabi. */
4216 if (!global_options_set.x_rs6000_ieeequad)
4217 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4218
4219 else
4220 {
4221 if (global_options.x_rs6000_ieeequad
4222 && (!TARGET_POPCNTD || !TARGET_VSX))
4223 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4224
4225 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4226 {
4227 static bool warned_change_long_double;
4228 if (!warned_change_long_double)
4229 {
4230 warned_change_long_double = true;
4231 if (TARGET_IEEEQUAD)
4232 warning (OPT_Wpsabi, "Using IEEE extended precision "
4233 "%<long double%>");
4234 else
4235 warning (OPT_Wpsabi, "Using IBM extended precision "
4236 "%<long double%>");
4237 }
4238 }
4239 }
4240
4241   /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4242      systems.  In GCC 7, we would enable the IEEE 128-bit floating point
4243      infrastructure (-mfloat128-type) but not enable the actual __float128 type
4244      unless the user used the explicit -mfloat128.  In GCC 8, we enable both
4245      the keyword as well as the type.  */
4246 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4247
4248 /* IEEE 128-bit floating point requires VSX support. */
4249 if (TARGET_FLOAT128_KEYWORD)
4250 {
4251 if (!TARGET_VSX)
4252 {
4253 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4254 error ("%qs requires VSX support", "%<-mfloat128%>");
4255
4256 TARGET_FLOAT128_TYPE = 0;
4257 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4258 | OPTION_MASK_FLOAT128_HW);
4259 }
4260 else if (!TARGET_FLOAT128_TYPE)
4261 {
4262 TARGET_FLOAT128_TYPE = 1;
4263 warning (0, "The %<-mfloat128%> option may not be fully supported");
4264 }
4265 }
4266
4267 /* Enable the __float128 keyword under Linux by default. */
4268 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4269 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4270 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4271
4272   /* If we are supporting the float128 type and have full ISA 3.0 support,
4273 enable -mfloat128-hardware by default. However, don't enable the
4274 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4275 because sometimes the compiler wants to put things in an integer
4276 container, and if we don't have __int128 support, it is impossible. */
4277 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4278 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4279 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4280 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4281
4282 if (TARGET_FLOAT128_HW
4283 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4284 {
4285 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4286 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4287
4288 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4289 }
4290
4291 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4292 {
4293 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4294 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4295
4296 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4297 }
4298
4299 /* -mpcrel requires the prefixed load/store support on FUTURE systems. */
4300 if (!TARGET_FUTURE && TARGET_PCREL)
4301 {
4302 if ((rs6000_isa_flags_explicit & OPTION_MASK_PCREL) != 0)
4303 error ("%qs requires %qs", "-mpcrel", "-mcpu=future");
4304
4305 rs6000_isa_flags &= ~OPTION_MASK_PCREL;
4306 }
4307
4308 /* Print the options after updating the defaults. */
4309 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4310 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4311
4312 /* E500mc does "better" if we inline more aggressively. Respect the
4313 user's opinion, though. */
4314 if (rs6000_block_move_inline_limit == 0
4315 && (rs6000_tune == PROCESSOR_PPCE500MC
4316 || rs6000_tune == PROCESSOR_PPCE500MC64
4317 || rs6000_tune == PROCESSOR_PPCE5500
4318 || rs6000_tune == PROCESSOR_PPCE6500))
4319 rs6000_block_move_inline_limit = 128;
4320
4321 /* store_one_arg depends on expand_block_move to handle at least the
4322 size of reg_parm_stack_space. */
4323 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4324 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4325
4326 if (global_init_p)
4327 {
4328       /* If the appropriate debug option is enabled, replace the target hooks
4329 	 with debug versions that call the real version and then print
4330 	 debugging information.  */
4331 if (TARGET_DEBUG_COST)
4332 {
4333 targetm.rtx_costs = rs6000_debug_rtx_costs;
4334 targetm.address_cost = rs6000_debug_address_cost;
4335 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4336 }
4337
4338 if (TARGET_DEBUG_ADDR)
4339 {
4340 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4341 targetm.legitimize_address = rs6000_debug_legitimize_address;
4342 rs6000_secondary_reload_class_ptr
4343 = rs6000_debug_secondary_reload_class;
4344 targetm.secondary_memory_needed
4345 = rs6000_debug_secondary_memory_needed;
4346 targetm.can_change_mode_class
4347 = rs6000_debug_can_change_mode_class;
4348 rs6000_preferred_reload_class_ptr
4349 = rs6000_debug_preferred_reload_class;
4350 rs6000_mode_dependent_address_ptr
4351 = rs6000_debug_mode_dependent_address;
4352 }
4353
4354 if (rs6000_veclibabi_name)
4355 {
4356 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4357 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4358 else
4359 {
4360 error ("unknown vectorization library ABI type (%qs) for "
4361 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4362 ret = false;
4363 }
4364 }
4365 }
4366
4367 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4368 target attribute or pragma which automatically enables both options,
4369 unless the altivec ABI was set. This is set by default for 64-bit, but
4370 not for 32-bit. */
4371 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4372 {
4373 TARGET_FLOAT128_TYPE = 0;
4374 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4375 | OPTION_MASK_FLOAT128_KEYWORD)
4376 & ~rs6000_isa_flags_explicit);
4377 }
4378
4379 /* Enable Altivec ABI for AIX -maltivec. */
4380 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4381 {
4382 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4383 error ("target attribute or pragma changes AltiVec ABI");
4384 else
4385 rs6000_altivec_abi = 1;
4386 }
4387
4388 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4389 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4390 be explicitly overridden in either case. */
4391 if (TARGET_ELF)
4392 {
4393 if (!global_options_set.x_rs6000_altivec_abi
4394 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4395 {
4396 	  if (main_target_opt != NULL
4397 	      && !main_target_opt->x_rs6000_altivec_abi)
4398 error ("target attribute or pragma changes AltiVec ABI");
4399 else
4400 rs6000_altivec_abi = 1;
4401 }
4402 }
4403
4404 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4405      So far, the only darwin64 targets are also Mach-O.  */
4406 if (TARGET_MACHO
4407 && DEFAULT_ABI == ABI_DARWIN
4408 && TARGET_64BIT)
4409 {
4410 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4411 error ("target attribute or pragma changes darwin64 ABI");
4412 else
4413 {
4414 rs6000_darwin64_abi = 1;
4415 /* Default to natural alignment, for better performance. */
4416 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4417 }
4418 }
4419
4420   /* Place FP constants in the constant pool instead of the TOC
4421      if section anchors are enabled.  */
4422 if (flag_section_anchors
4423 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4424 TARGET_NO_FP_IN_TOC = 1;
4425
4426 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4427 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4428
4429 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4430 SUBTARGET_OVERRIDE_OPTIONS;
4431 #endif
4432 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4433 SUBSUBTARGET_OVERRIDE_OPTIONS;
4434 #endif
4435 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4436 SUB3TARGET_OVERRIDE_OPTIONS;
4437 #endif
4438
4439 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4440 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4441
4442 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4443 && rs6000_tune != PROCESSOR_POWER5
4444 && rs6000_tune != PROCESSOR_POWER6
4445 && rs6000_tune != PROCESSOR_POWER7
4446 && rs6000_tune != PROCESSOR_POWER8
4447 && rs6000_tune != PROCESSOR_POWER9
4448 && rs6000_tune != PROCESSOR_FUTURE
4449 && rs6000_tune != PROCESSOR_PPCA2
4450 && rs6000_tune != PROCESSOR_CELL
4451 && rs6000_tune != PROCESSOR_PPC476);
4452 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4453 || rs6000_tune == PROCESSOR_POWER5
4454 || rs6000_tune == PROCESSOR_POWER7
4455 || rs6000_tune == PROCESSOR_POWER8);
4456 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4457 || rs6000_tune == PROCESSOR_POWER5
4458 || rs6000_tune == PROCESSOR_POWER6
4459 || rs6000_tune == PROCESSOR_POWER7
4460 || rs6000_tune == PROCESSOR_POWER8
4461 || rs6000_tune == PROCESSOR_POWER9
4462 || rs6000_tune == PROCESSOR_FUTURE
4463 || rs6000_tune == PROCESSOR_PPCE500MC
4464 || rs6000_tune == PROCESSOR_PPCE500MC64
4465 || rs6000_tune == PROCESSOR_PPCE5500
4466 || rs6000_tune == PROCESSOR_PPCE6500);
4467
4468 /* Allow debug switches to override the above settings. These are set to -1
4469 in rs6000.opt to indicate the user hasn't directly set the switch. */
4470 if (TARGET_ALWAYS_HINT >= 0)
4471 rs6000_always_hint = TARGET_ALWAYS_HINT;
4472
4473 if (TARGET_SCHED_GROUPS >= 0)
4474 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4475
4476 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4477 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4478
4479 rs6000_sched_restricted_insns_priority
4480 = (rs6000_sched_groups ? 1 : 0);
4481
4482 /* Handle -msched-costly-dep option. */
4483 rs6000_sched_costly_dep
4484 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4485
4486 if (rs6000_sched_costly_dep_str)
4487 {
4488 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4489 rs6000_sched_costly_dep = no_dep_costly;
4490 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4491 rs6000_sched_costly_dep = all_deps_costly;
4492 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4493 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4494 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4495 rs6000_sched_costly_dep = store_to_load_dep_costly;
4496 else
4497 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4498 atoi (rs6000_sched_costly_dep_str));
4499 }
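
  /* Example arguments for the parser above (illustrative):
     "-msched-costly-dep=true_store_to_load" selects the named enum value,
     while a numeric argument such as "-msched-costly-dep=3" is passed
     through atoi and treated as a cost threshold (assumed
     interpretation).  */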
4500
4501 /* Handle -minsert-sched-nops option. */
4502 rs6000_sched_insert_nops
4503 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4504
4505 if (rs6000_sched_insert_nops_str)
4506 {
4507 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4508 rs6000_sched_insert_nops = sched_finish_none;
4509 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4510 rs6000_sched_insert_nops = sched_finish_pad_groups;
4511 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4512 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4513 else
4514 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4515 atoi (rs6000_sched_insert_nops_str));
4516 }
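
  /* Example arguments for the parser above (illustrative):
     "-minsert-sched-nops=regroup_exact" picks the named scheme, and a
     numeric argument such as "-minsert-sched-nops=2" is passed through
     atoi (assumed to bound the number of nops inserted).  */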
4517
4518 /* Handle stack protector */
4519 if (!global_options_set.x_rs6000_stack_protector_guard)
4520 #ifdef TARGET_THREAD_SSP_OFFSET
4521 rs6000_stack_protector_guard = SSP_TLS;
4522 #else
4523 rs6000_stack_protector_guard = SSP_GLOBAL;
4524 #endif
4525
4526 #ifdef TARGET_THREAD_SSP_OFFSET
4527 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4528 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4529 #endif
4530
4531 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4532 {
4533 char *endp;
4534 const char *str = rs6000_stack_protector_guard_offset_str;
4535
4536 errno = 0;
4537 long offset = strtol (str, &endp, 0);
4538 if (!*str || *endp || errno)
4539 error ("%qs is not a valid number in %qs", str,
4540 "-mstack-protector-guard-offset=");
4541
4542 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4543 || (TARGET_64BIT && (offset & 3)))
4544 error ("%qs is not a valid offset in %qs", str,
4545 "-mstack-protector-guard-offset=");
4546
4547 rs6000_stack_protector_guard_offset = offset;
4548 }
4549
4550 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4551 {
4552 const char *str = rs6000_stack_protector_guard_reg_str;
4553 int reg = decode_reg_name (str);
4554
4555 if (!IN_RANGE (reg, 1, 31))
4556 error ("%qs is not a valid base register in %qs", str,
4557 "-mstack-protector-guard-reg=");
4558
4559 rs6000_stack_protector_guard_reg = reg;
4560 }
4561
4562 if (rs6000_stack_protector_guard == SSP_TLS
4563 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4564 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
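
  /* Hypothetical command line exercising the options validated above
     (the offset value is made up for illustration):

       gcc -mstack-protector-guard=tls -mstack-protector-guard-reg=r13 \
           -mstack-protector-guard-offset=0x28 ...

     i.e. load the canary from 0x28(r13), r13 being the 64-bit TLS
     register chosen by the defaults above.  */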
4565
4566 if (global_init_p)
4567 {
4568 #ifdef TARGET_REGNAMES
4569 /* If the user desires alternate register names, copy in the
4570 alternate names now. */
4571 if (TARGET_REGNAMES)
4572 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4573 #endif
4574
4575 /* Set aix_struct_return last, after the ABI is determined.
4576 If -maix-struct-return or -msvr4-struct-return was explicitly
4577 used, don't override with the ABI default. */
4578 if (!global_options_set.x_aix_struct_return)
4579 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4580
4581 #if 0
4582 /* IBM XL compiler defaults to unsigned bitfields. */
4583 if (TARGET_XL_COMPAT)
4584 flag_signed_bitfields = 0;
4585 #endif
4586
4587 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4588 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4589
4590 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4591
4592 /* We can only guarantee the availability of DI pseudo-ops when
4593 assembling for 64-bit targets. */
4594 if (!TARGET_64BIT)
4595 {
4596 targetm.asm_out.aligned_op.di = NULL;
4597 targetm.asm_out.unaligned_op.di = NULL;
4598 }
4599
4600
4601 /* Set branch target alignment, if not optimizing for size. */
4602 if (!optimize_size)
4603 {
4604 	  /* Cell wants to be aligned 8-byte for dual issue.  Titan wants to be
4605 	     aligned 8-byte to avoid misprediction by the branch predictor.  */
4606 if (rs6000_tune == PROCESSOR_TITAN
4607 || rs6000_tune == PROCESSOR_CELL)
4608 {
4609 if (flag_align_functions && !str_align_functions)
4610 str_align_functions = "8";
4611 if (flag_align_jumps && !str_align_jumps)
4612 str_align_jumps = "8";
4613 if (flag_align_loops && !str_align_loops)
4614 str_align_loops = "8";
4615 }
4616 if (rs6000_align_branch_targets)
4617 {
4618 if (flag_align_functions && !str_align_functions)
4619 str_align_functions = "16";
4620 if (flag_align_jumps && !str_align_jumps)
4621 str_align_jumps = "16";
4622 if (flag_align_loops && !str_align_loops)
4623 {
4624 can_override_loop_align = 1;
4625 str_align_loops = "16";
4626 }
4627 }
4628
4629 if (flag_align_jumps && !str_align_jumps)
4630 str_align_jumps = "16";
4631 if (flag_align_loops && !str_align_loops)
4632 str_align_loops = "16";
4633 }
4634
4635 /* Arrange to save and restore machine status around nested functions. */
4636 init_machine_status = rs6000_init_machine_status;
4637
4638 /* We should always be splitting complex arguments, but we can't break
4639 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4640 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4641 targetm.calls.split_complex_arg = NULL;
4642
4643 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4644 if (DEFAULT_ABI == ABI_AIX)
4645 targetm.calls.custom_function_descriptors = 0;
4646 }
4647
4648 /* Initialize rs6000_cost with the appropriate target costs. */
4649 if (optimize_size)
4650 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4651 else
4652 switch (rs6000_tune)
4653 {
4654 case PROCESSOR_RS64A:
4655 rs6000_cost = &rs64a_cost;
4656 break;
4657
4658 case PROCESSOR_MPCCORE:
4659 rs6000_cost = &mpccore_cost;
4660 break;
4661
4662 case PROCESSOR_PPC403:
4663 rs6000_cost = &ppc403_cost;
4664 break;
4665
4666 case PROCESSOR_PPC405:
4667 rs6000_cost = &ppc405_cost;
4668 break;
4669
4670 case PROCESSOR_PPC440:
4671 rs6000_cost = &ppc440_cost;
4672 break;
4673
4674 case PROCESSOR_PPC476:
4675 rs6000_cost = &ppc476_cost;
4676 break;
4677
4678 case PROCESSOR_PPC601:
4679 rs6000_cost = &ppc601_cost;
4680 break;
4681
4682 case PROCESSOR_PPC603:
4683 rs6000_cost = &ppc603_cost;
4684 break;
4685
4686 case PROCESSOR_PPC604:
4687 rs6000_cost = &ppc604_cost;
4688 break;
4689
4690 case PROCESSOR_PPC604e:
4691 rs6000_cost = &ppc604e_cost;
4692 break;
4693
4694 case PROCESSOR_PPC620:
4695 rs6000_cost = &ppc620_cost;
4696 break;
4697
4698 case PROCESSOR_PPC630:
4699 rs6000_cost = &ppc630_cost;
4700 break;
4701
4702 case PROCESSOR_CELL:
4703 rs6000_cost = &ppccell_cost;
4704 break;
4705
4706 case PROCESSOR_PPC750:
4707 case PROCESSOR_PPC7400:
4708 rs6000_cost = &ppc750_cost;
4709 break;
4710
4711 case PROCESSOR_PPC7450:
4712 rs6000_cost = &ppc7450_cost;
4713 break;
4714
4715 case PROCESSOR_PPC8540:
4716 case PROCESSOR_PPC8548:
4717 rs6000_cost = &ppc8540_cost;
4718 break;
4719
4720 case PROCESSOR_PPCE300C2:
4721 case PROCESSOR_PPCE300C3:
4722 rs6000_cost = &ppce300c2c3_cost;
4723 break;
4724
4725 case PROCESSOR_PPCE500MC:
4726 rs6000_cost = &ppce500mc_cost;
4727 break;
4728
4729 case PROCESSOR_PPCE500MC64:
4730 rs6000_cost = &ppce500mc64_cost;
4731 break;
4732
4733 case PROCESSOR_PPCE5500:
4734 rs6000_cost = &ppce5500_cost;
4735 break;
4736
4737 case PROCESSOR_PPCE6500:
4738 rs6000_cost = &ppce6500_cost;
4739 break;
4740
4741 case PROCESSOR_TITAN:
4742 rs6000_cost = &titan_cost;
4743 break;
4744
4745 case PROCESSOR_POWER4:
4746 case PROCESSOR_POWER5:
4747 rs6000_cost = &power4_cost;
4748 break;
4749
4750 case PROCESSOR_POWER6:
4751 rs6000_cost = &power6_cost;
4752 break;
4753
4754 case PROCESSOR_POWER7:
4755 rs6000_cost = &power7_cost;
4756 break;
4757
4758 case PROCESSOR_POWER8:
4759 rs6000_cost = &power8_cost;
4760 break;
4761
4762 case PROCESSOR_POWER9:
4763 case PROCESSOR_FUTURE:
4764 rs6000_cost = &power9_cost;
4765 break;
4766
4767 case PROCESSOR_PPCA2:
4768 rs6000_cost = &ppca2_cost;
4769 break;
4770
4771 default:
4772 gcc_unreachable ();
4773 }
4774
4775 if (global_init_p)
4776 {
4777 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4778 rs6000_cost->simultaneous_prefetches,
4779 global_options.x_param_values,
4780 global_options_set.x_param_values);
4781 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4782 global_options.x_param_values,
4783 global_options_set.x_param_values);
4784 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4785 rs6000_cost->cache_line_size,
4786 global_options.x_param_values,
4787 global_options_set.x_param_values);
4788 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4789 global_options.x_param_values,
4790 global_options_set.x_param_values);
4791
4792 /* Increase loop peeling limits based on performance analysis. */
4793 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4794 global_options.x_param_values,
4795 global_options_set.x_param_values);
4796 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4797 global_options.x_param_values,
4798 global_options_set.x_param_values);
4799
4800 /* Use the 'model' -fsched-pressure algorithm by default. */
4801 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4802 SCHED_PRESSURE_MODEL,
4803 global_options.x_param_values,
4804 global_options_set.x_param_values);
4805
4806 /* If using typedef char *va_list, signal that
4807 __builtin_va_start (&ap, 0) can be optimized to
4808 ap = __builtin_next_arg (0). */
4809 if (DEFAULT_ABI != ABI_V4)
4810 targetm.expand_builtin_va_start = NULL;
4811 }
4812
4813 /* If not explicitly specified via option, decide whether to generate indexed
4814 load/store instructions. A value of -1 indicates that the
4815 initial value of this variable has not been overwritten. During
4816 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4817 if (TARGET_AVOID_XFORM == -1)
4818 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4819 DERAT mispredict penalty. However, the LVE and STVE altivec instructions
4820 need indexed accesses, and the type used is the scalar type of the element
4821 being loaded or stored. */
4822 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4823 && !TARGET_ALTIVEC);
4824
4825 /* Set the -mrecip options. */
4826 if (rs6000_recip_name)
4827 {
4828 char *p = ASTRDUP (rs6000_recip_name);
4829 char *q;
4830 unsigned int mask, i;
4831 bool invert;
4832
4833 while ((q = strtok (p, ",")) != NULL)
4834 {
4835 p = NULL;
4836 if (*q == '!')
4837 {
4838 invert = true;
4839 q++;
4840 }
4841 else
4842 invert = false;
4843
4844 if (!strcmp (q, "default"))
4845 mask = ((TARGET_RECIP_PRECISION)
4846 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4847 else
4848 {
4849 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4850 if (!strcmp (q, recip_options[i].string))
4851 {
4852 mask = recip_options[i].mask;
4853 break;
4854 }
4855
4856 if (i == ARRAY_SIZE (recip_options))
4857 {
4858 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4859 invert = false;
4860 mask = 0;
4861 ret = false;
4862 }
4863 }
4864
4865 if (invert)
4866 rs6000_recip_control &= ~mask;
4867 else
4868 rs6000_recip_control |= mask;
4869 }
4870 }
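/* So, e.g., -mrecip=rsqrtd,!divf sets the mask named by "rsqrtd" in
   rs6000_recip_control and clears the one named by "divf", while the
   bare word "default" selects RECIP_HIGH_PRECISION or
   RECIP_LOW_PRECISION depending on TARGET_RECIP_PRECISION. */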
4871
4872 /* Set the builtin mask of the various options used that could affect which
4873 builtins are enabled. In the past we used target_flags, but we've run out
4874 of bits, and some options are no longer in target_flags. */
4875 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4876 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4877 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4878 rs6000_builtin_mask);
4879
4880 /* Initialize all of the registers. */
4881 rs6000_init_hard_regno_mode_ok (global_init_p);
4882
4883 /* Save the initial options in case the user uses function-specific options. */
4884 if (global_init_p)
4885 target_option_default_node = target_option_current_node
4886 = build_target_option_node (&global_options);
4887
4888 /* If not explicitly specified via option, decide whether to generate the
4889 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4890 if (TARGET_LINK_STACK == -1)
4891 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4892
4893 /* Deprecate use of -mno-speculate-indirect-jumps. */
4894 if (!rs6000_speculate_indirect_jumps)
4895 warning (0, "%qs is deprecated and not recommended in any circumstances",
4896 "-mno-speculate-indirect-jumps");
4897
4898 return ret;
4899 }
4900
4901 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4902 define the target cpu type. */
4903
4904 static void
4905 rs6000_option_override (void)
4906 {
4907 (void) rs6000_option_override_internal (true);
4908 }
4909
4910 \f
4911 /* Implement targetm.vectorize.builtin_mask_for_load. */
4912 static tree
4913 rs6000_builtin_mask_for_load (void)
4914 {
4915 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4916 if ((TARGET_ALTIVEC && !TARGET_VSX)
4917 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4918 return altivec_builtin_mask_for_load;
4919 else
4920 return 0;
4921 }
4922
4923 /* Implement LOOP_ALIGN. */
4924 align_flags
4925 rs6000_loop_align (rtx label)
4926 {
4927 basic_block bb;
4928 int ninsns;
4929
4930 /* Don't override loop alignment if -falign-loops was specified. */
4931 if (!can_override_loop_align)
4932 return align_loops;
4933
4934 bb = BLOCK_FOR_INSN (label);
4935 ninsns = num_loop_insns (bb->loop_father);
4936
4937 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4938 if (ninsns > 4 && ninsns <= 8
4939 && (rs6000_tune == PROCESSOR_POWER4
4940 || rs6000_tune == PROCESSOR_POWER5
4941 || rs6000_tune == PROCESSOR_POWER6
4942 || rs6000_tune == PROCESSOR_POWER7
4943 || rs6000_tune == PROCESSOR_POWER8))
4944 return align_flags (5);
4945 else
4946 return align_loops;
4947 }
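/* For example, when tuning for POWER8 a 6-insn loop body gets
   align_flags (5), i.e. a 32-byte boundary, so that it fits in a single
   icache sector, while a 12-insn body keeps the -falign-loops default. */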
4948
4949 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4950 after applying N iterations. This routine does not determine how many
4951 iterations are required to reach the desired alignment. */
4952
4953 static bool
4954 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4955 {
4956 if (is_packed)
4957 return false;
4958
4959 if (TARGET_32BIT)
4960 {
4961 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4962 return true;
4963
4964 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4965 return true;
4966
4967 return false;
4968 }
4969 else
4970 {
4971 if (TARGET_MACHO)
4972 return false;
4973
4974 /* Assuming that all other types are naturally aligned. CHECKME! */
4975 return true;
4976 }
4977 }
4978
4979 /* Return true if the vector misalignment factor is supported by the
4980 target. */
4981 static bool
4982 rs6000_builtin_support_vector_misalignment (machine_mode mode,
4983 const_tree type,
4984 int misalignment,
4985 bool is_packed)
4986 {
4987 if (TARGET_VSX)
4988 {
4989 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4990 return true;
4991
4992 /* Return false if the movmisalign pattern is not supported for this mode. */
4993 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4994 return false;
4995
4996 if (misalignment == -1)
4997 {
4998 /* Misalignment factor is unknown at compile time but we know
4999 it's word aligned. */
5000 if (rs6000_vector_alignment_reachable (type, is_packed))
5001 {
5002 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5003
5004 if (element_size == 64 || element_size == 32)
5005 return true;
5006 }
5007
5008 return false;
5009 }
5010
5011 /* VSX supports word-aligned vectors. */
5012 if (misalignment % 4 == 0)
5013 return true;
5014 }
5015 return false;
5016 }
5017
5018 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5019 static int
5020 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5021 tree vectype, int misalign)
5022 {
5023 unsigned elements;
5024 tree elem_type;
5025
5026 switch (type_of_cost)
5027 {
5028 case scalar_stmt:
5029 case scalar_load:
5030 case scalar_store:
5031 case vector_stmt:
5032 case vector_load:
5033 case vector_store:
5034 case vec_to_scalar:
5035 case scalar_to_vec:
5036 case cond_branch_not_taken:
5037 return 1;
5038
5039 case vec_perm:
5040 if (TARGET_VSX)
5041 return 3;
5042 else
5043 return 1;
5044
5045 case vec_promote_demote:
5046 if (TARGET_VSX)
5047 return 4;
5048 else
5049 return 1;
5050
5051 case cond_branch_taken:
5052 return 3;
5053
5054 case unaligned_load:
5055 case vector_gather_load:
5056 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5057 return 1;
5058
5059 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5060 {
5061 elements = TYPE_VECTOR_SUBPARTS (vectype);
5062 if (elements == 2)
5063 /* Double word aligned. */
5064 return 2;
5065
5066 if (elements == 4)
5067 {
5068 switch (misalign)
5069 {
5070 case 8:
5071 /* Double word aligned. */
5072 return 2;
5073
5074 case -1:
5075 /* Unknown misalignment. */
5076 case 4:
5077 case 12:
5078 /* Word aligned. */
5079 return 22;
5080
5081 default:
5082 gcc_unreachable ();
5083 }
5084 }
5085 }
5086
5087 if (TARGET_ALTIVEC)
5088 /* Misaligned loads are not supported. */
5089 gcc_unreachable ();
5090
5091 return 2;
5092
5093 case unaligned_store:
5094 case vector_scatter_store:
5095 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5096 return 1;
5097
5098 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5099 {
5100 elements = TYPE_VECTOR_SUBPARTS (vectype);
5101 if (elements == 2)
5102 /* Double word aligned. */
5103 return 2;
5104
5105 if (elements == 4)
5106 {
5107 switch (misalign)
5108 {
5109 case 8:
5110 /* Double word aligned. */
5111 return 2;
5112
5113 case -1:
5114 /* Unknown misalignment. */
5115 case 4:
5116 case 12:
5117 /* Word aligned. */
5118 return 23;
5119
5120 default:
5121 gcc_unreachable ();
5122 }
5123 }
5124 }
5125
5126 if (TARGET_ALTIVEC)
5127 /* Misaligned stores are not supported. */
5128 gcc_unreachable ();
5129
5130 return 2;
5131
5132 case vec_construct:
5133 /* This is a rough approximation assuming non-constant elements
5134 constructed into a vector via element insertion. FIXME:
5135 vec_construct is not granular enough for uniformly good
5136 decisions. If the initialization is a splat, this is
5137 cheaper than we estimate. Improve this someday. */
5138 elem_type = TREE_TYPE (vectype);
5139 /* 32-bit vectors loaded into registers are stored as double
5140 precision, so we need 2 permutes, 2 converts, and 1 merge
5141 to construct a vector of short floats from them. */
5142 if (SCALAR_FLOAT_TYPE_P (elem_type)
5143 && TYPE_PRECISION (elem_type) == 32)
5144 return 5;
5145 /* On POWER9, integer vector types are built up in GPRs and then
5146 use a direct move (2 cycles). For POWER8 this is even worse,
5147 as we need two direct moves and a merge, and the direct moves
5148 are five cycles. */
5149 else if (INTEGRAL_TYPE_P (elem_type))
5150 {
5151 if (TARGET_P9_VECTOR)
5152 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5153 else
5154 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5155 }
5156 else
5157 /* V2DFmode doesn't need a direct move. */
5158 return 2;
5159
5160 default:
5161 gcc_unreachable ();
5162 }
5163 }
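/* To put the weights above in context: without
   TARGET_EFFICIENT_UNALIGNED_VSX, a merely word-aligned V4SF load
   (misalign 4 or 12) costs 22 versus 1 for an ordinary vector_load,
   strongly discouraging unaligned accesses on pre-POWER9 VSX targets. */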
5164
5165 /* Implement targetm.vectorize.preferred_simd_mode. */
5166
5167 static machine_mode
5168 rs6000_preferred_simd_mode (scalar_mode mode)
5169 {
5170 if (TARGET_VSX)
5171 switch (mode)
5172 {
5173 case E_DFmode:
5174 return V2DFmode;
5175 default:;
5176 }
5177 if (TARGET_ALTIVEC || TARGET_VSX)
5178 switch (mode)
5179 {
5180 case E_SFmode:
5181 return V4SFmode;
5182 case E_TImode:
5183 return V1TImode;
5184 case E_DImode:
5185 return V2DImode;
5186 case E_SImode:
5187 return V4SImode;
5188 case E_HImode:
5189 return V8HImode;
5190 case E_QImode:
5191 return V16QImode;
5192 default:;
5193 }
5194 return word_mode;
5195 }
5196
5197 typedef struct _rs6000_cost_data
5198 {
5199 struct loop *loop_info;
5200 unsigned cost[3];
5201 } rs6000_cost_data;
5202
5203 /* Test for likely overcommitment of vector hardware resources. If a
5204 loop iteration is relatively large, and too large a percentage of
5205 instructions in the loop are vectorized, the cost model may not
5206 adequately reflect delays from unavailable vector resources.
5207 Penalize the loop body cost for this case. */
5208
5209 static void
5210 rs6000_density_test (rs6000_cost_data *data)
5211 {
5212 const int DENSITY_PCT_THRESHOLD = 85;
5213 const int DENSITY_SIZE_THRESHOLD = 70;
5214 const int DENSITY_PENALTY = 10;
5215 struct loop *loop = data->loop_info;
5216 basic_block *bbs = get_loop_body (loop);
5217 int nbbs = loop->num_nodes;
5218 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5219 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5220 int i, density_pct;
5221
5222 for (i = 0; i < nbbs; i++)
5223 {
5224 basic_block bb = bbs[i];
5225 gimple_stmt_iterator gsi;
5226
5227 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5228 {
5229 gimple *stmt = gsi_stmt (gsi);
5230 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5231
5232 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5233 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5234 not_vec_cost++;
5235 }
5236 }
5237
5238 free (bbs);
5239 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5240
5241 if (density_pct > DENSITY_PCT_THRESHOLD
5242 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5243 {
5244 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5245 if (dump_enabled_p ())
5246 dump_printf_loc (MSG_NOTE, vect_location,
5247 "density %d%%, cost %d exceeds threshold, penalizing "
5248 "loop body cost by %d%%", density_pct,
5249 vec_cost + not_vec_cost, DENSITY_PENALTY);
5250 }
5251 }
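/* Worked example: vec_cost = 90 and not_vec_cost = 10 give
   density_pct = 9000 / 100 = 90.  As 90 > 85 and the loop size
   100 > 70, the body cost becomes 90 * 110 / 100 = 99. */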
5252
5253 /* Implement targetm.vectorize.init_cost. */
5254
5255 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5256 instruction is needed by the vectorization. */
5257 static bool rs6000_vect_nonmem;
5258
5259 static void *
5260 rs6000_init_cost (struct loop *loop_info)
5261 {
5262 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5263 data->loop_info = loop_info;
5264 data->cost[vect_prologue] = 0;
5265 data->cost[vect_body] = 0;
5266 data->cost[vect_epilogue] = 0;
5267 rs6000_vect_nonmem = false;
5268 return data;
5269 }
5270
5271 /* Implement targetm.vectorize.add_stmt_cost. */
5272
5273 static unsigned
5274 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5275 struct _stmt_vec_info *stmt_info, int misalign,
5276 enum vect_cost_model_location where)
5277 {
5278 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5279 unsigned retval = 0;
5280
5281 if (flag_vect_cost_model)
5282 {
5283 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5284 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5285 misalign);
5286 /* Statements in an inner loop relative to the loop being
5287 vectorized are weighted more heavily. The value here is
5288 arbitrary and could potentially be improved with analysis. */
5289 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5290 count *= 50; /* FIXME. */
5291
5292 retval = (unsigned) (count * stmt_cost);
5293 cost_data->cost[where] += retval;
5294
5295 /* Check whether we're doing something other than just a copy loop.
5296 Not all such loops may be profitably vectorized; see
5297 rs6000_finish_cost. */
5298 if ((kind == vec_to_scalar || kind == vec_perm
5299 || kind == vec_promote_demote || kind == vec_construct
5300 || kind == scalar_to_vec)
5301 || (where == vect_body && kind == vector_stmt))
5302 rs6000_vect_nonmem = true;
5303 }
5304
5305 return retval;
5306 }
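/* E.g. one vector_stmt of cost 1 that sits in a loop nested inside the
   loop being vectorized is accounted as 1 * 50 = 50 in
   cost_data->cost[vect_body] because of the weighting above. */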
5307
5308 /* Implement targetm.vectorize.finish_cost. */
5309
5310 static void
5311 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5312 unsigned *body_cost, unsigned *epilogue_cost)
5313 {
5314 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5315
5316 if (cost_data->loop_info)
5317 rs6000_density_test (cost_data);
5318
5319 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5320 that require versioning for any reason. The vectorization is at
5321 best a wash inside the loop, and the versioning checks make
5322 profitability highly unlikely and potentially quite harmful. */
5323 if (cost_data->loop_info)
5324 {
5325 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5326 if (!rs6000_vect_nonmem
5327 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5328 && LOOP_REQUIRES_VERSIONING (vec_info))
5329 cost_data->cost[vect_body] += 10000;
5330 }
5331
5332 *prologue_cost = cost_data->cost[vect_prologue];
5333 *body_cost = cost_data->cost[vect_body];
5334 *epilogue_cost = cost_data->cost[vect_epilogue];
5335 }
5336
5337 /* Implement targetm.vectorize.destroy_cost_data. */
5338
5339 static void
5340 rs6000_destroy_cost_data (void *data)
5341 {
5342 free (data);
5343 }
5344
5345 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5346 library with vectorized intrinsics. */
5347
5348 static tree
5349 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5350 tree type_in)
5351 {
5352 char name[32];
5353 const char *suffix = NULL;
5354 tree fntype, new_fndecl, bdecl = NULL_TREE;
5355 int n_args = 1;
5356 const char *bname;
5357 machine_mode el_mode, in_mode;
5358 int n, in_n;
5359
5360 /* Libmass is suitable only for unsafe math, as it does not correctly
5361 support parts of IEEE (such as denormals) with the required precision.
5362 Only support it if we have VSX, so the simd d2 or f4 functions can be used.
5363 XXX: Add variable length support. */
5364 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5365 return NULL_TREE;
5366
5367 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5368 n = TYPE_VECTOR_SUBPARTS (type_out);
5369 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5370 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5371 if (el_mode != in_mode
5372 || n != in_n)
5373 return NULL_TREE;
5374
5375 switch (fn)
5376 {
5377 CASE_CFN_ATAN2:
5378 CASE_CFN_HYPOT:
5379 CASE_CFN_POW:
5380 n_args = 2;
5381 gcc_fallthrough ();
5382
5383 CASE_CFN_ACOS:
5384 CASE_CFN_ACOSH:
5385 CASE_CFN_ASIN:
5386 CASE_CFN_ASINH:
5387 CASE_CFN_ATAN:
5388 CASE_CFN_ATANH:
5389 CASE_CFN_CBRT:
5390 CASE_CFN_COS:
5391 CASE_CFN_COSH:
5392 CASE_CFN_ERF:
5393 CASE_CFN_ERFC:
5394 CASE_CFN_EXP2:
5395 CASE_CFN_EXP:
5396 CASE_CFN_EXPM1:
5397 CASE_CFN_LGAMMA:
5398 CASE_CFN_LOG10:
5399 CASE_CFN_LOG1P:
5400 CASE_CFN_LOG2:
5401 CASE_CFN_LOG:
5402 CASE_CFN_SIN:
5403 CASE_CFN_SINH:
5404 CASE_CFN_SQRT:
5405 CASE_CFN_TAN:
5406 CASE_CFN_TANH:
5407 if (el_mode == DFmode && n == 2)
5408 {
5409 bdecl = mathfn_built_in (double_type_node, fn);
5410 suffix = "d2"; /* pow -> powd2 */
5411 }
5412 else if (el_mode == SFmode && n == 4)
5413 {
5414 bdecl = mathfn_built_in (float_type_node, fn);
5415 suffix = "4"; /* powf -> powf4 */
5416 }
5417 else
5418 return NULL_TREE;
5419 if (!bdecl)
5420 return NULL_TREE;
5421 break;
5422
5423 default:
5424 return NULL_TREE;
5425 }
5426
5427 gcc_assert (suffix != NULL);
5428 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5429 if (!bname)
5430 return NULL_TREE;
5431
5432 strcpy (name, bname + sizeof ("__builtin_") - 1);
5433 strcat (name, suffix);
5434
5435 if (n_args == 1)
5436 fntype = build_function_type_list (type_out, type_in, NULL);
5437 else if (n_args == 2)
5438 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5439 else
5440 gcc_unreachable ();
5441
5442 /* Build a function declaration for the vectorized function. */
5443 new_fndecl = build_decl (BUILTINS_LOCATION,
5444 FUNCTION_DECL, get_identifier (name), fntype);
5445 TREE_PUBLIC (new_fndecl) = 1;
5446 DECL_EXTERNAL (new_fndecl) = 1;
5447 DECL_IS_NOVOPS (new_fndecl) = 1;
5448 TREE_READONLY (new_fndecl) = 1;
5449
5450 return new_fndecl;
5451 }
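/* Illustration of the mangling above: vectorizing pow for V2DFmode
   strips "__builtin_" from "__builtin_pow" and appends "d2", producing
   a call to powd2, the presumed MASS entry point; the V4SFmode powf
   case becomes powf4 in the same way. */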
5452
5453 /* Returns a function decl for a vectorized version of the builtin function
5454 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5455 if it is not available. */
5456
5457 static tree
5458 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5459 tree type_in)
5460 {
5461 machine_mode in_mode, out_mode;
5462 int in_n, out_n;
5463
5464 if (TARGET_DEBUG_BUILTIN)
5465 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5466 combined_fn_name (combined_fn (fn)),
5467 GET_MODE_NAME (TYPE_MODE (type_out)),
5468 GET_MODE_NAME (TYPE_MODE (type_in)));
5469
5470 if (TREE_CODE (type_out) != VECTOR_TYPE
5471 || TREE_CODE (type_in) != VECTOR_TYPE)
5472 return NULL_TREE;
5473
5474 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5475 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5476 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5477 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5478
5479 switch (fn)
5480 {
5481 CASE_CFN_COPYSIGN:
5482 if (VECTOR_UNIT_VSX_P (V2DFmode)
5483 && out_mode == DFmode && out_n == 2
5484 && in_mode == DFmode && in_n == 2)
5485 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5486 if (VECTOR_UNIT_VSX_P (V4SFmode)
5487 && out_mode == SFmode && out_n == 4
5488 && in_mode == SFmode && in_n == 4)
5489 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5490 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5491 && out_mode == SFmode && out_n == 4
5492 && in_mode == SFmode && in_n == 4)
5493 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5494 break;
5495 CASE_CFN_CEIL:
5496 if (VECTOR_UNIT_VSX_P (V2DFmode)
5497 && out_mode == DFmode && out_n == 2
5498 && in_mode == DFmode && in_n == 2)
5499 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5500 if (VECTOR_UNIT_VSX_P (V4SFmode)
5501 && out_mode == SFmode && out_n == 4
5502 && in_mode == SFmode && in_n == 4)
5503 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5504 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5505 && out_mode == SFmode && out_n == 4
5506 && in_mode == SFmode && in_n == 4)
5507 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5508 break;
5509 CASE_CFN_FLOOR:
5510 if (VECTOR_UNIT_VSX_P (V2DFmode)
5511 && out_mode == DFmode && out_n == 2
5512 && in_mode == DFmode && in_n == 2)
5513 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5514 if (VECTOR_UNIT_VSX_P (V4SFmode)
5515 && out_mode == SFmode && out_n == 4
5516 && in_mode == SFmode && in_n == 4)
5517 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5518 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5519 && out_mode == SFmode && out_n == 4
5520 && in_mode == SFmode && in_n == 4)
5521 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5522 break;
5523 CASE_CFN_FMA:
5524 if (VECTOR_UNIT_VSX_P (V2DFmode)
5525 && out_mode == DFmode && out_n == 2
5526 && in_mode == DFmode && in_n == 2)
5527 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5528 if (VECTOR_UNIT_VSX_P (V4SFmode)
5529 && out_mode == SFmode && out_n == 4
5530 && in_mode == SFmode && in_n == 4)
5531 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5532 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5533 && out_mode == SFmode && out_n == 4
5534 && in_mode == SFmode && in_n == 4)
5535 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5536 break;
5537 CASE_CFN_TRUNC:
5538 if (VECTOR_UNIT_VSX_P (V2DFmode)
5539 && out_mode == DFmode && out_n == 2
5540 && in_mode == DFmode && in_n == 2)
5541 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5542 if (VECTOR_UNIT_VSX_P (V4SFmode)
5543 && out_mode == SFmode && out_n == 4
5544 && in_mode == SFmode && in_n == 4)
5545 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5546 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5547 && out_mode == SFmode && out_n == 4
5548 && in_mode == SFmode && in_n == 4)
5549 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5550 break;
5551 CASE_CFN_NEARBYINT:
5552 if (VECTOR_UNIT_VSX_P (V2DFmode)
5553 && flag_unsafe_math_optimizations
5554 && out_mode == DFmode && out_n == 2
5555 && in_mode == DFmode && in_n == 2)
5556 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5557 if (VECTOR_UNIT_VSX_P (V4SFmode)
5558 && flag_unsafe_math_optimizations
5559 && out_mode == SFmode && out_n == 4
5560 && in_mode == SFmode && in_n == 4)
5561 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5562 break;
5563 CASE_CFN_RINT:
5564 if (VECTOR_UNIT_VSX_P (V2DFmode)
5565 && !flag_trapping_math
5566 && out_mode == DFmode && out_n == 2
5567 && in_mode == DFmode && in_n == 2)
5568 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5569 if (VECTOR_UNIT_VSX_P (V4SFmode)
5570 && !flag_trapping_math
5571 && out_mode == SFmode && out_n == 4
5572 && in_mode == SFmode && in_n == 4)
5573 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5574 break;
5575 default:
5576 break;
5577 }
5578
5579 /* Generate calls to libmass if appropriate. */
5580 if (rs6000_veclib_handler)
5581 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5582
5583 return NULL_TREE;
5584 }
5585
5586 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5587
5588 static tree
5589 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5590 tree type_in)
5591 {
5592 machine_mode in_mode, out_mode;
5593 int in_n, out_n;
5594
5595 if (TARGET_DEBUG_BUILTIN)
5596 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5597 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5598 GET_MODE_NAME (TYPE_MODE (type_out)),
5599 GET_MODE_NAME (TYPE_MODE (type_in)));
5600
5601 if (TREE_CODE (type_out) != VECTOR_TYPE
5602 || TREE_CODE (type_in) != VECTOR_TYPE)
5603 return NULL_TREE;
5604
5605 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5606 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5607 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5608 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5609
5610 enum rs6000_builtins fn
5611 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5612 switch (fn)
5613 {
5614 case RS6000_BUILTIN_RSQRTF:
5615 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5616 && out_mode == SFmode && out_n == 4
5617 && in_mode == SFmode && in_n == 4)
5618 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5619 break;
5620 case RS6000_BUILTIN_RSQRT:
5621 if (VECTOR_UNIT_VSX_P (V2DFmode)
5622 && out_mode == DFmode && out_n == 2
5623 && in_mode == DFmode && in_n == 2)
5624 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5625 break;
5626 case RS6000_BUILTIN_RECIPF:
5627 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5628 && out_mode == SFmode && out_n == 4
5629 && in_mode == SFmode && in_n == 4)
5630 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5631 break;
5632 case RS6000_BUILTIN_RECIP:
5633 if (VECTOR_UNIT_VSX_P (V2DFmode)
5634 && out_mode == DFmode && out_n == 2
5635 && in_mode == DFmode && in_n == 2)
5636 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5637 break;
5638 default:
5639 break;
5640 }
5641 return NULL_TREE;
5642 }
5643 \f
5644 /* Default CPU string for rs6000*_file_start functions. */
5645 static const char *rs6000_default_cpu;
5646
5647 #ifdef USING_ELFOS_H
5648 static const char *rs6000_machine;
5649
5650 static const char *
5651 rs6000_machine_from_flags (void)
5652 {
5653 if ((rs6000_isa_flags & (ISA_FUTURE_MASKS_SERVER & ~ISA_3_0_MASKS_SERVER))
5654 != 0)
5655 return "future";
5656 if ((rs6000_isa_flags & (ISA_3_0_MASKS_SERVER & ~ISA_2_7_MASKS_SERVER)) != 0)
5657 return "power9";
5658 if ((rs6000_isa_flags & (ISA_2_7_MASKS_SERVER & ~ISA_2_6_MASKS_SERVER)) != 0)
5659 return "power8";
5660 if ((rs6000_isa_flags & (ISA_2_6_MASKS_SERVER & ~ISA_2_5_MASKS_SERVER)) != 0)
5661 return "power7";
5662 if ((rs6000_isa_flags & (ISA_2_5_MASKS_SERVER & ~ISA_2_4_MASKS)) != 0)
5663 return "power6";
5664 if ((rs6000_isa_flags & (ISA_2_4_MASKS & ~ISA_2_1_MASKS)) != 0)
5665 return "power5";
5666 if ((rs6000_isa_flags & ISA_2_1_MASKS) != 0)
5667 return "power4";
5668 if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5669 return "ppc64";
5670 return "ppc";
5671 }
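/* Each test above isolates the option bits that are new in one ISA
   level relative to the previous one.  So, for instance, a flag word
   containing an ISA 3.0-only bit such as OPTION_MASK_P9_VECTOR maps to
   "power9", while plain -mpowerpc64 with no server ISA bits gives
   "ppc64". */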
5672
5673 static void
5674 emit_asm_machine (void)
5675 {
5676 fprintf (asm_out_file, "\t.machine %s\n", rs6000_machine);
5677 }
5678 #endif
5679
5680 /* Do anything needed at the start of the asm file. */
5681
5682 static void
5683 rs6000_file_start (void)
5684 {
5685 char buffer[80];
5686 const char *start = buffer;
5687 FILE *file = asm_out_file;
5688
5689 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5690
5691 default_file_start ();
5692
5693 if (flag_verbose_asm)
5694 {
5695 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5696
5697 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5698 {
5699 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5700 start = "";
5701 }
5702
5703 if (global_options_set.x_rs6000_cpu_index)
5704 {
5705 fprintf (file, "%s -mcpu=%s", start,
5706 processor_target_table[rs6000_cpu_index].name);
5707 start = "";
5708 }
5709
5710 if (global_options_set.x_rs6000_tune_index)
5711 {
5712 fprintf (file, "%s -mtune=%s", start,
5713 processor_target_table[rs6000_tune_index].name);
5714 start = "";
5715 }
5716
5717 if (PPC405_ERRATUM77)
5718 {
5719 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5720 start = "";
5721 }
5722
5723 #ifdef USING_ELFOS_H
5724 switch (rs6000_sdata)
5725 {
5726 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5727 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5728 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5729 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5730 }
5731
5732 if (rs6000_sdata && g_switch_value)
5733 {
5734 fprintf (file, "%s -G %d", start,
5735 g_switch_value);
5736 start = "";
5737 }
5738 #endif
5739
5740 if (*start == '\0')
5741 putc ('\n', file);
5742 }
5743
5744 #ifdef USING_ELFOS_H
5745 rs6000_machine = rs6000_machine_from_flags ();
5746 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5747 && !global_options_set.x_rs6000_cpu_index)
5748 emit_asm_machine ();
5749 #endif
5750
5751 if (DEFAULT_ABI == ABI_ELFv2)
5752 fprintf (file, "\t.abiversion 2\n");
5753 }
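/* Under -fverbose-asm the output starts with a banner along the lines
   of "# rs6000/powerpc options: -mcpu=power9".  Note that the .machine
   directive is emitted here only when neither a configured default cpu
   nor -mcpu pinned one, and ELFv2 objects additionally begin with
   ".abiversion 2". */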
5754
5755 \f
5756 /* Return nonzero if this function is known to have a null epilogue. */
5757
5758 int
5759 direct_return (void)
5760 {
5761 if (reload_completed)
5762 {
5763 rs6000_stack_t *info = rs6000_stack_info ();
5764
5765 if (info->first_gp_reg_save == 32
5766 && info->first_fp_reg_save == 64
5767 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5768 && ! info->lr_save_p
5769 && ! info->cr_save_p
5770 && info->vrsave_size == 0
5771 && ! info->push_p)
5772 return 1;
5773 }
5774
5775 return 0;
5776 }
5777
5778 /* Helper for num_insns_constant. Calculate number of instructions to
5779 load VALUE to a single gpr using combinations of addi, addis, ori,
5780 oris and sldi instructions. */
5781
5782 static int
5783 num_insns_constant_gpr (HOST_WIDE_INT value)
5784 {
5785 /* signed constant loadable with addi */
5786 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5787 return 1;
5788
5789 /* constant loadable with addis */
5790 else if ((value & 0xffff) == 0
5791 && (value >> 31 == -1 || value >> 31 == 0))
5792 return 1;
5793
5794 else if (TARGET_POWERPC64)
5795 {
5796 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5797 HOST_WIDE_INT high = value >> 31;
5798
5799 if (high == 0 || high == -1)
5800 return 2;
5801
5802 high >>= 1;
5803
5804 if (low == 0)
5805 return num_insns_constant_gpr (high) + 1;
5806 else if (high == 0)
5807 return num_insns_constant_gpr (low) + 1;
5808 else
5809 return (num_insns_constant_gpr (high)
5810 + num_insns_constant_gpr (low) + 1);
5811 }
5812
5813 else
5814 return 2;
5815 }
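/* Worked examples: 0x12345678 costs 2 (lis 0x1234 then ori 0x5678),
   while 0x1234567900000000 has a zero low word and costs
   num_insns_constant_gpr (0x12345679) + 1 = 3, matching a
   lis/ori/sldi 32 sequence. */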
5816
5817 /* Helper for num_insns_constant. Allow constants formed by the
5818 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5819 and handle modes that require multiple gprs. */
5820
5821 static int
5822 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5823 {
5824 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5825 int total = 0;
5826 while (nregs-- > 0)
5827 {
5828 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5829 int insns = num_insns_constant_gpr (low);
5830 if (insns > 2
5831 /* We won't get more than 2 from num_insns_constant_gpr
5832 except when TARGET_POWERPC64 and mode is DImode or
5833 wider, so the register mode must be DImode. */
5834 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5835 insns = 2;
5836 total += insns;
5837 value >>= BITS_PER_WORD;
5838 }
5839 return total;
5840 }
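/* The and-mask check matters for DImode values such as
   0x000ffffffff00000, a contiguous run of ones: the plain gpr recipe
   would cost 4, but being a valid and-mask it counts as 2, e.g. li of
   -1 followed by one rotate-and-mask insn. */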
5841
5842 /* Return the number of instructions it takes to form a constant in as
5843 many gprs as are needed for MODE. */
5844
5845 int
5846 num_insns_constant (rtx op, machine_mode mode)
5847 {
5848 HOST_WIDE_INT val;
5849
5850 switch (GET_CODE (op))
5851 {
5852 case CONST_INT:
5853 val = INTVAL (op);
5854 break;
5855
5856 case CONST_WIDE_INT:
5857 {
5858 int insns = 0;
5859 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5860 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5861 DImode);
5862 return insns;
5863 }
5864
5865 case CONST_DOUBLE:
5866 {
5867 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5868
5869 if (mode == SFmode || mode == SDmode)
5870 {
5871 long l;
5872
5873 if (mode == SDmode)
5874 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5875 else
5876 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5877 /* See the first define_split in rs6000.md handling a
5878 const_double_operand. */
5879 val = l;
5880 mode = SImode;
5881 }
5882 else if (mode == DFmode || mode == DDmode)
5883 {
5884 long l[2];
5885
5886 if (mode == DDmode)
5887 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5888 else
5889 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5890
5891 /* See the second (32-bit) and third (64-bit) define_split
5892 in rs6000.md handling a const_double_operand. */
5893 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5894 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5895 mode = DImode;
5896 }
5897 else if (mode == TFmode || mode == TDmode
5898 || mode == KFmode || mode == IFmode)
5899 {
5900 long l[4];
5901 int insns;
5902
5903 if (mode == TDmode)
5904 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5905 else
5906 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5907
5908 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5909 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5910 insns = num_insns_constant_multi (val, DImode);
5911 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5912 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5913 insns += num_insns_constant_multi (val, DImode);
5914 return insns;
5915 }
5916 else
5917 gcc_unreachable ();
5918 }
5919 break;
5920
5921 default:
5922 gcc_unreachable ();
5923 }
5924
5925 return num_insns_constant_multi (val, mode);
5926 }
5927
5928 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5929 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5930 corresponding element of the vector, but for V4SFmode, the
5931 corresponding "float" is interpreted as an SImode integer. */
5932
5933 HOST_WIDE_INT
5934 const_vector_elt_as_int (rtx op, unsigned int elt)
5935 {
5936 rtx tmp;
5937
5938 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5939 gcc_assert (GET_MODE (op) != V2DImode
5940 && GET_MODE (op) != V2DFmode);
5941
5942 tmp = CONST_VECTOR_ELT (op, elt);
5943 if (GET_MODE (op) == V4SFmode)
5944 tmp = gen_lowpart (SImode, tmp);
5945 return INTVAL (tmp);
5946 }
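/* For instance, the element 1.0f of a V4SFmode constant comes back as
   its IEEE single-precision image 0x3f800000 rather than as a float. */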
5947
5948 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5949 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5950 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5951 all items are set to the same value and contain COPIES replicas of the
5952 vsplt's operand; if STEP > 1, one in every STEP elements is set to the vsplt's
5953 operand and the others are set to the value of the operand's msb. */
5954
5955 static bool
5956 vspltis_constant (rtx op, unsigned step, unsigned copies)
5957 {
5958 machine_mode mode = GET_MODE (op);
5959 machine_mode inner = GET_MODE_INNER (mode);
5960
5961 unsigned i;
5962 unsigned nunits;
5963 unsigned bitsize;
5964 unsigned mask;
5965
5966 HOST_WIDE_INT val;
5967 HOST_WIDE_INT splat_val;
5968 HOST_WIDE_INT msb_val;
5969
5970 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5971 return false;
5972
5973 nunits = GET_MODE_NUNITS (mode);
5974 bitsize = GET_MODE_BITSIZE (inner);
5975 mask = GET_MODE_MASK (inner);
5976
5977 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5978 splat_val = val;
5979 msb_val = val >= 0 ? 0 : -1;
5980
5981 /* Construct the value to be splatted, if possible. If not, return false. */
5982 for (i = 2; i <= copies; i *= 2)
5983 {
5984 HOST_WIDE_INT small_val;
5985 bitsize /= 2;
5986 small_val = splat_val >> bitsize;
5987 mask >>= bitsize;
5988 if (splat_val != ((HOST_WIDE_INT)
5989 ((unsigned HOST_WIDE_INT) small_val << bitsize)
5990 | (small_val & mask)))
5991 return false;
5992 splat_val = small_val;
5993 }
5994
5995 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5996 if (EASY_VECTOR_15 (splat_val))
5997 ;
5998
5999 /* Also check if we can splat, and then add the result to itself. Do so if
6000 the value is positive, or if the splat instruction is using OP's mode;
6001 for splat_val < 0, the splat and the add should use the same mode. */
6002 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6003 && (splat_val >= 0 || (step == 1 && copies == 1)))
6004 ;
6005
6006 /* Also check if we are loading up the most significant bit, which can be
6007 done by loading up -1 and shifting the value left by -1. */
6008 else if (EASY_VECTOR_MSB (splat_val, inner))
6009 ;
6010
6011 else
6012 return false;
6013
6014 /* Check if VAL is present in every STEP-th element, and the
6015 other elements are filled with its most significant bit. */
6016 for (i = 1; i < nunits; ++i)
6017 {
6018 HOST_WIDE_INT desired_val;
6019 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6020 if ((i & (step - 1)) == 0)
6021 desired_val = val;
6022 else
6023 desired_val = msb_val;
6024
6025 if (desired_val != const_vector_elt_as_int (op, elt))
6026 return false;
6027 }
6028
6029 return true;
6030 }
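/* Two concrete cases: a V8HImode vector with every element 0x0101
   succeeds with STEP 1, COPIES 2, since a vspltisb of 1 yields two
   copies of the byte per halfword; a big-endian V8HImode vector
   {0,5,0,5,...} succeeds with STEP 2, COPIES 1, since a vspltisw of 5
   sets every second halfword and fills the rest with the msb value 0. */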
6031
6032 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6033 instruction, filling in the bottom elements with 0 or -1.
6034
6035 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6036 for the number of zeroes to shift in, or negative for the number of 0xff
6037 bytes to shift in.
6038
6039 OP is a CONST_VECTOR. */
6040
6041 int
6042 vspltis_shifted (rtx op)
6043 {
6044 machine_mode mode = GET_MODE (op);
6045 machine_mode inner = GET_MODE_INNER (mode);
6046
6047 unsigned i, j;
6048 unsigned nunits;
6049 unsigned mask;
6050
6051 HOST_WIDE_INT val;
6052
6053 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6054 return 0;
6055
6056 /* We need to create pseudo registers to do the shift, so don't recognize
6057 shift vector constants after reload. */
6058 if (!can_create_pseudo_p ())
6059 return 0;
6060
6061 nunits = GET_MODE_NUNITS (mode);
6062 mask = GET_MODE_MASK (inner);
6063
6064 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6065
6066 /* Check if the value can really be the operand of a vspltis[bhw]. */
6067 if (EASY_VECTOR_15 (val))
6068 ;
6069
6070 /* Also check if we are loading up the most significant bit which can be done
6071 by loading up -1 and shifting the value left by -1. */
6072 else if (EASY_VECTOR_MSB (val, inner))
6073 ;
6074
6075 else
6076 return 0;
6077
6078 /* Check if VAL is present in every element until we find elements
6079 that are 0 or all 1 bits. */
6080 for (i = 1; i < nunits; ++i)
6081 {
6082 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6083 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6084
6085 /* If the value isn't the splat value, check for the remaining elements
6086 being 0/-1. */
6087 if (val != elt_val)
6088 {
6089 if (elt_val == 0)
6090 {
6091 for (j = i+1; j < nunits; ++j)
6092 {
6093 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6094 if (const_vector_elt_as_int (op, elt2) != 0)
6095 return 0;
6096 }
6097
6098 return (nunits - i) * GET_MODE_SIZE (inner);
6099 }
6100
6101 else if ((elt_val & mask) == mask)
6102 {
6103 for (j = i+1; j < nunits; ++j)
6104 {
6105 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6106 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6107 return 0;
6108 }
6109
6110 return -((nunits - i) * GET_MODE_SIZE (inner));
6111 }
6112
6113 else
6114 return 0;
6115 }
6116 }
6117
6118 /* If all elements are equal, we don't need to do VSLDOI. */
6119 return 0;
6120 }
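/* Example: the big-endian V16QImode constant {5,0,0,...,0} can be done
   as a vspltisb of 5 followed by shifting in 15 zero bytes, so the
   return value is 15; the variant {5,0xff,...,0xff} likewise yields
   -15. */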
6121
6122
6123 /* Return true if OP is of the given MODE and can be synthesized
6124 with a vspltisb, vspltish or vspltisw. */
6125
6126 bool
6127 easy_altivec_constant (rtx op, machine_mode mode)
6128 {
6129 unsigned step, copies;
6130
6131 if (mode == VOIDmode)
6132 mode = GET_MODE (op);
6133 else if (mode != GET_MODE (op))
6134 return false;
6135
6136 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6137 constants. */
6138 if (mode == V2DFmode)
6139 return zero_constant (op, mode);
6140
6141 else if (mode == V2DImode)
6142 {
6143 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6144 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6145 return false;
6146
6147 if (zero_constant (op, mode))
6148 return true;
6149
6150 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6151 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6152 return true;
6153
6154 return false;
6155 }
6156
6157 /* V1TImode is a special container for TImode. Ignore for now. */
6158 else if (mode == V1TImode)
6159 return false;
6160
6161 /* Start with a vspltisw. */
6162 step = GET_MODE_NUNITS (mode) / 4;
6163 copies = 1;
6164
6165 if (vspltis_constant (op, step, copies))
6166 return true;
6167
6168 /* Then try with a vspltish. */
6169 if (step == 1)
6170 copies <<= 1;
6171 else
6172 step >>= 1;
6173
6174 if (vspltis_constant (op, step, copies))
6175 return true;
6176
6177 /* And finally a vspltisb. */
6178 if (step == 1)
6179 copies <<= 1;
6180 else
6181 step >>= 1;
6182
6183 if (vspltis_constant (op, step, copies))
6184 return true;
6185
6186 if (vspltis_shifted (op) != 0)
6187 return true;
6188
6189 return false;
6190 }
6191
6192 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6193 result is OP. Abort if it is not possible. */
6194
6195 rtx
6196 gen_easy_altivec_constant (rtx op)
6197 {
6198 machine_mode mode = GET_MODE (op);
6199 int nunits = GET_MODE_NUNITS (mode);
6200 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6201 unsigned step = nunits / 4;
6202 unsigned copies = 1;
6203
6204 /* Start with a vspltisw. */
6205 if (vspltis_constant (op, step, copies))
6206 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6207
6208 /* Then try with a vspltish. */
6209 if (step == 1)
6210 copies <<= 1;
6211 else
6212 step >>= 1;
6213
6214 if (vspltis_constant (op, step, copies))
6215 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6216
6217 /* And finally a vspltisb. */
6218 if (step == 1)
6219 copies <<= 1;
6220 else
6221 step >>= 1;
6222
6223 if (vspltis_constant (op, step, copies))
6224 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6225
6226 gcc_unreachable ();
6227 }
6228
6229 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6230 instructions (xxspltib, vupkhsb/vextsb2w/vextb2d).
6231
6232 Return the number of instructions needed (1 or 2) in the location
6233 pointed to by NUM_INSNS_PTR.
6234
6235 Return the constant byte to be splatted via CONSTANT_PTR. */
6236
6237 bool
6238 xxspltib_constant_p (rtx op,
6239 machine_mode mode,
6240 int *num_insns_ptr,
6241 int *constant_ptr)
6242 {
6243 size_t nunits = GET_MODE_NUNITS (mode);
6244 size_t i;
6245 HOST_WIDE_INT value;
6246 rtx element;
6247
6248 /* Set the returned values to out of bound values. */
6249 *num_insns_ptr = -1;
6250 *constant_ptr = 256;
6251
6252 if (!TARGET_P9_VECTOR)
6253 return false;
6254
6255 if (mode == VOIDmode)
6256 mode = GET_MODE (op);
6257
6258 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6259 return false;
6260
6261 /* Handle (vec_duplicate <constant>). */
6262 if (GET_CODE (op) == VEC_DUPLICATE)
6263 {
6264 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6265 && mode != V2DImode)
6266 return false;
6267
6268 element = XEXP (op, 0);
6269 if (!CONST_INT_P (element))
6270 return false;
6271
6272 value = INTVAL (element);
6273 if (!IN_RANGE (value, -128, 127))
6274 return false;
6275 }
6276
6277 /* Handle (const_vector [...]). */
6278 else if (GET_CODE (op) == CONST_VECTOR)
6279 {
6280 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6281 && mode != V2DImode)
6282 return false;
6283
6284 element = CONST_VECTOR_ELT (op, 0);
6285 if (!CONST_INT_P (element))
6286 return false;
6287
6288 value = INTVAL (element);
6289 if (!IN_RANGE (value, -128, 127))
6290 return false;
6291
6292 for (i = 1; i < nunits; i++)
6293 {
6294 element = CONST_VECTOR_ELT (op, i);
6295 if (!CONST_INT_P (element))
6296 return false;
6297
6298 if (value != INTVAL (element))
6299 return false;
6300 }
6301 }
6302
6303 /* Handle integer constants being loaded into the upper part of the VSX
6304 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6305 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6306 else if (CONST_INT_P (op))
6307 {
6308 if (!SCALAR_INT_MODE_P (mode))
6309 return false;
6310
6311 value = INTVAL (op);
6312 if (!IN_RANGE (value, -128, 127))
6313 return false;
6314
6315 if (!IN_RANGE (value, -1, 0))
6316 {
6317 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6318 return false;
6319
6320 if (EASY_VECTOR_15 (value))
6321 return false;
6322 }
6323 }
6324
6325 else
6326 return false;
6327
6328 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6329 sign extend. Special case 0/-1 to allow getting any VSX register instead
6330 of an Altivec register. */
6331 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6332 && EASY_VECTOR_15 (value))
6333 return false;
6334
6335 /* Return # of instructions and the constant byte for XXSPLTIB. */
6336 if (mode == V16QImode)
6337 *num_insns_ptr = 1;
6338
6339 else if (IN_RANGE (value, -1, 0))
6340 *num_insns_ptr = 1;
6341
6342 else
6343 *num_insns_ptr = 2;
6344
6345 *constant_ptr = (int) value;
6346 return true;
6347 }
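/* Examples: a V4SImode splat of 5 is rejected since vspltisw 5 is
   preferable; a V4SImode splat of 100 returns 2 insns (xxspltib plus a
   vextsb2w sign extension); a V16QImode splat of 100 returns 1. */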
6348
6349 const char *
6350 output_vec_const_move (rtx *operands)
6351 {
6352 int shift;
6353 machine_mode mode;
6354 rtx dest, vec;
6355
6356 dest = operands[0];
6357 vec = operands[1];
6358 mode = GET_MODE (dest);
6359
6360 if (TARGET_VSX)
6361 {
6362 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6363 int xxspltib_value = 256;
6364 int num_insns = -1;
6365
6366 if (zero_constant (vec, mode))
6367 {
6368 if (TARGET_P9_VECTOR)
6369 return "xxspltib %x0,0";
6370
6371 else if (dest_vmx_p)
6372 return "vspltisw %0,0";
6373
6374 else
6375 return "xxlxor %x0,%x0,%x0";
6376 }
6377
6378 if (all_ones_constant (vec, mode))
6379 {
6380 if (TARGET_P9_VECTOR)
6381 return "xxspltib %x0,255";
6382
6383 else if (dest_vmx_p)
6384 return "vspltisw %0,-1";
6385
6386 else if (TARGET_P8_VECTOR)
6387 return "xxlorc %x0,%x0,%x0";
6388
6389 else
6390 gcc_unreachable ();
6391 }
6392
6393 if (TARGET_P9_VECTOR
6394 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6395 {
6396 if (num_insns == 1)
6397 {
6398 operands[2] = GEN_INT (xxspltib_value & 0xff);
6399 return "xxspltib %x0,%2";
6400 }
6401
6402 return "#";
6403 }
6404 }
6405
6406 if (TARGET_ALTIVEC)
6407 {
6408 rtx splat_vec;
6409
6410 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6411 if (zero_constant (vec, mode))
6412 return "vspltisw %0,0";
6413
6414 if (all_ones_constant (vec, mode))
6415 return "vspltisw %0,-1";
6416
6417 /* Do we need to construct a value using VSLDOI? */
6418 shift = vspltis_shifted (vec);
6419 if (shift != 0)
6420 return "#";
6421
6422 splat_vec = gen_easy_altivec_constant (vec);
6423 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6424 operands[1] = XEXP (splat_vec, 0);
6425 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6426 return "#";
6427
6428 switch (GET_MODE (splat_vec))
6429 {
6430 case E_V4SImode:
6431 return "vspltisw %0,%1";
6432
6433 case E_V8HImode:
6434 return "vspltish %0,%1";
6435
6436 case E_V16QImode:
6437 return "vspltisb %0,%1";
6438
6439 default:
6440 gcc_unreachable ();
6441 }
6442 }
6443
6444 gcc_unreachable ();
6445 }
6446
6447 /* Initialize vector TARGET to VALS. */
6448
6449 void
6450 rs6000_expand_vector_init (rtx target, rtx vals)
6451 {
6452 machine_mode mode = GET_MODE (target);
6453 machine_mode inner_mode = GET_MODE_INNER (mode);
6454 int n_elts = GET_MODE_NUNITS (mode);
6455 int n_var = 0, one_var = -1;
6456 bool all_same = true, all_const_zero = true;
6457 rtx x, mem;
6458 int i;
6459
6460 for (i = 0; i < n_elts; ++i)
6461 {
6462 x = XVECEXP (vals, 0, i);
6463 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6464 ++n_var, one_var = i;
6465 else if (x != CONST0_RTX (inner_mode))
6466 all_const_zero = false;
6467
6468 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6469 all_same = false;
6470 }
6471
6472 if (n_var == 0)
6473 {
6474 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6475 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6476 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6477 {
6478 /* Zero register. */
6479 emit_move_insn (target, CONST0_RTX (mode));
6480 return;
6481 }
6482 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6483 {
6484 /* Splat immediate. */
6485 emit_insn (gen_rtx_SET (target, const_vec));
6486 return;
6487 }
6488 else
6489 {
6490 /* Load from constant pool. */
6491 emit_move_insn (target, const_vec);
6492 return;
6493 }
6494 }
6495
6496 /* Double word values on VSX can use xxpermdi or lxvdsx. */
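/* E.g. {x, y} with both elements in registers becomes a single
   xxpermdi via gen_vsx_concat_v2df, and an all-same vector whose
   element is in memory can use the load-and-splat form (lxvdsx)
   instead of a stack round trip. */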
6497 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6498 {
6499 rtx op[2];
6500 size_t i;
6501 size_t num_elements = all_same ? 1 : 2;
6502 for (i = 0; i < num_elements; i++)
6503 {
6504 op[i] = XVECEXP (vals, 0, i);
6505 /* Just in case there is a SUBREG with a smaller mode, do a
6506 conversion. */
6507 if (GET_MODE (op[i]) != inner_mode)
6508 {
6509 rtx tmp = gen_reg_rtx (inner_mode);
6510 convert_move (tmp, op[i], 0);
6511 op[i] = tmp;
6512 }
6513 /* Allow load with splat double word. */
6514 else if (MEM_P (op[i]))
6515 {
6516 if (!all_same)
6517 op[i] = force_reg (inner_mode, op[i]);
6518 }
6519 else if (!REG_P (op[i]))
6520 op[i] = force_reg (inner_mode, op[i]);
6521 }
6522
6523 if (all_same)
6524 {
6525 if (mode == V2DFmode)
6526 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6527 else
6528 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6529 }
6530 else
6531 {
6532 if (mode == V2DFmode)
6533 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6534 else
6535 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6536 }
6537 return;
6538 }
6539
6540 /* Special case initializing vector int if we are on 64-bit systems with
6541 direct move or we have the ISA 3.0 instructions. */
6542 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6543 && TARGET_DIRECT_MOVE_64BIT)
6544 {
6545 if (all_same)
6546 {
6547 rtx element0 = XVECEXP (vals, 0, 0);
6548 if (MEM_P (element0))
6549 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6550 else
6551 element0 = force_reg (SImode, element0);
6552
6553 if (TARGET_P9_VECTOR)
6554 emit_insn (gen_vsx_splat_v4si (target, element0));
6555 else
6556 {
6557 rtx tmp = gen_reg_rtx (DImode);
6558 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6559 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6560 }
6561 return;
6562 }
6563 else
6564 {
6565 rtx elements[4];
6566 size_t i;
6567
6568 for (i = 0; i < 4; i++)
6569 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6570
6571 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6572 elements[2], elements[3]));
6573 return;
6574 }
6575 }
6576
6577 /* With single-precision floating point on VSX, we know that internally
6578 single precision is actually represented as a double. Either make 2 V2DF
6579 vectors and convert those vectors to single precision, or do one
6580 conversion and splat the result to the other elements. */
6581 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6582 {
6583 if (all_same)
6584 {
6585 rtx element0 = XVECEXP (vals, 0, 0);
6586
6587 if (TARGET_P9_VECTOR)
6588 {
6589 if (MEM_P (element0))
6590 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6591
6592 emit_insn (gen_vsx_splat_v4sf (target, element0));
6593 }
6594
6595 else
6596 {
6597 rtx freg = gen_reg_rtx (V4SFmode);
6598 rtx sreg = force_reg (SFmode, element0);
6599 rtx cvt = (TARGET_XSCVDPSPN
6600 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6601 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6602
6603 emit_insn (cvt);
6604 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6605 const0_rtx));
6606 }
6607 }
6608 else
6609 {
6610 rtx dbl_even = gen_reg_rtx (V2DFmode);
6611 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6612 rtx flt_even = gen_reg_rtx (V4SFmode);
6613 rtx flt_odd = gen_reg_rtx (V4SFmode);
6614 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6615 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6616 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6617 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6618
6619 /* Use VMRGEW if we can instead of doing a permute. */
6620 if (TARGET_P8_VECTOR)
6621 {
6622 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6623 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6624 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6625 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6626 if (BYTES_BIG_ENDIAN)
6627 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6628 else
6629 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6630 }
6631 else
6632 {
6633 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6634 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6635 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6636 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6637 rs6000_expand_extract_even (target, flt_even, flt_odd);
6638 }
6639 }
6640 return;
6641 }
6642
6643 /* Special case initializing vector short/char that are splats if we are on
6644 64-bit systems with direct move. */
6645 if (all_same && TARGET_DIRECT_MOVE_64BIT
6646 && (mode == V16QImode || mode == V8HImode))
6647 {
6648 rtx op0 = XVECEXP (vals, 0, 0);
6649 rtx di_tmp = gen_reg_rtx (DImode);
6650
6651 if (!REG_P (op0))
6652 op0 = force_reg (GET_MODE_INNER (mode), op0);
6653
6654 if (mode == V16QImode)
6655 {
6656 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6657 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6658 return;
6659 }
6660
6661 if (mode == V8HImode)
6662 {
6663 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6664 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6665 return;
6666 }
6667 }
6668
6669 /* Store value to stack temp. Load vector element. Splat. However, splat
6670 of 64-bit items is not supported on Altivec. */
6671 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6672 {
6673 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6674 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6675 XVECEXP (vals, 0, 0));
6676 x = gen_rtx_UNSPEC (VOIDmode,
6677 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6678 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6679 gen_rtvec (2,
6680 gen_rtx_SET (target, mem),
6681 x)));
6682 x = gen_rtx_VEC_SELECT (inner_mode, target,
6683 gen_rtx_PARALLEL (VOIDmode,
6684 gen_rtvec (1, const0_rtx)));
6685 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6686 return;
6687 }
6688
6689 /* One field is non-constant. Load constant then overwrite
6690 varying field. */
6691 if (n_var == 1)
6692 {
6693 rtx copy = copy_rtx (vals);
6694
6695 /* Load constant part of vector, substitute neighboring value for
6696 varying element. */
6697 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6698 rs6000_expand_vector_init (target, copy);
6699
6700 /* Insert variable. */
6701 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6702 return;
6703 }
6704
6705 /* Construct the vector in memory one field at a time
6706 and load the whole vector. */
6707 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6708 for (i = 0; i < n_elts; i++)
6709 emit_move_insn (adjust_address_nv (mem, inner_mode,
6710 i * GET_MODE_SIZE (inner_mode)),
6711 XVECEXP (vals, 0, i));
6712 emit_move_insn (target, mem);
6713 }
6714
6715 /* Set field ELT of TARGET to VAL. */
6716
6717 void
6718 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6719 {
6720 machine_mode mode = GET_MODE (target);
6721 machine_mode inner_mode = GET_MODE_INNER (mode);
6722 rtx reg = gen_reg_rtx (mode);
6723 rtx mask, mem, x;
6724 int width = GET_MODE_SIZE (inner_mode);
6725 int i;
6726
6727 val = force_reg (GET_MODE (val), val);
6728
6729 if (VECTOR_MEM_VSX_P (mode))
6730 {
6731 rtx insn = NULL_RTX;
6732 rtx elt_rtx = GEN_INT (elt);
6733
6734 if (mode == V2DFmode)
6735 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6736
6737 else if (mode == V2DImode)
6738 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6739
6740 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6741 {
6742 if (mode == V4SImode)
6743 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6744 else if (mode == V8HImode)
6745 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6746 else if (mode == V16QImode)
6747 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6748 else if (mode == V4SFmode)
6749 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6750 }
6751
6752 if (insn)
6753 {
6754 emit_insn (insn);
6755 return;
6756 }
6757 }
6758
6759 /* Simplify setting single element vectors like V1TImode. */
6760 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6761 {
6762 emit_move_insn (target, gen_lowpart (mode, val));
6763 return;
6764 }
6765
6766 /* Load single variable value. */
6767 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6768 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6769 x = gen_rtx_UNSPEC (VOIDmode,
6770 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6771 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6772 gen_rtvec (2,
6773 gen_rtx_SET (reg, mem),
6774 x)));
6775
6776 /* Linear sequence. */
6777 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6778 for (i = 0; i < 16; ++i)
6779 XVECEXP (mask, 0, i) = GEN_INT (i);
6780
6781 /* Set permute mask to insert element into target. */
6782 for (i = 0; i < width; ++i)
6783 XVECEXP (mask, 0, elt*width + i)
6784 = GEN_INT (i + 0x10);
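
/* For example, for V4SImode with ELT == 1 (WIDTH == 4) the selector is
   { 0, 1, 2, 3, 0x10, 0x11, 0x12, 0x13, 8, 9, 10, 11, 12, 13, 14, 15 }:
   entries below 0x10 select bytes of the first permute input, while
   entries 0x10 and up select bytes of the input holding the new value.  */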
6785 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6786
6787 if (BYTES_BIG_ENDIAN)
6788 x = gen_rtx_UNSPEC (mode,
6789 gen_rtvec (3, target, reg,
6790 force_reg (V16QImode, x)),
6791 UNSPEC_VPERM);
6792 else
6793 {
6794 if (TARGET_P9_VECTOR)
6795 x = gen_rtx_UNSPEC (mode,
6796 gen_rtvec (3, reg, target,
6797 force_reg (V16QImode, x)),
6798 UNSPEC_VPERMR);
6799 else
6800 {
6801 /* Invert selector. We prefer to generate VNAND on P8 so
6802 that future fusion opportunities can kick in, but must
6803 generate VNOR elsewhere. */
6804 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6805 rtx iorx = (TARGET_P8_VECTOR
6806 ? gen_rtx_IOR (V16QImode, notx, notx)
6807 : gen_rtx_AND (V16QImode, notx, notx));
6808 rtx tmp = gen_reg_rtx (V16QImode);
6809 emit_insn (gen_rtx_SET (tmp, iorx));
6810
6811 /* Permute with operands reversed and adjusted selector. */
6812 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6813 UNSPEC_VPERM);
6814 }
6815 }
6816
6817 emit_insn (gen_rtx_SET (target, x));
6818 }
6819
6820 /* Extract field ELT from VEC into TARGET. */
6821
6822 void
6823 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6824 {
6825 machine_mode mode = GET_MODE (vec);
6826 machine_mode inner_mode = GET_MODE_INNER (mode);
6827 rtx mem;
6828
6829 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6830 {
6831 switch (mode)
6832 {
6833 default:
6834 break;
6835 case E_V1TImode:
6836 emit_move_insn (target, gen_lowpart (TImode, vec));
return;
6838 case E_V2DFmode:
6839 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6840 return;
6841 case E_V2DImode:
6842 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6843 return;
6844 case E_V4SFmode:
6845 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6846 return;
6847 case E_V16QImode:
6848 if (TARGET_DIRECT_MOVE_64BIT)
6849 {
6850 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6851 return;
6852 }
6853 else
6854 break;
6855 case E_V8HImode:
6856 if (TARGET_DIRECT_MOVE_64BIT)
6857 {
6858 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6859 return;
6860 }
6861 else
6862 break;
6863 case E_V4SImode:
6864 if (TARGET_DIRECT_MOVE_64BIT)
6865 {
6866 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6867 return;
6868 }
6869 break;
6870 }
6871 }
6872 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6873 && TARGET_DIRECT_MOVE_64BIT)
6874 {
6875 if (GET_MODE (elt) != DImode)
6876 {
6877 rtx tmp = gen_reg_rtx (DImode);
6878 convert_move (tmp, elt, 0);
6879 elt = tmp;
6880 }
6881 else if (!REG_P (elt))
6882 elt = force_reg (DImode, elt);
6883
6884 switch (mode)
6885 {
6886 case E_V1TImode:
6887 emit_move_insn (target, gen_lowpart (TImode, vec));
6888 return;
6889
6890 case E_V2DFmode:
6891 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6892 return;
6893
6894 case E_V2DImode:
6895 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6896 return;
6897
6898 case E_V4SFmode:
6899 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6900 return;
6901
6902 case E_V4SImode:
6903 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6904 return;
6905
6906 case E_V8HImode:
6907 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6908 return;
6909
6910 case E_V16QImode:
6911 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6912 return;
6913
6914 default:
6915 gcc_unreachable ();
6916 }
6917 }
6918
6919 /* Allocate mode-sized buffer. */
6920 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6921
6922 emit_move_insn (mem, vec);
6923 if (CONST_INT_P (elt))
6924 {
6925 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6926
6927 /* Add offset to field within buffer matching vector element. */
6928 mem = adjust_address_nv (mem, inner_mode,
6929 modulo_elt * GET_MODE_SIZE (inner_mode));
6930 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6931 }
6932 else
6933 {
6934 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6935 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
rtx new_addr;
6937
6938 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
6939 if (ele_size > 1)
6940 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
6941 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
6942 new_addr = change_address (mem, inner_mode, new_addr);
6943 emit_move_insn (target, new_addr);
6944 }
6945 }
6946
6947 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6948 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
temporary (BASE_TMP) to fix up the address.  Return the new memory address
6950 that is valid for reads or writes to a given register (SCALAR_REG). */
6951
6952 rtx
6953 rs6000_adjust_vec_address (rtx scalar_reg,
6954 rtx mem,
6955 rtx element,
6956 rtx base_tmp,
6957 machine_mode scalar_mode)
6958 {
6959 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6960 rtx addr = XEXP (mem, 0);
6961 rtx element_offset;
6962 rtx new_addr;
6963 bool valid_addr_p;
6964
6965 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6966 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6967
6968 /* Calculate what we need to add to the address to get the element
6969 address. */
6970 if (CONST_INT_P (element))
6971 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6972 else
6973 {
6974 int byte_shift = exact_log2 (scalar_size);
6975 gcc_assert (byte_shift >= 0);
6976
6977 if (byte_shift == 0)
6978 element_offset = element;
6979
6980 else
6981 {
6982 if (TARGET_POWERPC64)
6983 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6984 else
6985 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6986
6987 element_offset = base_tmp;
6988 }
6989 }
6990
6991 /* Create the new address pointing to the element within the vector. If we
6992 are adding 0, we don't have to change the address. */
6993 if (element_offset == const0_rtx)
6994 new_addr = addr;
6995
6996 /* A simple indirect address can be converted into a reg + offset
6997 address. */
6998 else if (REG_P (addr) || SUBREG_P (addr))
6999 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7000
/* Optimize D-FORM addresses with a constant offset and a constant element
   number by folding the element offset into the address directly.  */
7003 else if (GET_CODE (addr) == PLUS)
7004 {
7005 rtx op0 = XEXP (addr, 0);
7006 rtx op1 = XEXP (addr, 1);
7007 rtx insn;
7008
7009 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7010 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7011 {
7012 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7013 rtx offset_rtx = GEN_INT (offset);
7014
7015 if (IN_RANGE (offset, -32768, 32767)
7016 && (scalar_size < 8 || (offset & 0x3) == 0))
7017 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7018 else
7019 {
7020 emit_move_insn (base_tmp, offset_rtx);
7021 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7022 }
7023 }
7024 else
7025 {
7026 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7027 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7028
7029 /* Note, ADDI requires the register being added to be a base
7030 register. If the register was R0, load it up into the temporary
7031 and do the add. */
7032 if (op1_reg_p
7033 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7034 {
7035 insn = gen_add3_insn (base_tmp, op1, element_offset);
7036 gcc_assert (insn != NULL_RTX);
7037 emit_insn (insn);
7038 }
7039
7040 else if (ele_reg_p
7041 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7042 {
7043 insn = gen_add3_insn (base_tmp, element_offset, op1);
7044 gcc_assert (insn != NULL_RTX);
7045 emit_insn (insn);
7046 }
7047
7048 else
7049 {
7050 emit_move_insn (base_tmp, op1);
7051 emit_insn (gen_add2_insn (base_tmp, element_offset));
7052 }
7053
7054 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7055 }
7056 }
7057
7058 else
7059 {
7060 emit_move_insn (base_tmp, addr);
7061 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7062 }
7063
7064 /* If we have a PLUS, we need to see whether the particular register class
7065 allows for D-FORM or X-FORM addressing. */
7066 if (GET_CODE (new_addr) == PLUS)
7067 {
7068 rtx op1 = XEXP (new_addr, 1);
7069 addr_mask_type addr_mask;
7070 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7071
7072 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7073 if (INT_REGNO_P (scalar_regno))
7074 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7075
7076 else if (FP_REGNO_P (scalar_regno))
7077 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7078
7079 else if (ALTIVEC_REGNO_P (scalar_regno))
7080 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7081
7082 else
7083 gcc_unreachable ();
7084
7085 if (REG_P (op1) || SUBREG_P (op1))
7086 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7087 else
7088 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7089 }
7090
7091 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7092 valid_addr_p = true;
7093
7094 else
7095 valid_addr_p = false;
7096
7097 if (!valid_addr_p)
7098 {
7099 emit_move_insn (base_tmp, new_addr);
7100 new_addr = base_tmp;
7101 }
7102
7103 return change_address (mem, scalar_mode, new_addr);
7104 }
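
/* Worked example (r9 stands for an arbitrary base register): extracting
   element 2 of a V4SImode vector located at (plus r9 (const_int 16)) gives
   element_offset = 2 * 4 = 8; the constant offsets fold, so the returned
   memory is (mem:SI (plus r9 (const_int 24))).  */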
7105
7106 /* Split a variable vec_extract operation into the component instructions. */
7107
7108 void
7109 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7110 rtx tmp_altivec)
7111 {
7112 machine_mode mode = GET_MODE (src);
7113 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
7114 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7115 int byte_shift = exact_log2 (scalar_size);
7116
7117 gcc_assert (byte_shift >= 0);
7118
7119 /* If we are given a memory address, optimize to load just the element. We
7120 don't have to adjust the vector element number on little endian
7121 systems. */
7122 if (MEM_P (src))
7123 {
7124 int num_elements = GET_MODE_NUNITS (mode);
7125 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7126
7127 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7128 gcc_assert (REG_P (tmp_gpr));
7129 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7130 tmp_gpr, scalar_mode));
7131 return;
7132 }
7133
7134 else if (REG_P (src) || SUBREG_P (src))
7135 {
7136 int num_elements = GET_MODE_NUNITS (mode);
7137 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7138 int bit_shift = 7 - exact_log2 (num_elements);
7139 rtx element2;
7140 unsigned int dest_regno = reg_or_subregno (dest);
7141 unsigned int src_regno = reg_or_subregno (src);
7142 unsigned int element_regno = reg_or_subregno (element);
7143
7144 gcc_assert (REG_P (tmp_gpr));
7145
7146 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7147 a general purpose register. */
7148 if (TARGET_P9_VECTOR
7149 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7150 && INT_REGNO_P (dest_regno)
7151 && ALTIVEC_REGNO_P (src_regno)
7152 && INT_REGNO_P (element_regno))
7153 {
7154 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7155 rtx element_si = gen_rtx_REG (SImode, element_regno);
7156
7157 if (mode == V16QImode)
7158 emit_insn (BYTES_BIG_ENDIAN
7159 ? gen_vextublx (dest_si, element_si, src)
7160 : gen_vextubrx (dest_si, element_si, src));
7161
7162 else if (mode == V8HImode)
7163 {
7164 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7165 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7166 emit_insn (BYTES_BIG_ENDIAN
7167 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7168 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7169 }
7170
7172 else
7173 {
7174 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7175 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7176 emit_insn (BYTES_BIG_ENDIAN
7177 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7178 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7179 }
7180
7181 return;
7182 }
7183
7185 gcc_assert (REG_P (tmp_altivec));
7186
/* For little endian, adjust the element ordering.  For V2DI/V2DF we can
   use an XOR; otherwise we need to subtract.  The shift amount is chosen
   so that VSLO shifts the element into the upper position (adding 3
   converts a byte shift into a bit shift).  */
7191 if (scalar_size == 8)
7192 {
7193 if (!BYTES_BIG_ENDIAN)
7194 {
7195 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7196 element2 = tmp_gpr;
7197 }
7198 else
7199 element2 = element;
7200
7201 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7202 bit. */
7203 emit_insn (gen_rtx_SET (tmp_gpr,
7204 gen_rtx_AND (DImode,
7205 gen_rtx_ASHIFT (DImode,
7206 element2,
7207 GEN_INT (6)),
7208 GEN_INT (64))));
7209 }
7210 else
7211 {
7212 if (!BYTES_BIG_ENDIAN)
7213 {
7214 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7215
7216 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7217 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7218 element2 = tmp_gpr;
7219 }
7220 else
7221 element2 = element;
7222
7223 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7224 }
7225
7226 /* Get the value into the lower byte of the Altivec register where VSLO
7227 expects it. */
7228 if (TARGET_P9_VECTOR)
7229 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7230 else if (can_create_pseudo_p ())
7231 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7232 else
7233 {
7234 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7235 emit_move_insn (tmp_di, tmp_gpr);
7236 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7237 }
7238
7239 /* Do the VSLO to get the value into the final location. */
7240 switch (mode)
7241 {
7242 case E_V2DFmode:
7243 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7244 return;
7245
7246 case E_V2DImode:
7247 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7248 return;
7249
7250 case E_V4SFmode:
7251 {
7252 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7253 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7254 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7255 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7256 tmp_altivec));
7257
7258 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7259 return;
7260 }
7261
7262 case E_V4SImode:
7263 case E_V8HImode:
7264 case E_V16QImode:
7265 {
7266 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7267 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7268 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7269 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7270 tmp_altivec));
7271 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7272 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7273 GEN_INT (64 - bits_in_element)));
7274 return;
7275 }
7276
7277 default:
7278 gcc_unreachable ();
7279 }
7280
7281 return;
7282 }
7283 else
7284 gcc_unreachable ();
7285 }
7286
/* Return the alignment of TYPE.  The existing alignment is ALIGN.  HOW
   selects whether the alignment is ABI-mandated, optional, or both
   ABI-mandated and optional alignment.  */
7290
7291 unsigned int
7292 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7293 {
7294 if (how != align_opt)
7295 {
7296 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7297 align = 128;
7298 }
7299
7300 if (how != align_abi)
7301 {
7302 if (TREE_CODE (type) == ARRAY_TYPE
7303 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7304 {
7305 if (align < BITS_PER_WORD)
7306 align = BITS_PER_WORD;
7307 }
7308 }
7309
7310 return align;
7311 }
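
/* For example: a V4SImode vector type is raised to 128-bit alignment unless
   HOW is align_opt, while an array of chars is raised to BITS_PER_WORD
   unless HOW is align_abi.  The latter is an optional optimization, so the
   ABI alignment of the char array is unchanged.  */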
7312
7313 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7314 instructions simply ignore the low bits; VSX memory instructions
7315 are aligned to 4 or 8 bytes. */
7316
7317 static bool
7318 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7319 {
7320 return (STRICT_ALIGNMENT
7321 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7322 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7323 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7324 && (int) align < VECTOR_ALIGN (mode)))));
7325 }
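
/* For example, without TARGET_EFFICIENT_UNALIGNED_VSX an SFmode access with
   8-bit alignment is slow (scalar float with align < 32), as is a V4SImode
   access with 64-bit alignment (vector mode with align < VECTOR_ALIGN).
   With STRICT_ALIGNMENT every unaligned access is considered slow.  */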
7326
7327 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7328
7329 bool
7330 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7331 {
7332 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7333 {
7334 if (computed != 128)
7335 {
7336 static bool warned;
7337 if (!warned && warn_psabi)
7338 {
7339 warned = true;
7340 inform (input_location,
7341 "the layout of aggregates containing vectors with"
7342 " %d-byte alignment has changed in GCC 5",
7343 computed / BITS_PER_UNIT);
7344 }
7345 }
7346 /* In current GCC there is no special case. */
7347 return false;
7348 }
7349
7350 return false;
7351 }
7352
7353 /* AIX increases natural record alignment to doubleword if the first
7354 field is an FP double while the FP fields remain word aligned. */
7355
7356 unsigned int
7357 rs6000_special_round_type_align (tree type, unsigned int computed,
7358 unsigned int specified)
7359 {
7360 unsigned int align = MAX (computed, specified);
7361 tree field = TYPE_FIELDS (type);
7362
/* Skip all non-field decls.  */
7364 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7365 field = DECL_CHAIN (field);
7366
7367 if (field != NULL && field != type)
7368 {
7369 type = TREE_TYPE (field);
7370 while (TREE_CODE (type) == ARRAY_TYPE)
7371 type = TREE_TYPE (type);
7372
7373 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7374 align = MAX (align, 64);
7375 }
7376
7377 return align;
7378 }
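
/* For example, "struct { double d; int i; }" is raised to 64-bit alignment
   because its first field has DFmode, while "struct { int i; double d; }"
   is left unchanged by this function.  */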
7379
7380 /* Darwin increases record alignment to the natural alignment of
7381 the first field. */
7382
7383 unsigned int
7384 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7385 unsigned int specified)
7386 {
7387 unsigned int align = MAX (computed, specified);
7388
7389 if (TYPE_PACKED (type))
7390 return align;
7391
7392 /* Find the first field, looking down into aggregates. */
7393 do {
7394 tree field = TYPE_FIELDS (type);
/* Skip all non-field decls.  */
7396 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7397 field = DECL_CHAIN (field);
7398 if (! field)
7399 break;
7400 /* A packed field does not contribute any extra alignment. */
7401 if (DECL_PACKED (field))
7402 return align;
7403 type = TREE_TYPE (field);
7404 while (TREE_CODE (type) == ARRAY_TYPE)
7405 type = TREE_TYPE (type);
7406 } while (AGGREGATE_TYPE_P (type));
7407
7408 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7409 align = MAX (align, TYPE_ALIGN (type));
7410
7411 return align;
7412 }
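
/* For example, "struct { struct { double d; } inner; char c; }" is raised
   to 64-bit alignment: the search descends into the first field of INNER
   and finds the double.  A packed field stops the search and leaves the
   alignment unchanged.  */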
7413
7414 /* Return 1 for an operand in small memory on V.4/eabi. */
7415
7416 int
7417 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7418 machine_mode mode ATTRIBUTE_UNUSED)
7419 {
7420 #if TARGET_ELF
7421 rtx sym_ref;
7422
7423 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7424 return 0;
7425
7426 if (DEFAULT_ABI != ABI_V4)
7427 return 0;
7428
7429 if (SYMBOL_REF_P (op))
7430 sym_ref = op;
7431
7432 else if (GET_CODE (op) != CONST
7433 || GET_CODE (XEXP (op, 0)) != PLUS
7434 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7435 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7436 return 0;
7437
7438 else
7439 {
7440 rtx sum = XEXP (op, 0);
7441 HOST_WIDE_INT summand;
7442
7443 /* We have to be careful here, because it is the referenced address
7444 that must be 32k from _SDA_BASE_, not just the symbol. */
7445 summand = INTVAL (XEXP (sum, 1));
7446 if (summand < 0 || summand > g_switch_value)
7447 return 0;
7448
7449 sym_ref = XEXP (sum, 0);
7450 }
7451
7452 return SYMBOL_REF_SMALL_P (sym_ref);
7453 #else
7454 return 0;
7455 #endif
7456 }
7457
7458 /* Return true if either operand is a general purpose register. */
7459
7460 bool
7461 gpr_or_gpr_p (rtx op0, rtx op1)
7462 {
7463 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7464 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7465 }
7466
7467 /* Return true if this is a move direct operation between GPR registers and
7468 floating point/VSX registers. */
7469
7470 bool
7471 direct_move_p (rtx op0, rtx op1)
7472 {
7473 int regno0, regno1;
7474
7475 if (!REG_P (op0) || !REG_P (op1))
7476 return false;
7477
7478 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7479 return false;
7480
7481 regno0 = REGNO (op0);
7482 regno1 = REGNO (op1);
7483 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7484 return false;
7485
7486 if (INT_REGNO_P (regno0))
7487 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7488
7489 else if (INT_REGNO_P (regno1))
7490 {
7491 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7492 return true;
7493
7494 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7495 return true;
7496 }
7497
7498 return false;
7499 }
7500
7501 /* Return true if the OFFSET is valid for the quad address instructions that
7502 use d-form (register + offset) addressing. */
7503
7504 static inline bool
7505 quad_address_offset_p (HOST_WIDE_INT offset)
7506 {
7507 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7508 }
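
/* For example: 0, 16, -32768 and 32752 are all valid offsets, while 8 is
   rejected (not a multiple of 16) and 32768 is rejected (out of range).  */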
7509
/* Return true if ADDR is an acceptable address for a quad memory
   operation of mode MODE (either LQ/STQ for general purpose registers,
   or LXV/STXV for vector registers under ISA 3.0).  STRICT selects
   strict register checking, i.e. whether any base register must be a
   hard register that is valid for addressing.  */
7515
7516 bool
7517 quad_address_p (rtx addr, machine_mode mode, bool strict)
7518 {
7519 rtx op0, op1;
7520
7521 if (GET_MODE_SIZE (mode) != 16)
7522 return false;
7523
7524 if (legitimate_indirect_address_p (addr, strict))
7525 return true;
7526
7527 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7528 return false;
7529
7530 if (GET_CODE (addr) != PLUS)
7531 return false;
7532
7533 op0 = XEXP (addr, 0);
7534 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7535 return false;
7536
7537 op1 = XEXP (addr, 1);
7538 if (!CONST_INT_P (op1))
7539 return false;
7540
7541 return quad_address_offset_p (INTVAL (op1));
7542 }
7543
7544 /* Return true if this is a load or store quad operation. This function does
7545 not handle the atomic quad memory instructions. */
7546
7547 bool
7548 quad_load_store_p (rtx op0, rtx op1)
7549 {
7550 bool ret;
7551
7552 if (!TARGET_QUAD_MEMORY)
7553 ret = false;
7554
7555 else if (REG_P (op0) && MEM_P (op1))
7556 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7557 && quad_memory_operand (op1, GET_MODE (op1))
7558 && !reg_overlap_mentioned_p (op0, op1));
7559
7560 else if (MEM_P (op0) && REG_P (op1))
7561 ret = (quad_memory_operand (op0, GET_MODE (op0))
7562 && quad_int_reg_operand (op1, GET_MODE (op1)));
7563
7564 else
7565 ret = false;
7566
7567 if (TARGET_DEBUG_ADDR)
7568 {
fprintf (stderr, "\n========== quad_load_store_p, return %s\n",
7570 ret ? "true" : "false");
7571 debug_rtx (gen_rtx_SET (op0, op1));
7572 }
7573
7574 return ret;
7575 }
7576
7577 /* Given an address, return a constant offset term if one exists. */
7578
7579 static rtx
7580 address_offset (rtx op)
7581 {
7582 if (GET_CODE (op) == PRE_INC
7583 || GET_CODE (op) == PRE_DEC)
7584 op = XEXP (op, 0);
7585 else if (GET_CODE (op) == PRE_MODIFY
7586 || GET_CODE (op) == LO_SUM)
7587 op = XEXP (op, 1);
7588
7589 if (GET_CODE (op) == CONST)
7590 op = XEXP (op, 0);
7591
7592 if (GET_CODE (op) == PLUS)
7593 op = XEXP (op, 1);
7594
7595 if (CONST_INT_P (op))
7596 return op;
7597
7598 return NULL_RTX;
7599 }
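
/* Examples (r9 stands for any register): for (plus (reg r9) (const_int 8))
   this returns (const_int 8); for (lo_sum (reg r9) (const (plus (symbol_ref
   "s") (const_int 12)))) it returns (const_int 12); for a plain (reg r9)
   it returns NULL_RTX.  */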
7600
7601 /* Return true if the MEM operand is a memory operand suitable for use
7602 with a (full width, possibly multiple) gpr load/store. On
7603 powerpc64 this means the offset must be divisible by 4.
7604 Implements 'Y' constraint.
7605
7606 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7607 a constraint function we know the operand has satisfied a suitable
7608 memory predicate.
7609
7610 Offsetting a lo_sum should not be allowed, except where we know by
7611 alignment that a 32k boundary is not crossed. Note that by
7612 "offsetting" here we mean a further offset to access parts of the
7613 MEM. It's fine to have a lo_sum where the inner address is offset
7614 from a sym, since the same sym+offset will appear in the high part
7615 of the address calculation. */
7616
7617 bool
7618 mem_operand_gpr (rtx op, machine_mode mode)
7619 {
7620 unsigned HOST_WIDE_INT offset;
7621 int extra;
7622 rtx addr = XEXP (op, 0);
7623
7624 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7625 if (TARGET_UPDATE
7626 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7627 && mode_supports_pre_incdec_p (mode)
7628 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7629 return true;
7630
7631 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7632 if (!rs6000_offsettable_memref_p (op, mode, false))
7633 return false;
7634
7635 op = address_offset (addr);
7636 if (op == NULL_RTX)
7637 return true;
7638
7639 offset = INTVAL (op);
7640 if (TARGET_POWERPC64 && (offset & 3) != 0)
7641 return false;
7642
7643 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7644 if (extra < 0)
7645 extra = 0;
7646
7647 if (GET_CODE (addr) == LO_SUM)
7648 /* For lo_sum addresses, we must allow any offset except one that
7649 causes a wrap, so test only the low 16 bits. */
7650 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7651
7652 return offset + 0x8000 < 0x10000u - extra;
7653 }
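
/* Worked example: for a TImode access on powerpc64, EXTRA is 8, so the
   final test requires offset + 0x8000 < 0x10000 - 8.  An offset of 32760
   is rejected, because the second doubleword at offset + 8 would not be
   addressable, while 32752 is accepted.  */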
7654
7655 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7656 enforce an offset divisible by 4 even for 32-bit. */
7657
7658 bool
7659 mem_operand_ds_form (rtx op, machine_mode mode)
7660 {
7661 unsigned HOST_WIDE_INT offset;
7662 int extra;
7663 rtx addr = XEXP (op, 0);
7664
7665 if (!offsettable_address_p (false, mode, addr))
7666 return false;
7667
7668 op = address_offset (addr);
7669 if (op == NULL_RTX)
7670 return true;
7671
7672 offset = INTVAL (op);
7673 if ((offset & 3) != 0)
7674 return false;
7675
7676 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7677 if (extra < 0)
7678 extra = 0;
7679
7680 if (GET_CODE (addr) == LO_SUM)
7681 /* For lo_sum addresses, we must allow any offset except one that
7682 causes a wrap, so test only the low 16 bits. */
7683 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7684
7685 return offset + 0x8000 < 0x10000u - extra;
7686 }
7687 \f
7688 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7689
7690 static bool
7691 reg_offset_addressing_ok_p (machine_mode mode)
7692 {
7693 switch (mode)
7694 {
7695 case E_V16QImode:
7696 case E_V8HImode:
7697 case E_V4SFmode:
7698 case E_V4SImode:
7699 case E_V2DFmode:
7700 case E_V2DImode:
7701 case E_V1TImode:
7702 case E_TImode:
7703 case E_TFmode:
7704 case E_KFmode:
7705 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7706 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7707 a vector mode, if we want to use the VSX registers to move it around,
7708 we need to restrict ourselves to reg+reg addressing. Similarly for
7709 IEEE 128-bit floating point that is passed in a single vector
7710 register. */
7711 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7712 return mode_supports_dq_form (mode);
7713 break;
7714
7715 case E_SDmode:
7716 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7717 addressing for the LFIWZX and STFIWX instructions. */
7718 if (TARGET_NO_SDMODE_STACK)
7719 return false;
7720 break;
7721
7722 default:
7723 break;
7724 }
7725
7726 return true;
7727 }
7728
7729 static bool
7730 virtual_stack_registers_memory_p (rtx op)
7731 {
7732 int regnum;
7733
7734 if (REG_P (op))
7735 regnum = REGNO (op);
7736
7737 else if (GET_CODE (op) == PLUS
7738 && REG_P (XEXP (op, 0))
7739 && CONST_INT_P (XEXP (op, 1)))
7740 regnum = REGNO (XEXP (op, 0));
7741
7742 else
7743 return false;
7744
7745 return (regnum >= FIRST_VIRTUAL_REGISTER
7746 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7747 }
7748
/* Return true if a MODE-sized memory access to OP plus OFFSET
   is known not to straddle a 32k boundary.  This function is used
7751 to determine whether -mcmodel=medium code can use TOC pointer
7752 relative addressing for OP. This means the alignment of the TOC
7753 pointer must also be taken into account, and unfortunately that is
7754 only 8 bytes. */
7755
7756 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7757 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7758 #endif
7759
7760 static bool
7761 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7762 machine_mode mode)
7763 {
7764 tree decl;
7765 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7766
7767 if (!SYMBOL_REF_P (op))
7768 return false;
7769
7770 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7771 SYMBOL_REF. */
7772 if (mode_supports_dq_form (mode))
7773 return false;
7774
7775 dsize = GET_MODE_SIZE (mode);
7776 decl = SYMBOL_REF_DECL (op);
7777 if (!decl)
7778 {
7779 if (dsize == 0)
7780 return false;
7781
7782 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7783 replacing memory addresses with an anchor plus offset. We
7784 could find the decl by rummaging around in the block->objects
7785 VEC for the given offset but that seems like too much work. */
7786 dalign = BITS_PER_UNIT;
7787 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7788 && SYMBOL_REF_ANCHOR_P (op)
7789 && SYMBOL_REF_BLOCK (op) != NULL)
7790 {
7791 struct object_block *block = SYMBOL_REF_BLOCK (op);
7792
7793 dalign = block->alignment;
7794 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7795 }
7796 else if (CONSTANT_POOL_ADDRESS_P (op))
7797 {
7798 /* It would be nice to have get_pool_align().. */
7799 machine_mode cmode = get_pool_mode (op);
7800
7801 dalign = GET_MODE_ALIGNMENT (cmode);
7802 }
7803 }
7804 else if (DECL_P (decl))
7805 {
7806 dalign = DECL_ALIGN (decl);
7807
7808 if (dsize == 0)
7809 {
7810 /* Allow BLKmode when the entire object is known to not
7811 cross a 32k boundary. */
7812 if (!DECL_SIZE_UNIT (decl))
7813 return false;
7814
7815 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7816 return false;
7817
7818 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7819 if (dsize > 32768)
7820 return false;
7821
7822 dalign /= BITS_PER_UNIT;
7823 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7824 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7825 return dalign >= dsize;
7826 }
7827 }
7828 else
7829 gcc_unreachable ();
7830
7831 /* Find how many bits of the alignment we know for this access. */
7832 dalign /= BITS_PER_UNIT;
7833 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7834 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7835 mask = dalign - 1;
7836 lsb = offset & -offset;
7837 mask &= lsb - 1;
7838 dalign = mask + 1;
7839
7840 return dalign >= dsize;
7841 }
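
/* Worked example with DSIZE == 8 and a decl aligned to 8 bytes (the most
   the TOC pointer guarantees): OFFSET == 16 gives lsb == 16 and
   mask == 7 & 15 == 7, hence dalign == 8 >= 8, so the access is OK.
   OFFSET == 12 gives lsb == 4 and mask == 7 & 3 == 3, hence dalign == 4
   < 8, so the access might straddle a 32k boundary and is rejected.  */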
7842
7843 static bool
7844 constant_pool_expr_p (rtx op)
7845 {
7846 rtx base, offset;
7847
7848 split_const (op, &base, &offset);
7849 return (SYMBOL_REF_P (base)
7850 && CONSTANT_POOL_ADDRESS_P (base)
7851 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7852 }
7853
7854 /* These are only used to pass through from print_operand/print_operand_address
7855 to rs6000_output_addr_const_extra over the intervening function
7856 output_addr_const which is not target code. */
7857 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7858
7859 /* Return true if OP is a toc pointer relative address (the output
7860 of create_TOC_reference). If STRICT, do not match non-split
7861 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7862 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7863 TOCREL_OFFSET_RET respectively. */
7864
7865 bool
7866 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7867 const_rtx *tocrel_offset_ret)
7868 {
7869 if (!TARGET_TOC)
7870 return false;
7871
7872 if (TARGET_CMODEL != CMODEL_SMALL)
7873 {
/* When STRICT, ensure we have everything tidy.  */
7875 if (strict
7876 && !(GET_CODE (op) == LO_SUM
7877 && REG_P (XEXP (op, 0))
7878 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7879 return false;
7880
7881 /* When not strict, allow non-split TOC addresses and also allow
7882 (lo_sum (high ..)) TOC addresses created during reload. */
7883 if (GET_CODE (op) == LO_SUM)
7884 op = XEXP (op, 1);
7885 }
7886
7887 const_rtx tocrel_base = op;
7888 const_rtx tocrel_offset = const0_rtx;
7889
7890 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7891 {
7892 tocrel_base = XEXP (op, 0);
7893 tocrel_offset = XEXP (op, 1);
7894 }
7895
7896 if (tocrel_base_ret)
7897 *tocrel_base_ret = tocrel_base;
7898 if (tocrel_offset_ret)
7899 *tocrel_offset_ret = tocrel_offset;
7900
7901 return (GET_CODE (tocrel_base) == UNSPEC
7902 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7903 && REG_P (XVECEXP (tocrel_base, 0, 1))
7904 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7905 }
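
/* Illustrative shapes accepted above (register numbers are examples):
     (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL)
     (plus (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL) (const_int 8))
   and, for -mcmodel=medium/large, either of the above wrapped in
   (lo_sum (reg 9) ...).  */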
7906
7907 /* Return true if X is a constant pool address, and also for cmodel=medium
7908 if X is a toc-relative address known to be offsettable within MODE. */
7909
7910 bool
7911 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7912 bool strict)
7913 {
7914 const_rtx tocrel_base, tocrel_offset;
7915 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7916 && (TARGET_CMODEL != CMODEL_MEDIUM
7917 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7918 || mode == QImode
7919 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7920 INTVAL (tocrel_offset), mode)));
7921 }
7922
7923 static bool
7924 legitimate_small_data_p (machine_mode mode, rtx x)
7925 {
7926 return (DEFAULT_ABI == ABI_V4
7927 && !flag_pic && !TARGET_TOC
7928 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7929 && small_data_operand (x, mode));
7930 }
7931
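/* Return true if X is a valid reg + offset (D-form) address for MODE.
   STRICT selects strict register checking.  WORST_CASE requests a
   conservative answer, i.e. every word of a multi-word access must
   remain addressable from the offset.  */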
7932 bool
7933 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7934 bool strict, bool worst_case)
7935 {
7936 unsigned HOST_WIDE_INT offset;
7937 unsigned int extra;
7938
7939 if (GET_CODE (x) != PLUS)
7940 return false;
7941 if (!REG_P (XEXP (x, 0)))
7942 return false;
7943 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7944 return false;
7945 if (mode_supports_dq_form (mode))
7946 return quad_address_p (x, mode, strict);
7947 if (!reg_offset_addressing_ok_p (mode))
7948 return virtual_stack_registers_memory_p (x);
7949 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7950 return true;
7951 if (!CONST_INT_P (XEXP (x, 1)))
7952 return false;
7953
7954 offset = INTVAL (XEXP (x, 1));
7955 extra = 0;
7956 switch (mode)
7957 {
7958 case E_DFmode:
7959 case E_DDmode:
7960 case E_DImode:
7961 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7962 addressing. */
7963 if (VECTOR_MEM_VSX_P (mode))
7964 return false;
7965
7966 if (!worst_case)
7967 break;
7968 if (!TARGET_POWERPC64)
7969 extra = 4;
7970 else if (offset & 3)
7971 return false;
7972 break;
7973
7974 case E_TFmode:
7975 case E_IFmode:
7976 case E_KFmode:
7977 case E_TDmode:
7978 case E_TImode:
7979 case E_PTImode:
7980 extra = 8;
7981 if (!worst_case)
7982 break;
7983 if (!TARGET_POWERPC64)
7984 extra = 12;
7985 else if (offset & 3)
7986 return false;
7987 break;
7988
7989 default:
7990 break;
7991 }
7992
7993 offset += 0x8000;
7994 return offset < 0x10000 - extra;
7995 }
7996
7997 bool
7998 legitimate_indexed_address_p (rtx x, int strict)
7999 {
8000 rtx op0, op1;
8001
8002 if (GET_CODE (x) != PLUS)
8003 return false;
8004
8005 op0 = XEXP (x, 0);
8006 op1 = XEXP (x, 1);
8007
8008 return (REG_P (op0) && REG_P (op1)
8009 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8010 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8011 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8012 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8013 }
8014
8015 bool
8016 avoiding_indexed_address_p (machine_mode mode)
8017 {
8018 /* Avoid indexed addressing for modes that have non-indexed
8019 load/store instruction forms. */
8020 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8021 }
8022
8023 bool
8024 legitimate_indirect_address_p (rtx x, int strict)
8025 {
8026 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8027 }
8028
8029 bool
8030 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8031 {
8032 if (!TARGET_MACHO || !flag_pic
8033 || mode != SImode || !MEM_P (x))
8034 return false;
8035 x = XEXP (x, 0);
8036
8037 if (GET_CODE (x) != LO_SUM)
8038 return false;
8039 if (!REG_P (XEXP (x, 0)))
8040 return false;
8041 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8042 return false;
8043 x = XEXP (x, 1);
8044
8045 return CONSTANT_P (x);
8046 }
8047
8048 static bool
8049 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8050 {
8051 if (GET_CODE (x) != LO_SUM)
8052 return false;
8053 if (!REG_P (XEXP (x, 0)))
8054 return false;
8055 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8056 return false;
/* Quad word addresses are restricted; we can't use LO_SUM.  */
8058 if (mode_supports_dq_form (mode))
8059 return false;
8060 x = XEXP (x, 1);
8061
8062 if (TARGET_ELF || TARGET_MACHO)
8063 {
8064 bool large_toc_ok;
8065
8066 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8067 return false;
/* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, which usually calls
   push_reload from the reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
   recognizes some LO_SUM addresses as valid even though this
   function says the opposite.  In most cases LRA can generate
   correct code for address reloads through its own transformations;
   it cannot manage only some LO_SUM cases, so we need to add code
   here saying that those addresses are still valid.  */
8075 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8076 && small_toc_ref (x, VOIDmode));
8077 if (TARGET_TOC && ! large_toc_ok)
8078 return false;
8079 if (GET_MODE_NUNITS (mode) != 1)
8080 return false;
8081 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8082 && !(/* ??? Assume floating point reg based on mode? */
8083 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8084 return false;
8085
8086 return CONSTANT_P (x) || large_toc_ok;
8087 }
8088
8089 return false;
8090 }
8091
8092
8093 /* Try machine-dependent ways of modifying an illegitimate address
8094 to be legitimate. If we find one, return the new, valid address.
8095 This is used from only one place: `memory_address' in explow.c.
8096
8097 OLDX is the address as it was before break_out_memory_refs was
8098 called. In some cases it is useful to look at this to decide what
8099 needs to be done.
8100
8101 It is always safe for this function to do nothing. It exists to
8102 recognize opportunities to optimize the output.
8103
8104 On RS/6000, first check for the sum of a register with a constant
8105 integer that is out of range. If so, generate code to add the
8106 constant with the low-order 16 bits masked to the register and force
8107 this result into another register (this can be done with `cau').
8108 Then generate an address of REG+(CONST&0xffff), allowing for the
8109 possibility of bit 16 being a one.
8110
8111 Then check for the sum of a register and something not constant, try to
8112 load the other things into a register and return the sum. */
8113
8114 static rtx
8115 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8116 machine_mode mode)
8117 {
8118 unsigned int extra;
8119
8120 if (!reg_offset_addressing_ok_p (mode)
8121 || mode_supports_dq_form (mode))
8122 {
8123 if (virtual_stack_registers_memory_p (x))
8124 return x;
8125
8126 /* In theory we should not be seeing addresses of the form reg+0,
8127 but just in case it is generated, optimize it away. */
8128 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8129 return force_reg (Pmode, XEXP (x, 0));
8130
8131 /* For TImode with load/store quad, restrict addresses to just a single
8132 pointer, so it works with both GPRs and VSX registers. */
8133 /* Make sure both operands are registers. */
8134 else if (GET_CODE (x) == PLUS
8135 && (mode != TImode || !TARGET_VSX))
8136 return gen_rtx_PLUS (Pmode,
8137 force_reg (Pmode, XEXP (x, 0)),
8138 force_reg (Pmode, XEXP (x, 1)));
8139 else
8140 return force_reg (Pmode, x);
8141 }
8142 if (SYMBOL_REF_P (x))
8143 {
8144 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8145 if (model != 0)
8146 return rs6000_legitimize_tls_address (x, model);
8147 }
8148
8149 extra = 0;
8150 switch (mode)
8151 {
8152 case E_TFmode:
8153 case E_TDmode:
8154 case E_TImode:
8155 case E_PTImode:
8156 case E_IFmode:
8157 case E_KFmode:
8158 /* As in legitimate_offset_address_p we do not assume
8159 worst-case. The mode here is just a hint as to the registers
8160 used. A TImode is usually in gprs, but may actually be in
8161 fprs. Leave worst-case scenario for reload to handle via
8162 insn constraints. PTImode is only GPRs. */
8163 extra = 8;
8164 break;
8165 default:
8166 break;
8167 }
8168
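/* Worked example for the code below (r9/r10 stand for arbitrary
   registers): legitimizing (plus r9 0x12345) for DImode (EXTRA == 0)
   gives low_int == 0x2345 and high_int == 0x10000, so we emit
   r10 = r9 + 0x10000 (a single addis) and return the valid offset
   address (plus r10 0x2345).  */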
8169 if (GET_CODE (x) == PLUS
8170 && REG_P (XEXP (x, 0))
8171 && CONST_INT_P (XEXP (x, 1))
8172 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8173 >= 0x10000 - extra))
8174 {
8175 HOST_WIDE_INT high_int, low_int;
8176 rtx sum;
8177 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8178 if (low_int >= 0x8000 - extra)
8179 low_int = 0;
8180 high_int = INTVAL (XEXP (x, 1)) - low_int;
8181 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8182 GEN_INT (high_int)), 0);
8183 return plus_constant (Pmode, sum, low_int);
8184 }
8185 else if (GET_CODE (x) == PLUS
8186 && REG_P (XEXP (x, 0))
8187 && !CONST_INT_P (XEXP (x, 1))
8188 && GET_MODE_NUNITS (mode) == 1
8189 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8190 || (/* ??? Assume floating point reg based on mode? */
8191 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8192 && !avoiding_indexed_address_p (mode))
8193 {
8194 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8195 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8196 }
8197 else if ((TARGET_ELF
8198 #if TARGET_MACHO
8199 || !MACHO_DYNAMIC_NO_PIC_P
8200 #endif
8201 )
8202 && TARGET_32BIT
8203 && TARGET_NO_TOC
8204 && !flag_pic
8205 && !CONST_INT_P (x)
8206 && !CONST_WIDE_INT_P (x)
8207 && !CONST_DOUBLE_P (x)
8208 && CONSTANT_P (x)
8209 && GET_MODE_NUNITS (mode) == 1
8210 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8211 || (/* ??? Assume floating point reg based on mode? */
8212 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8213 {
8214 rtx reg = gen_reg_rtx (Pmode);
8215 if (TARGET_ELF)
8216 emit_insn (gen_elf_high (reg, x));
8217 else
8218 emit_insn (gen_macho_high (reg, x));
8219 return gen_rtx_LO_SUM (Pmode, reg, x);
8220 }
8221 else if (TARGET_TOC
8222 && SYMBOL_REF_P (x)
8223 && constant_pool_expr_p (x)
8224 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8225 return create_TOC_reference (x, NULL_RTX);
8226 else
8227 return x;
8228 }
8229
8230 /* Debug version of rs6000_legitimize_address. */
8231 static rtx
8232 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8233 {
8234 rtx ret;
8235 rtx_insn *insns;
8236
8237 start_sequence ();
8238 ret = rs6000_legitimize_address (x, oldx, mode);
8239 insns = get_insns ();
8240 end_sequence ();
8241
8242 if (ret != x)
8243 {
8244 fprintf (stderr,
8245 "\nrs6000_legitimize_address: mode %s, old code %s, "
8246 "new code %s, modified\n",
8247 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8248 GET_RTX_NAME (GET_CODE (ret)));
8249
8250 fprintf (stderr, "Original address:\n");
8251 debug_rtx (x);
8252
8253 fprintf (stderr, "oldx:\n");
8254 debug_rtx (oldx);
8255
8256 fprintf (stderr, "New address:\n");
8257 debug_rtx (ret);
8258
8259 if (insns)
8260 {
8261 fprintf (stderr, "Insns added:\n");
8262 debug_rtx_list (insns, 20);
8263 }
8264 }
8265 else
8266 {
8267 fprintf (stderr,
8268 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8269 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8270
8271 debug_rtx (x);
8272 }
8273
8274 if (insns)
8275 emit_insn (insns);
8276
8277 return ret;
8278 }
8279
8280 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8281 We need to emit DTP-relative relocations. */
8282
8283 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8284 static void
8285 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8286 {
8287 switch (size)
8288 {
8289 case 4:
8290 fputs ("\t.long\t", file);
8291 break;
8292 case 8:
8293 fputs (DOUBLE_INT_ASM_OP, file);
8294 break;
8295 default:
8296 gcc_unreachable ();
8297 }
8298 output_addr_const (file, x);
8299 if (TARGET_ELF)
8300 fputs ("@dtprel+0x8000", file);
8301 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8302 {
8303 switch (SYMBOL_REF_TLS_MODEL (x))
8304 {
8305 case 0:
8306 break;
8307 case TLS_MODEL_LOCAL_EXEC:
8308 fputs ("@le", file);
8309 break;
8310 case TLS_MODEL_INITIAL_EXEC:
8311 fputs ("@ie", file);
8312 break;
8313 case TLS_MODEL_GLOBAL_DYNAMIC:
8314 case TLS_MODEL_LOCAL_DYNAMIC:
8315 fputs ("@m", file);
8316 break;
8317 default:
8318 gcc_unreachable ();
8319 }
8320 }
8321 }
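
/* For example, on 64-bit ELF a DTP-relative reference to "x" is emitted as
   ".quad x@dtprel+0x8000" (DOUBLE_INT_ASM_OP is the .quad directive there);
   the 4-byte case uses ".long" instead.  */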
8322
8323 /* Return true if X is a symbol that refers to real (rather than emulated)
8324 TLS. */
8325
8326 static bool
8327 rs6000_real_tls_symbol_ref_p (rtx x)
8328 {
8329 return (SYMBOL_REF_P (x)
8330 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8331 }
8332
8333 /* In the name of slightly smaller debug output, and to cater to
8334 general assembler lossage, recognize various UNSPEC sequences
8335 and turn them back into a direct symbol reference. */
8336
8337 static rtx
8338 rs6000_delegitimize_address (rtx orig_x)
8339 {
8340 rtx x, y, offset;
8341
8342 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8343 orig_x = XVECEXP (orig_x, 0, 0);
8344
8345 orig_x = delegitimize_mem_from_attrs (orig_x);
8346
8347 x = orig_x;
8348 if (MEM_P (x))
8349 x = XEXP (x, 0);
8350
8351 y = x;
8352 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8353 y = XEXP (y, 1);
8354
8355 offset = NULL_RTX;
8356 if (GET_CODE (y) == PLUS
8357 && GET_MODE (y) == Pmode
8358 && CONST_INT_P (XEXP (y, 1)))
8359 {
8360 offset = XEXP (y, 1);
8361 y = XEXP (y, 0);
8362 }
8363
8364 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8365 {
8366 y = XVECEXP (y, 0, 0);
8367
8368 #ifdef HAVE_AS_TLS
8369 /* Do not associate thread-local symbols with the original
8370 constant pool symbol. */
8371 if (TARGET_XCOFF
8372 && SYMBOL_REF_P (y)
8373 && CONSTANT_POOL_ADDRESS_P (y)
8374 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8375 return orig_x;
8376 #endif
8377
8378 if (offset != NULL_RTX)
8379 y = gen_rtx_PLUS (Pmode, y, offset);
8380 if (!MEM_P (orig_x))
8381 return y;
8382 else
8383 return replace_equiv_address_nv (orig_x, y);
8384 }
8385
8386 if (TARGET_MACHO
8387 && GET_CODE (orig_x) == LO_SUM
8388 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8389 {
8390 y = XEXP (XEXP (orig_x, 1), 0);
8391 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8392 return XVECEXP (y, 0, 0);
8393 }
8394
8395 return orig_x;
8396 }
8397
8398 /* Return true if X shouldn't be emitted into the debug info.
8399 The linker doesn't like .toc section references from
8400 .debug_* sections, so reject .toc section symbols. */
8401
8402 static bool
8403 rs6000_const_not_ok_for_debug_p (rtx x)
8404 {
8405 if (GET_CODE (x) == UNSPEC)
8406 return true;
8407 if (SYMBOL_REF_P (x)
8408 && CONSTANT_POOL_ADDRESS_P (x))
8409 {
8410 rtx c = get_pool_constant (x);
8411 machine_mode cmode = get_pool_mode (x);
8412 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8413 return true;
8414 }
8415
8416 return false;
8417 }
8418
8419 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8420
8421 static bool
8422 rs6000_legitimate_combined_insn (rtx_insn *insn)
8423 {
8424 int icode = INSN_CODE (insn);
8425
8426 /* Reject creating doloop insns. Combine should not be allowed
8427 to create these for a number of reasons:
8428 1) In a nested loop, if combine creates one of these in an
8429 outer loop and the register allocator happens to allocate ctr
8430 to the outer loop insn, then the inner loop can't use ctr.
8431 Inner loops ought to be more highly optimized.
8432 2) Combine often wants to create one of these from what was
8433 originally a three insn sequence, first combining the three
8434 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
allocated ctr, the splitter takes us back to the three insn
8436 sequence. It's better to stop combine at the two insn
8437 sequence.
3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8439 insns, the register allocator sometimes uses floating point
8440 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8441 jump insn and output reloads are not implemented for jumps,
8442 the ctrsi/ctrdi splitters need to handle all possible cases.
8443 That's a pain, and it gets to be seriously difficult when a
8444 splitter that runs after reload needs memory to transfer from
8445 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8446 for the difficult case. It's better to not create problems
8447 in the first place. */
8448 if (icode != CODE_FOR_nothing
8449 && (icode == CODE_FOR_bdz_si
8450 || icode == CODE_FOR_bdz_di
8451 || icode == CODE_FOR_bdnz_si
8452 || icode == CODE_FOR_bdnz_di
8453 || icode == CODE_FOR_bdztf_si
8454 || icode == CODE_FOR_bdztf_di
8455 || icode == CODE_FOR_bdnztf_si
8456 || icode == CODE_FOR_bdnztf_di))
8457 return false;
8458
8459 return true;
8460 }
8461
8462 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8463
8464 static GTY(()) rtx rs6000_tls_symbol;
8465 static rtx
8466 rs6000_tls_get_addr (void)
8467 {
8468 if (!rs6000_tls_symbol)
8469 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8470
8471 return rs6000_tls_symbol;
8472 }
8473
8474 /* Construct the SYMBOL_REF for TLS GOT references. */
8475
8476 static GTY(()) rtx rs6000_got_symbol;
8477 static rtx
8478 rs6000_got_sym (void)
8479 {
8480 if (!rs6000_got_symbol)
8481 {
8482 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8483 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8484 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8485 }
8486
8487 return rs6000_got_symbol;
8488 }
8489
8490 /* AIX Thread-Local Address support. */
8491
8492 static rtx
8493 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8494 {
8495 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8496 const char *name;
8497 char *tlsname;
8498
8499 name = XSTR (addr, 0);
8500 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8501 or the symbol will be in TLS private data section. */
8502 if (name[strlen (name) - 1] != ']'
8503 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8504 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8505 {
8506 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8507 strcpy (tlsname, name);
8508 strcat (tlsname,
8509 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8510 tlsaddr = copy_rtx (addr);
8511 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8512 }
8513 else
8514 tlsaddr = addr;
8515
8516 /* Place addr into TOC constant pool. */
8517 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8518
8519 /* Output the TOC entry and create the MEM referencing the value. */
8520 if (constant_pool_expr_p (XEXP (sym, 0))
8521 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8522 {
8523 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8524 mem = gen_const_mem (Pmode, tocref);
8525 set_mem_alias_set (mem, get_TOC_alias_set ());
8526 }
8527 else
8528 return sym;
8529
8530 /* Use global-dynamic for local-dynamic. */
8531 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8532 || model == TLS_MODEL_LOCAL_DYNAMIC)
8533 {
8534 /* Create new TOC reference for @m symbol. */
8535 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8536 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8537 strcpy (tlsname, "*LCM");
8538 strcat (tlsname, name + 3);
8539 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8540 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8541 tocref = create_TOC_reference (modaddr, NULL_RTX);
8542 rtx modmem = gen_const_mem (Pmode, tocref);
8543 set_mem_alias_set (modmem, get_TOC_alias_set ());
8544
8545 rtx modreg = gen_reg_rtx (Pmode);
8546 emit_insn (gen_rtx_SET (modreg, modmem));
8547
8548 tmpreg = gen_reg_rtx (Pmode);
8549 emit_insn (gen_rtx_SET (tmpreg, mem));
8550
8551 dest = gen_reg_rtx (Pmode);
8552 if (TARGET_32BIT)
8553 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8554 else
8555 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8556 return dest;
8557 }
8558 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8559 else if (TARGET_32BIT)
8560 {
8561 tlsreg = gen_reg_rtx (SImode);
8562 emit_insn (gen_tls_get_tpointer (tlsreg));
8563 }
8564 else
8565 tlsreg = gen_rtx_REG (DImode, 13);
8566
8567 /* Load the TOC value into temporary register. */
8568 tmpreg = gen_reg_rtx (Pmode);
8569 emit_insn (gen_rtx_SET (tmpreg, mem));
8570 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8571 gen_rtx_MINUS (Pmode, addr, tlsreg));
8572
8573 /* Add TOC symbol value to TLS pointer. */
8574 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8575
8576 return dest;
8577 }
8578
8579 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8580 __tls_get_addr call. */
8581
8582 void
8583 rs6000_output_tlsargs (rtx *operands)
8584 {
8585 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8586 rtx op[3];
8587
/* The set dest of the call, i.e. r3, which is also the first arg reg.  */
8589 op[0] = operands[0];
8590 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8591 op[1] = XVECEXP (operands[2], 0, 0);
8592 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8593 {
8594 /* The GOT register. */
8595 op[2] = XVECEXP (operands[2], 0, 1);
8596 if (TARGET_CMODEL != CMODEL_SMALL)
8597 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8598 "addi %0,%0,%1@got@tlsgd@l", op);
8599 else
8600 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8601 }
8602 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8603 {
8604 if (TARGET_CMODEL != CMODEL_SMALL)
8605 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8606 "addi %0,%0,%&@got@tlsld@l", op);
8607 else
8608 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8609 }
8610 else
8611 gcc_unreachable ();
8612 }
8613
/* Passes the TLS arg value from the global-dynamic and local-dynamic
   emit_library_call_value calls in rs6000_legitimize_tls_address on to
   rs6000_call_aix and rs6000_call_sysv, which use it to emit the marker
   relocs put on __tls_get_addr calls.  */
8618 static rtx global_tlsarg;
8619
8620 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8621 this (thread-local) address. */
8622
8623 static rtx
8624 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8625 {
8626 rtx dest, insn;
8627
8628 if (TARGET_XCOFF)
8629 return rs6000_legitimize_tls_address_aix (addr, model);
8630
8631 dest = gen_reg_rtx (Pmode);
8632 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8633 {
8634 rtx tlsreg;
8635
8636 if (TARGET_64BIT)
8637 {
8638 tlsreg = gen_rtx_REG (Pmode, 13);
8639 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8640 }
8641 else
8642 {
8643 tlsreg = gen_rtx_REG (Pmode, 2);
8644 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8645 }
8646 emit_insn (insn);
8647 }
8648 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8649 {
8650 rtx tlsreg, tmp;
8651
8652 tmp = gen_reg_rtx (Pmode);
8653 if (TARGET_64BIT)
8654 {
8655 tlsreg = gen_rtx_REG (Pmode, 13);
8656 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8657 }
8658 else
8659 {
8660 tlsreg = gen_rtx_REG (Pmode, 2);
8661 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8662 }
8663 emit_insn (insn);
8664 if (TARGET_64BIT)
8665 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8666 else
8667 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8668 emit_insn (insn);
8669 }
8670 else
8671 {
8672 rtx got, tga, tmp1, tmp2;
8673
8674 /* We currently use relocations like @got@tlsgd for tls, which
8675 means the linker will handle allocation of tls entries, placing
8676 them in the .got section. So use a pointer to the .got section,
8677 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8678 or to secondary GOT sections used by 32-bit -fPIC. */
8679 if (TARGET_64BIT)
8680 got = gen_rtx_REG (Pmode, 2);
8681 else
8682 {
8683 if (flag_pic == 1)
8684 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8685 else
8686 {
8687 rtx gsym = rs6000_got_sym ();
8688 got = gen_reg_rtx (Pmode);
8689 if (flag_pic == 0)
8690 rs6000_emit_move (got, gsym, Pmode);
8691 else
8692 {
8693 rtx mem, lab;
8694
8695 tmp1 = gen_reg_rtx (Pmode);
8696 tmp2 = gen_reg_rtx (Pmode);
8697 mem = gen_const_mem (Pmode, tmp1);
8698 lab = gen_label_rtx ();
8699 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8700 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8701 if (TARGET_LINK_STACK)
8702 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8703 emit_move_insn (tmp2, mem);
8704 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8705 set_unique_reg_note (last, REG_EQUAL, gsym);
8706 }
8707 }
8708 }
8709
8710 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8711 {
8712 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8713 UNSPEC_TLSGD);
8714 tga = rs6000_tls_get_addr ();
8715 global_tlsarg = arg;
8716 if (TARGET_TLS_MARKERS)
8717 {
8718 rtx argreg = gen_rtx_REG (Pmode, 3);
8719 emit_insn (gen_rtx_SET (argreg, arg));
8720 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8721 argreg, Pmode);
8722 }
8723 else
8724 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8725 global_tlsarg = NULL_RTX;
8726
8727 /* Make a note so that the result of this call can be CSEd. */
8728 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8729 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8730 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8731 }
8732 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8733 {
8734 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8735 tga = rs6000_tls_get_addr ();
8736 tmp1 = gen_reg_rtx (Pmode);
8737 global_tlsarg = arg;
8738 if (TARGET_TLS_MARKERS)
8739 {
8740 rtx argreg = gen_rtx_REG (Pmode, 3);
8741 emit_insn (gen_rtx_SET (argreg, arg));
8742 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8743 argreg, Pmode);
8744 }
8745 else
8746 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8747 global_tlsarg = NULL_RTX;
8748
8749 /* Make a note so that the result of this call can be CSEd. */
8750 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8751 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8752 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8753
8754 if (rs6000_tls_size == 16)
8755 {
8756 if (TARGET_64BIT)
8757 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8758 else
8759 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8760 }
8761 else if (rs6000_tls_size == 32)
8762 {
8763 tmp2 = gen_reg_rtx (Pmode);
8764 if (TARGET_64BIT)
8765 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8766 else
8767 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8768 emit_insn (insn);
8769 if (TARGET_64BIT)
8770 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8771 else
8772 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8773 }
8774 else
8775 {
8776 tmp2 = gen_reg_rtx (Pmode);
8777 if (TARGET_64BIT)
8778 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8779 else
8780 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8781 emit_insn (insn);
8782 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8783 }
8784 emit_insn (insn);
8785 }
8786 else
8787 {
8788 /* IE, or 64-bit offset LE. */
8789 tmp2 = gen_reg_rtx (Pmode);
8790 if (TARGET_64BIT)
8791 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8792 else
8793 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8794 emit_insn (insn);
8795 if (TARGET_64BIT)
8796 insn = gen_tls_tls_64 (dest, tmp2, addr);
8797 else
8798 insn = gen_tls_tls_32 (dest, tmp2, addr);
8799 emit_insn (insn);
8800 }
8801 }
8802
8803 return dest;
8804 }
8805
8806 /* Only create the global variable for the stack protect guard if we are using
8807 the global flavor of that guard. */
8808 static tree
8809 rs6000_init_stack_protect_guard (void)
8810 {
8811 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8812 return default_stack_protect_guard ();
8813
8814 return NULL_TREE;
8815 }
8816
8817 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8818
8819 static bool
8820 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8821 {
8822 if (GET_CODE (x) == HIGH
8823 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8824 return true;
8825
8826 /* A TLS symbol in the TOC cannot contain a sum. */
8827 if (GET_CODE (x) == CONST
8828 && GET_CODE (XEXP (x, 0)) == PLUS
8829 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8830 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8831 return true;
8832
8833 /* Do not place an ELF TLS symbol in the constant pool. */
8834 return TARGET_ELF && tls_referenced_p (x);
8835 }
8836
8837 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8838 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8839 can be addressed relative to the toc pointer. */
8840
8841 static bool
8842 use_toc_relative_ref (rtx sym, machine_mode mode)
8843 {
8844 return ((constant_pool_expr_p (sym)
8845 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8846 get_pool_mode (sym)))
8847 || (TARGET_CMODEL == CMODEL_MEDIUM
8848 && SYMBOL_REF_LOCAL_P (sym)
8849 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8850 }
8851
8852 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8853 that is a valid memory address for an instruction.
8854 The MODE argument is the machine mode for the MEM expression
8855 that wants to use this address.
8856
8857 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
8858 refers to a constant pool entry of an address (or the sum of it
8859 plus a constant), a short (16-bit signed) constant plus a register,
8860 the sum of two registers, or a register indirect, possibly with an
8861 auto-increment. For DFmode, DDmode and DImode with a constant plus
8862 register, we must ensure that both words are addressable, or on
8863 PowerPC64 that the offset is word aligned.
8864
8865 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8866 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8867 because adjacent memory cells are accessed by adding word-sized offsets
8868 during assembly output. */
8869 static bool
8870 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8871 {
8872 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8873 bool quad_offset_p = mode_supports_dq_form (mode);
8874
8875 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8876 if (VECTOR_MEM_ALTIVEC_P (mode)
8877 && GET_CODE (x) == AND
8878 && CONST_INT_P (XEXP (x, 1))
8879 && INTVAL (XEXP (x, 1)) == -16)
8880 x = XEXP (x, 0);
8881
8882 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8883 return 0;
8884 if (legitimate_indirect_address_p (x, reg_ok_strict))
8885 return 1;
8886 if (TARGET_UPDATE
8887 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8888 && mode_supports_pre_incdec_p (mode)
8889 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8890 return 1;
8891 /* Handle restricted vector d-form offsets in ISA 3.0. */
8892 if (quad_offset_p)
8893 {
8894 if (quad_address_p (x, mode, reg_ok_strict))
8895 return 1;
8896 }
8897 else if (virtual_stack_registers_memory_p (x))
8898 return 1;
8899
8900 else if (reg_offset_p)
8901 {
8902 if (legitimate_small_data_p (mode, x))
8903 return 1;
8904 if (legitimate_constant_pool_address_p (x, mode,
8905 reg_ok_strict || lra_in_progress))
8906 return 1;
8907 }
8908
8909 /* For TImode, if we have TImode in VSX registers, only allow register
8910 indirect addresses. This will allow the values to go in either GPRs
8911 or VSX registers without reloading. The vector types would tend to
8912 go into VSX registers, so we allow REG+REG, while TImode seems
8913 somewhat split, in that some uses are GPR based, and some VSX based. */
8914 /* FIXME: We could loosen this by changing the following to
8915 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
8916 but currently we cannot allow REG+REG addressing for TImode. See
8917 PR72827 for complete details on how this ends up hoodwinking DSE. */
8918 if (mode == TImode && TARGET_VSX)
8919 return 0;
8920 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
8921 if (! reg_ok_strict
8922 && reg_offset_p
8923 && GET_CODE (x) == PLUS
8924 && REG_P (XEXP (x, 0))
8925 && (XEXP (x, 0) == virtual_stack_vars_rtx
8926 || XEXP (x, 0) == arg_pointer_rtx)
8927 && CONST_INT_P (XEXP (x, 1)))
8928 return 1;
8929 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8930 return 1;
8931 if (!FLOAT128_2REG_P (mode)
8932 && (TARGET_HARD_FLOAT
8933 || TARGET_POWERPC64
8934 || (mode != DFmode && mode != DDmode))
8935 && (TARGET_POWERPC64 || mode != DImode)
8936 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8937 && mode != PTImode
8938 && !avoiding_indexed_address_p (mode)
8939 && legitimate_indexed_address_p (x, reg_ok_strict))
8940 return 1;
8941 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8942 && mode_supports_pre_modify_p (mode)
8943 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8944 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8945 reg_ok_strict, false)
8946 || (!avoiding_indexed_address_p (mode)
8947 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8948 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8949 return 1;
8950 if (reg_offset_p && !quad_offset_p
8951 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8952 return 1;
8953 return 0;
8954 }
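/* Illustrative examples of addresses the function above accepts (assuming
   the mode and target checks pass; these are not taken from GCC output):

     (reg r9)                          register indirect
     (pre_inc (reg r9))                auto-increment, with -mupdate
     (plus (reg r9) (const_int 8))     register + 16-bit signed offset
     (plus (reg r9) (reg r10))         indexed, register + register
     (lo_sum (reg r9) (symbol_ref))    low part of a HIGH/LO_SUM pair  */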
8955
8956 /* Debug version of rs6000_legitimate_address_p. */
8957 static bool
8958 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8959 bool reg_ok_strict)
8960 {
8961 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8962 fprintf (stderr,
8963 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8964 "strict = %d, reload = %s, code = %s\n",
8965 ret ? "true" : "false",
8966 GET_MODE_NAME (mode),
8967 reg_ok_strict,
8968 (reload_completed ? "after" : "before"),
8969 GET_RTX_NAME (GET_CODE (x)));
8970 debug_rtx (x);
8971
8972 return ret;
8973 }
8974
8975 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8976
8977 static bool
8978 rs6000_mode_dependent_address_p (const_rtx addr,
8979 addr_space_t as ATTRIBUTE_UNUSED)
8980 {
8981 return rs6000_mode_dependent_address_ptr (addr);
8982 }
8983
8984 /* Go to LABEL if ADDR (a legitimate address expression)
8985 has an effect that depends on the machine mode it is used for.
8986
8987 On the RS/6000 this is true of all integral offsets (since AltiVec
8988 and VSX modes don't allow them) or is a pre-increment or decrement.
8989
8990 ??? Except that due to conceptual problems in offsettable_address_p
8991 we can't really report the problems of integral offsets. So leave
8992 this assuming that the adjustable offset must be valid for the
8993 sub-words of a TFmode operand, which is what we had before. */
8994
8995 static bool
8996 rs6000_mode_dependent_address (const_rtx addr)
8997 {
8998 switch (GET_CODE (addr))
8999 {
9000 case PLUS:
9001 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9002 is considered a legitimate address before reload, so there
9003 are no offset restrictions in that case. Note that this
9004 condition is safe in strict mode because any address involving
9005 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9006 been rejected as illegitimate. */
9007 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9008 && XEXP (addr, 0) != arg_pointer_rtx
9009 && CONST_INT_P (XEXP (addr, 1)))
9010 {
9011 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9012 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9013 }
9014 break;
9015
9016 case LO_SUM:
9017 /* Anything in the constant pool is sufficiently aligned that
9018 all bytes have the same high part address. */
9019 return !legitimate_constant_pool_address_p (addr, QImode, false);
9020
9021 /* Auto-increment cases are now treated generically in recog.c. */
9022 case PRE_MODIFY:
9023 return TARGET_UPDATE;
9024
9025 /* AND is only allowed in Altivec loads. */
9026 case AND:
9027 return true;
9028
9029 default:
9030 break;
9031 }
9032
9033 return false;
9034 }
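/* A minimal standalone sketch (hypothetical helper, not used by GCC) of
   the unsigned-compare range test in the PLUS case above.  It is
   equivalent to checking val < -0x8000 || val > 0x7fff - (p64 ? 8 : 12);
   the 8- or 12-byte slack leaves room for the remaining words of a
   worst-case 16-byte access, at VAL and VAL+8 on 64-bit, or at VAL,
   VAL+4, VAL+8 and VAL+12 on 32-bit.  */

static int
offset_is_mode_dependent_sketch (unsigned long long val, int powerpc64)
{
  /* One unsigned comparison covers both the too-negative and the
     too-positive cases, exactly as in rs6000_mode_dependent_address.  */
  return val + 0x8000 >= 0x10000 - (powerpc64 ? 8 : 12);
}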
9035
9036 /* Debug version of rs6000_mode_dependent_address. */
9037 static bool
9038 rs6000_debug_mode_dependent_address (const_rtx addr)
9039 {
9040 bool ret = rs6000_mode_dependent_address (addr);
9041
9042 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9043 ret ? "true" : "false");
9044 debug_rtx (addr);
9045
9046 return ret;
9047 }
9048
9049 /* Implement FIND_BASE_TERM. */
9050
9051 rtx
9052 rs6000_find_base_term (rtx op)
9053 {
9054 rtx base;
9055
9056 base = op;
9057 if (GET_CODE (base) == CONST)
9058 base = XEXP (base, 0);
9059 if (GET_CODE (base) == PLUS)
9060 base = XEXP (base, 0);
9061 if (GET_CODE (base) == UNSPEC)
9062 switch (XINT (base, 1))
9063 {
9064 case UNSPEC_TOCREL:
9065 case UNSPEC_MACHOPIC_OFFSET:
9066 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9067 for aliasing purposes. */
9068 return XVECEXP (base, 0, 0);
9069 }
9070
9071 return op;
9072 }
9073
9074 /* More elaborate version of recog's offsettable_memref_p predicate
9075 that works around the ??? note of rs6000_mode_dependent_address.
9076 In particular it accepts
9077
9078 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9079
9080 in 32-bit mode, that the recog predicate rejects. */
9081
9082 static bool
9083 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9084 {
9085 bool worst_case;
9086
9087 if (!MEM_P (op))
9088 return false;
9089
9090 /* First mimic offsettable_memref_p. */
9091 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9092 return true;
9093
9094 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9095 the latter predicate knows nothing about the mode of the memory
9096 reference and, therefore, assumes that it is the largest supported
9097 mode (TFmode). As a consequence, legitimate offsettable memory
9098 references are rejected. rs6000_legitimate_offset_address_p contains
9099 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9100 at least with a little bit of help here given that we know the
9101 actual registers used. */
9102 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9103 || GET_MODE_SIZE (reg_mode) == 4);
9104 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9105 strict, worst_case);
9106 }
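/* Worked example for the DImode reference quoted above (illustrative):
   offset 32760 (0x7ff8) with 4-byte subword accesses touches 32760 and
   32764, both within the signed 16-bit range, so the reference really is
   offsettable.  The generic predicate rejects it only because it assumes
   a worst-case 16-byte (TFmode) access whose last word would sit at
   32772, beyond 32767.  */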
9107
9108 /* Determine the reassociation width to be used in reassociate_bb.
9109 This takes into account how many parallel operations we
9110 can actually do of a given type, and also the latency.
9111 P8:
9112 int add/sub 6/cycle
9113 mul 2/cycle
9114 vect add/sub/mul 2/cycle
9115 fp add/sub/mul 2/cycle
9116 dfp 1/cycle
9117 */
9118
9119 static int
9120 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9121 machine_mode mode)
9122 {
9123 switch (rs6000_tune)
9124 {
9125 case PROCESSOR_POWER8:
9126 case PROCESSOR_POWER9:
9127 case PROCESSOR_FUTURE:
9128 if (DECIMAL_FLOAT_MODE_P (mode))
9129 return 1;
9130 if (VECTOR_MODE_P (mode))
9131 return 4;
9132 if (INTEGRAL_MODE_P (mode))
9133 return 1;
9134 if (FLOAT_MODE_P (mode))
9135 return 4;
9136 break;
9137 default:
9138 break;
9139 }
9140 return 1;
9141 }
9142
9143 /* Change register usage conditional on target flags. */
9144 static void
9145 rs6000_conditional_register_usage (void)
9146 {
9147 int i;
9148
9149 if (TARGET_DEBUG_TARGET)
9150 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9151
9152 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9153 if (TARGET_64BIT)
9154 fixed_regs[13] = call_used_regs[13]
9155 = call_really_used_regs[13] = 1;
9156
9157 /* Conditionally disable FPRs. */
9158 if (TARGET_SOFT_FLOAT)
9159 for (i = 32; i < 64; i++)
9160 fixed_regs[i] = call_used_regs[i]
9161 = call_really_used_regs[i] = 1;
9162
9163 /* The TOC register is not killed across calls in a way that is
9164 visible to the compiler. */
9165 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9166 call_really_used_regs[2] = 0;
9167
9168 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9169 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9170
9171 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9172 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9173 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9174 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9175
9176 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9177 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9178 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9179 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9180
9181 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9182 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9183 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9184
9185 if (!TARGET_ALTIVEC && !TARGET_VSX)
9186 {
9187 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9188 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9189 call_really_used_regs[VRSAVE_REGNO] = 1;
9190 }
9191
9192 if (TARGET_ALTIVEC || TARGET_VSX)
9193 global_regs[VSCR_REGNO] = 1;
9194
9195 if (TARGET_ALTIVEC_ABI)
9196 {
9197 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9198 call_used_regs[i] = call_really_used_regs[i] = 1;
9199
9200 /* AIX reserves VR20:31 in non-extended ABI mode. */
9201 if (TARGET_XCOFF)
9202 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9203 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9204 }
9205 }
9206
9207 \f
9208 /* Output insns to set DEST equal to the constant SOURCE as a series of
9209 lis, ori and shl instructions and return TRUE. */
9210
9211 bool
9212 rs6000_emit_set_const (rtx dest, rtx source)
9213 {
9214 machine_mode mode = GET_MODE (dest);
9215 rtx temp, set;
9216 rtx_insn *insn;
9217 HOST_WIDE_INT c;
9218
9219 gcc_checking_assert (CONST_INT_P (source));
9220 c = INTVAL (source);
9221 switch (mode)
9222 {
9223 case E_QImode:
9224 case E_HImode:
9225 emit_insn (gen_rtx_SET (dest, source));
9226 return true;
9227
9228 case E_SImode:
9229 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9230
9231 emit_insn (gen_rtx_SET (copy_rtx (temp),
9232 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9233 emit_insn (gen_rtx_SET (dest,
9234 gen_rtx_IOR (SImode, copy_rtx (temp),
9235 GEN_INT (c & 0xffff))));
9236 break;
9237
9238 case E_DImode:
9239 if (!TARGET_POWERPC64)
9240 {
9241 rtx hi, lo;
9242
9243 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9244 DImode);
9245 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9246 DImode);
9247 emit_move_insn (hi, GEN_INT (c >> 32));
9248 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9249 emit_move_insn (lo, GEN_INT (c));
9250 }
9251 else
9252 rs6000_emit_set_long_const (dest, c);
9253 break;
9254
9255 default:
9256 gcc_unreachable ();
9257 }
9258
9259 insn = get_last_insn ();
9260 set = single_set (insn);
9261 if (! CONSTANT_P (SET_SRC (set)))
9262 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9263
9264 return true;
9265 }
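/* A standalone sketch (not GCC code) of the two-insn SImode split used
   above: 'lis' materializes the high 16 bits, 'ori' merges in the low
   16 bits.  */

static unsigned int
set_const_si_sketch (unsigned int c)
{
  unsigned int r = c & 0xffff0000;   /* lis  r,(c >> 16)       */
  r |= c & 0xffff;                   /* ori  r,r,(c & 0xffff)  */
  return r;                          /* r == c by construction */
}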
9266
9267 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9268 Output insns to set DEST equal to the constant C as a series of
9269 lis, ori and shl instructions. */
9270
9271 static void
9272 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9273 {
9274 rtx temp;
9275 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9276
9277 ud1 = c & 0xffff;
9278 c = c >> 16;
9279 ud2 = c & 0xffff;
9280 c = c >> 16;
9281 ud3 = c & 0xffff;
9282 c = c >> 16;
9283 ud4 = c & 0xffff;
9284
9285 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9286 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9287 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9288
9289 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9290 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9291 {
9292 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9293
9294 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9295 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9296 if (ud1 != 0)
9297 emit_move_insn (dest,
9298 gen_rtx_IOR (DImode, copy_rtx (temp),
9299 GEN_INT (ud1)));
9300 }
9301 else if (ud3 == 0 && ud4 == 0)
9302 {
9303 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9304
9305 gcc_assert (ud2 & 0x8000);
9306 emit_move_insn (copy_rtx (temp),
9307 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9308 if (ud1 != 0)
9309 emit_move_insn (copy_rtx (temp),
9310 gen_rtx_IOR (DImode, copy_rtx (temp),
9311 GEN_INT (ud1)));
9312 emit_move_insn (dest,
9313 gen_rtx_ZERO_EXTEND (DImode,
9314 gen_lowpart (SImode,
9315 copy_rtx (temp))));
9316 }
9317 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9318 || (ud4 == 0 && ! (ud3 & 0x8000)))
9319 {
9320 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9321
9322 emit_move_insn (copy_rtx (temp),
9323 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9324 if (ud2 != 0)
9325 emit_move_insn (copy_rtx (temp),
9326 gen_rtx_IOR (DImode, copy_rtx (temp),
9327 GEN_INT (ud2)));
9328 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9329 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9330 GEN_INT (16)));
9331 if (ud1 != 0)
9332 emit_move_insn (dest,
9333 gen_rtx_IOR (DImode, copy_rtx (temp),
9334 GEN_INT (ud1)));
9335 }
9336 else
9337 {
9338 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9339
9340 emit_move_insn (copy_rtx (temp),
9341 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9342 if (ud3 != 0)
9343 emit_move_insn (copy_rtx (temp),
9344 gen_rtx_IOR (DImode, copy_rtx (temp),
9345 GEN_INT (ud3)));
9346
9347 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9348 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9349 GEN_INT (32)));
9350 if (ud2 != 0)
9351 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9352 gen_rtx_IOR (DImode, copy_rtx (temp),
9353 GEN_INT (ud2 << 16)));
9354 if (ud1 != 0)
9355 emit_move_insn (dest,
9356 gen_rtx_IOR (DImode, copy_rtx (temp),
9357 GEN_INT (ud1)));
9358 }
9359 }
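/* A standalone sketch (not GCC code) of the chunking above, shown on an
   example constant.  The (x ^ 0x80000000) - 0x80000000 idiom used in the
   function sign-extends a 32-bit value to full width, matching what the
   hardware does after 'lis'.  */

static void
set_long_const_sketch (void)
{
  unsigned long long c = 0x123456789abcdef0ULL;  /* example constant */
  unsigned int ud1 = c & 0xffff;                 /* bits  0..15 -> 0xdef0 */
  unsigned int ud2 = (c >> 16) & 0xffff;         /* bits 16..31 -> 0x9abc */
  unsigned int ud3 = (c >> 32) & 0xffff;         /* bits 32..47 -> 0x5678 */
  unsigned int ud4 = (c >> 48) & 0xffff;         /* bits 48..63 -> 0x1234 */
  /* Worst case, the function above emits the five-insn sequence:
       lis r,0x1234;  ori r,r,0x5678;  sldi r,r,32;
       oris r,r,0x9abc;  ori r,r,0xdef0  */
  (void) ud1; (void) ud2; (void) ud3; (void) ud4;
}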
9360
9361 /* Helper for the following. Get rid of [r+r] memory refs
9362 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9363
9364 static void
9365 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9366 {
9367 if (MEM_P (operands[0])
9368 && !REG_P (XEXP (operands[0], 0))
9369 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9370 GET_MODE (operands[0]), false))
9371 operands[0]
9372 = replace_equiv_address (operands[0],
9373 copy_addr_to_reg (XEXP (operands[0], 0)));
9374
9375 if (MEM_P (operands[1])
9376 && !REG_P (XEXP (operands[1], 0))
9377 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9378 GET_MODE (operands[1]), false))
9379 operands[1]
9380 = replace_equiv_address (operands[1],
9381 copy_addr_to_reg (XEXP (operands[1], 0)));
9382 }
9383
9384 /* Generate a vector of constants to permute MODE for a little-endian
9385 storage operation by swapping the two halves of a vector. */
9386 static rtvec
9387 rs6000_const_vec (machine_mode mode)
9388 {
9389 int i, subparts;
9390 rtvec v;
9391
9392 switch (mode)
9393 {
9394 case E_V1TImode:
9395 subparts = 1;
9396 break;
9397 case E_V2DFmode:
9398 case E_V2DImode:
9399 subparts = 2;
9400 break;
9401 case E_V4SFmode:
9402 case E_V4SImode:
9403 subparts = 4;
9404 break;
9405 case E_V8HImode:
9406 subparts = 8;
9407 break;
9408 case E_V16QImode:
9409 subparts = 16;
9410 break;
9411 default:
9412 gcc_unreachable ();
9413 }
9414
9415 v = rtvec_alloc (subparts);
9416
9417 for (i = 0; i < subparts / 2; ++i)
9418 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9419 for (i = subparts / 2; i < subparts; ++i)
9420 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9421
9422 return v;
9423 }
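/* A standalone sketch (not GCC code) of the selector built above: element
   i of the result selects source element (i + n/2) mod n, i.e. the two
   vector halves are swapped.  For V4SImode (n == 4) this yields
   { 2, 3, 0, 1 }.  */

static void
const_vec_sketch (int n, int *sel)
{
  for (int i = 0; i < n; i++)
    sel[i] = (i + n / 2) % n;   /* second half first, then first half */
}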
9424
9425 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9426 store operation. */
9427 void
9428 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9429 {
9430 /* Scalar permutations are easier to express in integer modes than in
9431 floating-point modes, so cast them here. We use V1TImode instead
9432 of TImode to ensure that the values don't go through GPRs. */
9433 if (FLOAT128_VECTOR_P (mode))
9434 {
9435 dest = gen_lowpart (V1TImode, dest);
9436 source = gen_lowpart (V1TImode, source);
9437 mode = V1TImode;
9438 }
9439
9440 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9441 scalar. */
9442 if (mode == TImode || mode == V1TImode)
9443 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9444 GEN_INT (64))));
9445 else
9446 {
9447 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9448 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9449 }
9450 }
9451
9452 /* Emit a little-endian load from vector memory location SOURCE to VSX
9453 register DEST in mode MODE. The load is done with two permuting
9454 insns that represent an lxvd2x and an xxpermdi. */
9455 void
9456 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9457 {
9458 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9459 V1TImode). */
9460 if (mode == TImode || mode == V1TImode)
9461 {
9462 mode = V2DImode;
9463 dest = gen_lowpart (V2DImode, dest);
9464 source = adjust_address (source, V2DImode, 0);
9465 }
9466
9467 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9468 rs6000_emit_le_vsx_permute (tmp, source, mode);
9469 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9470 }
9471
9472 /* Emit a little-endian store to vector memory location DEST from VSX
9473 register SOURCE in mode MODE. The store is done with two permuting
9474 insns that represent an xxpermdi and an stxvd2x. */
9475 void
9476 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9477 {
9478 /* This should never be called during or after LRA, because it does
9479 not re-permute the source register. It is intended only for use
9480 during expand. */
9481 gcc_assert (!lra_in_progress && !reload_completed);
9482
9483 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9484 V1TImode). */
9485 if (mode == TImode || mode == V1TImode)
9486 {
9487 mode = V2DImode;
9488 dest = adjust_address (dest, V2DImode, 0);
9489 source = gen_lowpart (V2DImode, source);
9490 }
9491
9492 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9493 rs6000_emit_le_vsx_permute (tmp, source, mode);
9494 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9495 }
9496
9497 /* Emit a sequence representing a little-endian VSX load or store,
9498 moving data from SOURCE to DEST in mode MODE. This is done
9499 separately from rs6000_emit_move to ensure it is called only
9500 during expand. LE VSX loads and stores introduced later are
9501 handled with a split. The expand-time RTL generation allows
9502 us to optimize away redundant pairs of register-permutes. */
9503 void
9504 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9505 {
9506 gcc_assert (!BYTES_BIG_ENDIAN
9507 && VECTOR_MEM_VSX_P (mode)
9508 && !TARGET_P9_VECTOR
9509 && !gpr_or_gpr_p (dest, source)
9510 && (MEM_P (source) ^ MEM_P (dest)));
9511
9512 if (MEM_P (source))
9513 {
9514 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9515 rs6000_emit_le_vsx_load (dest, source, mode);
9516 }
9517 else
9518 {
9519 if (!REG_P (source))
9520 source = force_reg (mode, source);
9521 rs6000_emit_le_vsx_store (dest, source, mode);
9522 }
9523 }
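/* A standalone sketch (not GCC code) of why the pairs of permutes emitted
   above are correct: on little-endian, lxvd2x/stxvd2x swap the two
   doublewords, and the adjacent xxpermdi swaps them back, so the net
   effect is a plain load or store.  Swapping halves twice is the
   identity:  */

static void
double_swap_sketch (long long v[2])
{
  long long t;
  t = v[0]; v[0] = v[1]; v[1] = t;   /* first permute (lxvd2x view) */
  t = v[0]; v[0] = v[1]; v[1] = t;   /* second permute (xxpermdi)   */
  /* V is now bit-for-bit what it was before.  */
}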
9524
9525 /* Return whether an SFmode or SImode move can be done without converting one
9526 mode to another. This arises when we have:
9527
9528 (SUBREG:SF (REG:SI ...))
9529 (SUBREG:SI (REG:SF ...))
9530
9531 and one of the values is in a floating point/vector register, where SFmode
9532 scalars are stored in DFmode format. */
9533
9534 bool
9535 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9536 {
9537 if (TARGET_ALLOW_SF_SUBREG)
9538 return true;
9539
9540 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9541 return true;
9542
9543 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9544 return true;
9545
9546 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9547 if (SUBREG_P (dest))
9548 {
9549 rtx dest_subreg = SUBREG_REG (dest);
9550 rtx src_subreg = SUBREG_REG (src);
9551 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9552 }
9553
9554 return false;
9555 }
9556
9557
9558 /* Helper function to change moves with:
9559
9560 (SUBREG:SF (REG:SI)) and
9561 (SUBREG:SI (REG:SF))
9562
9563 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9564 values are stored as DFmode values in the VSX registers. We need to convert
9565 the bits before we can use a direct move or operate on the bits in the
9566 vector register as an integer type.
9567
9568 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9569
9570 static bool
9571 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9572 {
9573 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9574 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9575 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9576 {
9577 rtx inner_source = SUBREG_REG (source);
9578 machine_mode inner_mode = GET_MODE (inner_source);
9579
9580 if (mode == SImode && inner_mode == SFmode)
9581 {
9582 emit_insn (gen_movsi_from_sf (dest, inner_source));
9583 return true;
9584 }
9585
9586 if (mode == SFmode && inner_mode == SImode)
9587 {
9588 emit_insn (gen_movsf_from_si (dest, inner_source));
9589 return true;
9590 }
9591 }
9592
9593 return false;
9594 }
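/* A standalone sketch (hypothetical, not GCC code) of what these SUBREGs
   denote at the language level: a bit-for-bit SImode <-> SFmode
   reinterpretation.  The UNSPEC insns are needed because VSX registers
   hold SFmode values in DFmode format, so the raw SFmode bit pattern must
   be produced (or consumed) explicitly first.  */

static unsigned int
sf_to_si_bits_sketch (float f)
{
  union { float f; unsigned int u; } pun;
  pun.f = f;
  return pun.u;   /* the 32 bits of F, viewed as an integer */
}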
9595
9596 /* Emit a move from SOURCE to DEST in mode MODE. */
9597 void
9598 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9599 {
9600 rtx operands[2];
9601 operands[0] = dest;
9602 operands[1] = source;
9603
9604 if (TARGET_DEBUG_ADDR)
9605 {
9606 fprintf (stderr,
9607 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9608 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9609 GET_MODE_NAME (mode),
9610 lra_in_progress,
9611 reload_completed,
9612 can_create_pseudo_p ());
9613 debug_rtx (dest);
9614 fprintf (stderr, "source:\n");
9615 debug_rtx (source);
9616 }
9617
9618 /* Check that we get CONST_WIDE_INT only when we should. */
9619 if (CONST_WIDE_INT_P (operands[1])
9620 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9621 gcc_unreachable ();
9622
9623 #ifdef HAVE_AS_GNU_ATTRIBUTE
9624 /* If we use a long double type, set the flags in .gnu_attribute that say
9625 what the long double type is. This is to allow the linker's warning
9626 message for the wrong long double to be useful, even if the function does
9627 not do a call (for example, doing a 128-bit add on power9 if the long
9628 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9629 are used and they aren't the default long double type. */
9630 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9631 {
9632 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9633 rs6000_passes_float = rs6000_passes_long_double = true;
9634
9635 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9636 rs6000_passes_float = rs6000_passes_long_double = true;
9637 }
9638 #endif
9639
9640 /* See if we need to special case SImode/SFmode SUBREG moves. */
9641 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9642 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9643 return;
9644
9645 /* Check if GCC is setting up a block move that will end up using FP
9646 registers as temporaries. We must make sure this is acceptable. */
9647 if (MEM_P (operands[0])
9648 && MEM_P (operands[1])
9649 && mode == DImode
9650 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9651 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9652 && ! (rs6000_slow_unaligned_access (SImode,
9653 (MEM_ALIGN (operands[0]) > 32
9654 ? 32 : MEM_ALIGN (operands[0])))
9655 || rs6000_slow_unaligned_access (SImode,
9656 (MEM_ALIGN (operands[1]) > 32
9657 ? 32 : MEM_ALIGN (operands[1]))))
9658 && ! MEM_VOLATILE_P (operands [0])
9659 && ! MEM_VOLATILE_P (operands [1]))
9660 {
9661 emit_move_insn (adjust_address (operands[0], SImode, 0),
9662 adjust_address (operands[1], SImode, 0));
9663 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9664 adjust_address (copy_rtx (operands[1]), SImode, 4));
9665 return;
9666 }
9667
9668 if (can_create_pseudo_p () && MEM_P (operands[0])
9669 && !gpc_reg_operand (operands[1], mode))
9670 operands[1] = force_reg (mode, operands[1]);
9671
9672 /* Recognize the case where operand[1] is a reference to thread-local
9673 data and load its address to a register. */
9674 if (tls_referenced_p (operands[1]))
9675 {
9676 enum tls_model model;
9677 rtx tmp = operands[1];
9678 rtx addend = NULL;
9679
9680 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9681 {
9682 addend = XEXP (XEXP (tmp, 0), 1);
9683 tmp = XEXP (XEXP (tmp, 0), 0);
9684 }
9685
9686 gcc_assert (SYMBOL_REF_P (tmp));
9687 model = SYMBOL_REF_TLS_MODEL (tmp);
9688 gcc_assert (model != 0);
9689
9690 tmp = rs6000_legitimize_tls_address (tmp, model);
9691 if (addend)
9692 {
9693 tmp = gen_rtx_PLUS (mode, tmp, addend);
9694 tmp = force_operand (tmp, operands[0]);
9695 }
9696 operands[1] = tmp;
9697 }
9698
9699 /* 128-bit constant floating-point values on Darwin should really be loaded
9700 as two parts. However, this premature splitting is a problem when DFmode
9701 values can go into Altivec registers. */
9702 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
9703 && !reg_addr[DFmode].scalar_in_vmx_p)
9704 {
9705 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9706 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9707 DFmode);
9708 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9709 GET_MODE_SIZE (DFmode)),
9710 simplify_gen_subreg (DFmode, operands[1], mode,
9711 GET_MODE_SIZE (DFmode)),
9712 DFmode);
9713 return;
9714 }
9715
9716 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9717 p1:SD) if p1 is not of floating point class and p0 is spilled as
9718 we can have no analogous movsd_store for this. */
9719 if (lra_in_progress && mode == DDmode
9720 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9721 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9722 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
9723 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9724 {
9725 enum reg_class cl;
9726 int regno = REGNO (SUBREG_REG (operands[1]));
9727
9728 if (!HARD_REGISTER_NUM_P (regno))
9729 {
9730 cl = reg_preferred_class (regno);
9731 regno = reg_renumber[regno];
9732 if (regno < 0)
9733 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9734 }
9735 if (regno >= 0 && ! FP_REGNO_P (regno))
9736 {
9737 mode = SDmode;
9738 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9739 operands[1] = SUBREG_REG (operands[1]);
9740 }
9741 }
9742 if (lra_in_progress
9743 && mode == SDmode
9744 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9745 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9746 && (REG_P (operands[1])
9747 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
9748 {
9749 int regno = reg_or_subregno (operands[1]);
9750 enum reg_class cl;
9751
9752 if (!HARD_REGISTER_NUM_P (regno))
9753 {
9754 cl = reg_preferred_class (regno);
9755 gcc_assert (cl != NO_REGS);
9756 regno = reg_renumber[regno];
9757 if (regno < 0)
9758 regno = ira_class_hard_regs[cl][0];
9759 }
9760 if (FP_REGNO_P (regno))
9761 {
9762 if (GET_MODE (operands[0]) != DDmode)
9763 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9764 emit_insn (gen_movsd_store (operands[0], operands[1]));
9765 }
9766 else if (INT_REGNO_P (regno))
9767 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9768 else
9769 gcc_unreachable ();
9770 return;
9771 }
9772 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9773 p1:DD)) if p0 is not of floating point class and p1 is spilled as
9774 we can have no analogous movsd_load for this. */
9775 if (lra_in_progress && mode == DDmode
9776 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
9777 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9778 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9779 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9780 {
9781 enum reg_class cl;
9782 int regno = REGNO (SUBREG_REG (operands[0]));
9783
9784 if (!HARD_REGISTER_NUM_P (regno))
9785 {
9786 cl = reg_preferred_class (regno);
9787 regno = reg_renumber[regno];
9788 if (regno < 0)
9789 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9790 }
9791 if (regno >= 0 && ! FP_REGNO_P (regno))
9792 {
9793 mode = SDmode;
9794 operands[0] = SUBREG_REG (operands[0]);
9795 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9796 }
9797 }
9798 if (lra_in_progress
9799 && mode == SDmode
9800 && (REG_P (operands[0])
9801 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
9802 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9803 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9804 {
9805 int regno = reg_or_subregno (operands[0]);
9806 enum reg_class cl;
9807
9808 if (!HARD_REGISTER_NUM_P (regno))
9809 {
9810 cl = reg_preferred_class (regno);
9811 gcc_assert (cl != NO_REGS);
9812 regno = reg_renumber[regno];
9813 if (regno < 0)
9814 regno = ira_class_hard_regs[cl][0];
9815 }
9816 if (FP_REGNO_P (regno))
9817 {
9818 if (GET_MODE (operands[1]) != DDmode)
9819 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9820 emit_insn (gen_movsd_load (operands[0], operands[1]));
9821 }
9822 else if (INT_REGNO_P (regno))
9823 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9824 else
9825 gcc_unreachable ();
9826 return;
9827 }
9828
9829 /* FIXME: In the long term, this switch statement should go away
9830 and be replaced by a sequence of tests based on things like
9831 mode == Pmode. */
9832 switch (mode)
9833 {
9834 case E_HImode:
9835 case E_QImode:
9836 if (CONSTANT_P (operands[1])
9837 && !CONST_INT_P (operands[1]))
9838 operands[1] = force_const_mem (mode, operands[1]);
9839 break;
9840
9841 case E_TFmode:
9842 case E_TDmode:
9843 case E_IFmode:
9844 case E_KFmode:
9845 if (FLOAT128_2REG_P (mode))
9846 rs6000_eliminate_indexed_memrefs (operands);
9847 /* fall through */
9848
9849 case E_DFmode:
9850 case E_DDmode:
9851 case E_SFmode:
9852 case E_SDmode:
9853 if (CONSTANT_P (operands[1])
9854 && ! easy_fp_constant (operands[1], mode))
9855 operands[1] = force_const_mem (mode, operands[1]);
9856 break;
9857
9858 case E_V16QImode:
9859 case E_V8HImode:
9860 case E_V4SFmode:
9861 case E_V4SImode:
9862 case E_V2DFmode:
9863 case E_V2DImode:
9864 case E_V1TImode:
9865 if (CONSTANT_P (operands[1])
9866 && !easy_vector_constant (operands[1], mode))
9867 operands[1] = force_const_mem (mode, operands[1]);
9868 break;
9869
9870 case E_SImode:
9871 case E_DImode:
9872 /* Use default pattern for address of ELF small data. */
9873 if (TARGET_ELF
9874 && mode == Pmode
9875 && DEFAULT_ABI == ABI_V4
9876 && (SYMBOL_REF_P (operands[1])
9877 || GET_CODE (operands[1]) == CONST)
9878 && small_data_operand (operands[1], mode))
9879 {
9880 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9881 return;
9882 }
9883
9884 if (DEFAULT_ABI == ABI_V4
9885 && mode == Pmode && mode == SImode
9886 && flag_pic == 1 && got_operand (operands[1], mode))
9887 {
9888 emit_insn (gen_movsi_got (operands[0], operands[1]));
9889 return;
9890 }
9891
9892 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9893 && TARGET_NO_TOC
9894 && ! flag_pic
9895 && mode == Pmode
9896 && CONSTANT_P (operands[1])
9897 && GET_CODE (operands[1]) != HIGH
9898 && !CONST_INT_P (operands[1]))
9899 {
9900 rtx target = (!can_create_pseudo_p ()
9901 ? operands[0]
9902 : gen_reg_rtx (mode));
9903
9904 /* If this is a function address on -mcall-aixdesc,
9905 convert it to the address of the descriptor. */
9906 if (DEFAULT_ABI == ABI_AIX
9907 && SYMBOL_REF_P (operands[1])
9908 && XSTR (operands[1], 0)[0] == '.')
9909 {
9910 const char *name = XSTR (operands[1], 0);
9911 rtx new_ref;
9912 while (*name == '.')
9913 name++;
9914 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9915 CONSTANT_POOL_ADDRESS_P (new_ref)
9916 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9917 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9918 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9919 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9920 operands[1] = new_ref;
9921 }
9922
9923 if (DEFAULT_ABI == ABI_DARWIN)
9924 {
9925 #if TARGET_MACHO
9926 if (MACHO_DYNAMIC_NO_PIC_P)
9927 {
9928 /* Take care of any required data indirection. */
9929 operands[1] = rs6000_machopic_legitimize_pic_address (
9930 operands[1], mode, operands[0]);
9931 if (operands[0] != operands[1])
9932 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9933 return;
9934 }
9935 #endif
9936 emit_insn (gen_macho_high (target, operands[1]));
9937 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9938 return;
9939 }
9940
9941 emit_insn (gen_elf_high (target, operands[1]));
9942 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9943 return;
9944 }
9945
9946 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9947 and we have put it in the TOC, we just need to make a TOC-relative
9948 reference to it. */
9949 if (TARGET_TOC
9950 && SYMBOL_REF_P (operands[1])
9951 && use_toc_relative_ref (operands[1], mode))
9952 operands[1] = create_TOC_reference (operands[1], operands[0]);
9953 else if (mode == Pmode
9954 && CONSTANT_P (operands[1])
9955 && GET_CODE (operands[1]) != HIGH
9956 && ((REG_P (operands[0])
9957 && FP_REGNO_P (REGNO (operands[0])))
9958 || !CONST_INT_P (operands[1])
9959 || (num_insns_constant (operands[1], mode)
9960 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9961 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
9962 && (TARGET_CMODEL == CMODEL_SMALL
9963 || can_create_pseudo_p ()
9964 || (REG_P (operands[0])
9965 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9966 {
9967
9968 #if TARGET_MACHO
9969 /* Darwin uses a special PIC legitimizer. */
9970 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9971 {
9972 operands[1] =
9973 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9974 operands[0]);
9975 if (operands[0] != operands[1])
9976 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9977 return;
9978 }
9979 #endif
9980
9981 /* If we are to limit the number of things we put in the TOC and
9982 this is a symbol plus a constant we can add in one insn,
9983 just put the symbol in the TOC and add the constant. */
9984 if (GET_CODE (operands[1]) == CONST
9985 && TARGET_NO_SUM_IN_TOC
9986 && GET_CODE (XEXP (operands[1], 0)) == PLUS
9987 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
9988 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
9989 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
9990 && ! side_effects_p (operands[0]))
9991 {
9992 rtx sym =
9993 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
9994 rtx other = XEXP (XEXP (operands[1], 0), 1);
9995
9996 sym = force_reg (mode, sym);
9997 emit_insn (gen_add3_insn (operands[0], sym, other));
9998 return;
9999 }
10000
10001 operands[1] = force_const_mem (mode, operands[1]);
10002
10003 if (TARGET_TOC
10004 && SYMBOL_REF_P (XEXP (operands[1], 0))
10005 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10006 {
10007 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10008 operands[0]);
10009 operands[1] = gen_const_mem (mode, tocref);
10010 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10011 }
10012 }
10013 break;
10014
10015 case E_TImode:
10016 if (!VECTOR_MEM_VSX_P (TImode))
10017 rs6000_eliminate_indexed_memrefs (operands);
10018 break;
10019
10020 case E_PTImode:
10021 rs6000_eliminate_indexed_memrefs (operands);
10022 break;
10023
10024 default:
10025 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10026 }
10027
10028 /* Above, we may have called force_const_mem which may have returned
10029 an invalid address. If we can, fix this up; otherwise, reload will
10030 have to deal with it. */
10031 if (MEM_P (operands[1]))
10032 operands[1] = validize_mem (operands[1]);
10033
10034 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10035 }
10036 \f
10037 /* Nonzero if we can use a floating-point register to pass this arg. */
10038 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10039 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10040 && (CUM)->fregno <= FP_ARG_MAX_REG \
10041 && TARGET_HARD_FLOAT)
10042
10043 /* Nonzero if we can use an AltiVec register to pass this arg. */
10044 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10045 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10046 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10047 && TARGET_ALTIVEC_ABI \
10048 && (NAMED))
10049
10050 /* Walk down the type tree of TYPE counting consecutive base elements.
10051 If *MODEP is VOIDmode, then set it to the first valid floating point
10052 or vector type. If a non-floating point or vector type is found, or
10053 if a floating point or vector type that doesn't match a non-VOIDmode
10054 *MODEP is found, then return -1, otherwise return the count in the
10055 sub-tree. */
10056
10057 static int
10058 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10059 {
10060 machine_mode mode;
10061 HOST_WIDE_INT size;
10062
10063 switch (TREE_CODE (type))
10064 {
10065 case REAL_TYPE:
10066 mode = TYPE_MODE (type);
10067 if (!SCALAR_FLOAT_MODE_P (mode))
10068 return -1;
10069
10070 if (*modep == VOIDmode)
10071 *modep = mode;
10072
10073 if (*modep == mode)
10074 return 1;
10075
10076 break;
10077
10078 case COMPLEX_TYPE:
10079 mode = TYPE_MODE (TREE_TYPE (type));
10080 if (!SCALAR_FLOAT_MODE_P (mode))
10081 return -1;
10082
10083 if (*modep == VOIDmode)
10084 *modep = mode;
10085
10086 if (*modep == mode)
10087 return 2;
10088
10089 break;
10090
10091 case VECTOR_TYPE:
10092 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10093 return -1;
10094
10095 /* Use V4SImode as representative of all 128-bit vector types. */
10096 size = int_size_in_bytes (type);
10097 switch (size)
10098 {
10099 case 16:
10100 mode = V4SImode;
10101 break;
10102 default:
10103 return -1;
10104 }
10105
10106 if (*modep == VOIDmode)
10107 *modep = mode;
10108
10109 /* Vector modes are considered to be opaque: two vectors are
10110 equivalent for the purposes of being homogeneous aggregates
10111 if they are the same size. */
10112 if (*modep == mode)
10113 return 1;
10114
10115 break;
10116
10117 case ARRAY_TYPE:
10118 {
10119 int count;
10120 tree index = TYPE_DOMAIN (type);
10121
10122 /* Can't handle incomplete types or sizes that are not
10123 fixed. */
10124 if (!COMPLETE_TYPE_P (type)
10125 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10126 return -1;
10127
10128 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10129 if (count == -1
10130 || !index
10131 || !TYPE_MAX_VALUE (index)
10132 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10133 || !TYPE_MIN_VALUE (index)
10134 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10135 || count < 0)
10136 return -1;
10137
10138 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10139 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10140
10141 /* There must be no padding. */
10142 if (wi::to_wide (TYPE_SIZE (type))
10143 != count * GET_MODE_BITSIZE (*modep))
10144 return -1;
10145
10146 return count;
10147 }
10148
10149 case RECORD_TYPE:
10150 {
10151 int count = 0;
10152 int sub_count;
10153 tree field;
10154
10155 /* Can't handle incomplete types or sizes that are not
10156 fixed. */
10157 if (!COMPLETE_TYPE_P (type)
10158 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10159 return -1;
10160
10161 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10162 {
10163 if (TREE_CODE (field) != FIELD_DECL)
10164 continue;
10165
10166 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10167 if (sub_count < 0)
10168 return -1;
10169 count += sub_count;
10170 }
10171
10172 /* There must be no padding. */
10173 if (wi::to_wide (TYPE_SIZE (type))
10174 != count * GET_MODE_BITSIZE (*modep))
10175 return -1;
10176
10177 return count;
10178 }
10179
10180 case UNION_TYPE:
10181 case QUAL_UNION_TYPE:
10182 {
10183 /* These aren't very interesting except in a degenerate case. */
10184 int count = 0;
10185 int sub_count;
10186 tree field;
10187
10188 /* Can't handle incomplete types or sizes that are not
10189 fixed. */
10190 if (!COMPLETE_TYPE_P (type)
10191 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10192 return -1;
10193
10194 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10195 {
10196 if (TREE_CODE (field) != FIELD_DECL)
10197 continue;
10198
10199 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10200 if (sub_count < 0)
10201 return -1;
10202 count = count > sub_count ? count : sub_count;
10203 }
10204
10205 /* There must be no padding. */
10206 if (wi::to_wide (TYPE_SIZE (type))
10207 != count * GET_MODE_BITSIZE (*modep))
10208 return -1;
10209
10210 return count;
10211 }
10212
10213 default:
10214 break;
10215 }
10216
10217 return -1;
10218 }
10219
10220 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10221 float or vector aggregate that shall be passed in FP/vector registers
10222 according to the ELFv2 ABI, return the homogeneous element mode in
10223 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10224
10225 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10226
10227 static bool
10228 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10229 machine_mode *elt_mode,
10230 int *n_elts)
10231 {
10232 /* Note that we do not accept complex types at the top level as
10233 homogeneous aggregates; these types are handled via the
10234 targetm.calls.split_complex_arg mechanism. Complex types
10235 can be elements of homogeneous aggregates, however. */
10236 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10237 && AGGREGATE_TYPE_P (type))
10238 {
10239 machine_mode field_mode = VOIDmode;
10240 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10241
10242 if (field_count > 0)
10243 {
10244 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10245 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10246
10247 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10248 up to AGGR_ARG_NUM_REG registers. */
10249 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10250 {
10251 if (elt_mode)
10252 *elt_mode = field_mode;
10253 if (n_elts)
10254 *n_elts = field_count;
10255 return true;
10256 }
10257 }
10258 }
10259
10260 if (elt_mode)
10261 *elt_mode = mode;
10262 if (n_elts)
10263 *n_elts = 1;
10264 return false;
10265 }
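/* An illustrative (hypothetical) type accepted by the walk above under
   the ELFv2 ABI: every leaf is DFmode, so the whole struct is a
   homogeneous aggregate of 1 + 2 + 2 = 5 elements, passed in five
   consecutive FP registers (within the AGGR_ARG_NUM_REG limit).  */

struct hfa_sketch
{
  double a;            /* REAL_TYPE: counts 1      */
  double b[2];         /* ARRAY_TYPE: counts 1 * 2 */
  _Complex double c;   /* COMPLEX_TYPE: counts 2   */
};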
10266
10267 /* Return a nonzero value to say to return the function value in
10268 memory, just as large structures are always returned. TYPE will be
10269 the data type of the value, and FNTYPE will be the type of the
10270 function doing the returning, or @code{NULL} for libcalls.
10271
10272 The AIX ABI for the RS/6000 specifies that all structures are
10273 returned in memory. The Darwin ABI does the same.
10274
10275 For the Darwin 64 Bit ABI, a function result can be returned in
10276 registers or in memory, depending on the size of the return data
10277 type. If it is returned in registers, the value occupies the same
10278 registers as it would if it were the first and only function
10279 argument. Otherwise, the function places its result in memory at
10280 the location pointed to by GPR3.
10281
10282 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10283 but a draft put them in memory, and GCC used to implement the draft
10284 instead of the final standard. Therefore, aix_struct_return
10285 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10286 compatibility can change DRAFT_V4_STRUCT_RET to override the
10287 default, and -m switches get the final word. See
10288 rs6000_option_override_internal for more details.
10289
10290 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10291 long double support is enabled. These values are returned in memory.
10292
10293 int_size_in_bytes returns -1 for variable size objects, which go in
10294 memory always. The cast to unsigned makes -1 > 8. */
10295
10296 static bool
10297 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10298 {
10299 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10300 if (TARGET_MACHO
10301 && rs6000_darwin64_abi
10302 && TREE_CODE (type) == RECORD_TYPE
10303 && int_size_in_bytes (type) > 0)
10304 {
10305 CUMULATIVE_ARGS valcum;
10306 rtx valret;
10307
10308 valcum.words = 0;
10309 valcum.fregno = FP_ARG_MIN_REG;
10310 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10311 /* Do a trial code generation as if this were going to be passed
10312 as an argument; if any part goes in memory, we return NULL. */
10313 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10314 if (valret)
10315 return false;
10316 /* Otherwise fall through to more conventional ABI rules. */
10317 }
10318
10319 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
10320 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10321 NULL, NULL))
10322 return false;
10323
10324 /* The ELFv2 ABI returns aggregates up to 16 bytes in registers. */
10325 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10326 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10327 return false;
10328
10329 if (AGGREGATE_TYPE_P (type)
10330 && (aix_struct_return
10331 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10332 return true;
10333
10334 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10335 modes only exist for GCC vector types if -maltivec. */
10336 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10337 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10338 return false;
10339
10340 /* Return synthetic vectors in memory. */
10341 if (TREE_CODE (type) == VECTOR_TYPE
10342 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10343 {
10344 static bool warned_for_return_big_vectors = false;
10345 if (!warned_for_return_big_vectors)
10346 {
10347 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10348 "non-standard ABI extension with no compatibility "
10349 "guarantee");
10350 warned_for_return_big_vectors = true;
10351 }
10352 return true;
10353 }
10354
10355 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10356 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10357 return true;
10358
10359 return false;
10360 }
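/* Illustrative (hypothetical) examples of the ELFv2 size cutoff tested
   above: the first struct fits the 16-byte limit and is returned in
   registers, the second exceeds it and is returned in memory.  */

struct elfv2_in_regs { long long a, b; };    /* 16 bytes -> registers */
struct elfv2_in_mem { long long a, b, c; };  /* 24 bytes -> memory    */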
10361
10362 /* Specify whether values returned in registers should be at the most
10363 significant end of a register. We want aggregates returned by
10364 value to match the way aggregates are passed to functions. */
10365
10366 static bool
10367 rs6000_return_in_msb (const_tree valtype)
10368 {
10369 return (DEFAULT_ABI == ABI_ELFv2
10370 && BYTES_BIG_ENDIAN
10371 && AGGREGATE_TYPE_P (valtype)
10372 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10373 == PAD_UPWARD));
10374 }
10375
10376 #ifdef HAVE_AS_GNU_ATTRIBUTE
10377 /* Return TRUE if a call to function FNDECL may be one that
10378 potentially affects the function calling ABI of the object file. */
10379
10380 static bool
10381 call_ABI_of_interest (tree fndecl)
10382 {
10383 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10384 {
10385 struct cgraph_node *c_node;
10386
10387 /* Libcalls are always interesting. */
10388 if (fndecl == NULL_TREE)
10389 return true;
10390
10391 /* Any call to an external function is interesting. */
10392 if (DECL_EXTERNAL (fndecl))
10393 return true;
10394
10395 /* Interesting functions that we are emitting in this object file. */
10396 c_node = cgraph_node::get (fndecl);
10397 c_node = c_node->ultimate_alias_target ();
10398 return !c_node->only_called_directly_p ();
10399 }
10400 return false;
10401 }
10402 #endif
10403
10404 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10405 for a call to a function whose data type is FNTYPE.
10406 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10407
10408 For incoming args we set the number of arguments in the prototype large
10409 so we never return a PARALLEL. */
10410
10411 void
10412 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10413 rtx libname ATTRIBUTE_UNUSED, int incoming,
10414 int libcall, int n_named_args,
10415 tree fndecl,
10416 machine_mode return_mode ATTRIBUTE_UNUSED)
10417 {
10418 static CUMULATIVE_ARGS zero_cumulative;
10419
10420 *cum = zero_cumulative;
10421 cum->words = 0;
10422 cum->fregno = FP_ARG_MIN_REG;
10423 cum->vregno = ALTIVEC_ARG_MIN_REG;
10424 cum->prototype = (fntype && prototype_p (fntype));
10425 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10426 ? CALL_LIBCALL : CALL_NORMAL);
10427 cum->sysv_gregno = GP_ARG_MIN_REG;
10428 cum->stdarg = stdarg_p (fntype);
10429 cum->libcall = libcall;
10430
10431 cum->nargs_prototype = 0;
10432 if (incoming || cum->prototype)
10433 cum->nargs_prototype = n_named_args;
10434
10435 /* Check for a longcall attribute. */
10436 if ((!fntype && rs6000_default_long_calls)
10437 || (fntype
10438 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10439 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10440 cum->call_cookie |= CALL_LONG;
10441 else if (DEFAULT_ABI != ABI_DARWIN)
10442 {
10443 bool is_local = (fndecl
10444 && !DECL_EXTERNAL (fndecl)
10445 && !DECL_WEAK (fndecl)
10446 && (*targetm.binds_local_p) (fndecl));
10447 if (is_local)
10448 ;
10449 else if (flag_plt)
10450 {
10451 if (fntype
10452 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10453 cum->call_cookie |= CALL_LONG;
10454 }
10455 else
10456 {
10457 if (!(fntype
10458 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10459 cum->call_cookie |= CALL_LONG;
10460 }
10461 }
10462
10463 if (TARGET_DEBUG_ARG)
10464 {
10465 fprintf (stderr, "\ninit_cumulative_args:");
10466 if (fntype)
10467 {
10468 tree ret_type = TREE_TYPE (fntype);
10469 fprintf (stderr, " ret code = %s,",
10470 get_tree_code_name (TREE_CODE (ret_type)));
10471 }
10472
10473 if (cum->call_cookie & CALL_LONG)
10474 fprintf (stderr, " longcall,");
10475
10476 fprintf (stderr, " proto = %d, nargs = %d\n",
10477 cum->prototype, cum->nargs_prototype);
10478 }
10479
10480 #ifdef HAVE_AS_GNU_ATTRIBUTE
10481 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10482 {
10483 cum->escapes = call_ABI_of_interest (fndecl);
10484 if (cum->escapes)
10485 {
10486 tree return_type;
10487
10488 if (fntype)
10489 {
10490 return_type = TREE_TYPE (fntype);
10491 return_mode = TYPE_MODE (return_type);
10492 }
10493 else
10494 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10495
10496 if (return_type != NULL)
10497 {
10498 if (TREE_CODE (return_type) == RECORD_TYPE
10499 && TYPE_TRANSPARENT_AGGR (return_type))
10500 {
10501 return_type = TREE_TYPE (first_field (return_type));
10502 return_mode = TYPE_MODE (return_type);
10503 }
10504 if (AGGREGATE_TYPE_P (return_type)
10505 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10506 <= 8))
10507 rs6000_returns_struct = true;
10508 }
10509 if (SCALAR_FLOAT_MODE_P (return_mode))
10510 {
10511 rs6000_passes_float = true;
10512 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10513 && (FLOAT128_IBM_P (return_mode)
10514 || FLOAT128_IEEE_P (return_mode)
10515 || (return_type != NULL
10516 && (TYPE_MAIN_VARIANT (return_type)
10517 == long_double_type_node))))
10518 rs6000_passes_long_double = true;
10519
10520 	      /* Note if we pass or return an IEEE 128-bit type.  We changed
10521 the mangling for these types, and we may need to make an alias
10522 with the old mangling. */
10523 if (FLOAT128_IEEE_P (return_mode))
10524 rs6000_passes_ieee128 = true;
10525 }
10526 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10527 rs6000_passes_vector = true;
10528 }
10529 }
10530 #endif
10531
10532 if (fntype
10533 && !TARGET_ALTIVEC
10534 && TARGET_ALTIVEC_ABI
10535 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10536 {
10537 error ("cannot return value in vector register because"
10538 " altivec instructions are disabled, use %qs"
10539 " to enable them", "-maltivec");
10540 }
10541 }
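/* Illustration (a sketch): the attribute below sets CALL_LONG on calls
   to far_away; such calls are typically expanded as indirect calls
   through the count register rather than as direct branches.  */
#if 0
void far_away (void) __attribute__ ((longcall));
#endif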
10542 \f
10543 /* The mode the ABI uses for a word. This is not the same as word_mode
10544 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10545
10546 static scalar_int_mode
10547 rs6000_abi_word_mode (void)
10548 {
10549 return TARGET_32BIT ? SImode : DImode;
10550 }
10551
10552 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10553 static char *
10554 rs6000_offload_options (void)
10555 {
10556 if (TARGET_64BIT)
10557 return xstrdup ("-foffload-abi=lp64");
10558 else
10559 return xstrdup ("-foffload-abi=ilp32");
10560 }
10561
10562 /* On rs6000, function arguments are promoted, as are function return
10563 values. */
10564
10565 static machine_mode
10566 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10567 machine_mode mode,
10568 int *punsignedp ATTRIBUTE_UNUSED,
10569 const_tree, int)
10570 {
10571 PROMOTE_MODE (mode, *punsignedp, type);
10572
10573 return mode;
10574 }
10575
10576 /* Return true if TYPE must be passed on the stack and not in registers. */
10577
10578 static bool
10579 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10580 {
10581 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10582 return must_pass_in_stack_var_size (mode, type);
10583 else
10584 return must_pass_in_stack_var_size_or_pad (mode, type);
10585 }
10586
10587 static inline bool
10588 is_complex_IBM_long_double (machine_mode mode)
10589 {
10590 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10591 }
10592
10593 /* Whether ABI_V4 passes MODE args to a function in floating point
10594 registers. */
10595
10596 static bool
10597 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10598 {
10599 if (!TARGET_HARD_FLOAT)
10600 return false;
10601 if (mode == DFmode)
10602 return true;
10603 if (mode == SFmode && named)
10604 return true;
10605 /* ABI_V4 passes complex IBM long double in 8 gprs.
10606 Stupid, but we can't change the ABI now. */
10607 if (is_complex_IBM_long_double (mode))
10608 return false;
10609 if (FLOAT128_2REG_P (mode))
10610 return true;
10611 if (DECIMAL_FLOAT_MODE_P (mode))
10612 return true;
10613 return false;
10614 }
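/* Worked example (illustrative): with hardware floating point under
   ABI_V4, a double goes in an FPR whether named or not, a float goes
   in an FPR only when named (variadic floats go in GPRs), and complex
   IBM long double always goes in GPRs.  */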
10615
10616 /* Implement TARGET_FUNCTION_ARG_PADDING.
10617
10618 For the AIX ABI structs are always stored left shifted in their
10619 argument slot. */
10620
10621 static pad_direction
10622 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10623 {
10624 #ifndef AGGREGATE_PADDING_FIXED
10625 #define AGGREGATE_PADDING_FIXED 0
10626 #endif
10627 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10628 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10629 #endif
10630
10631 if (!AGGREGATE_PADDING_FIXED)
10632 {
10633 /* GCC used to pass structures of the same size as integer types as
10634 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10635 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10636 passed padded downward, except that -mstrict-align further
10637 muddied the water in that multi-component structures of 2 and 4
10638 bytes in size were passed padded upward.
10639
10640 The following arranges for best compatibility with previous
10641 versions of gcc, but removes the -mstrict-align dependency. */
10642 if (BYTES_BIG_ENDIAN)
10643 {
10644 HOST_WIDE_INT size = 0;
10645
10646 if (mode == BLKmode)
10647 {
10648 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10649 size = int_size_in_bytes (type);
10650 }
10651 else
10652 size = GET_MODE_SIZE (mode);
10653
10654 if (size == 1 || size == 2 || size == 4)
10655 return PAD_DOWNWARD;
10656 }
10657 return PAD_UPWARD;
10658 }
10659
10660 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10661 {
10662 if (type != 0 && AGGREGATE_TYPE_P (type))
10663 return PAD_UPWARD;
10664 }
10665
10666 /* Fall back to the default. */
10667 return default_function_arg_padding (mode, type);
10668 }
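/* Illustration (a sketch): on a big-endian target without
   AGGREGATE_PADDING_FIXED, the first struct below pads downward (it is
   passed as if it were a short, in the low-order end of its slot),
   while the second pads upward.  */
#if 0
struct two { char a, b; };	/* size 2 -> PAD_DOWNWARD */
struct three { char a, b, c; };	/* size 3 -> PAD_UPWARD */
#endif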
10669
10670 /* If defined, a C expression that gives the alignment boundary, in bits,
10671 of an argument with the specified mode and type. If it is not defined,
10672 PARM_BOUNDARY is used for all arguments.
10673
10674 V.4 wants long longs and doubles to be double word aligned. Just
10675 testing the mode size is a boneheaded way to do this as it means
10676 that other types such as complex int are also double word aligned.
10677 However, we're stuck with this because changing the ABI might break
10678 existing library interfaces.
10679
10680 Quadword align Altivec/VSX vectors.
10681 Quadword align large synthetic vector types. */
10682
10683 static unsigned int
10684 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10685 {
10686 machine_mode elt_mode;
10687 int n_elts;
10688
10689 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10690
10691 if (DEFAULT_ABI == ABI_V4
10692 && (GET_MODE_SIZE (mode) == 8
10693 || (TARGET_HARD_FLOAT
10694 && !is_complex_IBM_long_double (mode)
10695 && FLOAT128_2REG_P (mode))))
10696 return 64;
10697 else if (FLOAT128_VECTOR_P (mode))
10698 return 128;
10699 else if (type && TREE_CODE (type) == VECTOR_TYPE
10700 && int_size_in_bytes (type) >= 8
10701 && int_size_in_bytes (type) < 16)
10702 return 64;
10703 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10704 || (type && TREE_CODE (type) == VECTOR_TYPE
10705 && int_size_in_bytes (type) >= 16))
10706 return 128;
10707
10708 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10709 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10710 -mcompat-align-parm is used. */
10711 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10712 || DEFAULT_ABI == ABI_ELFv2)
10713 && type && TYPE_ALIGN (type) > 64)
10714 {
10715 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10716 or homogeneous float/vector aggregates here. We already handled
10717 vector aggregates above, but still need to check for float here. */
10718 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10719 && !SCALAR_FLOAT_MODE_P (elt_mode));
10720
10721 /* We used to check for BLKmode instead of the above aggregate type
10722 check. Warn when this results in any difference to the ABI. */
10723 if (aggregate_p != (mode == BLKmode))
10724 {
10725 static bool warned;
10726 if (!warned && warn_psabi)
10727 {
10728 warned = true;
10729 inform (input_location,
10730 "the ABI of passing aggregates with %d-byte alignment"
10731 " has changed in GCC 5",
10732 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10733 }
10734 }
10735
10736 if (aggregate_p)
10737 return 128;
10738 }
10739
10740 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10741 implement the "aggregate type" check as a BLKmode check here; this
10742 means certain aggregate types are in fact not aligned. */
10743 if (TARGET_MACHO && rs6000_darwin64_abi
10744 && mode == BLKmode
10745 && type && TYPE_ALIGN (type) > 64)
10746 return 128;
10747
10748 return PARM_BOUNDARY;
10749 }
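/* Illustrative cases (a sketch, 64-bit ELFv2 assumed): a long long
   gets only the default boundary (the ABI_V4 case above does not
   apply), an AltiVec vector gets a 128-bit boundary, and an aggregate
   with 16-byte alignment, such as the one below, is quadword-aligned
   in the parameter save area.  */
#if 0
struct __attribute__ ((aligned (16))) box { char c[32]; };
#endif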
10750
10751 /* The offset in words to the start of the parameter save area. */
10752
10753 static unsigned int
10754 rs6000_parm_offset (void)
10755 {
10756 return (DEFAULT_ABI == ABI_V4 ? 2
10757 : DEFAULT_ABI == ABI_ELFv2 ? 4
10758 : 6);
10759 }
10760
10761 /* For a function parm of MODE and TYPE, return the starting word in
10762 the parameter area. NWORDS of the parameter area are already used. */
10763
10764 static unsigned int
10765 rs6000_parm_start (machine_mode mode, const_tree type,
10766 unsigned int nwords)
10767 {
10768 unsigned int align;
10769
10770 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10771 return nwords + (-(rs6000_parm_offset () + nwords) & align);
10772 }
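/* Worked example (illustrative): under ELFv2 (offset 4 words), a
   quadword-aligned argument (align == 1) placed after 3 words already
   used starts at

     3 + (-(4 + 3) & 1) == 3 + 1 == 4

   so one padding word is skipped and the argument begins on an even
   doubleword within the parameter save area.  */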
10773
10774 /* Compute the size (in words) of a function argument. */
10775
10776 static unsigned long
10777 rs6000_arg_size (machine_mode mode, const_tree type)
10778 {
10779 unsigned long size;
10780
10781 if (mode != BLKmode)
10782 size = GET_MODE_SIZE (mode);
10783 else
10784 size = int_size_in_bytes (type);
10785
10786 if (TARGET_32BIT)
10787 return (size + 3) >> 2;
10788 else
10789 return (size + 7) >> 3;
10790 }
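/* Worked example (illustrative): a 10-byte BLKmode argument occupies
   (10 + 7) >> 3 == 2 doublewords on a 64-bit target, but
   (10 + 3) >> 2 == 3 words on a 32-bit target.  */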
10791 \f
10792 /* Use this to flush pending int fields. */
10793
10794 static void
10795 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10796 HOST_WIDE_INT bitpos, int final)
10797 {
10798 unsigned int startbit, endbit;
10799 int intregs, intoffset;
10800
10801 /* Handle the situations where a float is taking up the first half
10802 of the GPR, and the other half is empty (typically due to
10803 	 alignment restrictions).  We can detect this by an 8-byte-aligned
10804 int field, or by seeing that this is the final flush for this
10805 argument. Count the word and continue on. */
10806 if (cum->floats_in_gpr == 1
10807 && (cum->intoffset % 64 == 0
10808 || (cum->intoffset == -1 && final)))
10809 {
10810 cum->words++;
10811 cum->floats_in_gpr = 0;
10812 }
10813
10814 if (cum->intoffset == -1)
10815 return;
10816
10817 intoffset = cum->intoffset;
10818 cum->intoffset = -1;
10819 cum->floats_in_gpr = 0;
10820
10821 if (intoffset % BITS_PER_WORD != 0)
10822 {
10823 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
10824 if (!int_mode_for_size (bits, 0).exists ())
10825 {
10826 /* We couldn't find an appropriate mode, which happens,
10827 e.g., in packed structs when there are 3 bytes to load.
10828 	     Move intoffset back to the beginning of the word in this
10829 case. */
10830 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10831 }
10832 }
10833
10834 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10835 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10836 intregs = (endbit - startbit) / BITS_PER_WORD;
10837 cum->words += intregs;
10838 /* words should be unsigned. */
10839   if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
10840     {
10841       int pad = (endbit / BITS_PER_WORD) - cum->words;
10842 cum->words += pad;
10843 }
10844 }
10845
10846 /* The darwin64 ABI calls for us to recurse down through structs,
10847 looking for elements passed in registers. Unfortunately, we have
10848 to track int register count here also because of misalignments
10849 in powerpc alignment mode. */
10850
10851 static void
10852 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10853 const_tree type,
10854 HOST_WIDE_INT startbitpos)
10855 {
10856 tree f;
10857
10858 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10859 if (TREE_CODE (f) == FIELD_DECL)
10860 {
10861 HOST_WIDE_INT bitpos = startbitpos;
10862 tree ftype = TREE_TYPE (f);
10863 machine_mode mode;
10864 if (ftype == error_mark_node)
10865 continue;
10866 mode = TYPE_MODE (ftype);
10867
10868 if (DECL_SIZE (f) != 0
10869 && tree_fits_uhwi_p (bit_position (f)))
10870 bitpos += int_bit_position (f);
10871
10872 /* ??? FIXME: else assume zero offset. */
10873
10874 if (TREE_CODE (ftype) == RECORD_TYPE)
10875 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10876 else if (USE_FP_FOR_ARG_P (cum, mode))
10877 {
10878 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10879 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10880 cum->fregno += n_fpregs;
10881 /* Single-precision floats present a special problem for
10882 us, because they are smaller than an 8-byte GPR, and so
10883 the structure-packing rules combined with the standard
10884 varargs behavior mean that we want to pack float/float
10885 and float/int combinations into a single register's
10886 space. This is complicated by the arg advance flushing,
10887 which works on arbitrarily large groups of int-type
10888 fields. */
10889 if (mode == SFmode)
10890 {
10891 if (cum->floats_in_gpr == 1)
10892 {
10893 /* Two floats in a word; count the word and reset
10894 the float count. */
10895 cum->words++;
10896 cum->floats_in_gpr = 0;
10897 }
10898 else if (bitpos % 64 == 0)
10899 {
10900 /* A float at the beginning of an 8-byte word;
10901 count it and put off adjusting cum->words until
10902 			 we see if an arg advance flush is going to do it
10903 for us. */
10904 cum->floats_in_gpr++;
10905 }
10906 else
10907 {
10908 /* The float is at the end of a word, preceded
10909 by integer fields, so the arg advance flush
10910 just above has already set cum->words and
10911 everything is taken care of. */
10912 }
10913 }
10914 else
10915 cum->words += n_fpregs;
10916 }
10917 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10918 {
10919 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10920 cum->vregno++;
10921 cum->words += 2;
10922 }
10923 else if (cum->intoffset == -1)
10924 cum->intoffset = bitpos;
10925 }
10926 }
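/* Worked example (illustrative, assuming FPRs remain available): for
   the Darwin64 struct { float f; float g; int i; }, the recursion
   above counts f at a doubleword boundary (floats_in_gpr == 1), packs
   g into the same GPR (cum->words++, float count reset), and the final
   flush adds one more word for i, i.e. two GPRs in all.  */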
10927
10928 /* Check for an item that needs to be considered specially under the darwin 64
10929 bit ABI. These are record types where the mode is BLK or the structure is
10930 8 bytes in size. */
10931 static int
10932 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10933 {
10934 return rs6000_darwin64_abi
10935 && ((mode == BLKmode
10936 && TREE_CODE (type) == RECORD_TYPE
10937 && int_size_in_bytes (type) > 0)
10938 || (type && TREE_CODE (type) == RECORD_TYPE
10939 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10940 }
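/* Illustration (a sketch): struct { char c[24]; } (BLKmode) and
   struct { int a, b; } (8 bytes) both take the special Darwin64
   recursive path; a 4-byte struct with a scalar mode does not.  */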
10941
10942 /* Update the data in CUM to advance over an argument
10943 of mode MODE and data type TYPE.
10944 (TYPE is null for libcalls where that information may not be available.)
10945
10946 Note that for args passed by reference, function_arg will be called
10947 with MODE and TYPE set to that of the pointer to the arg, not the arg
10948 itself. */
10949
10950 static void
10951 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10952 const_tree type, bool named, int depth)
10953 {
10954 machine_mode elt_mode;
10955 int n_elts;
10956
10957 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10958
10959 /* Only tick off an argument if we're not recursing. */
10960 if (depth == 0)
10961 cum->nargs_prototype--;
10962
10963 #ifdef HAVE_AS_GNU_ATTRIBUTE
10964 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
10965 && cum->escapes)
10966 {
10967 if (SCALAR_FLOAT_MODE_P (mode))
10968 {
10969 rs6000_passes_float = true;
10970 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10971 && (FLOAT128_IBM_P (mode)
10972 || FLOAT128_IEEE_P (mode)
10973 || (type != NULL
10974 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
10975 rs6000_passes_long_double = true;
10976
10977 	  /* Note if we pass or return an IEEE 128-bit type.  We changed the
10978 mangling for these types, and we may need to make an alias with
10979 the old mangling. */
10980 if (FLOAT128_IEEE_P (mode))
10981 rs6000_passes_ieee128 = true;
10982 }
10983 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
10984 rs6000_passes_vector = true;
10985 }
10986 #endif
10987
10988 if (TARGET_ALTIVEC_ABI
10989 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10990 || (type && TREE_CODE (type) == VECTOR_TYPE
10991 && int_size_in_bytes (type) == 16)))
10992 {
10993 bool stack = false;
10994
10995 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10996 {
10997 cum->vregno += n_elts;
10998
10999 if (!TARGET_ALTIVEC)
11000 error ("cannot pass argument in vector register because"
11001 " altivec instructions are disabled, use %qs"
11002 " to enable them", "-maltivec");
11003
11004 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11005 even if it is going to be passed in a vector register.
11006 Darwin does the same for variable-argument functions. */
11007 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11008 && TARGET_64BIT)
11009 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11010 stack = true;
11011 }
11012 else
11013 stack = true;
11014
11015 if (stack)
11016 {
11017 int align;
11018
11019 /* Vector parameters must be 16-byte aligned. In 32-bit
11020 mode this means we need to take into account the offset
11021 to the parameter save area. In 64-bit mode, they just
11022 have to start on an even word, since the parameter save
11023 area is 16-byte aligned. */
11024 if (TARGET_32BIT)
11025 align = -(rs6000_parm_offset () + cum->words) & 3;
11026 else
11027 align = cum->words & 1;
11028 cum->words += align + rs6000_arg_size (mode, type);
11029
11030 if (TARGET_DEBUG_ARG)
11031 {
11032 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11033 cum->words, align);
11034 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11035 cum->nargs_prototype, cum->prototype,
11036 GET_MODE_NAME (mode));
11037 }
11038 }
11039 }
11040 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11041 {
11042 int size = int_size_in_bytes (type);
11043 /* Variable sized types have size == -1 and are
11044 treated as if consisting entirely of ints.
11045 Pad to 16 byte boundary if needed. */
11046 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11047 && (cum->words % 2) != 0)
11048 cum->words++;
11049 /* For varargs, we can just go up by the size of the struct. */
11050 if (!named)
11051 cum->words += (size + 7) / 8;
11052 else
11053 {
11054 /* It is tempting to say int register count just goes up by
11055 sizeof(type)/8, but this is wrong in a case such as
11056 { int; double; int; } [powerpc alignment]. We have to
11057 grovel through the fields for these too. */
11058 cum->intoffset = 0;
11059 cum->floats_in_gpr = 0;
11060 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11061 rs6000_darwin64_record_arg_advance_flush (cum,
11062 size * BITS_PER_UNIT, 1);
11063 }
11064 if (TARGET_DEBUG_ARG)
11065 {
11066 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11067 cum->words, TYPE_ALIGN (type), size);
11068 fprintf (stderr,
11069 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11070 cum->nargs_prototype, cum->prototype,
11071 GET_MODE_NAME (mode));
11072 }
11073 }
11074 else if (DEFAULT_ABI == ABI_V4)
11075 {
11076 if (abi_v4_pass_in_fpr (mode, named))
11077 {
11078 /* _Decimal128 must use an even/odd register pair. This assumes
11079 that the register number is odd when fregno is odd. */
11080 if (mode == TDmode && (cum->fregno % 2) == 1)
11081 cum->fregno++;
11082
11083 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11084 <= FP_ARG_V4_MAX_REG)
11085 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11086 else
11087 {
11088 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11089 if (mode == DFmode || FLOAT128_IBM_P (mode)
11090 || mode == DDmode || mode == TDmode)
11091 cum->words += cum->words & 1;
11092 cum->words += rs6000_arg_size (mode, type);
11093 }
11094 }
11095 else
11096 {
11097 int n_words = rs6000_arg_size (mode, type);
11098 int gregno = cum->sysv_gregno;
11099
11100 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11101 	     So is any other 2-word item such as complex int, due to a
11102 	     historical mistake.  */
11103 if (n_words == 2)
11104 gregno += (1 - gregno) & 1;
11105
11106 /* Multi-reg args are not split between registers and stack. */
11107 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11108 {
11109 /* Long long is aligned on the stack. So are other 2 word
11110 items such as complex int due to a historical mistake. */
11111 if (n_words == 2)
11112 cum->words += cum->words & 1;
11113 cum->words += n_words;
11114 }
11115
11116 	  /* Note: we keep accumulating gregno even after we've started
11117 	     spilling to the stack; this is how expand_builtin_saveregs
11118 	     learns that spilling to the stack has begun.  */
11119 cum->sysv_gregno = gregno + n_words;
11120 }
11121
11122 if (TARGET_DEBUG_ARG)
11123 {
11124 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11125 cum->words, cum->fregno);
11126 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11127 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11128 fprintf (stderr, "mode = %4s, named = %d\n",
11129 GET_MODE_NAME (mode), named);
11130 }
11131 }
11132 else
11133 {
11134 int n_words = rs6000_arg_size (mode, type);
11135 int start_words = cum->words;
11136 int align_words = rs6000_parm_start (mode, type, start_words);
11137
11138 cum->words = align_words + n_words;
11139
11140 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11141 {
11142 /* _Decimal128 must be passed in an even/odd float register pair.
11143 This assumes that the register number is odd when fregno is
11144 odd. */
11145 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11146 cum->fregno++;
11147 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11148 }
11149
11150 if (TARGET_DEBUG_ARG)
11151 {
11152 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11153 cum->words, cum->fregno);
11154 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11155 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11156 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11157 named, align_words - start_words, depth);
11158 }
11159 }
11160 }
11161
11162 static void
11163 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11164 const_tree type, bool named)
11165 {
11166 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11167 0);
11168 }
11169
11170 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11171 structure between cum->intoffset and bitpos to integer registers. */
11172
11173 static void
11174 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11175 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11176 {
11177 machine_mode mode;
11178 unsigned int regno;
11179 unsigned int startbit, endbit;
11180 int this_regno, intregs, intoffset;
11181 rtx reg;
11182
11183 if (cum->intoffset == -1)
11184 return;
11185
11186 intoffset = cum->intoffset;
11187 cum->intoffset = -1;
11188
11189 /* If this is the trailing part of a word, try to only load that
11190 much into the register. Otherwise load the whole register. Note
11191 that in the latter case we may pick up unwanted bits. It's not a
11192 	     problem at the moment, but we may wish to revisit this.  */
11193
11194 if (intoffset % BITS_PER_WORD != 0)
11195 {
11196 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11197 if (!int_mode_for_size (bits, 0).exists (&mode))
11198 {
11199 /* We couldn't find an appropriate mode, which happens,
11200 e.g., in packed structs when there are 3 bytes to load.
11201 	       Move intoffset back to the beginning of the word in this
11202 case. */
11203 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11204 mode = word_mode;
11205 }
11206 }
11207 else
11208 mode = word_mode;
11209
11210 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11211 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11212 intregs = (endbit - startbit) / BITS_PER_WORD;
11213 this_regno = cum->words + intoffset / BITS_PER_WORD;
11214
11215 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11216 cum->use_stack = 1;
11217
11218 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11219 if (intregs <= 0)
11220 return;
11221
11222 intoffset /= BITS_PER_UNIT;
11223 do
11224 {
11225 regno = GP_ARG_MIN_REG + this_regno;
11226 reg = gen_rtx_REG (mode, regno);
11227 rvec[(*k)++] =
11228 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11229
11230 this_regno += 1;
11231 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11232 mode = word_mode;
11233 intregs -= 1;
11234 }
11235 while (intregs > 0);
11236 }
11237
11238 /* Recursive workhorse for the following. */
11239
11240 static void
11241 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11242 HOST_WIDE_INT startbitpos, rtx rvec[],
11243 int *k)
11244 {
11245 tree f;
11246
11247 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11248 if (TREE_CODE (f) == FIELD_DECL)
11249 {
11250 HOST_WIDE_INT bitpos = startbitpos;
11251 tree ftype = TREE_TYPE (f);
11252 machine_mode mode;
11253 if (ftype == error_mark_node)
11254 continue;
11255 mode = TYPE_MODE (ftype);
11256
11257 if (DECL_SIZE (f) != 0
11258 && tree_fits_uhwi_p (bit_position (f)))
11259 bitpos += int_bit_position (f);
11260
11261 /* ??? FIXME: else assume zero offset. */
11262
11263 if (TREE_CODE (ftype) == RECORD_TYPE)
11264 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11265 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11266 {
11267 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11268 #if 0
11269 switch (mode)
11270 {
11271 case E_SCmode: mode = SFmode; break;
11272 case E_DCmode: mode = DFmode; break;
11273 case E_TCmode: mode = TFmode; break;
11274 default: break;
11275 }
11276 #endif
11277 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11278 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11279 {
11280 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11281 && (mode == TFmode || mode == TDmode));
11282 /* Long double or _Decimal128 split over regs and memory. */
11283 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11284 		  cum->use_stack = 1;
11285 }
11286 rvec[(*k)++]
11287 = gen_rtx_EXPR_LIST (VOIDmode,
11288 gen_rtx_REG (mode, cum->fregno++),
11289 GEN_INT (bitpos / BITS_PER_UNIT));
11290 if (FLOAT128_2REG_P (mode))
11291 cum->fregno++;
11292 }
11293 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11294 {
11295 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11296 rvec[(*k)++]
11297 = gen_rtx_EXPR_LIST (VOIDmode,
11298 gen_rtx_REG (mode, cum->vregno++),
11299 GEN_INT (bitpos / BITS_PER_UNIT));
11300 }
11301 else if (cum->intoffset == -1)
11302 cum->intoffset = bitpos;
11303 }
11304 }
11305
11306 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11307 the register(s) to be used for each field and subfield of a struct
11308 being passed by value, along with the offset of where the
11309 register's value may be found in the block. FP fields go in FP
11310 register, vector fields go in vector registers, and everything
11311 else goes in int registers, packed as in memory.
11312
11313 This code is also used for function return values. RETVAL indicates
11314 whether this is the case.
11315
11316 Much of this is taken from the SPARC V9 port, which has a similar
11317 calling convention. */
11318
11319 static rtx
11320 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11321 bool named, bool retval)
11322 {
11323 rtx rvec[FIRST_PSEUDO_REGISTER];
11324 int k = 1, kbase = 1;
11325 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11326 /* This is a copy; modifications are not visible to our caller. */
11327 CUMULATIVE_ARGS copy_cum = *orig_cum;
11328 CUMULATIVE_ARGS *cum = &copy_cum;
11329
11330 /* Pad to 16 byte boundary if needed. */
11331 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11332 && (cum->words % 2) != 0)
11333 cum->words++;
11334
11335 cum->intoffset = 0;
11336 cum->use_stack = 0;
11337 cum->named = named;
11338
11339 /* Put entries into rvec[] for individual FP and vector fields, and
11340 for the chunks of memory that go in int regs. Note we start at
11341 element 1; 0 is reserved for an indication of using memory, and
11342 may or may not be filled in below. */
11343 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11344 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11345
11346 /* If any part of the struct went on the stack put all of it there.
11347 This hack is because the generic code for
11348 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11349 parts of the struct are not at the beginning. */
11350 if (cum->use_stack)
11351 {
11352 if (retval)
11353 return NULL_RTX; /* doesn't go in registers at all */
11354 kbase = 0;
11355 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11356 }
11357 if (k > 1 || cum->use_stack)
11358 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11359 else
11360 return NULL_RTX;
11361 }
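/* Illustrative result (a sketch, assuming this is the first argument
   and FPRs are free): for struct { double d; int i; } the code above
   builds

     (parallel [(expr_list (reg:DF f1) (const_int 0))
		(expr_list (reg:DI r4) (const_int 8))])

   i.e. the double travels in an FPR and the trailing int chunk in the
   next GPR, each element tagged with its byte offset into the block.  */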
11362
11363 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11364
11365 static rtx
11366 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11367 int align_words)
11368 {
11369 int n_units;
11370 int i, k;
11371 rtx rvec[GP_ARG_NUM_REG + 1];
11372
11373 if (align_words >= GP_ARG_NUM_REG)
11374 return NULL_RTX;
11375
11376 n_units = rs6000_arg_size (mode, type);
11377
11378 /* Optimize the simple case where the arg fits in one gpr, except in
11379 the case of BLKmode due to assign_parms assuming that registers are
11380 BITS_PER_WORD wide. */
11381 if (n_units == 0
11382 || (n_units == 1 && mode != BLKmode))
11383 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11384
11385 k = 0;
11386 if (align_words + n_units > GP_ARG_NUM_REG)
11387 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11388 using a magic NULL_RTX component.
11389 This is not strictly correct. Only some of the arg belongs in
11390 memory, not all of it. However, the normal scheme using
11391 function_arg_partial_nregs can result in unusual subregs, eg.
11392 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11393 store the whole arg to memory is often more efficient than code
11394 to store pieces, and we know that space is available in the right
11395 place for the whole arg. */
11396 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11397
11398 i = 0;
11399 do
11400 {
11401 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11402 rtx off = GEN_INT (i++ * 4);
11403 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11404 }
11405 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11406
11407 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11408 }
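/* Illustrative result (a sketch): a DImode argument whose first word
   lands in the last GPR (align_words == GP_ARG_NUM_REG - 1) yields

     (parallel:DI [(expr_list (nil) (const_int 0))
		   (expr_list (reg:SI r10) (const_int 0))])

   where the leading null element says the remainder lives in memory.  */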
11409
11410 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11411 but must also be copied into the parameter save area starting at
11412 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11413 to the GPRs and/or memory. Return the number of elements used. */
11414
11415 static int
11416 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11417 int align_words, rtx *rvec)
11418 {
11419 int k = 0;
11420
11421 if (align_words < GP_ARG_NUM_REG)
11422 {
11423 int n_words = rs6000_arg_size (mode, type);
11424
11425 if (align_words + n_words > GP_ARG_NUM_REG
11426 || mode == BLKmode
11427 || (TARGET_32BIT && TARGET_POWERPC64))
11428 {
11429 /* If this is partially on the stack, then we only
11430 include the portion actually in registers here. */
11431 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11432 int i = 0;
11433
11434 if (align_words + n_words > GP_ARG_NUM_REG)
11435 {
11436 /* Not all of the arg fits in gprs. Say that it goes in memory
11437 too, using a magic NULL_RTX component. Also see comment in
11438 rs6000_mixed_function_arg for why the normal
11439 function_arg_partial_nregs scheme doesn't work in this case. */
11440 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11441 }
11442
11443 do
11444 {
11445 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11446 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11447 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11448 }
11449 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11450 }
11451 else
11452 {
11453 /* The whole arg fits in gprs. */
11454 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11455 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11456 }
11457 }
11458 else
11459 {
11460 /* It's entirely in memory. */
11461 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11462 }
11463
11464 return k;
11465 }
11466
11467 /* RVEC is a vector of K components of an argument of mode MODE.
11468 Construct the final function_arg return value from it. */
11469
11470 static rtx
11471 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11472 {
11473 gcc_assert (k >= 1);
11474
11475 /* Avoid returning a PARALLEL in the trivial cases. */
11476 if (k == 1)
11477 {
11478 if (XEXP (rvec[0], 0) == NULL_RTX)
11479 return NULL_RTX;
11480
11481 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11482 return XEXP (rvec[0], 0);
11483 }
11484
11485 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11486 }
11487
11488 /* Determine where to put an argument to a function.
11489 Value is zero to push the argument on the stack,
11490 or a hard register in which to store the argument.
11491
11492 MODE is the argument's machine mode.
11493 TYPE is the data type of the argument (as a tree).
11494 This is null for libcalls where that information may
11495 not be available.
11496 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11497 the preceding args and about the function being called. It is
11498 not modified in this routine.
11499 NAMED is nonzero if this argument is a named parameter
11500 (otherwise it is an extra parameter matching an ellipsis).
11501
11502 On RS/6000 the first eight words of non-FP are normally in registers
11503 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11504 Under V.4, the first 8 FP args are in registers.
11505
11506 If this is floating-point and no prototype is specified, we use
11507 both an FP and integer register (or possibly FP reg and stack). Library
11508 functions (when CALL_LIBCALL is set) always have the proper types for args,
11509 so we can pass the FP value just in one register. emit_library_function
11510 doesn't support PARALLEL anyway.
11511
11512 Note that for args passed by reference, function_arg will be called
11513 with MODE and TYPE set to that of the pointer to the arg, not the arg
11514 itself. */
11515
11516 static rtx
11517 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11518 const_tree type, bool named)
11519 {
11520 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11521 enum rs6000_abi abi = DEFAULT_ABI;
11522 machine_mode elt_mode;
11523 int n_elts;
11524
11525   /* Return a marker to indicate whether the bit in CR1 that V.4 uses
11526      to say fp args were passed in registers needs to be set or cleared.
11527 Assume that we don't need the marker for software floating point,
11528 or compiler generated library calls. */
11529 if (mode == VOIDmode)
11530 {
11531 if (abi == ABI_V4
11532 && (cum->call_cookie & CALL_LIBCALL) == 0
11533 && (cum->stdarg
11534 || (cum->nargs_prototype < 0
11535 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11536 && TARGET_HARD_FLOAT)
11537 return GEN_INT (cum->call_cookie
11538 | ((cum->fregno == FP_ARG_MIN_REG)
11539 ? CALL_V4_SET_FP_ARGS
11540 : CALL_V4_CLEAR_FP_ARGS));
11541
11542 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11543 }
11544
11545 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11546
11547 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11548 {
11549 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11550 if (rslt != NULL_RTX)
11551 return rslt;
11552 /* Else fall through to usual handling. */
11553 }
11554
11555 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11556 {
11557 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11558 rtx r, off;
11559 int i, k = 0;
11560
11561 /* Do we also need to pass this argument in the parameter save area?
11562 Library support functions for IEEE 128-bit are assumed to not need the
11563 value passed both in GPRs and in vector registers. */
11564 if (TARGET_64BIT && !cum->prototype
11565 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11566 {
11567 int align_words = ROUND_UP (cum->words, 2);
11568 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11569 }
11570
11571 /* Describe where this argument goes in the vector registers. */
11572 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11573 {
11574 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11575 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11576 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11577 }
11578
11579 return rs6000_finish_function_arg (mode, rvec, k);
11580 }
11581 else if (TARGET_ALTIVEC_ABI
11582 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11583 || (type && TREE_CODE (type) == VECTOR_TYPE
11584 && int_size_in_bytes (type) == 16)))
11585 {
11586 if (named || abi == ABI_V4)
11587 return NULL_RTX;
11588 else
11589 {
11590 /* Vector parameters to varargs functions under AIX or Darwin
11591 get passed in memory and possibly also in GPRs. */
11592 int align, align_words, n_words;
11593 machine_mode part_mode;
11594
11595 /* Vector parameters must be 16-byte aligned. In 32-bit
11596 mode this means we need to take into account the offset
11597 to the parameter save area. In 64-bit mode, they just
11598 have to start on an even word, since the parameter save
11599 area is 16-byte aligned. */
11600 if (TARGET_32BIT)
11601 align = -(rs6000_parm_offset () + cum->words) & 3;
11602 else
11603 align = cum->words & 1;
11604 align_words = cum->words + align;
11605
11606 /* Out of registers? Memory, then. */
11607 if (align_words >= GP_ARG_NUM_REG)
11608 return NULL_RTX;
11609
11610 if (TARGET_32BIT && TARGET_POWERPC64)
11611 return rs6000_mixed_function_arg (mode, type, align_words);
11612
11613 /* The vector value goes in GPRs. Only the part of the
11614 value in GPRs is reported here. */
11615 part_mode = mode;
11616 n_words = rs6000_arg_size (mode, type);
11617 if (align_words + n_words > GP_ARG_NUM_REG)
11618 /* Fortunately, there are only two possibilities, the value
11619 is either wholly in GPRs or half in GPRs and half not. */
11620 part_mode = DImode;
11621
11622 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11623 }
11624 }
11625
11626 else if (abi == ABI_V4)
11627 {
11628 if (abi_v4_pass_in_fpr (mode, named))
11629 {
11630 /* _Decimal128 must use an even/odd register pair. This assumes
11631 that the register number is odd when fregno is odd. */
11632 if (mode == TDmode && (cum->fregno % 2) == 1)
11633 cum->fregno++;
11634
11635 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11636 <= FP_ARG_V4_MAX_REG)
11637 return gen_rtx_REG (mode, cum->fregno);
11638 else
11639 return NULL_RTX;
11640 }
11641 else
11642 {
11643 int n_words = rs6000_arg_size (mode, type);
11644 int gregno = cum->sysv_gregno;
11645
11646 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11647 	     So is any other 2-word item such as complex int, due to a
11648 	     historical mistake.  */
11649 if (n_words == 2)
11650 gregno += (1 - gregno) & 1;
11651
11652 /* Multi-reg args are not split between registers and stack. */
11653 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11654 return NULL_RTX;
11655
11656 if (TARGET_32BIT && TARGET_POWERPC64)
11657 return rs6000_mixed_function_arg (mode, type,
11658 gregno - GP_ARG_MIN_REG);
11659 return gen_rtx_REG (mode, gregno);
11660 }
11661 }
11662 else
11663 {
11664 int align_words = rs6000_parm_start (mode, type, cum->words);
11665
11666 /* _Decimal128 must be passed in an even/odd float register pair.
11667 This assumes that the register number is odd when fregno is odd. */
11668 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11669 cum->fregno++;
11670
11671 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11672 && !(TARGET_AIX && !TARGET_ELF
11673 && type != NULL && AGGREGATE_TYPE_P (type)))
11674 {
11675 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11676 rtx r, off;
11677 int i, k = 0;
11678 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11679 int fpr_words;
11680
11681 /* Do we also need to pass this argument in the parameter
11682 save area? */
11683 if (type && (cum->nargs_prototype <= 0
11684 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11685 && TARGET_XL_COMPAT
11686 && align_words >= GP_ARG_NUM_REG)))
11687 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11688
11689 /* Describe where this argument goes in the fprs. */
11690 for (i = 0; i < n_elts
11691 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11692 {
11693 /* Check if the argument is split over registers and memory.
11694 This can only ever happen for long double or _Decimal128;
11695 complex types are handled via split_complex_arg. */
11696 machine_mode fmode = elt_mode;
11697 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11698 {
11699 gcc_assert (FLOAT128_2REG_P (fmode));
11700 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11701 }
11702
11703 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11704 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11705 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11706 }
11707
11708 /* If there were not enough FPRs to hold the argument, the rest
11709 usually goes into memory. However, if the current position
11710 is still within the register parameter area, a portion may
11711 actually have to go into GPRs.
11712
11713 Note that it may happen that the portion of the argument
11714 passed in the first "half" of the first GPR was already
11715 passed in the last FPR as well.
11716
11717 For unnamed arguments, we already set up GPRs to cover the
11718 whole argument in rs6000_psave_function_arg, so there is
11719 nothing further to do at this point. */
11720 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11721 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11722 && cum->nargs_prototype > 0)
11723 {
11724 static bool warned;
11725
11726 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11727 int n_words = rs6000_arg_size (mode, type);
11728
11729 align_words += fpr_words;
11730 n_words -= fpr_words;
11731
11732 do
11733 {
11734 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11735 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11736 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11737 }
11738 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11739
11740 if (!warned && warn_psabi)
11741 {
11742 warned = true;
11743 inform (input_location,
11744 "the ABI of passing homogeneous %<float%> aggregates"
11745 " has changed in GCC 5");
11746 }
11747 }
11748
11749 return rs6000_finish_function_arg (mode, rvec, k);
11750 }
11751 else if (align_words < GP_ARG_NUM_REG)
11752 {
11753 if (TARGET_32BIT && TARGET_POWERPC64)
11754 return rs6000_mixed_function_arg (mode, type, align_words);
11755
11756 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11757 }
11758 else
11759 return NULL_RTX;
11760 }
11761 }
11762 \f
11763 /* For an arg passed partly in registers and partly in memory, this is
11764 the number of bytes passed in registers. For args passed entirely in
11765 registers or entirely in memory, zero. When an arg is described by a
11766 PARALLEL, perhaps using more than one register type, this function
11767 returns the number of bytes used by the first element of the PARALLEL. */
11768
11769 static int
11770 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11771 tree type, bool named)
11772 {
11773 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11774 bool passed_in_gprs = true;
11775 int ret = 0;
11776 int align_words;
11777 machine_mode elt_mode;
11778 int n_elts;
11779
11780 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11781
11782 if (DEFAULT_ABI == ABI_V4)
11783 return 0;
11784
11785 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11786 {
11787 /* If we are passing this arg in the fixed parameter save area (gprs or
11788 memory) as well as VRs, we do not use the partial bytes mechanism;
11789 instead, rs6000_function_arg will return a PARALLEL including a memory
11790 element as necessary. Library support functions for IEEE 128-bit are
11791 assumed to not need the value passed both in GPRs and in vector
11792 registers. */
11793 if (TARGET_64BIT && !cum->prototype
11794 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11795 return 0;
11796
11797 /* Otherwise, we pass in VRs only. Check for partial copies. */
11798 passed_in_gprs = false;
11799 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11800 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11801 }
11802
11803 /* In this complicated case we just disable the partial_nregs code. */
11804 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11805 return 0;
11806
11807 align_words = rs6000_parm_start (mode, type, cum->words);
11808
11809 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11810 && !(TARGET_AIX && !TARGET_ELF
11811 && type != NULL && AGGREGATE_TYPE_P (type)))
11812 {
11813 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11814
11815 /* If we are passing this arg in the fixed parameter save area
11816 (gprs or memory) as well as FPRs, we do not use the partial
11817 bytes mechanism; instead, rs6000_function_arg will return a
11818 PARALLEL including a memory element as necessary. */
11819 if (type
11820 && (cum->nargs_prototype <= 0
11821 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11822 && TARGET_XL_COMPAT
11823 && align_words >= GP_ARG_NUM_REG)))
11824 return 0;
11825
11826 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11827 passed_in_gprs = false;
11828 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11829 {
11830 /* Compute number of bytes / words passed in FPRs. If there
11831 is still space available in the register parameter area
11832 *after* that amount, a part of the argument will be passed
11833 in GPRs. In that case, the total amount passed in any
11834 registers is equal to the amount that would have been passed
11835 in GPRs if everything were passed there, so we fall back to
11836 the GPR code below to compute the appropriate value. */
11837 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11838 * MIN (8, GET_MODE_SIZE (elt_mode)));
11839 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11840
11841 if (align_words + fpr_words < GP_ARG_NUM_REG)
11842 passed_in_gprs = true;
11843 else
11844 ret = fpr;
11845 }
11846 }
11847
11848 if (passed_in_gprs
11849 && align_words < GP_ARG_NUM_REG
11850 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11851 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11852
11853 if (ret != 0 && TARGET_DEBUG_ARG)
11854 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11855
11856 return ret;
11857 }
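/* Worked example (illustrative): a 24-byte BLKmode struct whose slot
   starts at align_words == 6 on a 64-bit target fits only
   (8 - 6) * 8 == 16 bytes in r9/r10, so 16 is returned and the last
   8 bytes go to memory.  */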
11858 \f
11859 /* A C expression that indicates when an argument must be passed by
11860 reference. If nonzero for an argument, a copy of that argument is
11861 made in memory and a pointer to the argument is passed instead of
11862 the argument itself. The pointer is passed in whatever way is
11863 appropriate for passing a pointer to that type.
11864
11865 Under V.4, aggregates and long double are passed by reference.
11866
11867 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11868 reference unless the AltiVec vector extension ABI is in force.
11869
11870 As an extension to all ABIs, variable sized types are passed by
11871 reference. */
11872
11873 static bool
11874 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11875 machine_mode mode, const_tree type,
11876 bool named ATTRIBUTE_UNUSED)
11877 {
11878 if (!type)
11879 return 0;
11880
11881 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11882 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11883 {
11884 if (TARGET_DEBUG_ARG)
11885 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11886 return 1;
11887 }
11888
11889 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11890 {
11891 if (TARGET_DEBUG_ARG)
11892 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11893 return 1;
11894 }
11895
11896 if (int_size_in_bytes (type) < 0)
11897 {
11898 if (TARGET_DEBUG_ARG)
11899 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11900 return 1;
11901 }
11902
11903 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11904 modes only exist for GCC vector types if -maltivec. */
11905 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11906 {
11907 if (TARGET_DEBUG_ARG)
11908 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11909 return 1;
11910 }
11911
11912 /* Pass synthetic vectors in memory. */
11913 if (TREE_CODE (type) == VECTOR_TYPE
11914 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11915 {
11916 static bool warned_for_pass_big_vectors = false;
11917 if (TARGET_DEBUG_ARG)
11918 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11919 if (!warned_for_pass_big_vectors)
11920 {
11921 warning (OPT_Wpsabi, "GCC vector passed by reference: "
11922 "non-standard ABI extension with no compatibility "
11923 "guarantee");
11924 warned_for_pass_big_vectors = true;
11925 }
11926 return 1;
11927 }
11928
11929 return 0;
11930 }
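/* Illustration (a sketch): under the V.4 ABI, each parameter below is
   passed by reference by the logic above.  */
#if 0
struct pair { int a, b; };
typedef int big_vec __attribute__ ((vector_size (32)));

void callee (struct pair p,	/* V.4 aggregate */
	     big_vec v);	/* synthetic vector wider than 16 bytes */
#endif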
11931
11932 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
11933    already processed.  Return true if the parameter must be passed
11934 (fully or partially) on the stack. */
11935
11936 static bool
11937 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11938 {
11939 machine_mode mode;
11940 int unsignedp;
11941 rtx entry_parm;
11942
11943 /* Catch errors. */
11944 if (type == NULL || type == error_mark_node)
11945 return true;
11946
11947 /* Handle types with no storage requirement. */
11948 if (TYPE_MODE (type) == VOIDmode)
11949 return false;
11950
11951 /* Handle complex types. */
11952 if (TREE_CODE (type) == COMPLEX_TYPE)
11953 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
11954 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
11955
11956 /* Handle transparent aggregates. */
11957 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
11958 && TYPE_TRANSPARENT_AGGR (type))
11959 type = TREE_TYPE (first_field (type));
11960
11961 /* See if this arg was passed by invisible reference. */
11962 if (pass_by_reference (get_cumulative_args (args_so_far),
11963 TYPE_MODE (type), type, true))
11964 type = build_pointer_type (type);
11965
11966 /* Find mode as it is passed by the ABI. */
11967 unsignedp = TYPE_UNSIGNED (type);
11968 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
11969
11970 /* If we must pass in stack, we need a stack. */
11971 if (rs6000_must_pass_in_stack (mode, type))
11972 return true;
11973
11974 /* If there is no incoming register, we need a stack. */
11975 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
11976 if (entry_parm == NULL)
11977 return true;
11978
11979 /* Likewise if we need to pass both in registers and on the stack. */
11980 if (GET_CODE (entry_parm) == PARALLEL
11981 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
11982 return true;
11983
11984 /* Also true if we're partially in registers and partially not. */
11985 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
11986 return true;
11987
11988 /* Update info on where next arg arrives in registers. */
11989 rs6000_function_arg_advance (args_so_far, mode, type, true);
11990 return false;
11991 }
11992
11993 /* Return true if FUN has no prototype, has a variable argument
11994 list, or passes any parameter in memory. */
11995
11996 static bool
11997 rs6000_function_parms_need_stack (tree fun, bool incoming)
11998 {
11999 tree fntype, result;
12000 CUMULATIVE_ARGS args_so_far_v;
12001 cumulative_args_t args_so_far;
12002
12003 if (!fun)
12004 /* Must be a libcall, all of which only use reg parms. */
12005 return false;
12006
12007 fntype = fun;
12008 if (!TYPE_P (fun))
12009 fntype = TREE_TYPE (fun);
12010
12011 /* Varargs functions need the parameter save area. */
12012 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12013 return true;
12014
12015 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12016 args_so_far = pack_cumulative_args (&args_so_far_v);
12017
12018 /* When incoming, we will have been passed the function decl.
12019 It is necessary to use the decl to handle K&R style functions,
12020 where TYPE_ARG_TYPES may not be available. */
12021 if (incoming)
12022 {
12023 gcc_assert (DECL_P (fun));
12024 result = DECL_RESULT (fun);
12025 }
12026 else
12027 result = TREE_TYPE (fntype);
12028
12029 if (result && aggregate_value_p (result, fntype))
12030 {
12031 if (!TYPE_P (result))
12032 result = TREE_TYPE (result);
12033 result = build_pointer_type (result);
12034 rs6000_parm_needs_stack (args_so_far, result);
12035 }
12036
12037 if (incoming)
12038 {
12039 tree parm;
12040
12041 for (parm = DECL_ARGUMENTS (fun);
12042 parm && parm != void_list_node;
12043 parm = TREE_CHAIN (parm))
12044 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12045 return true;
12046 }
12047 else
12048 {
12049 function_args_iterator args_iter;
12050 tree arg_type;
12051
12052 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12053 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12054 return true;
12055 }
12056
12057 return false;
12058 }
12059
12060 /* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
12061 usually a constant depending on the ABI. However, in the ELFv2 ABI
12062 the register parameter area is optional when calling a function that
12063    has a prototype in scope, has no variable argument list, and passes
12064 all parameters in registers. */
12065
12066 int
12067 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12068 {
12069 int reg_parm_stack_space;
12070
12071 switch (DEFAULT_ABI)
12072 {
12073 default:
12074 reg_parm_stack_space = 0;
12075 break;
12076
12077 case ABI_AIX:
12078 case ABI_DARWIN:
12079 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12080 break;
12081
12082 case ABI_ELFv2:
12083 /* ??? Recomputing this every time is a bit expensive. Is there
12084 a place to cache this information? */
12085 if (rs6000_function_parms_need_stack (fun, incoming))
12086 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12087 else
12088 reg_parm_stack_space = 0;
12089 break;
12090 }
12091
12092 return reg_parm_stack_space;
12093 }
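/* Worked example (illustrative): under 64-bit ELFv2, a call to a
   prototyped int f (int, int) needs no parameter save area (0 bytes
   returned above), while a call to a variadic function reserves the
   full 64 bytes backing r3..r10.  */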
12094
12095 static void
12096 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12097 {
12098 int i;
12099 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12100
12101 if (nregs == 0)
12102 return;
12103
12104 for (i = 0; i < nregs; i++)
12105 {
12106 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12107 if (reload_completed)
12108 {
12109 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12110 tem = NULL_RTX;
12111 else
12112 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12113 i * GET_MODE_SIZE (reg_mode));
12114 }
12115 else
12116 tem = replace_equiv_address (tem, XEXP (tem, 0));
12117
12118 gcc_assert (tem);
12119
12120 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12121 }
12122 }
12123 \f
12124 /* Perform any actions needed for a function that is receiving a
12125 variable number of arguments.
12126
12127 CUM is as above.
12128
12129 MODE and TYPE are the mode and type of the current parameter.
12130
12131 PRETEND_SIZE is a variable that should be set to the amount of stack
12132 that must be pushed by the prolog to pretend that our caller pushed
12133 it.
12134
12135 Normally, this macro will push all remaining incoming registers on the
12136 stack and set PRETEND_SIZE to the length of the registers pushed. */
12137
12138 static void
12139 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12140 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12141 int no_rtl)
12142 {
12143 CUMULATIVE_ARGS next_cum;
12144 int reg_size = TARGET_32BIT ? 4 : 8;
12145 rtx save_area = NULL_RTX, mem;
12146 int first_reg_offset;
12147 alias_set_type set;
12148
12149 /* Skip the last named argument. */
12150 next_cum = *get_cumulative_args (cum);
12151 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12152
12153 if (DEFAULT_ABI == ABI_V4)
12154 {
12155 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12156
12157 if (! no_rtl)
12158 {
12159 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12160 HOST_WIDE_INT offset = 0;
12161
12162 /* Try to optimize the size of the varargs save area.
12163 The ABI requires that ap.reg_save_area is doubleword
12164 aligned, but we don't need to allocate space for all
12165 the bytes, only those in which we will actually save
12166 anything. */
12167 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12168 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12169 if (TARGET_HARD_FLOAT
12170 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12171 && cfun->va_list_fpr_size)
12172 {
12173 if (gpr_reg_num)
12174 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12175 * UNITS_PER_FP_WORD;
12176 if (cfun->va_list_fpr_size
12177 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12178 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12179 else
12180 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12181 * UNITS_PER_FP_WORD;
12182 }
12183 if (gpr_reg_num)
12184 {
12185 offset = -((first_reg_offset * reg_size) & ~7);
12186 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12187 {
12188 gpr_reg_num = cfun->va_list_gpr_size;
12189 if (reg_size == 4 && (first_reg_offset & 1))
12190 gpr_reg_num++;
12191 }
12192 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12193 }
12194 else if (fpr_size)
12195 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12196 * UNITS_PER_FP_WORD
12197 - (int) (GP_ARG_NUM_REG * reg_size);
12198
12199 if (gpr_size + fpr_size)
12200 {
12201 rtx reg_save_area
12202 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12203 gcc_assert (MEM_P (reg_save_area));
12204 reg_save_area = XEXP (reg_save_area, 0);
12205 if (GET_CODE (reg_save_area) == PLUS)
12206 {
12207 gcc_assert (XEXP (reg_save_area, 0)
12208 == virtual_stack_vars_rtx);
12209 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12210 offset += INTVAL (XEXP (reg_save_area, 1));
12211 }
12212 else
12213 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12214 }
12215
12216 cfun->machine->varargs_save_offset = offset;
12217 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12218 }
12219 }
12220 else
12221 {
12222 first_reg_offset = next_cum.words;
12223 save_area = crtl->args.internal_arg_pointer;
12224
12225 if (targetm.calls.must_pass_in_stack (mode, type))
12226 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12227 }
12228
12229 set = get_varargs_alias_set ();
12230 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12231 && cfun->va_list_gpr_size)
12232 {
12233 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12234
12235 if (va_list_gpr_counter_field)
12236 /* V4 va_list_gpr_size counts number of registers needed. */
12237 n_gpr = cfun->va_list_gpr_size;
12238 else
12239 /* char * va_list instead counts number of bytes needed. */
12240 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12241
12242 if (nregs > n_gpr)
12243 nregs = n_gpr;
12244
12245 mem = gen_rtx_MEM (BLKmode,
12246 plus_constant (Pmode, save_area,
12247 first_reg_offset * reg_size));
12248 MEM_NOTRAP_P (mem) = 1;
12249 set_mem_alias_set (mem, set);
12250 set_mem_align (mem, BITS_PER_WORD);
12251
12252 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12253 nregs);
12254 }
12255
12256 /* Save FP registers if needed. */
12257 if (DEFAULT_ABI == ABI_V4
12258 && TARGET_HARD_FLOAT
12259 && ! no_rtl
12260 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12261 && cfun->va_list_fpr_size)
12262 {
12263 int fregno = next_cum.fregno, nregs;
12264 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12265 rtx lab = gen_label_rtx ();
12266 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12267 * UNITS_PER_FP_WORD);
12268
12269 emit_jump_insn
12270 (gen_rtx_SET (pc_rtx,
12271 gen_rtx_IF_THEN_ELSE (VOIDmode,
12272 gen_rtx_NE (VOIDmode, cr1,
12273 const0_rtx),
12274 gen_rtx_LABEL_REF (VOIDmode, lab),
12275 pc_rtx)));
12276
12277 for (nregs = 0;
12278 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12279 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12280 {
12281 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12282 plus_constant (Pmode, save_area, off));
12283 MEM_NOTRAP_P (mem) = 1;
12284 set_mem_alias_set (mem, set);
12285 set_mem_align (mem, GET_MODE_ALIGNMENT (
12286 TARGET_HARD_FLOAT ? DFmode : SFmode));
12287 emit_move_insn (mem, gen_rtx_REG (
12288 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12289 }
12290
12291 emit_label (lab);
12292 }
12293 }
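/* Illustrative sketch (hypothetical user code): for a V4 varargs callee
   such as

     double
     average (int n, ...)
     {
       va_list ap;
       va_start (ap, n);
       ... va_arg (ap, double) ...
     }

   the expansion above spills the unnamed GPR arguments (r4..r10 here,
   since N consumes r3) into the register save area, and conditionally
   spills f1..f8, skipping the FPR stores when the caller cleared the
   CR bit the V4 ABI reserves for signalling FP register arguments.  */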
12294
12295 /* Create the va_list data type. */
12296
12297 static tree
12298 rs6000_build_builtin_va_list (void)
12299 {
12300 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12301
12302 /* For AIX, prefer 'char *' because that's what the system
12303 header files like. */
12304 if (DEFAULT_ABI != ABI_V4)
12305 return build_pointer_type (char_type_node);
12306
12307 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12308 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12309 get_identifier ("__va_list_tag"), record);
12310
12311 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12312 unsigned_char_type_node);
12313 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12314 unsigned_char_type_node);
12315 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12316 every user file. */
12317 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12318 get_identifier ("reserved"), short_unsigned_type_node);
12319 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12320 get_identifier ("overflow_arg_area"),
12321 ptr_type_node);
12322 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12323 get_identifier ("reg_save_area"),
12324 ptr_type_node);
12325
12326 va_list_gpr_counter_field = f_gpr;
12327 va_list_fpr_counter_field = f_fpr;
12328
12329 DECL_FIELD_CONTEXT (f_gpr) = record;
12330 DECL_FIELD_CONTEXT (f_fpr) = record;
12331 DECL_FIELD_CONTEXT (f_res) = record;
12332 DECL_FIELD_CONTEXT (f_ovf) = record;
12333 DECL_FIELD_CONTEXT (f_sav) = record;
12334
12335 TYPE_STUB_DECL (record) = type_decl;
12336 TYPE_NAME (record) = type_decl;
12337 TYPE_FIELDS (record) = f_gpr;
12338 DECL_CHAIN (f_gpr) = f_fpr;
12339 DECL_CHAIN (f_fpr) = f_res;
12340 DECL_CHAIN (f_res) = f_ovf;
12341 DECL_CHAIN (f_ovf) = f_sav;
12342
12343 layout_type (record);
12344
12345 /* The correct type is an array type of one element. */
12346 return build_array_type (record, build_index_type (size_zero_node));
12347 }
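/* For reference, the record constructed above corresponds to this
   user-visible layout from the 32-bit SysV (V4) ABI (a sketch; the
   real type is built directly as trees):

     typedef struct __va_list_tag
     {
       unsigned char gpr;               next GPR to use, 0..8
       unsigned char fpr;               next FPR to use, 0..8
       unsigned short reserved;         named padding, see above
       char *overflow_arg_area;         next argument passed on the stack
       char *reg_save_area;             base of the GPR/FPR save block
     } __va_list_tag;

     typedef __va_list_tag va_list[1];  one-element array, as returned
*/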
12348
12349 /* Implement va_start. */
12350
12351 static void
12352 rs6000_va_start (tree valist, rtx nextarg)
12353 {
12354 HOST_WIDE_INT words, n_gpr, n_fpr;
12355 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12356 tree gpr, fpr, ovf, sav, t;
12357
12358 /* Only SVR4 needs something special. */
12359 if (DEFAULT_ABI != ABI_V4)
12360 {
12361 std_expand_builtin_va_start (valist, nextarg);
12362 return;
12363 }
12364
12365 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12366 f_fpr = DECL_CHAIN (f_gpr);
12367 f_res = DECL_CHAIN (f_fpr);
12368 f_ovf = DECL_CHAIN (f_res);
12369 f_sav = DECL_CHAIN (f_ovf);
12370
12371 valist = build_simple_mem_ref (valist);
12372 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12373 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12374 f_fpr, NULL_TREE);
12375 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12376 f_ovf, NULL_TREE);
12377 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12378 f_sav, NULL_TREE);
12379
12380 /* Count number of gp and fp argument registers used. */
12381 words = crtl->args.info.words;
12382 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12383 GP_ARG_NUM_REG);
12384 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12385 FP_ARG_NUM_REG);
12386
12387 if (TARGET_DEBUG_ARG)
12388 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12389 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12390 words, n_gpr, n_fpr);
12391
12392 if (cfun->va_list_gpr_size)
12393 {
12394 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12395 build_int_cst (NULL_TREE, n_gpr));
12396 TREE_SIDE_EFFECTS (t) = 1;
12397 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12398 }
12399
12400 if (cfun->va_list_fpr_size)
12401 {
12402 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12403 build_int_cst (NULL_TREE, n_fpr));
12404 TREE_SIDE_EFFECTS (t) = 1;
12405 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12406
12407 #ifdef HAVE_AS_GNU_ATTRIBUTE
12408 if (call_ABI_of_interest (cfun->decl))
12409 rs6000_passes_float = true;
12410 #endif
12411 }
12412
12413 /* Find the overflow area. */
12414 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12415 if (words != 0)
12416 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12417 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12418 TREE_SIDE_EFFECTS (t) = 1;
12419 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12420
12421 /* If there were no va_arg invocations, don't set up the register
12422 save area. */
12423 if (!cfun->va_list_gpr_size
12424 && !cfun->va_list_fpr_size
12425 && n_gpr < GP_ARG_NUM_REG
12426 && n_fpr < FP_ARG_V4_MAX_REG)
12427 return;
12428
12429 /* Find the register save area. */
12430 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12431 if (cfun->machine->varargs_save_offset)
12432 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12433 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12434 TREE_SIDE_EFFECTS (t) = 1;
12435 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12436 }
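/* Conceptually, the trees emitted above amount to the following
   pseudo-C for the V4 ABI (field names as in
   rs6000_build_builtin_va_list; the <...> values are computed from
   crtl->args at expand time):

     ap->gpr = <named args passed in GPRs, capped at 8>;
     ap->fpr = <named args passed in FPRs, capped at 8>;
     ap->overflow_arg_area = <incoming arg pointer>
			     + <words> * MIN_UNITS_PER_WORD;
     ap->reg_save_area = <frame base> + <varargs_save_offset>;
*/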
12437
12438 /* Implement va_arg. */
12439
12440 static tree
12441 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12442 gimple_seq *post_p)
12443 {
12444 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12445 tree gpr, fpr, ovf, sav, reg, t, u;
12446 int size, rsize, n_reg, sav_ofs, sav_scale;
12447 tree lab_false, lab_over, addr;
12448 int align;
12449 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12450 int regalign = 0;
12451 gimple *stmt;
12452
12453 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12454 {
12455 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12456 return build_va_arg_indirect_ref (t);
12457 }
12458
12459 /* We need to deal with the fact that the Darwin ppc64 ABI is defined by an
12460 earlier version of gcc, with the property that it always applied alignment
12461 adjustments to the va-args (even for zero-sized types). The cheapest way
12462 to deal with this is to replicate the effect of the part of
12463 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12464 of relevance.
12465 We don't need to check for pass-by-reference because of the test above.
12466 We can return a simplified answer, since we know there's no offset to add.  */
12467
12468 if (((TARGET_MACHO
12469 && rs6000_darwin64_abi)
12470 || DEFAULT_ABI == ABI_ELFv2
12471 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12472 && integer_zerop (TYPE_SIZE (type)))
12473 {
12474 unsigned HOST_WIDE_INT align, boundary;
12475 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12476 align = PARM_BOUNDARY / BITS_PER_UNIT;
12477 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12478 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12479 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12480 boundary /= BITS_PER_UNIT;
12481 if (boundary > align)
12482 {
12483 tree t;
12484 /* This updates arg ptr by the amount that would be necessary
12485 to align the zero-sized (but not zero-alignment) item. */
12486 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12487 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12488 gimplify_and_add (t, pre_p);
12489
12490 t = fold_convert (sizetype, valist_tmp);
12491 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12492 fold_convert (TREE_TYPE (valist),
12493 fold_build2 (BIT_AND_EXPR, sizetype, t,
12494 size_int (-boundary))));
12495 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12496 gimplify_and_add (t, pre_p);
12497 }
12498 /* Since it is zero-sized there's no increment for the item itself. */
12499 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12500 return build_va_arg_indirect_ref (valist_tmp);
12501 }
12502
12503 if (DEFAULT_ABI != ABI_V4)
12504 {
12505 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12506 {
12507 tree elem_type = TREE_TYPE (type);
12508 machine_mode elem_mode = TYPE_MODE (elem_type);
12509 int elem_size = GET_MODE_SIZE (elem_mode);
12510
12511 if (elem_size < UNITS_PER_WORD)
12512 {
12513 tree real_part, imag_part;
12514 gimple_seq post = NULL;
12515
12516 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12517 &post);
12518 /* Copy the value into a temporary, lest the formal temporary
12519 be reused out from under us. */
12520 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12521 gimple_seq_add_seq (pre_p, post);
12522
12523 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12524 post_p);
12525
12526 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12527 }
12528 }
12529
12530 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12531 }
12532
12533 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12534 f_fpr = DECL_CHAIN (f_gpr);
12535 f_res = DECL_CHAIN (f_fpr);
12536 f_ovf = DECL_CHAIN (f_res);
12537 f_sav = DECL_CHAIN (f_ovf);
12538
12539 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12540 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12541 f_fpr, NULL_TREE);
12542 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12543 f_ovf, NULL_TREE);
12544 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12545 f_sav, NULL_TREE);
12546
12547 size = int_size_in_bytes (type);
12548 rsize = (size + 3) / 4;
12549 int pad = 4 * rsize - size;
12550 align = 1;
12551
12552 machine_mode mode = TYPE_MODE (type);
12553 if (abi_v4_pass_in_fpr (mode, false))
12554 {
12555 /* FP args go in FP registers, if present. */
12556 reg = fpr;
12557 n_reg = (size + 7) / 8;
12558 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12559 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12560 if (mode != SFmode && mode != SDmode)
12561 align = 8;
12562 }
12563 else
12564 {
12565 /* Otherwise into GP registers. */
12566 reg = gpr;
12567 n_reg = rsize;
12568 sav_ofs = 0;
12569 sav_scale = 4;
12570 if (n_reg == 2)
12571 align = 8;
12572 }
12573
12574 /* Pull the value out of the saved registers.... */
12575
12576 lab_over = NULL;
12577 addr = create_tmp_var (ptr_type_node, "addr");
12578
12579 /* AltiVec vectors never go in registers when -mabi=altivec. */
12580 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12581 align = 16;
12582 else
12583 {
12584 lab_false = create_artificial_label (input_location);
12585 lab_over = create_artificial_label (input_location);
12586
12587 /* Long long is aligned in the registers, as is any other item
12588 occupying two GPRs, such as complex int, due to a historical mistake. */
12589 u = reg;
12590 if (n_reg == 2 && reg == gpr)
12591 {
12592 regalign = 1;
12593 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12594 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12595 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12596 unshare_expr (reg), u);
12597 }
12598 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12599 reg number is 0 for f1, so we want to make it odd. */
12600 else if (reg == fpr && mode == TDmode)
12601 {
12602 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12603 build_int_cst (TREE_TYPE (reg), 1));
12604 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12605 }
12606
12607 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12608 t = build2 (GE_EXPR, boolean_type_node, u, t);
12609 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12610 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12611 gimplify_and_add (t, pre_p);
12612
12613 t = sav;
12614 if (sav_ofs)
12615 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12616
12617 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12618 build_int_cst (TREE_TYPE (reg), n_reg));
12619 u = fold_convert (sizetype, u);
12620 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12621 t = fold_build_pointer_plus (t, u);
12622
12623 /* _Decimal32 varargs are located in the second word of the 64-bit
12624 FP register for 32-bit binaries. */
12625 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12626 t = fold_build_pointer_plus_hwi (t, size);
12627
12628 /* Args are passed right-aligned. */
12629 if (BYTES_BIG_ENDIAN)
12630 t = fold_build_pointer_plus_hwi (t, pad);
12631
12632 gimplify_assign (addr, t, pre_p);
12633
12634 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12635
12636 stmt = gimple_build_label (lab_false);
12637 gimple_seq_add_stmt (pre_p, stmt);
12638
12639 if ((n_reg == 2 && !regalign) || n_reg > 2)
12640 {
12641 /* Ensure that we don't find any more args in regs.
12642 Alignment has been taken care of for the special cases.  */
12643 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12644 }
12645 }
12646
12647 /* ... otherwise out of the overflow area. */
12648
12649 /* Care for on-stack alignment if needed. */
12650 t = ovf;
12651 if (align != 1)
12652 {
12653 t = fold_build_pointer_plus_hwi (t, align - 1);
12654 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12655 build_int_cst (TREE_TYPE (t), -align));
12656 }
12657
12658 /* Args are passed right-aligned. */
12659 if (BYTES_BIG_ENDIAN)
12660 t = fold_build_pointer_plus_hwi (t, pad);
12661
12662 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12663
12664 gimplify_assign (unshare_expr (addr), t, pre_p);
12665
12666 t = fold_build_pointer_plus_hwi (t, size);
12667 gimplify_assign (unshare_expr (ovf), t, pre_p);
12668
12669 if (lab_over)
12670 {
12671 stmt = gimple_build_label (lab_over);
12672 gimple_seq_add_stmt (pre_p, stmt);
12673 }
12674
12675 if (STRICT_ALIGNMENT
12676 && (TYPE_ALIGN (type)
12677 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12678 {
12679 /* The value (of type complex double, for example) may not be
12680 aligned in memory in the saved registers, so copy via a
12681 temporary. (This is the same code as used for SPARC.) */
12682 tree tmp = create_tmp_var (type, "va_arg_tmp");
12683 tree dest_addr = build_fold_addr_expr (tmp);
12684
12685 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12686 3, dest_addr, addr, size_int (rsize * 4));
12687 TREE_ADDRESSABLE (tmp) = 1;
12688
12689 gimplify_and_add (copy, pre_p);
12690 addr = dest_addr;
12691 }
12692
12693 addr = fold_convert (ptrtype, addr);
12694 return build_va_arg_indirect_ref (addr);
12695 }
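/* A sketch of what the V4 gimplification above produces for
   va_arg (ap, double) with hard float (pseudo-C; 32 is the size of the
   GPR block that precedes the FPRs in the save area, and align_up is a
   hypothetical helper rounding up to the given alignment):

     if (ap->fpr < 8)
       addr = ap->reg_save_area + 32 + ap->fpr++ * 8;
     else
       {
	 ap->overflow_arg_area = align_up (ap->overflow_arg_area, 8);
	 addr = ap->overflow_arg_area;
	 ap->overflow_arg_area += 8;
       }
     result = *(double *) addr;
*/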
12696
12697 /* Builtins. */
12698
12699 static void
12700 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12701 {
12702 tree t;
12703 unsigned classify = rs6000_builtin_info[(int)code].attr;
12704 const char *attr_string = "";
12705
12706 gcc_assert (name != NULL);
12707 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12708
12709 if (rs6000_builtin_decls[(int)code])
12710 fatal_error (input_location,
12711 "internal error: builtin function %qs already processed",
12712 name);
12713
12714 rs6000_builtin_decls[(int)code] = t =
12715 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12716
12717 /* Set any special attributes. */
12718 if ((classify & RS6000_BTC_CONST) != 0)
12719 {
12720 /* const function, function only depends on the inputs. */
12721 TREE_READONLY (t) = 1;
12722 TREE_NOTHROW (t) = 1;
12723 attr_string = ", const";
12724 }
12725 else if ((classify & RS6000_BTC_PURE) != 0)
12726 {
12727 /* pure function, function can read global memory, but does not set any
12728 external state. */
12729 DECL_PURE_P (t) = 1;
12730 TREE_NOTHROW (t) = 1;
12731 attr_string = ", pure";
12732 }
12733 else if ((classify & RS6000_BTC_FP) != 0)
12734 {
12735 /* Function is a math function. If rounding mode is on, then treat the
12736 function as not reading global memory, but it can have arbitrary side
12737 effects. If it is off, then assume the function is a const function.
12738 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12739 builtin-attribute.def that is used for the math functions. */
12740 TREE_NOTHROW (t) = 1;
12741 if (flag_rounding_math)
12742 {
12743 DECL_PURE_P (t) = 1;
12744 DECL_IS_NOVOPS (t) = 1;
12745 attr_string = ", fp, pure";
12746 }
12747 else
12748 {
12749 TREE_READONLY (t) = 1;
12750 attr_string = ", fp, const";
12751 }
12752 }
12753 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12754 gcc_unreachable ();
12755
12756 if (TARGET_DEBUG_BUILTIN)
12757 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12758 (int)code, name, attr_string);
12759 }
12760
12761 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12762
12763 #undef RS6000_BUILTIN_0
12764 #undef RS6000_BUILTIN_1
12765 #undef RS6000_BUILTIN_2
12766 #undef RS6000_BUILTIN_3
12767 #undef RS6000_BUILTIN_A
12768 #undef RS6000_BUILTIN_D
12769 #undef RS6000_BUILTIN_H
12770 #undef RS6000_BUILTIN_P
12771 #undef RS6000_BUILTIN_X
12772
12773 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12774 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12775 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12776 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12777 { MASK, ICODE, NAME, ENUM },
12778
12779 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12780 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12781 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12782 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12783 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12784
12785 static const struct builtin_description bdesc_3arg[] =
12786 {
12787 #include "rs6000-builtin.def"
12788 };
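/* The builtin tables in this file all use the classic X-macro
   technique: rs6000-builtin.def expands one RS6000_BUILTIN_<kind>
   macro per builtin, and each table redefines exactly one kind to emit
   an initializer while the others expand to nothing.  A minimal sketch
   of the same pattern with hypothetical names:

     #define MY_BUILTIN_1(ENUM, NAME) { NAME, ENUM },
     #define MY_BUILTIN_2(ENUM, NAME)

     static const struct my_desc my_unary_ops[] =
     {
     #include "my-builtins.def"
     };
*/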
12789
12790 /* DST operations: void foo (void *, const int, const char). */
12791
12792 #undef RS6000_BUILTIN_0
12793 #undef RS6000_BUILTIN_1
12794 #undef RS6000_BUILTIN_2
12795 #undef RS6000_BUILTIN_3
12796 #undef RS6000_BUILTIN_A
12797 #undef RS6000_BUILTIN_D
12798 #undef RS6000_BUILTIN_H
12799 #undef RS6000_BUILTIN_P
12800 #undef RS6000_BUILTIN_X
12801
12802 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12803 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12804 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12805 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12806 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12807 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12808 { MASK, ICODE, NAME, ENUM },
12809
12810 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12811 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12812 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12813
12814 static const struct builtin_description bdesc_dst[] =
12815 {
12816 #include "rs6000-builtin.def"
12817 };
12818
12819 /* Simple binary operations: VECc = foo (VECa, VECb). */
12820
12821 #undef RS6000_BUILTIN_0
12822 #undef RS6000_BUILTIN_1
12823 #undef RS6000_BUILTIN_2
12824 #undef RS6000_BUILTIN_3
12825 #undef RS6000_BUILTIN_A
12826 #undef RS6000_BUILTIN_D
12827 #undef RS6000_BUILTIN_H
12828 #undef RS6000_BUILTIN_P
12829 #undef RS6000_BUILTIN_X
12830
12831 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12832 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12833 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12834 { MASK, ICODE, NAME, ENUM },
12835
12836 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12837 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12838 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12839 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12840 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12841 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12842
12843 static const struct builtin_description bdesc_2arg[] =
12844 {
12845 #include "rs6000-builtin.def"
12846 };
12847
12848 #undef RS6000_BUILTIN_0
12849 #undef RS6000_BUILTIN_1
12850 #undef RS6000_BUILTIN_2
12851 #undef RS6000_BUILTIN_3
12852 #undef RS6000_BUILTIN_A
12853 #undef RS6000_BUILTIN_D
12854 #undef RS6000_BUILTIN_H
12855 #undef RS6000_BUILTIN_P
12856 #undef RS6000_BUILTIN_X
12857
12858 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12859 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12860 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12861 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12862 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12863 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12864 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12865 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12866 { MASK, ICODE, NAME, ENUM },
12867
12868 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12869
12870 /* AltiVec predicates. */
12871
12872 static const struct builtin_description bdesc_altivec_preds[] =
12873 {
12874 #include "rs6000-builtin.def"
12875 };
12876
12877 /* ABS* operations. */
12878
12879 #undef RS6000_BUILTIN_0
12880 #undef RS6000_BUILTIN_1
12881 #undef RS6000_BUILTIN_2
12882 #undef RS6000_BUILTIN_3
12883 #undef RS6000_BUILTIN_A
12884 #undef RS6000_BUILTIN_D
12885 #undef RS6000_BUILTIN_H
12886 #undef RS6000_BUILTIN_P
12887 #undef RS6000_BUILTIN_X
12888
12889 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12890 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12891 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12892 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12893 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
12894 { MASK, ICODE, NAME, ENUM },
12895
12896 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12897 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12898 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12899 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12900
12901 static const struct builtin_description bdesc_abs[] =
12902 {
12903 #include "rs6000-builtin.def"
12904 };
12905
12906 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
12907 foo (VECa). */
12908
12909 #undef RS6000_BUILTIN_0
12910 #undef RS6000_BUILTIN_1
12911 #undef RS6000_BUILTIN_2
12912 #undef RS6000_BUILTIN_3
12913 #undef RS6000_BUILTIN_A
12914 #undef RS6000_BUILTIN_D
12915 #undef RS6000_BUILTIN_H
12916 #undef RS6000_BUILTIN_P
12917 #undef RS6000_BUILTIN_X
12918
12919 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12920 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
12921 { MASK, ICODE, NAME, ENUM },
12922
12923 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12924 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12925 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12926 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12927 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12928 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12929 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12930
12931 static const struct builtin_description bdesc_1arg[] =
12932 {
12933 #include "rs6000-builtin.def"
12934 };
12935
12936 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
12937
12938 #undef RS6000_BUILTIN_0
12939 #undef RS6000_BUILTIN_1
12940 #undef RS6000_BUILTIN_2
12941 #undef RS6000_BUILTIN_3
12942 #undef RS6000_BUILTIN_A
12943 #undef RS6000_BUILTIN_D
12944 #undef RS6000_BUILTIN_H
12945 #undef RS6000_BUILTIN_P
12946 #undef RS6000_BUILTIN_X
12947
12948 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
12949 { MASK, ICODE, NAME, ENUM },
12950
12951 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12952 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12953 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12954 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12955 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12956 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12957 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12958 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12959
12960 static const struct builtin_description bdesc_0arg[] =
12961 {
12962 #include "rs6000-builtin.def"
12963 };
12964
12965 /* HTM builtins. */
12966 #undef RS6000_BUILTIN_0
12967 #undef RS6000_BUILTIN_1
12968 #undef RS6000_BUILTIN_2
12969 #undef RS6000_BUILTIN_3
12970 #undef RS6000_BUILTIN_A
12971 #undef RS6000_BUILTIN_D
12972 #undef RS6000_BUILTIN_H
12973 #undef RS6000_BUILTIN_P
12974 #undef RS6000_BUILTIN_X
12975
12976 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12977 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12978 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12979 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12980 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12981 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12982 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
12983 { MASK, ICODE, NAME, ENUM },
12984
12985 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12986 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12987
12988 static const struct builtin_description bdesc_htm[] =
12989 {
12990 #include "rs6000-builtin.def"
12991 };
12992
12993 #undef RS6000_BUILTIN_0
12994 #undef RS6000_BUILTIN_1
12995 #undef RS6000_BUILTIN_2
12996 #undef RS6000_BUILTIN_3
12997 #undef RS6000_BUILTIN_A
12998 #undef RS6000_BUILTIN_D
12999 #undef RS6000_BUILTIN_H
13000 #undef RS6000_BUILTIN_P
13001
13002 /* Return true if a builtin function is overloaded. */
13003 bool
13004 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13005 {
13006 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13007 }
13008
13009 const char *
13010 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13011 {
13012 return rs6000_builtin_info[(int)fncode].name;
13013 }
13014
13015 /* Expand an expression EXP that calls a builtin without arguments. */
13016 static rtx
13017 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13018 {
13019 rtx pat;
13020 machine_mode tmode = insn_data[icode].operand[0].mode;
13021
13022 if (icode == CODE_FOR_nothing)
13023 /* Builtin not supported on this processor. */
13024 return 0;
13025
13026 if (icode == CODE_FOR_rs6000_mffsl
13027 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13028 {
13029 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
13030 return const0_rtx;
13031 }
13032
13033 if (target == 0
13034 || GET_MODE (target) != tmode
13035 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13036 target = gen_reg_rtx (tmode);
13037
13038 pat = GEN_FCN (icode) (target);
13039 if (! pat)
13040 return 0;
13041 emit_insn (pat);
13042
13043 return target;
13044 }
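/* A zero-operand builtin routed through the expander above is used as,
   e.g. (illustrative):

     double fpscr_lo = __builtin_mffsl ();

   Expansion just allocates a result register in the pattern's output
   mode and emits the single insn.  */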
13045
13046
13047 static rtx
13048 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13049 {
13050 rtx pat;
13051 tree arg0 = CALL_EXPR_ARG (exp, 0);
13052 tree arg1 = CALL_EXPR_ARG (exp, 1);
13053 rtx op0 = expand_normal (arg0);
13054 rtx op1 = expand_normal (arg1);
13055 machine_mode mode0 = insn_data[icode].operand[0].mode;
13056 machine_mode mode1 = insn_data[icode].operand[1].mode;
13057
13058 if (icode == CODE_FOR_nothing)
13059 /* Builtin not supported on this processor. */
13060 return 0;
13061
13062 /* If we got invalid arguments bail out before generating bad rtl. */
13063 if (arg0 == error_mark_node || arg1 == error_mark_node)
13064 return const0_rtx;
13065
13066 if (!CONST_INT_P (op0)
13067 || INTVAL (op0) > 255
13068 || INTVAL (op0) < 0)
13069 {
13070 error ("argument 1 must be an 8-bit field value");
13071 return const0_rtx;
13072 }
13073
13074 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13075 op0 = copy_to_mode_reg (mode0, op0);
13076
13077 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13078 op1 = copy_to_mode_reg (mode1, op1);
13079
13080 pat = GEN_FCN (icode) (op0, op1);
13081 if (!pat)
13082 return const0_rtx;
13083 emit_insn (pat);
13084
13085 return NULL_RTX;
13086 }
13087
13088 static rtx
13089 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13090 {
13091 rtx pat;
13092 tree arg0 = CALL_EXPR_ARG (exp, 0);
13093 rtx op0 = expand_normal (arg0);
13094
13095 if (icode == CODE_FOR_nothing)
13096 /* Builtin not supported on this processor. */
13097 return 0;
13098
13099 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13100 {
13101 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13102 "%<-msoft-float%>");
13103 return const0_rtx;
13104 }
13105
13106 /* If we got invalid arguments bail out before generating bad rtl. */
13107 if (arg0 == error_mark_node)
13108 return const0_rtx;
13109
13110 /* Only allow bit numbers 0 to 31. */
13111 if (!u5bit_cint_operand (op0, VOIDmode))
13112 {
13113 error ("Argument must be a constant between 0 and 31.");
13114 return const0_rtx;
13115 }
13116
13117 pat = GEN_FCN (icode) (op0);
13118 if (!pat)
13119 return const0_rtx;
13120 emit_insn (pat);
13121
13122 return NULL_RTX;
13123 }
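/* Illustrative use of the builtins expanded above.  FPSCR bits are
   numbered from 0 at the most significant end of its 32-bit view, so
   bits 30 and 31 form the binary rounding-mode field; clearing bit 30
   and setting bit 31 selects round-toward-zero:

     __builtin_mtfsb0 (30);
     __builtin_mtfsb1 (31);
*/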
13124
13125 static rtx
13126 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13127 {
13128 rtx pat;
13129 tree arg0 = CALL_EXPR_ARG (exp, 0);
13130 rtx op0 = expand_normal (arg0);
13131 machine_mode mode0 = insn_data[icode].operand[0].mode;
13132
13133 if (icode == CODE_FOR_nothing)
13134 /* Builtin not supported on this processor. */
13135 return 0;
13136
13137 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13138 {
13139 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13140 return const0_rtx;
13141 }
13142
13143 /* If we got invalid arguments bail out before generating bad rtl. */
13144 if (arg0 == error_mark_node)
13145 return const0_rtx;
13146
13147 /* If the argument is a constant, check the range: the argument can only
13148 be a 2-bit value.  Unfortunately, we can't check the range of the value
13149 at compile time if the argument is a variable.  The least significant
13150 two bits of the argument, regardless of type, are used to set the
13151 rounding mode.  All other bits are ignored.  */
13152 if (CONST_INT_P (op0) && !const_0_to_3_operand (op0, VOIDmode))
13153 {
13154 error ("argument must be a value between 0 and 3");
13155 return const0_rtx;
13156 }
13157
13158 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13159 op0 = copy_to_mode_reg (mode0, op0);
13160
13161 pat = GEN_FCN (icode) (op0);
13162 if (!pat)
13163 return const0_rtx;
13164 emit_insn (pat);
13165
13166 return NULL_RTX;
13167 }
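/* Illustrative use of the builtin expanded above; only the two
   low-order bits of the operand reach the RN field, so this selects
   round-toward-zero:

     __builtin_set_fpscr_rn (1);
*/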
13168 static rtx
13169 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13170 {
13171 rtx pat;
13172 tree arg0 = CALL_EXPR_ARG (exp, 0);
13173 rtx op0 = expand_normal (arg0);
13174 machine_mode mode0 = insn_data[icode].operand[0].mode;
13175
13176 if (TARGET_32BIT)
13177 /* Builtin not supported in 32-bit mode. */
13178 fatal_error (input_location,
13179 "%<__builtin_set_fpscr_drn%> is not supported "
13180 "in 32-bit mode");
13181
13182 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13183 {
13184 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13185 return const0_rtx;
13186 }
13187
13188 if (icode == CODE_FOR_nothing)
13189 /* Builtin not supported on this processor. */
13190 return 0;
13191
13192 /* If we got invalid arguments bail out before generating bad rtl. */
13193 if (arg0 == error_mark_node)
13194 return const0_rtx;
13195
13196 /* If the argument is a constant, check the range: the argument can only
13197 be a 3-bit value.  Unfortunately, we can't check the range of the value
13198 at compile time if the argument is a variable.  The least significant
13199 three bits of the argument, regardless of type, are used to set the
13200 decimal rounding mode.  All other bits are ignored.  */
13201 if (CONST_INT_P (op0) && !const_0_to_7_operand (op0, VOIDmode))
13202 {
13203 error ("argument must be a value between 0 and 7");
13204 return const0_rtx;
13205 }
13206
13207 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13208 op0 = copy_to_mode_reg (mode0, op0);
13209
13210 pat = GEN_FCN (icode) (op0);
13211 if (! pat)
13212 return const0_rtx;
13213 emit_insn (pat);
13214
13215 return NULL_RTX;
13216 }
13217
13218 static rtx
13219 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13220 {
13221 rtx pat;
13222 tree arg0 = CALL_EXPR_ARG (exp, 0);
13223 rtx op0 = expand_normal (arg0);
13224 machine_mode tmode = insn_data[icode].operand[0].mode;
13225 machine_mode mode0 = insn_data[icode].operand[1].mode;
13226
13227 if (icode == CODE_FOR_nothing)
13228 /* Builtin not supported on this processor. */
13229 return 0;
13230
13231 /* If we got invalid arguments bail out before generating bad rtl. */
13232 if (arg0 == error_mark_node)
13233 return const0_rtx;
13234
13235 if (icode == CODE_FOR_altivec_vspltisb
13236 || icode == CODE_FOR_altivec_vspltish
13237 || icode == CODE_FOR_altivec_vspltisw)
13238 {
13239 /* Only allow 5-bit *signed* literals. */
13240 if (!CONST_INT_P (op0)
13241 || INTVAL (op0) > 15
13242 || INTVAL (op0) < -16)
13243 {
13244 error ("argument 1 must be a 5-bit signed literal");
13245 return CONST0_RTX (tmode);
13246 }
13247 }
13248
13249 if (target == 0
13250 || GET_MODE (target) != tmode
13251 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13252 target = gen_reg_rtx (tmode);
13253
13254 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13255 op0 = copy_to_mode_reg (mode0, op0);
13256
13257 pat = GEN_FCN (icode) (target, op0);
13258 if (! pat)
13259 return 0;
13260 emit_insn (pat);
13261
13262 return target;
13263 }
13264
13265 static rtx
13266 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13267 {
13268 rtx pat, scratch1, scratch2;
13269 tree arg0 = CALL_EXPR_ARG (exp, 0);
13270 rtx op0 = expand_normal (arg0);
13271 machine_mode tmode = insn_data[icode].operand[0].mode;
13272 machine_mode mode0 = insn_data[icode].operand[1].mode;
13273
13274 /* If we have invalid arguments, bail out before generating bad rtl. */
13275 if (arg0 == error_mark_node)
13276 return const0_rtx;
13277
13278 if (target == 0
13279 || GET_MODE (target) != tmode
13280 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13281 target = gen_reg_rtx (tmode);
13282
13283 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13284 op0 = copy_to_mode_reg (mode0, op0);
13285
13286 scratch1 = gen_reg_rtx (mode0);
13287 scratch2 = gen_reg_rtx (mode0);
13288
13289 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13290 if (! pat)
13291 return 0;
13292 emit_insn (pat);
13293
13294 return target;
13295 }
13296
13297 static rtx
13298 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13299 {
13300 rtx pat;
13301 tree arg0 = CALL_EXPR_ARG (exp, 0);
13302 tree arg1 = CALL_EXPR_ARG (exp, 1);
13303 rtx op0 = expand_normal (arg0);
13304 rtx op1 = expand_normal (arg1);
13305 machine_mode tmode = insn_data[icode].operand[0].mode;
13306 machine_mode mode0 = insn_data[icode].operand[1].mode;
13307 machine_mode mode1 = insn_data[icode].operand[2].mode;
13308
13309 if (icode == CODE_FOR_nothing)
13310 /* Builtin not supported on this processor. */
13311 return 0;
13312
13313 /* If we got invalid arguments bail out before generating bad rtl. */
13314 if (arg0 == error_mark_node || arg1 == error_mark_node)
13315 return const0_rtx;
13316
13317 if (icode == CODE_FOR_unpackv1ti
13318 || icode == CODE_FOR_unpackkf
13319 || icode == CODE_FOR_unpacktf
13320 || icode == CODE_FOR_unpackif
13321 || icode == CODE_FOR_unpacktd)
13322 {
13323 /* Only allow 1-bit unsigned literals. */
13324 STRIP_NOPS (arg1);
13325 if (TREE_CODE (arg1) != INTEGER_CST
13326 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13327 {
13328 error ("argument 2 must be a 1-bit unsigned literal");
13329 return CONST0_RTX (tmode);
13330 }
13331 }
13332 else if (icode == CODE_FOR_altivec_vspltw)
13333 {
13334 /* Only allow 2-bit unsigned literals. */
13335 STRIP_NOPS (arg1);
13336 if (TREE_CODE (arg1) != INTEGER_CST
13337 || TREE_INT_CST_LOW (arg1) & ~3)
13338 {
13339 error ("argument 2 must be a 2-bit unsigned literal");
13340 return CONST0_RTX (tmode);
13341 }
13342 }
13343 else if (icode == CODE_FOR_altivec_vsplth)
13344 {
13345 /* Only allow 3-bit unsigned literals. */
13346 STRIP_NOPS (arg1);
13347 if (TREE_CODE (arg1) != INTEGER_CST
13348 || TREE_INT_CST_LOW (arg1) & ~7)
13349 {
13350 error ("argument 2 must be a 3-bit unsigned literal");
13351 return CONST0_RTX (tmode);
13352 }
13353 }
13354 else if (icode == CODE_FOR_altivec_vspltb)
13355 {
13356 /* Only allow 4-bit unsigned literals. */
13357 STRIP_NOPS (arg1);
13358 if (TREE_CODE (arg1) != INTEGER_CST
13359 || TREE_INT_CST_LOW (arg1) & ~15)
13360 {
13361 error ("argument 2 must be a 4-bit unsigned literal");
13362 return CONST0_RTX (tmode);
13363 }
13364 }
13365 else if (icode == CODE_FOR_altivec_vcfux
13366 || icode == CODE_FOR_altivec_vcfsx
13367 || icode == CODE_FOR_altivec_vctsxs
13368 || icode == CODE_FOR_altivec_vctuxs)
13369 {
13370 /* Only allow 5-bit unsigned literals. */
13371 STRIP_NOPS (arg1);
13372 if (TREE_CODE (arg1) != INTEGER_CST
13373 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13374 {
13375 error ("argument 2 must be a 5-bit unsigned literal");
13376 return CONST0_RTX (tmode);
13377 }
13378 }
13379 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13380 || icode == CODE_FOR_dfptstsfi_lt_dd
13381 || icode == CODE_FOR_dfptstsfi_gt_dd
13382 || icode == CODE_FOR_dfptstsfi_unordered_dd
13383 || icode == CODE_FOR_dfptstsfi_eq_td
13384 || icode == CODE_FOR_dfptstsfi_lt_td
13385 || icode == CODE_FOR_dfptstsfi_gt_td
13386 || icode == CODE_FOR_dfptstsfi_unordered_td)
13387 {
13388 /* Only allow 6-bit unsigned literals. */
13389 STRIP_NOPS (arg0);
13390 if (TREE_CODE (arg0) != INTEGER_CST
13391 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13392 {
13393 error ("argument 1 must be a 6-bit unsigned literal");
13394 return CONST0_RTX (tmode);
13395 }
13396 }
13397 else if (icode == CODE_FOR_xststdcqp_kf
13398 || icode == CODE_FOR_xststdcqp_tf
13399 || icode == CODE_FOR_xststdcdp
13400 || icode == CODE_FOR_xststdcsp
13401 || icode == CODE_FOR_xvtstdcdp
13402 || icode == CODE_FOR_xvtstdcsp)
13403 {
13404 /* Only allow 7-bit unsigned literals. */
13405 STRIP_NOPS (arg1);
13406 if (TREE_CODE (arg1) != INTEGER_CST
13407 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13408 {
13409 error ("argument 2 must be a 7-bit unsigned literal");
13410 return CONST0_RTX (tmode);
13411 }
13412 }
13413
13414 if (target == 0
13415 || GET_MODE (target) != tmode
13416 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13417 target = gen_reg_rtx (tmode);
13418
13419 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13420 op0 = copy_to_mode_reg (mode0, op0);
13421 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13422 op1 = copy_to_mode_reg (mode1, op1);
13423
13424 pat = GEN_FCN (icode) (target, op0, op1);
13425 if (! pat)
13426 return 0;
13427 emit_insn (pat);
13428
13429 return target;
13430 }
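/* The literal-range checks above surface to users through intrinsics
   such as vec_splat (illustrative; the second operand must be a
   compile-time literal in range for the element count):

     vector int a = ...;
     vector int b = vec_splat (a, 3);   OK: vspltw takes a 2-bit literal
     vector int c = vec_splat (a, k);   rejected unless k is a literal
*/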
13431
13432 static rtx
13433 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13434 {
13435 rtx pat, scratch;
13436 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13437 tree arg0 = CALL_EXPR_ARG (exp, 1);
13438 tree arg1 = CALL_EXPR_ARG (exp, 2);
13439 rtx op0 = expand_normal (arg0);
13440 rtx op1 = expand_normal (arg1);
13441 machine_mode tmode = SImode;
13442 machine_mode mode0 = insn_data[icode].operand[1].mode;
13443 machine_mode mode1 = insn_data[icode].operand[2].mode;
13444 int cr6_form_int;
13445
13446 if (TREE_CODE (cr6_form) != INTEGER_CST)
13447 {
13448 error ("argument 1 of %qs must be a constant",
13449 "__builtin_altivec_predicate");
13450 return const0_rtx;
13451 }
13452 else
13453 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13454
13455 gcc_assert (mode0 == mode1);
13456
13457 /* If we have invalid arguments, bail out before generating bad rtl. */
13458 if (arg0 == error_mark_node || arg1 == error_mark_node)
13459 return const0_rtx;
13460
13461 if (target == 0
13462 || GET_MODE (target) != tmode
13463 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13464 target = gen_reg_rtx (tmode);
13465
13466 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13467 op0 = copy_to_mode_reg (mode0, op0);
13468 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13469 op1 = copy_to_mode_reg (mode1, op1);
13470
13471 /* Note that for many of the relevant operations (e.g. cmpne or
13472 cmpeq) with float or double operands, it makes more sense for the
13473 mode of the allocated scratch register to be a vector of
13474 integers.  But the choice to copy the mode of operand 0 was made
13475 long ago and there are no plans to change it. */
13476 scratch = gen_reg_rtx (mode0);
13477
13478 pat = GEN_FCN (icode) (scratch, op0, op1);
13479 if (! pat)
13480 return 0;
13481 emit_insn (pat);
13482
13483 /* The vec_any* and vec_all* predicates use the same opcodes for two
13484 different operations, but the bits in CR6 will be different
13485 depending on what information we want. So we have to play tricks
13486 with CR6 to get the right bits out.
13487
13488 If you think this is disgusting, look at the specs for the
13489 AltiVec predicates. */
13490
13491 switch (cr6_form_int)
13492 {
13493 case 0:
13494 emit_insn (gen_cr6_test_for_zero (target));
13495 break;
13496 case 1:
13497 emit_insn (gen_cr6_test_for_zero_reverse (target));
13498 break;
13499 case 2:
13500 emit_insn (gen_cr6_test_for_lt (target));
13501 break;
13502 case 3:
13503 emit_insn (gen_cr6_test_for_lt_reverse (target));
13504 break;
13505 default:
13506 error ("argument 1 of %qs is out of range",
13507 "__builtin_altivec_predicate");
13508 break;
13509 }
13510
13511 return target;
13512 }
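/* User-level view of the CR6 handling above (illustrative): the
   vec_all_* and vec_any_* intrinsics run the same dot-form compare but
   test different CR6 bits, e.g.

     if (vec_all_eq (a, b))	every element compared equal
       ...
     if (vec_any_gt (a, b))	at least one element compared greater
       ...
*/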
13513
13514 rtx
13515 swap_endian_selector_for_mode (machine_mode mode)
13516 {
13517 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13518 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13519 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13520 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13521
13522 unsigned int *swaparray, i;
13523 rtx perm[16];
13524
13525 switch (mode)
13526 {
13527 case E_V1TImode:
13528 swaparray = swap1;
13529 break;
13530 case E_V2DFmode:
13531 case E_V2DImode:
13532 swaparray = swap2;
13533 break;
13534 case E_V4SFmode:
13535 case E_V4SImode:
13536 swaparray = swap4;
13537 break;
13538 case E_V8HImode:
13539 swaparray = swap8;
13540 break;
13541 default:
13542 gcc_unreachable ();
13543 }
13544
13545 for (i = 0; i < 16; ++i)
13546 perm[i] = GEN_INT (swaparray[i]);
13547
13548 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13549 gen_rtvec_v (16, perm)));
13550 }
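/* Worked example: for V4SImode the constant built above is the vperm
   selector { 3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12 }, which reverses
   the bytes within each 4-byte element and thus converts every lane
   between little-endian and big-endian byte order.  */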
13551
13552 static rtx
13553 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13554 {
13555 rtx pat, addr;
13556 tree arg0 = CALL_EXPR_ARG (exp, 0);
13557 tree arg1 = CALL_EXPR_ARG (exp, 1);
13558 machine_mode tmode = insn_data[icode].operand[0].mode;
13559 machine_mode mode0 = Pmode;
13560 machine_mode mode1 = Pmode;
13561 rtx op0 = expand_normal (arg0);
13562 rtx op1 = expand_normal (arg1);
13563
13564 if (icode == CODE_FOR_nothing)
13565 /* Builtin not supported on this processor. */
13566 return 0;
13567
13568 /* If we got invalid arguments bail out before generating bad rtl. */
13569 if (arg0 == error_mark_node || arg1 == error_mark_node)
13570 return const0_rtx;
13571
13572 if (target == 0
13573 || GET_MODE (target) != tmode
13574 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13575 target = gen_reg_rtx (tmode);
13576
13577 op1 = copy_to_mode_reg (mode1, op1);
13578
13579 /* For LVX, express the RTL accurately by ANDing the address with -16.
13580 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13581 so the raw address is fine. */
13582 if (icode == CODE_FOR_altivec_lvx_v1ti
13583 || icode == CODE_FOR_altivec_lvx_v2df
13584 || icode == CODE_FOR_altivec_lvx_v2di
13585 || icode == CODE_FOR_altivec_lvx_v4sf
13586 || icode == CODE_FOR_altivec_lvx_v4si
13587 || icode == CODE_FOR_altivec_lvx_v8hi
13588 || icode == CODE_FOR_altivec_lvx_v16qi)
13589 {
13590 rtx rawaddr;
13591 if (op0 == const0_rtx)
13592 rawaddr = op1;
13593 else
13594 {
13595 op0 = copy_to_mode_reg (mode0, op0);
13596 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13597 }
13598 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13599 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13600
13601 emit_insn (gen_rtx_SET (target, addr));
13602 }
13603 else
13604 {
13605 if (op0 == const0_rtx)
13606 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13607 else
13608 {
13609 op0 = copy_to_mode_reg (mode0, op0);
13610 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13611 gen_rtx_PLUS (Pmode, op1, op0));
13612 }
13613
13614 pat = GEN_FCN (icode) (target, addr);
13615 if (! pat)
13616 return 0;
13617 emit_insn (pat);
13618 }
13619
13620 return target;
13621 }
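/* The AND with -16 above is user-visible (illustrative): lvx ignores
   the low four bits of the effective address, so

     vector int v = vec_ld (0, ptr);

   loads from (ptr & ~15), and the RTL models that truncation
   explicitly instead of hiding it behind an UNSPEC.  */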
13622
13623 static rtx
13624 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13625 {
13626 rtx pat;
13627 tree arg0 = CALL_EXPR_ARG (exp, 0);
13628 tree arg1 = CALL_EXPR_ARG (exp, 1);
13629 tree arg2 = CALL_EXPR_ARG (exp, 2);
13630 rtx op0 = expand_normal (arg0);
13631 rtx op1 = expand_normal (arg1);
13632 rtx op2 = expand_normal (arg2);
13633 machine_mode mode0 = insn_data[icode].operand[0].mode;
13634 machine_mode mode1 = insn_data[icode].operand[1].mode;
13635 machine_mode mode2 = insn_data[icode].operand[2].mode;
13636
13637 if (icode == CODE_FOR_nothing)
13638 /* Builtin not supported on this processor. */
13639 return NULL_RTX;
13640
13641 /* If we got invalid arguments bail out before generating bad rtl. */
13642 if (arg0 == error_mark_node
13643 || arg1 == error_mark_node
13644 || arg2 == error_mark_node)
13645 return NULL_RTX;
13646
13647 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13648 op0 = copy_to_mode_reg (mode0, op0);
13649 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13650 op1 = copy_to_mode_reg (mode1, op1);
13651 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13652 op2 = copy_to_mode_reg (mode2, op2);
13653
13654 pat = GEN_FCN (icode) (op0, op1, op2);
13655 if (pat)
13656 emit_insn (pat);
13657
13658 return NULL_RTX;
13659 }
13660
13661 static rtx
13662 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13663 {
13664 tree arg0 = CALL_EXPR_ARG (exp, 0);
13665 tree arg1 = CALL_EXPR_ARG (exp, 1);
13666 tree arg2 = CALL_EXPR_ARG (exp, 2);
13667 rtx op0 = expand_normal (arg0);
13668 rtx op1 = expand_normal (arg1);
13669 rtx op2 = expand_normal (arg2);
13670 rtx pat, addr, rawaddr;
13671 machine_mode tmode = insn_data[icode].operand[0].mode;
13672 machine_mode smode = insn_data[icode].operand[1].mode;
13673 machine_mode mode1 = Pmode;
13674 machine_mode mode2 = Pmode;
13675
13676 /* Invalid arguments. Bail before doing anything stoopid! */
13677 if (arg0 == error_mark_node
13678 || arg1 == error_mark_node
13679 || arg2 == error_mark_node)
13680 return const0_rtx;
13681
13682 op2 = copy_to_mode_reg (mode2, op2);
13683
13684 /* For STVX, express the RTL accurately by ANDing the address with -16.
13685 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13686 so the raw address is fine. */
13687 if (icode == CODE_FOR_altivec_stvx_v2df
13688 || icode == CODE_FOR_altivec_stvx_v2di
13689 || icode == CODE_FOR_altivec_stvx_v4sf
13690 || icode == CODE_FOR_altivec_stvx_v4si
13691 || icode == CODE_FOR_altivec_stvx_v8hi
13692 || icode == CODE_FOR_altivec_stvx_v16qi)
13693 {
13694 if (op1 == const0_rtx)
13695 rawaddr = op2;
13696 else
13697 {
13698 op1 = copy_to_mode_reg (mode1, op1);
13699 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13700 }
13701
13702 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13703 addr = gen_rtx_MEM (tmode, addr);
13704
13705 op0 = copy_to_mode_reg (tmode, op0);
13706
13707 emit_insn (gen_rtx_SET (addr, op0));
13708 }
13709 else
13710 {
13711 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13712 op0 = copy_to_mode_reg (smode, op0);
13713
13714 if (op1 == const0_rtx)
13715 addr = gen_rtx_MEM (tmode, op2);
13716 else
13717 {
13718 op1 = copy_to_mode_reg (mode1, op1);
13719 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13720 }
13721
13722 pat = GEN_FCN (icode) (addr, op0);
13723 if (pat)
13724 emit_insn (pat);
13725 }
13726
13727 return NULL_RTX;
13728 }
13729
13730 /* Return the appropriate SPR number associated with the given builtin. */
13731 static inline HOST_WIDE_INT
13732 htm_spr_num (enum rs6000_builtins code)
13733 {
13734 if (code == HTM_BUILTIN_GET_TFHAR
13735 || code == HTM_BUILTIN_SET_TFHAR)
13736 return TFHAR_SPR;
13737 else if (code == HTM_BUILTIN_GET_TFIAR
13738 || code == HTM_BUILTIN_SET_TFIAR)
13739 return TFIAR_SPR;
13740 else if (code == HTM_BUILTIN_GET_TEXASR
13741 || code == HTM_BUILTIN_SET_TEXASR)
13742 return TEXASR_SPR;
13743 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13744 || code == HTM_BUILTIN_SET_TEXASRU);
13745 return TEXASRU_SPR;
13746 }
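/* These SPRs reach user code through builtins such as (illustrative;
   handler_address is hypothetical user data):

     unsigned long texasr = __builtin_get_texasr ();
     __builtin_set_tfhar (handler_address);
*/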
13747
13748 /* Return the correct ICODE value depending on whether we are
13749 setting or reading the HTM SPRs. */
13750 static inline enum insn_code
13751 rs6000_htm_spr_icode (bool nonvoid)
13752 {
13753 if (nonvoid)
13754 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13755 else
13756 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13757 }
13758
13759 /* Expand the HTM builtin in EXP and store the result in TARGET.
13760 Store true in *EXPANDEDP if we found a builtin to expand. */
13761 static rtx
13762 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13763 {
13764 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13765 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13766 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13767 const struct builtin_description *d;
13768 size_t i;
13769
13770 *expandedp = true;
13771
13772 if (!TARGET_POWERPC64
13773 && (fcode == HTM_BUILTIN_TABORTDC
13774 || fcode == HTM_BUILTIN_TABORTDCI))
13775 {
13776 size_t uns_fcode = (size_t)fcode;
13777 const char *name = rs6000_builtin_info[uns_fcode].name;
13778 error ("builtin %qs is only valid in 64-bit mode", name);
13779 return const0_rtx;
13780 }
13781
13782 /* Expand the HTM builtins. */
13783 d = bdesc_htm;
13784 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13785 if (d->code == fcode)
13786 {
13787 rtx op[MAX_HTM_OPERANDS], pat;
13788 int nopnds = 0;
13789 tree arg;
13790 call_expr_arg_iterator iter;
13791 unsigned attr = rs6000_builtin_info[fcode].attr;
13792 enum insn_code icode = d->icode;
13793 const struct insn_operand_data *insn_op;
13794 bool uses_spr = (attr & RS6000_BTC_SPR);
13795 rtx cr = NULL_RTX;
13796
13797 if (uses_spr)
13798 icode = rs6000_htm_spr_icode (nonvoid);
13799 insn_op = &insn_data[icode].operand[0];
13800
13801 if (nonvoid)
13802 {
13803 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
13804 if (!target
13805 || GET_MODE (target) != tmode
13806 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13807 target = gen_reg_rtx (tmode);
13808 if (uses_spr)
13809 op[nopnds++] = target;
13810 }
13811
13812 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13813 {
13814 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13815 return const0_rtx;
13816
13817 insn_op = &insn_data[icode].operand[nopnds];
13818
13819 op[nopnds] = expand_normal (arg);
13820
13821 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13822 {
13823 if (!strcmp (insn_op->constraint, "n"))
13824 {
13825 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13826 if (!CONST_INT_P (op[nopnds]))
13827 error ("argument %d must be an unsigned literal", arg_num);
13828 else
13829 error ("argument %d is an unsigned literal that is "
13830 "out of range", arg_num);
13831 return const0_rtx;
13832 }
13833 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13834 }
13835
13836 nopnds++;
13837 }
13838
13839 /* Handle the builtins for extended mnemonics. These accept
13840 no arguments, but map to builtins that take arguments. */
13841 switch (fcode)
13842 {
13843 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13844 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13845 op[nopnds++] = GEN_INT (1);
13846 if (flag_checking)
13847 attr |= RS6000_BTC_UNARY;
13848 break;
13849 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13850 op[nopnds++] = GEN_INT (0);
13851 if (flag_checking)
13852 attr |= RS6000_BTC_UNARY;
13853 break;
13854 default:
13855 break;
13856 }
13857
13858 /* If this builtin accesses SPRs, then pass in the appropriate
13859 SPR number as the last operand. */
13860 if (uses_spr)
13861 {
13862 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13863 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13864 }
13865 /* If this builtin accesses a CR, then pass in a scratch
13866 CR as the last operand. */
13867 else if (attr & RS6000_BTC_CR)
13868 {
13869 cr = gen_reg_rtx (CCmode);
13870 op[nopnds++] = cr;
13871 }
13872 if (flag_checking)
13873 {
13874 int expected_nopnds = 0;
13875 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
13876 expected_nopnds = 1;
13877 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
13878 expected_nopnds = 2;
13879 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
13880 expected_nopnds = 3;
13881 if (!(attr & RS6000_BTC_VOID))
13882 expected_nopnds += 1;
13883 if (uses_spr)
13884 expected_nopnds += 1;
13885
13886 gcc_assert (nopnds == expected_nopnds
13887 && nopnds <= MAX_HTM_OPERANDS);
13888 }
13889
13890 switch (nopnds)
13891 {
13892 case 1:
13893 pat = GEN_FCN (icode) (op[0]);
13894 break;
13895 case 2:
13896 pat = GEN_FCN (icode) (op[0], op[1]);
13897 break;
13898 case 3:
13899 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
13900 break;
13901 case 4:
13902 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
13903 break;
13904 default:
13905 gcc_unreachable ();
13906 }
13907 if (!pat)
13908 return NULL_RTX;
13909 emit_insn (pat);
13910
13911 if (attr & RS6000_BTC_CR)
13912 {
13913 if (fcode == HTM_BUILTIN_TBEGIN)
13914 {
13915 /* Emit code to set TARGET to true or false depending on
13916 whether the tbegin. instruction succeeded or failed
13917 to start a transaction. We do this by placing the 1's
13918 complement of CR's EQ bit into TARGET. */
13919 rtx scratch = gen_reg_rtx (SImode);
13920 emit_insn (gen_rtx_SET (scratch,
13921 gen_rtx_EQ (SImode, cr,
13922 const0_rtx)));
13923 emit_insn (gen_rtx_SET (target,
13924 gen_rtx_XOR (SImode, scratch,
13925 GEN_INT (1))));
13926 }
13927 else
13928 {
13929 /* Emit code to copy the 4-bit condition register field
13930 CR into the least significant end of register TARGET. */
13931 rtx scratch1 = gen_reg_rtx (SImode);
13932 rtx scratch2 = gen_reg_rtx (SImode);
13933 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
13934 emit_insn (gen_movcc (subreg, cr));
13935 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
13936 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
13937 }
13938 }
13939
13940 if (nonvoid)
13941 return target;
13942 return const0_rtx;
13943 }
13944
13945 *expandedp = false;
13946 return NULL_RTX;
13947 }
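
/* Illustrative sketch (assumes -mhtm): given the CR handling above,
   __builtin_tbegin returns nonzero when the transaction starts and
   zero when it fails, so typical target-side usage is

     if (__builtin_tbegin (0))
       {
         ... transactional work ...
         __builtin_tend (0);
       }
     else
       handle_failure ();

   where handle_failure is a placeholder for user code.  */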
13948
13949 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
13950
13951 static rtx
13952 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
13953 rtx target)
13954 {
13955 /* __builtin_cpu_init () is a nop, so expand to nothing. */
13956 if (fcode == RS6000_BUILTIN_CPU_INIT)
13957 return const0_rtx;
13958
13959 if (target == 0 || GET_MODE (target) != SImode)
13960 target = gen_reg_rtx (SImode);
13961
13962 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
13963 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
13964 /* The target_clones attribute creates an ARRAY_REF instead of a
13965 STRING_CST; convert it back to a STRING_CST. */
13966 if (TREE_CODE (arg) == ARRAY_REF
13967 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
13968 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
13969 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
13970 arg = TREE_OPERAND (arg, 0);
13971
13972 if (TREE_CODE (arg) != STRING_CST)
13973 {
13974 error ("builtin %qs only accepts a string argument",
13975 rs6000_builtin_info[(size_t) fcode].name);
13976 return const0_rtx;
13977 }
13978
13979 if (fcode == RS6000_BUILTIN_CPU_IS)
13980 {
13981 const char *cpu = TREE_STRING_POINTER (arg);
13982 rtx cpuid = NULL_RTX;
13983 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
13984 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
13985 {
13986 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
13987 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
13988 break;
13989 }
13990 if (cpuid == NULL_RTX)
13991 {
13992 /* Invalid CPU argument. */
13993 error ("cpu %qs is an invalid argument to builtin %qs",
13994 cpu, rs6000_builtin_info[(size_t) fcode].name);
13995 return const0_rtx;
13996 }
13997
13998 rtx platform = gen_reg_rtx (SImode);
13999 rtx tcbmem = gen_const_mem (SImode,
14000 gen_rtx_PLUS (Pmode,
14001 gen_rtx_REG (Pmode, TLS_REGNUM),
14002 GEN_INT (TCB_PLATFORM_OFFSET)));
14003 emit_move_insn (platform, tcbmem);
14004 emit_insn (gen_eqsi3 (target, platform, cpuid));
14005 }
14006 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14007 {
14008 const char *hwcap = TREE_STRING_POINTER (arg);
14009 rtx mask = NULL_RTX;
14010 int hwcap_offset;
14011 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14012 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14013 {
14014 mask = GEN_INT (cpu_supports_info[i].mask);
14015 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14016 break;
14017 }
14018 if (mask == NULL_RTX)
14019 {
14020 /* Invalid HWCAP argument. */
14021 error ("%s %qs is an invalid argument to builtin %qs",
14022 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14023 return const0_rtx;
14024 }
14025
14026 rtx tcb_hwcap = gen_reg_rtx (SImode);
14027 rtx tcbmem = gen_const_mem (SImode,
14028 gen_rtx_PLUS (Pmode,
14029 gen_rtx_REG (Pmode, TLS_REGNUM),
14030 GEN_INT (hwcap_offset)));
14031 emit_move_insn (tcb_hwcap, tcbmem);
14032 rtx scratch1 = gen_reg_rtx (SImode);
14033 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14034 rtx scratch2 = gen_reg_rtx (SImode);
14035 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14036 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14037 }
14038 else
14039 gcc_unreachable ();
14040
14041 /* Record that we have expanded a CPU builtin, so that we can later
14042 emit a reference to the special symbol exported by LIBC to ensure we
14043 do not link against an old LIBC that doesn't support this feature. */
14044 cpu_builtin_p = true;
14045
14046 #else
14047 warning (0, "builtin %qs requires GLIBC (2.23 or newer) that exports the "
14048 "hardware capability bits", rs6000_builtin_info[(size_t) fcode].name);
14049
14050 /* For old LIBCs, always return FALSE. */
14051 emit_move_insn (target, GEN_INT (0));
14052 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14053
14054 return target;
14055 }
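
/* Illustrative sketch: with GLIBC 2.23 or newer, the expansion above
   turns

     __builtin_cpu_init ();
     if (__builtin_cpu_is ("power9"))
       use_power9_kernel ();
     else if (__builtin_cpu_supports ("vsx"))
       use_vsx_kernel ();

   into direct loads from the TCB; use_power9_kernel and
   use_vsx_kernel are hypothetical user functions.  */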
14056
14057 static rtx
14058 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14059 {
14060 rtx pat;
14061 tree arg0 = CALL_EXPR_ARG (exp, 0);
14062 tree arg1 = CALL_EXPR_ARG (exp, 1);
14063 tree arg2 = CALL_EXPR_ARG (exp, 2);
14064 rtx op0 = expand_normal (arg0);
14065 rtx op1 = expand_normal (arg1);
14066 rtx op2 = expand_normal (arg2);
14067 machine_mode tmode = insn_data[icode].operand[0].mode;
14068 machine_mode mode0 = insn_data[icode].operand[1].mode;
14069 machine_mode mode1 = insn_data[icode].operand[2].mode;
14070 machine_mode mode2 = insn_data[icode].operand[3].mode;
14071
14072 if (icode == CODE_FOR_nothing)
14073 /* Builtin not supported on this processor. */
14074 return 0;
14075
14076 /* If we got invalid arguments, bail out before generating bad rtl. */
14077 if (arg0 == error_mark_node
14078 || arg1 == error_mark_node
14079 || arg2 == error_mark_node)
14080 return const0_rtx;
14081
14082 /* Check and prepare argument depending on the instruction code.
14083
14084 Note that a switch statement instead of the sequence of tests
14085 would be incorrect as many of the CODE_FOR values could be
14086 CODE_FOR_nothing and that would yield multiple alternatives
14087 with identical values. We'd never reach here at runtime in
14088 this case. */
14089 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14090 || icode == CODE_FOR_altivec_vsldoi_v2df
14091 || icode == CODE_FOR_altivec_vsldoi_v4si
14092 || icode == CODE_FOR_altivec_vsldoi_v8hi
14093 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14094 {
14095 /* Only allow 4-bit unsigned literals. */
14096 STRIP_NOPS (arg2);
14097 if (TREE_CODE (arg2) != INTEGER_CST
14098 || TREE_INT_CST_LOW (arg2) & ~0xf)
14099 {
14100 error ("argument 3 must be a 4-bit unsigned literal");
14101 return CONST0_RTX (tmode);
14102 }
14103 }
14104 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14105 || icode == CODE_FOR_vsx_xxpermdi_v2di
14106 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14107 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14108 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14109 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14110 || icode == CODE_FOR_vsx_xxpermdi_v4si
14111 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14112 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14113 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14114 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14115 || icode == CODE_FOR_vsx_xxsldwi_v4si
14116 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14117 || icode == CODE_FOR_vsx_xxsldwi_v2di
14118 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14119 {
14120 /* Only allow 2-bit unsigned literals. */
14121 STRIP_NOPS (arg2);
14122 if (TREE_CODE (arg2) != INTEGER_CST
14123 || TREE_INT_CST_LOW (arg2) & ~0x3)
14124 {
14125 error ("argument 3 must be a 2-bit unsigned literal");
14126 return CONST0_RTX (tmode);
14127 }
14128 }
14129 else if (icode == CODE_FOR_vsx_set_v2df
14130 || icode == CODE_FOR_vsx_set_v2di
14131 || icode == CODE_FOR_bcdadd
14132 || icode == CODE_FOR_bcdadd_lt
14133 || icode == CODE_FOR_bcdadd_eq
14134 || icode == CODE_FOR_bcdadd_gt
14135 || icode == CODE_FOR_bcdsub
14136 || icode == CODE_FOR_bcdsub_lt
14137 || icode == CODE_FOR_bcdsub_eq
14138 || icode == CODE_FOR_bcdsub_gt)
14139 {
14140 /* Only allow 1-bit unsigned literals. */
14141 STRIP_NOPS (arg2);
14142 if (TREE_CODE (arg2) != INTEGER_CST
14143 || TREE_INT_CST_LOW (arg2) & ~0x1)
14144 {
14145 error ("argument 3 must be a 1-bit unsigned literal");
14146 return CONST0_RTX (tmode);
14147 }
14148 }
14149 else if (icode == CODE_FOR_dfp_ddedpd_dd
14150 || icode == CODE_FOR_dfp_ddedpd_td)
14151 {
14152 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14153 STRIP_NOPS (arg0);
14154 if (TREE_CODE (arg0) != INTEGER_CST
14155 || TREE_INT_CST_LOW (arg0) & ~0x3)
14156 {
14157 error ("argument 1 must be 0 or 2");
14158 return CONST0_RTX (tmode);
14159 }
14160 }
14161 else if (icode == CODE_FOR_dfp_denbcd_dd
14162 || icode == CODE_FOR_dfp_denbcd_td)
14163 {
14164 /* Only allow 1-bit unsigned literals. */
14165 STRIP_NOPS (arg0);
14166 if (TREE_CODE (arg0) != INTEGER_CST
14167 || TREE_INT_CST_LOW (arg0) & ~0x1)
14168 {
14169 error ("argument 1 must be a 1-bit unsigned literal");
14170 return CONST0_RTX (tmode);
14171 }
14172 }
14173 else if (icode == CODE_FOR_dfp_dscli_dd
14174 || icode == CODE_FOR_dfp_dscli_td
14175 || icode == CODE_FOR_dfp_dscri_dd
14176 || icode == CODE_FOR_dfp_dscri_td)
14177 {
14178 /* Only allow 6-bit unsigned literals. */
14179 STRIP_NOPS (arg1);
14180 if (TREE_CODE (arg1) != INTEGER_CST
14181 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14182 {
14183 error ("argument 2 must be a 6-bit unsigned literal");
14184 return CONST0_RTX (tmode);
14185 }
14186 }
14187 else if (icode == CODE_FOR_crypto_vshasigmaw
14188 || icode == CODE_FOR_crypto_vshasigmad)
14189 {
14190 /* Check whether the 2nd and 3rd arguments are integer constants
14191 in range, and prepare the arguments. */
14192 STRIP_NOPS (arg1);
14193 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14194 {
14195 error ("argument 2 must be 0 or 1");
14196 return CONST0_RTX (tmode);
14197 }
14198
14199 STRIP_NOPS (arg2);
14200 if (TREE_CODE (arg2) != INTEGER_CST
14201 || wi::geu_p (wi::to_wide (arg2), 16))
14202 {
14203 error ("argument 3 must be in the range [0, 15]");
14204 return CONST0_RTX (tmode);
14205 }
14206 }
14207
14208 if (target == 0
14209 || GET_MODE (target) != tmode
14210 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14211 target = gen_reg_rtx (tmode);
14212
14213 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14214 op0 = copy_to_mode_reg (mode0, op0);
14215 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14216 op1 = copy_to_mode_reg (mode1, op1);
14217 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14218 op2 = copy_to_mode_reg (mode2, op2);
14219
14220 pat = GEN_FCN (icode) (target, op0, op1, op2);
14221 if (! pat)
14222 return 0;
14223 emit_insn (pat);
14224
14225 return target;
14226 }
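
/* Illustrative sketch: vec_sld maps onto the altivec_vsldoi_*
   patterns checked above, so its third operand must be a 4-bit
   unsigned literal:

     #include <altivec.h>
     vector int
     shift_octets (vector int a, vector int b)
     {
       return vec_sld (a, b, 4);
     }

   A literal such as 17 would be rejected with the error above.  */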
14227
14228
14229 /* Expand the dst builtins. */
14230 static rtx
14231 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14232 bool *expandedp)
14233 {
14234 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14235 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14236 tree arg0, arg1, arg2;
14237 machine_mode mode0, mode1;
14238 rtx pat, op0, op1, op2;
14239 const struct builtin_description *d;
14240 size_t i;
14241
14242 *expandedp = false;
14243
14244 /* Handle DST variants. */
14245 d = bdesc_dst;
14246 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14247 if (d->code == fcode)
14248 {
14249 arg0 = CALL_EXPR_ARG (exp, 0);
14250 arg1 = CALL_EXPR_ARG (exp, 1);
14251 arg2 = CALL_EXPR_ARG (exp, 2);
14252 op0 = expand_normal (arg0);
14253 op1 = expand_normal (arg1);
14254 op2 = expand_normal (arg2);
14255 mode0 = insn_data[d->icode].operand[0].mode;
14256 mode1 = insn_data[d->icode].operand[1].mode;
14257
14258 /* Invalid arguments; bail out before generating bad rtl. */
14259 if (arg0 == error_mark_node
14260 || arg1 == error_mark_node
14261 || arg2 == error_mark_node)
14262 return const0_rtx;
14263
14264 *expandedp = true;
14265 STRIP_NOPS (arg2);
14266 if (TREE_CODE (arg2) != INTEGER_CST
14267 || TREE_INT_CST_LOW (arg2) & ~0x3)
14268 {
14269 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14270 return const0_rtx;
14271 }
14272
14273 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14274 op0 = copy_to_mode_reg (Pmode, op0);
14275 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14276 op1 = copy_to_mode_reg (mode1, op1);
14277
14278 pat = GEN_FCN (d->icode) (op0, op1, op2);
14279 if (pat != 0)
14280 emit_insn (pat);
14281
14282 return NULL_RTX;
14283 }
14284
14285 return NULL_RTX;
14286 }
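
/* Illustrative sketch: the dst expansion above serves calls such as

     #include <altivec.h>
     void
     start_stream (const int *p)
     {
       vec_dst (p, 0x10010100, 0);
     }

   where the last operand (the stream tag) must be a 2-bit literal,
   as the INTEGER_CST check enforces.  */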
14287
14288 /* Expand vec_init builtin. */
14289 static rtx
14290 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14291 {
14292 machine_mode tmode = TYPE_MODE (type);
14293 machine_mode inner_mode = GET_MODE_INNER (tmode);
14294 int i, n_elt = GET_MODE_NUNITS (tmode);
14295
14296 gcc_assert (VECTOR_MODE_P (tmode));
14297 gcc_assert (n_elt == call_expr_nargs (exp));
14298
14299 if (!target || !register_operand (target, tmode))
14300 target = gen_reg_rtx (tmode);
14301
14302 /* If we have a vector comprised of a single element, such as V1TImode, do
14303 the initialization directly. */
14304 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14305 {
14306 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14307 emit_move_insn (target, gen_lowpart (tmode, x));
14308 }
14309 else
14310 {
14311 rtvec v = rtvec_alloc (n_elt);
14312
14313 for (i = 0; i < n_elt; ++i)
14314 {
14315 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14316 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14317 }
14318
14319 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14320 }
14321
14322 return target;
14323 }
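
/* Illustrative sketch: a non-constant vector constructor such as

     vector int
     make_v4si (int a, int b, int c, int d)
     {
       return (vector int) {a, b, c, d};
     }

   is one route into altivec_expand_vec_init_builtin, with one call
   argument per vector element.  */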
14324
14325 /* Return the integer constant in ARG. Constrain it to be in the range
14326 of the subparts of VEC_TYPE; issue an error if not. */
14327
14328 static int
14329 get_element_number (tree vec_type, tree arg)
14330 {
14331 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14332
14333 if (!tree_fits_uhwi_p (arg)
14334 || (elt = tree_to_uhwi (arg), elt > max))
14335 {
14336 error ("selector must be an integer constant in the range [0, %wi]", max);
14337 return 0;
14338 }
14339
14340 return elt;
14341 }
14342
14343 /* Expand vec_set builtin. */
14344 static rtx
14345 altivec_expand_vec_set_builtin (tree exp)
14346 {
14347 machine_mode tmode, mode1;
14348 tree arg0, arg1, arg2;
14349 int elt;
14350 rtx op0, op1;
14351
14352 arg0 = CALL_EXPR_ARG (exp, 0);
14353 arg1 = CALL_EXPR_ARG (exp, 1);
14354 arg2 = CALL_EXPR_ARG (exp, 2);
14355
14356 tmode = TYPE_MODE (TREE_TYPE (arg0));
14357 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14358 gcc_assert (VECTOR_MODE_P (tmode));
14359
14360 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14361 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14362 elt = get_element_number (TREE_TYPE (arg0), arg2);
14363
14364 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14365 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14366
14367 op0 = force_reg (tmode, op0);
14368 op1 = force_reg (mode1, op1);
14369
14370 rs6000_expand_vector_set (op0, op1, elt);
14371
14372 return op0;
14373 }
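
/* Illustrative sketch: vec_insert reaches
   altivec_expand_vec_set_builtin, with get_element_number keeping
   the selector in range:

     #include <altivec.h>
     vector int
     set_lane2 (vector int v, int x)
     {
       return vec_insert (x, v, 2);
     }
   */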
14374
14375 /* Expand vec_ext builtin. */
14376 static rtx
14377 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14378 {
14379 machine_mode tmode, mode0;
14380 tree arg0, arg1;
14381 rtx op0;
14382 rtx op1;
14383
14384 arg0 = CALL_EXPR_ARG (exp, 0);
14385 arg1 = CALL_EXPR_ARG (exp, 1);
14386
14387 op0 = expand_normal (arg0);
14388 op1 = expand_normal (arg1);
14389
14390 if (TREE_CODE (arg1) == INTEGER_CST)
14391 {
14392 unsigned HOST_WIDE_INT elt;
14393 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14394 unsigned int truncated_selector;
14395 /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1) returns
14396 the low-order bits of the INTEGER_CST, as needed for modulo indexing. */
14397 elt = TREE_INT_CST_LOW (arg1);
14398 truncated_selector = elt % size;
14399 op1 = GEN_INT (truncated_selector);
14400 }
14401
14402 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14403 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14404 gcc_assert (VECTOR_MODE_P (mode0));
14405
14406 op0 = force_reg (mode0, op0);
14407
14408 if (optimize || !target || !register_operand (target, tmode))
14409 target = gen_reg_rtx (tmode);
14410
14411 rs6000_expand_vector_extract (target, op0, op1);
14412
14413 return target;
14414 }
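
/* Illustrative sketch: because the selector is truncated modulo the
   element count above, an out-of-range vec_extract index wraps, so
   for a 4-element vector index 5 reads lane 1:

     #include <altivec.h>
     int
     get_lane (vector int v)
     {
       return vec_extract (v, 5);
     }
   */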
14415
14416 /* Expand the builtin in EXP and store the result in TARGET. Store
14417 true in *EXPANDEDP if we found a builtin to expand. */
14418 static rtx
14419 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14420 {
14421 const struct builtin_description *d;
14422 size_t i;
14423 enum insn_code icode;
14424 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14425 tree arg0, arg1, arg2;
14426 rtx op0, pat;
14427 machine_mode tmode, mode0;
14428 enum rs6000_builtins fcode
14429 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14430
14431 if (rs6000_overloaded_builtin_p (fcode))
14432 {
14433 *expandedp = true;
14434 error ("unresolved overload for Altivec builtin %qF", fndecl);
14435
14436 /* Given it is invalid, just generate a normal call. */
14437 return expand_call (exp, target, false);
14438 }
14439
14440 target = altivec_expand_dst_builtin (exp, target, expandedp);
14441 if (*expandedp)
14442 return target;
14443
14444 *expandedp = true;
14445
14446 switch (fcode)
14447 {
14448 case ALTIVEC_BUILTIN_STVX_V2DF:
14449 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14450 case ALTIVEC_BUILTIN_STVX_V2DI:
14451 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14452 case ALTIVEC_BUILTIN_STVX_V4SF:
14453 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14454 case ALTIVEC_BUILTIN_STVX:
14455 case ALTIVEC_BUILTIN_STVX_V4SI:
14456 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14457 case ALTIVEC_BUILTIN_STVX_V8HI:
14458 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14459 case ALTIVEC_BUILTIN_STVX_V16QI:
14460 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14461 case ALTIVEC_BUILTIN_STVEBX:
14462 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14463 case ALTIVEC_BUILTIN_STVEHX:
14464 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14465 case ALTIVEC_BUILTIN_STVEWX:
14466 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14467 case ALTIVEC_BUILTIN_STVXL_V2DF:
14468 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14469 case ALTIVEC_BUILTIN_STVXL_V2DI:
14470 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14471 case ALTIVEC_BUILTIN_STVXL_V4SF:
14472 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14473 case ALTIVEC_BUILTIN_STVXL:
14474 case ALTIVEC_BUILTIN_STVXL_V4SI:
14475 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14476 case ALTIVEC_BUILTIN_STVXL_V8HI:
14477 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14478 case ALTIVEC_BUILTIN_STVXL_V16QI:
14479 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14480
14481 case ALTIVEC_BUILTIN_STVLX:
14482 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14483 case ALTIVEC_BUILTIN_STVLXL:
14484 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14485 case ALTIVEC_BUILTIN_STVRX:
14486 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14487 case ALTIVEC_BUILTIN_STVRXL:
14488 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14489
14490 case P9V_BUILTIN_STXVL:
14491 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14492
14493 case P9V_BUILTIN_XST_LEN_R:
14494 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14495
14496 case VSX_BUILTIN_STXVD2X_V1TI:
14497 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14498 case VSX_BUILTIN_STXVD2X_V2DF:
14499 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14500 case VSX_BUILTIN_STXVD2X_V2DI:
14501 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14502 case VSX_BUILTIN_STXVW4X_V4SF:
14503 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14504 case VSX_BUILTIN_STXVW4X_V4SI:
14505 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14506 case VSX_BUILTIN_STXVW4X_V8HI:
14507 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14508 case VSX_BUILTIN_STXVW4X_V16QI:
14509 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14510
14511 /* For the following on big endian, it's ok to use any appropriate
14512 unaligned-supporting store, so use a generic expander. For
14513 little-endian, the exact element-reversing instruction must
14514 be used. */
14515 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14516 {
14517 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14518 : CODE_FOR_vsx_st_elemrev_v1ti);
14519 return altivec_expand_stv_builtin (code, exp);
14520 }
14521 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14522 {
14523 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14524 : CODE_FOR_vsx_st_elemrev_v2df);
14525 return altivec_expand_stv_builtin (code, exp);
14526 }
14527 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14528 {
14529 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14530 : CODE_FOR_vsx_st_elemrev_v2di);
14531 return altivec_expand_stv_builtin (code, exp);
14532 }
14533 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14534 {
14535 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14536 : CODE_FOR_vsx_st_elemrev_v4sf);
14537 return altivec_expand_stv_builtin (code, exp);
14538 }
14539 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14540 {
14541 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14542 : CODE_FOR_vsx_st_elemrev_v4si);
14543 return altivec_expand_stv_builtin (code, exp);
14544 }
14545 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14546 {
14547 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14548 : CODE_FOR_vsx_st_elemrev_v8hi);
14549 return altivec_expand_stv_builtin (code, exp);
14550 }
14551 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14552 {
14553 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14554 : CODE_FOR_vsx_st_elemrev_v16qi);
14555 return altivec_expand_stv_builtin (code, exp);
14556 }
14557
14558 case ALTIVEC_BUILTIN_MFVSCR:
14559 icode = CODE_FOR_altivec_mfvscr;
14560 tmode = insn_data[icode].operand[0].mode;
14561
14562 if (target == 0
14563 || GET_MODE (target) != tmode
14564 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14565 target = gen_reg_rtx (tmode);
14566
14567 pat = GEN_FCN (icode) (target);
14568 if (! pat)
14569 return 0;
14570 emit_insn (pat);
14571 return target;
14572
14573 case ALTIVEC_BUILTIN_MTVSCR:
14574 icode = CODE_FOR_altivec_mtvscr;
14575 arg0 = CALL_EXPR_ARG (exp, 0);
14576 op0 = expand_normal (arg0);
14577 mode0 = insn_data[icode].operand[0].mode;
14578
14579 /* If we got invalid arguments, bail out before generating bad rtl. */
14580 if (arg0 == error_mark_node)
14581 return const0_rtx;
14582
14583 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14584 op0 = copy_to_mode_reg (mode0, op0);
14585
14586 pat = GEN_FCN (icode) (op0);
14587 if (pat)
14588 emit_insn (pat);
14589 return NULL_RTX;
14590
14591 case ALTIVEC_BUILTIN_DSSALL:
14592 emit_insn (gen_altivec_dssall ());
14593 return NULL_RTX;
14594
14595 case ALTIVEC_BUILTIN_DSS:
14596 icode = CODE_FOR_altivec_dss;
14597 arg0 = CALL_EXPR_ARG (exp, 0);
14598 STRIP_NOPS (arg0);
14599 op0 = expand_normal (arg0);
14600 mode0 = insn_data[icode].operand[0].mode;
14601
14602 /* If we got invalid arguments, bail out before generating bad rtl. */
14603 if (arg0 == error_mark_node)
14604 return const0_rtx;
14605
14606 if (TREE_CODE (arg0) != INTEGER_CST
14607 || TREE_INT_CST_LOW (arg0) & ~0x3)
14608 {
14609 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14610 return const0_rtx;
14611 }
14612
14613 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14614 op0 = copy_to_mode_reg (mode0, op0);
14615
14616 emit_insn (gen_altivec_dss (op0));
14617 return NULL_RTX;
14618
14619 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14620 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14621 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14622 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14623 case VSX_BUILTIN_VEC_INIT_V2DF:
14624 case VSX_BUILTIN_VEC_INIT_V2DI:
14625 case VSX_BUILTIN_VEC_INIT_V1TI:
14626 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14627
14628 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14629 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14630 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14631 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14632 case VSX_BUILTIN_VEC_SET_V2DF:
14633 case VSX_BUILTIN_VEC_SET_V2DI:
14634 case VSX_BUILTIN_VEC_SET_V1TI:
14635 return altivec_expand_vec_set_builtin (exp);
14636
14637 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14638 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14639 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14640 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14641 case VSX_BUILTIN_VEC_EXT_V2DF:
14642 case VSX_BUILTIN_VEC_EXT_V2DI:
14643 case VSX_BUILTIN_VEC_EXT_V1TI:
14644 return altivec_expand_vec_ext_builtin (exp, target);
14645
14646 case P9V_BUILTIN_VEC_EXTRACT4B:
14647 arg1 = CALL_EXPR_ARG (exp, 1);
14648 STRIP_NOPS (arg1);
14649
14650 /* If the argument is invalid, generate a normal call. */
14651 if (arg1 == error_mark_node)
14652 return expand_call (exp, target, false);
14653
14654 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14655 {
14656 error ("second argument to %qs must be in the range [0, 12]", "vec_vextract4b");
14657 return expand_call (exp, target, false);
14658 }
14659 break;
14660
14661 case P9V_BUILTIN_VEC_INSERT4B:
14662 arg2 = CALL_EXPR_ARG (exp, 2);
14663 STRIP_NOPS (arg2);
14664
14665 /* If the argument is invalid, generate a normal call. */
14666 if (arg2 == error_mark_node)
14667 return expand_call (exp, target, false);
14668
14669 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14670 {
14671 error ("third argument to %qs must be in the range [0, 12]", "vec_vinsert4b");
14672 return expand_call (exp, target, false);
14673 }
14674 break;
14675
14676 default:
14677 break;
14679 }
14680
14681 /* Expand abs* operations. */
14682 d = bdesc_abs;
14683 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14684 if (d->code == fcode)
14685 return altivec_expand_abs_builtin (d->icode, exp, target);
14686
14687 /* Expand the AltiVec predicates. */
14688 d = bdesc_altivec_preds;
14689 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14690 if (d->code == fcode)
14691 return altivec_expand_predicate_builtin (d->icode, exp, target);
14692
14693 /* LV* are funky. We initialized them differently. */
14694 switch (fcode)
14695 {
14696 case ALTIVEC_BUILTIN_LVSL:
14697 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14698 exp, target, false);
14699 case ALTIVEC_BUILTIN_LVSR:
14700 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14701 exp, target, false);
14702 case ALTIVEC_BUILTIN_LVEBX:
14703 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14704 exp, target, false);
14705 case ALTIVEC_BUILTIN_LVEHX:
14706 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14707 exp, target, false);
14708 case ALTIVEC_BUILTIN_LVEWX:
14709 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14710 exp, target, false);
14711 case ALTIVEC_BUILTIN_LVXL_V2DF:
14712 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14713 exp, target, false);
14714 case ALTIVEC_BUILTIN_LVXL_V2DI:
14715 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14716 exp, target, false);
14717 case ALTIVEC_BUILTIN_LVXL_V4SF:
14718 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14719 exp, target, false);
14720 case ALTIVEC_BUILTIN_LVXL:
14721 case ALTIVEC_BUILTIN_LVXL_V4SI:
14722 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14723 exp, target, false);
14724 case ALTIVEC_BUILTIN_LVXL_V8HI:
14725 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14726 exp, target, false);
14727 case ALTIVEC_BUILTIN_LVXL_V16QI:
14728 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14729 exp, target, false);
14730 case ALTIVEC_BUILTIN_LVX_V1TI:
14731 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14732 exp, target, false);
14733 case ALTIVEC_BUILTIN_LVX_V2DF:
14734 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14735 exp, target, false);
14736 case ALTIVEC_BUILTIN_LVX_V2DI:
14737 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14738 exp, target, false);
14739 case ALTIVEC_BUILTIN_LVX_V4SF:
14740 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14741 exp, target, false);
14742 case ALTIVEC_BUILTIN_LVX:
14743 case ALTIVEC_BUILTIN_LVX_V4SI:
14744 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14745 exp, target, false);
14746 case ALTIVEC_BUILTIN_LVX_V8HI:
14747 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14748 exp, target, false);
14749 case ALTIVEC_BUILTIN_LVX_V16QI:
14750 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
14751 exp, target, false);
14752 case ALTIVEC_BUILTIN_LVLX:
14753 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14754 exp, target, true);
14755 case ALTIVEC_BUILTIN_LVLXL:
14756 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14757 exp, target, true);
14758 case ALTIVEC_BUILTIN_LVRX:
14759 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14760 exp, target, true);
14761 case ALTIVEC_BUILTIN_LVRXL:
14762 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14763 exp, target, true);
14764 case VSX_BUILTIN_LXVD2X_V1TI:
14765 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14766 exp, target, false);
14767 case VSX_BUILTIN_LXVD2X_V2DF:
14768 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14769 exp, target, false);
14770 case VSX_BUILTIN_LXVD2X_V2DI:
14771 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14772 exp, target, false);
14773 case VSX_BUILTIN_LXVW4X_V4SF:
14774 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14775 exp, target, false);
14776 case VSX_BUILTIN_LXVW4X_V4SI:
14777 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14778 exp, target, false);
14779 case VSX_BUILTIN_LXVW4X_V8HI:
14780 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14781 exp, target, false);
14782 case VSX_BUILTIN_LXVW4X_V16QI:
14783 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14784 exp, target, false);
14785 /* For the following on big endian, it's ok to use any appropriate
14786 unaligned-supporting load, so use a generic expander. For
14787 little-endian, the exact element-reversing instruction must
14788 be used. */
14789 case VSX_BUILTIN_LD_ELEMREV_V2DF:
14790 {
14791 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
14792 : CODE_FOR_vsx_ld_elemrev_v2df);
14793 return altivec_expand_lv_builtin (code, exp, target, false);
14794 }
14795 case VSX_BUILTIN_LD_ELEMREV_V1TI:
14796 {
14797 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
14798 : CODE_FOR_vsx_ld_elemrev_v1ti);
14799 return altivec_expand_lv_builtin (code, exp, target, false);
14800 }
14801 case VSX_BUILTIN_LD_ELEMREV_V2DI:
14802 {
14803 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
14804 : CODE_FOR_vsx_ld_elemrev_v2di);
14805 return altivec_expand_lv_builtin (code, exp, target, false);
14806 }
14807 case VSX_BUILTIN_LD_ELEMREV_V4SF:
14808 {
14809 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
14810 : CODE_FOR_vsx_ld_elemrev_v4sf);
14811 return altivec_expand_lv_builtin (code, exp, target, false);
14812 }
14813 case VSX_BUILTIN_LD_ELEMREV_V4SI:
14814 {
14815 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
14816 : CODE_FOR_vsx_ld_elemrev_v4si);
14817 return altivec_expand_lv_builtin (code, exp, target, false);
14818 }
14819 case VSX_BUILTIN_LD_ELEMREV_V8HI:
14820 {
14821 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
14822 : CODE_FOR_vsx_ld_elemrev_v8hi);
14823 return altivec_expand_lv_builtin (code, exp, target, false);
14824 }
14825 case VSX_BUILTIN_LD_ELEMREV_V16QI:
14826 {
14827 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
14828 : CODE_FOR_vsx_ld_elemrev_v16qi);
14829 return altivec_expand_lv_builtin (code, exp, target, false);
14830 }
14832 default:
14833 break;
14835 }
14836
14837 *expandedp = false;
14838 return NULL_RTX;
14839 }
14840
14841 /* Check whether a builtin function is supported in this target
14842 configuration. */
14843 bool
14844 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
14845 {
14846 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
14847 return (fnmask & rs6000_builtin_mask) == fnmask;
14851 }
14852
14853 /* Raise an error message for a builtin function that is called without the
14854 appropriate target options being set. */
14855
14856 static void
14857 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14858 {
14859 size_t uns_fncode = (size_t) fncode;
14860 const char *name = rs6000_builtin_info[uns_fncode].name;
14861 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14862
14863 gcc_assert (name != NULL);
14864 if ((fnmask & RS6000_BTM_CELL) != 0)
14865 error ("builtin function %qs is only valid for the cell processor", name);
14866 else if ((fnmask & RS6000_BTM_VSX) != 0)
14867 error ("builtin function %qs requires the %qs option", name, "-mvsx");
14868 else if ((fnmask & RS6000_BTM_HTM) != 0)
14869 error ("builtin function %qs requires the %qs option", name, "-mhtm");
14870 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14871 error ("builtin function %qs requires the %qs option", name, "-maltivec");
14872 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14873 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14874 error ("builtin function %qs requires the %qs and %qs options",
14875 name, "-mhard-dfp", "-mpower8-vector");
14876 else if ((fnmask & RS6000_BTM_DFP) != 0)
14877 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
14878 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14879 error ("builtin function %qs requires the %qs option", name,
14880 "-mpower8-vector");
14881 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14882 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14883 error ("builtin function %qs requires the %qs and %qs options",
14884 name, "-mcpu=power9", "-m64");
14885 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
14886 error ("builtin function %qs requires the %qs option", name,
14887 "-mcpu=power9");
14888 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14889 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14890 error ("builtin function %qs requires the %qs and %qs options",
14891 name, "-mcpu=power9", "-m64");
14892 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
14893 error ("builtin function %qs requires the %qs option", name,
14894 "-mcpu=power9");
14895 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
14896 {
14897 if (!TARGET_HARD_FLOAT)
14898 error ("builtin function %qs requires the %qs option", name,
14899 "-mhard-float");
14900 else
14901 error ("builtin function %qs requires the %qs option", name,
14902 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
14903 }
14904 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
14905 error ("builtin function %qs requires the %qs option", name,
14906 "-mhard-float");
14907 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
14908 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
14909 name);
14910 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
14911 error ("builtin function %qs requires the %qs option", name,
14912 "%<-mfloat128%>");
14913 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14914 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14915 error ("builtin function %qs requires the %qs (or newer), and "
14916 "%qs or %qs options",
14917 name, "-mcpu=power7", "-m64", "-mpowerpc64");
14918 else
14919 error ("builtin function %qs is not supported with the current options",
14920 name);
14921 }
14922
14923 /* Target hook for early folding of built-ins, shamelessly stolen
14924 from ia64.c. */
14925
14926 static tree
14927 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
14928 int n_args ATTRIBUTE_UNUSED,
14929 tree *args ATTRIBUTE_UNUSED,
14930 bool ignore ATTRIBUTE_UNUSED)
14931 {
14932 #ifdef SUBTARGET_FOLD_BUILTIN
14933 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
14934 #else
14935 return NULL_TREE;
14936 #endif
14937 }
14938
14939 /* Helper function to sort out which built-ins may be valid without
14940 having an LHS. */
14941 static bool
14942 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
14943 {
14944 switch (fn_code)
14945 {
14946 case ALTIVEC_BUILTIN_STVX_V16QI:
14947 case ALTIVEC_BUILTIN_STVX_V8HI:
14948 case ALTIVEC_BUILTIN_STVX_V4SI:
14949 case ALTIVEC_BUILTIN_STVX_V4SF:
14950 case ALTIVEC_BUILTIN_STVX_V2DI:
14951 case ALTIVEC_BUILTIN_STVX_V2DF:
14952 case VSX_BUILTIN_STXVW4X_V16QI:
14953 case VSX_BUILTIN_STXVW4X_V8HI:
14954 case VSX_BUILTIN_STXVW4X_V4SF:
14955 case VSX_BUILTIN_STXVW4X_V4SI:
14956 case VSX_BUILTIN_STXVD2X_V2DF:
14957 case VSX_BUILTIN_STXVD2X_V2DI:
14958 return true;
14959 default:
14960 return false;
14961 }
14962 }
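
/* Illustrative sketch: a store such as

     #include <altivec.h>
     void
     store_v4si (vector int v, int *p)
     {
       vec_st (v, 0, p);
     }

   produces a builtin call with no LHS, which is why the STVX and
   STXV variants are listed above.  */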
14963
14964 /* Helper function to handle the gimple folding of a vector compare
14965 operation. This sets up true/false vectors, and uses the
14966 VEC_COND_EXPR operation.
14967 CODE indicates which comparison is to be made. (EQ, GT, ...).
14968 TYPE indicates the type of the result. */
14969 static tree
14970 fold_build_vec_cmp (tree_code code, tree type,
14971 tree arg0, tree arg1)
14972 {
14973 tree cmp_type = build_same_sized_truth_vector_type (type);
14974 tree zero_vec = build_zero_cst (type);
14975 tree minus_one_vec = build_minus_one_cst (type);
14976 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
14977 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
14978 }
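
/* Illustrative sketch: a comparison such as

     #include <altivec.h>
     vector bool int
     lanes_eq (vector int a, vector int b)
     {
       return vec_cmpeq (a, b);
     }

   folds via fold_build_vec_cmp into a VEC_COND_EXPR selecting
   all-ones or zero per element.  */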
14979
14980 /* Helper function to handle the in-between steps for the
14981 vector compare built-ins. */
14982 static void
14983 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
14984 {
14985 tree arg0 = gimple_call_arg (stmt, 0);
14986 tree arg1 = gimple_call_arg (stmt, 1);
14987 tree lhs = gimple_call_lhs (stmt);
14988 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
14989 gimple *g = gimple_build_assign (lhs, cmp);
14990 gimple_set_location (g, gimple_location (stmt));
14991 gsi_replace (gsi, g, true);
14992 }
14993
14994 /* Helper function to map V2DF and V4SF types to their
14995 integral equivalents (V2DI and V4SI). */
14996 tree map_to_integral_tree_type (tree input_tree_type)
14997 {
14998 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
14999 return input_tree_type;
15000 else
15001 {
15002 if (types_compatible_p (TREE_TYPE (input_tree_type),
15003 TREE_TYPE (V2DF_type_node)))
15004 return V2DI_type_node;
15005 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15006 TREE_TYPE (V4SF_type_node)))
15007 return V4SI_type_node;
15008 else
15009 gcc_unreachable ();
15010 }
15011 }
15012
15013 /* Helper function to handle the vector merge[hl] built-ins.  The h
15014 and l versions differ only in the values used when building the
15015 permute vector, selecting the high-word or the low-word merge;
15016 the variance is keyed off the use_high parameter. */
15017 static void
15018 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15019 {
15020 tree arg0 = gimple_call_arg (stmt, 0);
15021 tree arg1 = gimple_call_arg (stmt, 1);
15022 tree lhs = gimple_call_lhs (stmt);
15023 tree lhs_type = TREE_TYPE (lhs);
15024 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15025 int midpoint = n_elts / 2;
15026 int offset = 0;
15027
15028 if (use_high == 1)
15029 offset = midpoint;
15030
15031 /* The permute_type will match the lhs for integral types. For double and
15032 float types, the permute type needs to map to the V2 or V4 type that
15033 matches size. */
15034 tree permute_type = map_to_integral_tree_type (lhs_type);
15036 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15037
15038 for (int i = 0; i < midpoint; i++)
15039 {
15040 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15041 offset + i));
15042 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15043 offset + n_elts + i));
15044 }
15045
15046 tree permute = elts.build ();
15047
15048 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15049 gimple_set_location (g, gimple_location (stmt));
15050 gsi_replace (gsi, g, true);
15051 }
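
/* Illustrative sketch: for V4SI the helper above folds the merge
   builtins into a VEC_PERM_EXPR, e.g.

     #include <altivec.h>
     vector int
     merge_high (vector int a, vector int b)
     {
       return vec_mergeh (a, b);
     }

   yields {a[0], b[0], a[1], b[1]} per the PVIPR definition.  */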
15052
15053 /* Helper function to handle the vector merge[eo] built-ins. */
15054 static void
15055 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15056 {
15057 tree arg0 = gimple_call_arg (stmt, 0);
15058 tree arg1 = gimple_call_arg (stmt, 1);
15059 tree lhs = gimple_call_lhs (stmt);
15060 tree lhs_type = TREE_TYPE (lhs);
15061 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15062
15063 /* The permute_type will match the lhs for integral types. For double and
15064 float types, the permute type needs to map to the V2 or V4 type that
15065 matches size. */
15066 tree permute_type = map_to_integral_tree_type (lhs_type);
15068
15069 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15070
15071 /* Build the permute vector. */
15072 for (int i = 0; i < n_elts / 2; i++)
15073 {
15074 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15075 2*i + use_odd));
15076 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15077 2*i + use_odd + n_elts));
15078 }
15079
15080 tree permute = elts.build ();
15081
15082 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15083 gimple_set_location (g, gimple_location (stmt));
15084 gsi_replace (gsi, g, true);
15085 }
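
/* Illustrative sketch: for V4SI with use_odd == 0 the permute built
   above is {0, 4, 2, 6}, i.e.

     #include <altivec.h>
     vector int
     merge_even (vector int a, vector int b)
     {
       return vec_mergee (a, b);
     }

   yields {a[0], b[0], a[2], b[2]}.  vec_mergee and vec_mergeo
   require ISA 2.07 (-mpower8-vector).  */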
15086
15087 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15088 a constant, use rs6000_fold_builtin.) */
15089
15090 bool
15091 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15092 {
15093 gimple *stmt = gsi_stmt (*gsi);
15094 tree fndecl = gimple_call_fndecl (stmt);
15095 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15096 enum rs6000_builtins fn_code
15097 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15098 tree arg0, arg1, lhs, temp;
15099 enum tree_code bcode;
15100 gimple *g;
15101
15102 size_t uns_fncode = (size_t) fn_code;
15103 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15104 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15105 const char *fn_name2 = (icode != CODE_FOR_nothing)
15106 ? get_insn_name ((int) icode)
15107 : "nothing";
15108
15109 if (TARGET_DEBUG_BUILTIN)
15110 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15111 fn_code, fn_name1, fn_name2);
15112
15113 if (!rs6000_fold_gimple)
15114 return false;
15115
15116 /* Prevent gimple folding for code that does not have an LHS, unless it is
15117 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15118 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15119 return false;
15120
15121 /* Don't fold invalid builtins; let rs6000_expand_builtin diagnose them. */
15122 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15123 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15124 if (!func_valid_p)
15125 return false;
15126
15127 switch (fn_code)
15128 {
15129 /* Flavors of vec_add. We deliberately don't expand
15130 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15131 TImode, resulting in much poorer code generation. */
15132 case ALTIVEC_BUILTIN_VADDUBM:
15133 case ALTIVEC_BUILTIN_VADDUHM:
15134 case ALTIVEC_BUILTIN_VADDUWM:
15135 case P8V_BUILTIN_VADDUDM:
15136 case ALTIVEC_BUILTIN_VADDFP:
15137 case VSX_BUILTIN_XVADDDP:
15138 bcode = PLUS_EXPR;
15139 do_binary:
15140 arg0 = gimple_call_arg (stmt, 0);
15141 arg1 = gimple_call_arg (stmt, 1);
15142 lhs = gimple_call_lhs (stmt);
15143 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15144 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15145 {
15146 /* Ensure the binary operation is performed in a type
15147 that wraps, if it is an integral type. */
15148 gimple_seq stmts = NULL;
15149 tree type = unsigned_type_for (TREE_TYPE (lhs));
15150 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15151 type, arg0);
15152 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15153 type, arg1);
15154 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15155 type, uarg0, uarg1);
15156 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15157 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15158 build1 (VIEW_CONVERT_EXPR,
15159 TREE_TYPE (lhs), res));
15160 gsi_replace (gsi, g, true);
15161 return true;
15162 }
15163 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15164 gimple_set_location (g, gimple_location (stmt));
15165 gsi_replace (gsi, g, true);
15166 return true;
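      /* Illustrative note: for a signed addition such as
           r = vec_add (a, b);
         the sequence above performs the PLUS_EXPR in the unsigned
         (wrapping) vector type and VIEW_CONVERTs the result back, so
         the folding introduces no signed-overflow UB.  */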
15167 /* Flavors of vec_sub. We deliberately don't expand
15168 P8V_BUILTIN_VSUBUQM. */
15169 case ALTIVEC_BUILTIN_VSUBUBM:
15170 case ALTIVEC_BUILTIN_VSUBUHM:
15171 case ALTIVEC_BUILTIN_VSUBUWM:
15172 case P8V_BUILTIN_VSUBUDM:
15173 case ALTIVEC_BUILTIN_VSUBFP:
15174 case VSX_BUILTIN_XVSUBDP:
15175 bcode = MINUS_EXPR;
15176 goto do_binary;
15177 case VSX_BUILTIN_XVMULSP:
15178 case VSX_BUILTIN_XVMULDP:
15179 arg0 = gimple_call_arg (stmt, 0);
15180 arg1 = gimple_call_arg (stmt, 1);
15181 lhs = gimple_call_lhs (stmt);
15182 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15183 gimple_set_location (g, gimple_location (stmt));
15184 gsi_replace (gsi, g, true);
15185 return true;
15186 /* Even element flavors of vec_mul (signed). */
15187 case ALTIVEC_BUILTIN_VMULESB:
15188 case ALTIVEC_BUILTIN_VMULESH:
15189 case P8V_BUILTIN_VMULESW:
15190 /* Even element flavors of vec_mul (unsigned). */
15191 case ALTIVEC_BUILTIN_VMULEUB:
15192 case ALTIVEC_BUILTIN_VMULEUH:
15193 case P8V_BUILTIN_VMULEUW:
15194 arg0 = gimple_call_arg (stmt, 0);
15195 arg1 = gimple_call_arg (stmt, 1);
15196 lhs = gimple_call_lhs (stmt);
15197 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15198 gimple_set_location (g, gimple_location (stmt));
15199 gsi_replace (gsi, g, true);
15200 return true;
15201 /* Odd element flavors of vec_mul (signed). */
15202 case ALTIVEC_BUILTIN_VMULOSB:
15203 case ALTIVEC_BUILTIN_VMULOSH:
15204 case P8V_BUILTIN_VMULOSW:
15205 /* Odd element flavors of vec_mul (unsigned). */
15206 case ALTIVEC_BUILTIN_VMULOUB:
15207 case ALTIVEC_BUILTIN_VMULOUH:
15208 case P8V_BUILTIN_VMULOUW:
15209 arg0 = gimple_call_arg (stmt, 0);
15210 arg1 = gimple_call_arg (stmt, 1);
15211 lhs = gimple_call_lhs (stmt);
15212 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15213 gimple_set_location (g, gimple_location (stmt));
15214 gsi_replace (gsi, g, true);
15215 return true;
15216 /* Flavors of vec_div (Integer). */
15217 case VSX_BUILTIN_DIV_V2DI:
15218 case VSX_BUILTIN_UDIV_V2DI:
15219 arg0 = gimple_call_arg (stmt, 0);
15220 arg1 = gimple_call_arg (stmt, 1);
15221 lhs = gimple_call_lhs (stmt);
15222 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15223 gimple_set_location (g, gimple_location (stmt));
15224 gsi_replace (gsi, g, true);
15225 return true;
15226 /* Flavors of vec_div (Float). */
15227 case VSX_BUILTIN_XVDIVSP:
15228 case VSX_BUILTIN_XVDIVDP:
15229 arg0 = gimple_call_arg (stmt, 0);
15230 arg1 = gimple_call_arg (stmt, 1);
15231 lhs = gimple_call_lhs (stmt);
15232 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15233 gimple_set_location (g, gimple_location (stmt));
15234 gsi_replace (gsi, g, true);
15235 return true;
15236 /* Flavors of vec_and. */
15237 case ALTIVEC_BUILTIN_VAND:
15238 arg0 = gimple_call_arg (stmt, 0);
15239 arg1 = gimple_call_arg (stmt, 1);
15240 lhs = gimple_call_lhs (stmt);
15241 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15242 gimple_set_location (g, gimple_location (stmt));
15243 gsi_replace (gsi, g, true);
15244 return true;
15245 /* Flavors of vec_andc. */
15246 case ALTIVEC_BUILTIN_VANDC:
15247 arg0 = gimple_call_arg (stmt, 0);
15248 arg1 = gimple_call_arg (stmt, 1);
15249 lhs = gimple_call_lhs (stmt);
15250 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15251 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15252 gimple_set_location (g, gimple_location (stmt));
15253 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15254 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15255 gimple_set_location (g, gimple_location (stmt));
15256 gsi_replace (gsi, g, true);
15257 return true;
15258 /* Flavors of vec_nand. */
15259 case P8V_BUILTIN_VEC_NAND:
15260 case P8V_BUILTIN_NAND_V16QI:
15261 case P8V_BUILTIN_NAND_V8HI:
15262 case P8V_BUILTIN_NAND_V4SI:
15263 case P8V_BUILTIN_NAND_V4SF:
15264 case P8V_BUILTIN_NAND_V2DF:
15265 case P8V_BUILTIN_NAND_V2DI:
15266 arg0 = gimple_call_arg (stmt, 0);
15267 arg1 = gimple_call_arg (stmt, 1);
15268 lhs = gimple_call_lhs (stmt);
15269 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15270 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15271 gimple_set_location (g, gimple_location (stmt));
15272 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15273 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15274 gimple_set_location (g, gimple_location (stmt));
15275 gsi_replace (gsi, g, true);
15276 return true;
15277 /* Flavors of vec_or. */
15278 case ALTIVEC_BUILTIN_VOR:
15279 arg0 = gimple_call_arg (stmt, 0);
15280 arg1 = gimple_call_arg (stmt, 1);
15281 lhs = gimple_call_lhs (stmt);
15282 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15283 gimple_set_location (g, gimple_location (stmt));
15284 gsi_replace (gsi, g, true);
15285 return true;
15286 /* flavors of vec_orc. */
15287 case P8V_BUILTIN_ORC_V16QI:
15288 case P8V_BUILTIN_ORC_V8HI:
15289 case P8V_BUILTIN_ORC_V4SI:
15290 case P8V_BUILTIN_ORC_V4SF:
15291 case P8V_BUILTIN_ORC_V2DF:
15292 case P8V_BUILTIN_ORC_V2DI:
15293 arg0 = gimple_call_arg (stmt, 0);
15294 arg1 = gimple_call_arg (stmt, 1);
15295 lhs = gimple_call_lhs (stmt);
15296 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15297 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15298 gimple_set_location (g, gimple_location (stmt));
15299 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15300 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15301 gimple_set_location (g, gimple_location (stmt));
15302 gsi_replace (gsi, g, true);
15303 return true;
15304 /* Flavors of vec_xor. */
15305 case ALTIVEC_BUILTIN_VXOR:
15306 arg0 = gimple_call_arg (stmt, 0);
15307 arg1 = gimple_call_arg (stmt, 1);
15308 lhs = gimple_call_lhs (stmt);
15309 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15310 gimple_set_location (g, gimple_location (stmt));
15311 gsi_replace (gsi, g, true);
15312 return true;
15313 /* Flavors of vec_nor. */
15314 case ALTIVEC_BUILTIN_VNOR:
15315 arg0 = gimple_call_arg (stmt, 0);
15316 arg1 = gimple_call_arg (stmt, 1);
15317 lhs = gimple_call_lhs (stmt);
15318 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15319 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15320 gimple_set_location (g, gimple_location (stmt));
15321 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15322 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15323 gimple_set_location (g, gimple_location (stmt));
15324 gsi_replace (gsi, g, true);
15325 return true;
15326 /* flavors of vec_abs. */
15327 case ALTIVEC_BUILTIN_ABS_V16QI:
15328 case ALTIVEC_BUILTIN_ABS_V8HI:
15329 case ALTIVEC_BUILTIN_ABS_V4SI:
15330 case ALTIVEC_BUILTIN_ABS_V4SF:
15331 case P8V_BUILTIN_ABS_V2DI:
15332 case VSX_BUILTIN_XVABSDP:
15333 arg0 = gimple_call_arg (stmt, 0);
15334 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15335 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15336 return false;
15337 lhs = gimple_call_lhs (stmt);
15338 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15339 gimple_set_location (g, gimple_location (stmt));
15340 gsi_replace (gsi, g, true);
15341 return true;
15342 /* flavors of vec_min. */
15343 case VSX_BUILTIN_XVMINDP:
15344 case P8V_BUILTIN_VMINSD:
15345 case P8V_BUILTIN_VMINUD:
15346 case ALTIVEC_BUILTIN_VMINSB:
15347 case ALTIVEC_BUILTIN_VMINSH:
15348 case ALTIVEC_BUILTIN_VMINSW:
15349 case ALTIVEC_BUILTIN_VMINUB:
15350 case ALTIVEC_BUILTIN_VMINUH:
15351 case ALTIVEC_BUILTIN_VMINUW:
15352 case ALTIVEC_BUILTIN_VMINFP:
15353 arg0 = gimple_call_arg (stmt, 0);
15354 arg1 = gimple_call_arg (stmt, 1);
15355 lhs = gimple_call_lhs (stmt);
15356 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15357 gimple_set_location (g, gimple_location (stmt));
15358 gsi_replace (gsi, g, true);
15359 return true;
15360 /* flavors of vec_max. */
15361 case VSX_BUILTIN_XVMAXDP:
15362 case P8V_BUILTIN_VMAXSD:
15363 case P8V_BUILTIN_VMAXUD:
15364 case ALTIVEC_BUILTIN_VMAXSB:
15365 case ALTIVEC_BUILTIN_VMAXSH:
15366 case ALTIVEC_BUILTIN_VMAXSW:
15367 case ALTIVEC_BUILTIN_VMAXUB:
15368 case ALTIVEC_BUILTIN_VMAXUH:
15369 case ALTIVEC_BUILTIN_VMAXUW:
15370 case ALTIVEC_BUILTIN_VMAXFP:
15371 arg0 = gimple_call_arg (stmt, 0);
15372 arg1 = gimple_call_arg (stmt, 1);
15373 lhs = gimple_call_lhs (stmt);
15374 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15375 gimple_set_location (g, gimple_location (stmt));
15376 gsi_replace (gsi, g, true);
15377 return true;
15378 /* Flavors of vec_eqv. */
15379 case P8V_BUILTIN_EQV_V16QI:
15380 case P8V_BUILTIN_EQV_V8HI:
15381 case P8V_BUILTIN_EQV_V4SI:
15382 case P8V_BUILTIN_EQV_V4SF:
15383 case P8V_BUILTIN_EQV_V2DF:
15384 case P8V_BUILTIN_EQV_V2DI:
15385 arg0 = gimple_call_arg (stmt, 0);
15386 arg1 = gimple_call_arg (stmt, 1);
15387 lhs = gimple_call_lhs (stmt);
15388 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15389 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15390 gimple_set_location (g, gimple_location (stmt));
15391 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15392 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15393 gimple_set_location (g, gimple_location (stmt));
15394 gsi_replace (gsi, g, true);
15395 return true;
15396 /* Flavors of vec_rotate_left. */
15397 case ALTIVEC_BUILTIN_VRLB:
15398 case ALTIVEC_BUILTIN_VRLH:
15399 case ALTIVEC_BUILTIN_VRLW:
15400 case P8V_BUILTIN_VRLD:
15401 arg0 = gimple_call_arg (stmt, 0);
15402 arg1 = gimple_call_arg (stmt, 1);
15403 lhs = gimple_call_lhs (stmt);
15404 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15405 gimple_set_location (g, gimple_location (stmt));
15406 gsi_replace (gsi, g, true);
15407 return true;
15408 /* Flavors of vector shift right algebraic.
15409 vec_sra{b,h,w} -> vsra{b,h,w}. */
15410 case ALTIVEC_BUILTIN_VSRAB:
15411 case ALTIVEC_BUILTIN_VSRAH:
15412 case ALTIVEC_BUILTIN_VSRAW:
15413 case P8V_BUILTIN_VSRAD:
15414 {
15415 arg0 = gimple_call_arg (stmt, 0);
15416 arg1 = gimple_call_arg (stmt, 1);
15417 lhs = gimple_call_lhs (stmt);
15418 tree arg1_type = TREE_TYPE (arg1);
15419 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15420 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15421 location_t loc = gimple_location (stmt);
15422 /* Force arg1 into the range of shift amounts valid for the arg0 type. */
15423 /* Build a vector whose elements are the element size in bits. */
15424 int n_elts = VECTOR_CST_NELTS (arg1);
15425 tree element_size = build_int_cst (unsigned_element_type,
15426 128 / n_elts);
15427 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15428 for (int i = 0; i < n_elts; i++)
15429 elts.safe_push (element_size);
15430 tree modulo_tree = elts.build ();
15431 /* Modulo the provided shift value against that vector. */
15432 gimple_seq stmts = NULL;
15433 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15434 unsigned_arg1_type, arg1);
15435 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15436 unsigned_arg1_type, unsigned_arg1,
15437 modulo_tree);
15438 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15439 /* And finally, do the shift. */
15440 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15441 gimple_set_location (g, loc);
15442 gsi_replace (gsi, g, true);
15443 return true;
15444 }
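 /* An illustrative sketch of the folding above, assuming a V4SI shift
 (hypothetical names; each element is 128 / 4 = 32 bits):
 vr = vec_sra (va, vb);
 becomes roughly
 ub_1 = VIEW_CONVERT_EXPR <vector unsigned int> (vb);
 sh_2 = ub_1 % { 32, 32, 32, 32 };
 vr_3 = va >> sh_2;
 matching the hardware behavior of using only the low bits of each
 shift-amount element. */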
15445 /* Flavors of vector shift left.
15446 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15447 case ALTIVEC_BUILTIN_VSLB:
15448 case ALTIVEC_BUILTIN_VSLH:
15449 case ALTIVEC_BUILTIN_VSLW:
15450 case P8V_BUILTIN_VSLD:
15451 {
15452 location_t loc;
15453 gimple_seq stmts = NULL;
15454 arg0 = gimple_call_arg (stmt, 0);
15455 tree arg0_type = TREE_TYPE (arg0);
15456 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15457 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15458 return false;
15459 arg1 = gimple_call_arg (stmt, 1);
15460 tree arg1_type = TREE_TYPE (arg1);
15461 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15462 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15463 loc = gimple_location (stmt);
15464 lhs = gimple_call_lhs (stmt);
15465 /* Force arg1 into the range of shift amounts valid for the arg0 type. */
15466 /* Build a vector whose elements are the element size in bits. */
15467 int n_elts = VECTOR_CST_NELTS (arg1);
15468 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15469 * BITS_PER_UNIT;
15470 tree element_size = build_int_cst (unsigned_element_type,
15471 tree_size_in_bits / n_elts);
15472 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15473 for (int i = 0; i < n_elts; i++)
15474 elts.safe_push (element_size);
15475 tree modulo_tree = elts.build ();
15476 /* Modulo the provided shift value against that vector. */
15477 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15478 unsigned_arg1_type, arg1);
15479 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15480 unsigned_arg1_type, unsigned_arg1,
15481 modulo_tree);
15482 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15483 /* And finally, do the shift. */
15484 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15485 gimple_set_location (g, gimple_location (stmt));
15486 gsi_replace (gsi, g, true);
15487 return true;
15488 }
15489 /* Flavors of vector shift right. */
15490 case ALTIVEC_BUILTIN_VSRB:
15491 case ALTIVEC_BUILTIN_VSRH:
15492 case ALTIVEC_BUILTIN_VSRW:
15493 case P8V_BUILTIN_VSRD:
15494 {
15495 arg0 = gimple_call_arg (stmt, 0);
15496 arg1 = gimple_call_arg (stmt, 1);
15497 lhs = gimple_call_lhs (stmt);
15498 tree arg1_type = TREE_TYPE (arg1);
15499 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15500 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15501 location_t loc = gimple_location (stmt);
15502 gimple_seq stmts = NULL;
15503 /* Convert arg0 to unsigned. */
15504 tree arg0_unsigned
15505 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15506 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15507 /* Force arg1 into the range of shift amounts valid for the arg0 type. */
15508 /* Build a vector whose elements are the element size in bits. */
15509 int n_elts = VECTOR_CST_NELTS (arg1);
15510 tree element_size = build_int_cst (unsigned_element_type,
15511 128 / n_elts);
15512 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15513 for (int i = 0; i < n_elts; i++)
15514 elts.safe_push (element_size);
15515 tree modulo_tree = elts.build ();
15516 /* Modulo the provided shift value against that vector. */
15517 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15518 unsigned_arg1_type, arg1);
15519 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15520 unsigned_arg1_type, unsigned_arg1,
15521 modulo_tree);
15522 /* Do the shift. */
15523 tree res
15524 = gimple_build (&stmts, RSHIFT_EXPR,
15525 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15526 /* Convert result back to the lhs type. */
15527 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15528 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15529 update_call_from_tree (gsi, res);
15530 return true;
15531 }
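 /* Unlike the algebraic variant above, vec_sr must shift in zeros, so
 arg0 is first view-converted to an unsigned vector; RSHIFT_EXPR on
 an unsigned type is a logical shift. Illustrative sketch for V4SI
 (hypothetical names):
 ua_1 = VIEW_CONVERT_EXPR <vector unsigned int> (va);
 sh_2 = VIEW_CONVERT_EXPR <vector unsigned int> (vb) % { 32, 32, 32, 32 };
 ur_3 = ua_1 >> sh_2;
 vr_4 = VIEW_CONVERT_EXPR <vector signed int> (ur_3); */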
15532 /* Vector loads. */
15533 case ALTIVEC_BUILTIN_LVX_V16QI:
15534 case ALTIVEC_BUILTIN_LVX_V8HI:
15535 case ALTIVEC_BUILTIN_LVX_V4SI:
15536 case ALTIVEC_BUILTIN_LVX_V4SF:
15537 case ALTIVEC_BUILTIN_LVX_V2DI:
15538 case ALTIVEC_BUILTIN_LVX_V2DF:
15539 case ALTIVEC_BUILTIN_LVX_V1TI:
15540 {
15541 arg0 = gimple_call_arg (stmt, 0); // offset
15542 arg1 = gimple_call_arg (stmt, 1); // address
15543 lhs = gimple_call_lhs (stmt);
15544 location_t loc = gimple_location (stmt);
15545 /* Since arg1 may be cast to a different type, just use ptr_type_node
15546 here instead of trying to enforce TBAA on pointer types. */
15547 tree arg1_type = ptr_type_node;
15548 tree lhs_type = TREE_TYPE (lhs);
15549 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15550 the tree using the value from arg0. The resulting type will match
15551 the type of arg1. */
15552 gimple_seq stmts = NULL;
15553 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15554 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15555 arg1_type, arg1, temp_offset);
15556 /* Mask off any lower bits from the address. */
15557 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15558 arg1_type, temp_addr,
15559 build_int_cst (arg1_type, -16));
15560 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15561 if (!is_gimple_mem_ref_addr (aligned_addr))
15562 {
15563 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15564 gimple *g = gimple_build_assign (t, aligned_addr);
15565 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15566 aligned_addr = t;
15567 }
15568 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15569 take an offset, but since we've already incorporated the offset
15570 above, here we just pass in a zero. */
15571 gimple *g
15572 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15573 build_int_cst (arg1_type, 0)));
15574 gimple_set_location (g, loc);
15575 gsi_replace (gsi, g, true);
15576 return true;
15577 }
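 /* An illustrative sketch of the load folding above (hypothetical
 names), for vr = vec_ld (off, p):
 addr_1 = p + (sizetype) off;
 addr_2 = addr_1 & -16B;
 vr_3 = MEM[(vector type *) addr_2];
 i.e. lvx semantics: the low four bits of the effective address are
 discarded, so the access is always 16-byte aligned. */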
15578 /* Vector stores. */
15579 case ALTIVEC_BUILTIN_STVX_V16QI:
15580 case ALTIVEC_BUILTIN_STVX_V8HI:
15581 case ALTIVEC_BUILTIN_STVX_V4SI:
15582 case ALTIVEC_BUILTIN_STVX_V4SF:
15583 case ALTIVEC_BUILTIN_STVX_V2DI:
15584 case ALTIVEC_BUILTIN_STVX_V2DF:
15585 {
15586 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15587 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15588 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15589 location_t loc = gimple_location (stmt);
15590 tree arg0_type = TREE_TYPE (arg0);
15591 /* Use ptr_type_node (no TBAA) for the arg2_type.
15592 FIXME: (Richard) "A proper fix would be to transition this type as
15593 seen from the frontend to GIMPLE, for example in a similar way we
15594 do for MEM_REFs by piggy-backing that on an extra argument, a
15595 constant zero pointer of the alias pointer type to use (which would
15596 also serve as a type indicator of the store itself). I'd use a
15597 target specific internal function for this (not sure if we can have
15598 those target specific, but I guess if it's folded away then that's
15599 fine) and get away with the overload set." */
15600 tree arg2_type = ptr_type_node;
15601 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15602 the tree using the value from arg1. The resulting type will match
15603 the type of arg2. */
15604 gimple_seq stmts = NULL;
15605 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15606 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15607 arg2_type, arg2, temp_offset);
15608 /* Mask off any lower bits from the address. */
15609 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15610 arg2_type, temp_addr,
15611 build_int_cst (arg2_type, -16));
15612 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15613 if (!is_gimple_mem_ref_addr (aligned_addr))
15614 {
15615 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15616 gimple *g = gimple_build_assign (t, aligned_addr);
15617 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15618 aligned_addr = t;
15619 }
15620 /* The desired gimple result should be similar to:
15621 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15622 gimple *g
15623 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15624 build_int_cst (arg2_type, 0)), arg0);
15625 gimple_set_location (g, loc);
15626 gsi_replace (gsi, g, true);
15627 return true;
15628 }
15629
15630 /* Unaligned vector loads. */
15631 case VSX_BUILTIN_LXVW4X_V16QI:
15632 case VSX_BUILTIN_LXVW4X_V8HI:
15633 case VSX_BUILTIN_LXVW4X_V4SF:
15634 case VSX_BUILTIN_LXVW4X_V4SI:
15635 case VSX_BUILTIN_LXVD2X_V2DF:
15636 case VSX_BUILTIN_LXVD2X_V2DI:
15637 {
15638 arg0 = gimple_call_arg (stmt, 0); // offset
15639 arg1 = gimple_call_arg (stmt, 1); // address
15640 lhs = gimple_call_lhs (stmt);
15641 location_t loc = gimple_location (stmt);
15642 /* Since arg1 may be cast to a different type, just use ptr_type_node
15643 here instead of trying to enforce TBAA on pointer types. */
15644 tree arg1_type = ptr_type_node;
15645 tree lhs_type = TREE_TYPE (lhs);
15646 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15647 required alignment (on Power) is 4 bytes regardless of data type. */
15648 tree align_ltype = build_aligned_type (lhs_type, 4);
15649 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15650 the tree using the value from arg0. The resulting type will match
15651 the type of arg1. */
15652 gimple_seq stmts = NULL;
15653 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15654 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15655 arg1_type, arg1, temp_offset);
15656 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15657 if (!is_gimple_mem_ref_addr (temp_addr))
15658 {
15659 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15660 gimple *g = gimple_build_assign (t, temp_addr);
15661 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15662 temp_addr = t;
15663 }
15664 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15665 take an offset, but since we've already incorporated the offset
15666 above, here we just pass in a zero. */
15667 gimple *g;
15668 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15669 build_int_cst (arg1_type, 0)));
15670 gimple_set_location (g, loc);
15671 gsi_replace (gsi, g, true);
15672 return true;
15673 }
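 /* Illustrative note: build_aligned_type above creates a variant of
 the lhs vector type whose alignment is only 4 bytes, so the MEM_REF
 advertises the weaker alignment guarantee of lxvw4x/lxvd2x instead
 of the 16-byte alignment a plain vector type would imply. */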
15674
15675 /* Unaligned vector stores. */
15676 case VSX_BUILTIN_STXVW4X_V16QI:
15677 case VSX_BUILTIN_STXVW4X_V8HI:
15678 case VSX_BUILTIN_STXVW4X_V4SF:
15679 case VSX_BUILTIN_STXVW4X_V4SI:
15680 case VSX_BUILTIN_STXVD2X_V2DF:
15681 case VSX_BUILTIN_STXVD2X_V2DI:
15682 {
15683 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15684 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15685 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15686 location_t loc = gimple_location (stmt);
15687 tree arg0_type = TREE_TYPE (arg0);
15688 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15689 tree arg2_type = ptr_type_node;
15690 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15691 required alignment (on Power) is 4 bytes regardless of data type. */
15692 tree align_stype = build_aligned_type (arg0_type, 4);
15693 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15694 the tree using the value from arg1. */
15695 gimple_seq stmts = NULL;
15696 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15697 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15698 arg2_type, arg2, temp_offset);
15699 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15700 if (!is_gimple_mem_ref_addr (temp_addr))
15701 {
15702 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15703 gimple *g = gimple_build_assign (t, temp_addr);
15704 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15705 temp_addr = t;
15706 }
15707 gimple *g;
15708 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15709 build_int_cst (arg2_type, 0)), arg0);
15710 gimple_set_location (g, loc);
15711 gsi_replace (gsi, g, true);
15712 return true;
15713 }
15714
15715 /* Vector fused multiply-add (fma). */
15716 case ALTIVEC_BUILTIN_VMADDFP:
15717 case VSX_BUILTIN_XVMADDDP:
15718 case ALTIVEC_BUILTIN_VMLADDUHM:
15719 {
15720 arg0 = gimple_call_arg (stmt, 0);
15721 arg1 = gimple_call_arg (stmt, 1);
15722 tree arg2 = gimple_call_arg (stmt, 2);
15723 lhs = gimple_call_lhs (stmt);
15724 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15725 gimple_call_set_lhs (g, lhs);
15726 gimple_call_set_nothrow (g, true);
15727 gimple_set_location (g, gimple_location (stmt));
15728 gsi_replace (gsi, g, true);
15729 return true;
15730 }
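 /* An illustrative sketch (hypothetical names): vr = vec_madd (a, b, c)
 is folded to a call to the FMA internal function,
 vr_1 = .FMA (a, b, c);
 which later passes can expand directly to a fused multiply-add. */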
15731
15732 /* Vector compares; EQ, NE, GE, GT, LE. */
15733 case ALTIVEC_BUILTIN_VCMPEQUB:
15734 case ALTIVEC_BUILTIN_VCMPEQUH:
15735 case ALTIVEC_BUILTIN_VCMPEQUW:
15736 case P8V_BUILTIN_VCMPEQUD:
15737 fold_compare_helper (gsi, EQ_EXPR, stmt);
15738 return true;
15739
15740 case P9V_BUILTIN_CMPNEB:
15741 case P9V_BUILTIN_CMPNEH:
15742 case P9V_BUILTIN_CMPNEW:
15743 fold_compare_helper (gsi, NE_EXPR, stmt);
15744 return true;
15745
15746 case VSX_BUILTIN_CMPGE_16QI:
15747 case VSX_BUILTIN_CMPGE_U16QI:
15748 case VSX_BUILTIN_CMPGE_8HI:
15749 case VSX_BUILTIN_CMPGE_U8HI:
15750 case VSX_BUILTIN_CMPGE_4SI:
15751 case VSX_BUILTIN_CMPGE_U4SI:
15752 case VSX_BUILTIN_CMPGE_2DI:
15753 case VSX_BUILTIN_CMPGE_U2DI:
15754 fold_compare_helper (gsi, GE_EXPR, stmt);
15755 return true;
15756
15757 case ALTIVEC_BUILTIN_VCMPGTSB:
15758 case ALTIVEC_BUILTIN_VCMPGTUB:
15759 case ALTIVEC_BUILTIN_VCMPGTSH:
15760 case ALTIVEC_BUILTIN_VCMPGTUH:
15761 case ALTIVEC_BUILTIN_VCMPGTSW:
15762 case ALTIVEC_BUILTIN_VCMPGTUW:
15763 case P8V_BUILTIN_VCMPGTUD:
15764 case P8V_BUILTIN_VCMPGTSD:
15765 fold_compare_helper (gsi, GT_EXPR, stmt);
15766 return true;
15767
15768 case VSX_BUILTIN_CMPLE_16QI:
15769 case VSX_BUILTIN_CMPLE_U16QI:
15770 case VSX_BUILTIN_CMPLE_8HI:
15771 case VSX_BUILTIN_CMPLE_U8HI:
15772 case VSX_BUILTIN_CMPLE_4SI:
15773 case VSX_BUILTIN_CMPLE_U4SI:
15774 case VSX_BUILTIN_CMPLE_2DI:
15775 case VSX_BUILTIN_CMPLE_U2DI:
15776 fold_compare_helper (gsi, LE_EXPR, stmt);
15777 return true;
15778
15779 /* Flavors of vec_splat_[us]{8,16,32}. */
15780 case ALTIVEC_BUILTIN_VSPLTISB:
15781 case ALTIVEC_BUILTIN_VSPLTISH:
15782 case ALTIVEC_BUILTIN_VSPLTISW:
15783 {
15784 arg0 = gimple_call_arg (stmt, 0);
15785 lhs = gimple_call_lhs (stmt);
15786
15787 /* Only fold the vec_splat_*() if arg0 is an integer constant in the
15788 5-bit signed range -16 to +15. */
15789 if (TREE_CODE (arg0) != INTEGER_CST
15790 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
15791 return false;
15792 gimple_seq stmts = NULL;
15793 location_t loc = gimple_location (stmt);
15794 tree splat_value = gimple_convert (&stmts, loc,
15795 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15796 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15797 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15798 g = gimple_build_assign (lhs, splat_tree);
15799 gimple_set_location (g, gimple_location (stmt));
15800 gsi_replace (gsi, g, true);
15801 return true;
15802 }
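 /* An illustrative sketch (hypothetical value): vec_splat_s32 (5)
 folds to the vector constant { 5, 5, 5, 5 }, after the literal is
 converted to the element type of the result vector. */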
15803
15804 /* Flavors of vec_splat. */
15805 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
15806 case ALTIVEC_BUILTIN_VSPLTB:
15807 case ALTIVEC_BUILTIN_VSPLTH:
15808 case ALTIVEC_BUILTIN_VSPLTW:
15809 case VSX_BUILTIN_XXSPLTD_V2DI:
15810 case VSX_BUILTIN_XXSPLTD_V2DF:
15811 {
15812 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15813 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
15814 /* Only fold vec_splat () if arg1 is a constant value and a valid
15815 index into the arg0 vector. */
15816 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15817 if (TREE_CODE (arg1) != INTEGER_CST
15818 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15819 return false;
15820 lhs = gimple_call_lhs (stmt);
15821 tree lhs_type = TREE_TYPE (lhs);
15822 tree arg0_type = TREE_TYPE (arg0);
15823 tree splat;
15824 if (TREE_CODE (arg0) == VECTOR_CST)
15825 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15826 else
15827 {
15828 /* Determine (in bits) the length and start location of the
15829 splat value for a call to the tree_vec_extract helper. */
15830 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15831 * BITS_PER_UNIT / n_elts;
15832 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
15833 tree len = build_int_cst (bitsizetype, splat_elem_size);
15834 tree start = build_int_cst (bitsizetype, splat_start_bit);
15835 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
15836 len, start);
15837 }
15838 /* And finally, build the new vector. */
15839 tree splat_tree = build_vector_from_val (lhs_type, splat);
15840 g = gimple_build_assign (lhs, splat_tree);
15841 gimple_set_location (g, gimple_location (stmt));
15842 gsi_replace (gsi, g, true);
15843 return true;
15844 }
15845
15846 /* vec_mergel (integrals). */
15847 case ALTIVEC_BUILTIN_VMRGLH:
15848 case ALTIVEC_BUILTIN_VMRGLW:
15849 case VSX_BUILTIN_XXMRGLW_4SI:
15850 case ALTIVEC_BUILTIN_VMRGLB:
15851 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15852 case VSX_BUILTIN_XXMRGLW_4SF:
15853 case VSX_BUILTIN_VEC_MERGEL_V2DF:
15854 fold_mergehl_helper (gsi, stmt, 1);
15855 return true;
15856 /* vec_mergeh (integrals). */
15857 case ALTIVEC_BUILTIN_VMRGHH:
15858 case ALTIVEC_BUILTIN_VMRGHW:
15859 case VSX_BUILTIN_XXMRGHW_4SI:
15860 case ALTIVEC_BUILTIN_VMRGHB:
15861 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15862 case VSX_BUILTIN_XXMRGHW_4SF:
15863 case VSX_BUILTIN_VEC_MERGEH_V2DF:
15864 fold_mergehl_helper (gsi, stmt, 0);
15865 return true;
15866
15867 /* Flavors of vec_mergee. */
15868 case P8V_BUILTIN_VMRGEW_V4SI:
15869 case P8V_BUILTIN_VMRGEW_V2DI:
15870 case P8V_BUILTIN_VMRGEW_V4SF:
15871 case P8V_BUILTIN_VMRGEW_V2DF:
15872 fold_mergeeo_helper (gsi, stmt, 0);
15873 return true;
15874 /* Flavors of vec_mergeo. */
15875 case P8V_BUILTIN_VMRGOW_V4SI:
15876 case P8V_BUILTIN_VMRGOW_V2DI:
15877 case P8V_BUILTIN_VMRGOW_V4SF:
15878 case P8V_BUILTIN_VMRGOW_V2DF:
15879 fold_mergeeo_helper (gsi, stmt, 1);
15880 return true;
15881
15882 /* d = vec_pack (a, b) */
15883 case P8V_BUILTIN_VPKUDUM:
15884 case ALTIVEC_BUILTIN_VPKUHUM:
15885 case ALTIVEC_BUILTIN_VPKUWUM:
15886 {
15887 arg0 = gimple_call_arg (stmt, 0);
15888 arg1 = gimple_call_arg (stmt, 1);
15889 lhs = gimple_call_lhs (stmt);
15890 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
15891 gimple_set_location (g, gimple_location (stmt));
15892 gsi_replace (gsi, g, true);
15893 return true;
15894 }
15895
15896 /* d = vec_unpackh (a) */
15897 /* Note that the VEC_UNPACK_{HI,LO}_EXPR used in the gimple_build_assign
15898 calls in this code is sensitive to endianness, and needs to be inverted
15899 to handle both LE and BE targets. */
15900 case ALTIVEC_BUILTIN_VUPKHSB:
15901 case ALTIVEC_BUILTIN_VUPKHSH:
15902 case P8V_BUILTIN_VUPKHSW:
15903 {
15904 arg0 = gimple_call_arg (stmt, 0);
15905 lhs = gimple_call_lhs (stmt);
15906 if (BYTES_BIG_ENDIAN)
15907 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15908 else
15909 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15910 gimple_set_location (g, gimple_location (stmt));
15911 gsi_replace (gsi, g, true);
15912 return true;
15913 }
15914 /* d = vec_unpackl (a) */
15915 case ALTIVEC_BUILTIN_VUPKLSB:
15916 case ALTIVEC_BUILTIN_VUPKLSH:
15917 case P8V_BUILTIN_VUPKLSW:
15918 {
15919 arg0 = gimple_call_arg (stmt, 0);
15920 lhs = gimple_call_lhs (stmt);
15921 if (BYTES_BIG_ENDIAN)
15922 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15923 else
15924 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15925 gimple_set_location (g, gimple_location (stmt));
15926 gsi_replace (gsi, g, true);
15927 return true;
15928 }
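 /* Illustrative note on the endian inversion: on a little-endian
 target the elements that vec_unpackh selects sit in the opposite
 half of the register from where they sit on big-endian, so the
 HI/LO tree codes are swapped, and vec_unpackl is the mirror image. */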
15929 /* There is no gimple type corresponding to pixel, so just return. */
15930 case ALTIVEC_BUILTIN_VUPKHPX:
15931 case ALTIVEC_BUILTIN_VUPKLPX:
15932 return false;
15933
15934 /* vec_perm. */
15935 case ALTIVEC_BUILTIN_VPERM_16QI:
15936 case ALTIVEC_BUILTIN_VPERM_8HI:
15937 case ALTIVEC_BUILTIN_VPERM_4SI:
15938 case ALTIVEC_BUILTIN_VPERM_2DI:
15939 case ALTIVEC_BUILTIN_VPERM_4SF:
15940 case ALTIVEC_BUILTIN_VPERM_2DF:
15941 {
15942 arg0 = gimple_call_arg (stmt, 0);
15943 arg1 = gimple_call_arg (stmt, 1);
15944 tree permute = gimple_call_arg (stmt, 2);
15945 lhs = gimple_call_lhs (stmt);
15946 location_t loc = gimple_location (stmt);
15947 gimple_seq stmts = NULL;
15948 // Convert arg0 and arg1 to match the type of the permute
15949 // for the VEC_PERM_EXPR operation.
15950 tree permute_type = (TREE_TYPE (permute));
15951 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
15952 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
15953 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
15954 permute_type, arg0_ptype, arg1_ptype,
15955 permute);
15956 // Convert the result back to the desired lhs type upon completion.
15957 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
15958 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15959 g = gimple_build_assign (lhs, temp);
15960 gimple_set_location (g, loc);
15961 gsi_replace (gsi, g, true);
15962 return true;
15963 }
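 /* An illustrative sketch (hypothetical names): for a V4SI permute,
 the operands are first converted to the type of the permute control
 vector so VEC_PERM_EXPR type-checks:
 a_1 = (permute type) va;
 b_2 = (permute type) vb;
 t_3 = VEC_PERM_EXPR <a_1, b_2, vp>;
 vr_4 = (lhs type) t_3; */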
15964
15965 default:
15966 if (TARGET_DEBUG_BUILTIN)
15967 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
15968 fn_code, fn_name1, fn_name2);
15969 break;
15970 }
15971
15972 return false;
15973 }
15974
15975 /* Expand an expression EXP that calls a built-in function,
15976 with result going to TARGET if that's convenient
15977 (and in mode MODE if that's convenient).
15978 SUBTARGET may be used as the target for computing one of EXP's operands.
15979 IGNORE is nonzero if the value is to be ignored. */
15980
15981 static rtx
15982 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15983 machine_mode mode ATTRIBUTE_UNUSED,
15984 int ignore ATTRIBUTE_UNUSED)
15985 {
15986 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15987 enum rs6000_builtins fcode
15988 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15989 size_t uns_fcode = (size_t)fcode;
15990 const struct builtin_description *d;
15991 size_t i;
15992 rtx ret;
15993 bool success;
15994 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15995 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15996 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15997
15998 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
15999 floating point type, depending on whether long double is the IBM extended
16000 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16001 we only define one variant of the built-in function, and switch the code
16002 when defining it, rather than defining two built-ins and using the
16003 overload table in rs6000-c.c to switch between the two. If we don't have
16004 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16005 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16006 if (FLOAT128_IEEE_P (TFmode))
16007 switch (icode)
16008 {
16009 default:
16010 break;
16011
16012 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16013 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16014 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16015 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16016 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16017 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16018 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16019 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16020 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16021 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16022 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16023 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16024 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16025 }
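 /* For example, a built-in registered with the KFmode pattern
 CODE_FOR_sqrtkf2_odd is redirected here to the equivalent TFmode
 pattern CODE_FOR_sqrttf2_odd when long double is IEEE 128-bit, so a
 single built-in definition serves both long-double configurations. */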
16026
16027 if (TARGET_DEBUG_BUILTIN)
16028 {
16029 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16030 const char *name2 = (icode != CODE_FOR_nothing)
16031 ? get_insn_name ((int) icode)
16032 : "nothing";
16033 const char *name3;
16034
16035 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16036 {
16037 default: name3 = "unknown"; break;
16038 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16039 case RS6000_BTC_UNARY: name3 = "unary"; break;
16040 case RS6000_BTC_BINARY: name3 = "binary"; break;
16041 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16042 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16043 case RS6000_BTC_ABS: name3 = "abs"; break;
16044 case RS6000_BTC_DST: name3 = "dst"; break;
16045 }
16046
16047
16048 fprintf (stderr,
16049 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16050 (name1) ? name1 : "---", fcode,
16051 (name2) ? name2 : "---", (int) icode,
16052 name3,
16053 func_valid_p ? "" : ", not valid");
16054 }
16055
16056 if (!func_valid_p)
16057 {
16058 rs6000_invalid_builtin (fcode);
16059
16060 /* Given it is invalid, just generate a normal call. */
16061 return expand_call (exp, target, ignore);
16062 }
16063
16064 switch (fcode)
16065 {
16066 case RS6000_BUILTIN_RECIP:
16067 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16068
16069 case RS6000_BUILTIN_RECIPF:
16070 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16071
16072 case RS6000_BUILTIN_RSQRTF:
16073 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16074
16075 case RS6000_BUILTIN_RSQRT:
16076 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16077
16078 case POWER7_BUILTIN_BPERMD:
16079 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16080 ? CODE_FOR_bpermd_di
16081 : CODE_FOR_bpermd_si), exp, target);
16082
16083 case RS6000_BUILTIN_GET_TB:
16084 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16085 target);
16086
16087 case RS6000_BUILTIN_MFTB:
16088 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16089 ? CODE_FOR_rs6000_mftb_di
16090 : CODE_FOR_rs6000_mftb_si),
16091 target);
16092
16093 case RS6000_BUILTIN_MFFS:
16094 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16095
16096 case RS6000_BUILTIN_MTFSB0:
16097 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16098
16099 case RS6000_BUILTIN_MTFSB1:
16100 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16101
16102 case RS6000_BUILTIN_SET_FPSCR_RN:
16103 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16104 exp);
16105
16106 case RS6000_BUILTIN_SET_FPSCR_DRN:
16107 return
16108 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16109 exp);
16110
16111 case RS6000_BUILTIN_MFFSL:
16112 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16113
16114 case RS6000_BUILTIN_MTFSF:
16115 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16116
16117 case RS6000_BUILTIN_CPU_INIT:
16118 case RS6000_BUILTIN_CPU_IS:
16119 case RS6000_BUILTIN_CPU_SUPPORTS:
16120 return cpu_expand_builtin (fcode, exp, target);
16121
16122 case MISC_BUILTIN_SPEC_BARRIER:
16123 {
16124 emit_insn (gen_speculation_barrier ());
16125 return NULL_RTX;
16126 }
16127
16128 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16129 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16130 {
16131 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16132 : (int) CODE_FOR_altivec_lvsl_direct);
16133 machine_mode tmode = insn_data[icode2].operand[0].mode;
16134 machine_mode mode = insn_data[icode2].operand[1].mode;
16135 tree arg;
16136 rtx op, addr, pat;
16137
16138 gcc_assert (TARGET_ALTIVEC);
16139
16140 arg = CALL_EXPR_ARG (exp, 0);
16141 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16142 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16143 addr = memory_address (mode, op);
16144 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16145 op = addr;
16146 else
16147 {
16148 /* For the load case we need to negate the address. */
16149 op = gen_reg_rtx (GET_MODE (addr));
16150 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16151 }
16152 op = gen_rtx_MEM (mode, op);
16153
16154 if (target == 0
16155 || GET_MODE (target) != tmode
16156 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16157 target = gen_reg_rtx (tmode);
16158
16159 pat = GEN_FCN (icode2) (target, op);
16160 if (!pat)
16161 return 0;
16162 emit_insn (pat);
16163
16164 return target;
16165 }
16166
16167 case ALTIVEC_BUILTIN_VCFUX:
16168 case ALTIVEC_BUILTIN_VCFSX:
16169 case ALTIVEC_BUILTIN_VCTUXS:
16170 case ALTIVEC_BUILTIN_VCTSXS:
16171 /* FIXME: There's got to be a nicer way to handle this case than
16172 constructing a new CALL_EXPR. */
16173 if (call_expr_nargs (exp) == 1)
16174 {
16175 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16176 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16177 }
16178 break;
16179
16180 /* For the pack and unpack int128 routines, fix up the builtin so it
16181 uses the correct IBM128 type. */
16182 case MISC_BUILTIN_PACK_IF:
16183 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16184 {
16185 icode = CODE_FOR_packtf;
16186 fcode = MISC_BUILTIN_PACK_TF;
16187 uns_fcode = (size_t)fcode;
16188 }
16189 break;
16190
16191 case MISC_BUILTIN_UNPACK_IF:
16192 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16193 {
16194 icode = CODE_FOR_unpacktf;
16195 fcode = MISC_BUILTIN_UNPACK_TF;
16196 uns_fcode = (size_t)fcode;
16197 }
16198 break;
16199
16200 default:
16201 break;
16202 }
16203
16204 if (TARGET_ALTIVEC)
16205 {
16206 ret = altivec_expand_builtin (exp, target, &success);
16207
16208 if (success)
16209 return ret;
16210 }
16211 if (TARGET_HTM)
16212 {
16213 ret = htm_expand_builtin (exp, target, &success);
16214
16215 if (success)
16216 return ret;
16217 }
16218
16219 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16220 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16221 gcc_assert (attr == RS6000_BTC_UNARY
16222 || attr == RS6000_BTC_BINARY
16223 || attr == RS6000_BTC_TERNARY
16224 || attr == RS6000_BTC_SPECIAL);
16225
16226 /* Handle simple unary operations. */
16227 d = bdesc_1arg;
16228 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16229 if (d->code == fcode)
16230 return rs6000_expand_unop_builtin (icode, exp, target);
16231
16232 /* Handle simple binary operations. */
16233 d = bdesc_2arg;
16234 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16235 if (d->code == fcode)
16236 return rs6000_expand_binop_builtin (icode, exp, target);
16237
16238 /* Handle simple ternary operations. */
16239 d = bdesc_3arg;
16240 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16241 if (d->code == fcode)
16242 return rs6000_expand_ternop_builtin (icode, exp, target);
16243
16244 /* Handle simple no-argument operations. */
16245 d = bdesc_0arg;
16246 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16247 if (d->code == fcode)
16248 return rs6000_expand_zeroop_builtin (icode, target);
16249
16250 gcc_unreachable ();
16251 }
16252
16253 /* Create a builtin vector type with a name, taking care not to give
16254 the canonical type a name. */
16255
16256 static tree
16257 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16258 {
16259 tree result = build_vector_type (elt_type, num_elts);
16260
16261 /* Copy so we don't give the canonical type a name. */
16262 result = build_variant_type_copy (result);
16263
16264 add_builtin_type (name, result);
16265
16266 return result;
16267 }
16268
16269 static void
16270 rs6000_init_builtins (void)
16271 {
16272 tree tdecl;
16273 tree ftype;
16274 machine_mode mode;
16275
16276 if (TARGET_DEBUG_BUILTIN)
16277 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16278 (TARGET_ALTIVEC) ? ", altivec" : "",
16279 (TARGET_VSX) ? ", vsx" : "");
16280
16281 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16282 : "__vector long long",
16283 intDI_type_node, 2);
16284 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16285 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16286 intSI_type_node, 4);
16287 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16288 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16289 intHI_type_node, 8);
16290 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16291 intQI_type_node, 16);
16292
16293 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16294 unsigned_intQI_type_node, 16);
16295 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16296 unsigned_intHI_type_node, 8);
16297 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16298 unsigned_intSI_type_node, 4);
16299 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16300 ? "__vector unsigned long"
16301 : "__vector unsigned long long",
16302 unsigned_intDI_type_node, 2);
16303
16304 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16305
16306 const_str_type_node
16307 = build_pointer_type (build_qualified_type (char_type_node,
16308 TYPE_QUAL_CONST));
16309
16310 /* We use V1TI mode as a special container to hold __int128_t items that
16311 must live in VSX registers. */
16312 if (intTI_type_node)
16313 {
16314 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16315 intTI_type_node, 1);
16316 unsigned_V1TI_type_node
16317 = rs6000_vector_type ("__vector unsigned __int128",
16318 unsigned_intTI_type_node, 1);
16319 }
16320
16321 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16322 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16323 'vector unsigned short'. */
16324
16325 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16326 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16327 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16328 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16329 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16330
16331 long_integer_type_internal_node = long_integer_type_node;
16332 long_unsigned_type_internal_node = long_unsigned_type_node;
16333 long_long_integer_type_internal_node = long_long_integer_type_node;
16334 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16335 intQI_type_internal_node = intQI_type_node;
16336 uintQI_type_internal_node = unsigned_intQI_type_node;
16337 intHI_type_internal_node = intHI_type_node;
16338 uintHI_type_internal_node = unsigned_intHI_type_node;
16339 intSI_type_internal_node = intSI_type_node;
16340 uintSI_type_internal_node = unsigned_intSI_type_node;
16341 intDI_type_internal_node = intDI_type_node;
16342 uintDI_type_internal_node = unsigned_intDI_type_node;
16343 intTI_type_internal_node = intTI_type_node;
16344 uintTI_type_internal_node = unsigned_intTI_type_node;
16345 float_type_internal_node = float_type_node;
16346 double_type_internal_node = double_type_node;
16347 long_double_type_internal_node = long_double_type_node;
16348 dfloat64_type_internal_node = dfloat64_type_node;
16349 dfloat128_type_internal_node = dfloat128_type_node;
16350 void_type_internal_node = void_type_node;
16351
16352 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16353 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16354 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16355 format that uses a pair of doubles, depending on the switches and
16356 defaults.
16357
16358 If we don't support either 128-bit IBM double-double or IEEE 128-bit
16359 floating point, we need to make sure the type is non-zero, or else the
16360 self-test fails during bootstrap.
16361
16362 Always create __ibm128 as a separate type, even if the current long double
16363 format is IBM extended double.
16364
16365 For IEEE 128-bit floating point, always create the type __ieee128. If the
16366 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16367 __ieee128. */
16368 if (TARGET_FLOAT128_TYPE)
16369 {
16370 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16371 ibm128_float_type_node = long_double_type_node;
16372 else
16373 {
16374 ibm128_float_type_node = make_node (REAL_TYPE);
16375 TYPE_PRECISION (ibm128_float_type_node) = 128;
16376 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16377 layout_type (ibm128_float_type_node);
16378 }
16379
16380 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16381 "__ibm128");
16382
16383 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16384 ieee128_float_type_node = long_double_type_node;
16385 else
16386 ieee128_float_type_node = float128_type_node;
16387
16388 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16389 "__ieee128");
16390 }
16391
16392 else
16393 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16394
16395 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16396 tree type node. */
16397 builtin_mode_to_type[QImode][0] = integer_type_node;
16398 builtin_mode_to_type[HImode][0] = integer_type_node;
16399 builtin_mode_to_type[SImode][0] = intSI_type_node;
16400 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16401 builtin_mode_to_type[DImode][0] = intDI_type_node;
16402 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16403 builtin_mode_to_type[TImode][0] = intTI_type_node;
16404 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16405 builtin_mode_to_type[SFmode][0] = float_type_node;
16406 builtin_mode_to_type[DFmode][0] = double_type_node;
16407 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16408 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16409 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16410 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16411 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16412 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16413 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16414 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16415 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16416 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16417 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16418 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16419 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16420 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16421 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16422 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16423 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16424
16425 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16426 TYPE_NAME (bool_char_type_node) = tdecl;
16427
16428 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16429 TYPE_NAME (bool_short_type_node) = tdecl;
16430
16431 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16432 TYPE_NAME (bool_int_type_node) = tdecl;
16433
16434 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16435 TYPE_NAME (pixel_type_node) = tdecl;
16436
16437 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16438 bool_char_type_node, 16);
16439 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16440 bool_short_type_node, 8);
16441 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16442 bool_int_type_node, 4);
16443 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16444 ? "__vector __bool long"
16445 : "__vector __bool long long",
16446 bool_long_long_type_node, 2);
16447 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16448 pixel_type_node, 8);
16449
16450 /* Create Altivec and VSX builtins on machines with at least the
16451 general purpose extensions (970 and newer) to allow the use of
16452 the target attribute. */
16453 if (TARGET_EXTRA_BUILTINS)
16454 altivec_init_builtins ();
16455 if (TARGET_HTM)
16456 htm_init_builtins ();
16457
16458 if (TARGET_EXTRA_BUILTINS)
16459 rs6000_common_init_builtins ();
16460
16461 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16462 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16463 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16464
16465 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16466 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16467 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16468
16469 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16470 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16471 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16472
16473 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16474 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16475 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16476
16477 mode = (TARGET_64BIT) ? DImode : SImode;
16478 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16479 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16480 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16481
16482 ftype = build_function_type_list (unsigned_intDI_type_node,
16483 NULL_TREE);
16484 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16485
16486 if (TARGET_64BIT)
16487 ftype = build_function_type_list (unsigned_intDI_type_node,
16488 NULL_TREE);
16489 else
16490 ftype = build_function_type_list (unsigned_intSI_type_node,
16491 NULL_TREE);
16492 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16493
16494 ftype = build_function_type_list (double_type_node, NULL_TREE);
16495 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16496
16497 ftype = build_function_type_list (double_type_node, NULL_TREE);
16498 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16499
16500 ftype = build_function_type_list (void_type_node,
16501 intSI_type_node,
16502 NULL_TREE);
16503 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16504
16505 ftype = build_function_type_list (void_type_node,
16506 intSI_type_node,
16507 NULL_TREE);
16508 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16509
16510 ftype = build_function_type_list (void_type_node,
16511 intDI_type_node,
16512 NULL_TREE);
16513 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16514
16515 ftype = build_function_type_list (void_type_node,
16516 intDI_type_node,
16517 NULL_TREE);
16518 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16519
16520 ftype = build_function_type_list (void_type_node,
16521 intSI_type_node, double_type_node,
16522 NULL_TREE);
16523 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16524
16525 ftype = build_function_type_list (void_type_node, NULL_TREE);
16526 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16527 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16528 MISC_BUILTIN_SPEC_BARRIER);
16529
16530 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16531 NULL_TREE);
16532 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16533 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16534
16535 /* AIX libm provides clog as __clog. */
16536 if (TARGET_XCOFF &&
16537 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16538 set_user_assembler_name (tdecl, "__clog");
16539
16540 #ifdef SUBTARGET_INIT_BUILTINS
16541 SUBTARGET_INIT_BUILTINS;
16542 #endif
16543 }
16544
16545 /* Returns the rs6000 builtin decl for CODE. */
16546
16547 static tree
16548 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16549 {
16550 HOST_WIDE_INT fnmask;
16551
16552 if (code >= RS6000_BUILTIN_COUNT)
16553 return error_mark_node;
16554
16555 fnmask = rs6000_builtin_info[code].mask;
16556 if ((fnmask & rs6000_builtin_mask) != fnmask)
16557 {
16558 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16559 return error_mark_node;
16560 }
16561
16562 return rs6000_builtin_decls[code];
16563 }
16564
16565 static void
16566 altivec_init_builtins (void)
16567 {
16568 const struct builtin_description *d;
16569 size_t i;
16570 tree ftype;
16571 tree decl;
16572 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16573
16574 tree pvoid_type_node = build_pointer_type (void_type_node);
16575
16576 tree pcvoid_type_node
16577 = build_pointer_type (build_qualified_type (void_type_node,
16578 TYPE_QUAL_CONST));
16579
16580 tree int_ftype_opaque
16581 = build_function_type_list (integer_type_node,
16582 opaque_V4SI_type_node, NULL_TREE);
16583 tree opaque_ftype_opaque
16584 = build_function_type_list (integer_type_node, NULL_TREE);
16585 tree opaque_ftype_opaque_int
16586 = build_function_type_list (opaque_V4SI_type_node,
16587 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16588 tree opaque_ftype_opaque_opaque_int
16589 = build_function_type_list (opaque_V4SI_type_node,
16590 opaque_V4SI_type_node, opaque_V4SI_type_node,
16591 integer_type_node, NULL_TREE);
16592 tree opaque_ftype_opaque_opaque_opaque
16593 = build_function_type_list (opaque_V4SI_type_node,
16594 opaque_V4SI_type_node, opaque_V4SI_type_node,
16595 opaque_V4SI_type_node, NULL_TREE);
16596 tree opaque_ftype_opaque_opaque
16597 = build_function_type_list (opaque_V4SI_type_node,
16598 opaque_V4SI_type_node, opaque_V4SI_type_node,
16599 NULL_TREE);
16600 tree int_ftype_int_opaque_opaque
16601 = build_function_type_list (integer_type_node,
16602 integer_type_node, opaque_V4SI_type_node,
16603 opaque_V4SI_type_node, NULL_TREE);
16604 tree int_ftype_int_v4si_v4si
16605 = build_function_type_list (integer_type_node,
16606 integer_type_node, V4SI_type_node,
16607 V4SI_type_node, NULL_TREE);
16608 tree int_ftype_int_v2di_v2di
16609 = build_function_type_list (integer_type_node,
16610 integer_type_node, V2DI_type_node,
16611 V2DI_type_node, NULL_TREE);
16612 tree void_ftype_v4si
16613 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16614 tree v8hi_ftype_void
16615 = build_function_type_list (V8HI_type_node, NULL_TREE);
16616 tree void_ftype_void
16617 = build_function_type_list (void_type_node, NULL_TREE);
16618 tree void_ftype_int
16619 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16620
16621 tree opaque_ftype_long_pcvoid
16622 = build_function_type_list (opaque_V4SI_type_node,
16623 long_integer_type_node, pcvoid_type_node,
16624 NULL_TREE);
16625 tree v16qi_ftype_long_pcvoid
16626 = build_function_type_list (V16QI_type_node,
16627 long_integer_type_node, pcvoid_type_node,
16628 NULL_TREE);
16629 tree v8hi_ftype_long_pcvoid
16630 = build_function_type_list (V8HI_type_node,
16631 long_integer_type_node, pcvoid_type_node,
16632 NULL_TREE);
16633 tree v4si_ftype_long_pcvoid
16634 = build_function_type_list (V4SI_type_node,
16635 long_integer_type_node, pcvoid_type_node,
16636 NULL_TREE);
16637 tree v4sf_ftype_long_pcvoid
16638 = build_function_type_list (V4SF_type_node,
16639 long_integer_type_node, pcvoid_type_node,
16640 NULL_TREE);
16641 tree v2df_ftype_long_pcvoid
16642 = build_function_type_list (V2DF_type_node,
16643 long_integer_type_node, pcvoid_type_node,
16644 NULL_TREE);
16645 tree v2di_ftype_long_pcvoid
16646 = build_function_type_list (V2DI_type_node,
16647 long_integer_type_node, pcvoid_type_node,
16648 NULL_TREE);
16649 tree v1ti_ftype_long_pcvoid
16650 = build_function_type_list (V1TI_type_node,
16651 long_integer_type_node, pcvoid_type_node,
16652 NULL_TREE);
16653
16654 tree void_ftype_opaque_long_pvoid
16655 = build_function_type_list (void_type_node,
16656 opaque_V4SI_type_node, long_integer_type_node,
16657 pvoid_type_node, NULL_TREE);
16658 tree void_ftype_v4si_long_pvoid
16659 = build_function_type_list (void_type_node,
16660 V4SI_type_node, long_integer_type_node,
16661 pvoid_type_node, NULL_TREE);
16662 tree void_ftype_v16qi_long_pvoid
16663 = build_function_type_list (void_type_node,
16664 V16QI_type_node, long_integer_type_node,
16665 pvoid_type_node, NULL_TREE);
16666
16667 tree void_ftype_v16qi_pvoid_long
16668 = build_function_type_list (void_type_node,
16669 V16QI_type_node, pvoid_type_node,
16670 long_integer_type_node, NULL_TREE);
16671
16672 tree void_ftype_v8hi_long_pvoid
16673 = build_function_type_list (void_type_node,
16674 V8HI_type_node, long_integer_type_node,
16675 pvoid_type_node, NULL_TREE);
16676 tree void_ftype_v4sf_long_pvoid
16677 = build_function_type_list (void_type_node,
16678 V4SF_type_node, long_integer_type_node,
16679 pvoid_type_node, NULL_TREE);
16680 tree void_ftype_v2df_long_pvoid
16681 = build_function_type_list (void_type_node,
16682 V2DF_type_node, long_integer_type_node,
16683 pvoid_type_node, NULL_TREE);
16684 tree void_ftype_v1ti_long_pvoid
16685 = build_function_type_list (void_type_node,
16686 V1TI_type_node, long_integer_type_node,
16687 pvoid_type_node, NULL_TREE);
16688 tree void_ftype_v2di_long_pvoid
16689 = build_function_type_list (void_type_node,
16690 V2DI_type_node, long_integer_type_node,
16691 pvoid_type_node, NULL_TREE);
16692 tree int_ftype_int_v8hi_v8hi
16693 = build_function_type_list (integer_type_node,
16694 integer_type_node, V8HI_type_node,
16695 V8HI_type_node, NULL_TREE);
16696 tree int_ftype_int_v16qi_v16qi
16697 = build_function_type_list (integer_type_node,
16698 integer_type_node, V16QI_type_node,
16699 V16QI_type_node, NULL_TREE);
16700 tree int_ftype_int_v4sf_v4sf
16701 = build_function_type_list (integer_type_node,
16702 integer_type_node, V4SF_type_node,
16703 V4SF_type_node, NULL_TREE);
16704 tree int_ftype_int_v2df_v2df
16705 = build_function_type_list (integer_type_node,
16706 integer_type_node, V2DF_type_node,
16707 V2DF_type_node, NULL_TREE);
16708 tree v2di_ftype_v2di
16709 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16710 tree v4si_ftype_v4si
16711 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16712 tree v8hi_ftype_v8hi
16713 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16714 tree v16qi_ftype_v16qi
16715 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16716 tree v4sf_ftype_v4sf
16717 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16718 tree v2df_ftype_v2df
16719 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16720 tree void_ftype_pcvoid_int_int
16721 = build_function_type_list (void_type_node,
16722 pcvoid_type_node, integer_type_node,
16723 integer_type_node, NULL_TREE);
16724
16725 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16726 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16727 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16728 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16729 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16730 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16731 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16732 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16733 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16734 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16735 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16736 ALTIVEC_BUILTIN_LVXL_V2DF);
16737 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16738 ALTIVEC_BUILTIN_LVXL_V2DI);
16739 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16740 ALTIVEC_BUILTIN_LVXL_V4SF);
16741 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16742 ALTIVEC_BUILTIN_LVXL_V4SI);
16743 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16744 ALTIVEC_BUILTIN_LVXL_V8HI);
16745 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16746 ALTIVEC_BUILTIN_LVXL_V16QI);
16747 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16748 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16749 ALTIVEC_BUILTIN_LVX_V1TI);
16750 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16751 ALTIVEC_BUILTIN_LVX_V2DF);
16752 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16753 ALTIVEC_BUILTIN_LVX_V2DI);
16754 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16755 ALTIVEC_BUILTIN_LVX_V4SF);
16756 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16757 ALTIVEC_BUILTIN_LVX_V4SI);
16758 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16759 ALTIVEC_BUILTIN_LVX_V8HI);
16760 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16761 ALTIVEC_BUILTIN_LVX_V16QI);
16762 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16763 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16764 ALTIVEC_BUILTIN_STVX_V2DF);
16765 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16766 ALTIVEC_BUILTIN_STVX_V2DI);
16767 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16768 ALTIVEC_BUILTIN_STVX_V4SF);
16769 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16770 ALTIVEC_BUILTIN_STVX_V4SI);
16771 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16772 ALTIVEC_BUILTIN_STVX_V8HI);
16773 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16774 ALTIVEC_BUILTIN_STVX_V16QI);
16775 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16776 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16777 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16778 ALTIVEC_BUILTIN_STVXL_V2DF);
16779 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16780 ALTIVEC_BUILTIN_STVXL_V2DI);
16781 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16782 ALTIVEC_BUILTIN_STVXL_V4SF);
16783 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16784 ALTIVEC_BUILTIN_STVXL_V4SI);
16785 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16786 ALTIVEC_BUILTIN_STVXL_V8HI);
16787 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16788 ALTIVEC_BUILTIN_STVXL_V16QI);
16789 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16790 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16791 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16792 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16793 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16794 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16795 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16796 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16797 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16798 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16799 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16800 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16801 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16802 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16803 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16804 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16805
16806 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16807 VSX_BUILTIN_LXVD2X_V2DF);
16808 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16809 VSX_BUILTIN_LXVD2X_V2DI);
16810 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16811 VSX_BUILTIN_LXVW4X_V4SF);
16812 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16813 VSX_BUILTIN_LXVW4X_V4SI);
16814 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16815 VSX_BUILTIN_LXVW4X_V8HI);
16816 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16817 VSX_BUILTIN_LXVW4X_V16QI);
16818 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16819 VSX_BUILTIN_STXVD2X_V2DF);
16820 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16821 VSX_BUILTIN_STXVD2X_V2DI);
16822 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16823 VSX_BUILTIN_STXVW4X_V4SF);
16824 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16825 VSX_BUILTIN_STXVW4X_V4SI);
16826 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16827 VSX_BUILTIN_STXVW4X_V8HI);
16828 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16829 VSX_BUILTIN_STXVW4X_V16QI);
16830
16831 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16832 VSX_BUILTIN_LD_ELEMREV_V2DF);
16833 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16834 VSX_BUILTIN_LD_ELEMREV_V2DI);
16835 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16836 VSX_BUILTIN_LD_ELEMREV_V4SF);
16837 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16838 VSX_BUILTIN_LD_ELEMREV_V4SI);
16839 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16840 VSX_BUILTIN_LD_ELEMREV_V8HI);
16841 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16842 VSX_BUILTIN_LD_ELEMREV_V16QI);
16843 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16844 VSX_BUILTIN_ST_ELEMREV_V2DF);
16845 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16846 VSX_BUILTIN_ST_ELEMREV_V1TI);
16847 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16848 VSX_BUILTIN_ST_ELEMREV_V2DI);
16849 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16850 VSX_BUILTIN_ST_ELEMREV_V4SF);
16851 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16852 VSX_BUILTIN_ST_ELEMREV_V4SI);
16853 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16854 VSX_BUILTIN_ST_ELEMREV_V8HI);
16855 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16856 VSX_BUILTIN_ST_ELEMREV_V16QI);
16857
16858 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16859 VSX_BUILTIN_VEC_LD);
16860 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16861 VSX_BUILTIN_VEC_ST);
16862 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16863 VSX_BUILTIN_VEC_XL);
16864 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16865 VSX_BUILTIN_VEC_XL_BE);
16866 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16867 VSX_BUILTIN_VEC_XST);
16868 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16869 VSX_BUILTIN_VEC_XST_BE);
16870
16871 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16872 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16873 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16874
16875 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16876 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16877 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16878 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16879 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16880 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16881 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16882 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16883 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16884 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16885 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16886 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16887
16888 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16889 ALTIVEC_BUILTIN_VEC_ADDE);
16890 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16891 ALTIVEC_BUILTIN_VEC_ADDEC);
16892 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16893 ALTIVEC_BUILTIN_VEC_CMPNE);
16894 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16895 ALTIVEC_BUILTIN_VEC_MUL);
16896 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16897 ALTIVEC_BUILTIN_VEC_SUBE);
16898 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16899 ALTIVEC_BUILTIN_VEC_SUBEC);
16900
16901 /* Cell builtins. */
16902 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16903 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16904 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16905 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16906
16907 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16908 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16909 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16910 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16911
16912 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16913 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16914 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16915 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16916
16917 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16918 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16919 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16920 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16921
16922 if (TARGET_P9_VECTOR)
16923 {
16924 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16925 P9V_BUILTIN_STXVL);
16926 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16927 P9V_BUILTIN_XST_LEN_R);
16928 }
16929
16930 /* Add the DST variants. */
16931 d = bdesc_dst;
16932 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16933 {
16934 HOST_WIDE_INT mask = d->mask;
16935
16936 /* It is expected that these dst built-in functions may have
16937 d->icode equal to CODE_FOR_nothing. */
16938 if ((mask & builtin_mask) != mask)
16939 {
16940 if (TARGET_DEBUG_BUILTIN)
16941 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16942 d->name);
16943 continue;
16944 }
16945 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16946 }
16947
16948 /* Initialize the predicates. */
16949 d = bdesc_altivec_preds;
16950 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16951 {
16952 machine_mode mode1;
16953 tree type;
16954 HOST_WIDE_INT mask = d->mask;
16955
16956 if ((mask & builtin_mask) != mask)
16957 {
16958 if (TARGET_DEBUG_BUILTIN)
16959 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16960 d->name);
16961 continue;
16962 }
16963
16964 if (rs6000_overloaded_builtin_p (d->code))
16965 mode1 = VOIDmode;
16966 else
16967 {
16968 /* Cannot define builtin if the instruction is disabled. */
16969 gcc_assert (d->icode != CODE_FOR_nothing);
16970 mode1 = insn_data[d->icode].operand[1].mode;
16971 }
16972
16973 switch (mode1)
16974 {
16975 case E_VOIDmode:
16976 type = int_ftype_int_opaque_opaque;
16977 break;
16978 case E_V2DImode:
16979 type = int_ftype_int_v2di_v2di;
16980 break;
16981 case E_V4SImode:
16982 type = int_ftype_int_v4si_v4si;
16983 break;
16984 case E_V8HImode:
16985 type = int_ftype_int_v8hi_v8hi;
16986 break;
16987 case E_V16QImode:
16988 type = int_ftype_int_v16qi_v16qi;
16989 break;
16990 case E_V4SFmode:
16991 type = int_ftype_int_v4sf_v4sf;
16992 break;
16993 case E_V2DFmode:
16994 type = int_ftype_int_v2df_v2df;
16995 break;
16996 default:
16997 gcc_unreachable ();
16998 }
16999
17000 def_builtin (d->name, type, d->code);
17001 }
17002
17003 /* Initialize the abs* operators. */
17004 d = bdesc_abs;
17005 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17006 {
17007 machine_mode mode0;
17008 tree type;
17009 HOST_WIDE_INT mask = d->mask;
17010
17011 if ((mask & builtin_mask) != mask)
17012 {
17013 if (TARGET_DEBUG_BUILTIN)
17014 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17015 d->name);
17016 continue;
17017 }
17018
17019 /* Cannot define builtin if the instruction is disabled. */
17020 gcc_assert (d->icode != CODE_FOR_nothing);
17021 mode0 = insn_data[d->icode].operand[0].mode;
17022
17023 switch (mode0)
17024 {
17025 case E_V2DImode:
17026 type = v2di_ftype_v2di;
17027 break;
17028 case E_V4SImode:
17029 type = v4si_ftype_v4si;
17030 break;
17031 case E_V8HImode:
17032 type = v8hi_ftype_v8hi;
17033 break;
17034 case E_V16QImode:
17035 type = v16qi_ftype_v16qi;
17036 break;
17037 case E_V4SFmode:
17038 type = v4sf_ftype_v4sf;
17039 break;
17040 case E_V2DFmode:
17041 type = v2df_ftype_v2df;
17042 break;
17043 default:
17044 gcc_unreachable ();
17045 }
17046
17047 def_builtin (d->name, type, d->code);
17048 }
17049
17050 /* Initialize the target builtin that implements
17051 targetm.vectorize.builtin_mask_for_load. */
17052
17053 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17054 v16qi_ftype_long_pcvoid,
17055 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17056 BUILT_IN_MD, NULL, NULL_TREE);
17057 TREE_READONLY (decl) = 1;
17058 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17059 altivec_builtin_mask_for_load = decl;
17060
17061 /* Access to the vec_init patterns. */
17062 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17063 integer_type_node, integer_type_node,
17064 integer_type_node, NULL_TREE);
17065 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17066
17067 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17068 short_integer_type_node,
17069 short_integer_type_node,
17070 short_integer_type_node,
17071 short_integer_type_node,
17072 short_integer_type_node,
17073 short_integer_type_node,
17074 short_integer_type_node, NULL_TREE);
17075 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17076
17077 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17078 char_type_node, char_type_node,
17079 char_type_node, char_type_node,
17080 char_type_node, char_type_node,
17081 char_type_node, char_type_node,
17082 char_type_node, char_type_node,
17083 char_type_node, char_type_node,
17084 char_type_node, char_type_node,
17085 char_type_node, NULL_TREE);
17086 def_builtin ("__builtin_vec_init_v16qi", ftype,
17087 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17088
17089 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17090 float_type_node, float_type_node,
17091 float_type_node, NULL_TREE);
17092 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17093
17094 /* VSX builtins. */
17095 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17096 double_type_node, NULL_TREE);
17097 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17098
17099 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17100 intDI_type_node, NULL_TREE);
17101 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17102
17103 /* Access to the vec_set patterns. */
17104 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17105 intSI_type_node,
17106 integer_type_node, NULL_TREE);
17107 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17108
17109 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17110 intHI_type_node,
17111 integer_type_node, NULL_TREE);
17112 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17113
17114 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17115 intQI_type_node,
17116 integer_type_node, NULL_TREE);
17117 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17118
17119 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17120 float_type_node,
17121 integer_type_node, NULL_TREE);
17122 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17123
17124 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17125 double_type_node,
17126 integer_type_node, NULL_TREE);
17127 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17128
17129 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17130 intDI_type_node,
17131 integer_type_node, NULL_TREE);
17132 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17133
17134 /* Access to the vec_extract patterns. */
17135 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17136 integer_type_node, NULL_TREE);
17137 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17138
17139 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17140 integer_type_node, NULL_TREE);
17141 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17142
17143 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17144 integer_type_node, NULL_TREE);
17145 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17146
17147 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17148 integer_type_node, NULL_TREE);
17149 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17150
17151 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17152 integer_type_node, NULL_TREE);
17153 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17154
17155 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17156 integer_type_node, NULL_TREE);
17157 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17158
17159
17160 if (V1TI_type_node)
17161 {
17162 tree v1ti_ftype_long_pcvoid
17163 = build_function_type_list (V1TI_type_node,
17164 long_integer_type_node, pcvoid_type_node,
17165 NULL_TREE);
17166 tree void_ftype_v1ti_long_pvoid
17167 = build_function_type_list (void_type_node,
17168 V1TI_type_node, long_integer_type_node,
17169 pvoid_type_node, NULL_TREE);
17170 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17171 VSX_BUILTIN_LD_ELEMREV_V1TI);
17172 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17173 VSX_BUILTIN_LXVD2X_V1TI);
17174 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17175 VSX_BUILTIN_STXVD2X_V1TI);
17176 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17177 NULL_TREE, NULL_TREE);
17178 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17179 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17180 intTI_type_node,
17181 integer_type_node, NULL_TREE);
17182 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17183 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17184 integer_type_node, NULL_TREE);
17185 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17186 }
17187
17188 }
17189
17190 static void
17191 htm_init_builtins (void)
17192 {
17193 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17194 const struct builtin_description *d;
17195 size_t i;
17196
17197 d = bdesc_htm;
17198 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17199 {
17200 tree op[MAX_HTM_OPERANDS], type;
17201 HOST_WIDE_INT mask = d->mask;
17202 unsigned attr = rs6000_builtin_info[d->code].attr;
17203 bool void_func = (attr & RS6000_BTC_VOID);
17204 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17205 int nopnds = 0;
17206 tree gpr_type_node;
17207 tree rettype;
17208 tree argtype;
17209
17210 /* It is expected that these htm built-in functions may have
17211 d->icode equal to CODE_FOR_nothing. */
17212
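/* Note: on a 32-bit ABI with 64-bit instructions enabled
   (-m32 -mpowerpc64) the GPRs are still 64 bits wide, hence the
   long long choice below. */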
17213 if (TARGET_32BIT && TARGET_POWERPC64)
17214 gpr_type_node = long_long_unsigned_type_node;
17215 else
17216 gpr_type_node = long_unsigned_type_node;
17217
17218 if (attr & RS6000_BTC_SPR)
17219 {
17220 rettype = gpr_type_node;
17221 argtype = gpr_type_node;
17222 }
17223 else if (d->code == HTM_BUILTIN_TABORTDC
17224 || d->code == HTM_BUILTIN_TABORTDCI)
17225 {
17226 rettype = unsigned_type_node;
17227 argtype = gpr_type_node;
17228 }
17229 else
17230 {
17231 rettype = unsigned_type_node;
17232 argtype = unsigned_type_node;
17233 }
17234
17235 if ((mask & builtin_mask) != mask)
17236 {
17237 if (TARGET_DEBUG_BUILTIN)
17238 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17239 continue;
17240 }
17241
17242 if (d->name == 0)
17243 {
17244 if (TARGET_DEBUG_BUILTIN)
17245 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17246 (long unsigned) i);
17247 continue;
17248 }
17249
17250 op[nopnds++] = (void_func) ? void_type_node : rettype;
17251
17252 if (attr_args == RS6000_BTC_UNARY)
17253 op[nopnds++] = argtype;
17254 else if (attr_args == RS6000_BTC_BINARY)
17255 {
17256 op[nopnds++] = argtype;
17257 op[nopnds++] = argtype;
17258 }
17259 else if (attr_args == RS6000_BTC_TERNARY)
17260 {
17261 op[nopnds++] = argtype;
17262 op[nopnds++] = argtype;
17263 op[nopnds++] = argtype;
17264 }
17265
17266 switch (nopnds)
17267 {
17268 case 1:
17269 type = build_function_type_list (op[0], NULL_TREE);
17270 break;
17271 case 2:
17272 type = build_function_type_list (op[0], op[1], NULL_TREE);
17273 break;
17274 case 3:
17275 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17276 break;
17277 case 4:
17278 type = build_function_type_list (op[0], op[1], op[2], op[3],
17279 NULL_TREE);
17280 break;
17281 default:
17282 gcc_unreachable ();
17283 }
17284
17285 def_builtin (d->name, type, d->code);
17286 }
17287 }
17288
17289 /* Hash function for builtin functions with up to 3 arguments and a return
17290 type. */
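/* A brief sketch (illustrative): the four (mode, unsignedness) pairs
   are folded in positionally, each step multiplying by
   MAX_MACHINE_MODE and then by two, so two signatures differing only
   in the signedness of one operand hash to different values. */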
17291 hashval_t
17292 builtin_hasher::hash (builtin_hash_struct *bh)
17293 {
17294 unsigned ret = 0;
17295 int i;
17296
17297 for (i = 0; i < 4; i++)
17298 {
17299 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17300 ret = (ret * 2) + bh->uns_p[i];
17301 }
17302
17303 return ret;
17304 }
17305
17306 /* Compare builtin hash entries H1 and H2 for equivalence. */
17307 bool
17308 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17309 {
17310 return ((p1->mode[0] == p2->mode[0])
17311 && (p1->mode[1] == p2->mode[1])
17312 && (p1->mode[2] == p2->mode[2])
17313 && (p1->mode[3] == p2->mode[3])
17314 && (p1->uns_p[0] == p2->uns_p[0])
17315 && (p1->uns_p[1] == p2->uns_p[1])
17316 && (p1->uns_p[2] == p2->uns_p[2])
17317 && (p1->uns_p[3] == p2->uns_p[3]));
17318 }
17319
17320 /* Map types for builtin functions with an explicit return type and up to 3
17321 arguments. Functions with fewer than 3 arguments pass VOIDmode as the
17322 mode of each unused argument slot. */
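/* For illustration (modes chosen here, not taken from a particular
   caller): a two-argument builtin whose insn operates on V4SI values
   would be registered via

     builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
                            code, name);

   and, with all uns_p flags left at zero, get back a cached type
   equivalent to "v4si f (v4si, v4si)"; the trailing VOIDmode marks
   the unused third argument slot. */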
17323 static tree
17324 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17325 machine_mode mode_arg1, machine_mode mode_arg2,
17326 enum rs6000_builtins builtin, const char *name)
17327 {
17328 struct builtin_hash_struct h;
17329 struct builtin_hash_struct *h2;
17330 int num_args = 3;
17331 int i;
17332 tree ret_type = NULL_TREE;
17333 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17334
17335 /* Create builtin_hash_table. */
17336 if (builtin_hash_table == NULL)
17337 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17338
17339 h.type = NULL_TREE;
17340 h.mode[0] = mode_ret;
17341 h.mode[1] = mode_arg0;
17342 h.mode[2] = mode_arg1;
17343 h.mode[3] = mode_arg2;
17344 h.uns_p[0] = 0;
17345 h.uns_p[1] = 0;
17346 h.uns_p[2] = 0;
17347 h.uns_p[3] = 0;
17348
17349 /* If the builtin produces unsigned results or takes unsigned arguments,
17350 and it is returned as a decl for the vectorizer (such as widening
17351 multiplies or permutes), make sure the arguments and return value
17352 are type correct. */
17353 switch (builtin)
17354 {
17355 /* unsigned 1 argument functions. */
17356 case CRYPTO_BUILTIN_VSBOX:
17357 case CRYPTO_BUILTIN_VSBOX_BE:
17358 case P8V_BUILTIN_VGBBD:
17359 case MISC_BUILTIN_CDTBCD:
17360 case MISC_BUILTIN_CBCDTD:
17361 h.uns_p[0] = 1;
17362 h.uns_p[1] = 1;
17363 break;
17364
17365 /* unsigned 2 argument functions. */
17366 case ALTIVEC_BUILTIN_VMULEUB:
17367 case ALTIVEC_BUILTIN_VMULEUH:
17368 case P8V_BUILTIN_VMULEUW:
17369 case ALTIVEC_BUILTIN_VMULOUB:
17370 case ALTIVEC_BUILTIN_VMULOUH:
17371 case P8V_BUILTIN_VMULOUW:
17372 case CRYPTO_BUILTIN_VCIPHER:
17373 case CRYPTO_BUILTIN_VCIPHER_BE:
17374 case CRYPTO_BUILTIN_VCIPHERLAST:
17375 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17376 case CRYPTO_BUILTIN_VNCIPHER:
17377 case CRYPTO_BUILTIN_VNCIPHER_BE:
17378 case CRYPTO_BUILTIN_VNCIPHERLAST:
17379 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17380 case CRYPTO_BUILTIN_VPMSUMB:
17381 case CRYPTO_BUILTIN_VPMSUMH:
17382 case CRYPTO_BUILTIN_VPMSUMW:
17383 case CRYPTO_BUILTIN_VPMSUMD:
17384 case CRYPTO_BUILTIN_VPMSUM:
17385 case MISC_BUILTIN_ADDG6S:
17386 case MISC_BUILTIN_DIVWEU:
17387 case MISC_BUILTIN_DIVDEU:
17388 case VSX_BUILTIN_UDIV_V2DI:
17389 case ALTIVEC_BUILTIN_VMAXUB:
17390 case ALTIVEC_BUILTIN_VMINUB:
17391 case ALTIVEC_BUILTIN_VMAXUH:
17392 case ALTIVEC_BUILTIN_VMINUH:
17393 case ALTIVEC_BUILTIN_VMAXUW:
17394 case ALTIVEC_BUILTIN_VMINUW:
17395 case P8V_BUILTIN_VMAXUD:
17396 case P8V_BUILTIN_VMINUD:
17397 h.uns_p[0] = 1;
17398 h.uns_p[1] = 1;
17399 h.uns_p[2] = 1;
17400 break;
17401
17402 /* unsigned 3 argument functions. */
17403 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17404 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17405 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17406 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17407 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17408 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17409 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17410 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17411 case VSX_BUILTIN_VPERM_16QI_UNS:
17412 case VSX_BUILTIN_VPERM_8HI_UNS:
17413 case VSX_BUILTIN_VPERM_4SI_UNS:
17414 case VSX_BUILTIN_VPERM_2DI_UNS:
17415 case VSX_BUILTIN_XXSEL_16QI_UNS:
17416 case VSX_BUILTIN_XXSEL_8HI_UNS:
17417 case VSX_BUILTIN_XXSEL_4SI_UNS:
17418 case VSX_BUILTIN_XXSEL_2DI_UNS:
17419 case CRYPTO_BUILTIN_VPERMXOR:
17420 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17421 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17422 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17423 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17424 case CRYPTO_BUILTIN_VSHASIGMAW:
17425 case CRYPTO_BUILTIN_VSHASIGMAD:
17426 case CRYPTO_BUILTIN_VSHASIGMA:
17427 h.uns_p[0] = 1;
17428 h.uns_p[1] = 1;
17429 h.uns_p[2] = 1;
17430 h.uns_p[3] = 1;
17431 break;
17432
17433 /* signed permute functions with unsigned char mask. */
17434 case ALTIVEC_BUILTIN_VPERM_16QI:
17435 case ALTIVEC_BUILTIN_VPERM_8HI:
17436 case ALTIVEC_BUILTIN_VPERM_4SI:
17437 case ALTIVEC_BUILTIN_VPERM_4SF:
17438 case ALTIVEC_BUILTIN_VPERM_2DI:
17439 case ALTIVEC_BUILTIN_VPERM_2DF:
17440 case VSX_BUILTIN_VPERM_16QI:
17441 case VSX_BUILTIN_VPERM_8HI:
17442 case VSX_BUILTIN_VPERM_4SI:
17443 case VSX_BUILTIN_VPERM_4SF:
17444 case VSX_BUILTIN_VPERM_2DI:
17445 case VSX_BUILTIN_VPERM_2DF:
17446 h.uns_p[3] = 1;
17447 break;
17448
17449 /* unsigned args, signed return. */
17450 case VSX_BUILTIN_XVCVUXDSP:
17451 case VSX_BUILTIN_XVCVUXDDP_UNS:
17452 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17453 h.uns_p[1] = 1;
17454 break;
17455
17456 /* signed args, unsigned return. */
17457 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17458 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17459 case MISC_BUILTIN_UNPACK_TD:
17460 case MISC_BUILTIN_UNPACK_V1TI:
17461 h.uns_p[0] = 1;
17462 break;
17463
17464 /* unsigned arguments, bool return (compares). */
17465 case ALTIVEC_BUILTIN_VCMPEQUB:
17466 case ALTIVEC_BUILTIN_VCMPEQUH:
17467 case ALTIVEC_BUILTIN_VCMPEQUW:
17468 case P8V_BUILTIN_VCMPEQUD:
17469 case VSX_BUILTIN_CMPGE_U16QI:
17470 case VSX_BUILTIN_CMPGE_U8HI:
17471 case VSX_BUILTIN_CMPGE_U4SI:
17472 case VSX_BUILTIN_CMPGE_U2DI:
17473 case ALTIVEC_BUILTIN_VCMPGTUB:
17474 case ALTIVEC_BUILTIN_VCMPGTUH:
17475 case ALTIVEC_BUILTIN_VCMPGTUW:
17476 case P8V_BUILTIN_VCMPGTUD:
17477 h.uns_p[1] = 1;
17478 h.uns_p[2] = 1;
17479 break;
17480
17481 /* unsigned arguments for 128-bit pack instructions. */
17482 case MISC_BUILTIN_PACK_TD:
17483 case MISC_BUILTIN_PACK_V1TI:
17484 h.uns_p[1] = 1;
17485 h.uns_p[2] = 1;
17486 break;
17487
17488 /* unsigned second arguments (vector shift right). */
17489 case ALTIVEC_BUILTIN_VSRB:
17490 case ALTIVEC_BUILTIN_VSRH:
17491 case ALTIVEC_BUILTIN_VSRW:
17492 case P8V_BUILTIN_VSRD:
17493 h.uns_p[2] = 1;
17494 break;
17495
17496 default:
17497 break;
17498 }
17499
17500 /* Figure out how many args are present. */
17501 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17502 num_args--;
17503
17504 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17505 if (!ret_type && h.uns_p[0])
17506 ret_type = builtin_mode_to_type[h.mode[0]][0];
17507
17508 if (!ret_type)
17509 fatal_error (input_location,
17510 "internal error: builtin function %qs had an unexpected "
17511 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17512
17513 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17514 arg_type[i] = NULL_TREE;
17515
17516 for (i = 0; i < num_args; i++)
17517 {
17518 int m = (int) h.mode[i+1];
17519 int uns_p = h.uns_p[i+1];
17520
17521 arg_type[i] = builtin_mode_to_type[m][uns_p];
17522 if (!arg_type[i] && uns_p)
17523 arg_type[i] = builtin_mode_to_type[m][0];
17524
17525 if (!arg_type[i])
17526 fatal_error (input_location,
17527 "internal error: builtin function %qs, argument %d "
17528 "had unexpected argument type %qs", name, i,
17529 GET_MODE_NAME (m));
17530 }
17531
17532 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17533 if (*found == NULL)
17534 {
17535 h2 = ggc_alloc<builtin_hash_struct> ();
17536 *h2 = h;
17537 *found = h2;
17538
17539 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17540 arg_type[2], NULL_TREE);
17541 }
17542
17543 return (*found)->type;
17544 }
17545
17546 static void
17547 rs6000_common_init_builtins (void)
17548 {
17549 const struct builtin_description *d;
17550 size_t i;
17551
17552 tree opaque_ftype_opaque = NULL_TREE;
17553 tree opaque_ftype_opaque_opaque = NULL_TREE;
17554 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17555 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17556
17557 /* Create Altivec and VSX builtins on machines with at least the
17558 general purpose extensions (970 and newer) to allow the use of
17559 the target attribute. */
17560
17561 if (TARGET_EXTRA_BUILTINS)
17562 builtin_mask |= RS6000_BTM_COMMON;
17563
17564 /* Add the ternary operators. */
17565 d = bdesc_3arg;
17566 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17567 {
17568 tree type;
17569 HOST_WIDE_INT mask = d->mask;
17570
17571 if ((mask & builtin_mask) != mask)
17572 {
17573 if (TARGET_DEBUG_BUILTIN)
17574 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17575 continue;
17576 }
17577
17578 if (rs6000_overloaded_builtin_p (d->code))
17579 {
17580 if (! (type = opaque_ftype_opaque_opaque_opaque))
17581 type = opaque_ftype_opaque_opaque_opaque
17582 = build_function_type_list (opaque_V4SI_type_node,
17583 opaque_V4SI_type_node,
17584 opaque_V4SI_type_node,
17585 opaque_V4SI_type_node,
17586 NULL_TREE);
17587 }
17588 else
17589 {
17590 enum insn_code icode = d->icode;
17591 if (d->name == 0)
17592 {
17593 if (TARGET_DEBUG_BUILTIN)
17594 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17595 (long unsigned)i);
17596
17597 continue;
17598 }
17599
17600 if (icode == CODE_FOR_nothing)
17601 {
17602 if (TARGET_DEBUG_BUILTIN)
17603 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17604 d->name);
17605
17606 continue;
17607 }
17608
17609 type = builtin_function_type (insn_data[icode].operand[0].mode,
17610 insn_data[icode].operand[1].mode,
17611 insn_data[icode].operand[2].mode,
17612 insn_data[icode].operand[3].mode,
17613 d->code, d->name);
17614 }
17615
17616 def_builtin (d->name, type, d->code);
17617 }
17618
17619 /* Add the binary operators. */
17620 d = bdesc_2arg;
17621 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17622 {
17623 machine_mode mode0, mode1, mode2;
17624 tree type;
17625 HOST_WIDE_INT mask = d->mask;
17626
17627 if ((mask & builtin_mask) != mask)
17628 {
17629 if (TARGET_DEBUG_BUILTIN)
17630 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17631 continue;
17632 }
17633
17634 if (rs6000_overloaded_builtin_p (d->code))
17635 {
17636 if (! (type = opaque_ftype_opaque_opaque))
17637 type = opaque_ftype_opaque_opaque
17638 = build_function_type_list (opaque_V4SI_type_node,
17639 opaque_V4SI_type_node,
17640 opaque_V4SI_type_node,
17641 NULL_TREE);
17642 }
17643 else
17644 {
17645 enum insn_code icode = d->icode;
17646 if (d->name == 0)
17647 {
17648 if (TARGET_DEBUG_BUILTIN)
17649 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17650 (long unsigned)i);
17651
17652 continue;
17653 }
17654
17655 if (icode == CODE_FOR_nothing)
17656 {
17657 if (TARGET_DEBUG_BUILTIN)
17658 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17659 d->name);
17660
17661 continue;
17662 }
17663
17664 mode0 = insn_data[icode].operand[0].mode;
17665 mode1 = insn_data[icode].operand[1].mode;
17666 mode2 = insn_data[icode].operand[2].mode;
17667
17668 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17669 d->code, d->name);
17670 }
17671
17672 def_builtin (d->name, type, d->code);
17673 }
17674
17675 /* Add the simple unary operators. */
17676 d = bdesc_1arg;
17677 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17678 {
17679 machine_mode mode0, mode1;
17680 tree type;
17681 HOST_WIDE_INT mask = d->mask;
17682
17683 if ((mask & builtin_mask) != mask)
17684 {
17685 if (TARGET_DEBUG_BUILTIN)
17686 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17687 continue;
17688 }
17689
17690 if (rs6000_overloaded_builtin_p (d->code))
17691 {
17692 if (! (type = opaque_ftype_opaque))
17693 type = opaque_ftype_opaque
17694 = build_function_type_list (opaque_V4SI_type_node,
17695 opaque_V4SI_type_node,
17696 NULL_TREE);
17697 }
17698 else
17699 {
17700 enum insn_code icode = d->icode;
17701 if (d->name == 0)
17702 {
17703 if (TARGET_DEBUG_BUILTIN)
17704 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17705 (long unsigned)i);
17706
17707 continue;
17708 }
17709
17710 if (icode == CODE_FOR_nothing)
17711 {
17712 if (TARGET_DEBUG_BUILTIN)
17713 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17714 d->name);
17715
17716 continue;
17717 }
17718
17719 mode0 = insn_data[icode].operand[0].mode;
17720 mode1 = insn_data[icode].operand[1].mode;
17721
17722 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17723 d->code, d->name);
17724 }
17725
17726 def_builtin (d->name, type, d->code);
17727 }
17728
17729 /* Add the simple no-argument operators. */
17730 d = bdesc_0arg;
17731 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17732 {
17733 machine_mode mode0;
17734 tree type;
17735 HOST_WIDE_INT mask = d->mask;
17736
17737 if ((mask & builtin_mask) != mask)
17738 {
17739 if (TARGET_DEBUG_BUILTIN)
17740 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17741 continue;
17742 }
17743 if (rs6000_overloaded_builtin_p (d->code))
17744 {
17745 if (!opaque_ftype_opaque)
17746 opaque_ftype_opaque
17747 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17748 type = opaque_ftype_opaque;
17749 }
17750 else
17751 {
17752 enum insn_code icode = d->icode;
17753 if (d->name == 0)
17754 {
17755 if (TARGET_DEBUG_BUILTIN)
17756 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17757 (long unsigned) i);
17758 continue;
17759 }
17760 if (icode == CODE_FOR_nothing)
17761 {
17762 if (TARGET_DEBUG_BUILTIN)
17763 fprintf (stderr,
17764 "rs6000_builtin, skip no-argument %s (no code)\n",
17765 d->name);
17766 continue;
17767 }
17768 mode0 = insn_data[icode].operand[0].mode;
17769 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17770 d->code, d->name);
17771 }
17772 def_builtin (d->name, type, d->code);
17773 }
17774 }
17775
17776 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
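/* For instance (illustrative): under the default (non-XL-compatible)
   ABI an IBM double-double addition becomes a call to __gcc_qadd,
   while -mxl-compat routes it to _xlqadd instead. */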
17777 static void
17778 init_float128_ibm (machine_mode mode)
17779 {
17780 if (!TARGET_XL_COMPAT)
17781 {
17782 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17783 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17784 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17785 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17786
17787 if (!TARGET_HARD_FLOAT)
17788 {
17789 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17790 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17791 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17792 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17793 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17794 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17795 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17796 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17797
17798 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17799 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17800 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17801 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17802 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17803 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17804 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17805 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17806 }
17807 }
17808 else
17809 {
17810 set_optab_libfunc (add_optab, mode, "_xlqadd");
17811 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17812 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17813 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17814 }
17815
17816 /* Add various conversions for IFmode to use the traditional TFmode
17817 names. */
17818 if (mode == IFmode)
17819 {
17820 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17821 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17822 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17823 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17824 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17825 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17826
17827 if (TARGET_POWERPC64)
17828 {
17829 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17830 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17831 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17832 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17833 }
17834 }
17835 }
17836
17837 /* Create a decl for either complex long double multiply or complex long double
17838 divide when long double is IEEE 128-bit floating point. We can't use
17839 __multc3 and __divtc3 because the original long double using IBM extended
17840 double used those names. The complex multiply/divide functions are encoded
17841 as builtin functions with a complex result and 4 scalar inputs. */
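/* At the C level the created decl behaves as if declared (a sketch,
   assuming -mabi=ieeelongdouble so that long double is IEEE 128-bit):

     _Complex long double __mulkc3 (long double, long double,
                                    long double, long double);

   that is, a complex result built from the four scalar real and
   imaginary parts of the two operands. */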
17842
17843 static void
17844 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17845 {
17846 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17847 name, NULL_TREE);
17848
17849 set_builtin_decl (fncode, fndecl, true);
17850
17851 if (TARGET_DEBUG_BUILTIN)
17852 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17853
17854 return;
17855 }
17856
17857 /* Set up IEEE 128-bit floating point routines. Use different names if the
17858 arguments can be passed in a vector register. The historical PowerPC
17859 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17860 continue to use that if we aren't using vector registers to pass IEEE
17861 128-bit floating point. */
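/* E.g. (illustrative): when IEEE 128-bit values are passed in vector
   registers, a KFmode add resolves to __addkf3; the software-emulation
   fallback below keeps the historical _q_add name. */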
17862
17863 static void
17864 init_float128_ieee (machine_mode mode)
17865 {
17866 if (FLOAT128_VECTOR_P (mode))
17867 {
17868 static bool complex_muldiv_init_p = false;
17869
17870 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
17871 we have clone or target attributes, this will be called a second
17872 time. We want to create the built-in functions only once. */
17873 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
17874 {
17875 complex_muldiv_init_p = true;
17876 built_in_function fncode_mul =
17877 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17878 - MIN_MODE_COMPLEX_FLOAT);
17879 built_in_function fncode_div =
17880 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17881 - MIN_MODE_COMPLEX_FLOAT);
17882
17883 tree fntype = build_function_type_list (complex_long_double_type_node,
17884 long_double_type_node,
17885 long_double_type_node,
17886 long_double_type_node,
17887 long_double_type_node,
17888 NULL_TREE);
17889
17890 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17891 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17892 }
17893
17894 set_optab_libfunc (add_optab, mode, "__addkf3");
17895 set_optab_libfunc (sub_optab, mode, "__subkf3");
17896 set_optab_libfunc (neg_optab, mode, "__negkf2");
17897 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17898 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17899 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17900 set_optab_libfunc (abs_optab, mode, "__abskf2");
17901 set_optab_libfunc (powi_optab, mode, "__powikf2");
17902
17903 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17904 set_optab_libfunc (ne_optab, mode, "__nekf2");
17905 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17906 set_optab_libfunc (ge_optab, mode, "__gekf2");
17907 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17908 set_optab_libfunc (le_optab, mode, "__lekf2");
17909 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17910
17911 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17912 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17913 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17914 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17915
17916 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17917 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17918 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17919
17920 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17921 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17922 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17923
17924 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
17925 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
17926 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
17927 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
17928 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
17929 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
17930
17931 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17932 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17933 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17934 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17935
17936 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17937 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17938 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17939 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17940
17941 if (TARGET_POWERPC64)
17942 {
17943 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17944 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17945 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17946 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17947 }
17948 }
17949
17950 else
17951 {
17952 set_optab_libfunc (add_optab, mode, "_q_add");
17953 set_optab_libfunc (sub_optab, mode, "_q_sub");
17954 set_optab_libfunc (neg_optab, mode, "_q_neg");
17955 set_optab_libfunc (smul_optab, mode, "_q_mul");
17956 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17957 if (TARGET_PPC_GPOPT)
17958 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17959
17960 set_optab_libfunc (eq_optab, mode, "_q_feq");
17961 set_optab_libfunc (ne_optab, mode, "_q_fne");
17962 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17963 set_optab_libfunc (ge_optab, mode, "_q_fge");
17964 set_optab_libfunc (lt_optab, mode, "_q_flt");
17965 set_optab_libfunc (le_optab, mode, "_q_fle");
17966
17967 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17968 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17969 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17970 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17971 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17972 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17973 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17974 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17975 }
17976 }
17977
17978 static void
17979 rs6000_init_libfuncs (void)
17980 {
17981 /* __float128 support. */
17982 if (TARGET_FLOAT128_TYPE)
17983 {
17984 init_float128_ibm (IFmode);
17985 init_float128_ieee (KFmode);
17986 }
17987
17988 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17989 if (TARGET_LONG_DOUBLE_128)
17990 {
17991 if (!TARGET_IEEEQUAD)
17992 init_float128_ibm (TFmode);
17993
17994 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17995 else
17996 init_float128_ieee (TFmode);
17997 }
17998 }
17999
18000 /* Emit a potentially record-form instruction, setting DST from SRC.
18001 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18002 signed comparison of DST with zero. If DOT is 1, the generated RTL
18003 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18004 is CR0, do a single dot insn (as a PARALLEL); otherwise, do a SET and
18005 a separate COMPARE. */
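/* As a sketch of the RTL shapes involved (illustrative only): with
   DOT == 1 and CCREG == CR0 this emits

     (parallel [(set CCREG (compare SRC 0))
                (clobber DST)])

   while DOT == 2 replaces the clobber with (set DST SRC). With DOT
   nonzero but CCREG not CR0, a plain move is followed by a separate
   (set CCREG (compare DST 0)). */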
18006
18007 void
18008 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18009 {
18010 if (dot == 0)
18011 {
18012 emit_move_insn (dst, src);
18013 return;
18014 }
18015
18016 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18017 {
18018 emit_move_insn (dst, src);
18019 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18020 return;
18021 }
18022
18023 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18024 if (dot == 1)
18025 {
18026 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18027 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18028 }
18029 else
18030 {
18031 rtx set = gen_rtx_SET (dst, src);
18032 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18033 }
18034 }
18035
18036 \f
18037 /* A validation routine: say whether CODE, a condition code, and MODE
18038 match. The other alternatives either don't make sense or should
18039 never be generated. */
18040
18041 void
18042 validate_condition_mode (enum rtx_code code, machine_mode mode)
18043 {
18044 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18045 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18046 && GET_MODE_CLASS (mode) == MODE_CC);
18047
18048 /* These don't make sense. */
18049 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18050 || mode != CCUNSmode);
18051
18052 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18053 || mode == CCUNSmode);
18054
18055 gcc_assert (mode == CCFPmode
18056 || (code != ORDERED && code != UNORDERED
18057 && code != UNEQ && code != LTGT
18058 && code != UNGT && code != UNLT
18059 && code != UNGE && code != UNLE));
18060
18061 /* These should never be generated except for
18062 flag_finite_math_only. */
18063 gcc_assert (mode != CCFPmode
18064 || flag_finite_math_only
18065 || (code != LE && code != GE
18066 && code != UNEQ && code != LTGT
18067 && code != UNGT && code != UNLT));
18068
18069 /* These are invalid; the information is not there. */
18070 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18071 }
18072
18073 \f
18074 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18075 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18076 not zero, store there the bit offset (counted from the right) where
18077 the single stretch of 1 bits begins; and similarly for B, the bit
18078 offset where it ends. */
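/* A worked example (mask value chosen for illustration): for the
   DImode mask 0x0ff0 the single stretch of ones occupies bits 4..11,
   so this returns true with *E == 4 and *B == 11. A wrap-around run
   such as 0xff000000000000ff is also accepted (*E == 56, *B == 7);
   the stretch wraps past bit 63 back through bit 0. */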
18079
18080 bool
18081 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18082 {
18083 unsigned HOST_WIDE_INT val = INTVAL (mask);
18084 unsigned HOST_WIDE_INT bit;
18085 int nb, ne;
18086 int n = GET_MODE_PRECISION (mode);
18087
18088 if (mode != DImode && mode != SImode)
18089 return false;
18090
18091 if (INTVAL (mask) >= 0)
18092 {
18093 bit = val & -val;
18094 ne = exact_log2 (bit);
18095 nb = exact_log2 (val + bit);
18096 }
18097 else if (val + 1 == 0)
18098 {
18099 nb = n;
18100 ne = 0;
18101 }
18102 else if (val & 1)
18103 {
18104 val = ~val;
18105 bit = val & -val;
18106 nb = exact_log2 (bit);
18107 ne = exact_log2 (val + bit);
18108 }
18109 else
18110 {
18111 bit = val & -val;
18112 ne = exact_log2 (bit);
18113 if (val + bit == 0)
18114 nb = n;
18115 else
18116 nb = 0;
18117 }
18118
18119 nb--;
18120
18121 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18122 return false;
18123
18124 if (b)
18125 *b = nb;
18126 if (e)
18127 *e = ne;
18128
18129 return true;
18130 }
18131
18132 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18133 or rldicr instruction, to implement an AND with it in mode MODE. */
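/* Two illustrative DImode cases (values chosen here): the mask
   0x00000000ffffffff (B == 31, E == 0) is accepted and maps to a
   single rldicl, while the wrap-around mask 0xff000000000000ff
   (B == 7, E == 56) is rejected, since an AND needs a mask that
   does not wrap. */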
18134
18135 bool
18136 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18137 {
18138 int nb, ne;
18139
18140 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18141 return false;
18142
18143 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18144 does not wrap. */
18145 if (mode == DImode)
18146 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18147
18148 /* For SImode, rlwinm can do everything. */
18149 if (mode == SImode)
18150 return (nb < 32 && ne < 32);
18151
18152 return false;
18153 }
18154
18155 /* Return the instruction template for an AND with mask in mode MODE, with
18156 operands OPERANDS. If DOT is true, make it a record-form instruction. */
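/* For example (mask chosen for illustration): a DImode AND mask of
   0x00000000ffffffff (E == 0, B == 31) takes the first case below and
   yields "rldicl %0,%1,0,32", i.e. clear the upper 32 bits. */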
18157
18158 const char *
18159 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18160 {
18161 int nb, ne;
18162
18163 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18164 gcc_unreachable ();
18165
18166 if (mode == DImode && ne == 0)
18167 {
18168 operands[3] = GEN_INT (63 - nb);
18169 if (dot)
18170 return "rldicl. %0,%1,0,%3";
18171 return "rldicl %0,%1,0,%3";
18172 }
18173
18174 if (mode == DImode && nb == 63)
18175 {
18176 operands[3] = GEN_INT (63 - ne);
18177 if (dot)
18178 return "rldicr. %0,%1,0,%3";
18179 return "rldicr %0,%1,0,%3";
18180 }
18181
18182 if (nb < 32 && ne < 32)
18183 {
18184 operands[3] = GEN_INT (31 - nb);
18185 operands[4] = GEN_INT (31 - ne);
18186 if (dot)
18187 return "rlwinm. %0,%1,0,%3,%4";
18188 return "rlwinm %0,%1,0,%3,%4";
18189 }
18190
18191 gcc_unreachable ();
18192 }
18193
18194 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18195 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18196 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
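/* For instance (values chosen for illustration): an SImode AND with
   mask 0x00ffff00 (B == 23, E == 8) after an ASHIFT by 8 is accepted,
   since the mask fits one rlwinm and E >= SH; the same mask after an
   ASHIFT by 12 is rejected, because the mask would keep bits 8..11,
   which the shift zeros but a rotate would fill with wrapped bits. */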
18197
18198 bool
18199 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18200 {
18201 int nb, ne;
18202
18203 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18204 return false;
18205
18206 int n = GET_MODE_PRECISION (mode);
18207 int sh = -1;
18208
18209 if (CONST_INT_P (XEXP (shift, 1)))
18210 {
18211 sh = INTVAL (XEXP (shift, 1));
18212 if (sh < 0 || sh >= n)
18213 return false;
18214 }
18215
18216 rtx_code code = GET_CODE (shift);
18217
18218 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18219 if (sh == 0)
18220 code = ROTATE;
18221
18222 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18223 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18224 code = ASHIFT;
18225 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18226 {
18227 code = LSHIFTRT;
18228 sh = n - sh;
18229 }
18230
18231 /* DImode rotates need rld*. */
18232 if (mode == DImode && code == ROTATE)
18233 return (nb == 63 || ne == 0 || ne == sh);
18234
18235 /* SImode rotates need rlw*. */
18236 if (mode == SImode && code == ROTATE)
18237 return (nb < 32 && ne < 32 && sh < 32);
18238
18239 /* Wrap-around masks are only okay for rotates. */
18240 if (ne > nb)
18241 return false;
18242
18243 /* Variable shifts are only okay for rotates. */
18244 if (sh < 0)
18245 return false;
18246
18247 /* Don't allow ASHIFT if the mask is wrong for that. */
18248 if (code == ASHIFT && ne < sh)
18249 return false;
18250
18251 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18252 if the mask is wrong for that. */
18253 if (nb < 32 && ne < 32 && sh < 32
18254 && !(code == LSHIFTRT && nb >= 32 - sh))
18255 return true;
18256
18257 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18258 if the mask is wrong for that. */
18259 if (code == LSHIFTRT)
18260 sh = 64 - sh;
18261 if (nb == 63 || ne == 0 || ne == sh)
18262 return !(code == LSHIFTRT && nb >= sh);
18263
18264 return false;
18265 }
18266
18267 /* Return the instruction template for a shift with mask in mode MODE, with
18268 operands OPERANDS. If DOT is true, make it a record-form instruction. */
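/* For instance (values chosen here): a DImode LSHIFTRT by 16 under
   mask 0x0000ffffffffffff (E == 0, B == 47) takes the first case: the
   shift count is rewritten to 64 - 16 == 48 and the template becomes
   "rldicl %0,%1,48,16", the rotate-and-clear-left form of a 16-bit
   shift right. */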
18269
18270 const char *
18271 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18272 {
18273 int nb, ne;
18274
18275 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18276 gcc_unreachable ();
18277
18278 if (mode == DImode && ne == 0)
18279 {
18280 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18281 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18282 operands[3] = GEN_INT (63 - nb);
18283 if (dot)
18284 return "rld%I2cl. %0,%1,%2,%3";
18285 return "rld%I2cl %0,%1,%2,%3";
18286 }
18287
18288 if (mode == DImode && nb == 63)
18289 {
18290 operands[3] = GEN_INT (63 - ne);
18291 if (dot)
18292 return "rld%I2cr. %0,%1,%2,%3";
18293 return "rld%I2cr %0,%1,%2,%3";
18294 }
18295
18296 if (mode == DImode
18297 && GET_CODE (operands[4]) != LSHIFTRT
18298 && CONST_INT_P (operands[2])
18299 && ne == INTVAL (operands[2]))
18300 {
18301 operands[3] = GEN_INT (63 - nb);
18302 if (dot)
18303 return "rld%I2c. %0,%1,%2,%3";
18304 return "rld%I2c %0,%1,%2,%3";
18305 }
18306
18307 if (nb < 32 && ne < 32)
18308 {
18309 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18310 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18311 operands[3] = GEN_INT (31 - nb);
18312 operands[4] = GEN_INT (31 - ne);
18313 /* This insn can also be a 64-bit rotate with mask that really makes
18314 it just a shift right (with mask); the %h modifiers below adjust
18315 for that situation (the shift count is >= 32 in that case). */
18316 if (dot)
18317 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18318 return "rlw%I2nm %0,%1,%h2,%3,%4";
18319 }
18320
18321 gcc_unreachable ();
18322 }
18323
18324 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18325 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18326 ASHIFT, or LSHIFTRT) in mode MODE. */
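/* Illustrative DImode case (values chosen here): a ROTATE by 16 under
   mask 0x0000ffffffff0000 (B == 47, E == 16) satisfies E == SH, so a
   single rldimi can insert the bit 16..47 field; with E != SH the
   rotated source and the mask would not line up, and this returns
   false. */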
18327
18328 bool
18329 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18330 {
18331 int nb, ne;
18332
18333 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18334 return false;
18335
18336 int n = GET_MODE_PRECISION (mode);
18337
18338 int sh = INTVAL (XEXP (shift, 1));
18339 if (sh < 0 || sh >= n)
18340 return false;
18341
18342 rtx_code code = GET_CODE (shift);
18343
18344 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18345 if (sh == 0)
18346 code = ROTATE;
18347
18348 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18349 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18350 code = ASHIFT;
18351 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18352 {
18353 code = LSHIFTRT;
18354 sh = n - sh;
18355 }
18356
18357 /* DImode rotates need rldimi. */
18358 if (mode == DImode && code == ROTATE)
18359 return (ne == sh);
18360
18361 /* SImode rotates need rlwimi. */
18362 if (mode == SImode && code == ROTATE)
18363 return (nb < 32 && ne < 32 && sh < 32);
18364
18365 /* Wrap-around masks are only okay for rotates. */
18366 if (ne > nb)
18367 return false;
18368
18369 /* Don't allow ASHIFT if the mask is wrong for that. */
18370 if (code == ASHIFT && ne < sh)
18371 return false;
18372
18373 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18374 if the mask is wrong for that. */
18375 if (nb < 32 && ne < 32 && sh < 32
18376 && !(code == LSHIFTRT && nb >= 32 - sh))
18377 return true;
18378
18379 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18380 if the mask is wrong for that. */
18381 if (code == LSHIFTRT)
18382 sh = 64 - sh;
18383 if (ne == sh)
18384 return !(code == LSHIFTRT && nb >= sh);
18385
18386 return false;
18387 }
18388
18389 /* Return the instruction template for an insert with mask in mode MODE, with
18390 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18391
18392 const char *
18393 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18394 {
18395 int nb, ne;
18396
18397 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18398 gcc_unreachable ();
18399
18400 /* Prefer rldimi because rlwimi is cracked. */
18401 if (TARGET_POWERPC64
18402 && (!dot || mode == DImode)
18403 && GET_CODE (operands[4]) != LSHIFTRT
18404 && ne == INTVAL (operands[2]))
18405 {
18406 operands[3] = GEN_INT (63 - nb);
18407 if (dot)
18408 return "rldimi. %0,%1,%2,%3";
18409 return "rldimi %0,%1,%2,%3";
18410 }
18411
18412 if (nb < 32 && ne < 32)
18413 {
18414 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18415 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18416 operands[3] = GEN_INT (31 - nb);
18417 operands[4] = GEN_INT (31 - ne);
18418 if (dot)
18419 return "rlwimi. %0,%1,%2,%3,%4";
18420 return "rlwimi %0,%1,%2,%3,%4";
18421 }
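  /* As a worked example of the two cases above: inserting an 8-bit field at
     bit 32 of a DImode value (mask 0xff00000000, rotate by 32) satisfies
     ne == sh and emits "rldimi %0,%1,32,24", while the same field at bit 16
     (mask 0xff0000, shift by 16) fits in 32 bits and emits
     "rlwimi %0,%1,16,8,15".  */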
18422
18423 gcc_unreachable ();
18424 }
18425
18426 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18427 using two machine instructions. */
18428
18429 bool
18430 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18431 {
18432 /* There are two kinds of AND we can handle with two insns:
18433 1) those we can do with two rl* insns;
18434 2) ori[s];xori[s].
18435
18436 We do not handle that last case yet. */
18437
18438 /* If there is just one stretch of ones, we can do it. */
18439 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18440 return true;
18441
18442 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18443 one insn, we can do the whole thing with two. */
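/* For example, for c = 0xf0f the code below computes bit1 = 0x001 (lowest
   set bit), bit2 = 0x010 (lowest clear bit above it), val1 = 0xf00, and
   bit3 = 0x100, so the candidate mask is 0xf0f + 0x100 - 0x010 = 0xfff,
   a single run of ones, and we return true.  */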
18444 unsigned HOST_WIDE_INT val = INTVAL (c);
18445 unsigned HOST_WIDE_INT bit1 = val & -val;
18446 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18447 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18448 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18449 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18450 }
18451
18452 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18453 If EXPAND is true, split rotate-and-mask instructions we generate to
18454 their constituent parts as well (this is used during expand); if DOT
18455 is 1, make the last insn a record-form instruction clobbering the
18456 destination GPR and setting the CC reg (from operands[3]); if 2, set
18457 that GPR as well as the CC reg. */
18458
18459 void
18460 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18461 {
18462 gcc_assert (!(expand && dot));
18463
18464 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18465
18466 /* If the mask is one stretch of ones, the mode must be DImode; shift
18467 left, mask, then shift right. This generates better code than doing the
18468 masks without shifts, or shifting first right and then left. */
18469 int nb, ne;
18470 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18471 {
18472 gcc_assert (mode == DImode);
18473
18474 int shift = 63 - nb;
18475 if (expand)
18476 {
18477 rtx tmp1 = gen_reg_rtx (DImode);
18478 rtx tmp2 = gen_reg_rtx (DImode);
18479 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18480 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18481 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18482 }
18483 else
18484 {
18485 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18486 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18487 emit_move_insn (operands[0], tmp);
18488 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18489 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18490 }
18491 return;
18492 }
18493
18494 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18495 that does the rest. */
18496 unsigned HOST_WIDE_INT bit1 = val & -val;
18497 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18498 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18499 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18500
18501 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18502 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
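/* Continuing the 0xf0f example: mask1 = -0x100 + 0x010 - 1 = ~0xf0 (all
   ones except the hole) and mask2 = 0xfff, so val == (mask1 & mask2).  */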
18503
18504 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18505
18506 /* Two "no-rotate"-and-mask instructions, for SImode. */
18507 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18508 {
18509 gcc_assert (mode == SImode);
18510
18511 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18512 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18513 emit_move_insn (reg, tmp);
18514 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18515 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18516 return;
18517 }
18518
18519 gcc_assert (mode == DImode);
18520
18521 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18522 insns; we have to do the first in SImode, because it wraps. */
18523 if (mask2 <= 0xffffffff
18524 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18525 {
18526 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18527 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18528 GEN_INT (mask1));
18529 rtx reg_low = gen_lowpart (SImode, reg);
18530 emit_move_insn (reg_low, tmp);
18531 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18532 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18533 return;
18534 }
18535
18536 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18537 at the top end), rotate back and clear the other hole. */
18538 int right = exact_log2 (bit3);
18539 int left = 64 - right;
18540
18541 /* Rotate the mask too. */
18542 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
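/* For example, DImode val = 0xf00000000f reaches this point with
   bit2 = 0x010 and bit3 = 0x1000000000, so right = 36, left = 28,
   mask2 = 0xffffffffff, and mask1 rotates to 0xffffffff: rotate left 28,
   clear the high 32 bits, rotate back, then clear the bits above bit 39.  */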
18543
18544 if (expand)
18545 {
18546 rtx tmp1 = gen_reg_rtx (DImode);
18547 rtx tmp2 = gen_reg_rtx (DImode);
18548 rtx tmp3 = gen_reg_rtx (DImode);
18549 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18550 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18551 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18552 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18553 }
18554 else
18555 {
18556 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18557 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18558 emit_move_insn (operands[0], tmp);
18559 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18560 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18561 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18562 }
18563 }
18564 \f
18565 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18566 for lfq and stfq insns iff the registers are hard registers. */
18567
18568 int
18569 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18570 {
18571 /* We might have been passed a SUBREG. */
18572 if (!REG_P (reg1) || !REG_P (reg2))
18573 return 0;
18574
18575 /* We might have been passed non-floating-point registers. */
18576 if (!FP_REGNO_P (REGNO (reg1))
18577 || !FP_REGNO_P (REGNO (reg2)))
18578 return 0;
18579
18580 return (REGNO (reg1) == REGNO (reg2) - 1);
18581 }
18582
18583 /* Return 1 if addr1 and addr2 are suitable for an lfq or stfq insn.
18584 addr1 and addr2 must be in consecutive memory locations
18585 (addr2 == addr1 + 8). */
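/* For example, 16(r3) and 24(r3) qualify; 16(r3) and 20(r3), or 16(r3)
   and 24(r4), do not.  */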
18586
18587 int
18588 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18589 {
18590 rtx addr1, addr2;
18591 unsigned int reg1, reg2;
18592 int offset1, offset2;
18593
18594 /* The mems cannot be volatile. */
18595 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18596 return 0;
18597
18598 addr1 = XEXP (mem1, 0);
18599 addr2 = XEXP (mem2, 0);
18600
18601 /* Extract an offset (if used) from the first addr. */
18602 if (GET_CODE (addr1) == PLUS)
18603 {
18604 /* If not a REG, return zero. */
18605 if (!REG_P (XEXP (addr1, 0)))
18606 return 0;
18607 else
18608 {
18609 reg1 = REGNO (XEXP (addr1, 0));
18610 /* The offset must be constant! */
18611 if (!CONST_INT_P (XEXP (addr1, 1)))
18612 return 0;
18613 offset1 = INTVAL (XEXP (addr1, 1));
18614 }
18615 }
18616 else if (!REG_P (addr1))
18617 return 0;
18618 else
18619 {
18620 reg1 = REGNO (addr1);
18621 /* This was a simple (mem (reg)) expression. Offset is 0. */
18622 offset1 = 0;
18623 }
18624
18625 /* And now for the second addr. */
18626 if (GET_CODE (addr2) == PLUS)
18627 {
18628 /* If not a REG, return zero. */
18629 if (!REG_P (XEXP (addr2, 0)))
18630 return 0;
18631 else
18632 {
18633 reg2 = REGNO (XEXP (addr2, 0));
18634 /* The offset must be constant. */
18635 if (!CONST_INT_P (XEXP (addr2, 1)))
18636 return 0;
18637 offset2 = INTVAL (XEXP (addr2, 1));
18638 }
18639 }
18640 else if (!REG_P (addr2))
18641 return 0;
18642 else
18643 {
18644 reg2 = REGNO (addr2);
18645 /* This was a simple (mem (reg)) expression. Offset is 0. */
18646 offset2 = 0;
18647 }
18648
18649 /* Both of these must have the same base register. */
18650 if (reg1 != reg2)
18651 return 0;
18652
18653 /* The offset for the second addr must be 8 more than the first addr. */
18654 if (offset2 != offset1 + 8)
18655 return 0;
18656
18657 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18658 instructions. */
18659 return 1;
18660 }
18661 \f
18662 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18663 need to use DDmode; in all other cases we can use the same mode. */
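/* For example, an SDmode value being spilled gets a DDmode (8-byte) slot,
   since the FPRs hold SDmode values in the DDmode format.  */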
18664 static machine_mode
18665 rs6000_secondary_memory_needed_mode (machine_mode mode)
18666 {
18667 if (lra_in_progress && mode == SDmode)
18668 return DDmode;
18669 return mode;
18670 }
18671
18672 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18673 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18674 only work on the traditional altivec registers, note if an altivec register
18675 was chosen. */
18676
18677 static enum rs6000_reg_type
18678 register_to_reg_type (rtx reg, bool *is_altivec)
18679 {
18680 HOST_WIDE_INT regno;
18681 enum reg_class rclass;
18682
18683 if (SUBREG_P (reg))
18684 reg = SUBREG_REG (reg);
18685
18686 if (!REG_P (reg))
18687 return NO_REG_TYPE;
18688
18689 regno = REGNO (reg);
18690 if (!HARD_REGISTER_NUM_P (regno))
18691 {
18692 if (!lra_in_progress && !reload_completed)
18693 return PSEUDO_REG_TYPE;
18694
18695 regno = true_regnum (reg);
18696 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
18697 return PSEUDO_REG_TYPE;
18698 }
18699
18700 gcc_assert (regno >= 0);
18701
18702 if (is_altivec && ALTIVEC_REGNO_P (regno))
18703 *is_altivec = true;
18704
18705 rclass = rs6000_regno_regclass[regno];
18706 return reg_class_to_reg_type[(int)rclass];
18707 }
18708
18709 /* Helper function to return the cost of adding a TOC entry address. */
18710
18711 static inline int
18712 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18713 {
18714 int ret;
18715
18716 if (TARGET_CMODEL != CMODEL_SMALL)
18717 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18718
18719 else
18720 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18721
18722 return ret;
18723 }
18724
18725 /* Helper function for rs6000_secondary_reload to determine whether the memory
18726 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18727 needs reloading. Return negative if the memory is not handled by the memory
18728 helper functions and a different reload method should be tried, 0 if no
18729 additional instructions are needed, and positive to give the extra cost for the
18730 memory. */
18731
18732 static int
18733 rs6000_secondary_reload_memory (rtx addr,
18734 enum reg_class rclass,
18735 machine_mode mode)
18736 {
18737 int extra_cost = 0;
18738 rtx reg, and_arg, plus_arg0, plus_arg1;
18739 addr_mask_type addr_mask;
18740 const char *type = NULL;
18741 const char *fail_msg = NULL;
18742
18743 if (GPR_REG_CLASS_P (rclass))
18744 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18745
18746 else if (rclass == FLOAT_REGS)
18747 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18748
18749 else if (rclass == ALTIVEC_REGS)
18750 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18751
18752 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18753 else if (rclass == VSX_REGS)
18754 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18755 & ~RELOAD_REG_AND_M16);
18756
18757 /* If the register allocator hasn't made up its mind yet on the register
18758 class to use, settle on defaults. */
18759 else if (rclass == NO_REGS)
18760 {
18761 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18762 & ~RELOAD_REG_AND_M16);
18763
18764 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18765 addr_mask &= ~(RELOAD_REG_INDEXED
18766 | RELOAD_REG_PRE_INCDEC
18767 | RELOAD_REG_PRE_MODIFY);
18768 }
18769
18770 else
18771 addr_mask = 0;
18772
18773 /* If the register isn't valid in this register class, just return now. */
18774 if ((addr_mask & RELOAD_REG_VALID) == 0)
18775 {
18776 if (TARGET_DEBUG_ADDR)
18777 {
18778 fprintf (stderr,
18779 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18780 "not valid in class\n",
18781 GET_MODE_NAME (mode), reg_class_names[rclass]);
18782 debug_rtx (addr);
18783 }
18784
18785 return -1;
18786 }
18787
18788 switch (GET_CODE (addr))
18789 {
18790 /* Does the register class support auto update forms for this mode? We
18791 don't need a scratch register, since the powerpc only supports
18792 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18793 case PRE_INC:
18794 case PRE_DEC:
18795 reg = XEXP (addr, 0);
18796 if (!base_reg_operand (addr, GET_MODE (reg)))
18797 {
18798 fail_msg = "no base register #1";
18799 extra_cost = -1;
18800 }
18801
18802 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18803 {
18804 extra_cost = 1;
18805 type = "update";
18806 }
18807 break;
18808
18809 case PRE_MODIFY:
18810 reg = XEXP (addr, 0);
18811 plus_arg1 = XEXP (addr, 1);
18812 if (!base_reg_operand (reg, GET_MODE (reg))
18813 || GET_CODE (plus_arg1) != PLUS
18814 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18815 {
18816 fail_msg = "bad PRE_MODIFY";
18817 extra_cost = -1;
18818 }
18819
18820 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18821 {
18822 extra_cost = 1;
18823 type = "update";
18824 }
18825 break;
18826
18827 /* Do we need to simulate AND -16 to clear the bottom address bits used
18828 in VMX load/stores? Only allow the AND for vector sizes. */
18829 case AND:
18830 and_arg = XEXP (addr, 0);
18831 if (GET_MODE_SIZE (mode) != 16
18832 || !CONST_INT_P (XEXP (addr, 1))
18833 || INTVAL (XEXP (addr, 1)) != -16)
18834 {
18835 fail_msg = "bad Altivec AND #1";
18836 extra_cost = -1;
18837 }
18838
18839 if (rclass != ALTIVEC_REGS)
18840 {
18841 if (legitimate_indirect_address_p (and_arg, false))
18842 extra_cost = 1;
18843
18844 else if (legitimate_indexed_address_p (and_arg, false))
18845 extra_cost = 2;
18846
18847 else
18848 {
18849 fail_msg = "bad Altivec AND #2";
18850 extra_cost = -1;
18851 }
18852
18853 type = "and";
18854 }
18855 break;
18856
18857 /* If this is an indirect address, make sure it is a base register. */
18858 case REG:
18859 case SUBREG:
18860 if (!legitimate_indirect_address_p (addr, false))
18861 {
18862 extra_cost = 1;
18863 type = "move";
18864 }
18865 break;
18866
18867 /* If this is an indexed address, make sure the register class can handle
18868 indexed addresses for this mode. */
18869 case PLUS:
18870 plus_arg0 = XEXP (addr, 0);
18871 plus_arg1 = XEXP (addr, 1);
18872
18873 /* (plus (plus (reg) (constant)) (constant)) is generated during
18874 push_reload processing, so handle it now. */
18875 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18876 {
18877 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18878 {
18879 extra_cost = 1;
18880 type = "offset";
18881 }
18882 }
18883
18884 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18885 push_reload processing, so handle it now. */
18886 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18887 {
18888 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18889 {
18890 extra_cost = 1;
18891 type = "indexed #2";
18892 }
18893 }
18894
18895 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18896 {
18897 fail_msg = "no base register #2";
18898 extra_cost = -1;
18899 }
18900
18901 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18902 {
18903 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18904 || !legitimate_indexed_address_p (addr, false))
18905 {
18906 extra_cost = 1;
18907 type = "indexed";
18908 }
18909 }
18910
18911 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18912 && CONST_INT_P (plus_arg1))
18913 {
18914 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18915 {
18916 extra_cost = 1;
18917 type = "vector d-form offset";
18918 }
18919 }
18920
18921 /* Make sure the register class can handle offset addresses. */
18922 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18923 {
18924 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18925 {
18926 extra_cost = 1;
18927 type = "offset #2";
18928 }
18929 }
18930
18931 else
18932 {
18933 fail_msg = "bad PLUS";
18934 extra_cost = -1;
18935 }
18936
18937 break;
18938
18939 case LO_SUM:
18940 /* Quad offsets are restricted and can't handle normal addresses. */
18941 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18942 {
18943 extra_cost = -1;
18944 type = "vector d-form lo_sum";
18945 }
18946
18947 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18948 {
18949 fail_msg = "bad LO_SUM";
18950 extra_cost = -1;
18951 }
18952
18953 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18954 {
18955 extra_cost = 1;
18956 type = "lo_sum";
18957 }
18958 break;
18959
18960 /* Static addresses need to create a TOC entry. */
18961 case CONST:
18962 case SYMBOL_REF:
18963 case LABEL_REF:
18964 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18965 {
18966 extra_cost = -1;
18967 type = "vector d-form lo_sum #2";
18968 }
18969
18970 else
18971 {
18972 type = "address";
18973 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18974 }
18975 break;
18976
18977 /* TOC references look like offsettable memory. */
18978 case UNSPEC:
18979 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18980 {
18981 fail_msg = "bad UNSPEC";
18982 extra_cost = -1;
18983 }
18984
18985 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18986 {
18987 extra_cost = -1;
18988 type = "vector d-form lo_sum #3";
18989 }
18990
18991 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18992 {
18993 extra_cost = 1;
18994 type = "toc reference";
18995 }
18996 break;
18997
18998 default:
18999 {
19000 fail_msg = "bad address";
19001 extra_cost = -1;
19002 }
19003 }
19004
19005 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19006 {
19007 if (extra_cost < 0)
19008 fprintf (stderr,
19009 "rs6000_secondary_reload_memory error: mode = %s, "
19010 "class = %s, addr_mask = '%s', %s\n",
19011 GET_MODE_NAME (mode),
19012 reg_class_names[rclass],
19013 rs6000_debug_addr_mask (addr_mask, false),
19014 (fail_msg != NULL) ? fail_msg : "<bad address>");
19015
19016 else
19017 fprintf (stderr,
19018 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19019 "addr_mask = '%s', extra cost = %d, %s\n",
19020 GET_MODE_NAME (mode),
19021 reg_class_names[rclass],
19022 rs6000_debug_addr_mask (addr_mask, false),
19023 extra_cost,
19024 (type) ? type : "<none>");
19025
19026 debug_rtx (addr);
19027 }
19028
19029 return extra_cost;
19030 }
19031
19032 /* Helper function for rs6000_secondary_reload to return true if a move to a
19033 different register class is really a simple move. */
19034
19035 static bool
19036 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19037 enum rs6000_reg_type from_type,
19038 machine_mode mode)
19039 {
19040 int size = GET_MODE_SIZE (mode);
19041
19042 /* Handle the various direct moves available. In this function, we only
19043 look at cases where we don't need any extra registers, and one or more
19044 simple move insns are issued. On older ISAs, small integers are not
19045 allowed in FPR/VSX registers. Single-precision binary floating point is
19046 not a simple move because we need to convert to the single-precision memory layout.
19047 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19048 need special direct move handling, which we do not support yet. */
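/* For example, on a 64-bit ISA 2.07 target a DImode move between a GPR and
   a VSX register is a single mtvsrd or mfvsrd, so it is a simple move; an
   SFmode move is not, because of the memory format conversion.  */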
19049 if (TARGET_DIRECT_MOVE
19050 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19051 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19052 {
19053 if (TARGET_POWERPC64)
19054 {
19055 /* ISA 2.07: MTVSRD or MFVSRD. */
19056 if (size == 8)
19057 return true;
19058
19059 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19060 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19061 return true;
19062 }
19063
19064 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19065 if (TARGET_P8_VECTOR)
19066 {
19067 if (mode == SImode)
19068 return true;
19069
19070 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19071 return true;
19072 }
19073
19074 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19075 if (mode == SDmode)
19076 return true;
19077 }
19078
19079 /* Power6+: MFTGPR or MFFGPR. */
19080 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19081 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19082 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19083 return true;
19084
19085 /* Move to/from SPR. */
19086 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19087 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19088 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19089 return true;
19090
19091 return false;
19092 }
19093
19094 /* Direct move helper function for rs6000_secondary_reload. Handle all of
19095 the special direct moves that involve allocating an extra register. Return
19096 true if such a move is available, recording its insn code and extra cost
19097 in SRI; return false otherwise. */
19098
19099 static bool
19100 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19101 enum rs6000_reg_type from_type,
19102 machine_mode mode,
19103 secondary_reload_info *sri,
19104 bool altivec_p)
19105 {
19106 bool ret = false;
19107 enum insn_code icode = CODE_FOR_nothing;
19108 int cost = 0;
19109 int size = GET_MODE_SIZE (mode);
19110
19111 if (TARGET_POWERPC64 && size == 16)
19112 {
19113 /* Handle moving 128-bit values from GPRs to VSX registers on
19114 ISA 2.07 (power8, power9) when running in 64-bit mode using
19115 XXPERMDI to glue the two 64-bit values back together. */
19116 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19117 {
19118 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19119 icode = reg_addr[mode].reload_vsx_gpr;
19120 }
19121
19122 /* Handle moving 128-bit values from VSX registers to GPRs on ISA 2.07
19123 when running in 64-bit mode, using XXPERMDI to get access to the
19124 bottom 64-bit value. */
19125 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19126 {
19127 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19128 icode = reg_addr[mode].reload_gpr_vsx;
19129 }
19130 }
19131
19132 else if (TARGET_POWERPC64 && mode == SFmode)
19133 {
19134 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19135 {
19136 cost = 3; /* xscvdpspn, mfvsrd, and. */
19137 icode = reg_addr[mode].reload_gpr_vsx;
19138 }
19139
19140 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19141 {
19142 cost = 2; /* mtvsrz, xscvspdpn. */
19143 icode = reg_addr[mode].reload_vsx_gpr;
19144 }
19145 }
19146
19147 else if (!TARGET_POWERPC64 && size == 8)
19148 {
19149 /* Handle moving 64-bit values from GPRs to floating point registers on
19150 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19151 32-bit values back together. Altivec register classes must be handled
19152 specially since a different instruction is used, and the secondary
19153 reload support requires a single instruction class in the scratch
19154 register constraint. However, right now TFmode is not allowed in
19155 Altivec registers, so the pattern will never match. */
19156 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19157 {
19158 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19159 icode = reg_addr[mode].reload_fpr_gpr;
19160 }
19161 }
19162
19163 if (icode != CODE_FOR_nothing)
19164 {
19165 ret = true;
19166 if (sri)
19167 {
19168 sri->icode = icode;
19169 sri->extra_cost = cost;
19170 }
19171 }
19172
19173 return ret;
19174 }
19175
19176 /* Return whether a move between two register classes can be done either
19177 directly (simple move) or via a pattern that uses a single extra temporary
19178 (using ISA 2.07's direct move in this case). */
19179
19180 static bool
19181 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19182 enum rs6000_reg_type from_type,
19183 machine_mode mode,
19184 secondary_reload_info *sri,
19185 bool altivec_p)
19186 {
19187 /* Fall back to load/store reloads if either type is not a register. */
19188 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19189 return false;
19190
19191 /* If we haven't allocated registers yet, assume the move can be done for the
19192 standard register types. */
19193 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19194 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19195 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19196 return true;
19197
19198 /* A move within the same set of registers is a simple move for
19199 non-specialized registers. */
19200 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19201 return true;
19202
19203 /* Check whether a simple move can be done directly. */
19204 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19205 {
19206 if (sri)
19207 {
19208 sri->icode = CODE_FOR_nothing;
19209 sri->extra_cost = 0;
19210 }
19211 return true;
19212 }
19213
19214 /* Now check if we can do it in a few steps. */
19215 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19216 altivec_p);
19217 }
19218
19219 /* Inform reload about cases where moving X with a mode MODE to a register in
19220 RCLASS requires an extra scratch or immediate register. Return the class
19221 needed for the immediate register.
19222
19223 For VSX and Altivec, we may need a register to convert sp+offset into
19224 reg+sp.
19225
19226 For misaligned 64-bit gpr loads and stores we need a register to
19227 convert an offset address to indirect. */
19228
19229 static reg_class_t
19230 rs6000_secondary_reload (bool in_p,
19231 rtx x,
19232 reg_class_t rclass_i,
19233 machine_mode mode,
19234 secondary_reload_info *sri)
19235 {
19236 enum reg_class rclass = (enum reg_class) rclass_i;
19237 reg_class_t ret = ALL_REGS;
19238 enum insn_code icode;
19239 bool default_p = false;
19240 bool done_p = false;
19241
19242 /* Allow subreg of memory before/during reload. */
19243 bool memory_p = (MEM_P (x)
19244 || (!reload_completed && SUBREG_P (x)
19245 && MEM_P (SUBREG_REG (x))));
19246
19247 sri->icode = CODE_FOR_nothing;
19248 sri->t_icode = CODE_FOR_nothing;
19249 sri->extra_cost = 0;
19250 icode = ((in_p)
19251 ? reg_addr[mode].reload_load
19252 : reg_addr[mode].reload_store);
19253
19254 if (REG_P (x) || register_operand (x, mode))
19255 {
19256 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19257 bool altivec_p = (rclass == ALTIVEC_REGS);
19258 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19259
19260 if (!in_p)
19261 std::swap (to_type, from_type);
19262
19263 /* Can we do a direct move of some sort? */
19264 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19265 altivec_p))
19266 {
19267 icode = (enum insn_code)sri->icode;
19268 default_p = false;
19269 done_p = true;
19270 ret = NO_REGS;
19271 }
19272 }
19273
19274 /* Make sure 0.0 is not reloaded or forced into memory. */
19275 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19276 {
19277 ret = NO_REGS;
19278 default_p = false;
19279 done_p = true;
19280 }
19281
19282 /* If this is a scalar floating point value and we want to load it into the
19283 traditional Altivec registers, move it through a traditional floating
19284 point register, unless we have D-form addressing. Also make sure that
19285 non-zero constants use an FPR. */
19286 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19287 && !mode_supports_vmx_dform (mode)
19288 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19289 && (memory_p || CONST_DOUBLE_P (x)))
19290 {
19291 ret = FLOAT_REGS;
19292 default_p = false;
19293 done_p = true;
19294 }
19295
19296 /* Handle reload of loads/stores if we have reload helper functions. */
19297 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19298 {
19299 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19300 mode);
19301
19302 if (extra_cost >= 0)
19303 {
19304 done_p = true;
19305 ret = NO_REGS;
19306 if (extra_cost > 0)
19307 {
19308 sri->extra_cost = extra_cost;
19309 sri->icode = icode;
19310 }
19311 }
19312 }
19313
19314 /* Handle unaligned loads and stores of integer registers. */
19315 if (!done_p && TARGET_POWERPC64
19316 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19317 && memory_p
19318 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19319 {
19320 rtx addr = XEXP (x, 0);
19321 rtx off = address_offset (addr);
19322
19323 if (off != NULL_RTX)
19324 {
19325 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19326 unsigned HOST_WIDE_INT offset = INTVAL (off);
19327
19328 /* We need a secondary reload when our legitimate_address_p
19329 says the address is good (as otherwise the entire address
19330 will be reloaded), and the offset is not a multiple of
19331 four or we have an address wrap. Address wrap will only
19332 occur for LO_SUMs since legitimate_offset_address_p
19333 rejects addresses for 16-byte mems that will wrap. */
19334 if (GET_CODE (addr) == LO_SUM
19335 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19336 && ((offset & 3) != 0
19337 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19338 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19339 && (offset & 3) != 0))
19340 {
19341 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19342 if (in_p)
19343 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19344 : CODE_FOR_reload_di_load);
19345 else
19346 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19347 : CODE_FOR_reload_di_store);
19348 sri->extra_cost = 2;
19349 ret = NO_REGS;
19350 done_p = true;
19351 }
19352 else
19353 default_p = true;
19354 }
19355 else
19356 default_p = true;
19357 }
19358
19359 if (!done_p && !TARGET_POWERPC64
19360 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19361 && memory_p
19362 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19363 {
19364 rtx addr = XEXP (x, 0);
19365 rtx off = address_offset (addr);
19366
19367 if (off != NULL_RTX)
19368 {
19369 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19370 unsigned HOST_WIDE_INT offset = INTVAL (off);
19371
19372 /* We need a secondary reload when our legitimate_address_p
19373 says the address is good (as otherwise the entire address
19374 will be reloaded), and we have a wrap.
19375
19376 legitimate_lo_sum_address_p allows LO_SUM addresses to
19377 have any offset so test for wrap in the low 16 bits.
19378
19379 legitimate_offset_address_p checks for the range
19380 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19381 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19382 [0x7ff4,0x7fff] respectively, so test for the
19383 intersection of these ranges, [0x7ffc,0x7fff] and
19384 [0x7ff4,0x7ff7] respectively.
19385
19386 Note that the address we see here may have been
19387 manipulated by legitimize_reload_address. */
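/* For example, a DImode value held in two 32-bit GPRs and loaded
   from offset 0x7ffc needs its second word at offset 0x8000, which
   no longer fits in a 16-bit displacement, so we fall back to
   indirect addressing through the scratch register.  */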
19388 if (GET_CODE (addr) == LO_SUM
19389 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19390 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19391 {
19392 if (in_p)
19393 sri->icode = CODE_FOR_reload_si_load;
19394 else
19395 sri->icode = CODE_FOR_reload_si_store;
19396 sri->extra_cost = 2;
19397 ret = NO_REGS;
19398 done_p = true;
19399 }
19400 else
19401 default_p = true;
19402 }
19403 else
19404 default_p = true;
19405 }
19406
19407 if (!done_p)
19408 default_p = true;
19409
19410 if (default_p)
19411 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19412
19413 gcc_assert (ret != ALL_REGS);
19414
19415 if (TARGET_DEBUG_ADDR)
19416 {
19417 fprintf (stderr,
19418 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19419 "mode = %s",
19420 reg_class_names[ret],
19421 in_p ? "true" : "false",
19422 reg_class_names[rclass],
19423 GET_MODE_NAME (mode));
19424
19425 if (reload_completed)
19426 fputs (", after reload", stderr);
19427
19428 if (!done_p)
19429 fputs (", done_p not set", stderr);
19430
19431 if (default_p)
19432 fputs (", default secondary reload", stderr);
19433
19434 if (sri->icode != CODE_FOR_nothing)
19435 fprintf (stderr, ", reload func = %s, extra cost = %d",
19436 insn_data[sri->icode].name, sri->extra_cost);
19437
19438 else if (sri->extra_cost > 0)
19439 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19440
19441 fputs ("\n", stderr);
19442 debug_rtx (x);
19443 }
19444
19445 return ret;
19446 }
19447
19448 /* Better tracing for rs6000_secondary_reload_inner. */
19449
19450 static void
19451 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19452 bool store_p)
19453 {
19454 rtx set, clobber;
19455
19456 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19457
19458 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19459 store_p ? "store" : "load");
19460
19461 if (store_p)
19462 set = gen_rtx_SET (mem, reg);
19463 else
19464 set = gen_rtx_SET (reg, mem);
19465
19466 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19467 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19468 }
19469
19470 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19471 ATTRIBUTE_NORETURN;
19472
19473 static void
19474 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19475 bool store_p)
19476 {
19477 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19478 gcc_unreachable ();
19479 }
19480
19481 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19482 reload helper functions. These were identified in
19483 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19484 reload, it emits one of the insns:
19485 reload_<RELOAD:mode>_<P:mptrsize>_store
19486 reload_<RELOAD:mode>_<P:mptrsize>_load
19487
19488 which in turn call this function, to do whatever is necessary to create
19489 valid addresses. */
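/* For example, if the address is (and:DI (reg) (const_int -16)) and the
   register class cannot use the AND form directly, the AND is computed
   into SCRATCH below and the memory is rewritten to address through it.  */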
19490
19491 void
19492 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19493 {
19494 int regno = true_regnum (reg);
19495 machine_mode mode = GET_MODE (reg);
19496 addr_mask_type addr_mask;
19497 rtx addr;
19498 rtx new_addr;
19499 rtx op_reg, op0, op1;
19500 rtx and_op;
19501 rtx cc_clobber;
19502 rtvec rv;
19503
19504 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19505 || !base_reg_operand (scratch, GET_MODE (scratch)))
19506 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19507
19508 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19509 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19510
19511 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19512 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19513
19514 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19515 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19516
19517 else
19518 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19519
19520 /* Make sure the mode is valid in this register class. */
19521 if ((addr_mask & RELOAD_REG_VALID) == 0)
19522 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19523
19524 if (TARGET_DEBUG_ADDR)
19525 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19526
19527 new_addr = addr = XEXP (mem, 0);
19528 switch (GET_CODE (addr))
19529 {
19530 /* Does the register class support auto update forms for this mode? If
19531 not, do the update now. We don't need a scratch register, since the
19532 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19533 case PRE_INC:
19534 case PRE_DEC:
19535 op_reg = XEXP (addr, 0);
19536 if (!base_reg_operand (op_reg, Pmode))
19537 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19538
19539 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19540 {
19541 int delta = GET_MODE_SIZE (mode);
19542 if (GET_CODE (addr) == PRE_DEC)
19543 delta = -delta;
19544 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19545 new_addr = op_reg;
19546 }
19547 break;
19548
19549 case PRE_MODIFY:
19550 op0 = XEXP (addr, 0);
19551 op1 = XEXP (addr, 1);
19552 if (!base_reg_operand (op0, Pmode)
19553 || GET_CODE (op1) != PLUS
19554 || !rtx_equal_p (op0, XEXP (op1, 0)))
19555 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19556
19557 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19558 {
19559 emit_insn (gen_rtx_SET (op0, op1));
19560 new_addr = reg;
19561 }
19562 break;
19563
19564 /* Do we need to simulate AND -16 to clear the bottom address bits used
19565 in VMX load/stores? */
19566 case AND:
19567 op0 = XEXP (addr, 0);
19568 op1 = XEXP (addr, 1);
19569 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19570 {
19571 if (REG_P (op0) || SUBREG_P (op0))
19572 op_reg = op0;
19573
19574 else if (GET_CODE (op1) == PLUS)
19575 {
19576 emit_insn (gen_rtx_SET (scratch, op1));
19577 op_reg = scratch;
19578 }
19579
19580 else
19581 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19582
19583 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19584 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19585 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19586 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19587 new_addr = scratch;
19588 }
19589 break;
19590
19591 /* If this is an indirect address, make sure it is a base register. */
19592 case REG:
19593 case SUBREG:
19594 if (!base_reg_operand (addr, GET_MODE (addr)))
19595 {
19596 emit_insn (gen_rtx_SET (scratch, addr));
19597 new_addr = scratch;
19598 }
19599 break;
19600
19601 /* If this is an indexed address, make sure the register class can handle
19602 indexed addresses for this mode. */
19603 case PLUS:
19604 op0 = XEXP (addr, 0);
19605 op1 = XEXP (addr, 1);
19606 if (!base_reg_operand (op0, Pmode))
19607 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19608
19609 else if (int_reg_operand (op1, Pmode))
19610 {
19611 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19612 {
19613 emit_insn (gen_rtx_SET (scratch, addr));
19614 new_addr = scratch;
19615 }
19616 }
19617
19618 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19619 {
19620 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19621 || !quad_address_p (addr, mode, false))
19622 {
19623 emit_insn (gen_rtx_SET (scratch, addr));
19624 new_addr = scratch;
19625 }
19626 }
19627
19628 /* Make sure the register class can handle offset addresses. */
19629 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19630 {
19631 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19632 {
19633 emit_insn (gen_rtx_SET (scratch, addr));
19634 new_addr = scratch;
19635 }
19636 }
19637
19638 else
19639 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19640
19641 break;
19642
19643 case LO_SUM:
19644 op0 = XEXP (addr, 0);
19645 op1 = XEXP (addr, 1);
19646 if (!base_reg_operand (op0, Pmode))
19647 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19648
19649 else if (int_reg_operand (op1, Pmode))
19650 {
19651 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19652 {
19653 emit_insn (gen_rtx_SET (scratch, addr));
19654 new_addr = scratch;
19655 }
19656 }
19657
19658 /* Quad offsets are restricted and can't handle normal addresses. */
19659 else if (mode_supports_dq_form (mode))
19660 {
19661 emit_insn (gen_rtx_SET (scratch, addr));
19662 new_addr = scratch;
19663 }
19664
19665 /* Make sure the register class can handle offset addresses. */
19666 else if (legitimate_lo_sum_address_p (mode, addr, false))
19667 {
19668 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19669 {
19670 emit_insn (gen_rtx_SET (scratch, addr));
19671 new_addr = scratch;
19672 }
19673 }
19674
19675 else
19676 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19677
19678 break;
19679
19680 case SYMBOL_REF:
19681 case CONST:
19682 case LABEL_REF:
19683 rs6000_emit_move (scratch, addr, Pmode);
19684 new_addr = scratch;
19685 break;
19686
19687 default:
19688 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19689 }
19690
19691 /* Adjust the address if it changed. */
19692 if (addr != new_addr)
19693 {
19694 mem = replace_equiv_address_nv (mem, new_addr);
19695 if (TARGET_DEBUG_ADDR)
19696 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19697 }
19698
19699 /* Now create the move. */
19700 if (store_p)
19701 emit_insn (gen_rtx_SET (mem, reg));
19702 else
19703 emit_insn (gen_rtx_SET (reg, mem));
19704
19705 return;
19706 }
19707
19708 /* Convert reloads involving 64-bit gprs and misaligned offset
19709 addressing, or multiple 32-bit gprs and offsets that are too large,
19710 to use indirect addressing. */
19711
19712 void
19713 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19714 {
19715 int regno = true_regnum (reg);
19716 enum reg_class rclass;
19717 rtx addr;
19718 rtx scratch_or_premodify = scratch;
19719
19720 if (TARGET_DEBUG_ADDR)
19721 {
19722 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19723 store_p ? "store" : "load");
19724 fprintf (stderr, "reg:\n");
19725 debug_rtx (reg);
19726 fprintf (stderr, "mem:\n");
19727 debug_rtx (mem);
19728 fprintf (stderr, "scratch:\n");
19729 debug_rtx (scratch);
19730 }
19731
19732 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
19733 gcc_assert (MEM_P (mem));
19734 rclass = REGNO_REG_CLASS (regno);
19735 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19736 addr = XEXP (mem, 0);
19737
19738 if (GET_CODE (addr) == PRE_MODIFY)
19739 {
19740 gcc_assert (REG_P (XEXP (addr, 0))
19741 && GET_CODE (XEXP (addr, 1)) == PLUS
19742 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19743 scratch_or_premodify = XEXP (addr, 0);
19744 addr = XEXP (addr, 1);
19745 }
19746 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19747
19748 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19749
19750 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19751
19752 /* Now create the move. */
19753 if (store_p)
19754 emit_insn (gen_rtx_SET (mem, reg));
19755 else
19756 emit_insn (gen_rtx_SET (reg, mem));
19757
19758 return;
19759 }
19760
19761 /* Given an rtx X being reloaded into a reg required to be
19762 in class CLASS, return the class of reg to actually use.
19763 In general this is just CLASS; but on some machines
19764 in some cases it is preferable to use a more restrictive class.
19765
19766 On the RS/6000, we have to return NO_REGS when we want to reload a
19767 floating-point CONST_DOUBLE to force it to be copied to memory.
19768
19769 We also don't want to reload integer values into floating-point
19770 registers if we can at all help it. In fact, this can
19771 cause reload to die, if it tries to generate a reload of CTR
19772 into a FP register and discovers it doesn't have the memory location
19773 required.
19774
19775 ??? Would it be a good idea to have reload do the converse, that is
19776 try to reload floating modes into FP registers if possible?
19777 */
19778
19779 static enum reg_class
19780 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19781 {
19782 machine_mode mode = GET_MODE (x);
19783 bool is_constant = CONSTANT_P (x);
19784
19785 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19786 reload class for it. */
19787 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19788 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19789 return NO_REGS;
19790
19791 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19792 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19793 return NO_REGS;
19794
19795 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19796 the reloading of address expressions using PLUS into floating point
19797 registers. */
19798 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19799 {
19800 if (is_constant)
19801 {
19802 /* Zero is always allowed in all VSX registers. */
19803 if (x == CONST0_RTX (mode))
19804 return rclass;
19805
19806 /* If this is a vector constant that can be formed with a few Altivec
19807 instructions, we want altivec registers. */
19808 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19809 return ALTIVEC_REGS;
19810
19811 /* If this is an integer constant that can easily be loaded into
19812 vector registers, allow it. */
19813 if (CONST_INT_P (x))
19814 {
19815 HOST_WIDE_INT value = INTVAL (x);
19816
19817 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19818 2.06 can generate it in the Altivec registers with
19819 VSPLTI<x>. */
19820 if (value == -1)
19821 {
19822 if (TARGET_P8_VECTOR)
19823 return rclass;
19824 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19825 return ALTIVEC_REGS;
19826 else
19827 return NO_REGS;
19828 }
19829
19830 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19831 a sign extend in the Altivec registers. */
19832 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19833 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19834 return ALTIVEC_REGS;
19835 }
19836
19837 /* Force constant to memory. */
19838 return NO_REGS;
19839 }
19840
19841 /* D-form addressing can easily reload the value. */
19842 if (mode_supports_vmx_dform (mode)
19843 || mode_supports_dq_form (mode))
19844 return rclass;
19845
19846 /* If this is a scalar floating point value and we don't have D-form
19847 addressing, prefer the traditional floating point registers so that we
19848 can use D-form (register+offset) addressing. */
19849 if (rclass == VSX_REGS
19850 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19851 return FLOAT_REGS;
19852
19853 /* Prefer the Altivec registers if Altivec is handling the vector
19854 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19855 loads. */
19856 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19857 || mode == V1TImode)
19858 return ALTIVEC_REGS;
19859
19860 return rclass;
19861 }
19862
19863 if (is_constant || GET_CODE (x) == PLUS)
19864 {
19865 if (reg_class_subset_p (GENERAL_REGS, rclass))
19866 return GENERAL_REGS;
19867 if (reg_class_subset_p (BASE_REGS, rclass))
19868 return BASE_REGS;
19869 return NO_REGS;
19870 }
19871
19872 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
19873 return GENERAL_REGS;
19874
19875 return rclass;
19876 }
19877
19878 /* Debug version of rs6000_preferred_reload_class. */
19879 static enum reg_class
19880 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19881 {
19882 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19883
19884 fprintf (stderr,
19885 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19886 "mode = %s, x:\n",
19887 reg_class_names[ret], reg_class_names[rclass],
19888 GET_MODE_NAME (GET_MODE (x)));
19889 debug_rtx (x);
19890
19891 return ret;
19892 }
19893
19894 /* If we are copying between FP or AltiVec registers and anything else, we need
19895 a memory location. The exception is when we are targeting ppc64 and the
19896 direct fpr/gpr move instructions are available. Also, under VSX, you
19897 can copy vector registers from the FP register set to the Altivec register
19898 set and vice versa. */
19899
19900 static bool
19901 rs6000_secondary_memory_needed (machine_mode mode,
19902 reg_class_t from_class,
19903 reg_class_t to_class)
19904 {
19905 enum rs6000_reg_type from_type, to_type;
19906 bool altivec_p = ((from_class == ALTIVEC_REGS)
19907 || (to_class == ALTIVEC_REGS));
19908
19909 /* If a simple/direct move is available, we don't need secondary memory. */
19910 from_type = reg_class_to_reg_type[(int)from_class];
19911 to_type = reg_class_to_reg_type[(int)to_class];
19912
19913 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19914 (secondary_reload_info *)0, altivec_p))
19915 return false;
19916
19917 /* If we have a floating point or vector register class, we need to use
19918 memory to transfer the data. */
19919 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19920 return true;
19921
19922 return false;
19923 }
19924
19925 /* Debug version of rs6000_secondary_memory_needed. */
19926 static bool
19927 rs6000_debug_secondary_memory_needed (machine_mode mode,
19928 reg_class_t from_class,
19929 reg_class_t to_class)
19930 {
19931 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19932
19933 fprintf (stderr,
19934 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19935 "to_class = %s, mode = %s\n",
19936 ret ? "true" : "false",
19937 reg_class_names[from_class],
19938 reg_class_names[to_class],
19939 GET_MODE_NAME (mode));
19940
19941 return ret;
19942 }
19943
19944 /* Return the register class of a scratch register needed to copy IN into
19945 or out of a register in RCLASS in MODE. If it can be done directly,
19946 NO_REGS is returned. */
19947
19948 static enum reg_class
19949 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19950 rtx in)
19951 {
19952 int regno;
19953
19954 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19955 #if TARGET_MACHO
19956 && MACHOPIC_INDIRECT
19957 #endif
19958 ))
19959 {
19960 /* We cannot copy a symbolic operand directly into anything
19961 other than BASE_REGS for TARGET_ELF. So indicate that a
19962 register from BASE_REGS is needed as an intermediate
19963 register.
19964
19965 On Darwin, pic addresses require a load from memory, which
19966 needs a base register. */
19967 if (rclass != BASE_REGS
19968 && (SYMBOL_REF_P (in)
19969 || GET_CODE (in) == HIGH
19970 || GET_CODE (in) == LABEL_REF
19971 || GET_CODE (in) == CONST))
19972 return BASE_REGS;
19973 }
19974
19975 if (REG_P (in))
19976 {
19977 regno = REGNO (in);
19978 if (!HARD_REGISTER_NUM_P (regno))
19979 {
19980 regno = true_regnum (in);
19981 if (!HARD_REGISTER_NUM_P (regno))
19982 regno = -1;
19983 }
19984 }
19985 else if (SUBREG_P (in))
19986 {
19987 regno = true_regnum (in);
19988 if (!HARD_REGISTER_NUM_P (regno))
19989 regno = -1;
19990 }
19991 else
19992 regno = -1;
19993
19994 /* If we have VSX register moves, prefer moving scalar values between
19995 Altivec registers and GPRs by going via an FPR (and then via memory)
19996 instead of reloading the secondary memory address for Altivec moves. */
19997 if (TARGET_VSX
19998 && GET_MODE_SIZE (mode) < 16
19999 && !mode_supports_vmx_dform (mode)
20000 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20001 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20002 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20003 && (regno >= 0 && INT_REGNO_P (regno)))))
20004 return FLOAT_REGS;
20005
20006 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20007 into anything. */
20008 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20009 || (regno >= 0 && INT_REGNO_P (regno)))
20010 return NO_REGS;
20011
20012 /* Constants, memory, and VSX registers can go into VSX registers (both the
20013 traditional floating point and the altivec registers). */
20014 if (rclass == VSX_REGS
20015 && (regno == -1 || VSX_REGNO_P (regno)))
20016 return NO_REGS;
20017
20018 /* Constants, memory, and FP registers can go into FP registers. */
20019 if ((regno == -1 || FP_REGNO_P (regno))
20020 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
20021 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20022
20023 /* Memory and AltiVec registers can go into AltiVec registers. */
20024 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20025 && rclass == ALTIVEC_REGS)
20026 return NO_REGS;
20027
20028 /* We can copy among the CR registers. */
20029 if ((rclass == CR_REGS || rclass == CR0_REGS)
20030 && regno >= 0 && CR_REGNO_P (regno))
20031 return NO_REGS;
20032
20033 /* Otherwise, we need GENERAL_REGS. */
20034 return GENERAL_REGS;
20035 }
20036
20037 /* Debug version of rs6000_secondary_reload_class. */
20038 static enum reg_class
20039 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20040 machine_mode mode, rtx in)
20041 {
20042 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20043 fprintf (stderr,
20044 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20045 "mode = %s, input rtx:\n",
20046 reg_class_names[ret], reg_class_names[rclass],
20047 GET_MODE_NAME (mode));
20048 debug_rtx (in);
20049
20050 return ret;
20051 }
20052
20053 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20054
20055 static bool
20056 rs6000_can_change_mode_class (machine_mode from,
20057 machine_mode to,
20058 reg_class_t rclass)
20059 {
20060 unsigned from_size = GET_MODE_SIZE (from);
20061 unsigned to_size = GET_MODE_SIZE (to);
20062
20063 if (from_size != to_size)
20064 {
20065 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20066
20067 if (reg_classes_intersect_p (xclass, rclass))
20068 {
20069 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20070 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20071 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20072 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20073
20074 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20075 single register under VSX because the scalar part of the register
20076 is in the upper 64 bits, and not the lower 64 bits. Types like
20077 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20078 IEEE floating point can't overlap, and neither can small
20079 values. */
20080
20081 if (to_float128_vector_p && from_float128_vector_p)
20082 return true;
20083
20084 else if (to_float128_vector_p || from_float128_vector_p)
20085 return false;
20086
20087 /* TDmode in floating-mode registers must always go into a register
20088 pair with the most significant word in the even-numbered register
20089 to match ISA requirements. In little-endian mode, this does not
20090 match subreg numbering, so we cannot allow subregs. */
20091 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20092 return false;
20093
20094 if (from_size < 8 || to_size < 8)
20095 return false;
20096
20097 if (from_size == 8 && (8 * to_nregs) != to_size)
20098 return false;
20099
20100 if (to_size == 8 && (8 * from_nregs) != from_size)
20101 return false;
20102
20103 return true;
20104 }
20105 else
20106 return true;
20107 }
20108
20109 /* Since the VSX register set includes traditional floating point registers
20110 and altivec registers, just check for the size being different instead of
20111 trying to check whether the modes are vector modes. Otherwise it won't
20112 allow say DF and DI to change classes. For types like TFmode and TDmode
20113 that take 2 64-bit registers, rather than a single 128-bit register, don't
20114 allow subregs of those types to other 128-bit types. */
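/* For example, DFmode and DImode (both 8 bytes) may share a VSX register
   and freely change mode into each other, while a DImode subreg of a
   KFmode value was already rejected above because the scalar part lives
   in the upper 64 bits of the register.  */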
20115 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20116 {
20117 unsigned num_regs = (from_size + 15) / 16;
20118 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20119 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20120 return false;
20121
20122 return (from_size == 8 || from_size == 16);
20123 }
20124
20125 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20126 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20127 return false;
20128
20129 return true;
20130 }
20131
20132 /* Debug version of rs6000_can_change_mode_class. */
20133 static bool
20134 rs6000_debug_can_change_mode_class (machine_mode from,
20135 machine_mode to,
20136 reg_class_t rclass)
20137 {
20138 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20139
20140 fprintf (stderr,
20141 "rs6000_can_change_mode_class, return %s, from = %s, "
20142 "to = %s, rclass = %s\n",
20143 ret ? "true" : "false",
20144 GET_MODE_NAME (from), GET_MODE_NAME (to),
20145 reg_class_names[rclass]);
20146
20147 return ret;
20148 }
20149 \f
20150 /* Return a string to do a move operation of 128 bits of data. */
20151
20152 const char *
20153 rs6000_output_move_128bit (rtx operands[])
20154 {
20155 rtx dest = operands[0];
20156 rtx src = operands[1];
20157 machine_mode mode = GET_MODE (dest);
20158 int dest_regno;
20159 int src_regno;
20160 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20161 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20162
20163 if (REG_P (dest))
20164 {
20165 dest_regno = REGNO (dest);
20166 dest_gpr_p = INT_REGNO_P (dest_regno);
20167 dest_fp_p = FP_REGNO_P (dest_regno);
20168 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20169 dest_vsx_p = dest_fp_p | dest_vmx_p;
20170 }
20171 else
20172 {
20173 dest_regno = -1;
20174 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20175 }
20176
20177 if (REG_P (src))
20178 {
20179 src_regno = REGNO (src);
20180 src_gpr_p = INT_REGNO_P (src_regno);
20181 src_fp_p = FP_REGNO_P (src_regno);
20182 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20183 src_vsx_p = src_fp_p | src_vmx_p;
20184 }
20185 else
20186 {
20187 src_regno = -1;
20188 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20189 }
20190
20191 /* Register moves. */
20192 if (dest_regno >= 0 && src_regno >= 0)
20193 {
20194 if (dest_gpr_p)
20195 {
20196 if (src_gpr_p)
20197 return "#";
20198
20199 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20200 return (WORDS_BIG_ENDIAN
20201 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20202 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20203
20204 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20205 return "#";
20206 }
20207
20208 else if (TARGET_VSX && dest_vsx_p)
20209 {
20210 if (src_vsx_p)
20211 return "xxlor %x0,%x1,%x1";
20212
20213 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20214 return (WORDS_BIG_ENDIAN
20215 ? "mtvsrdd %x0,%1,%L1"
20216 : "mtvsrdd %x0,%L1,%1");
20217
20218 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20219 return "#";
20220 }
20221
20222 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20223 return "vor %0,%1,%1";
20224
20225 else if (dest_fp_p && src_fp_p)
20226 return "#";
20227 }
20228
20229 /* Loads. */
20230 else if (dest_regno >= 0 && MEM_P (src))
20231 {
20232 if (dest_gpr_p)
20233 {
20234 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20235 return "lq %0,%1";
20236 else
20237 return "#";
20238 }
20239
20240 else if (TARGET_ALTIVEC && dest_vmx_p
20241 && altivec_indexed_or_indirect_operand (src, mode))
20242 return "lvx %0,%y1";
20243
20244 else if (TARGET_VSX && dest_vsx_p)
20245 {
20246 if (mode_supports_dq_form (mode)
20247 && quad_address_p (XEXP (src, 0), mode, true))
20248 return "lxv %x0,%1";
20249
20250 else if (TARGET_P9_VECTOR)
20251 return "lxvx %x0,%y1";
20252
20253 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20254 return "lxvw4x %x0,%y1";
20255
20256 else
20257 return "lxvd2x %x0,%y1";
20258 }
20259
20260 else if (TARGET_ALTIVEC && dest_vmx_p)
20261 return "lvx %0,%y1";
20262
20263 else if (dest_fp_p)
20264 return "#";
20265 }
20266
20267 /* Stores. */
20268 else if (src_regno >= 0 && MEM_P (dest))
20269 {
20270 if (src_gpr_p)
20271 {
20272 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20273 return "stq %1,%0";
20274 else
20275 return "#";
20276 }
20277
20278 else if (TARGET_ALTIVEC && src_vmx_p
20279 && altivec_indexed_or_indirect_operand (dest, mode))
20280 return "stvx %1,%y0";
20281
20282 else if (TARGET_VSX && src_vsx_p)
20283 {
20284 if (mode_supports_dq_form (mode)
20285 && quad_address_p (XEXP (dest, 0), mode, true))
20286 return "stxv %x1,%0";
20287
20288 else if (TARGET_P9_VECTOR)
20289 return "stxvx %x1,%y0";
20290
20291 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20292 return "stxvw4x %x1,%y0";
20293
20294 else
20295 return "stxvd2x %x1,%y0";
20296 }
20297
20298 else if (TARGET_ALTIVEC && src_vmx_p)
20299 return "stvx %1,%y0";
20300
20301 else if (src_fp_p)
20302 return "#";
20303 }
20304
20305 /* Constants. */
20306 else if (dest_regno >= 0
20307 && (CONST_INT_P (src)
20308 || CONST_WIDE_INT_P (src)
20309 || CONST_DOUBLE_P (src)
20310 || GET_CODE (src) == CONST_VECTOR))
20311 {
20312 if (dest_gpr_p)
20313 return "#";
20314
20315 else if ((dest_vmx_p && TARGET_ALTIVEC)
20316 || (dest_vsx_p && TARGET_VSX))
20317 return output_vec_const_move (operands);
20318 }
20319
20320 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20321 }
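
/* Illustrative summary (editorial addition, not in the original source)
   of typical template selections made above:
   .  VSX reg to VSX reg                     "xxlor %x0,%x1,%x1"
   .  Altivec reg to Altivec reg             "vor %0,%1,%1"
   .  GPR pair load with TARGET_QUAD_MEMORY  "lq %0,%1"
   .  VSX load with TARGET_P9_VECTOR         "lxvx %x0,%y1"
   A "#" result asks the caller to split the move into smaller pieces.  */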
20322
20323 /* Validate a 128-bit move. */
20324 bool
20325 rs6000_move_128bit_ok_p (rtx operands[])
20326 {
20327 machine_mode mode = GET_MODE (operands[0]);
20328 return (gpc_reg_operand (operands[0], mode)
20329 || gpc_reg_operand (operands[1], mode));
20330 }
20331
20332 /* Return true if a 128-bit move needs to be split. */
20333 bool
20334 rs6000_split_128bit_ok_p (rtx operands[])
20335 {
20336 if (!reload_completed)
20337 return false;
20338
20339 if (!gpr_or_gpr_p (operands[0], operands[1]))
20340 return false;
20341
20342 if (quad_load_store_p (operands[0], operands[1]))
20343 return false;
20344
20345 return true;
20346 }
20347
20348 \f
20349 /* Given a comparison operation, return the bit number in CCR to test. We
20350 know this is a valid comparison.
20351
20352 SCC_P is 1 if this is for an scc. That means that %D will have been
20353 used instead of %C, so the bits will be in different places.
20354
20355 Return -1 if OP isn't a valid comparison for some reason. */
20356
20357 int
20358 ccr_bit (rtx op, int scc_p)
20359 {
20360 enum rtx_code code = GET_CODE (op);
20361 machine_mode cc_mode;
20362 int cc_regnum;
20363 int base_bit;
20364 rtx reg;
20365
20366 if (!COMPARISON_P (op))
20367 return -1;
20368
20369 reg = XEXP (op, 0);
20370
20371 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20372 return -1;
20373
20374 cc_mode = GET_MODE (reg);
20375 cc_regnum = REGNO (reg);
20376 base_bit = 4 * (cc_regnum - CR0_REGNO);
20377
20378 validate_condition_mode (code, cc_mode);
20379
20380 /* When generating a sCOND operation, only positive conditions are
20381 allowed. */
20382 if (scc_p)
20383 switch (code)
20384 {
20385 case EQ:
20386 case GT:
20387 case LT:
20388 case UNORDERED:
20389 case GTU:
20390 case LTU:
20391 break;
20392 default:
20393 return -1;
20394 }
20395
20396 switch (code)
20397 {
20398 case NE:
20399 return scc_p ? base_bit + 3 : base_bit + 2;
20400 case EQ:
20401 return base_bit + 2;
20402 case GT: case GTU: case UNLE:
20403 return base_bit + 1;
20404 case LT: case LTU: case UNGE:
20405 return base_bit;
20406 case ORDERED: case UNORDERED:
20407 return base_bit + 3;
20408
20409 case GE: case GEU:
20410 /* If scc, we will have done a cror to put the bit in the
20411 unordered position. So test that bit. For integer, this is ! LT
20412 unless this is an scc insn. */
20413 return scc_p ? base_bit + 3 : base_bit;
20414
20415 case LE: case LEU:
20416 return scc_p ? base_bit + 3 : base_bit + 1;
20417
20418 default:
20419 return -1;
20420 }
20421 }
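
/* Worked example (editorial addition, not in the original source): for a
   comparison whose result lives in CR7, base_bit is 4 * 7 == 28, so LT
   tests bit 28, GT bit 29, EQ bit 30 and UNORDERED bit 31.  With SCC_P,
   GE/LE/NE instead test the unordered position (base_bit + 3 == 31),
   where the preceding cror placed the result.  */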
20422 \f
20423 /* Return the GOT register. */
20424
20425 rtx
20426 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20427 {
20428 /* The second flow pass currently (June 1999) can't update
20429 regs_ever_live without disturbing other parts of the compiler, so
20430 update it here to make the prolog/epilogue code happy. */
20431 if (!can_create_pseudo_p ()
20432 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20433 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20434
20435 crtl->uses_pic_offset_table = 1;
20436
20437 return pic_offset_table_rtx;
20438 }
20439 \f
20440 static rs6000_stack_t stack_info;
20441
20442 /* Function to init struct machine_function.
20443 This will be called, via a pointer variable,
20444 from push_function_context. */
20445
20446 static struct machine_function *
20447 rs6000_init_machine_status (void)
20448 {
20449 stack_info.reload_completed = 0;
20450 return ggc_cleared_alloc<machine_function> ();
20451 }
20452 \f
20453 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20454
20455 /* Write out a function code label. */
20456
20457 void
20458 rs6000_output_function_entry (FILE *file, const char *fname)
20459 {
20460 if (fname[0] != '.')
20461 {
20462 switch (DEFAULT_ABI)
20463 {
20464 default:
20465 gcc_unreachable ();
20466
20467 case ABI_AIX:
20468 if (DOT_SYMBOLS)
20469 putc ('.', file);
20470 else
20471 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20472 break;
20473
20474 case ABI_ELFv2:
20475 case ABI_V4:
20476 case ABI_DARWIN:
20477 break;
20478 }
20479 }
20480
20481 RS6000_OUTPUT_BASENAME (file, fname);
20482 }
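
/* Illustrative example (editorial addition, not in the original source):
   for FNAME "foo" under ABI_AIX this emits ".foo" when DOT_SYMBOLS is
   set (the traditional code entry-point symbol) and roughly "L.foo"
   otherwise; ABI_ELFv2, ABI_V4 and ABI_DARWIN emit "foo" unchanged.  */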
20483
20484 /* Print an operand. Recognize special options, documented below. */
20485
20486 #if TARGET_ELF
20487 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20488 only introduced by the linker, when applying the sda21
20489 relocation. */
20490 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20491 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20492 #else
20493 #define SMALL_DATA_RELOC "sda21"
20494 #define SMALL_DATA_REG 0
20495 #endif
20496
20497 void
20498 print_operand (FILE *file, rtx x, int code)
20499 {
20500 int i;
20501 unsigned HOST_WIDE_INT uval;
20502
20503 switch (code)
20504 {
20505 /* %a is output_address. */
20506
20507 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20508 output_operand. */
20509
20510 case 'D':
20511 /* Like 'J' but get to the GT bit only. */
20512 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20513 {
20514 output_operand_lossage ("invalid %%D value");
20515 return;
20516 }
20517
20518 /* Bit 1 is GT bit. */
20519 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20520
20521 /* Add one for shift count in rlinm for scc. */
20522 fprintf (file, "%d", i + 1);
20523 return;
20524
20525 case 'e':
20526 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20527 if (! INT_P (x))
20528 {
20529 output_operand_lossage ("invalid %%e value");
20530 return;
20531 }
20532
20533 uval = INTVAL (x);
20534 if ((uval & 0xffff) == 0 && uval != 0)
20535 putc ('s', file);
20536 return;
20537
20538 case 'E':
20539 /* X is a CR register. Print the number of the EQ bit of the CR. */
20540 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20541 output_operand_lossage ("invalid %%E value");
20542 else
20543 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20544 return;
20545
20546 case 'f':
20547 /* X is a CR register. Print the shift count needed to move it
20548 to the high-order four bits. */
20549 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20550 output_operand_lossage ("invalid %%f value");
20551 else
20552 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20553 return;
20554
20555 case 'F':
20556 /* Similar, but print the count for the rotate in the opposite
20557 direction. */
20558 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20559 output_operand_lossage ("invalid %%F value");
20560 else
20561 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20562 return;
20563
20564 case 'G':
20565 /* X is a constant integer. If it is negative, print "m",
20566 otherwise print "z". This is to make an aze or ame insn. */
20567 if (!CONST_INT_P (x))
20568 output_operand_lossage ("invalid %%G value");
20569 else if (INTVAL (x) >= 0)
20570 putc ('z', file);
20571 else
20572 putc ('m', file);
20573 return;
20574
20575 case 'h':
20576 /* If constant, output low-order five bits. Otherwise, write
20577 normally. */
20578 if (INT_P (x))
20579 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20580 else
20581 print_operand (file, x, 0);
20582 return;
20583
20584 case 'H':
20585 /* If constant, output low-order six bits. Otherwise, write
20586 normally. */
20587 if (INT_P (x))
20588 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20589 else
20590 print_operand (file, x, 0);
20591 return;
20592
20593 case 'I':
20594 /* Print `i' if this is a constant, else nothing. */
20595 if (INT_P (x))
20596 putc ('i', file);
20597 return;
20598
20599 case 'j':
20600 /* Write the bit number in CCR for jump. */
20601 i = ccr_bit (x, 0);
20602 if (i == -1)
20603 output_operand_lossage ("invalid %%j code");
20604 else
20605 fprintf (file, "%d", i);
20606 return;
20607
20608 case 'J':
20609 /* Similar, but add one for shift count in rlinm for scc and pass
20610 scc flag to `ccr_bit'. */
20611 i = ccr_bit (x, 1);
20612 if (i == -1)
20613 output_operand_lossage ("invalid %%J code");
20614 else
20615 /* If we want bit 31, write a shift count of zero, not 32. */
20616 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20617 return;
20618
20619 case 'k':
20620 /* X must be a constant. Write the 1's complement of the
20621 constant. */
20622 if (! INT_P (x))
20623 output_operand_lossage ("invalid %%k value");
20624 else
20625 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20626 return;
20627
20628 case 'K':
20629 /* X must be a symbolic constant on ELF. Write an
20630 expression suitable for an 'addi' that adds in the low 16
20631 bits of the MEM. */
20632 if (GET_CODE (x) == CONST)
20633 {
20634 if (GET_CODE (XEXP (x, 0)) != PLUS
20635 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20636 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20637 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20638 output_operand_lossage ("invalid %%K value");
20639 }
20640 print_operand_address (file, x);
20641 fputs ("@l", file);
20642 return;
20643
20644 /* %l is output_asm_label. */
20645
20646 case 'L':
20647 /* Write second word of DImode or DFmode reference. Works on register
20648 or non-indexed memory only. */
20649 if (REG_P (x))
20650 fputs (reg_names[REGNO (x) + 1], file);
20651 else if (MEM_P (x))
20652 {
20653 machine_mode mode = GET_MODE (x);
20654 /* Handle possible auto-increment. Since it is pre-increment and
20655 we have already done it, we can just use an offset of word. */
20656 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20657 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20658 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20659 UNITS_PER_WORD));
20660 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20661 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20662 UNITS_PER_WORD));
20663 else
20664 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20665 UNITS_PER_WORD),
20666 0));
20667
20668 if (small_data_operand (x, GET_MODE (x)))
20669 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20670 reg_names[SMALL_DATA_REG]);
20671 }
20672 return;
20673
20674 case 'N': /* Unused */
20675 /* Write the number of elements in the vector times 4. */
20676 if (GET_CODE (x) != PARALLEL)
20677 output_operand_lossage ("invalid %%N value");
20678 else
20679 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20680 return;
20681
20682 case 'O': /* Unused */
20683 /* Similar, but subtract 1 first. */
20684 if (GET_CODE (x) != PARALLEL)
20685 output_operand_lossage ("invalid %%O value");
20686 else
20687 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20688 return;
20689
20690 case 'p':
20691 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20692 if (! INT_P (x)
20693 || INTVAL (x) < 0
20694 || (i = exact_log2 (INTVAL (x))) < 0)
20695 output_operand_lossage ("invalid %%p value");
20696 else
20697 fprintf (file, "%d", i);
20698 return;
20699
20700 case 'P':
20701 /* The operand must be an indirect memory reference. The result
20702 is the register name. */
20703 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
20704 || REGNO (XEXP (x, 0)) >= 32)
20705 output_operand_lossage ("invalid %%P value");
20706 else
20707 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20708 return;
20709
20710 case 'q':
20711 /* This outputs the logical code corresponding to a boolean
20712 expression. The expression may have one or both operands
20713 negated (if one, only the first one). For condition register
20714 logical operations, it will also treat the negated
20715 CR codes as NOTs, but not handle NOTs of them. */
20716 {
20717 const char *const *t = 0;
20718 const char *s;
20719 enum rtx_code code = GET_CODE (x);
20720 static const char * const tbl[3][3] = {
20721 { "and", "andc", "nor" },
20722 { "or", "orc", "nand" },
20723 { "xor", "eqv", "xor" } };
20724
20725 if (code == AND)
20726 t = tbl[0];
20727 else if (code == IOR)
20728 t = tbl[1];
20729 else if (code == XOR)
20730 t = tbl[2];
20731 else
20732 output_operand_lossage ("invalid %%q value");
20733
20734 if (GET_CODE (XEXP (x, 0)) != NOT)
20735 s = t[0];
20736 else
20737 {
20738 if (GET_CODE (XEXP (x, 1)) == NOT)
20739 s = t[2];
20740 else
20741 s = t[1];
20742 }
20743
20744 fputs (s, file);
20745 }
20746 return;
20747
20748 case 'Q':
20749 if (! TARGET_MFCRF)
20750 return;
20751 fputc (',', file);
20752 /* FALLTHRU */
20753
20754 case 'R':
20755 /* X is a CR register. Print the mask for `mtcrf'. */
20756 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20757 output_operand_lossage ("invalid %%R value");
20758 else
20759 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20760 return;
20761
20762 case 's':
20763 /* Low 5 bits of (32 - value). */
20764 if (! INT_P (x))
20765 output_operand_lossage ("invalid %%s value");
20766 else
20767 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20768 return;
20769
20770 case 't':
20771 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20772 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20773 {
20774 output_operand_lossage ("invalid %%t value");
20775 return;
20776 }
20777
20778 /* Bit 3 is OV bit. */
20779 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20780
20781 /* If we want bit 31, write a shift count of zero, not 32. */
20782 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20783 return;
20784
20785 case 'T':
20786 /* Print the symbolic name of a branch target register. */
20787 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20788 x = XVECEXP (x, 0, 0);
20789 if (!REG_P (x) || (REGNO (x) != LR_REGNO
20790 && REGNO (x) != CTR_REGNO))
20791 output_operand_lossage ("invalid %%T value");
20792 else if (REGNO (x) == LR_REGNO)
20793 fputs ("lr", file);
20794 else
20795 fputs ("ctr", file);
20796 return;
20797
20798 case 'u':
20799 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20800 for use in unsigned operand. */
20801 if (! INT_P (x))
20802 {
20803 output_operand_lossage ("invalid %%u value");
20804 return;
20805 }
20806
20807 uval = INTVAL (x);
20808 if ((uval & 0xffff) == 0)
20809 uval >>= 16;
20810
20811 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20812 return;
20813
20814 case 'v':
20815 /* High-order 16 bits of constant for use in signed operand. */
20816 if (! INT_P (x))
20817 output_operand_lossage ("invalid %%v value");
20818 else
20819 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20820 (INTVAL (x) >> 16) & 0xffff);
20821 return;
20822
20823 case 'U':
20824 /* Print `u' if this has an auto-increment or auto-decrement. */
20825 if (MEM_P (x)
20826 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20827 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20828 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20829 putc ('u', file);
20830 return;
20831
20832 case 'V':
20833 /* Print the trap code for this operand. */
20834 switch (GET_CODE (x))
20835 {
20836 case EQ:
20837 fputs ("eq", file); /* 4 */
20838 break;
20839 case NE:
20840 fputs ("ne", file); /* 24 */
20841 break;
20842 case LT:
20843 fputs ("lt", file); /* 16 */
20844 break;
20845 case LE:
20846 fputs ("le", file); /* 20 */
20847 break;
20848 case GT:
20849 fputs ("gt", file); /* 8 */
20850 break;
20851 case GE:
20852 fputs ("ge", file); /* 12 */
20853 break;
20854 case LTU:
20855 fputs ("llt", file); /* 2 */
20856 break;
20857 case LEU:
20858 fputs ("lle", file); /* 6 */
20859 break;
20860 case GTU:
20861 fputs ("lgt", file); /* 1 */
20862 break;
20863 case GEU:
20864 fputs ("lge", file); /* 5 */
20865 break;
20866 default:
20867 output_operand_lossage ("invalid %%V value");
20868 }
20869 break;
20870
20871 case 'w':
20872 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20873 normally. */
20874 if (INT_P (x))
20875 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20876 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20877 else
20878 print_operand (file, x, 0);
20879 return;
20880
20881 case 'x':
20882 /* X is a FPR or Altivec register used in a VSX context. */
20883 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
20884 output_operand_lossage ("invalid %%x value");
20885 else
20886 {
20887 int reg = REGNO (x);
20888 int vsx_reg = (FP_REGNO_P (reg)
20889 ? reg - 32
20890 : reg - FIRST_ALTIVEC_REGNO + 32);
20891
20892 #ifdef TARGET_REGNAMES
20893 if (TARGET_REGNAMES)
20894 fprintf (file, "%%vs%d", vsx_reg);
20895 else
20896 #endif
20897 fprintf (file, "%d", vsx_reg);
20898 }
20899 return;
20900
20901 case 'X':
20902 if (MEM_P (x)
20903 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20904 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20905 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20906 putc ('x', file);
20907 return;
20908
20909 case 'Y':
20910 /* Like 'L', for third word of TImode/PTImode. */
20911 if (REG_P (x))
20912 fputs (reg_names[REGNO (x) + 2], file);
20913 else if (MEM_P (x))
20914 {
20915 machine_mode mode = GET_MODE (x);
20916 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20917 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20918 output_address (mode, plus_constant (Pmode,
20919 XEXP (XEXP (x, 0), 0), 8));
20920 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20921 output_address (mode, plus_constant (Pmode,
20922 XEXP (XEXP (x, 0), 0), 8));
20923 else
20924 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20925 if (small_data_operand (x, GET_MODE (x)))
20926 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20927 reg_names[SMALL_DATA_REG]);
20928 }
20929 return;
20930
20931 case 'z':
20932 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20933 x = XVECEXP (x, 0, 1);
20934 /* X is a SYMBOL_REF. Write out the name preceded by a
20935 period and without any trailing data in brackets. Used for function
20936 names. If we are configured for System V (or the embedded ABI) on
20937 the PowerPC, do not emit the period, since those systems do not use
20938 TOCs and the like. */
20939 if (!SYMBOL_REF_P (x))
20940 {
20941 output_operand_lossage ("invalid %%z value");
20942 return;
20943 }
20944
20945 /* For Mach-O, check to see if we need a stub. */
20946 if (TARGET_MACHO)
20947 {
20948 const char *name = XSTR (x, 0);
20949 #if TARGET_MACHO
20950 if (darwin_emit_branch_islands
20951 && MACHOPIC_INDIRECT
20952 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20953 name = machopic_indirection_name (x, /*stub_p=*/true);
20954 #endif
20955 assemble_name (file, name);
20956 }
20957 else if (!DOT_SYMBOLS)
20958 assemble_name (file, XSTR (x, 0));
20959 else
20960 rs6000_output_function_entry (file, XSTR (x, 0));
20961 return;
20962
20963 case 'Z':
20964 /* Like 'L', for last word of TImode/PTImode. */
20965 if (REG_P (x))
20966 fputs (reg_names[REGNO (x) + 3], file);
20967 else if (MEM_P (x))
20968 {
20969 machine_mode mode = GET_MODE (x);
20970 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20971 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20972 output_address (mode, plus_constant (Pmode,
20973 XEXP (XEXP (x, 0), 0), 12));
20974 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20975 output_address (mode, plus_constant (Pmode,
20976 XEXP (XEXP (x, 0), 0), 12));
20977 else
20978 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20979 if (small_data_operand (x, GET_MODE (x)))
20980 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20981 reg_names[SMALL_DATA_REG]);
20982 }
20983 return;
20984
20985 /* Print AltiVec memory operand. */
20986 case 'y':
20987 {
20988 rtx tmp;
20989
20990 gcc_assert (MEM_P (x));
20991
20992 tmp = XEXP (x, 0);
20993
20994 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
20995 && GET_CODE (tmp) == AND
20996 && CONST_INT_P (XEXP (tmp, 1))
20997 && INTVAL (XEXP (tmp, 1)) == -16)
20998 tmp = XEXP (tmp, 0);
20999 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21000 && GET_CODE (tmp) == PRE_MODIFY)
21001 tmp = XEXP (tmp, 1);
21002 if (REG_P (tmp))
21003 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21004 else
21005 {
21006 if (GET_CODE (tmp) != PLUS
21007 || !REG_P (XEXP (tmp, 0))
21008 || !REG_P (XEXP (tmp, 1)))
21009 {
21010 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21011 break;
21012 }
21013
21014 if (REGNO (XEXP (tmp, 0)) == 0)
21015 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21016 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21017 else
21018 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21019 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21020 }
21021 break;
21022 }
21023
21024 case 0:
21025 if (REG_P (x))
21026 fprintf (file, "%s", reg_names[REGNO (x)]);
21027 else if (MEM_P (x))
21028 {
21029 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21030 know the width from the mode. */
21031 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21032 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21033 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21034 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21035 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21036 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21037 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21038 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21039 else
21040 output_address (GET_MODE (x), XEXP (x, 0));
21041 }
21042 else if (toc_relative_expr_p (x, false,
21043 &tocrel_base_oac, &tocrel_offset_oac))
21044 /* This hack along with a corresponding hack in
21045 rs6000_output_addr_const_extra arranges to output addends
21046 where the assembler expects to find them. E.g.
21047 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21048 without this hack would be output as "x@toc+4". We
21049 want "x+4@toc". */
21050 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21051 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21052 output_addr_const (file, XVECEXP (x, 0, 0));
21053 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21054 output_addr_const (file, XVECEXP (x, 0, 1));
21055 else
21056 output_addr_const (file, x);
21057 return;
21058
21059 case '&':
21060 if (const char *name = get_some_local_dynamic_name ())
21061 assemble_name (file, name);
21062 else
21063 output_operand_lossage ("'%%&' used without any "
21064 "local dynamic TLS references");
21065 return;
21066
21067 default:
21068 output_operand_lossage ("invalid %%xn code");
21069 }
21070 }
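
/* Worked example (editorial addition, not in the original source): with
   operand 0 in CR6 and operand 1 the constant 32, a template could use
   .  %E0  -> 26   (EQ bit of CR6: 4 * 6 + 2)
   .  %h1  -> 0    (low-order five bits of 32)
   .  %p1  -> 5    (log2 of 32)
   .  %k1  -> -33  (one's complement)  */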
21071 \f
21072 /* Print the address of an operand. */
21073
21074 void
21075 print_operand_address (FILE *file, rtx x)
21076 {
21077 if (REG_P (x))
21078 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21079 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21080 || GET_CODE (x) == LABEL_REF)
21081 {
21082 output_addr_const (file, x);
21083 if (small_data_operand (x, GET_MODE (x)))
21084 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21085 reg_names[SMALL_DATA_REG]);
21086 else
21087 gcc_assert (!TARGET_TOC);
21088 }
21089 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21090 && REG_P (XEXP (x, 1)))
21091 {
21092 if (REGNO (XEXP (x, 0)) == 0)
21093 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21094 reg_names[ REGNO (XEXP (x, 0)) ]);
21095 else
21096 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21097 reg_names[ REGNO (XEXP (x, 1)) ]);
21098 }
21099 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21100 && CONST_INT_P (XEXP (x, 1)))
21101 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21102 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21103 #if TARGET_MACHO
21104 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21105 && CONSTANT_P (XEXP (x, 1)))
21106 {
21107 fprintf (file, "lo16(");
21108 output_addr_const (file, XEXP (x, 1));
21109 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21110 }
21111 #endif
21112 #if TARGET_ELF
21113 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21114 && CONSTANT_P (XEXP (x, 1)))
21115 {
21116 output_addr_const (file, XEXP (x, 1));
21117 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21118 }
21119 #endif
21120 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21121 {
21122 /* This hack along with a corresponding hack in
21123 rs6000_output_addr_const_extra arranges to output addends
21124 where the assembler expects to find them. E.g.
21125 (lo_sum (reg 9)
21126 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21127 without this hack would be output as "x@toc+8@l(9)". We
21128 want "x+8@toc@l(9)". */
21129 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21130 if (GET_CODE (x) == LO_SUM)
21131 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21132 else
21133 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21134 }
21135 else
21136 output_addr_const (file, x);
21137 }
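
/* Illustrative example (editorial addition, not in the original source),
   assuming the default numeric register names: a plain (reg 9) address
   prints as "0(9)", (plus (reg 9) (const_int 8)) prints as "8(9)", and
   an indexed (plus (reg 9) (reg 10)) prints as "9,10", with the operands
   swapped whenever the first register is r0, since r0 in the base
   position reads as zero.  */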
21138 \f
21139 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21140
21141 static bool
21142 rs6000_output_addr_const_extra (FILE *file, rtx x)
21143 {
21144 if (GET_CODE (x) == UNSPEC)
21145 switch (XINT (x, 1))
21146 {
21147 case UNSPEC_TOCREL:
21148 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21149 && REG_P (XVECEXP (x, 0, 1))
21150 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21151 output_addr_const (file, XVECEXP (x, 0, 0));
21152 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21153 {
21154 if (INTVAL (tocrel_offset_oac) >= 0)
21155 fprintf (file, "+");
21156 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21157 }
21158 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21159 {
21160 putc ('-', file);
21161 assemble_name (file, toc_label_name);
21162 need_toc_init = 1;
21163 }
21164 else if (TARGET_ELF)
21165 fputs ("@toc", file);
21166 return true;
21167
21168 #if TARGET_MACHO
21169 case UNSPEC_MACHOPIC_OFFSET:
21170 output_addr_const (file, XVECEXP (x, 0, 0));
21171 putc ('-', file);
21172 machopic_output_function_base_name (file);
21173 return true;
21174 #endif
21175 }
21176 return false;
21177 }
21178 \f
21179 /* Target hook for assembling integer objects. The PowerPC version has
21180 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21181 is defined. It also needs to handle DI-mode objects on 64-bit
21182 targets. */
21183
21184 static bool
21185 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21186 {
21187 #ifdef RELOCATABLE_NEEDS_FIXUP
21188 /* Special handling for SI values. */
21189 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21190 {
21191 static int recurse = 0;
21192
21193 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21194 the .fixup section. Since the TOC section is already relocated, we
21195 don't need to mark it here. We used to skip the text section, but it
21196 should never be valid for relocated addresses to be placed in the text
21197 section. */
21198 if (DEFAULT_ABI == ABI_V4
21199 && (TARGET_RELOCATABLE || flag_pic > 1)
21200 && in_section != toc_section
21201 && !recurse
21202 && !CONST_SCALAR_INT_P (x)
21203 && CONSTANT_P (x))
21204 {
21205 char buf[256];
21206
21207 recurse = 1;
21208 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21209 fixuplabelno++;
21210 ASM_OUTPUT_LABEL (asm_out_file, buf);
21211 fprintf (asm_out_file, "\t.long\t(");
21212 output_addr_const (asm_out_file, x);
21213 fprintf (asm_out_file, ")@fixup\n");
21214 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21215 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21216 fprintf (asm_out_file, "\t.long\t");
21217 assemble_name (asm_out_file, buf);
21218 fprintf (asm_out_file, "\n\t.previous\n");
21219 recurse = 0;
21220 return true;
21221 }
21222 /* Remove initial .'s to turn a -mcall-aixdesc function
21223 address into the address of the descriptor, not the function
21224 itself. */
21225 else if (SYMBOL_REF_P (x)
21226 && XSTR (x, 0)[0] == '.'
21227 && DEFAULT_ABI == ABI_AIX)
21228 {
21229 const char *name = XSTR (x, 0);
21230 while (*name == '.')
21231 name++;
21232
21233 fprintf (asm_out_file, "\t.long\t%s\n", name);
21234 return true;
21235 }
21236 }
21237 #endif /* RELOCATABLE_NEEDS_FIXUP */
21238 return default_assemble_integer (x, size, aligned_p);
21239 }
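
/* Illustrative sketch (editorial addition, not in the original source)
   of the fixup output above for a relocatable address X on ABI_V4,
   assuming the usual internal label spelling:
   .  .LCP0:  .long (X)@fixup
   .          .section ".fixup","aw"
   .          .align 2
   .          .long .LCP0
   .          .previous
   so the V4 startup code can relocate the word at run time.  */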
21240
21241 /* Return a template string for assembly to emit when making an
21242 external call. FUNOP is the call mem argument operand number. */
21243
21244 static const char *
21245 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21246 {
21247 /* -Wformat-overflow workaround, without which gcc thinks that %u
21248 might produce 10 digits. */
21249 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21250
21251 char arg[12];
21252 arg[0] = 0;
21253 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21254 {
21255 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21256 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21257 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21258 sprintf (arg, "(%%&@tlsld)");
21259 else
21260 gcc_unreachable ();
21261 }
21262
21263 /* The magic 32768 offset here corresponds to the offset of
21264 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21265 char z[11];
21266 sprintf (z, "%%z%u%s", funop,
21267 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21268 ? "+32768" : ""));
21269
21270 static char str[32]; /* 2 spare */
21271 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21272 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21273 sibcall ? "" : "\n\tnop");
21274 else if (DEFAULT_ABI == ABI_V4)
21275 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21276 flag_pic ? "@plt" : "");
21277 #if TARGET_MACHO
21278 /* If/when we remove the -mlongcall option, we can share the AIX/ELFv2 case. */
21279 else if (DEFAULT_ABI == ABI_DARWIN)
21280 {
21281 /* The cookie is in operand funop+2. */
21282 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21283 int cookie = INTVAL (operands[funop + 2]);
21284 if (cookie & CALL_LONG)
21285 {
21286 tree funname = get_identifier (XSTR (operands[funop], 0));
21287 tree labelname = get_prev_label (funname);
21288 gcc_checking_assert (labelname && !sibcall);
21289
21290 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21291 instruction will reach 'foo', otherwise link as 'bl L42'".
21292 "L42" should be a 'branch island', that will do a far jump to
21293 'foo'. Branch islands are generated in
21294 macho_branch_islands(). */
21295 sprintf (str, "jbsr %%z%u,%.10s", funop,
21296 IDENTIFIER_POINTER (labelname));
21297 }
21298 else
21299 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21300 after the call. */
21301 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21302 }
21303 #endif
21304 else
21305 gcc_unreachable ();
21306 return str;
21307 }
21308
21309 const char *
21310 rs6000_call_template (rtx *operands, unsigned int funop)
21311 {
21312 return rs6000_call_template_1 (operands, funop, false);
21313 }
21314
21315 const char *
21316 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21317 {
21318 return rs6000_call_template_1 (operands, funop, true);
21319 }
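
/* Illustrative example (editorial addition, not in the original source):
   for a direct call with FUNOP == 0 the template above expands to
   .  ABI_AIX / ABI_ELFv2      "bl %z0\n\tnop"
   .  ABI_V4 with flag_pic     "bl %z0@plt"
   .  sibcall, AIX / ELFv2     "b %z0"
   with "+32768" appended to %z0 for -msecure-plt and flag_pic == 2.  */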
21320
21321 /* As above, for indirect calls. */
21322
21323 static const char *
21324 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21325 bool sibcall)
21326 {
21327 /* -Wformat-overflow workaround, without which gcc thinks that %u
21328 might produce 10 digits. Note that -Wformat-overflow will not
21329 currently warn here for str[], so do not rely on a warning to
21330 ensure str[] is correctly sized. */
21331 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21332
21333 /* Currently, funop is either 0 or 1. The maximum string is always
21334 a !speculate 64-bit __tls_get_addr call.
21335
21336 ABI_AIX:
21337 . 9 ld 2,%3\n\t
21338 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21339 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21340 . 9 crset 2\n\t
21341 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21342 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21343 . 10 beq%T1l-\n\t
21344 . 10 ld 2,%4(1)
21345 .---
21346 .151
21347
21348 ABI_ELFv2:
21349 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21350 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21351 . 9 crset 2\n\t
21352 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21353 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21354 . 10 beq%T1l-\n\t
21355 . 10 ld 2,%3(1)
21356 .---
21357 .142
21358
21359 ABI_V4:
21360 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21361 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21362 . 9 crset 2\n\t
21363 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21364 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21365 . 8 beq%T1l-
21366 .---
21367 .141 */
21368 static char str[160]; /* 8 spare */
21369 char *s = str;
21370 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21371
21372 if (DEFAULT_ABI == ABI_AIX)
21373 s += sprintf (s,
21374 "l%s 2,%%%u\n\t",
21375 ptrload, funop + 2);
21376
21377 /* We don't need the extra code to stop indirect call speculation if
21378 calling via LR. */
21379 bool speculate = (TARGET_MACHO
21380 || rs6000_speculate_indirect_jumps
21381 || (REG_P (operands[funop])
21382 && REGNO (operands[funop]) == LR_REGNO));
21383
21384 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21385 {
21386 const char *rel64 = TARGET_64BIT ? "64" : "";
21387 char tls[29];
21388 tls[0] = 0;
21389 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21390 {
21391 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21392 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21393 rel64, funop + 1);
21394 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21395 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21396 rel64);
21397 else
21398 gcc_unreachable ();
21399 }
21400
21401 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21402 && flag_pic == 2 ? "+32768" : "");
21403 if (!speculate)
21404 {
21405 s += sprintf (s,
21406 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21407 tls, rel64, funop, addend);
21408 s += sprintf (s, "crset 2\n\t");
21409 }
21410 s += sprintf (s,
21411 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21412 tls, rel64, funop, addend);
21413 }
21414 else if (!speculate)
21415 s += sprintf (s, "crset 2\n\t");
21416
21417 if (DEFAULT_ABI == ABI_AIX)
21418 {
21419 if (speculate)
21420 sprintf (s,
21421 "b%%T%ul\n\t"
21422 "l%s 2,%%%u(1)",
21423 funop, ptrload, funop + 3);
21424 else
21425 sprintf (s,
21426 "beq%%T%ul-\n\t"
21427 "l%s 2,%%%u(1)",
21428 funop, ptrload, funop + 3);
21429 }
21430 else if (DEFAULT_ABI == ABI_ELFv2)
21431 {
21432 if (speculate)
21433 sprintf (s,
21434 "b%%T%ul\n\t"
21435 "l%s 2,%%%u(1)",
21436 funop, ptrload, funop + 2);
21437 else
21438 sprintf (s,
21439 "beq%%T%ul-\n\t"
21440 "l%s 2,%%%u(1)",
21441 funop, ptrload, funop + 2);
21442 }
21443 else
21444 {
21445 if (speculate)
21446 sprintf (s,
21447 "b%%T%u%s",
21448 funop, sibcall ? "" : "l");
21449 else
21450 sprintf (s,
21451 "beq%%T%u%s-%s",
21452 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21453 }
21454 return str;
21455 }
21456
21457 const char *
21458 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21459 {
21460 return rs6000_indirect_call_template_1 (operands, funop, false);
21461 }
21462
21463 const char *
21464 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21465 {
21466 return rs6000_indirect_call_template_1 (operands, funop, true);
21467 }
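
/* Illustrative example (editorial addition, not in the original source):
   an ABI_ELFv2 indirect call with FUNOP == 1 and speculation barriers
   enabled (speculate == false) expands to
   .  crset 2\n\tbeq%T1l-\n\tld 2,%3(1)
   i.e. force the CR0.EQ bit so the hinted conditional bctrl is always
   taken, then restore the TOC pointer; the speculative form is simply
   "b%T1l\n\tld 2,%3(1)".  */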
21468
21469 #if HAVE_AS_PLTSEQ
21470 /* Output indirect call insns.
21471 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21472 const char *
21473 rs6000_pltseq_template (rtx *operands, int which)
21474 {
21475 const char *rel64 = TARGET_64BIT ? "64" : "";
21476 char tls[28];
21477 tls[0] = 0;
21478 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21479 {
21480 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21481 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21482 rel64);
21483 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21484 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21485 rel64);
21486 else
21487 gcc_unreachable ();
21488 }
21489
21490 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21491 static char str[96]; /* 15 spare */
21492 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21493 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21494 && flag_pic == 2 ? "+32768" : "");
21495 switch (which)
21496 {
21497 case 0:
21498 sprintf (str,
21499 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21500 "st%s",
21501 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21502 break;
21503 case 1:
21504 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21505 sprintf (str,
21506 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21507 "lis %%0,0",
21508 tls, off, rel64);
21509 else
21510 sprintf (str,
21511 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21512 "addis %%0,%%1,0",
21513 tls, off, rel64, addend);
21514 break;
21515 case 2:
21516 sprintf (str,
21517 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21518 "l%s %%0,0(%%1)",
21519 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21520 TARGET_64BIT ? "d" : "wz");
21521 break;
21522 case 3:
21523 sprintf (str,
21524 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21525 "mtctr %%1",
21526 tls, rel64, addend);
21527 break;
21528 default:
21529 gcc_unreachable ();
21530 }
21531 return str;
21532 }
21533 #endif
21534
21535 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21536 /* Emit an assembler directive to set symbol visibility for DECL to
21537 VISIBILITY_TYPE. */
21538
21539 static void
21540 rs6000_assemble_visibility (tree decl, int vis)
21541 {
21542 if (TARGET_XCOFF)
21543 return;
21544
21545 /* Functions need to have their entry point symbol visibility set as
21546 well as their descriptor symbol visibility. */
21547 if (DEFAULT_ABI == ABI_AIX
21548 && DOT_SYMBOLS
21549 && TREE_CODE (decl) == FUNCTION_DECL)
21550 {
21551 static const char * const visibility_types[] = {
21552 NULL, "protected", "hidden", "internal"
21553 };
21554
21555 const char *name, *type;
21556
21557 name = ((* targetm.strip_name_encoding)
21558 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21559 type = visibility_types[vis];
21560
21561 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21562 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21563 }
21564 else
21565 default_assemble_visibility (decl, vis);
21566 }
21567 #endif
21568 \f
21569 enum rtx_code
21570 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21571 {
21572 /* Reversal of FP compares takes care -- an ordered compare
21573 becomes an unordered compare and vice versa. */
21574 if (mode == CCFPmode
21575 && (!flag_finite_math_only
21576 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21577 || code == UNEQ || code == LTGT))
21578 return reverse_condition_maybe_unordered (code);
21579 else
21580 return reverse_condition (code);
21581 }
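
/* Illustrative example (editorial addition, not in the original source):
   reversing LT in CCFPmode without flag_finite_math_only yields UNGE
   rather than GE, so the reversed branch is also taken when the original
   compare was unordered because of a NaN operand.  */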
21582
21583 /* Generate a compare for CODE. Return a brand-new rtx that
21584 represents the result of the compare. */
21585
21586 static rtx
21587 rs6000_generate_compare (rtx cmp, machine_mode mode)
21588 {
21589 machine_mode comp_mode;
21590 rtx compare_result;
21591 enum rtx_code code = GET_CODE (cmp);
21592 rtx op0 = XEXP (cmp, 0);
21593 rtx op1 = XEXP (cmp, 1);
21594
21595 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21596 comp_mode = CCmode;
21597 else if (FLOAT_MODE_P (mode))
21598 comp_mode = CCFPmode;
21599 else if (code == GTU || code == LTU
21600 || code == GEU || code == LEU)
21601 comp_mode = CCUNSmode;
21602 else if ((code == EQ || code == NE)
21603 && unsigned_reg_p (op0)
21604 && (unsigned_reg_p (op1)
21605 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21606 /* These are unsigned values, perhaps there will be a later
21607 ordering compare that can be shared with this one. */
21608 comp_mode = CCUNSmode;
21609 else
21610 comp_mode = CCmode;
21611
21612 /* If we have an unsigned compare, make sure we don't have a signed value as
21613 an immediate. */
21614 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21615 && INTVAL (op1) < 0)
21616 {
21617 op0 = copy_rtx_if_shared (op0);
21618 op1 = force_reg (GET_MODE (op0), op1);
21619 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21620 }
21621
21622 /* First, the compare. */
21623 compare_result = gen_reg_rtx (comp_mode);
21624
21625 /* IEEE 128-bit support in VSX registers when we do not have hardware
21626 support. */
21627 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21628 {
21629 rtx libfunc = NULL_RTX;
21630 bool check_nan = false;
21631 rtx dest;
21632
21633 switch (code)
21634 {
21635 case EQ:
21636 case NE:
21637 libfunc = optab_libfunc (eq_optab, mode);
21638 break;
21639
21640 case GT:
21641 case GE:
21642 libfunc = optab_libfunc (ge_optab, mode);
21643 break;
21644
21645 case LT:
21646 case LE:
21647 libfunc = optab_libfunc (le_optab, mode);
21648 break;
21649
21650 case UNORDERED:
21651 case ORDERED:
21652 libfunc = optab_libfunc (unord_optab, mode);
21653 code = (code == UNORDERED) ? NE : EQ;
21654 break;
21655
21656 case UNGE:
21657 case UNGT:
21658 check_nan = true;
21659 libfunc = optab_libfunc (ge_optab, mode);
21660 code = (code == UNGE) ? GE : GT;
21661 break;
21662
21663 case UNLE:
21664 case UNLT:
21665 check_nan = true;
21666 libfunc = optab_libfunc (le_optab, mode);
21667 code = (code == UNLE) ? LE : LT;
21668 break;
21669
21670 case UNEQ:
21671 case LTGT:
21672 check_nan = true;
21673 libfunc = optab_libfunc (eq_optab, mode);
21674 code = (code == UNEQ) ? EQ : NE;
21675 break;
21676
21677 default:
21678 gcc_unreachable ();
21679 }
21680
21681 gcc_assert (libfunc);
21682
21683 if (!check_nan)
21684 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21685 SImode, op0, mode, op1, mode);
21686
21687 /* The library signals an exception for signalling NaNs, so we need to
21688 handle isgreater, etc. by first checking isordered. */
21689 else
21690 {
21691 rtx ne_rtx, normal_dest, unord_dest;
21692 rtx unord_func = optab_libfunc (unord_optab, mode);
21693 rtx join_label = gen_label_rtx ();
21694 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21695 rtx unord_cmp = gen_reg_rtx (comp_mode);
21696
21697
21698 /* Test for either value being a NaN. */
21699 gcc_assert (unord_func);
21700 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21701 SImode, op0, mode, op1, mode);
21702
21703 /* Set dest to 1 if either value is a NaN, and jump to the join
21704 label.  */
21705 dest = gen_reg_rtx (SImode);
21706 emit_move_insn (dest, const1_rtx);
21707 emit_insn (gen_rtx_SET (unord_cmp,
21708 gen_rtx_COMPARE (comp_mode, unord_dest,
21709 const0_rtx)));
21710
21711 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21712 emit_jump_insn (gen_rtx_SET (pc_rtx,
21713 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21714 join_ref,
21715 pc_rtx)));
21716
21717 /* Do the normal comparison, knowing that the values are not
21718 NaNs. */
21719 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21720 SImode, op0, mode, op1, mode);
21721
21722 emit_insn (gen_cstoresi4 (dest,
21723 gen_rtx_fmt_ee (code, SImode, normal_dest,
21724 const0_rtx),
21725 normal_dest, const0_rtx));
21726
21727 /* Join NaN and non-NaN paths. Compare dest against 0. */
21728 emit_label (join_label);
21729 code = NE;
21730 }
21731
21732 emit_insn (gen_rtx_SET (compare_result,
21733 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21734 }
21735
21736 else
21737 {
21738 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21739 CLOBBERs to match cmptf_internal2 pattern. */
21740 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21741 && FLOAT128_IBM_P (GET_MODE (op0))
21742 && TARGET_HARD_FLOAT)
21743 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21744 gen_rtvec (10,
21745 gen_rtx_SET (compare_result,
21746 gen_rtx_COMPARE (comp_mode, op0, op1)),
21747 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21748 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21749 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21750 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21751 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21752 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21753 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21754 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21755 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21756 else if (GET_CODE (op1) == UNSPEC
21757 && XINT (op1, 1) == UNSPEC_SP_TEST)
21758 {
21759 rtx op1b = XVECEXP (op1, 0, 0);
21760 comp_mode = CCEQmode;
21761 compare_result = gen_reg_rtx (CCEQmode);
21762 if (TARGET_64BIT)
21763 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21764 else
21765 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21766 }
21767 else
21768 emit_insn (gen_rtx_SET (compare_result,
21769 gen_rtx_COMPARE (comp_mode, op0, op1)));
21770 }
21771
21772 /* Some kinds of FP comparisons need an OR operation;
21773 under flag_finite_math_only we don't bother. */
21774 if (FLOAT_MODE_P (mode)
21775 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21776 && !flag_finite_math_only
21777 && (code == LE || code == GE
21778 || code == UNEQ || code == LTGT
21779 || code == UNGT || code == UNLT))
21780 {
21781 enum rtx_code or1, or2;
21782 rtx or1_rtx, or2_rtx, compare2_rtx;
21783 rtx or_result = gen_reg_rtx (CCEQmode);
21784
21785 switch (code)
21786 {
21787 case LE: or1 = LT; or2 = EQ; break;
21788 case GE: or1 = GT; or2 = EQ; break;
21789 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21790 case LTGT: or1 = LT; or2 = GT; break;
21791 case UNGT: or1 = UNORDERED; or2 = GT; break;
21792 case UNLT: or1 = UNORDERED; or2 = LT; break;
21793 default: gcc_unreachable ();
21794 }
21795 validate_condition_mode (or1, comp_mode);
21796 validate_condition_mode (or2, comp_mode);
21797 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21798 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21799 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21800 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21801 const_true_rtx);
21802 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21803
21804 compare_result = or_result;
21805 code = EQ;
21806 }
21807
21808 validate_condition_mode (code, GET_MODE (compare_result));
21809
21810 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21811 }
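
/* Illustrative example (editorial addition, not in the original source):
   LE is not a single CR bit, so per the table above it is rewritten as
   LT | EQ: a second CCEQ compare ORs the two bits of the first result
   (typically matched by a cror pattern) and the final test returned to
   the caller becomes EQ against that CCEQ result.  */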
21812
21813 \f
21814 /* Return the diagnostic message string if the binary operation OP is
21815 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21816
21817 static const char*
21818 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21819 const_tree type1,
21820 const_tree type2)
21821 {
21822 machine_mode mode1 = TYPE_MODE (type1);
21823 machine_mode mode2 = TYPE_MODE (type2);
21824
21825 /* For complex modes, use the inner type. */
21826 if (COMPLEX_MODE_P (mode1))
21827 mode1 = GET_MODE_INNER (mode1);
21828
21829 if (COMPLEX_MODE_P (mode2))
21830 mode2 = GET_MODE_INNER (mode2);
21831
21832 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21833 double to intermix unless -mfloat128-convert. */
21834 if (mode1 == mode2)
21835 return NULL;
21836
21837 if (!TARGET_FLOAT128_CVT)
21838 {
21839 if ((mode1 == KFmode && mode2 == IFmode)
21840 || (mode1 == IFmode && mode2 == KFmode))
21841 return N_("__float128 and __ibm128 cannot be used in the same "
21842 "expression");
21843
21844 if (TARGET_IEEEQUAD
21845 && ((mode1 == IFmode && mode2 == TFmode)
21846 || (mode1 == TFmode && mode2 == IFmode)))
21847 return N_("__ibm128 and long double cannot be used in the same "
21848 "expression");
21849
21850 if (!TARGET_IEEEQUAD
21851 && ((mode1 == KFmode && mode2 == TFmode)
21852 || (mode1 == TFmode && mode2 == KFmode)))
21853 return N_("__float128 and long double cannot be used in the same "
21854 "expression");
21855 }
21856
21857 return NULL;
21858 }
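
/* Illustrative example (editorial addition, not in the original source):
   without -mfloat128-convert, mixing the two 128-bit formats, as in
   .  __float128 a;  __ibm128 b;  ... a + b ...
   is rejected with "__float128 and __ibm128 cannot be used in the same
   expression", since KFmode and IFmode operands may not intermix.  */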
21859
21860 \f
21861 /* Expand floating point conversion to/from __float128 and __ibm128. */
21862
21863 void
21864 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21865 {
21866 machine_mode dest_mode = GET_MODE (dest);
21867 machine_mode src_mode = GET_MODE (src);
21868 convert_optab cvt = unknown_optab;
21869 bool do_move = false;
21870 rtx libfunc = NULL_RTX;
21871 rtx dest2;
21872 typedef rtx (*rtx_2func_t) (rtx, rtx);
21873 rtx_2func_t hw_convert = (rtx_2func_t)0;
21874 size_t kf_or_tf;
21875
21876 struct hw_conv_t {
21877 rtx_2func_t from_df;
21878 rtx_2func_t from_sf;
21879 rtx_2func_t from_si_sign;
21880 rtx_2func_t from_si_uns;
21881 rtx_2func_t from_di_sign;
21882 rtx_2func_t from_di_uns;
21883 rtx_2func_t to_df;
21884 rtx_2func_t to_sf;
21885 rtx_2func_t to_si_sign;
21886 rtx_2func_t to_si_uns;
21887 rtx_2func_t to_di_sign;
21888 rtx_2func_t to_di_uns;
21889 } hw_conversions[2] = {
21890 /* Conversions to/from KFmode. */
21891 {
21892 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21893 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21894 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21895 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21896 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21897 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21898 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21899 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21900 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21901 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21902 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21903 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21904 },
21905
21906 /* Conversions to/from TFmode. */
21907 {
21908 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21909 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21910 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21911 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21912 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21913 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21914 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21915 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21916 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21917 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21918 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21919 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21920 },
21921 };
21922
21923 if (dest_mode == src_mode)
21924 gcc_unreachable ();
21925
21926 /* Eliminate memory operations. */
21927 if (MEM_P (src))
21928 src = force_reg (src_mode, src);
21929
21930 if (MEM_P (dest))
21931 {
21932 rtx tmp = gen_reg_rtx (dest_mode);
21933 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21934 rs6000_emit_move (dest, tmp, dest_mode);
21935 return;
21936 }
21937
21938 /* Convert to IEEE 128-bit floating point. */
21939 if (FLOAT128_IEEE_P (dest_mode))
21940 {
21941 if (dest_mode == KFmode)
21942 kf_or_tf = 0;
21943 else if (dest_mode == TFmode)
21944 kf_or_tf = 1;
21945 else
21946 gcc_unreachable ();
21947
21948 switch (src_mode)
21949 {
21950 case E_DFmode:
21951 cvt = sext_optab;
21952 hw_convert = hw_conversions[kf_or_tf].from_df;
21953 break;
21954
21955 case E_SFmode:
21956 cvt = sext_optab;
21957 hw_convert = hw_conversions[kf_or_tf].from_sf;
21958 break;
21959
21960 case E_KFmode:
21961 case E_IFmode:
21962 case E_TFmode:
21963 if (FLOAT128_IBM_P (src_mode))
21964 cvt = sext_optab;
21965 else
21966 do_move = true;
21967 break;
21968
21969 case E_SImode:
21970 if (unsigned_p)
21971 {
21972 cvt = ufloat_optab;
21973 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21974 }
21975 else
21976 {
21977 cvt = sfloat_optab;
21978 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21979 }
21980 break;
21981
21982 case E_DImode:
21983 if (unsigned_p)
21984 {
21985 cvt = ufloat_optab;
21986 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
21987 }
21988 else
21989 {
21990 cvt = sfloat_optab;
21991 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
21992 }
21993 break;
21994
21995 default:
21996 gcc_unreachable ();
21997 }
21998 }
21999
22000 /* Convert from IEEE 128-bit floating point. */
22001 else if (FLOAT128_IEEE_P (src_mode))
22002 {
22003 if (src_mode == KFmode)
22004 kf_or_tf = 0;
22005 else if (src_mode == TFmode)
22006 kf_or_tf = 1;
22007 else
22008 gcc_unreachable ();
22009
22010 switch (dest_mode)
22011 {
22012 case E_DFmode:
22013 cvt = trunc_optab;
22014 hw_convert = hw_conversions[kf_or_tf].to_df;
22015 break;
22016
22017 case E_SFmode:
22018 cvt = trunc_optab;
22019 hw_convert = hw_conversions[kf_or_tf].to_sf;
22020 break;
22021
22022 case E_KFmode:
22023 case E_IFmode:
22024 case E_TFmode:
22025 if (FLOAT128_IBM_P (dest_mode))
22026 cvt = trunc_optab;
22027 else
22028 do_move = true;
22029 break;
22030
22031 case E_SImode:
22032 if (unsigned_p)
22033 {
22034 cvt = ufix_optab;
22035 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22036 }
22037 else
22038 {
22039 cvt = sfix_optab;
22040 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22041 }
22042 break;
22043
22044 case E_DImode:
22045 if (unsigned_p)
22046 {
22047 cvt = ufix_optab;
22048 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22049 }
22050 else
22051 {
22052 cvt = sfix_optab;
22053 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22054 }
22055 break;
22056
22057 default:
22058 gcc_unreachable ();
22059 }
22060 }
22061
22062 /* Both IBM format. */
22063 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22064 do_move = true;
22065
22066 else
22067 gcc_unreachable ();
22068
22069 /* Handle conversion between TFmode/KFmode/IFmode. */
22070 if (do_move)
22071 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22072
22073 /* Handle conversion if we have hardware support. */
22074 else if (TARGET_FLOAT128_HW && hw_convert)
22075 emit_insn ((hw_convert) (dest, src));
22076
22077 /* Call an external function to do the conversion. */
22078 else if (cvt != unknown_optab)
22079 {
22080 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22081 gcc_assert (libfunc != NULL_RTX);
22082
22083 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22084 src, src_mode);
22085
22086 gcc_assert (dest2 != NULL_RTX);
22087 if (!rtx_equal_p (dest, dest2))
22088 emit_move_insn (dest, dest2);
22089 }
22090
22091 else
22092 gcc_unreachable ();
22093
22094 return;
22095 }
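
/* Illustrative example (editorial addition, not in the original source):
   converting DFmode to KFmode selects sext_optab and, under
   TARGET_FLOAT128_HW, emits gen_extenddfkf2_hw directly; without
   hardware support the sext_optab libfunc is called instead (e.g. the
   __extenddfkf2 routine in libgcc, assuming the usual libfunc names).  */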
22096
22097 \f
22098 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22099 can be used as that dest register. Return the dest register. */
22100
22101 rtx
22102 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22103 {
22104 if (op2 == const0_rtx)
22105 return op1;
22106
22107 if (GET_CODE (scratch) == SCRATCH)
22108 scratch = gen_reg_rtx (mode);
22109
22110 if (logical_operand (op2, mode))
22111 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22112 else
22113 emit_insn (gen_rtx_SET (scratch,
22114 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22115
22116 return scratch;
22117 }
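/* As a sketch of what rs6000_emit_eqne produces: for "r == 17", OP2 is a
   logical_operand, so SCRATCH gets r XOR 17 (roughly an xori), which is
   zero iff the operands are equal; otherwise SCRATCH gets OP1 plus the
   negated OP2, and the caller tests SCRATCH against zero.  */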
22118
22119 void
22120 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22121 {
22122 rtx condition_rtx;
22123 machine_mode op_mode;
22124 enum rtx_code cond_code;
22125 rtx result = operands[0];
22126
22127 condition_rtx = rs6000_generate_compare (operands[1], mode);
22128 cond_code = GET_CODE (condition_rtx);
22129
22130 if (cond_code == NE
22131 || cond_code == GE || cond_code == LE
22132 || cond_code == GEU || cond_code == LEU
22133 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22134 {
22135 rtx not_result = gen_reg_rtx (CCEQmode);
22136 rtx not_op, rev_cond_rtx;
22137 machine_mode cc_mode;
22138
22139 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22140
22141 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22142 SImode, XEXP (condition_rtx, 0), const0_rtx);
22143 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22144 emit_insn (gen_rtx_SET (not_result, not_op));
22145 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22146 }
22147
22148 op_mode = GET_MODE (XEXP (operands[1], 0));
22149 if (op_mode == VOIDmode)
22150 op_mode = GET_MODE (XEXP (operands[1], 1));
22151
22152 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22153 {
22154 PUT_MODE (condition_rtx, DImode);
22155 convert_move (result, condition_rtx, 0);
22156 }
22157 else
22158 {
22159 PUT_MODE (condition_rtx, SImode);
22160 emit_insn (gen_rtx_SET (result, condition_rtx));
22161 }
22162 }
22163
22164 /* Emit a conditional branch to operands[3], testing operands[0] in MODE. */
22165
22166 void
22167 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22168 {
22169 rtx condition_rtx, loc_ref;
22170
22171 condition_rtx = rs6000_generate_compare (operands[0], mode);
22172 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22173 emit_jump_insn (gen_rtx_SET (pc_rtx,
22174 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22175 loc_ref, pc_rtx)));
22176 }
22177
22178 /* Return the string to output a conditional branch to LABEL, which is
22179 the operand template of the label, or NULL if the branch is really a
22180 conditional return.
22181
22182 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22183 condition code register and its mode specifies what kind of
22184 comparison we made.
22185
22186 REVERSED is nonzero if we should reverse the sense of the comparison.
22187
22188 INSN is the insn. */
22189
22190 char *
22191 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22192 {
22193 static char string[64];
22194 enum rtx_code code = GET_CODE (op);
22195 rtx cc_reg = XEXP (op, 0);
22196 machine_mode mode = GET_MODE (cc_reg);
22197 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22198 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22199 int really_reversed = reversed ^ need_longbranch;
22200 char *s = string;
22201 const char *ccode;
22202 const char *pred;
22203 rtx note;
22204
22205 validate_condition_mode (code, mode);
22206
22207 /* Work out which way this really branches. We could use
22208 reverse_condition_maybe_unordered here always but this
22209 makes the resulting assembler clearer. */
22210 if (really_reversed)
22211 {
22212 /* Reversal of FP compares needs care -- an ordered compare
22213 becomes an unordered compare and vice versa. */
22214 if (mode == CCFPmode)
22215 code = reverse_condition_maybe_unordered (code);
22216 else
22217 code = reverse_condition (code);
22218 }
22219
22220 switch (code)
22221 {
22222 /* Not all of these are actually distinct opcodes, but
22223 we distinguish them for clarity of the resulting assembler. */
22224 case NE: case LTGT:
22225 ccode = "ne"; break;
22226 case EQ: case UNEQ:
22227 ccode = "eq"; break;
22228 case GE: case GEU:
22229 ccode = "ge"; break;
22230 case GT: case GTU: case UNGT:
22231 ccode = "gt"; break;
22232 case LE: case LEU:
22233 ccode = "le"; break;
22234 case LT: case LTU: case UNLT:
22235 ccode = "lt"; break;
22236 case UNORDERED: ccode = "un"; break;
22237 case ORDERED: ccode = "nu"; break;
22238 case UNGE: ccode = "nl"; break;
22239 case UNLE: ccode = "ng"; break;
22240 default:
22241 gcc_unreachable ();
22242 }
22243
22244 /* Maybe we have a guess as to how likely the branch is. */
22245 pred = "";
22246 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22247 if (note != NULL_RTX)
22248 {
22249 /* PROB is the difference from 50%. */
22250 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22251 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22252
22253 /* Only hint for highly probable/improbable branches on newer cpus when
22254 we have real profile data, as static prediction overrides processor
22255 dynamic prediction. For older cpus we may as well always hint, but
22256 assume not taken for branches that are very close to 50% as a
22257 mispredicted taken branch is more expensive than a
22258 mispredicted not-taken branch. */
22259 if (rs6000_always_hint
22260 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22261 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22262 && br_prob_note_reliable_p (note)))
22263 {
22264 if (abs (prob) > REG_BR_PROB_BASE / 20
22265 && ((prob > 0) ^ need_longbranch))
22266 pred = "+";
22267 else
22268 pred = "-";
22269 }
22270 }
22271
22272 if (label == NULL)
22273 s += sprintf (s, "b%slr%s ", ccode, pred);
22274 else
22275 s += sprintf (s, "b%s%s ", ccode, pred);
22276
22277 /* We need to escape any '%' characters in the reg_names string.
22278 Assume they'd only be the first character.... */
22279 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22280 *s++ = '%';
22281 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22282
22283 if (label != NULL)
22284 {
22285 /* If the branch distance was too far, we may have to use an
22286 unconditional branch to go the distance. */
22287 if (need_longbranch)
22288 s += sprintf (s, ",$+8\n\tb %s", label);
22289 else
22290 s += sprintf (s, ",%s", label);
22291 }
22292
22293 return string;
22294 }
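/* Example outputs (assuming the usual "0".."7" names for the CR fields):
   a likely-taken short branch prints as something like "beq+ 0,.L2", while
   a branch whose target is out of range is reversed and emitted as
   "bne 0,$+8" followed by "b .L2".  */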
22295
22296 /* Emit a VSX or Altivec compare and return the mask, or NULL_RTX. */
22297
22298 static rtx
22299 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22300 {
22301 rtx mask;
22302 machine_mode mode = GET_MODE (op0);
22303
22304 switch (code)
22305 {
22306 default:
22307 break;
22308
22309 case GE:
22310 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22311 return NULL_RTX;
22312 /* FALLTHRU */
22313
22314 case EQ:
22315 case GT:
22316 case GTU:
22317 case ORDERED:
22318 case UNORDERED:
22319 case UNEQ:
22320 case LTGT:
22321 mask = gen_reg_rtx (mode);
22322 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22323 return mask;
22324 }
22325
22326 return NULL_RTX;
22327 }
22328
22329 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22330 DMODE is the expected destination mode. This is a recursive function. */
22331
22332 static rtx
22333 rs6000_emit_vector_compare (enum rtx_code rcode,
22334 rtx op0, rtx op1,
22335 machine_mode dmode)
22336 {
22337 rtx mask;
22338 bool swap_operands = false;
22339 bool try_again = false;
22340
22341 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22342 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22343
22344 /* See if the comparison works as is. */
22345 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22346 if (mask)
22347 return mask;
22348
22349 switch (rcode)
22350 {
22351 case LT:
22352 rcode = GT;
22353 swap_operands = true;
22354 try_again = true;
22355 break;
22356 case LTU:
22357 rcode = GTU;
22358 swap_operands = true;
22359 try_again = true;
22360 break;
22361 case NE:
22362 case UNLE:
22363 case UNLT:
22364 case UNGE:
22365 case UNGT:
22366 /* Invert condition and try again.
22367 e.g., A != B becomes ~(A==B). */
22368 {
22369 enum rtx_code rev_code;
22370 enum insn_code nor_code;
22371 rtx mask2;
22372
22373 rev_code = reverse_condition_maybe_unordered (rcode);
22374 if (rev_code == UNKNOWN)
22375 return NULL_RTX;
22376
22377 nor_code = optab_handler (one_cmpl_optab, dmode);
22378 if (nor_code == CODE_FOR_nothing)
22379 return NULL_RTX;
22380
22381 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22382 if (!mask2)
22383 return NULL_RTX;
22384
22385 mask = gen_reg_rtx (dmode);
22386 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22387 return mask;
22388 }
22389 break;
22390 case GE:
22391 case GEU:
22392 case LE:
22393 case LEU:
22394 /* Try GT/GTU/LT/LTU OR EQ */
22395 {
22396 rtx c_rtx, eq_rtx;
22397 enum insn_code ior_code;
22398 enum rtx_code new_code;
22399
22400 switch (rcode)
22401 {
22402 case GE:
22403 new_code = GT;
22404 break;
22405
22406 case GEU:
22407 new_code = GTU;
22408 break;
22409
22410 case LE:
22411 new_code = LT;
22412 break;
22413
22414 case LEU:
22415 new_code = LTU;
22416 break;
22417
22418 default:
22419 gcc_unreachable ();
22420 }
22421
22422 ior_code = optab_handler (ior_optab, dmode);
22423 if (ior_code == CODE_FOR_nothing)
22424 return NULL_RTX;
22425
22426 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22427 if (!c_rtx)
22428 return NULL_RTX;
22429
22430 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22431 if (!eq_rtx)
22432 return NULL_RTX;
22433
22434 mask = gen_reg_rtx (dmode);
22435 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22436 return mask;
22437 }
22438 break;
22439 default:
22440 return NULL_RTX;
22441 }
22442
22443 if (try_again)
22444 {
22445 if (swap_operands)
22446 std::swap (op0, op1);
22447
22448 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22449 if (mask)
22450 return mask;
22451 }
22452
22453 /* You only get two chances. */
22454 return NULL_RTX;
22455 }
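/* As a sketch, a V4SI "a <= b" has no single instruction, so the GE/LE
   path above builds it from two compares and an OR, roughly:

	vcmpgtsw lt,b,a		(LE -> LT -> swapped GT)
	vcmpequw eq,a,b
	vor      mask,lt,eq

   and NE is built as the one's complement of the EQ mask.  */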
22456
22457 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22458 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22459 operands for the relation operation COND. */
22460
22461 int
22462 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22463 rtx cond, rtx cc_op0, rtx cc_op1)
22464 {
22465 machine_mode dest_mode = GET_MODE (dest);
22466 machine_mode mask_mode = GET_MODE (cc_op0);
22467 enum rtx_code rcode = GET_CODE (cond);
22468 machine_mode cc_mode = CCmode;
22469 rtx mask;
22470 rtx cond2;
22471 bool invert_move = false;
22472
22473 if (VECTOR_UNIT_NONE_P (dest_mode))
22474 return 0;
22475
22476 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22477 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22478
22479 switch (rcode)
22480 {
22481 /* Swap operands if we can, and fall back to doing the operation as
22482 specified, and doing a NOR to invert the test. */
22483 case NE:
22484 case UNLE:
22485 case UNLT:
22486 case UNGE:
22487 case UNGT:
22488 /* Invert condition and try again.
22489 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22490 invert_move = true;
22491 rcode = reverse_condition_maybe_unordered (rcode);
22492 if (rcode == UNKNOWN)
22493 return 0;
22494 break;
22495
22496 case GE:
22497 case LE:
22498 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22499 {
22500 /* Invert condition to avoid compound test. */
22501 invert_move = true;
22502 rcode = reverse_condition (rcode);
22503 }
22504 break;
22505
22506 case GTU:
22507 case GEU:
22508 case LTU:
22509 case LEU:
22510 /* Mark unsigned tests with CCUNSmode. */
22511 cc_mode = CCUNSmode;
22512
22513 /* Invert condition to avoid compound test if necessary. */
22514 if (rcode == GEU || rcode == LEU)
22515 {
22516 invert_move = true;
22517 rcode = reverse_condition (rcode);
22518 }
22519 break;
22520
22521 default:
22522 break;
22523 }
22524
22525 /* Get the vector mask for the given relational operations. */
22526 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22527
22528 if (!mask)
22529 return 0;
22530
22531 if (invert_move)
22532 std::swap (op_true, op_false);
22533
22534 /* The compare mask is -1/0 per element, so optimize constant -1/0 arms. */
22535 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22536 && (GET_CODE (op_true) == CONST_VECTOR
22537 || GET_CODE (op_false) == CONST_VECTOR))
22538 {
22539 rtx constant_0 = CONST0_RTX (dest_mode);
22540 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22541
22542 if (op_true == constant_m1 && op_false == constant_0)
22543 {
22544 emit_move_insn (dest, mask);
22545 return 1;
22546 }
22547
22548 else if (op_true == constant_0 && op_false == constant_m1)
22549 {
22550 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22551 return 1;
22552 }
22553
22554 /* If we can't use the vector comparison directly, perhaps we can use
22555 the mask for the true or false fields, instead of loading up a
22556 constant. */
22557 if (op_true == constant_m1)
22558 op_true = mask;
22559
22560 if (op_false == constant_0)
22561 op_false = mask;
22562 }
22563
22564 if (!REG_P (op_true) && !SUBREG_P (op_true))
22565 op_true = force_reg (dest_mode, op_true);
22566
22567 if (!REG_P (op_false) && !SUBREG_P (op_false))
22568 op_false = force_reg (dest_mode, op_false);
22569
22570 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22571 CONST0_RTX (dest_mode));
22572 emit_insn (gen_rtx_SET (dest,
22573 gen_rtx_IF_THEN_ELSE (dest_mode,
22574 cond2,
22575 op_true,
22576 op_false)));
22577 return 1;
22578 }
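/* E.g. "d = (a > b) ? c1 : c2" on V4SI forms the mask with a vcmpgtsw and
   then selects with a single vsel/xxsel via the IF_THEN_ELSE above; if c1
   is all-ones and c2 is all-zeros, the mask itself is simply copied to d.  */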
22579
22580 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22581 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands
22582 of the last comparison is nonzero/true, FALSE_COND if it is zero/false.
22583 Return 0 if the hardware has no such operation. */
22584
22585 static int
22586 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22587 {
22588 enum rtx_code code = GET_CODE (op);
22589 rtx op0 = XEXP (op, 0);
22590 rtx op1 = XEXP (op, 1);
22591 machine_mode compare_mode = GET_MODE (op0);
22592 machine_mode result_mode = GET_MODE (dest);
22593 bool max_p = false;
22594
22595 if (result_mode != compare_mode)
22596 return 0;
22597
22598 if (code == GE || code == GT)
22599 max_p = true;
22600 else if (code == LE || code == LT)
22601 max_p = false;
22602 else
22603 return 0;
22604
22605 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22606 ;
22607
22608 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22609 max_p = !max_p;
22610
22611 else
22612 return 0;
22613
22614 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22615 return 1;
22616 }
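/* E.g. with ISA 3.0, "(a >= b) ? a : b" in DFmode maps to a single
   xsmaxcdp, and the swapped arms "(a >= b) ? b : a" flip MAX_P and map
   to xsmincdp.  */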
22617
22618 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22619 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22620 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22621 zero/false. Return 0 if the hardware has no such operation. */
22622
22623 static int
22624 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22625 {
22626 enum rtx_code code = GET_CODE (op);
22627 rtx op0 = XEXP (op, 0);
22628 rtx op1 = XEXP (op, 1);
22629 machine_mode result_mode = GET_MODE (dest);
22630 rtx compare_rtx;
22631 rtx cmove_rtx;
22632 rtx clobber_rtx;
22633
22634 if (!can_create_pseudo_p ())
22635 return 0;
22636
22637 switch (code)
22638 {
22639 case EQ:
22640 case GE:
22641 case GT:
22642 break;
22643
22644 case NE:
22645 case LT:
22646 case LE:
22647 code = swap_condition (code);
22648 std::swap (op0, op1);
22649 break;
22650
22651 default:
22652 return 0;
22653 }
22654
22655 /* Generate: [(parallel [(set (dest)
22656 (if_then_else (op (cmp1) (cmp2))
22657 (true)
22658 (false)))
22659 (clobber (scratch))])]. */
22660
22661 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22662 cmove_rtx = gen_rtx_SET (dest,
22663 gen_rtx_IF_THEN_ELSE (result_mode,
22664 compare_rtx,
22665 true_cond,
22666 false_cond));
22667
22668 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22669 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22670 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22671
22672 return 1;
22673 }
22674
22675 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22676 operands of the last comparison is nonzero/true, FALSE_COND if it
22677 is zero/false. Return 0 if the hardware has no such operation. */
22678
22679 int
22680 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22681 {
22682 enum rtx_code code = GET_CODE (op);
22683 rtx op0 = XEXP (op, 0);
22684 rtx op1 = XEXP (op, 1);
22685 machine_mode compare_mode = GET_MODE (op0);
22686 machine_mode result_mode = GET_MODE (dest);
22687 rtx temp;
22688 bool is_against_zero;
22689
22690 /* These modes should always match. */
22691 if (GET_MODE (op1) != compare_mode
22692 /* In the isel case, however, we can use a compare immediate, so
22693 op1 may be a small constant. */
22694 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22695 return 0;
22696 if (GET_MODE (true_cond) != result_mode)
22697 return 0;
22698 if (GET_MODE (false_cond) != result_mode)
22699 return 0;
22700
22701 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22702 if (TARGET_P9_MINMAX
22703 && (compare_mode == SFmode || compare_mode == DFmode)
22704 && (result_mode == SFmode || result_mode == DFmode))
22705 {
22706 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22707 return 1;
22708
22709 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22710 return 1;
22711 }
22712
22713 /* Don't allow using floating point comparisons for integer results for
22714 now. */
22715 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22716 return 0;
22717
22718 /* First, work out if the hardware can do this at all, or
22719 if it's too slow.... */
22720 if (!FLOAT_MODE_P (compare_mode))
22721 {
22722 if (TARGET_ISEL)
22723 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22724 return 0;
22725 }
22726
22727 is_against_zero = op1 == CONST0_RTX (compare_mode);
22728
22729 /* A floating-point subtract might overflow, underflow, or produce
22730 an inexact result, thus changing the floating-point flags, so it
22731 can't be generated if we care about that. It's safe if one side
22732 of the construct is zero, since then no subtract will be
22733 generated. */
22734 if (SCALAR_FLOAT_MODE_P (compare_mode)
22735 && flag_trapping_math && ! is_against_zero)
22736 return 0;
22737
22738 /* Eliminate half of the comparisons by switching operands, this
22739 makes the remaining code simpler. */
22740 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22741 || code == LTGT || code == LT || code == UNLE)
22742 {
22743 code = reverse_condition_maybe_unordered (code);
22744 temp = true_cond;
22745 true_cond = false_cond;
22746 false_cond = temp;
22747 }
22748
22749 /* UNEQ and LTGT take four instructions for a comparison with zero,
22750 so it'll probably be faster to use a branch here too. */
22751 if (code == UNEQ && HONOR_NANS (compare_mode))
22752 return 0;
22753
22754 /* We're going to try to implement comparisons by performing
22755 a subtract, then comparing against zero. Unfortunately,
22756 Inf - Inf is NaN, which is not zero, and so if we don't
22757 know that the operand is finite and the comparison
22758 would treat EQ differently from UNORDERED, we can't do it. */
22759 if (HONOR_INFINITIES (compare_mode)
22760 && code != GT && code != UNGE
22761 && (!CONST_DOUBLE_P (op1)
22762 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22763 /* Constructs of the form (a OP b ? a : b) are safe. */
22764 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22765 || (! rtx_equal_p (op0, true_cond)
22766 && ! rtx_equal_p (op1, true_cond))))
22767 return 0;
22768
22769 /* At this point we know we can use fsel. */
22770
22771 /* Reduce the comparison to a comparison against zero. */
22772 if (! is_against_zero)
22773 {
22774 temp = gen_reg_rtx (compare_mode);
22775 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22776 op0 = temp;
22777 op1 = CONST0_RTX (compare_mode);
22778 }
22779
22780 /* If we don't care about NaNs we can reduce some of the comparisons
22781 down to faster ones. */
22782 if (! HONOR_NANS (compare_mode))
22783 switch (code)
22784 {
22785 case GT:
22786 code = LE;
22787 temp = true_cond;
22788 true_cond = false_cond;
22789 false_cond = temp;
22790 break;
22791 case UNGE:
22792 code = GE;
22793 break;
22794 case UNEQ:
22795 code = EQ;
22796 break;
22797 default:
22798 break;
22799 }
22800
22801 /* Now, reduce everything down to a GE. */
22802 switch (code)
22803 {
22804 case GE:
22805 break;
22806
22807 case LE:
22808 temp = gen_reg_rtx (compare_mode);
22809 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22810 op0 = temp;
22811 break;
22812
22813 case ORDERED:
22814 temp = gen_reg_rtx (compare_mode);
22815 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22816 op0 = temp;
22817 break;
22818
22819 case EQ:
22820 temp = gen_reg_rtx (compare_mode);
22821 emit_insn (gen_rtx_SET (temp,
22822 gen_rtx_NEG (compare_mode,
22823 gen_rtx_ABS (compare_mode, op0))));
22824 op0 = temp;
22825 break;
22826
22827 case UNGE:
22828 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22829 temp = gen_reg_rtx (result_mode);
22830 emit_insn (gen_rtx_SET (temp,
22831 gen_rtx_IF_THEN_ELSE (result_mode,
22832 gen_rtx_GE (VOIDmode,
22833 op0, op1),
22834 true_cond, false_cond)));
22835 false_cond = true_cond;
22836 true_cond = temp;
22837
22838 temp = gen_reg_rtx (compare_mode);
22839 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22840 op0 = temp;
22841 break;
22842
22843 case GT:
22844 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22845 temp = gen_reg_rtx (result_mode);
22846 emit_insn (gen_rtx_SET (temp,
22847 gen_rtx_IF_THEN_ELSE (result_mode,
22848 gen_rtx_GE (VOIDmode,
22849 op0, op1),
22850 true_cond, false_cond)));
22851 true_cond = false_cond;
22852 false_cond = temp;
22853
22854 temp = gen_reg_rtx (compare_mode);
22855 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22856 op0 = temp;
22857 break;
22858
22859 default:
22860 gcc_unreachable ();
22861 }
22862
22863 emit_insn (gen_rtx_SET (dest,
22864 gen_rtx_IF_THEN_ELSE (result_mode,
22865 gen_rtx_GE (VOIDmode,
22866 op0, op1),
22867 true_cond, false_cond)));
22868 return 1;
22869 }
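/* As a worked example of the reductions above (a sketch, assuming
   !HONOR_NANS): "a <= 0.0 ? x : y" keeps code LE, which negates op0, so
   the final insn is effectively "fsel dest,-a,x,y", i.e.
   dest = (-a >= 0) ? x : y.  */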
22870
22871 /* Same as above, but for ints (isel). */
22872
22873 int
22874 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22875 {
22876 rtx condition_rtx, cr;
22877 machine_mode mode = GET_MODE (dest);
22878 enum rtx_code cond_code;
22879 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22880 bool signedp;
22881
22882 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22883 return 0;
22884
22885 /* We still have to do the compare, because isel doesn't do a
22886 compare; it just looks at the CRx bits set by a previous compare
22887 instruction. */
22888 condition_rtx = rs6000_generate_compare (op, mode);
22889 cond_code = GET_CODE (condition_rtx);
22890 cr = XEXP (condition_rtx, 0);
22891 signedp = GET_MODE (cr) == CCmode;
22892
22893 isel_func = (mode == SImode
22894 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22895 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22896
22897 switch (cond_code)
22898 {
22899 case LT: case GT: case LTU: case GTU: case EQ:
22900 /* isel handles these directly. */
22901 break;
22902
22903 default:
22904 /* We need to swap the sense of the comparison. */
22905 {
22906 std::swap (false_cond, true_cond);
22907 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22908 }
22909 break;
22910 }
22911
22912 false_cond = force_reg (mode, false_cond);
22913 if (true_cond != const0_rtx)
22914 true_cond = force_reg (mode, true_cond);
22915
22916 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22917
22918 return 1;
22919 }
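/* For example, "r = (a < b) ? x : y" in SImode becomes roughly:

	cmpw 0,a,b
	isel r,x,y,0		(select on the CR0.LT bit)

   with the compare emitted separately, as noted above.  */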
22920
22921 void
22922 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22923 {
22924 machine_mode mode = GET_MODE (op0);
22925 enum rtx_code c;
22926 rtx target;
22927
22928 /* VSX/altivec have direct min/max insns. */
22929 if ((code == SMAX || code == SMIN)
22930 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22931 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22932 {
22933 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22934 return;
22935 }
22936
22937 if (code == SMAX || code == SMIN)
22938 c = GE;
22939 else
22940 c = GEU;
22941
22942 if (code == SMAX || code == UMAX)
22943 target = emit_conditional_move (dest, c, op0, op1, mode,
22944 op0, op1, mode, 0);
22945 else
22946 target = emit_conditional_move (dest, c, op0, op1, mode,
22947 op1, op0, mode, 0);
22948 gcc_assert (target);
22949 if (target != dest)
22950 emit_move_insn (dest, target);
22951 }
22952
22953 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22954 COND is true. Mark the jump as unlikely to be taken. */
22955
22956 static void
22957 emit_unlikely_jump (rtx cond, rtx label)
22958 {
22959 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22960 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22961 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22962 }
22963
22964 /* A subroutine of the atomic operation splitters. Emit a load-locked
22965 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22966 the zero_extend operation. */
22967
22968 static void
22969 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22970 {
22971 rtx (*fn) (rtx, rtx) = NULL;
22972
22973 switch (mode)
22974 {
22975 case E_QImode:
22976 fn = gen_load_lockedqi;
22977 break;
22978 case E_HImode:
22979 fn = gen_load_lockedhi;
22980 break;
22981 case E_SImode:
22982 if (GET_MODE (mem) == QImode)
22983 fn = gen_load_lockedqi_si;
22984 else if (GET_MODE (mem) == HImode)
22985 fn = gen_load_lockedhi_si;
22986 else
22987 fn = gen_load_lockedsi;
22988 break;
22989 case E_DImode:
22990 fn = gen_load_lockeddi;
22991 break;
22992 case E_TImode:
22993 fn = gen_load_lockedti;
22994 break;
22995 default:
22996 gcc_unreachable ();
22997 }
22998 emit_insn (fn (reg, mem));
22999 }
23000
23001 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23002 instruction in MODE. */
23003
23004 static void
23005 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23006 {
23007 rtx (*fn) (rtx, rtx, rtx) = NULL;
23008
23009 switch (mode)
23010 {
23011 case E_QImode:
23012 fn = gen_store_conditionalqi;
23013 break;
23014 case E_HImode:
23015 fn = gen_store_conditionalhi;
23016 break;
23017 case E_SImode:
23018 fn = gen_store_conditionalsi;
23019 break;
23020 case E_DImode:
23021 fn = gen_store_conditionaldi;
23022 break;
23023 case E_TImode:
23024 fn = gen_store_conditionalti;
23025 break;
23026 default:
23027 gcc_unreachable ();
23028 }
23029
23030 /* Emit sync before stwcx. to address PPC405 Erratum 77. */
23031 if (PPC405_ERRATUM77)
23032 emit_insn (gen_hwsync ());
23033
23034 emit_insn (fn (res, mem, val));
23035 }
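/* Together, emit_load_locked and emit_store_conditional pair up as
   lbarx/stbcx., lharx/sthcx., lwarx/stwcx., ldarx/stdcx. and lqarx/stqcx.
   for QI/HI/SI/DI/TImode respectively.  */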
23036
23037 /* Expand barriers before and after a load_locked/store_cond sequence. */
23038
23039 static rtx
23040 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23041 {
23042 rtx addr = XEXP (mem, 0);
23043
23044 if (!legitimate_indirect_address_p (addr, reload_completed)
23045 && !legitimate_indexed_address_p (addr, reload_completed))
23046 {
23047 addr = force_reg (Pmode, addr);
23048 mem = replace_equiv_address_nv (mem, addr);
23049 }
23050
23051 switch (model)
23052 {
23053 case MEMMODEL_RELAXED:
23054 case MEMMODEL_CONSUME:
23055 case MEMMODEL_ACQUIRE:
23056 break;
23057 case MEMMODEL_RELEASE:
23058 case MEMMODEL_ACQ_REL:
23059 emit_insn (gen_lwsync ());
23060 break;
23061 case MEMMODEL_SEQ_CST:
23062 emit_insn (gen_hwsync ());
23063 break;
23064 default:
23065 gcc_unreachable ();
23066 }
23067 return mem;
23068 }
23069
23070 static void
23071 rs6000_post_atomic_barrier (enum memmodel model)
23072 {
23073 switch (model)
23074 {
23075 case MEMMODEL_RELAXED:
23076 case MEMMODEL_CONSUME:
23077 case MEMMODEL_RELEASE:
23078 break;
23079 case MEMMODEL_ACQUIRE:
23080 case MEMMODEL_ACQ_REL:
23081 case MEMMODEL_SEQ_CST:
23082 emit_insn (gen_isync ());
23083 break;
23084 default:
23085 gcc_unreachable ();
23086 }
23087 }
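/* Summarizing the two helpers above: before the larx/stcx. loop,
   release/acq_rel emit lwsync and seq_cst emits hwsync; after the loop,
   acquire/acq_rel/seq_cst emit isync.  This is the usual PowerPC mapping
   of the C11 memory models.  */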
23088
23089 /* A subroutine of the various atomic expanders. For sub-word operations,
23090 we must adjust things to operate on SImode. Given the original MEM,
23091 return a new aligned memory. Also build and return the quantities by
23092 which to shift and mask. */
23093
23094 static rtx
23095 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23096 {
23097 rtx addr, align, shift, mask, mem;
23098 HOST_WIDE_INT shift_mask;
23099 machine_mode mode = GET_MODE (orig_mem);
23100
23101 /* For smaller modes, we have to implement this via SImode. */
23102 shift_mask = (mode == QImode ? 0x18 : 0x10);
23103
23104 addr = XEXP (orig_mem, 0);
23105 addr = force_reg (GET_MODE (addr), addr);
23106
23107 /* Aligned memory containing subword. Generate a new memory. We
23108 do not want any of the existing MEM_ATTR data, as we're now
23109 accessing memory outside the original object. */
23110 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23111 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23112 mem = gen_rtx_MEM (SImode, align);
23113 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23114 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23115 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23116
23117 /* Shift amount for subword relative to aligned word. */
23118 shift = gen_reg_rtx (SImode);
23119 addr = gen_lowpart (SImode, addr);
23120 rtx tmp = gen_reg_rtx (SImode);
23121 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23122 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23123 if (BYTES_BIG_ENDIAN)
23124 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23125 shift, 1, OPTAB_LIB_WIDEN);
23126 *pshift = shift;
23127
23128 /* Mask for insertion. */
23129 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23130 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23131 *pmask = mask;
23132
23133 return mem;
23134 }
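/* Worked example (little-endian QImode at address A): the aligned word is
   A & -4, SHIFT is (A << 3) & 0x18 -- the byte offset within the word
   times 8 -- and MASK is 0xff << SHIFT.  Big-endian XORs SHIFT with 0x18
   so that byte 0 is the most significant.  */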
23135
23136 /* A subroutine of the various atomic expanders. For sub-word operands,
23137 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23138
23139 static rtx
23140 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23141 {
23142 rtx x;
23143
23144 x = gen_reg_rtx (SImode);
23145 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23146 gen_rtx_NOT (SImode, mask),
23147 oldval)));
23148
23149 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23150
23151 return x;
23152 }
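/* I.e. the merged word is (OLDVAL & ~MASK) | NEWVAL, relying on NEWVAL
   having already been shifted into the MASK field.  */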
23153
23154 /* A subroutine of the various atomic expanders. For sub-word operands,
23155 extract WIDE to NARROW via SHIFT. */
23156
23157 static void
23158 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23159 {
23160 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23161 wide, 1, OPTAB_LIB_WIDEN);
23162 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23163 }
23164
23165 /* Expand an atomic compare and swap operation. */
23166
23167 void
23168 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23169 {
23170 rtx boolval, retval, mem, oldval, newval, cond;
23171 rtx label1, label2, x, mask, shift;
23172 machine_mode mode, orig_mode;
23173 enum memmodel mod_s, mod_f;
23174 bool is_weak;
23175
23176 boolval = operands[0];
23177 retval = operands[1];
23178 mem = operands[2];
23179 oldval = operands[3];
23180 newval = operands[4];
23181 is_weak = (INTVAL (operands[5]) != 0);
23182 mod_s = memmodel_base (INTVAL (operands[6]));
23183 mod_f = memmodel_base (INTVAL (operands[7]));
23184 orig_mode = mode = GET_MODE (mem);
23185
23186 mask = shift = NULL_RTX;
23187 if (mode == QImode || mode == HImode)
23188 {
23189 /* Before power8, we didn't have access to lbarx/lharx, so generate
23190 lwarx and shift/mask operations. With power8, we need to do the
23191 comparison in SImode, but the store is still done in QI/HImode. */
23192 oldval = convert_modes (SImode, mode, oldval, 1);
23193
23194 if (!TARGET_SYNC_HI_QI)
23195 {
23196 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23197
23198 /* Shift and mask OLDVAL into position within the word. */
23199 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23200 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23201
23202 /* Shift and mask NEWVAL into position within the word. */
23203 newval = convert_modes (SImode, mode, newval, 1);
23204 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23205 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23206 }
23207
23208 /* Prepare to adjust the return value. */
23209 retval = gen_reg_rtx (SImode);
23210 mode = SImode;
23211 }
23212 else if (reg_overlap_mentioned_p (retval, oldval))
23213 oldval = copy_to_reg (oldval);
23214
23215 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23216 oldval = copy_to_mode_reg (mode, oldval);
23217
23218 if (reg_overlap_mentioned_p (retval, newval))
23219 newval = copy_to_reg (newval);
23220
23221 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23222
23223 label1 = NULL_RTX;
23224 if (!is_weak)
23225 {
23226 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23227 emit_label (XEXP (label1, 0));
23228 }
23229 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23230
23231 emit_load_locked (mode, retval, mem);
23232
23233 x = retval;
23234 if (mask)
23235 x = expand_simple_binop (SImode, AND, retval, mask,
23236 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23237
23238 cond = gen_reg_rtx (CCmode);
23239 /* If we have TImode, synthesize a comparison. */
23240 if (mode != TImode)
23241 x = gen_rtx_COMPARE (CCmode, x, oldval);
23242 else
23243 {
23244 rtx xor1_result = gen_reg_rtx (DImode);
23245 rtx xor2_result = gen_reg_rtx (DImode);
23246 rtx or_result = gen_reg_rtx (DImode);
23247 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23248 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23249 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23250 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23251
23252 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23253 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23254 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23255 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23256 }
23257
23258 emit_insn (gen_rtx_SET (cond, x));
23259
23260 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23261 emit_unlikely_jump (x, label2);
23262
23263 x = newval;
23264 if (mask)
23265 x = rs6000_mask_atomic_subword (retval, newval, mask);
23266
23267 emit_store_conditional (orig_mode, cond, mem, x);
23268
23269 if (!is_weak)
23270 {
23271 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23272 emit_unlikely_jump (x, label1);
23273 }
23274
23275 if (!is_mm_relaxed (mod_f))
23276 emit_label (XEXP (label2, 0));
23277
23278 rs6000_post_atomic_barrier (mod_s);
23279
23280 if (is_mm_relaxed (mod_f))
23281 emit_label (XEXP (label2, 0));
23282
23283 if (shift)
23284 rs6000_finish_atomic_subword (operands[1], retval, shift);
23285 else if (mode != GET_MODE (operands[1]))
23286 convert_move (operands[1], retval, 1);
23287
23288 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23289 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23290 emit_insn (gen_rtx_SET (boolval, x));
23291 }
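/* As a sketch, a strong SImode compare-and-swap from the expander above
   looks roughly like (barriers depending on the memory model):

	.L1:	lwarx   ret,0,mem
		cmpw    0,ret,oldval
		bne-    0,.L2
		stwcx.  newval,0,mem
		bne-    0,.L1
	.L2:	isync

   with BOOLVAL then set from the EQ bit of the final CR value.  */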
23292
23293 /* Expand an atomic exchange operation. */
23294
23295 void
23296 rs6000_expand_atomic_exchange (rtx operands[])
23297 {
23298 rtx retval, mem, val, cond;
23299 machine_mode mode;
23300 enum memmodel model;
23301 rtx label, x, mask, shift;
23302
23303 retval = operands[0];
23304 mem = operands[1];
23305 val = operands[2];
23306 model = memmodel_base (INTVAL (operands[3]));
23307 mode = GET_MODE (mem);
23308
23309 mask = shift = NULL_RTX;
23310 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23311 {
23312 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23313
23314 /* Shift and mask VAL into position within the word. */
23315 val = convert_modes (SImode, mode, val, 1);
23316 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23317 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23318
23319 /* Prepare to adjust the return value. */
23320 retval = gen_reg_rtx (SImode);
23321 mode = SImode;
23322 }
23323
23324 mem = rs6000_pre_atomic_barrier (mem, model);
23325
23326 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23327 emit_label (XEXP (label, 0));
23328
23329 emit_load_locked (mode, retval, mem);
23330
23331 x = val;
23332 if (mask)
23333 x = rs6000_mask_atomic_subword (retval, val, mask);
23334
23335 cond = gen_reg_rtx (CCmode);
23336 emit_store_conditional (mode, cond, mem, x);
23337
23338 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23339 emit_unlikely_jump (x, label);
23340
23341 rs6000_post_atomic_barrier (model);
23342
23343 if (shift)
23344 rs6000_finish_atomic_subword (operands[0], retval, shift);
23345 }
23346
23347 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23348 to perform. MEM is the memory on which to operate. VAL is the second
23349 operand of the binary operator. BEFORE and AFTER are optional locations to
23350 return the value of MEM either before or after the operation. MODEL_RTX
23351 is a CONST_INT containing the memory model to use. */
23352
23353 void
23354 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23355 rtx orig_before, rtx orig_after, rtx model_rtx)
23356 {
23357 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23358 machine_mode mode = GET_MODE (mem);
23359 machine_mode store_mode = mode;
23360 rtx label, x, cond, mask, shift;
23361 rtx before = orig_before, after = orig_after;
23362
23363 mask = shift = NULL_RTX;
23364 /* On power8, we want to use SImode for the operation. On previous systems,
23365 do the operation on the containing SImode word and shift/mask to get the
23366 proper byte or halfword. */
23367 if (mode == QImode || mode == HImode)
23368 {
23369 if (TARGET_SYNC_HI_QI)
23370 {
23371 val = convert_modes (SImode, mode, val, 1);
23372
23373 /* Prepare to adjust the return value. */
23374 before = gen_reg_rtx (SImode);
23375 if (after)
23376 after = gen_reg_rtx (SImode);
23377 mode = SImode;
23378 }
23379 else
23380 {
23381 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23382
23383 /* Shift and mask VAL into position within the word. */
23384 val = convert_modes (SImode, mode, val, 1);
23385 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23386 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23387
23388 switch (code)
23389 {
23390 case IOR:
23391 case XOR:
23392 /* We've already zero-extended VAL. That is sufficient to
23393 make certain that it does not affect other bits. */
23394 mask = NULL;
23395 break;
23396
23397 case AND:
23398 /* If we make certain that all of the other bits in VAL are
23399 set, that will be sufficient to not affect other bits. */
23400 x = gen_rtx_NOT (SImode, mask);
23401 x = gen_rtx_IOR (SImode, x, val);
23402 emit_insn (gen_rtx_SET (val, x));
23403 mask = NULL;
23404 break;
23405
23406 case NOT:
23407 case PLUS:
23408 case MINUS:
23409 /* These will all affect bits outside the field and need
23410 adjustment via MASK within the loop. */
23411 break;
23412
23413 default:
23414 gcc_unreachable ();
23415 }
23416
23417 /* Prepare to adjust the return value. */
23418 before = gen_reg_rtx (SImode);
23419 if (after)
23420 after = gen_reg_rtx (SImode);
23421 store_mode = mode = SImode;
23422 }
23423 }
23424
23425 mem = rs6000_pre_atomic_barrier (mem, model);
23426
23427 label = gen_label_rtx ();
23428 emit_label (label);
23429 label = gen_rtx_LABEL_REF (VOIDmode, label);
23430
23431 if (before == NULL_RTX)
23432 before = gen_reg_rtx (mode);
23433
23434 emit_load_locked (mode, before, mem);
23435
23436 if (code == NOT)
23437 {
23438 x = expand_simple_binop (mode, AND, before, val,
23439 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23440 after = expand_simple_unop (mode, NOT, x, after, 1);
23441 }
23442 else
23443 {
23444 after = expand_simple_binop (mode, code, before, val,
23445 after, 1, OPTAB_LIB_WIDEN);
23446 }
23447
23448 x = after;
23449 if (mask)
23450 {
23451 x = expand_simple_binop (SImode, AND, after, mask,
23452 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23453 x = rs6000_mask_atomic_subword (before, x, mask);
23454 }
23455 else if (store_mode != mode)
23456 x = convert_modes (store_mode, mode, x, 1);
23457
23458 cond = gen_reg_rtx (CCmode);
23459 emit_store_conditional (store_mode, cond, mem, x);
23460
23461 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23462 emit_unlikely_jump (x, label);
23463
23464 rs6000_post_atomic_barrier (model);
23465
23466 if (shift)
23467 {
23468 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23469 then do the calculations in a SImode register. */
23470 if (orig_before)
23471 rs6000_finish_atomic_subword (orig_before, before, shift);
23472 if (orig_after)
23473 rs6000_finish_atomic_subword (orig_after, after, shift);
23474 }
23475 else if (store_mode != mode)
23476 {
23477 /* QImode/HImode on machines with lbarx/lharx where we do the native
23478 operation and then do the calculations in a SImode register. */
23479 if (orig_before)
23480 convert_move (orig_before, before, 1);
23481 if (orig_after)
23482 convert_move (orig_after, after, 1);
23483 }
23484 else if (orig_after && after != orig_after)
23485 emit_move_insn (orig_after, after);
23486 }
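/* For example, an SImode __atomic_fetch_add expands to roughly:

	.L1:	lwarx   before,0,mem
		add     after,before,val
		stwcx.  after,0,mem
		bne-    0,.L1

   bracketed by the barriers selected from MODEL_RTX as described above.  */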
23487
23488 /* Emit instructions to move SRC to DST. Called by splitters for
23489 multi-register moves. It will emit at most one instruction for
23490 each register that is accessed; that is, it won't emit li/lis pairs
23491 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23492 register. */
23493
23494 void
23495 rs6000_split_multireg_move (rtx dst, rtx src)
23496 {
23497 /* The register number of the first register being moved. */
23498 int reg;
23499 /* The mode that is to be moved. */
23500 machine_mode mode;
23501 /* The mode that the move is being done in, and its size. */
23502 machine_mode reg_mode;
23503 int reg_mode_size;
23504 /* The number of registers that will be moved. */
23505 int nregs;
23506
23507 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23508 mode = GET_MODE (dst);
23509 nregs = hard_regno_nregs (reg, mode);
23510 if (FP_REGNO_P (reg))
23511 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23512 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23513 else if (ALTIVEC_REGNO_P (reg))
23514 reg_mode = V16QImode;
23515 else
23516 reg_mode = word_mode;
23517 reg_mode_size = GET_MODE_SIZE (reg_mode);
23518
23519 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23520
23521 /* TDmode residing in FP registers is special, since the ISA requires that
23522 the lower-numbered word of a register pair is always the most significant
23523 word, even in little-endian mode. This does not match the usual subreg
23524 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23525 the appropriate constituent registers "by hand" in little-endian mode.
23526
23527 Note we do not need to check for destructive overlap here since TDmode
23528 can only reside in even/odd register pairs. */
23529 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23530 {
23531 rtx p_src, p_dst;
23532 int i;
23533
23534 for (i = 0; i < nregs; i++)
23535 {
23536 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23537 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23538 else
23539 p_src = simplify_gen_subreg (reg_mode, src, mode,
23540 i * reg_mode_size);
23541
23542 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23543 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23544 else
23545 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23546 i * reg_mode_size);
23547
23548 emit_insn (gen_rtx_SET (p_dst, p_src));
23549 }
23550
23551 return;
23552 }
23553
23554 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23555 {
23556 /* Move register range backwards, if we might have destructive
23557 overlap. */
23558 int i;
23559 for (i = nregs - 1; i >= 0; i--)
23560 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23561 i * reg_mode_size),
23562 simplify_gen_subreg (reg_mode, src, mode,
23563 i * reg_mode_size)));
23564 }
23565 else
23566 {
23567 int i;
23568 int j = -1;
23569 bool used_update = false;
23570 rtx restore_basereg = NULL_RTX;
23571
23572 if (MEM_P (src) && INT_REGNO_P (reg))
23573 {
23574 rtx breg;
23575
23576 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23577 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23578 {
23579 rtx delta_rtx;
23580 breg = XEXP (XEXP (src, 0), 0);
23581 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23582 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23583 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23584 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23585 src = replace_equiv_address (src, breg);
23586 }
23587 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23588 {
23589 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23590 {
23591 rtx basereg = XEXP (XEXP (src, 0), 0);
23592 if (TARGET_UPDATE)
23593 {
23594 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23595 emit_insn (gen_rtx_SET (ndst,
23596 gen_rtx_MEM (reg_mode,
23597 XEXP (src, 0))));
23598 used_update = true;
23599 }
23600 else
23601 emit_insn (gen_rtx_SET (basereg,
23602 XEXP (XEXP (src, 0), 1)));
23603 src = replace_equiv_address (src, basereg);
23604 }
23605 else
23606 {
23607 rtx basereg = gen_rtx_REG (Pmode, reg);
23608 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23609 src = replace_equiv_address (src, basereg);
23610 }
23611 }
23612
23613 breg = XEXP (src, 0);
23614 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23615 breg = XEXP (breg, 0);
23616
23617 /* If the base register we are using to address memory is
23618 also a destination reg, then change that register last. */
23619 if (REG_P (breg)
23620 && REGNO (breg) >= REGNO (dst)
23621 && REGNO (breg) < REGNO (dst) + nregs)
23622 j = REGNO (breg) - REGNO (dst);
23623 }
23624 else if (MEM_P (dst) && INT_REGNO_P (reg))
23625 {
23626 rtx breg;
23627
23628 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23629 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23630 {
23631 rtx delta_rtx;
23632 breg = XEXP (XEXP (dst, 0), 0);
23633 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23634 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23635 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23636
23637 /* We have to update the breg before doing the store.
23638 Use store with update, if available. */
23639
23640 if (TARGET_UPDATE)
23641 {
23642 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23643 emit_insn (TARGET_32BIT
23644 ? (TARGET_POWERPC64
23645 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23646 : gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
23647 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23648 used_update = true;
23649 }
23650 else
23651 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23652 dst = replace_equiv_address (dst, breg);
23653 }
23654 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23655 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23656 {
23657 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23658 {
23659 rtx basereg = XEXP (XEXP (dst, 0), 0);
23660 if (TARGET_UPDATE)
23661 {
23662 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23663 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23664 XEXP (dst, 0)),
23665 nsrc));
23666 used_update = true;
23667 }
23668 else
23669 emit_insn (gen_rtx_SET (basereg,
23670 XEXP (XEXP (dst, 0), 1)));
23671 dst = replace_equiv_address (dst, basereg);
23672 }
23673 else
23674 {
23675 rtx basereg = XEXP (XEXP (dst, 0), 0);
23676 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23677 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23678 && REG_P (basereg)
23679 && REG_P (offsetreg)
23680 && REGNO (basereg) != REGNO (offsetreg));
23681 if (REGNO (basereg) == 0)
23682 {
23683 rtx tmp = offsetreg;
23684 offsetreg = basereg;
23685 basereg = tmp;
23686 }
23687 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23688 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23689 dst = replace_equiv_address (dst, basereg);
23690 }
23691 }
23692 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23693 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23694 }
23695
23696 for (i = 0; i < nregs; i++)
23697 {
23698 /* Calculate index to next subword. */
23699 ++j;
23700 if (j == nregs)
23701 j = 0;
23702
23703 /* If compiler already emitted move of first word by
23704 store with update, no need to do anything. */
23705 if (j == 0 && used_update)
23706 continue;
23707
23708 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23709 j * reg_mode_size),
23710 simplify_gen_subreg (reg_mode, src, mode,
23711 j * reg_mode_size)));
23712 }
23713 if (restore_basereg != NULL_RTX)
23714 emit_insn (restore_basereg);
23715 }
23716 }
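/* E.g. on 64-bit, a TImode move between GPR pairs splits into two DImode
   moves; the j indexing above orders them so that a base register that is
   also a destination is written last.  */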
23717
23718 \f
23719 /* This page contains routines that are used to determine what the
23720 function prologue and epilogue code will do and write them out. */
23721
23722 /* Determine whether register REG really needs to be saved. */
23723
23724 static bool
23725 save_reg_p (int reg)
23726 {
23727 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23728 {
23729 /* When calling eh_return, we must return true for all the cases
23730 where conditional_register_usage marks the PIC offset reg
23731 call used or fixed. */
23732 if (crtl->calls_eh_return
23733 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23734 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23735 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23736 return true;
23737
23738 /* We need to mark the PIC offset register live for the same
23739 conditions as it is set up in rs6000_emit_prologue, or
23740 otherwise it won't be saved before we clobber it. */
23741 if (TARGET_TOC && TARGET_MINIMAL_TOC
23742 && !constant_pool_empty_p ())
23743 return true;
23744
23745 if (DEFAULT_ABI == ABI_V4
23746 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23747 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
23748 return true;
23749
23750 if (DEFAULT_ABI == ABI_DARWIN
23751 && flag_pic && crtl->uses_pic_offset_table)
23752 return true;
23753 }
23754
23755 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23756 }
23757
23758 /* Return the first fixed-point register that is required to be
23759 saved. 32 if none. */
23760
23761 int
23762 first_reg_to_save (void)
23763 {
23764 int first_reg;
23765
23766 /* Find lowest numbered live register. */
23767 for (first_reg = 13; first_reg <= 31; first_reg++)
23768 if (save_reg_p (first_reg))
23769 break;
23770
23771 return first_reg;
23772 }
23773
23774 /* Similar, for FP regs. */
23775
23776 int
23777 first_fp_reg_to_save (void)
23778 {
23779 int first_reg;
23780
23781 /* Find lowest numbered live register. */
23782 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23783 if (save_reg_p (first_reg))
23784 break;
23785
23786 return first_reg;
23787 }
23788
23789 /* Similar, for AltiVec regs. */
23790
23791 static int
23792 first_altivec_reg_to_save (void)
23793 {
23794 int i;
23795
23796 /* Stack frame remains as is unless we are in AltiVec ABI. */
23797 if (! TARGET_ALTIVEC_ABI)
23798 return LAST_ALTIVEC_REGNO + 1;
23799
23800 /* On Darwin, the unwind routines are compiled without
23801 TARGET_ALTIVEC, and use save_world to save/restore the
23802 altivec registers when necessary. */
23803 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23804 && ! TARGET_ALTIVEC)
23805 return FIRST_ALTIVEC_REGNO + 20;
23806
23807 /* Find lowest numbered live register. */
23808 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23809 if (save_reg_p (i))
23810 break;
23811
23812 return i;
23813 }
23814
23815 /* Return a 32-bit mask of the AltiVec registers we need to set in
23816 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
23817 the 32-bit word is 0. */
23818
23819 static unsigned int
23820 compute_vrsave_mask (void)
23821 {
23822 unsigned int i, mask = 0;
23823
23824 /* On Darwin, the unwind routines are compiled without
23825 TARGET_ALTIVEC, and use save_world to save/restore the
23826 call-saved altivec registers when necessary. */
23827 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23828 && ! TARGET_ALTIVEC)
23829 mask |= 0xFFF;
23830
23831 /* First, find out if we use _any_ altivec registers. */
23832 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23833 if (df_regs_ever_live_p (i))
23834 mask |= ALTIVEC_REG_BIT (i);
23835
23836 if (mask == 0)
23837 return mask;
23838
23839 /* Next, remove the argument registers from the set. These must
23840 be in the VRSAVE mask set by the caller, so we don't need to add
23841 them in again. More importantly, the mask we compute here is
23842 used to generate CLOBBERs in the set_vrsave insn, and we do not
23843 wish the argument registers to die. */
23844 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23845 mask &= ~ALTIVEC_REG_BIT (i);
23846
23847 /* Similarly, remove the return value from the set. */
23848 {
23849 bool yes = false;
23850 diddle_return_value (is_altivec_return_reg, &yes);
23851 if (yes)
23852 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23853 }
23854
23855 return mask;
23856 }
23857
23858 /* For a very restricted set of circumstances, we can cut down the
23859 size of prologues/epilogues by calling our own save/restore-the-world
23860 routines. */
23861
23862 static void
23863 compute_save_world_info (rs6000_stack_t *info)
23864 {
23865 info->world_save_p = 1;
23866 info->world_save_p
23867 = (WORLD_SAVE_P (info)
23868 && DEFAULT_ABI == ABI_DARWIN
23869 && !cfun->has_nonlocal_label
23870 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23871 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23872 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23873 && info->cr_save_p);
23874
23875 /* This will not work in conjunction with sibcalls. Make sure there
23876 are none. (This check is expensive, but seldom executed.) */
23877 if (WORLD_SAVE_P (info))
23878 {
23879 rtx_insn *insn;
23880 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23881 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23882 {
23883 info->world_save_p = 0;
23884 break;
23885 }
23886 }
23887
23888 if (WORLD_SAVE_P (info))
23889 {
23890 /* Even if we're not touching VRsave, make sure there's room on the
23891 stack for it, if it looks like we're calling SAVE_WORLD, which
23892 will attempt to save it. */
23893 info->vrsave_size = 4;
23894
23895 /* If we are going to save the world, we need to save the link register too. */
23896 info->lr_save_p = 1;
23897
23898 /* "Save" the VRsave register too if we're saving the world. */
23899 if (info->vrsave_mask == 0)
23900 info->vrsave_mask = compute_vrsave_mask ();
23901
23902 /* Because the Darwin register save/restore routines only handle
23903 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23904 check. */
23905 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23906 && (info->first_altivec_reg_save
23907 >= FIRST_SAVED_ALTIVEC_REGNO));
23908 }
23909
23910 return;
23911 }
23912
23913
23914 static void
23915 is_altivec_return_reg (rtx reg, void *xyes)
23916 {
23917 bool *yes = (bool *) xyes;
23918 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23919 *yes = true;
23920 }
23921
23922 \f
23923 /* Return whether REG is a global user reg or has been specified by
23924 -ffixed-REG. We should not restore these, and so cannot use
23925 lmw or out-of-line restore functions if there are any. We also
23926 can't save them (well, emit frame notes for them), because frame
23927 unwinding during exception handling will restore saved registers. */
23928
23929 static bool
23930 fixed_reg_p (int reg)
23931 {
23932 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23933 backend sets it, overriding anything the user might have given. */
23934 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23935 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23936 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23937 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23938 return false;
23939
23940 return fixed_regs[reg];
23941 }
23942
23943 /* Determine the strategy for saving/restoring registers. */
23944
23945 enum {
23946 SAVE_MULTIPLE = 0x1,
23947 SAVE_INLINE_GPRS = 0x2,
23948 SAVE_INLINE_FPRS = 0x4,
23949 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
23950 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
23951 SAVE_INLINE_VRS = 0x20,
23952 REST_MULTIPLE = 0x100,
23953 REST_INLINE_GPRS = 0x200,
23954 REST_INLINE_FPRS = 0x400,
23955 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
23956 REST_INLINE_VRS = 0x1000
23957 };
23958
23959 static int
23960 rs6000_savres_strategy (rs6000_stack_t *info,
23961 bool using_static_chain_p)
23962 {
23963 int strategy = 0;
23964
23965 /* Select between in-line and out-of-line save and restore of regs.
23966 First, all the obvious cases where we don't use out-of-line. */
23967 if (crtl->calls_eh_return
23968 || cfun->machine->ra_need_lr)
23969 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
23970 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
23971 | SAVE_INLINE_VRS | REST_INLINE_VRS);
23972
23973 if (info->first_gp_reg_save == 32)
23974 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23975
23976 if (info->first_fp_reg_save == 64)
23977 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23978
23979 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
23980 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23981
23982 /* Define cutoff for using out-of-line functions to save registers. */
23983 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
23984 {
23985 if (!optimize_size)
23986 {
23987 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23988 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23989 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23990 }
23991 else
23992 {
23993 /* Prefer out-of-line restore if it will exit. */
23994 if (info->first_fp_reg_save > 61)
23995 strategy |= SAVE_INLINE_FPRS;
23996 if (info->first_gp_reg_save > 29)
23997 {
23998 if (info->first_fp_reg_save == 64)
23999 strategy |= SAVE_INLINE_GPRS;
24000 else
24001 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24002 }
24003 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24004 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24005 }
24006 }
24007 else if (DEFAULT_ABI == ABI_DARWIN)
24008 {
24009 if (info->first_fp_reg_save > 60)
24010 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24011 if (info->first_gp_reg_save > 29)
24012 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24013 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24014 }
24015 else
24016 {
24017 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24018 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24019 || info->first_fp_reg_save > 61)
24020 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24021 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24022 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24023 }
24024
24025 /* Don't bother to try to save things out-of-line if r11 is occupied
24026 by the static chain. It would require too much fiddling and the
24027 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24028 pointer on Darwin, and AIX uses r1 or r12. */
24029 if (using_static_chain_p
24030 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24031 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24032 | SAVE_INLINE_GPRS
24033 | SAVE_INLINE_VRS);
24034
24035 /* Don't ever restore fixed regs. That means we can't use the
24036 out-of-line register restore functions if a fixed reg is in the
24037 range of regs restored. */
24038 if (!(strategy & REST_INLINE_FPRS))
24039 for (int i = info->first_fp_reg_save; i < 64; i++)
24040 if (fixed_regs[i])
24041 {
24042 strategy |= REST_INLINE_FPRS;
24043 break;
24044 }
24045
24046 /* We can only use the out-of-line routines to restore fprs if we've
24047 saved all the registers from first_fp_reg_save in the prologue.
24048 Otherwise, we risk loading garbage. Of course, if we have saved
24049 out-of-line then we know we haven't skipped any fprs. */
24050 if ((strategy & SAVE_INLINE_FPRS)
24051 && !(strategy & REST_INLINE_FPRS))
24052 for (int i = info->first_fp_reg_save; i < 64; i++)
24053 if (!save_reg_p (i))
24054 {
24055 strategy |= REST_INLINE_FPRS;
24056 break;
24057 }
24058
24059 /* Similarly, for altivec regs. */
24060 if (!(strategy & REST_INLINE_VRS))
24061 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24062 if (fixed_regs[i])
24063 {
24064 strategy |= REST_INLINE_VRS;
24065 break;
24066 }
24067
24068 if ((strategy & SAVE_INLINE_VRS)
24069 && !(strategy & REST_INLINE_VRS))
24070 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24071 if (!save_reg_p (i))
24072 {
24073 strategy |= REST_INLINE_VRS;
24074 break;
24075 }
24076
24077 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24078 saved is an out-of-line save or restore. Set up the value for
24079 the next test (excluding out-of-line gprs). */
24080 bool lr_save_p = (info->lr_save_p
24081 || !(strategy & SAVE_INLINE_FPRS)
24082 || !(strategy & SAVE_INLINE_VRS)
24083 || !(strategy & REST_INLINE_FPRS)
24084 || !(strategy & REST_INLINE_VRS));
24085
24086 if (TARGET_MULTIPLE
24087 && !TARGET_POWERPC64
24088 && info->first_gp_reg_save < 31
24089 && !(flag_shrink_wrap
24090 && flag_shrink_wrap_separate
24091 && optimize_function_for_speed_p (cfun)))
24092 {
24093 int count = 0;
24094 for (int i = info->first_gp_reg_save; i < 32; i++)
24095 if (save_reg_p (i))
24096 count++;
24097
24098 if (count <= 1)
24099 /* Don't use store multiple if only one reg needs to be
24100 saved. This can occur for example when the ABI_V4 pic reg
24101 (r30) needs to be saved to make calls, but r31 is not
24102 used. */
24103 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24104 else
24105 {
24106 /* Prefer store multiple for saves over out-of-line
24107 routines, since the store-multiple instruction will
24108 always be smaller. */
24109 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24110
24111 /* The situation is more complicated with load multiple.
24112 We'd prefer to use the out-of-line routines for restores,
24113 since the "exit" out-of-line routines can handle the
24114 restore of LR and the frame teardown. However, it doesn't
24115 make sense to use the out-of-line routine if that is the
24116 only reason we'd need to save LR, and we can't use the
24117 "exit" out-of-line gpr restore if we have saved some
24118 fprs; in those cases it is advantageous to use load
24119 multiple when available. */
24120 if (info->first_fp_reg_save != 64 || !lr_save_p)
24121 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24122 }
24123 }
24124
24125 /* Using the "exit" out-of-line routine does not improve code size
24126 if using it would require lr to be saved and if only saving one
24127 or two gprs. */
24128 else if (!lr_save_p && info->first_gp_reg_save > 29)
24129 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24130
24131 /* Don't ever restore fixed regs. */
24132 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24133 for (int i = info->first_gp_reg_save; i < 32; i++)
24134 if (fixed_reg_p (i))
24135 {
24136 strategy |= REST_INLINE_GPRS;
24137 strategy &= ~REST_MULTIPLE;
24138 break;
24139 }
24140
24141 /* We can only use load multiple or the out-of-line routines to
24142 restore gprs if we've saved all the registers from
24143 first_gp_reg_save. Otherwise, we risk loading garbage.
24144 Of course, if we have saved out-of-line or used stmw then we know
24145 we haven't skipped any gprs. */
24146 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24147 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24148 for (int i = info->first_gp_reg_save; i < 32; i++)
24149 if (!save_reg_p (i))
24150 {
24151 strategy |= REST_INLINE_GPRS;
24152 strategy &= ~REST_MULTIPLE;
24153 break;
24154 }
24155
24156 if (TARGET_ELF && TARGET_64BIT)
24157 {
24158 if (!(strategy & SAVE_INLINE_FPRS))
24159 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24160 else if (!(strategy & SAVE_INLINE_GPRS)
24161 && info->first_fp_reg_save == 64)
24162 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24163 }
24164 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24165 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24166
24167 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24168 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24169
24170 return strategy;
24171 }
24172
24173 /* Calculate the stack information for the current function. This is
24174 complicated by having two separate calling sequences, the AIX calling
24175 sequence and the V.4 calling sequence.
24176
24177 AIX (and Darwin/Mac OS X) stack frames look like:
24178 32-bit 64-bit
24179 SP----> +---------------------------------------+
24180 | back chain to caller | 0 0
24181 +---------------------------------------+
24182 | saved CR | 4 8 (8-11)
24183 +---------------------------------------+
24184 | saved LR | 8 16
24185 +---------------------------------------+
24186 | reserved for compilers | 12 24
24187 +---------------------------------------+
24188 | reserved for binders | 16 32
24189 +---------------------------------------+
24190 | saved TOC pointer | 20 40
24191 +---------------------------------------+
24192 | Parameter save area (+padding*) (P) | 24 48
24193 +---------------------------------------+
24194 | Alloca space (A) | 24+P etc.
24195 +---------------------------------------+
24196 | Local variable space (L) | 24+P+A
24197 +---------------------------------------+
24198 | Float/int conversion temporary (X) | 24+P+A+L
24199 +---------------------------------------+
24200 | Save area for AltiVec registers (W) | 24+P+A+L+X
24201 +---------------------------------------+
24202 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24203 +---------------------------------------+
24204 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24205 +---------------------------------------+
24206 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24207 +---------------------------------------+
24208 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24209 +---------------------------------------+
24210 old SP->| back chain to caller's caller |
24211 +---------------------------------------+
24212
24213 * If the alloca area is present, the parameter save area is
24214 padded so that the alloca area starts 16-byte aligned.
24215
24216 The required alignment for AIX configurations is two words (i.e., 8
24217 or 16 bytes).
24218
24219 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24220
24221 SP----> +---------------------------------------+
24222 | Back chain to caller | 0
24223 +---------------------------------------+
24224 | Save area for CR | 8
24225 +---------------------------------------+
24226 | Saved LR | 16
24227 +---------------------------------------+
24228 | Saved TOC pointer | 24
24229 +---------------------------------------+
24230 | Parameter save area (+padding*) (P) | 32
24231 +---------------------------------------+
24232 | Alloca space (A) | 32+P
24233 +---------------------------------------+
24234 | Local variable space (L) | 32+P+A
24235 +---------------------------------------+
24236 | Save area for AltiVec registers (W) | 32+P+A+L
24237 +---------------------------------------+
24238 | AltiVec alignment padding (Y) | 32+P+A+L+W
24239 +---------------------------------------+
24240 | Save area for GP registers (G) | 32+P+A+L+W+Y
24241 +---------------------------------------+
24242 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24243 +---------------------------------------+
24244 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24245 +---------------------------------------+
24246
24247 * If the alloca area is present, the parameter save area is
24248 padded so that the alloca area starts 16-byte aligned.
24249
24250 V.4 stack frames look like:
24251
24252 SP----> +---------------------------------------+
24253 | back chain to caller | 0
24254 +---------------------------------------+
24255 | caller's saved LR | 4
24256 +---------------------------------------+
24257 | Parameter save area (+padding*) (P) | 8
24258 +---------------------------------------+
24259 | Alloca space (A) | 8+P
24260 +---------------------------------------+
24261 | Varargs save area (V) | 8+P+A
24262 +---------------------------------------+
24263 | Local variable space (L) | 8+P+A+V
24264 +---------------------------------------+
24265 | Float/int conversion temporary (X) | 8+P+A+V+L
24266 +---------------------------------------+
24267 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24268 +---------------------------------------+
24269 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24270 +---------------------------------------+
24271 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24272 +---------------------------------------+
24273 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24274 +---------------------------------------+
24275 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24276 +---------------------------------------+
24277 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24278 +---------------------------------------+
24279 old SP->| back chain to caller's caller |
24280 +---------------------------------------+
24281
24282 * If the alloca area is present and the required alignment is
24283 16 bytes, the parameter save area is padded so that the
24284 alloca area starts 16-byte aligned.
24285
24286 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24287 given. (But note below and in sysv4.h that we require only 8 and
24288 may round up the size of our stack frame anyway. The historical
24289 reason is early versions of powerpc-linux which didn't properly
24290 align the stack at program startup. A happy side-effect is that
24291 -mno-eabi libraries can be used with -meabi programs.)
24292
24293 The EABI configuration defaults to the V.4 layout. However,
24294 the stack alignment requirements may differ. If -mno-eabi is not
24295 given, the required stack alignment is 8 bytes; if -mno-eabi is
24296 given, the required alignment is 16 bytes. (But see V.4 comment
24297 above.) */
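
/* As an illustrative example only (values are hypothetical, not from
   any ABI document): an ELFv2 function with a 64-byte parameter save
   area (P=64), no alloca (A=0), 48 bytes of locals (L=48) and no
   AltiVec state (W=Y=0) would, per the ELFv2 table above, place its
   GPR save area at offset 32+64+0+48 = 144 from the post-prologue
   stack pointer. */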
24298
24299 #ifndef ABI_STACK_BOUNDARY
24300 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24301 #endif
24302
24303 static rs6000_stack_t *
24304 rs6000_stack_info (void)
24305 {
24306 /* We should never be called for thunks; we are not set up for that. */
24307 gcc_assert (!cfun->is_thunk);
24308
24309 rs6000_stack_t *info = &stack_info;
24310 int reg_size = TARGET_32BIT ? 4 : 8;
24311 int ehrd_size;
24312 int ehcr_size;
24313 int save_align;
24314 int first_gp;
24315 HOST_WIDE_INT non_fixed_size;
24316 bool using_static_chain_p;
24317
24318 if (reload_completed && info->reload_completed)
24319 return info;
24320
24321 memset (info, 0, sizeof (*info));
24322 info->reload_completed = reload_completed;
24323
24324 /* Select which calling sequence. */
24325 info->abi = DEFAULT_ABI;
24326
24327 /* Calculate which registers need to be saved & save area size. */
24328 info->first_gp_reg_save = first_reg_to_save ();
24329 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24330 even if it currently looks like we won't. Reload may need it to
24331 get at a constant; if so, it will have already created a constant
24332 pool entry for it. */
24333 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24334 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24335 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24336 && crtl->uses_const_pool
24337 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24338 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24339 else
24340 first_gp = info->first_gp_reg_save;
24341
24342 info->gp_size = reg_size * (32 - first_gp);
24343
24344 info->first_fp_reg_save = first_fp_reg_to_save ();
24345 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24346
24347 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24348 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24349 - info->first_altivec_reg_save);
24350
24351 /* Does this function call anything? */
24352 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24353
24354 /* Determine if we need to save the condition code registers. */
24355 if (save_reg_p (CR2_REGNO)
24356 || save_reg_p (CR3_REGNO)
24357 || save_reg_p (CR4_REGNO))
24358 {
24359 info->cr_save_p = 1;
24360 if (DEFAULT_ABI == ABI_V4)
24361 info->cr_size = reg_size;
24362 }
24363
24364 /* If the current function calls __builtin_eh_return, then we need
24365 to allocate stack space for registers that will hold data for
24366 the exception handler. */
24367 if (crtl->calls_eh_return)
24368 {
24369 unsigned int i;
24370 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24371 continue;
24372
24373 ehrd_size = i * UNITS_PER_WORD;
24374 }
24375 else
24376 ehrd_size = 0;
24377
24378 /* In the ELFv2 ABI, we also need to allocate space for separate
24379 CR field save areas if the function calls __builtin_eh_return. */
24380 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24381 {
24382 /* This hard-codes that we have three call-saved CR fields. */
24383 ehcr_size = 3 * reg_size;
24384 /* We do *not* use the regular CR save mechanism. */
24385 info->cr_save_p = 0;
24386 }
24387 else
24388 ehcr_size = 0;
24389
24390 /* Determine various sizes. */
24391 info->reg_size = reg_size;
24392 info->fixed_size = RS6000_SAVE_AREA;
24393 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24394 if (cfun->calls_alloca)
24395 info->parm_size =
24396 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24397 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24398 else
24399 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24400 TARGET_ALTIVEC ? 16 : 8);
24401 if (FRAME_GROWS_DOWNWARD)
24402 info->vars_size
24403 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24404 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24405 - (info->fixed_size + info->vars_size + info->parm_size);
24406
24407 if (TARGET_ALTIVEC_ABI)
24408 info->vrsave_mask = compute_vrsave_mask ();
24409
24410 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24411 info->vrsave_size = 4;
24412
24413 compute_save_world_info (info);
24414
24415 /* Calculate the offsets. */
24416 switch (DEFAULT_ABI)
24417 {
24418 case ABI_NONE:
24419 default:
24420 gcc_unreachable ();
24421
24422 case ABI_AIX:
24423 case ABI_ELFv2:
24424 case ABI_DARWIN:
24425 info->fp_save_offset = -info->fp_size;
24426 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24427
24428 if (TARGET_ALTIVEC_ABI)
24429 {
24430 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24431
24432 /* Align stack so vector save area is on a quadword boundary.
24433 The padding goes above the vectors. */
24434 if (info->altivec_size != 0)
24435 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24436
24437 info->altivec_save_offset = info->vrsave_save_offset
24438 - info->altivec_padding_size
24439 - info->altivec_size;
24440 gcc_assert (info->altivec_size == 0
24441 || info->altivec_save_offset % 16 == 0);
24442
24443 /* Adjust for AltiVec case. */
24444 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24445 }
24446 else
24447 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24448
24449 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24450 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24451 info->lr_save_offset = 2*reg_size;
24452 break;
24453
24454 case ABI_V4:
24455 info->fp_save_offset = -info->fp_size;
24456 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24457 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24458
24459 if (TARGET_ALTIVEC_ABI)
24460 {
24461 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24462
24463 /* Align stack so vector save area is on a quadword boundary. */
24464 if (info->altivec_size != 0)
24465 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24466
24467 info->altivec_save_offset = info->vrsave_save_offset
24468 - info->altivec_padding_size
24469 - info->altivec_size;
24470
24471 /* Adjust for AltiVec case. */
24472 info->ehrd_offset = info->altivec_save_offset;
24473 }
24474 else
24475 info->ehrd_offset = info->cr_save_offset;
24476
24477 info->ehrd_offset -= ehrd_size;
24478 info->lr_save_offset = reg_size;
24479 }
24480
24481 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24482 info->save_size = RS6000_ALIGN (info->fp_size
24483 + info->gp_size
24484 + info->altivec_size
24485 + info->altivec_padding_size
24486 + ehrd_size
24487 + ehcr_size
24488 + info->cr_size
24489 + info->vrsave_size,
24490 save_align);
24491
24492 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24493
24494 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24495 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24496
24497 /* Determine if we need to save the link register. */
24498 if (info->calls_p
24499 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24500 && crtl->profile
24501 && !TARGET_PROFILE_KERNEL)
24502 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24503 #ifdef TARGET_RELOCATABLE
24504 || (DEFAULT_ABI == ABI_V4
24505 && (TARGET_RELOCATABLE || flag_pic > 1)
24506 && !constant_pool_empty_p ())
24507 #endif
24508 || rs6000_ra_ever_killed ())
24509 info->lr_save_p = 1;
24510
24511 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24512 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24513 && call_used_regs[STATIC_CHAIN_REGNUM]);
24514 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24515
24516 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24517 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24518 || !(info->savres_strategy & SAVE_INLINE_VRS)
24519 || !(info->savres_strategy & REST_INLINE_GPRS)
24520 || !(info->savres_strategy & REST_INLINE_FPRS)
24521 || !(info->savres_strategy & REST_INLINE_VRS))
24522 info->lr_save_p = 1;
24523
24524 if (info->lr_save_p)
24525 df_set_regs_ever_live (LR_REGNO, true);
24526
24527 /* Determine if we need to allocate any stack frame:
24528
24529 For AIX we need to push the stack if a frame pointer is needed
24530 (because the stack might be dynamically adjusted), if we are
24531 debugging, if we make calls, or if the sum of fp_save, gp_save,
24532 and local variables is more than the space needed to save all
24533 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24534 + 18*8 = 288 (GPR13 reserved).
24535
24536 For V.4 we don't have the stack cushion that AIX uses, but assume
24537 that the debugger can handle stackless frames. */
24538
24539 if (info->calls_p)
24540 info->push_p = 1;
24541
24542 else if (DEFAULT_ABI == ABI_V4)
24543 info->push_p = non_fixed_size != 0;
24544
24545 else if (frame_pointer_needed)
24546 info->push_p = 1;
24547
24548 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24549 info->push_p = 1;
24550
24551 else
24552 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24553
24554 return info;
24555 }
24556
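/* Dump the fields of INFO to stderr, computing it for the current
   function first if INFO is null. */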
24557 static void
24558 debug_stack_info (rs6000_stack_t *info)
24559 {
24560 const char *abi_string;
24561
24562 if (! info)
24563 info = rs6000_stack_info ();
24564
24565 fprintf (stderr, "\nStack information for function %s:\n",
24566 ((current_function_decl && DECL_NAME (current_function_decl))
24567 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24568 : "<unknown>"));
24569
24570 switch (info->abi)
24571 {
24572 default: abi_string = "Unknown"; break;
24573 case ABI_NONE: abi_string = "NONE"; break;
24574 case ABI_AIX: abi_string = "AIX"; break;
24575 case ABI_ELFv2: abi_string = "ELFv2"; break;
24576 case ABI_DARWIN: abi_string = "Darwin"; break;
24577 case ABI_V4: abi_string = "V.4"; break;
24578 }
24579
24580 fprintf (stderr, "\tABI = %5s\n", abi_string);
24581
24582 if (TARGET_ALTIVEC_ABI)
24583 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24584
24585 if (info->first_gp_reg_save != 32)
24586 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24587
24588 if (info->first_fp_reg_save != 64)
24589 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24590
24591 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24592 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24593 info->first_altivec_reg_save);
24594
24595 if (info->lr_save_p)
24596 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24597
24598 if (info->cr_save_p)
24599 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24600
24601 if (info->vrsave_mask)
24602 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24603
24604 if (info->push_p)
24605 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24606
24607 if (info->calls_p)
24608 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24609
24610 if (info->gp_size)
24611 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24612
24613 if (info->fp_size)
24614 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24615
24616 if (info->altivec_size)
24617 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24618 info->altivec_save_offset);
24619
24620 if (info->vrsave_size)
24621 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24622 info->vrsave_save_offset);
24623
24624 if (info->lr_save_p)
24625 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24626
24627 if (info->cr_save_p)
24628 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24629
24630 if (info->varargs_save_offset)
24631 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24632
24633 if (info->total_size)
24634 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24635 info->total_size);
24636
24637 if (info->vars_size)
24638 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24639 info->vars_size);
24640
24641 if (info->parm_size)
24642 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24643
24644 if (info->fixed_size)
24645 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24646
24647 if (info->gp_size)
24648 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24649
24650 if (info->fp_size)
24651 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24652
24653 if (info->altivec_size)
24654 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24655
24656 if (info->vrsave_size)
24657 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24658
24659 if (info->altivec_padding_size)
24660 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24661 info->altivec_padding_size);
24662
24663 if (info->cr_size)
24664 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24665
24666 if (info->save_size)
24667 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24668
24669 if (info->reg_size != 4)
24670 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24671
24672 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24673
24674 if (info->abi == ABI_DARWIN)
24675 fprintf (stderr, "\tWORLD_SAVE_P = %5d\n", WORLD_SAVE_P(info));
24676
24677 fprintf (stderr, "\n");
24678 }
24679
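/* Implement RETURN_ADDR_RTX: return an rtx for the return address of
   the frame COUNT levels up, with FRAME being that frame's pointer. */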
24680 rtx
24681 rs6000_return_addr (int count, rtx frame)
24682 {
24683 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24684 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24685 if (count != 0
24686 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24687 {
24688 cfun->machine->ra_needs_full_frame = 1;
24689
24690 if (count == 0)
24691 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24692 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24693 frame = stack_pointer_rtx;
24694 rtx prev_frame_addr = memory_address (Pmode, frame);
24695 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24696 rtx lr_save_off = plus_constant (Pmode,
24697 prev_frame, RETURN_ADDRESS_OFFSET);
24698 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24699 return gen_rtx_MEM (Pmode, lr_save_addr);
24700 }
24701
24702 cfun->machine->ra_need_lr = 1;
24703 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24704 }
24705
24706 /* Say whether a function is a candidate for sibcall handling or not. */
24707
24708 static bool
24709 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24710 {
24711 tree fntype;
24712
24713 /* The sibcall epilogue may clobber the static chain register.
24714 ??? We could work harder and avoid that, but it's probably
24715 not worth the hassle in practice. */
24716 if (CALL_EXPR_STATIC_CHAIN (exp))
24717 return false;
24718
24719 if (decl)
24720 fntype = TREE_TYPE (decl);
24721 else
24722 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24723
24724 /* We can't do it if the called function has more vector parameters
24725 than the current function; there's nowhere to put the VRsave code. */
24726 if (TARGET_ALTIVEC_ABI
24727 && TARGET_ALTIVEC_VRSAVE
24728 && !(decl && decl == current_function_decl))
24729 {
24730 function_args_iterator args_iter;
24731 tree type;
24732 int nvreg = 0;
24733
24734 /* Functions with vector parameters are required to have a
24735 prototype, so the argument type info must be available
24736 here. */
24737 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
24738 if (TREE_CODE (type) == VECTOR_TYPE
24739 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24740 nvreg++;
24741
24742 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
24743 if (TREE_CODE (type) == VECTOR_TYPE
24744 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24745 nvreg--;
24746
24747 if (nvreg > 0)
24748 return false;
24749 }
24750
24751 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24752 functions, because the callee may have a different TOC pointer to
24753 the caller and there's no way to ensure we restore the TOC when
24754 we return. With the secure-plt SYSV ABI we can't make non-local
24755 calls when -fpic/PIC because the plt call stubs use r30. */
24756 if (DEFAULT_ABI == ABI_DARWIN
24757 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24758 && decl
24759 && !DECL_EXTERNAL (decl)
24760 && !DECL_WEAK (decl)
24761 && (*targetm.binds_local_p) (decl))
24762 || (DEFAULT_ABI == ABI_V4
24763 && (!TARGET_SECURE_PLT
24764 || !flag_pic
24765 || (decl
24766 && (*targetm.binds_local_p) (decl)))))
24767 {
24768 tree attr_list = TYPE_ATTRIBUTES (fntype);
24769
24770 if (!lookup_attribute ("longcall", attr_list)
24771 || lookup_attribute ("shortcall", attr_list))
24772 return true;
24773 }
24774
24775 return false;
24776 }
24777
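/* Return nonzero if the link register is ever clobbered outside of
   the prologue/epilogue (and so must be saved), using the frozen
   cfun->machine->lr_save_state value when one has been recorded. */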
24778 static int
24779 rs6000_ra_ever_killed (void)
24780 {
24781 rtx_insn *top;
24782 rtx reg;
24783 rtx_insn *insn;
24784
24785 if (cfun->is_thunk)
24786 return 0;
24787
24788 if (cfun->machine->lr_save_state)
24789 return cfun->machine->lr_save_state - 1;
24790
24791 /* regs_ever_live has LR marked as used if any sibcalls are present,
24792 but this should not force saving and restoring in the
24793 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24794 clobbers LR, so that is inappropriate. */
24795
24796 /* Also, the prologue can generate a store into LR that
24797 doesn't really count, like this:
24798
24799 move LR->R0
24800 bcl to set PIC register
24801 move LR->R31
24802 move R0->LR
24803
24804 When we're called from the epilogue, we need to avoid counting
24805 this as a store. */
24806
24807 push_topmost_sequence ();
24808 top = get_insns ();
24809 pop_topmost_sequence ();
24810 reg = gen_rtx_REG (Pmode, LR_REGNO);
24811
24812 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24813 {
24814 if (INSN_P (insn))
24815 {
24816 if (CALL_P (insn))
24817 {
24818 if (!SIBLING_CALL_P (insn))
24819 return 1;
24820 }
24821 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24822 return 1;
24823 else if (set_of (reg, insn) != NULL_RTX
24824 && !prologue_epilogue_contains (insn))
24825 return 1;
24826 }
24827 }
24828 return 0;
24829 }
24830 \f
24831 /* Emit instructions needed to load the TOC register.
24832 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
24833 a constant pool; or for SVR4 -fpic. */
24834
24835 void
24836 rs6000_emit_load_toc_table (int fromprolog)
24837 {
24838 rtx dest;
24839 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24840
24841 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24842 {
24843 char buf[30];
24844 rtx lab, tmp1, tmp2, got;
24845
24846 lab = gen_label_rtx ();
24847 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24848 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24849 if (flag_pic == 2)
24850 {
24851 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24852 need_toc_init = 1;
24853 }
24854 else
24855 got = rs6000_got_sym ();
24856 tmp1 = tmp2 = dest;
24857 if (!fromprolog)
24858 {
24859 tmp1 = gen_reg_rtx (Pmode);
24860 tmp2 = gen_reg_rtx (Pmode);
24861 }
24862 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24863 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24864 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24865 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24866 }
24867 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24868 {
24869 emit_insn (gen_load_toc_v4_pic_si ());
24870 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24871 }
24872 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24873 {
24874 char buf[30];
24875 rtx temp0 = (fromprolog
24876 ? gen_rtx_REG (Pmode, 0)
24877 : gen_reg_rtx (Pmode));
24878
24879 if (fromprolog)
24880 {
24881 rtx symF, symL;
24882
24883 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24884 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24885
24886 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24887 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24888
24889 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24890 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24891 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24892 }
24893 else
24894 {
24895 rtx tocsym, lab;
24896
24897 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24898 need_toc_init = 1;
24899 lab = gen_label_rtx ();
24900 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24901 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24902 if (TARGET_LINK_STACK)
24903 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24904 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24905 }
24906 emit_insn (gen_addsi3 (dest, temp0, dest));
24907 }
24908 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24909 {
24910 /* This is for AIX code running in non-PIC ELF32. */
24911 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24912
24913 need_toc_init = 1;
24914 emit_insn (gen_elf_high (dest, realsym));
24915 emit_insn (gen_elf_low (dest, dest, realsym));
24916 }
24917 else
24918 {
24919 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24920
24921 if (TARGET_32BIT)
24922 emit_insn (gen_load_toc_aix_si (dest));
24923 else
24924 emit_insn (gen_load_toc_aix_di (dest));
24925 }
24926 }
24927
24928 /* Emit instructions to restore the link register after determining where
24929 its value has been stored. */
24930
24931 void
24932 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24933 {
24934 rs6000_stack_t *info = rs6000_stack_info ();
24935 rtx operands[2];
24936
24937 operands[0] = source;
24938 operands[1] = scratch;
24939
24940 if (info->lr_save_p)
24941 {
24942 rtx frame_rtx = stack_pointer_rtx;
24943 HOST_WIDE_INT sp_offset = 0;
24944 rtx tmp;
24945
24946 if (frame_pointer_needed
24947 || cfun->calls_alloca
24948 || info->total_size > 32767)
24949 {
24950 tmp = gen_frame_mem (Pmode, frame_rtx);
24951 emit_move_insn (operands[1], tmp);
24952 frame_rtx = operands[1];
24953 }
24954 else if (info->push_p)
24955 sp_offset = info->total_size;
24956
24957 tmp = plus_constant (Pmode, frame_rtx,
24958 info->lr_save_offset + sp_offset);
24959 tmp = gen_frame_mem (Pmode, tmp);
24960 emit_move_insn (tmp, operands[0]);
24961 }
24962 else
24963 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
24964
24965 /* Freeze lr_save_p. We've just emitted rtl that depends on the
24966 state of lr_save_p so any change from here on would be a bug. In
24967 particular, stop rs6000_ra_ever_killed from considering the SET
24968 of lr we may have added just above. */
24969 cfun->machine->lr_save_state = info->lr_save_p + 1;
24970 }
24971
24972 static GTY(()) alias_set_type set = -1;
24973
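/* Return the alias set used for TOC references, creating it on
   first use. */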
24974 alias_set_type
24975 get_TOC_alias_set (void)
24976 {
24977 if (set == -1)
24978 set = new_alias_set ();
24979 return set;
24980 }
24981
24982 /* This returns nonzero if the current function uses the TOC. This is
24983 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
24984 is generated by the ABI_V4 load_toc_* patterns.
24985 Return 2 instead of 1 if the load_toc_* pattern is in the function
24986 partition that doesn't start the function. */
24987 #if TARGET_ELF
24988 static int
24989 uses_TOC (void)
24990 {
24991 rtx_insn *insn;
24992 int ret = 1;
24993
24994 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24995 {
24996 if (INSN_P (insn))
24997 {
24998 rtx pat = PATTERN (insn);
24999 int i;
25000
25001 if (GET_CODE (pat) == PARALLEL)
25002 for (i = 0; i < XVECLEN (pat, 0); i++)
25003 {
25004 rtx sub = XVECEXP (pat, 0, i);
25005 if (GET_CODE (sub) == USE)
25006 {
25007 sub = XEXP (sub, 0);
25008 if (GET_CODE (sub) == UNSPEC
25009 && XINT (sub, 1) == UNSPEC_TOC)
25010 return ret;
25011 }
25012 }
25013 }
25014 else if (crtl->has_bb_partition
25015 && NOTE_P (insn)
25016 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25017 ret = 2;
25018 }
25019 return 0;
25020 }
25021 #endif
25022
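/* Return an rtx describing a TOC-relative reference to SYMBOL. For
   -mcmodel=medium/large the high part is emitted separately, into
   LARGETOC_REG when that is non-null. */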
25023 rtx
25024 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25025 {
25026 rtx tocrel, tocreg, hi;
25027
25028 if (TARGET_DEBUG_ADDR)
25029 {
25030 if (SYMBOL_REF_P (symbol))
25031 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25032 XSTR (symbol, 0));
25033 else
25034 {
25035 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25036 GET_RTX_NAME (GET_CODE (symbol)));
25037 debug_rtx (symbol);
25038 }
25039 }
25040
25041 if (!can_create_pseudo_p ())
25042 df_set_regs_ever_live (TOC_REGISTER, true);
25043
25044 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25045 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25046 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25047 return tocrel;
25048
25049 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25050 if (largetoc_reg != NULL)
25051 {
25052 emit_move_insn (largetoc_reg, hi);
25053 hi = largetoc_reg;
25054 }
25055 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25056 }
25057
25058 /* Issue assembly directives that create a reference to the given DWARF
25059 FRAME_TABLE_LABEL from the current function section. */
25060 void
25061 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25062 {
25063 fprintf (asm_out_file, "\t.ref %s\n",
25064 (* targetm.strip_name_encoding) (frame_table_label));
25065 }
25066 \f
25067 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25068 and the change to the stack pointer. */
25069
25070 static void
25071 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25072 {
25073 rtvec p;
25074 int i;
25075 rtx regs[3];
25076
25077 i = 0;
25078 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25079 if (hard_frame_needed)
25080 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25081 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25082 || (hard_frame_needed
25083 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25084 regs[i++] = fp;
25085
25086 p = rtvec_alloc (i);
25087 while (--i >= 0)
25088 {
25089 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25090 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25091 }
25092
25093 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25094 }
25095
25096 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25097 and set the appropriate attributes for the generated insn. Return the
25098 first insn which adjusts the stack pointer or the last insn before
25099 the stack adjustment loop.
25100
25101 SIZE_INT is used to create the CFI note for the allocation.
25102
25103 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25104 since the stack grows to lower addresses, its runtime value is -SIZE_INT.
25105
25106 ORIG_SP contains the backchain value that must be stored at *sp. */
25107
25108 static rtx_insn *
25109 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25110 {
25111 rtx_insn *insn;
25112
25113 rtx size_rtx = GEN_INT (-size_int);
25114 if (size_int > 32767)
25115 {
25116 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25117 /* Need a note here so that try_split doesn't get confused. */
25118 if (get_last_insn () == NULL_RTX)
25119 emit_note (NOTE_INSN_DELETED);
25120 insn = emit_move_insn (tmp_reg, size_rtx);
25121 try_split (PATTERN (insn), insn, 0);
25122 size_rtx = tmp_reg;
25123 }
25124
25125 if (TARGET_32BIT)
25126 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25127 stack_pointer_rtx,
25128 size_rtx,
25129 orig_sp));
25130 else
25131 insn = emit_insn (gen_movdi_update_stack (stack_pointer_rtx,
25132 stack_pointer_rtx,
25133 size_rtx,
25134 orig_sp));
25135 rtx par = PATTERN (insn);
25136 gcc_assert (GET_CODE (par) == PARALLEL);
25137 rtx set = XVECEXP (par, 0, 0);
25138 gcc_assert (GET_CODE (set) == SET);
25139 rtx mem = SET_DEST (set);
25140 gcc_assert (MEM_P (mem));
25141 MEM_NOTRAP_P (mem) = 1;
25142 set_mem_alias_set (mem, get_frame_alias_set ());
25143
25144 RTX_FRAME_RELATED_P (insn) = 1;
25145 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25146 gen_rtx_SET (stack_pointer_rtx,
25147 gen_rtx_PLUS (Pmode,
25148 stack_pointer_rtx,
25149 GEN_INT (-size_int))));
25150
25151 /* Emit a blockage to ensure the allocation/probing insns are
25152 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25153 note for similar reasons. */
25154 if (flag_stack_clash_protection)
25155 {
25156 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25157 emit_insn (gen_blockage ());
25158 }
25159
25160 return insn;
25161 }
25162
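/* Return the stack-clash probe interval in bytes; the corresponding
   --param value is its log2. */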
25163 static HOST_WIDE_INT
25164 get_stack_clash_protection_probe_interval (void)
25165 {
25166 return (HOST_WIDE_INT_1U
25167 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25168 }
25169
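/* Return the stack-clash guard size in bytes; the corresponding
   --param value is its log2. */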
25170 static HOST_WIDE_INT
25171 get_stack_clash_protection_guard_size (void)
25172 {
25173 return (HOST_WIDE_INT_1U
25174 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25175 }
25176
25177 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25178 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25179
25180 COPY_REG, if non-null, should contain a copy of the original
25181 stack pointer at exit from this function.
25182
25183 This is subtly different than the Ada probing in that it tries hard to
25184 prevent attacks that jump the stack guard. Thus it is never allowed to
25185 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25186 space without a suitable probe. */
25187 static rtx_insn *
25188 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25189 rtx copy_reg)
25190 {
25191 rtx orig_sp = copy_reg;
25192
25193 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25194
25195 /* Round the size down to a multiple of PROBE_INTERVAL. */
25196 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25197
25198 /* If explicitly requested,
25199 or if the rounded size is not the same as the original size,
25200 or if the rounded size is greater than a page,
25201 then we will need a copy of the original stack pointer. */
25202 if (rounded_size != orig_size
25203 || rounded_size > probe_interval
25204 || copy_reg)
25205 {
25206 /* If the caller did not request a copy of the incoming stack
25207 pointer, then we use r0 to hold the copy. */
25208 if (!copy_reg)
25209 orig_sp = gen_rtx_REG (Pmode, 0);
25210 emit_move_insn (orig_sp, stack_pointer_rtx);
25211 }
25212
25213 /* There are three cases here.
25214
25215 One is a single probe, which is the most common and most efficient
25216 to implement, as it does not need a copy of the original
25217 stack pointer if there are no residuals.
25218
25219 Second is unrolled allocation/probes, which we use if there are just
25220 a few of them. It needs to save the original stack pointer into a
25221 temporary for use as a source register in the allocation/probe.
25222
25223 Last is a loop. This is the most uncommon case and least efficient. */
25224 rtx_insn *retval = NULL;
25225 if (rounded_size == probe_interval)
25226 {
25227 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25228
25229 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25230 }
25231 else if (rounded_size <= 8 * probe_interval)
25232 {
25233 /* The ABI requires using the store-with-update insns to allocate
25234 space and store the backchain into the stack.
25235
25236 So we save the current stack pointer into a temporary, then
25237 emit the store-with-update insns to store the saved stack pointer
25238 into the right location in each new page. */
25239 for (int i = 0; i < rounded_size; i += probe_interval)
25240 {
25241 rtx_insn *insn
25242 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25243
25244 /* Save the first stack adjustment in RETVAL. */
25245 if (i == 0)
25246 retval = insn;
25247 }
25248
25249 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25250 }
25251 else
25252 {
25253 /* Compute the ending address. */
25254 rtx end_addr
25255 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25256 rtx rs = GEN_INT (-rounded_size);
25257 rtx_insn *insn;
25258 if (add_operand (rs, Pmode))
25259 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25260 else
25261 {
25262 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25263 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25264 stack_pointer_rtx));
25265 /* Describe the effect of INSN to the CFI engine. */
25266 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25267 gen_rtx_SET (end_addr,
25268 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25269 rs)));
25270 }
25271 RTX_FRAME_RELATED_P (insn) = 1;
25272
25273 /* Emit the loop. */
25274 if (TARGET_64BIT)
25275 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25276 stack_pointer_rtx, orig_sp,
25277 end_addr));
25278 else
25279 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25280 stack_pointer_rtx, orig_sp,
25281 end_addr));
25282 RTX_FRAME_RELATED_P (retval) = 1;
25283 /* Describe the effect of INSN to the CFI engine. */
25284 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25285 gen_rtx_SET (stack_pointer_rtx, end_addr));
25286
25287 /* Emit a blockage to ensure the allocation/probing insns are
25288 not optimized, combined, removed, etc. Other cases handle this
25289 within their call to rs6000_emit_allocate_stack_1. */
25290 emit_insn (gen_blockage ());
25291
25292 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25293 }
25294
25295 if (orig_size != rounded_size)
25296 {
25297 /* Allocate (and implicitly probe) any residual space. */
25298 HOST_WIDE_INT residual = orig_size - rounded_size;
25299
25300 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25301
25302 /* If the residual was the only allocation, then we can return the
25303 allocating insn. */
25304 if (!retval)
25305 retval = insn;
25306 }
25307
25308 return retval;
25309 }
25310
25311 /* Emit the correct code for allocating stack space, as insns.
25312 If COPY_REG, make sure a copy of the old frame is left there.
25313 The generated code may use hard register 0 as a temporary. */
25314
25315 static rtx_insn *
25316 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25317 {
25318 rtx_insn *insn;
25319 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25320 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25321 rtx todec = gen_int_mode (-size, Pmode);
25322
25323 if (INTVAL (todec) != -size)
25324 {
25325 warning (0, "stack frame too large");
25326 emit_insn (gen_trap ());
25327 return 0;
25328 }
25329
25330 if (crtl->limit_stack)
25331 {
25332 if (REG_P (stack_limit_rtx)
25333 && REGNO (stack_limit_rtx) > 1
25334 && REGNO (stack_limit_rtx) <= 31)
25335 {
25336 rtx_insn *insn
25337 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25338 gcc_assert (insn);
25339 emit_insn (insn);
25340 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25341 }
25342 else if (SYMBOL_REF_P (stack_limit_rtx)
25343 && TARGET_32BIT
25344 && DEFAULT_ABI == ABI_V4
25345 && !flag_pic)
25346 {
25347 rtx toload = gen_rtx_CONST (VOIDmode,
25348 gen_rtx_PLUS (Pmode,
25349 stack_limit_rtx,
25350 GEN_INT (size)));
25351
25352 emit_insn (gen_elf_high (tmp_reg, toload));
25353 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25354 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25355 const0_rtx));
25356 }
25357 else
25358 warning (0, "stack limit expression is not supported");
25359 }
25360
25361 if (flag_stack_clash_protection)
25362 {
25363 if (size < get_stack_clash_protection_guard_size ())
25364 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25365 else
25366 {
25367 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25368 copy_reg);
25369
25370 /* If we asked for a copy with an offset, then we still need to add
25371 in the offset. */
25372 if (copy_reg && copy_off)
25373 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25374 return insn;
25375 }
25376 }
25377
25378 if (copy_reg)
25379 {
25380 if (copy_off != 0)
25381 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25382 else
25383 emit_move_insn (copy_reg, stack_reg);
25384 }
25385
25386 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25387 it now and set the alias set/attributes. The above gen_*_update
25388 calls will generate a PARALLEL with the MEM set being the first
25389 operation. */
25390 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25391 return insn;
25392 }
25393
25394 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25395
25396 #if PROBE_INTERVAL > 32768
25397 #error Cannot use indexed addressing mode for stack probing
25398 #endif
25399
25400 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25401 inclusive. These are offsets from the current stack pointer. */
25402
25403 static void
25404 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25405 {
25406 /* See if we have a constant small number of probes to generate. If so,
25407 that's the easy case. */
25408 if (first + size <= 32768)
25409 {
25410 HOST_WIDE_INT i;
25411
25412 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25413 it exceeds SIZE. If only one probe is needed, this will not
25414 generate any code. Then probe at FIRST + SIZE. */
25415 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25416 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25417 -(first + i)));
25418
25419 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25420 -(first + size)));
25421 }
25422
25423 /* Otherwise, do the same as above, but in a loop. Note that we must be
25424 extra careful with variables wrapping around because we might be at
25425 the very top (or the very bottom) of the address space and we have
25426 to be able to handle this case properly; in particular, we use an
25427 equality test for the loop condition. */
25428 else
25429 {
25430 HOST_WIDE_INT rounded_size;
25431 rtx r12 = gen_rtx_REG (Pmode, 12);
25432 rtx r0 = gen_rtx_REG (Pmode, 0);
25433
25434 /* Sanity check for the addressing mode we're going to use. */
25435 gcc_assert (first <= 32768);
25436
25437 /* Step 1: round SIZE to the previous multiple of the interval. */
25438
25439 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25440
25441
25442 /* Step 2: compute initial and final value of the loop counter. */
25443
25444 /* TEST_ADDR = SP + FIRST. */
25445 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25446 -first)));
25447
25448 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25449 if (rounded_size > 32768)
25450 {
25451 emit_move_insn (r0, GEN_INT (-rounded_size));
25452 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25453 }
25454 else
25455 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25456 -rounded_size)));
25457
25458
25459 /* Step 3: the loop
25460
25461 do
25462 {
25463 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25464 probe at TEST_ADDR
25465 }
25466 while (TEST_ADDR != LAST_ADDR)
25467
25468 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25469 until it is equal to ROUNDED_SIZE. */
25470
25471 if (TARGET_64BIT)
25472 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25473 else
25474 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25475
25476
25477 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25478 that SIZE is equal to ROUNDED_SIZE. */
25479
25480 if (size != rounded_size)
25481 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25482 }
25483 }
25484
25485 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25486 addresses, not offsets. */
25487
25488 static const char *
25489 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25490 {
25491 static int labelno = 0;
25492 char loop_lab[32];
25493 rtx xops[2];
25494
25495 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25496
25497 /* Loop. */
25498 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25499
25500 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25501 xops[0] = reg1;
25502 xops[1] = GEN_INT (-PROBE_INTERVAL);
25503 output_asm_insn ("addi %0,%0,%1", xops);
25504
25505 /* Probe at TEST_ADDR. */
25506 xops[1] = gen_rtx_REG (Pmode, 0);
25507 output_asm_insn ("stw %1,0(%0)", xops);
25508
25509 /* Test if TEST_ADDR == LAST_ADDR. */
25510 xops[1] = reg2;
25511 if (TARGET_64BIT)
25512 output_asm_insn ("cmpd 0,%0,%1", xops);
25513 else
25514 output_asm_insn ("cmpw 0,%0,%1", xops);
25515
25516 /* Branch. */
25517 fputs ("\tbne 0,", asm_out_file);
25518 assemble_name_raw (asm_out_file, loop_lab);
25519 fputc ('\n', asm_out_file);
25520
25521 return "";
25522 }
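
/* For illustration only (assuming the default 4096-byte PROBE_INTERVAL,
   with r12 as TEST_ADDR and r0 as LAST_ADDR as set up by
   rs6000_emit_probe_stack_range), the 32-bit loop emitted above is
   roughly:

	.LPSRL0:
		addi 12,12,-4096
		stw 0,0(12)
		cmpw 0,12,0
		bne 0,.LPSRL0  */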
25523
25524 /* This function is called when rs6000_frame_related is processing
25525 SETs within a PARALLEL, and returns whether the REGNO save ought to
25526 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25527 for out-of-line register save functions, store multiple, and the
25528 Darwin world_save. They may contain registers that don't really
25529 need saving. */
25530
25531 static bool
25532 interesting_frame_related_regno (unsigned int regno)
25533 {
25534 /* Apparent saves of r0 are actually saving LR. It doesn't make
25535 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25536 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25537 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25538 as frame related. */
25539 if (regno == 0)
25540 return true;
25541 /* If we see CR2 then we are here on a Darwin world save. Saves of
25542 CR2 signify the whole CR is being saved. This is a long-standing
25543 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25544 that CR needs to be saved. */
25545 if (regno == CR2_REGNO)
25546 return true;
25547 /* Omit frame info for any user-defined global regs. If frame info
25548 is supplied for them, frame unwinding will restore a user reg.
25549 Also omit frame info for any reg we don't need to save, as that
25550 bloats frame info and can cause problems with shrink wrapping.
25551 Since global regs won't be seen as needing to be saved, both of
25552 these conditions are covered by save_reg_p. */
25553 return save_reg_p (regno);
25554 }
25555
25556 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25557 addresses, not offsets.
25558
25559 REG2 contains the backchain that must be stored into *sp at each allocation.
25560
25561 This is subtly different than the Ada probing above in that it tries hard
25562 to prevent attacks that jump the stack guard. Thus, it is never allowed
25563 to allocate more than PROBE_INTERVAL bytes of stack space without a
25564 suitable probe. */
25565
25566 static const char *
25567 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25568 {
25569 static int labelno = 0;
25570 char loop_lab[32];
25571 rtx xops[3];
25572
25573 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25574
25575 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25576
25577 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25578
25579 /* This allocates and probes. */
25580 xops[0] = reg1;
25581 xops[1] = reg2;
25582 xops[2] = GEN_INT (-probe_interval);
25583 if (TARGET_64BIT)
25584 output_asm_insn ("stdu %1,%2(%0)", xops);
25585 else
25586 output_asm_insn ("stwu %1,%2(%0)", xops);
25587
25588 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25589 xops[0] = reg1;
25590 xops[1] = reg3;
25591 if (TARGET_64BIT)
25592 output_asm_insn ("cmpd 0,%0,%1", xops);
25593 else
25594 output_asm_insn ("cmpw 0,%0,%1", xops);
25595
25596 fputs ("\tbne 0,", asm_out_file);
25597 assemble_name_raw (asm_out_file, loop_lab);
25598 fputc ('\n', asm_out_file);
25599
25600 return "";
25601 }
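
/* Unlike the plain probe loop above, the store-with-update here both
   allocates and probes in a single instruction, so the stack pointer
   never advances more than PROBE_INTERVAL bytes past the last probe.
   Roughly (register numbers and interval are illustrative):

	.LPSRL1:
		stdu 0,-4096(1)		# store backchain, allocate, probe
		cmpd 0,1,12		# reached LAST_ADDR yet?
		bne 0,.LPSRL1  */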
25602
25603 /* Wrapper around the output_probe_stack_range routines. */
25604 const char *
25605 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25606 {
25607 if (flag_stack_clash_protection)
25608 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25609 else
25610 return output_probe_stack_range_1 (reg1, reg3);
25611 }
25612
25613 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25614 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25615 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25616 deduce these equivalences by itself so it wasn't necessary to hold
25617 its hand so much. Don't be tempted to always supply d2_f_d_e with
25618 the actual cfa register, i.e. r31 when we are using a hard frame
25619 pointer. That fails when saving regs off r1, and sched moves the
25620 r31 setup past the reg saves. */
25621
25622 static rtx_insn *
25623 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25624 rtx reg2, rtx repl2)
25625 {
25626 rtx repl;
25627
25628 if (REGNO (reg) == STACK_POINTER_REGNUM)
25629 {
25630 gcc_checking_assert (val == 0);
25631 repl = NULL_RTX;
25632 }
25633 else
25634 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25635 GEN_INT (val));
25636
25637 rtx pat = PATTERN (insn);
25638 if (!repl && !reg2)
25639 {
25640 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25641 if (GET_CODE (pat) == PARALLEL)
25642 for (int i = 0; i < XVECLEN (pat, 0); i++)
25643 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25644 {
25645 rtx set = XVECEXP (pat, 0, i);
25646
25647 if (!REG_P (SET_SRC (set))
25648 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25649 RTX_FRAME_RELATED_P (set) = 1;
25650 }
25651 RTX_FRAME_RELATED_P (insn) = 1;
25652 return insn;
25653 }
25654
25655 /* We expect that 'pat' is either a SET or a PARALLEL containing
25656 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25657 are important so they all have to be marked RTX_FRAME_RELATED_P.
25658 Call simplify_replace_rtx on the SETs rather than the whole insn
25659 so as to leave the other stuff alone (for example USE of r12). */
25660
25661 set_used_flags (pat);
25662 if (GET_CODE (pat) == SET)
25663 {
25664 if (repl)
25665 pat = simplify_replace_rtx (pat, reg, repl);
25666 if (reg2)
25667 pat = simplify_replace_rtx (pat, reg2, repl2);
25668 }
25669 else if (GET_CODE (pat) == PARALLEL)
25670 {
25671 pat = shallow_copy_rtx (pat);
25672 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25673
25674 for (int i = 0; i < XVECLEN (pat, 0); i++)
25675 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25676 {
25677 rtx set = XVECEXP (pat, 0, i);
25678
25679 if (repl)
25680 set = simplify_replace_rtx (set, reg, repl);
25681 if (reg2)
25682 set = simplify_replace_rtx (set, reg2, repl2);
25683 XVECEXP (pat, 0, i) = set;
25684
25685 if (!REG_P (SET_SRC (set))
25686 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25687 RTX_FRAME_RELATED_P (set) = 1;
25688 }
25689 }
25690 else
25691 gcc_unreachable ();
25692
25693 RTX_FRAME_RELATED_P (insn) = 1;
25694 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25695
25696 return insn;
25697 }
25698
25699 /* Returns an insn that has a vrsave set operation with the
25700 appropriate CLOBBERs. */
25701
25702 static rtx
25703 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25704 {
25705 int nclobs, i;
25706 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25707 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25708
25709 clobs[0]
25710 = gen_rtx_SET (vrsave,
25711 gen_rtx_UNSPEC_VOLATILE (SImode,
25712 gen_rtvec (2, reg, vrsave),
25713 UNSPECV_SET_VRSAVE));
25714
25715 nclobs = 1;
25716
25717 /* We need to clobber the registers in the mask so the scheduler
25718 does not move sets to VRSAVE before sets of AltiVec registers.
25719
25720 However, if the function receives nonlocal gotos, reload will set
25721 all call saved registers live. We will end up with:
25722
25723 (set (reg 999) (mem))
25724 (parallel [ (set (reg vrsave) (unspec blah))
25725 (clobber (reg 999))])
25726
25727 The clobber will cause the store into reg 999 to be dead, and
25728 flow will attempt to delete an epilogue insn. In this case, we
25729 need an unspec use/set of the register. */
25730
25731 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25732 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25733 {
25734 if (!epiloguep || call_used_regs [i])
25735 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
25736 else
25737 {
25738 rtx reg = gen_rtx_REG (V4SImode, i);
25739
25740 clobs[nclobs++]
25741 = gen_rtx_SET (reg,
25742 gen_rtx_UNSPEC (V4SImode,
25743 gen_rtvec (1, reg), 27));
25744 }
25745 }
25746
25747 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25748
25749 for (i = 0; i < nclobs; ++i)
25750 XVECEXP (insn, 0, i) = clobs[i];
25751
25752 return insn;
25753 }
25754
25755 static rtx
25756 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25757 {
25758 rtx addr, mem;
25759
25760 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25761 mem = gen_frame_mem (GET_MODE (reg), addr);
25762 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25763 }
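
/* For example, gen_frame_store (r31, r1, 8) on a 64-bit target produces
   RTL along the lines of:

	(set (mem/c:DI (plus:DI (reg:DI 1) (const_int 8)))
	     (reg:DI 31))

   and gen_frame_load simply swaps the two operands.  */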
25764
25765 static rtx
25766 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25767 {
25768 return gen_frame_set (reg, frame_reg, offset, false);
25769 }
25770
25771 static rtx
25772 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25773 {
25774 return gen_frame_set (reg, frame_reg, offset, true);
25775 }
25776
25777 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25778 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25779
25780 static rtx_insn *
25781 emit_frame_save (rtx frame_reg, machine_mode mode,
25782 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25783 {
25784 rtx reg;
25785
25786 /* Some cases that need register indexed addressing. */
25787 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25788 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25789
25790 reg = gen_rtx_REG (mode, regno);
25791 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25792 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25793 NULL_RTX, NULL_RTX);
25794 }
25795
25796 /* Emit an offset memory reference suitable for a frame store, while
25797 converting to a valid addressing mode. */
25798
25799 static rtx
25800 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25801 {
25802 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25803 }
25804
25805 #ifndef TARGET_FIX_AND_CONTINUE
25806 #define TARGET_FIX_AND_CONTINUE 0
25807 #endif
25808
25809 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
25810 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25811 #define LAST_SAVRES_REGISTER 31
25812 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25813
25814 enum {
25815 SAVRES_LR = 0x1,
25816 SAVRES_SAVE = 0x2,
25817 SAVRES_REG = 0x0c,
25818 SAVRES_GPR = 0,
25819 SAVRES_FPR = 4,
25820 SAVRES_VR = 8
25821 };
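
/* For example, (SAVRES_SAVE | SAVRES_FPR | SAVRES_LR) == 0x7 selects the
   out-of-line FPR save routine that also stores the link register, while
   a plain GPR restore is sel == 0.  SAVRES_REG occupies bits 2-3, so
   every selector fits in the 0..11 range used to index the table below.  */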
25822
25823 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25824
25825 /* Temporary holding space for an out-of-line register save/restore
25826 routine name. */
25827 static char savres_routine_name[30];
25828
25829 /* Return the name for an out-of-line register save/restore routine.
25830 SEL selects the register class, save vs. restore, and LR handling. */
25831
25832 static char *
25833 rs6000_savres_routine_name (int regno, int sel)
25834 {
25835 const char *prefix = "";
25836 const char *suffix = "";
25837
25838 /* Different targets are supposed to define
25839 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25840 routine name could be defined with:
25841
25842 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25843
25844 This is a nice idea in theory, but in reality, things are
25845 complicated in several ways:
25846
25847 - ELF targets have save/restore routines for GPRs.
25848
25849 - PPC64 ELF targets have routines for save/restore of GPRs that
25850 differ in what they do with the link register, so having a set
25851 prefix doesn't work. (We only use one of the save routines at
25852 the moment, though.)
25853
25854 - PPC32 ELF targets have "exit" versions of the restore routines
25855 that restore the link register and can save some extra space.
25856 These require an extra suffix. (There are also "tail" versions
25857 of the restore routines and "GOT" versions of the save routines,
25858 but we don't generate those at present. Same problems apply,
25859 though.)
25860
25861 We deal with all this by synthesizing our own prefix/suffix and
25862 using that for the simple sprintf call shown above. */
25863 if (DEFAULT_ABI == ABI_V4)
25864 {
25865 if (TARGET_64BIT)
25866 goto aix_names;
25867
25868 if ((sel & SAVRES_REG) == SAVRES_GPR)
25869 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25870 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25871 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25872 else if ((sel & SAVRES_REG) == SAVRES_VR)
25873 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25874 else
25875 abort ();
25876
25877 if ((sel & SAVRES_LR))
25878 suffix = "_x";
25879 }
25880 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25881 {
25882 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25883 /* No out-of-line save/restore routines for GPRs on AIX. */
25884 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25885 #endif
25886
25887 aix_names:
25888 if ((sel & SAVRES_REG) == SAVRES_GPR)
25889 prefix = ((sel & SAVRES_SAVE)
25890 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25891 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25892 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25893 {
25894 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25895 if ((sel & SAVRES_LR))
25896 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25897 else
25898 #endif
25899 {
25900 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25901 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25902 }
25903 }
25904 else if ((sel & SAVRES_REG) == SAVRES_VR)
25905 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25906 else
25907 abort ();
25908 }
25909
25910 if (DEFAULT_ABI == ABI_DARWIN)
25911 {
25912 /* The Darwin approach is (slightly) different, in order to be
25913 compatible with code generated by the system toolchain. There is a
25914 single symbol for the start of the save sequence, and the code here
25915 embeds an offset into that code on the basis of the first register
25916 to be saved. */
25917 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25918 if ((sel & SAVRES_REG) == SAVRES_GPR)
25919 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25920 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25921 (regno - 13) * 4, prefix, regno);
25922 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25923 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25924 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25925 else if ((sel & SAVRES_REG) == SAVRES_VR)
25926 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25927 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25928 else
25929 abort ();
25930 }
25931 else
25932 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25933
25934 return savres_routine_name;
25935 }
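
/* For instance, a GPR save starting at r20 that also handles LR yields
   "_savegpr_20_x" on 32-bit ELF (ABI_V4) but "_savegpr0_20" on ELFv2;
   the routines themselves are provided by libgcc.  */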
25936
25937 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25938 SEL selects the register class, save vs. restore, and LR handling. */
25939
25940 static rtx
25941 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25942 {
25943 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25944 ? info->first_gp_reg_save
25945 : (sel & SAVRES_REG) == SAVRES_FPR
25946 ? info->first_fp_reg_save - 32
25947 : (sel & SAVRES_REG) == SAVRES_VR
25948 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25949 : -1);
25950 rtx sym;
25951 int select = sel;
25952
25953 /* Don't generate bogus routine names. */
25954 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25955 && regno <= LAST_SAVRES_REGISTER
25956 && select >= 0 && select <= 12);
25957
25958 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25959
25960 if (sym == NULL)
25961 {
25962 char *name;
25963
25964 name = rs6000_savres_routine_name (regno, sel);
25965
25966 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25967 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25968 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25969 }
25970
25971 return sym;
25972 }
25973
25974 /* Emit a sequence of insns, including a stack tie if needed, for
25975 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25976 reset the stack pointer, but move the base of the frame into
25977 reg UPDT_REGNO for use by out-of-line register restore routines. */
25978
25979 static rtx
25980 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25981 unsigned updt_regno)
25982 {
25983 /* If there is nothing to do, don't do anything. */
25984 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25985 return NULL_RTX;
25986
25987 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25988
25989 /* This blockage is needed so that sched doesn't decide to move
25990 the sp change before the register restores. */
25991 if (DEFAULT_ABI == ABI_V4)
25992 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25993 GEN_INT (frame_off)));
25994
25995 /* If we are restoring registers out-of-line, we will be using the
25996 "exit" variants of the restore routines, which will reset the
25997 stack for us. But we do need to point updt_reg into the
25998 right place for those routines. */
25999 if (frame_off != 0)
26000 return emit_insn (gen_add3_insn (updt_reg_rtx,
26001 frame_reg_rtx, GEN_INT (frame_off)));
26002 else
26003 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26004
26005 return NULL_RTX;
26006 }
26007
26008 /* Return the register number used as a pointer by out-of-line
26009 save/restore functions. */
26010
26011 static inline unsigned
26012 ptr_regno_for_savres (int sel)
26013 {
26014 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26015 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26016 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26017 }
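
/* In other words: AIX and ELFv2 use r1 for the FPR routines and for any
   routine that handles LR, and r12 otherwise; V.4 and Darwin use r11,
   except that the Darwin FPR routines expect r1.  */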
26018
26019 /* Construct a parallel rtx describing the effect of a call to an
26020 out-of-line register save/restore routine, and emit the insn
26021 or jump_insn as appropriate. */
26022
26023 static rtx_insn *
26024 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26025 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26026 machine_mode reg_mode, int sel)
26027 {
26028 int i;
26029 int offset, start_reg, end_reg, n_regs, use_reg;
26030 int reg_size = GET_MODE_SIZE (reg_mode);
26031 rtx sym;
26032 rtvec p;
26033 rtx par;
26034 rtx_insn *insn;
26035
26036 offset = 0;
26037 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26038 ? info->first_gp_reg_save
26039 : (sel & SAVRES_REG) == SAVRES_FPR
26040 ? info->first_fp_reg_save
26041 : (sel & SAVRES_REG) == SAVRES_VR
26042 ? info->first_altivec_reg_save
26043 : -1);
26044 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26045 ? 32
26046 : (sel & SAVRES_REG) == SAVRES_FPR
26047 ? 64
26048 : (sel & SAVRES_REG) == SAVRES_VR
26049 ? LAST_ALTIVEC_REGNO + 1
26050 : -1);
26051 n_regs = end_reg - start_reg;
26052 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26053 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26054 + n_regs);
26055
26056 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26057 RTVEC_ELT (p, offset++) = ret_rtx;
26058
26059 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26060
26061 sym = rs6000_savres_routine_sym (info, sel);
26062 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26063
26064 use_reg = ptr_regno_for_savres (sel);
26065 if ((sel & SAVRES_REG) == SAVRES_VR)
26066 {
26067 /* Vector regs are saved/restored using [reg+reg] addressing. */
26068 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26069 RTVEC_ELT (p, offset++)
26070 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26071 }
26072 else
26073 RTVEC_ELT (p, offset++)
26074 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26075
26076 for (i = 0; i < end_reg - start_reg; i++)
26077 RTVEC_ELT (p, i + offset)
26078 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26079 frame_reg_rtx, save_area_offset + reg_size * i,
26080 (sel & SAVRES_SAVE) != 0);
26081
26082 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26083 RTVEC_ELT (p, i + offset)
26084 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26085
26086 par = gen_rtx_PARALLEL (VOIDmode, p);
26087
26088 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26089 {
26090 insn = emit_jump_insn (par);
26091 JUMP_LABEL (insn) = ret_rtx;
26092 }
26093 else
26094 insn = emit_insn (par);
26095 return insn;
26096 }
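
/* As an illustration, a GPR "exit" restore (sel == SAVRES_GPR | SAVRES_LR)
   from r28 upward on ELFv2 builds a PARALLEL of roughly this shape:

	(parallel [(return)
		   (clobber (reg:P 65))		;; LR
		   (use (symbol_ref "_restgpr0_28"))
		   (use (reg:P 1))
		   (set (reg 28) (mem ...))
		   ...
		   (set (reg 31) (mem ...))])

   and is emitted as a jump_insn, since the out-of-line routine itself
   returns.  Register numbering here is illustrative.  */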
26097
26098 /* Emit prologue code to store CR fields that need to be saved into REG. This
26099 function should only be called when moving the non-volatile CRs to REG; it
26100 is not a general-purpose routine to move the entire set of CRs to REG.
26101 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26102 volatile CRs. */
26103
26104 static void
26105 rs6000_emit_prologue_move_from_cr (rtx reg)
26106 {
26107 /* Only the ELFv2 ABI allows storing only selected fields. */
26108 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26109 {
26110 int i, cr_reg[8], count = 0;
26111
26112 /* Collect CR fields that must be saved. */
26113 for (i = 0; i < 8; i++)
26114 if (save_reg_p (CR0_REGNO + i))
26115 cr_reg[count++] = i;
26116
26117 /* If it's just a single one, use mfcrf. */
26118 if (count == 1)
26119 {
26120 rtvec p = rtvec_alloc (1);
26121 rtvec r = rtvec_alloc (2);
26122 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26123 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26124 RTVEC_ELT (p, 0)
26125 = gen_rtx_SET (reg,
26126 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26127
26128 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26129 return;
26130 }
26131
26132 /* ??? It might be better to handle count == 2 / 3 cases here
26133 as well, using logical operations to combine the values. */
26134 }
26135
26136 emit_insn (gen_prologue_movesi_from_cr (reg));
26137 }
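
/* For example, if only CR2 must be saved, the single-field path above
   feeds mask 1 << (7 - 2) == 0x20 to the UNSPEC_MOVESI_FROM_CR pattern,
   yielding a one-field mfocrf/mfcrf rather than a full mfcr, which can
   be cheaper on processors that crack mfcr into one operation per
   field.  */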
26138
26139 /* Return whether the split-stack arg pointer (r12) is used. */
26140
26141 static bool
26142 split_stack_arg_pointer_used_p (void)
26143 {
26144 /* If the pseudo holding the arg pointer is no longer a pseudo,
26145 then the arg pointer is used. */
26146 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26147 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26148 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26149 return true;
26150
26151 /* Unfortunately we also need to do some code scanning, since
26152 r12 may have been substituted for the pseudo. */
26153 rtx_insn *insn;
26154 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26155 FOR_BB_INSNS (bb, insn)
26156 if (NONDEBUG_INSN_P (insn))
26157 {
26158 /* A call destroys r12. */
26159 if (CALL_P (insn))
26160 return false;
26161
26162 df_ref use;
26163 FOR_EACH_INSN_USE (use, insn)
26164 {
26165 rtx x = DF_REF_REG (use);
26166 if (REG_P (x) && REGNO (x) == 12)
26167 return true;
26168 }
26169 df_ref def;
26170 FOR_EACH_INSN_DEF (def, insn)
26171 {
26172 rtx x = DF_REF_REG (def);
26173 if (REG_P (x) && REGNO (x) == 12)
26174 return false;
26175 }
26176 }
26177 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26178 }
26179
26180 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26181
26182 static bool
26183 rs6000_global_entry_point_needed_p (void)
26184 {
26185 /* Only needed for the ELFv2 ABI. */
26186 if (DEFAULT_ABI != ABI_ELFv2)
26187 return false;
26188
26189 /* With -msingle-pic-base, we assume the whole program shares the same
26190 TOC, so no global entry point prologues are needed anywhere. */
26191 if (TARGET_SINGLE_PIC_BASE)
26192 return false;
26193
26194 /* Ensure we have a global entry point for thunks. ??? We could
26195 avoid that if the target routine doesn't need a global entry point,
26196 but we do not know whether this is the case at this point. */
26197 if (cfun->is_thunk)
26198 return true;
26199
26200 /* For regular functions, rs6000_emit_prologue sets this flag if the
26201 routine ever uses the TOC pointer. */
26202 return cfun->machine->r2_setup_needed;
26203 }
26204
26205 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26206 static sbitmap
26207 rs6000_get_separate_components (void)
26208 {
26209 rs6000_stack_t *info = rs6000_stack_info ();
26210
26211 if (WORLD_SAVE_P (info))
26212 return NULL;
26213
26214 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26215 && !(info->savres_strategy & REST_MULTIPLE));
26216
26217 /* Component 0 is the save/restore of LR (done via GPR0).
26218 Component 2 is the save of the TOC (GPR2).
26219 Components 13..31 are the save/restore of GPR13..GPR31.
26220 Components 46..63 are the save/restore of FPR14..FPR31. */
26221
26222 cfun->machine->n_components = 64;
26223
26224 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26225 bitmap_clear (components);
26226
26227 int reg_size = TARGET_32BIT ? 4 : 8;
26228 int fp_reg_size = 8;
26229
26230 /* The GPRs we need saved to the frame. */
26231 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26232 && (info->savres_strategy & REST_INLINE_GPRS))
26233 {
26234 int offset = info->gp_save_offset;
26235 if (info->push_p)
26236 offset += info->total_size;
26237
26238 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26239 {
26240 if (IN_RANGE (offset, -0x8000, 0x7fff)
26241 && save_reg_p (regno))
26242 bitmap_set_bit (components, regno);
26243
26244 offset += reg_size;
26245 }
26246 }
26247
26248 /* Don't mess with the hard frame pointer. */
26249 if (frame_pointer_needed)
26250 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26251
26252 /* Don't mess with the fixed TOC register. */
26253 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26254 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26255 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26256 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26257
26258 /* The FPRs we need saved to the frame. */
26259 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26260 && (info->savres_strategy & REST_INLINE_FPRS))
26261 {
26262 int offset = info->fp_save_offset;
26263 if (info->push_p)
26264 offset += info->total_size;
26265
26266 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26267 {
26268 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26269 bitmap_set_bit (components, regno);
26270
26271 offset += fp_reg_size;
26272 }
26273 }
26274
26275 /* Optimize LR save and restore if we can. This is component 0. Any
26276 out-of-line register save/restore routines need LR. */
26277 if (info->lr_save_p
26278 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26279 && (info->savres_strategy & SAVE_INLINE_GPRS)
26280 && (info->savres_strategy & REST_INLINE_GPRS)
26281 && (info->savres_strategy & SAVE_INLINE_FPRS)
26282 && (info->savres_strategy & REST_INLINE_FPRS)
26283 && (info->savres_strategy & SAVE_INLINE_VRS)
26284 && (info->savres_strategy & REST_INLINE_VRS))
26285 {
26286 int offset = info->lr_save_offset;
26287 if (info->push_p)
26288 offset += info->total_size;
26289 if (IN_RANGE (offset, -0x8000, 0x7fff))
26290 bitmap_set_bit (components, 0);
26291 }
26292
26293 /* Optimize saving the TOC. This is component 2. */
26294 if (cfun->machine->save_toc_in_prologue)
26295 bitmap_set_bit (components, 2);
26296
26297 return components;
26298 }
26299
26300 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26301 static sbitmap
26302 rs6000_components_for_bb (basic_block bb)
26303 {
26304 rs6000_stack_t *info = rs6000_stack_info ();
26305
26306 bitmap in = DF_LIVE_IN (bb);
26307 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26308 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26309
26310 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26311 bitmap_clear (components);
26312
26313 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26314
26315 /* GPRs. */
26316 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26317 if (bitmap_bit_p (in, regno)
26318 || bitmap_bit_p (gen, regno)
26319 || bitmap_bit_p (kill, regno))
26320 bitmap_set_bit (components, regno);
26321
26322 /* FPRs. */
26323 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26324 if (bitmap_bit_p (in, regno)
26325 || bitmap_bit_p (gen, regno)
26326 || bitmap_bit_p (kill, regno))
26327 bitmap_set_bit (components, regno);
26328
26329 /* The link register. */
26330 if (bitmap_bit_p (in, LR_REGNO)
26331 || bitmap_bit_p (gen, LR_REGNO)
26332 || bitmap_bit_p (kill, LR_REGNO))
26333 bitmap_set_bit (components, 0);
26334
26335 /* The TOC save. */
26336 if (bitmap_bit_p (in, TOC_REGNUM)
26337 || bitmap_bit_p (gen, TOC_REGNUM)
26338 || bitmap_bit_p (kill, TOC_REGNUM))
26339 bitmap_set_bit (components, 2);
26340
26341 return components;
26342 }
26343
26344 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26345 static void
26346 rs6000_disqualify_components (sbitmap components, edge e,
26347 sbitmap edge_components, bool /*is_prologue*/)
26348 {
26349 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26350 live where we want to place that code. */
26351 if (bitmap_bit_p (edge_components, 0)
26352 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26353 {
26354 if (dump_file)
26355 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26356 "on entry to bb %d\n", e->dest->index);
26357 bitmap_clear_bit (components, 0);
26358 }
26359 }
26360
26361 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26362 static void
26363 rs6000_emit_prologue_components (sbitmap components)
26364 {
26365 rs6000_stack_t *info = rs6000_stack_info ();
26366 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26367 ? HARD_FRAME_POINTER_REGNUM
26368 : STACK_POINTER_REGNUM);
26369
26370 machine_mode reg_mode = Pmode;
26371 int reg_size = TARGET_32BIT ? 4 : 8;
26372 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26373 int fp_reg_size = 8;
26374
26375 /* Prologue for LR. */
26376 if (bitmap_bit_p (components, 0))
26377 {
26378 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26379 rtx reg = gen_rtx_REG (reg_mode, 0);
26380 rtx_insn *insn = emit_move_insn (reg, lr);
26381 RTX_FRAME_RELATED_P (insn) = 1;
26382 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26383
26384 int offset = info->lr_save_offset;
26385 if (info->push_p)
26386 offset += info->total_size;
26387
26388 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26389 RTX_FRAME_RELATED_P (insn) = 1;
26390 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26391 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26392 }
26393
26394 /* Prologue for TOC. */
26395 if (bitmap_bit_p (components, 2))
26396 {
26397 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26398 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26399 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26400 }
26401
26402 /* Prologue for the GPRs. */
26403 int offset = info->gp_save_offset;
26404 if (info->push_p)
26405 offset += info->total_size;
26406
26407 for (int i = info->first_gp_reg_save; i < 32; i++)
26408 {
26409 if (bitmap_bit_p (components, i))
26410 {
26411 rtx reg = gen_rtx_REG (reg_mode, i);
26412 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26413 RTX_FRAME_RELATED_P (insn) = 1;
26414 rtx set = copy_rtx (single_set (insn));
26415 add_reg_note (insn, REG_CFA_OFFSET, set);
26416 }
26417
26418 offset += reg_size;
26419 }
26420
26421 /* Prologue for the FPRs. */
26422 offset = info->fp_save_offset;
26423 if (info->push_p)
26424 offset += info->total_size;
26425
26426 for (int i = info->first_fp_reg_save; i < 64; i++)
26427 {
26428 if (bitmap_bit_p (components, i))
26429 {
26430 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26431 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26432 RTX_FRAME_RELATED_P (insn) = 1;
26433 rtx set = copy_rtx (single_set (insn));
26434 add_reg_note (insn, REG_CFA_OFFSET, set);
26435 }
26436
26437 offset += fp_reg_size;
26438 }
26439 }
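
/* The REG_CFA_OFFSET notes attached above give dwarf2cfi the exact
   store, e.g. for r30 saved 16 bytes above the stack pointer:

	(set (mem/c:DI (plus:DI (reg:DI 1) (const_int 16)))
	     (reg:DI 30))

   so the unwinder knows the slot without re-analyzing the insn stream.
   (Offsets and register numbers are illustrative.)  */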
26440
26441 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26442 static void
26443 rs6000_emit_epilogue_components (sbitmap components)
26444 {
26445 rs6000_stack_t *info = rs6000_stack_info ();
26446 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26447 ? HARD_FRAME_POINTER_REGNUM
26448 : STACK_POINTER_REGNUM);
26449
26450 machine_mode reg_mode = Pmode;
26451 int reg_size = TARGET_32BIT ? 4 : 8;
26452
26453 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26454 int fp_reg_size = 8;
26455
26456 /* Epilogue for the FPRs. */
26457 int offset = info->fp_save_offset;
26458 if (info->push_p)
26459 offset += info->total_size;
26460
26461 for (int i = info->first_fp_reg_save; i < 64; i++)
26462 {
26463 if (bitmap_bit_p (components, i))
26464 {
26465 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26466 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26467 RTX_FRAME_RELATED_P (insn) = 1;
26468 add_reg_note (insn, REG_CFA_RESTORE, reg);
26469 }
26470
26471 offset += fp_reg_size;
26472 }
26473
26474 /* Epilogue for the GPRs. */
26475 offset = info->gp_save_offset;
26476 if (info->push_p)
26477 offset += info->total_size;
26478
26479 for (int i = info->first_gp_reg_save; i < 32; i++)
26480 {
26481 if (bitmap_bit_p (components, i))
26482 {
26483 rtx reg = gen_rtx_REG (reg_mode, i);
26484 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26485 RTX_FRAME_RELATED_P (insn) = 1;
26486 add_reg_note (insn, REG_CFA_RESTORE, reg);
26487 }
26488
26489 offset += reg_size;
26490 }
26491
26492 /* Epilogue for LR. */
26493 if (bitmap_bit_p (components, 0))
26494 {
26495 int offset = info->lr_save_offset;
26496 if (info->push_p)
26497 offset += info->total_size;
26498
26499 rtx reg = gen_rtx_REG (reg_mode, 0);
26500 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26501
26502 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26503 insn = emit_move_insn (lr, reg);
26504 RTX_FRAME_RELATED_P (insn) = 1;
26505 add_reg_note (insn, REG_CFA_RESTORE, lr);
26506 }
26507 }
26508
26509 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26510 static void
26511 rs6000_set_handled_components (sbitmap components)
26512 {
26513 rs6000_stack_t *info = rs6000_stack_info ();
26514
26515 for (int i = info->first_gp_reg_save; i < 32; i++)
26516 if (bitmap_bit_p (components, i))
26517 cfun->machine->gpr_is_wrapped_separately[i] = true;
26518
26519 for (int i = info->first_fp_reg_save; i < 64; i++)
26520 if (bitmap_bit_p (components, i))
26521 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26522
26523 if (bitmap_bit_p (components, 0))
26524 cfun->machine->lr_is_wrapped_separately = true;
26525
26526 if (bitmap_bit_p (components, 2))
26527 cfun->machine->toc_is_wrapped_separately = true;
26528 }
26529
26530 /* VRSAVE is a bit vector representing which AltiVec registers
26531 are used. The OS uses this to determine which vector
26532 registers to save on a context switch. We need to save
26533 VRSAVE on the stack frame, add whatever AltiVec registers we
26534 used in this function, and do the corresponding magic in the
26535 epilogue. */
26536 static void
26537 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26538 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26539 {
26540 /* Get VRSAVE into a GPR. */
26541 rtx reg = gen_rtx_REG (SImode, save_regno);
26542 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26543 if (TARGET_MACHO)
26544 emit_insn (gen_get_vrsave_internal (reg));
26545 else
26546 emit_insn (gen_rtx_SET (reg, vrsave));
26547
26548 /* Save VRSAVE. */
26549 int offset = info->vrsave_save_offset + frame_off;
26550 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26551
26552 /* Include the registers in the mask. */
26553 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26554
26555 emit_insn (generate_set_vrsave (reg, info, 0));
26556 }
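
/* On a typical ELF target the sequence above amounts to something like:

	mfspr 0,256		# read VRSAVE (SPR 256)
	stw 0,<offset>(1)	# save the old value in the frame
	oris 0,0,<mask>		# add this function's AltiVec regs
	mtspr 256,0		# update VRSAVE

   where the PARALLEL built by generate_set_vrsave keeps the scheduler
   from reordering the mtspr relative to AltiVec register accesses.  */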
26557
26558 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26559 called, it left the arg pointer to the old stack in r29. Otherwise, the
26560 arg pointer is the top of the current frame. */
26561 static void
26562 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26563 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26564 {
26565 cfun->machine->split_stack_argp_used = true;
26566
26567 if (sp_adjust)
26568 {
26569 rtx r12 = gen_rtx_REG (Pmode, 12);
26570 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26571 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26572 emit_insn_before (set_r12, sp_adjust);
26573 }
26574 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26575 {
26576 rtx r12 = gen_rtx_REG (Pmode, 12);
26577 if (frame_off == 0)
26578 emit_move_insn (r12, frame_reg_rtx);
26579 else
26580 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26581 }
26582
26583 if (info->push_p)
26584 {
26585 rtx r12 = gen_rtx_REG (Pmode, 12);
26586 rtx r29 = gen_rtx_REG (Pmode, 29);
26587 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26588 rtx not_more = gen_label_rtx ();
26589 rtx jump;
26590
26591 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26592 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26593 gen_rtx_LABEL_REF (VOIDmode, not_more),
26594 pc_rtx);
26595 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26596 JUMP_LABEL (jump) = not_more;
26597 LABEL_NUSES (not_more) += 1;
26598 emit_move_insn (r12, r29);
26599 emit_label (not_more);
26600 }
26601 }
26602
26603 /* Emit function prologue as insns. */
26604
26605 void
26606 rs6000_emit_prologue (void)
26607 {
26608 rs6000_stack_t *info = rs6000_stack_info ();
26609 machine_mode reg_mode = Pmode;
26610 int reg_size = TARGET_32BIT ? 4 : 8;
26611 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26612 int fp_reg_size = 8;
26613 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26614 rtx frame_reg_rtx = sp_reg_rtx;
26615 unsigned int cr_save_regno;
26616 rtx cr_save_rtx = NULL_RTX;
26617 rtx_insn *insn;
26618 int strategy;
26619 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26620 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26621 && call_used_regs[STATIC_CHAIN_REGNUM]);
26622 int using_split_stack = (flag_split_stack
26623 && (lookup_attribute ("no_split_stack",
26624 DECL_ATTRIBUTES (cfun->decl))
26625 == NULL));
26626
26627 /* Offset to top of frame for frame_reg and sp respectively. */
26628 HOST_WIDE_INT frame_off = 0;
26629 HOST_WIDE_INT sp_off = 0;
26630 /* sp_adjust is the stack adjusting instruction, tracked so that the
26631 insn setting up the split-stack arg pointer can be emitted just
26632 prior to it, when r12 is not used here for other purposes. */
26633 rtx_insn *sp_adjust = 0;
26634
26635 #if CHECKING_P
26636 /* Track and check usage of r0, r11, r12. */
26637 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26638 #define START_USE(R) do \
26639 { \
26640 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26641 reg_inuse |= 1 << (R); \
26642 } while (0)
26643 #define END_USE(R) do \
26644 { \
26645 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26646 reg_inuse &= ~(1 << (R)); \
26647 } while (0)
26648 #define NOT_INUSE(R) do \
26649 { \
26650 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26651 } while (0)
26652 #else
26653 #define START_USE(R) do {} while (0)
26654 #define END_USE(R) do {} while (0)
26655 #define NOT_INUSE(R) do {} while (0)
26656 #endif
26657
26658 if (DEFAULT_ABI == ABI_ELFv2
26659 && !TARGET_SINGLE_PIC_BASE)
26660 {
26661 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26662
26663 /* With -mminimal-toc we may generate an extra use of r2 below. */
26664 if (TARGET_TOC && TARGET_MINIMAL_TOC
26665 && !constant_pool_empty_p ())
26666 cfun->machine->r2_setup_needed = true;
26667 }
26668
26670 if (flag_stack_usage_info)
26671 current_function_static_stack_size = info->total_size;
26672
26673 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26674 {
26675 HOST_WIDE_INT size = info->total_size;
26676
26677 if (crtl->is_leaf && !cfun->calls_alloca)
26678 {
26679 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26680 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26681 size - get_stack_check_protect ());
26682 }
26683 else if (size > 0)
26684 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26685 }
26686
26687 if (TARGET_FIX_AND_CONTINUE)
26688 {
26689 /* GDB on Darwin arranges to forward a function from the old
26690 address by modifying the first 5 instructions of the function
26691 to branch to the overriding function. This is necessary to
26692 permit function pointers that point to the old function to
26693 actually forward to the new function. */
26694 emit_insn (gen_nop ());
26695 emit_insn (gen_nop ());
26696 emit_insn (gen_nop ());
26697 emit_insn (gen_nop ());
26698 emit_insn (gen_nop ());
26699 }
26700
26701 /* Handle world saves specially here. */
26702 if (WORLD_SAVE_P (info))
26703 {
26704 int i, j, sz;
26705 rtx treg;
26706 rtvec p;
26707 rtx reg0;
26708
26709 /* save_world expects lr in r0. */
26710 reg0 = gen_rtx_REG (Pmode, 0);
26711 if (info->lr_save_p)
26712 {
26713 insn = emit_move_insn (reg0,
26714 gen_rtx_REG (Pmode, LR_REGNO));
26715 RTX_FRAME_RELATED_P (insn) = 1;
26716 }
26717
26718 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26719 assumptions about the offsets of various bits of the stack
26720 frame. */
26721 gcc_assert (info->gp_save_offset == -220
26722 && info->fp_save_offset == -144
26723 && info->lr_save_offset == 8
26724 && info->cr_save_offset == 4
26725 && info->push_p
26726 && info->lr_save_p
26727 && (!crtl->calls_eh_return
26728 || info->ehrd_offset == -432)
26729 && info->vrsave_save_offset == -224
26730 && info->altivec_save_offset == -416);
26731
26732 treg = gen_rtx_REG (SImode, 11);
26733 emit_move_insn (treg, GEN_INT (-info->total_size));
26734
26735 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26736 in R11. It also clobbers R12, so beware! */
26737
26738 /* Preserve CR2 for save_world prologues. */
26739 sz = 5;
26740 sz += 32 - info->first_gp_reg_save;
26741 sz += 64 - info->first_fp_reg_save;
26742 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26743 p = rtvec_alloc (sz);
26744 j = 0;
26745 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
26746 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26747 gen_rtx_SYMBOL_REF (Pmode,
26748 "*save_world"));
26749 /* We do floats first so that the instruction pattern matches
26750 properly. */
26751 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26752 RTVEC_ELT (p, j++)
26753 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26754 info->first_fp_reg_save + i),
26755 frame_reg_rtx,
26756 info->fp_save_offset + frame_off + 8 * i);
26757 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26758 RTVEC_ELT (p, j++)
26759 = gen_frame_store (gen_rtx_REG (V4SImode,
26760 info->first_altivec_reg_save + i),
26761 frame_reg_rtx,
26762 info->altivec_save_offset + frame_off + 16 * i);
26763 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26764 RTVEC_ELT (p, j++)
26765 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26766 frame_reg_rtx,
26767 info->gp_save_offset + frame_off + reg_size * i);
26768
26769 /* CR register traditionally saved as CR2. */
26770 RTVEC_ELT (p, j++)
26771 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26772 frame_reg_rtx, info->cr_save_offset + frame_off);
26773 /* Explain about use of R0. */
26774 if (info->lr_save_p)
26775 RTVEC_ELT (p, j++)
26776 = gen_frame_store (reg0,
26777 frame_reg_rtx, info->lr_save_offset + frame_off);
26778 /* Explain what happens to the stack pointer. */
26779 {
26780 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26781 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26782 }
26783
26784 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26785 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26786 treg, GEN_INT (-info->total_size));
26787 sp_off = frame_off = info->total_size;
26788 }
26789
26790 strategy = info->savres_strategy;
26791
26792 /* For V.4, update stack before we do any saving and set back pointer. */
26793 if (! WORLD_SAVE_P (info)
26794 && info->push_p
26795 && (DEFAULT_ABI == ABI_V4
26796 || crtl->calls_eh_return))
26797 {
26798 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26799 || !(strategy & SAVE_INLINE_GPRS)
26800 || !(strategy & SAVE_INLINE_VRS));
26801 int ptr_regno = -1;
26802 rtx ptr_reg = NULL_RTX;
26803 int ptr_off = 0;
26804
26805 if (info->total_size < 32767)
26806 frame_off = info->total_size;
26807 else if (need_r11)
26808 ptr_regno = 11;
26809 else if (info->cr_save_p
26810 || info->lr_save_p
26811 || info->first_fp_reg_save < 64
26812 || info->first_gp_reg_save < 32
26813 || info->altivec_size != 0
26814 || info->vrsave_size != 0
26815 || crtl->calls_eh_return)
26816 ptr_regno = 12;
26817 else
26818 {
26819 /* The prologue won't be saving any regs so there is no need
26820 to set up a frame register to access any frame save area.
26821 We also won't be using frame_off anywhere below, but set
26822 the correct value anyway to protect against future
26823 changes to this function. */
26824 frame_off = info->total_size;
26825 }
26826 if (ptr_regno != -1)
26827 {
26828 /* Set up the frame offset to that needed by the first
26829 out-of-line save function. */
26830 START_USE (ptr_regno);
26831 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26832 frame_reg_rtx = ptr_reg;
26833 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26834 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26835 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26836 ptr_off = info->gp_save_offset + info->gp_size;
26837 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26838 ptr_off = info->altivec_save_offset + info->altivec_size;
26839 frame_off = -ptr_off;
26840 }
26841 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26842 ptr_reg, ptr_off);
26843 if (REGNO (frame_reg_rtx) == 12)
26844 sp_adjust = 0;
26845 sp_off = info->total_size;
26846 if (frame_reg_rtx != sp_reg_rtx)
26847 rs6000_emit_stack_tie (frame_reg_rtx, false);
26848 }
26849
26850 /* If we use the link register, get it into r0. */
26851 if (!WORLD_SAVE_P (info) && info->lr_save_p
26852 && !cfun->machine->lr_is_wrapped_separately)
26853 {
26854 rtx addr, reg, mem;
26855
26856 reg = gen_rtx_REG (Pmode, 0);
26857 START_USE (0);
26858 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26859 RTX_FRAME_RELATED_P (insn) = 1;
26860
26861 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26862 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26863 {
26864 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26865 GEN_INT (info->lr_save_offset + frame_off));
26866 mem = gen_rtx_MEM (Pmode, addr);
26867 /* This should not be in rs6000_sr_alias_set, because of
26868 __builtin_return_address. */
26869
26870 insn = emit_move_insn (mem, reg);
26871 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26872 NULL_RTX, NULL_RTX);
26873 END_USE (0);
26874 }
26875 }
26876
26877 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26878 r12 will be needed by the out-of-line GPR save. */
26879 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26880 && !(strategy & (SAVE_INLINE_GPRS
26881 | SAVE_NOINLINE_GPRS_SAVES_LR))
26882 ? 11 : 12);
26883 if (!WORLD_SAVE_P (info)
26884 && info->cr_save_p
26885 && REGNO (frame_reg_rtx) != cr_save_regno
26886 && !(using_static_chain_p && cr_save_regno == 11)
26887 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26888 {
26889 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26890 START_USE (cr_save_regno);
26891 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26892 }
26893
26894 /* Do any required saving of FPRs. If we are saving them inline, do
26895 it ourselves. Otherwise, call an out-of-line routine. */
26896 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26897 {
26898 int offset = info->fp_save_offset + frame_off;
26899 for (int i = info->first_fp_reg_save; i < 64; i++)
26900 {
26901 if (save_reg_p (i)
26902 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26903 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26904 sp_off - frame_off);
26905
26906 offset += fp_reg_size;
26907 }
26908 }
26909 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26910 {
26911 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26912 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26913 unsigned ptr_regno = ptr_regno_for_savres (sel);
26914 rtx ptr_reg = frame_reg_rtx;
26915
26916 if (REGNO (frame_reg_rtx) == ptr_regno)
26917 gcc_checking_assert (frame_off == 0);
26918 else
26919 {
26920 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26921 NOT_INUSE (ptr_regno);
26922 emit_insn (gen_add3_insn (ptr_reg,
26923 frame_reg_rtx, GEN_INT (frame_off)));
26924 }
26925 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26926 info->fp_save_offset,
26927 info->lr_save_offset,
26928 DFmode, sel);
26929 rs6000_frame_related (insn, ptr_reg, sp_off,
26930 NULL_RTX, NULL_RTX);
26931 if (lr)
26932 END_USE (0);
26933 }
26934
26935 /* Save GPRs. This is done as a PARALLEL if we are using
26936 the store-multiple instructions. */
26937 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26938 {
26939 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26940 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26941 unsigned ptr_regno = ptr_regno_for_savres (sel);
26942 rtx ptr_reg = frame_reg_rtx;
26943 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26944 int end_save = info->gp_save_offset + info->gp_size;
26945 int ptr_off;
26946
26947 if (ptr_regno == 12)
26948 sp_adjust = 0;
26949 if (!ptr_set_up)
26950 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26951
26952 /* Need to adjust r11 (r12) if we saved any FPRs. */
26953 if (end_save + frame_off != 0)
26954 {
26955 rtx offset = GEN_INT (end_save + frame_off);
26956
26957 if (ptr_set_up)
26958 frame_off = -end_save;
26959 else
26960 NOT_INUSE (ptr_regno);
26961 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26962 }
26963 else if (!ptr_set_up)
26964 {
26965 NOT_INUSE (ptr_regno);
26966 emit_move_insn (ptr_reg, frame_reg_rtx);
26967 }
26968 ptr_off = -end_save;
26969 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26970 info->gp_save_offset + ptr_off,
26971 info->lr_save_offset + ptr_off,
26972 reg_mode, sel);
26973 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26974 NULL_RTX, NULL_RTX);
26975 if (lr)
26976 END_USE (0);
26977 }
26978 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26979 {
26980 rtvec p;
26981 int i;
26982 p = rtvec_alloc (32 - info->first_gp_reg_save);
26983 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26984 RTVEC_ELT (p, i)
26985 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26986 frame_reg_rtx,
26987 info->gp_save_offset + frame_off + reg_size * i);
26988 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26989 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26990 NULL_RTX, NULL_RTX);
26991 }
26992 else if (!WORLD_SAVE_P (info))
26993 {
26994 int offset = info->gp_save_offset + frame_off;
26995 for (int i = info->first_gp_reg_save; i < 32; i++)
26996 {
26997 if (save_reg_p (i)
26998 && !cfun->machine->gpr_is_wrapped_separately[i])
26999 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27000 sp_off - frame_off);
27001
27002 offset += reg_size;
27003 }
27004 }
27005
27006 if (crtl->calls_eh_return)
27007 {
27008 unsigned int i;
27009 rtvec p;
27010
27011 for (i = 0; ; ++i)
27012 {
27013 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27014 if (regno == INVALID_REGNUM)
27015 break;
27016 }
27017
27018 p = rtvec_alloc (i);
27019
27020 for (i = 0; ; ++i)
27021 {
27022 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27023 if (regno == INVALID_REGNUM)
27024 break;
27025
27026 rtx set
27027 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27028 sp_reg_rtx,
27029 info->ehrd_offset + sp_off + reg_size * (int) i);
27030 RTVEC_ELT (p, i) = set;
27031 RTX_FRAME_RELATED_P (set) = 1;
27032 }
27033
27034 insn = emit_insn (gen_blockage ());
27035 RTX_FRAME_RELATED_P (insn) = 1;
27036 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27037 }
27038
27039 /* In the AIX ABI we need to make sure r2 is really saved. */
27040 if (TARGET_AIX && crtl->calls_eh_return)
27041 {
27042 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27043 rtx join_insn, note;
27044 rtx_insn *save_insn;
27045 long toc_restore_insn;
27046
27047 tmp_reg = gen_rtx_REG (Pmode, 11);
27048 tmp_reg_si = gen_rtx_REG (SImode, 11);
27049 if (using_static_chain_p)
27050 {
27051 START_USE (0);
27052 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27053 }
27054 else
27055 START_USE (11);
27056 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27057 /* Peek at the instruction to which this function returns. If it's
27058 restoring r2, then we know we've already saved r2. We can't
27059 unconditionally save r2 because the value we have will already
27060 be updated if we arrived at this function via a plt call or
27061 toc adjusting stub. */
27062 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27063 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27064 + RS6000_TOC_SAVE_SLOT);
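/* 0x80410000 is the encoding of "lwz r2,0(r1)" and 0xE8410000 that of
   "ld r2,0(r1)"; adding RS6000_TOC_SAVE_SLOT fills in the displacement
   field, giving the exact TOC-restore instruction that callers place
   after calls made through a PLT or TOC-adjusting stub.  */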
27065 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27066 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27067 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27068 validate_condition_mode (EQ, CCUNSmode);
27069 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27070 emit_insn (gen_rtx_SET (compare_result,
27071 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27072 toc_save_done = gen_label_rtx ();
27073 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27074 gen_rtx_EQ (VOIDmode, compare_result,
27075 const0_rtx),
27076 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27077 pc_rtx);
27078 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27079 JUMP_LABEL (jump) = toc_save_done;
27080 LABEL_NUSES (toc_save_done) += 1;
27081
27082 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27083 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27084 sp_off - frame_off);
27085
27086 emit_label (toc_save_done);
27087
27088 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27089 have a CFG that has different saves along different paths.
27090 Move the note to a dummy blockage insn, which describes that
27091 R2 is unconditionally saved after the label. */
27092 /* ??? An alternate representation might be a special insn pattern
27093 containing both the branch and the store. That might give the
27094 code that minimizes the number of DW_CFA_advance opcodes more
27095 freedom in placing the annotations. */
27096 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27097 if (note)
27098 remove_note (save_insn, note);
27099 else
27100 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27101 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27102 RTX_FRAME_RELATED_P (save_insn) = 0;
27103
27104 join_insn = emit_insn (gen_blockage ());
27105 REG_NOTES (join_insn) = note;
27106 RTX_FRAME_RELATED_P (join_insn) = 1;
27107
27108 if (using_static_chain_p)
27109 {
27110 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27111 END_USE (0);
27112 }
27113 else
27114 END_USE (11);
27115 }
27116
27117 /* Save CR if we use any that must be preserved. */
27118 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27119 {
27120 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27121 GEN_INT (info->cr_save_offset + frame_off));
27122 rtx mem = gen_frame_mem (SImode, addr);
27123
27124 /* If we didn't copy CR before, do so now using r0. */
27125 if (cr_save_rtx == NULL_RTX)
27126 {
27127 START_USE (0);
27128 cr_save_rtx = gen_rtx_REG (SImode, 0);
27129 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27130 }
27131
27132 /* Saving CR requires a two-instruction sequence: one instruction
27133 to move the CR to a general-purpose register, and a second
27134 instruction that stores the GPR to memory.
27135
27136 We do not emit any DWARF CFI records for the first of these,
27137 because we cannot properly represent the fact that CR is saved in
27138 a register. One reason is that we cannot express that multiple
27139 CR fields are saved; another reason is that on 64-bit, the size
27140 of the CR register in DWARF (4 bytes) differs from the size of
27141 a general-purpose register.
27142
27143 This means if any intervening instruction were to clobber one of
27144 the call-saved CR fields, we'd have incorrect CFI. To prevent
27145 this from happening, we mark the store to memory as a use of
27146 those CR fields, which prevents any such instruction from being
27147 scheduled in between the two instructions. */
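/* Concretely, the protected sequence is roughly:

	mfcr 12			# or a single-field mfocrf
	stw 12,<cr_save_offset>(1)

   with the USEs below keeping anything that writes the saved CR
   fields from being scheduled between the two.  */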
27148 rtx crsave_v[9];
27149 int n_crsave = 0;
27150 int i;
27151
27152 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27153 for (i = 0; i < 8; i++)
27154 if (save_reg_p (CR0_REGNO + i))
27155 crsave_v[n_crsave++]
27156 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27157
27158 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27159 gen_rtvec_v (n_crsave, crsave_v)));
27160 END_USE (REGNO (cr_save_rtx));
27161
27162 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27163 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27164 so we need to construct a frame expression manually. */
27165 RTX_FRAME_RELATED_P (insn) = 1;
27166
27167 /* Update address to be stack-pointer relative, like
27168 rs6000_frame_related would do. */
27169 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27170 GEN_INT (info->cr_save_offset + sp_off));
27171 mem = gen_frame_mem (SImode, addr);
27172
27173 if (DEFAULT_ABI == ABI_ELFv2)
27174 {
27175 /* In the ELFv2 ABI we generate separate CFI records for each
27176 CR field that was actually saved. They all point to the
27177 same 32-bit stack slot. */
27178 rtx crframe[8];
27179 int n_crframe = 0;
27180
27181 for (i = 0; i < 8; i++)
27182 if (save_reg_p (CR0_REGNO + i))
27183 {
27184 crframe[n_crframe]
27185 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27186
27187 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27188 n_crframe++;
27189 }
27190
27191 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27192 gen_rtx_PARALLEL (VOIDmode,
27193 gen_rtvec_v (n_crframe, crframe)));
27194 }
27195 else
27196 {
27197 /* In other ABIs, by convention, we use a single CR regnum to
27198 represent the fact that all call-saved CR fields are saved.
27199 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27200 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27201 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27202 }
27203 }
27204
27205 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27206 *separate* slots if the routine calls __builtin_eh_return, so
27207 that they can be independently restored by the unwinder. */
27208 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27209 {
27210 int i, cr_off = info->ehcr_offset;
27211 rtx crsave;
27212
27213 /* ??? We might get better performance by using multiple mfocrf
27214 instructions. */
27215 crsave = gen_rtx_REG (SImode, 0);
27216 emit_insn (gen_prologue_movesi_from_cr (crsave));
27217
27218 for (i = 0; i < 8; i++)
27219 if (!call_used_regs[CR0_REGNO + i])
27220 {
27221 rtvec p = rtvec_alloc (2);
27222 RTVEC_ELT (p, 0)
27223 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27224 RTVEC_ELT (p, 1)
27225 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27226
27227 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27228
27229 RTX_FRAME_RELATED_P (insn) = 1;
27230 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27231 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27232 sp_reg_rtx, cr_off + sp_off));
27233
27234 cr_off += reg_size;
27235 }
27236 }
27237
27238 /* If we are emitting stack probes, but allocate no stack, then
27239 just note that in the dump file. */
27240 if (flag_stack_clash_protection
27241 && dump_file
27242 && !info->push_p)
27243 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27244
27245 /* Update stack and set back pointer unless this is V.4,
27246 for which it was done previously. */
27247 if (!WORLD_SAVE_P (info) && info->push_p
27248 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27249 {
27250 rtx ptr_reg = NULL;
27251 int ptr_off = 0;
27252
27253 /* If saving altivec regs we need to be able to address all save
27254 locations using a 16-bit offset. */
27255 if ((strategy & SAVE_INLINE_VRS) == 0
27256 || (info->altivec_size != 0
27257 && (info->altivec_save_offset + info->altivec_size - 16
27258 + info->total_size - frame_off) > 32767)
27259 || (info->vrsave_size != 0
27260 && (info->vrsave_save_offset
27261 + info->total_size - frame_off) > 32767))
27262 {
27263 int sel = SAVRES_SAVE | SAVRES_VR;
27264 unsigned ptr_regno = ptr_regno_for_savres (sel);
27265
27266 if (using_static_chain_p
27267 && ptr_regno == STATIC_CHAIN_REGNUM)
27268 ptr_regno = 12;
27269 if (REGNO (frame_reg_rtx) != ptr_regno)
27270 START_USE (ptr_regno);
27271 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27272 frame_reg_rtx = ptr_reg;
27273 ptr_off = info->altivec_save_offset + info->altivec_size;
27274 frame_off = -ptr_off;
27275 }
27276 else if (REGNO (frame_reg_rtx) == 1)
27277 frame_off = info->total_size;
27278 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27279 ptr_reg, ptr_off);
27280 if (REGNO (frame_reg_rtx) == 12)
27281 sp_adjust = 0;
27282 sp_off = info->total_size;
27283 if (frame_reg_rtx != sp_reg_rtx)
27284 rs6000_emit_stack_tie (frame_reg_rtx, false);
27285 }
27286
27287 /* Set frame pointer, if needed. */
27288 if (frame_pointer_needed)
27289 {
27290 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27291 sp_reg_rtx);
27292 RTX_FRAME_RELATED_P (insn) = 1;
27293 }
27294
27295 /* Save AltiVec registers if needed. Save here because the red zone does
27296 not always include AltiVec registers. */
27297 if (!WORLD_SAVE_P (info)
27298 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27299 {
27300 int end_save = info->altivec_save_offset + info->altivec_size;
27301 int ptr_off;
27302 /* Oddly, the vector save/restore functions point r0 at the end
27303 of the save area, then use r11 or r12 to load offsets for
27304 [reg+reg] addressing. */
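/* Illustratively, the out-of-line path below boils down to

addi 0,1,<end_save> # r0 -> end of the VR save area
bl <out-of-line VR save routine>

with the routine addressing slots as [r0 + r11/r12]; the exact
registers and routine name depend on the target. */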
27305 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27306 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27307 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27308
27309 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27310 NOT_INUSE (0);
27311 if (scratch_regno == 12)
27312 sp_adjust = 0;
27313 if (end_save + frame_off != 0)
27314 {
27315 rtx offset = GEN_INT (end_save + frame_off);
27316
27317 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27318 }
27319 else
27320 emit_move_insn (ptr_reg, frame_reg_rtx);
27321
27322 ptr_off = -end_save;
27323 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27324 info->altivec_save_offset + ptr_off,
27325 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27326 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27327 NULL_RTX, NULL_RTX);
27328 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27329 {
27330 /* The oddity mentioned above clobbered our frame reg. */
27331 emit_move_insn (frame_reg_rtx, ptr_reg);
27332 frame_off = ptr_off;
27333 }
27334 }
27335 else if (!WORLD_SAVE_P (info)
27336 && info->altivec_size != 0)
27337 {
27338 int i;
27339
27340 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27341 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27342 {
27343 rtx areg, savereg, mem;
27344 HOST_WIDE_INT offset;
27345
27346 offset = (info->altivec_save_offset + frame_off
27347 + 16 * (i - info->first_altivec_reg_save));
27348
27349 savereg = gen_rtx_REG (V4SImode, i);
27350
27351 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27352 {
27353 mem = gen_frame_mem (V4SImode,
27354 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27355 GEN_INT (offset)));
27356 insn = emit_insn (gen_rtx_SET (mem, savereg));
27357 areg = NULL_RTX;
27358 }
27359 else
27360 {
27361 NOT_INUSE (0);
27362 areg = gen_rtx_REG (Pmode, 0);
27363 emit_move_insn (areg, GEN_INT (offset));
27364
27365 /* AltiVec addressing mode is [reg+reg]. */
27366 mem = gen_frame_mem (V4SImode,
27367 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27368
27369 /* Rather than emitting a generic move, force use of the stvx
27370 instruction, which we always want on ISA 2.07 (power8) systems.
27371 In particular we don't want xxpermdi/stxvd2x for little
27372 endian. */
27373 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27374 }
27375
27376 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27377 areg, GEN_INT (offset));
27378 }
27379 }
27380
27381 /* VRSAVE is a bit vector representing which AltiVec registers
27382 are used. The OS uses this to determine which vector
27383 registers to save on a context switch. We need to save
27384 VRSAVE on the stack frame, add whatever AltiVec registers we
27385 used in this function, and do the corresponding magic in the
27386 epilogue. */
27387
27388 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27389 {
27390 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27391 be using r12 as frame_reg_rtx and r11 as the static chain
27392 pointer for nested functions. */
27393 int save_regno = 12;
27394 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27395 && !using_static_chain_p)
27396 save_regno = 11;
27397 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27398 {
27399 save_regno = 11;
27400 if (using_static_chain_p)
27401 save_regno = 0;
27402 }
27403 NOT_INUSE (save_regno);
27404
27405 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27406 }
27407
27408 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27409 if (!TARGET_SINGLE_PIC_BASE
27410 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27411 && !constant_pool_empty_p ())
27412 || (DEFAULT_ABI == ABI_V4
27413 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27414 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27415 {
27416 /* If emit_load_toc_table will use the link register, we need to save
27417 it. We use R12 for this purpose because emit_load_toc_table
27418 can use register 0. This allows us to use a plain 'blr' to return
27419 from the procedure more often. */
27420 int save_LR_around_toc_setup = (TARGET_ELF
27421 && DEFAULT_ABI == ABI_V4
27422 && flag_pic
27423 && ! info->lr_save_p
27424 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27425 if (save_LR_around_toc_setup)
27426 {
27427 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27428 rtx tmp = gen_rtx_REG (Pmode, 12);
27429
27430 sp_adjust = 0;
27431 insn = emit_move_insn (tmp, lr);
27432 RTX_FRAME_RELATED_P (insn) = 1;
27433
27434 rs6000_emit_load_toc_table (TRUE);
27435
27436 insn = emit_move_insn (lr, tmp);
27437 add_reg_note (insn, REG_CFA_RESTORE, lr);
27438 RTX_FRAME_RELATED_P (insn) = 1;
27439 }
27440 else
27441 rs6000_emit_load_toc_table (TRUE);
27442 }
27443
27444 #if TARGET_MACHO
27445 if (!TARGET_SINGLE_PIC_BASE
27446 && DEFAULT_ABI == ABI_DARWIN
27447 && flag_pic && crtl->uses_pic_offset_table)
27448 {
27449 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27450 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27451
27452 /* Save and restore LR locally around this call (in R0). */
27453 if (!info->lr_save_p)
27454 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27455
27456 emit_insn (gen_load_macho_picbase (src));
27457
27458 emit_move_insn (gen_rtx_REG (Pmode,
27459 RS6000_PIC_OFFSET_TABLE_REGNUM),
27460 lr);
27461
27462 if (!info->lr_save_p)
27463 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27464 }
27465 #endif
27466
27467 /* If we need to, save the TOC register after doing the stack setup.
27468 Do not emit eh frame info for this save. The unwinder wants info,
27469 conceptually attached to instructions in this function, about
27470 register values in the caller of this function. This R2 may have
27471 already been changed from the value in the caller.
27472 We don't attempt to write accurate DWARF EH frame info for R2
27473 because code emitted by gcc for a (non-pointer) function call
27474 doesn't save and restore R2. Instead, R2 is managed out-of-line
27475 by a linker generated plt call stub when the function resides in
27476 a shared library. This behavior is costly to describe in DWARF,
27477 both in terms of the size of DWARF info and the time taken in the
27478 unwinder to interpret it. R2 changes, apart from the
27479 calls_eh_return case earlier in this function, are handled by
27480 linux-unwind.h frob_update_context. */
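/* For example (illustrative, ELFv2): a cross-module call is emitted
as "bl callee" followed by a "nop"; the linker routes the call
through a stub that first saves r2 to its stack slot and rewrites
the nop to "ld 2,24(1)" so the TOC pointer is reloaded after the
call. None of that is described in this function's CFI. */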
27481 if (rs6000_save_toc_in_prologue_p ()
27482 && !cfun->machine->toc_is_wrapped_separately)
27483 {
27484 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27485 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27486 }
27487
27488 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27489 if (using_split_stack && split_stack_arg_pointer_used_p ())
27490 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27491 }
27492
27493 /* Output .extern statements for the save/restore routines we use. */
27494
27495 static void
27496 rs6000_output_savres_externs (FILE *file)
27497 {
27498 rs6000_stack_t *info = rs6000_stack_info ();
27499
27500 if (TARGET_DEBUG_STACK)
27501 debug_stack_info (info);
27502
27503 /* Write .extern for any function we will call to save and restore
27504 fp values. */
27505 if (info->first_fp_reg_save < 64
27506 && !TARGET_MACHO
27507 && !TARGET_ELF)
27508 {
27509 char *name;
27510 int regno = info->first_fp_reg_save - 32;
27511
27512 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27513 {
27514 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27515 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27516 name = rs6000_savres_routine_name (regno, sel);
27517 fprintf (file, "\t.extern %s\n", name);
27518 }
27519 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27520 {
27521 bool lr = (info->savres_strategy
27522 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27523 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27524 name = rs6000_savres_routine_name (regno, sel);
27525 fprintf (file, "\t.extern %s\n", name);
27526 }
27527 }
27528 }
27529
27530 /* Write function prologue. */
27531
27532 static void
27533 rs6000_output_function_prologue (FILE *file)
27534 {
27535 if (!cfun->is_thunk)
27536 {
27537 rs6000_output_savres_externs (file);
27538 #ifdef USING_ELFOS_H
27539 const char *curr_machine = rs6000_machine_from_flags ();
27540 if (rs6000_machine != curr_machine)
27541 {
27542 rs6000_machine = curr_machine;
27543 emit_asm_machine ();
27544 }
27545 #endif
27546 }
27547
27548 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27549 immediately after the global entry point label. */
27550 if (rs6000_global_entry_point_needed_p ())
27551 {
27552 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27553
27554 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27555
27556 if (TARGET_CMODEL != CMODEL_LARGE)
27557 {
27558 /* In the small and medium code models, we assume the TOC is less
27559 than 2 GB away from the text section, so it can be computed via the
27560 following two-instruction sequence. */
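/* I.e. the sequence emitted just below, with .LCFn standing for
the internal label generated here:

0: addis 2,12,.TOC.-.LCFn@ha
addi 2,2,.TOC.-.LCFn@l */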
27561 char buf[256];
27562
27563 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27564 fprintf (file, "0:\taddis 2,12,.TOC.-");
27565 assemble_name (file, buf);
27566 fprintf (file, "@ha\n");
27567 fprintf (file, "\taddi 2,2,.TOC.-");
27568 assemble_name (file, buf);
27569 fprintf (file, "@l\n");
27570 }
27571 else
27572 {
27573 /* In the large code model, we allow arbitrary offsets between the
27574 TOC and the text section, so we have to load the offset from
27575 memory. The data field is emitted directly before the global
27576 entry point in rs6000_elf_declare_function_name. */
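/* The emitted sequence is thus, with .LCLn holding the precomputed
TOC offset and .LCFn the label above:

ld 2,.LCLn-.LCFn(12)
add 2,2,12 */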
27577 char buf[256];
27578
27579 #ifdef HAVE_AS_ENTRY_MARKERS
27580 /* If supported by the linker, emit a marker relocation. If the
27581 total code size of the final executable or shared library
27582 happens to fit into 2 GB after all, the linker will replace
27583 this code sequence with the sequence for the small or medium
27584 code model. */
27585 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27586 #endif
27587 fprintf (file, "\tld 2,");
27588 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27589 assemble_name (file, buf);
27590 fprintf (file, "-");
27591 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27592 assemble_name (file, buf);
27593 fprintf (file, "(12)\n");
27594 fprintf (file, "\tadd 2,2,12\n");
27595 }
27596
27597 fputs ("\t.localentry\t", file);
27598 assemble_name (file, name);
27599 fputs (",.-", file);
27600 assemble_name (file, name);
27601 fputs ("\n", file);
27602 }
27603
27604 /* Output -mprofile-kernel code. This needs to be done here instead of
27605 in output_function_profile since it must go after the ELFv2 ABI
27606 local entry point. */
27607 if (TARGET_PROFILE_KERNEL && crtl->profile)
27608 {
27609 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27610 gcc_assert (!TARGET_32BIT);
27611
27612 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27613
27614 /* In the ELFv2 ABI we have no compiler stack word. It must be
27615 the responsibility of _mcount to preserve the static chain
27616 register if required. */
27617 if (DEFAULT_ABI != ABI_ELFv2
27618 && cfun->static_chain_decl != NULL)
27619 {
27620 asm_fprintf (file, "\tstd %s,24(%s)\n",
27621 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27622 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27623 asm_fprintf (file, "\tld %s,24(%s)\n",
27624 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27625 }
27626 else
27627 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27628 }
27629
27630 rs6000_pic_labelno++;
27631 }
27632
27633 /* -mprofile-kernel code calls mcount before the function prolog,
27634 so a profiled leaf function should stay a leaf function. */
27635 static bool
27636 rs6000_keep_leaf_when_profiled ()
27637 {
27638 return TARGET_PROFILE_KERNEL;
27639 }
27640
27641 /* Non-zero if vmx regs are restored before the frame pop, zero if
27642 we restore after the pop when possible. */
27643 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27644
27645 /* Restoring cr is a two-step process: loading a reg from the frame
27646 save, then moving the reg to cr. For ABI_V4 we must let the
27647 unwinder know that the stack location is no longer valid at or
27648 before the stack deallocation, but we can't emit a cfa_restore for
27649 cr at the stack deallocation like we do for other registers.
27650 The trouble is that it is possible for the move to cr to be
27651 scheduled after the stack deallocation. So say exactly where cr
27652 is located on each of the two insns. */
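/* A sketch of the resulting annotations (offsets and the CR-save
GPR are illustrative):

lwz 12,<cr_save_offset>(1) -- REG_CFA_REGISTER: cr2 is in r12
... stack deallocation ...
mtcrf <mask>,12 -- REG_CFA_RESTORE: cr2

so the unwinder knows where cr lives on both sides of the move. */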
27653
27654 static rtx
27655 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27656 {
27657 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27658 rtx reg = gen_rtx_REG (SImode, regno);
27659 rtx_insn *insn = emit_move_insn (reg, mem);
27660
27661 if (!exit_func && DEFAULT_ABI == ABI_V4)
27662 {
27663 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27664 rtx set = gen_rtx_SET (reg, cr);
27665
27666 add_reg_note (insn, REG_CFA_REGISTER, set);
27667 RTX_FRAME_RELATED_P (insn) = 1;
27668 }
27669 return reg;
27670 }
27671
27672 /* Reload CR from REG. */
27673
27674 static void
27675 restore_saved_cr (rtx reg, bool using_mfcr_multiple, bool exit_func)
27676 {
27677 int count = 0;
27678 int i;
27679
27680 if (using_mfcr_multiple)
27681 {
27682 for (i = 0; i < 8; i++)
27683 if (save_reg_p (CR0_REGNO + i))
27684 count++;
27685 gcc_assert (count);
27686 }
27687
27688 if (using_mfcr_multiple && count > 1)
27689 {
27690 rtx_insn *insn;
27691 rtvec p;
27692 int ndx;
27693
27694 p = rtvec_alloc (count);
27695
27696 ndx = 0;
27697 for (i = 0; i < 8; i++)
27698 if (save_reg_p (CR0_REGNO + i))
27699 {
27700 rtvec r = rtvec_alloc (2);
27701 RTVEC_ELT (r, 0) = reg;
27702 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
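/* 1 << (7-i) is the mtcrf-style field mask: the high bit
selects CR0, so e.g. CR2 (i == 2) yields 0x20. */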
27703 RTVEC_ELT (p, ndx) =
27704 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27705 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27706 ndx++;
27707 }
27708 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27709 gcc_assert (ndx == count);
27710
27711 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27712 CR field separately. */
27713 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27714 {
27715 for (i = 0; i < 8; i++)
27716 if (save_reg_p (CR0_REGNO + i))
27717 add_reg_note (insn, REG_CFA_RESTORE,
27718 gen_rtx_REG (SImode, CR0_REGNO + i));
27719
27720 RTX_FRAME_RELATED_P (insn) = 1;
27721 }
27722 }
27723 else
27724 for (i = 0; i < 8; i++)
27725 if (save_reg_p (CR0_REGNO + i))
27726 {
27727 rtx insn = emit_insn (gen_movsi_to_cr_one
27728 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27729
27730 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27731 CR field separately, attached to the insn that in fact
27732 restores this particular CR field. */
27733 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27734 {
27735 add_reg_note (insn, REG_CFA_RESTORE,
27736 gen_rtx_REG (SImode, CR0_REGNO + i));
27737
27738 RTX_FRAME_RELATED_P (insn) = 1;
27739 }
27740 }
27741
27742 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27743 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27744 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27745 {
27746 rtx_insn *insn = get_last_insn ();
27747 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27748
27749 add_reg_note (insn, REG_CFA_RESTORE, cr);
27750 RTX_FRAME_RELATED_P (insn) = 1;
27751 }
27752 }
27753
27754 /* Like cr, the move to lr instruction can be scheduled after the
27755 stack deallocation, but unlike cr, its stack frame save is still
27756 valid. So we only need to emit the cfa_restore on the correct
27757 instruction. */
27758
27759 static void
27760 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27761 {
27762 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27763 rtx reg = gen_rtx_REG (Pmode, regno);
27764
27765 emit_move_insn (reg, mem);
27766 }
27767
27768 static void
27769 restore_saved_lr (int regno, bool exit_func)
27770 {
27771 rtx reg = gen_rtx_REG (Pmode, regno);
27772 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27773 rtx_insn *insn = emit_move_insn (lr, reg);
27774
27775 if (!exit_func && flag_shrink_wrap)
27776 {
27777 add_reg_note (insn, REG_CFA_RESTORE, lr);
27778 RTX_FRAME_RELATED_P (insn) = 1;
27779 }
27780 }
27781
27782 static rtx
27783 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27784 {
27785 if (DEFAULT_ABI == ABI_ELFv2)
27786 {
27787 int i;
27788 for (i = 0; i < 8; i++)
27789 if (save_reg_p (CR0_REGNO + i))
27790 {
27791 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27792 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27793 cfa_restores);
27794 }
27795 }
27796 else if (info->cr_save_p)
27797 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27798 gen_rtx_REG (SImode, CR2_REGNO),
27799 cfa_restores);
27800
27801 if (info->lr_save_p)
27802 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27803 gen_rtx_REG (Pmode, LR_REGNO),
27804 cfa_restores);
27805 return cfa_restores;
27806 }
27807
27808 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27809 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288
27810 bytes below the stack pointer that are not clobbered by signals. */
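/* E.g. with the 64-bit 288-byte cushion, an offset of -300 may be
clobbered (the function returns true), while -200 is protected. */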
27811
27812 static inline bool
27813 offset_below_red_zone_p (HOST_WIDE_INT offset)
27814 {
27815 return offset < (DEFAULT_ABI == ABI_V4
27816 ? 0
27817 : TARGET_32BIT ? -220 : -288);
27818 }
27819
27820 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27821
27822 static void
27823 emit_cfa_restores (rtx cfa_restores)
27824 {
27825 rtx_insn *insn = get_last_insn ();
27826 rtx *loc = &REG_NOTES (insn);
27827
27828 while (*loc)
27829 loc = &XEXP (*loc, 1);
27830 *loc = cfa_restores;
27831 RTX_FRAME_RELATED_P (insn) = 1;
27832 }
27833
27834 /* Emit function epilogue as insns. */
27835
27836 void
27837 rs6000_emit_epilogue (enum epilogue_type epilogue_type)
27838 {
27839 HOST_WIDE_INT frame_off = 0;
27840 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27841 rtx frame_reg_rtx = sp_reg_rtx;
27842 rtx cfa_restores = NULL_RTX;
27843 rtx insn;
27844 rtx cr_save_reg = NULL_RTX;
27845 machine_mode reg_mode = Pmode;
27846 int reg_size = TARGET_32BIT ? 4 : 8;
27847 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27848 int fp_reg_size = 8;
27849 int i;
27850 unsigned ptr_regno;
27851
27852 rs6000_stack_t *info = rs6000_stack_info ();
27853
27854 if (epilogue_type == EPILOGUE_TYPE_NORMAL && crtl->calls_eh_return)
27855 epilogue_type = EPILOGUE_TYPE_EH_RETURN;
27856
27857 int strategy = info->savres_strategy;
27858 bool using_load_multiple = !!(strategy & REST_MULTIPLE);
27859 bool restoring_GPRs_inline = !!(strategy & REST_INLINE_GPRS);
27860 bool restoring_FPRs_inline = !!(strategy & REST_INLINE_FPRS);
27861 if (epilogue_type == EPILOGUE_TYPE_SIBCALL)
27862 {
27863 restoring_GPRs_inline = true;
27864 restoring_FPRs_inline = true;
27865 }
27866
27867 bool using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27868 || rs6000_tune == PROCESSOR_PPC603
27869 || rs6000_tune == PROCESSOR_PPC750
27870 || optimize_size);
27871
27872 /* Restore via the backchain when we have a large frame, since this
27873 is more efficient than an addis, addi pair. The second condition
27874 here will not trigger at the moment; we don't actually need a
27875 frame pointer for alloca, but the generic parts of the compiler
27876 give us one anyway. */
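/* A minimal sketch of the backchain path used below: the word at
0(r1) holds the caller's stack pointer, so a single load such as

ld 11,0(1)

(lwz on 32-bit) recovers the frame top however large the frame is. */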
27877 bool use_backchain_to_restore_sp
27878 = (info->total_size + (info->lr_save_p ? info->lr_save_offset : 0) > 32767
27879 || (cfun->calls_alloca && !frame_pointer_needed));
27880
27881 bool restore_lr = (info->lr_save_p
27882 && (restoring_FPRs_inline
27883 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27884 && (restoring_GPRs_inline
27885 || info->first_fp_reg_save < 64)
27886 && !cfun->machine->lr_is_wrapped_separately);
27887
27888
27889 if (WORLD_SAVE_P (info))
27890 {
27891 gcc_assert (epilogue_type != EPILOGUE_TYPE_SIBCALL);
27892
27893 /* eh_rest_world_r10 will return to the location saved in the LR
27894 stack slot (which is not likely to be our caller.)
27895 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27896 rest_world is similar, except any R10 parameter is ignored.
27897 The exception-handling stuff that was here in 2.95 is no
27898 longer necessary. */
27899
27900 rtvec p;
27901 p = rtvec_alloc (9
27902 + 32 - info->first_gp_reg_save
27903 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27904 + 63 + 1 - info->first_fp_reg_save);
27905
27906 const char *rname;
27907 switch (epilogue_type)
27908 {
27909 case EPILOGUE_TYPE_NORMAL:
27910 rname = ggc_strdup ("*rest_world");
27911 break;
27912
27913 case EPILOGUE_TYPE_EH_RETURN:
27914 rname = ggc_strdup ("*eh_rest_world_r10");
27915 break;
27916
27917 default:
27918 gcc_unreachable ();
27919 }
27920
27921 int j = 0;
27922 RTVEC_ELT (p, j++) = ret_rtx;
27923 RTVEC_ELT (p, j++)
27924 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, rname));
27925 /* The instruction pattern requires a clobber here;
27926 it is shared with the restVEC helper. */
27927 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
27928
27929 {
27930 /* CR register traditionally saved as CR2. */
27931 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27932 RTVEC_ELT (p, j++)
27933 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27934 if (flag_shrink_wrap)
27935 {
27936 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27937 gen_rtx_REG (Pmode, LR_REGNO),
27938 cfa_restores);
27939 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27940 }
27941 }
27942
27943 int i;
27944 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27945 {
27946 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27947 RTVEC_ELT (p, j++)
27948 = gen_frame_load (reg,
27949 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27950 if (flag_shrink_wrap
27951 && save_reg_p (info->first_gp_reg_save + i))
27952 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27953 }
27954 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27955 {
27956 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27957 RTVEC_ELT (p, j++)
27958 = gen_frame_load (reg,
27959 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27960 if (flag_shrink_wrap
27961 && save_reg_p (info->first_altivec_reg_save + i))
27962 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27963 }
27964 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27965 {
27966 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27967 info->first_fp_reg_save + i);
27968 RTVEC_ELT (p, j++)
27969 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27970 if (flag_shrink_wrap
27971 && save_reg_p (info->first_fp_reg_save + i))
27972 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27973 }
27974 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
27975 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
27976 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
27977 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
27978 RTVEC_ELT (p, j++)
27979 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27980 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27981
27982 if (flag_shrink_wrap)
27983 {
27984 REG_NOTES (insn) = cfa_restores;
27985 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27986 RTX_FRAME_RELATED_P (insn) = 1;
27987 }
27988 return;
27989 }
27990
27991 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27992 if (info->push_p)
27993 frame_off = info->total_size;
27994
27995 /* Restore AltiVec registers if we must do so before adjusting the
27996 stack. */
27997 if (info->altivec_size != 0
27998 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27999 || (DEFAULT_ABI != ABI_V4
28000 && offset_below_red_zone_p (info->altivec_save_offset))))
28001 {
28002 int i;
28003 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28004
28005 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28006 if (use_backchain_to_restore_sp)
28007 {
28008 int frame_regno = 11;
28009
28010 if ((strategy & REST_INLINE_VRS) == 0)
28011 {
28012 /* Of r11 and r12, select the one not clobbered by an
28013 out-of-line restore function for the frame register. */
28014 frame_regno = 11 + 12 - scratch_regno;
28015 }
28016 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28017 emit_move_insn (frame_reg_rtx,
28018 gen_rtx_MEM (Pmode, sp_reg_rtx));
28019 frame_off = 0;
28020 }
28021 else if (frame_pointer_needed)
28022 frame_reg_rtx = hard_frame_pointer_rtx;
28023
28024 if ((strategy & REST_INLINE_VRS) == 0)
28025 {
28026 int end_save = info->altivec_save_offset + info->altivec_size;
28027 int ptr_off;
28028 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28029 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28030
28031 if (end_save + frame_off != 0)
28032 {
28033 rtx offset = GEN_INT (end_save + frame_off);
28034
28035 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28036 }
28037 else
28038 emit_move_insn (ptr_reg, frame_reg_rtx);
28039
28040 ptr_off = -end_save;
28041 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28042 info->altivec_save_offset + ptr_off,
28043 0, V4SImode, SAVRES_VR);
28044 }
28045 else
28046 {
28047 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28048 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28049 {
28050 rtx addr, areg, mem, insn;
28051 rtx reg = gen_rtx_REG (V4SImode, i);
28052 HOST_WIDE_INT offset
28053 = (info->altivec_save_offset + frame_off
28054 + 16 * (i - info->first_altivec_reg_save));
28055
28056 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28057 {
28058 mem = gen_frame_mem (V4SImode,
28059 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28060 GEN_INT (offset)));
28061 insn = gen_rtx_SET (reg, mem);
28062 }
28063 else
28064 {
28065 areg = gen_rtx_REG (Pmode, 0);
28066 emit_move_insn (areg, GEN_INT (offset));
28067
28068 /* AltiVec addressing mode is [reg+reg]. */
28069 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28070 mem = gen_frame_mem (V4SImode, addr);
28071
28072 /* Rather than emitting a generic move, force use of the
28073 lvx instruction, which we always want. In particular we
28074 don't want lxvd2x/xxpermdi for little endian. */
28075 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28076 }
28077
28078 (void) emit_insn (insn);
28079 }
28080 }
28081
28082 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28083 if (((strategy & REST_INLINE_VRS) == 0
28084 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28085 && (flag_shrink_wrap
28086 || (offset_below_red_zone_p
28087 (info->altivec_save_offset
28088 + 16 * (i - info->first_altivec_reg_save))))
28089 && save_reg_p (i))
28090 {
28091 rtx reg = gen_rtx_REG (V4SImode, i);
28092 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28093 }
28094 }
28095
28096 /* Restore VRSAVE if we must do so before adjusting the stack. */
28097 if (info->vrsave_size != 0
28098 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28099 || (DEFAULT_ABI != ABI_V4
28100 && offset_below_red_zone_p (info->vrsave_save_offset))))
28101 {
28102 rtx reg;
28103
28104 if (frame_reg_rtx == sp_reg_rtx)
28105 {
28106 if (use_backchain_to_restore_sp)
28107 {
28108 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28109 emit_move_insn (frame_reg_rtx,
28110 gen_rtx_MEM (Pmode, sp_reg_rtx));
28111 frame_off = 0;
28112 }
28113 else if (frame_pointer_needed)
28114 frame_reg_rtx = hard_frame_pointer_rtx;
28115 }
28116
28117 reg = gen_rtx_REG (SImode, 12);
28118 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28119 info->vrsave_save_offset + frame_off));
28120
28121 emit_insn (generate_set_vrsave (reg, info, 1));
28122 }
28123
28124 insn = NULL_RTX;
28125 /* If we have a large stack frame, restore the old stack pointer
28126 using the backchain. */
28127 if (use_backchain_to_restore_sp)
28128 {
28129 if (frame_reg_rtx == sp_reg_rtx)
28130 {
28131 /* Under V.4, don't reset the stack pointer until after we're done
28132 loading the saved registers. */
28133 if (DEFAULT_ABI == ABI_V4)
28134 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28135
28136 insn = emit_move_insn (frame_reg_rtx,
28137 gen_rtx_MEM (Pmode, sp_reg_rtx));
28138 frame_off = 0;
28139 }
28140 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28141 && DEFAULT_ABI == ABI_V4)
28142 /* frame_reg_rtx has been set up by the altivec restore. */
28143 ;
28144 else
28145 {
28146 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28147 frame_reg_rtx = sp_reg_rtx;
28148 }
28149 }
28150 /* If we have a frame pointer, we can restore the old stack pointer
28151 from it. */
28152 else if (frame_pointer_needed)
28153 {
28154 frame_reg_rtx = sp_reg_rtx;
28155 if (DEFAULT_ABI == ABI_V4)
28156 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28157 /* Prevent reordering memory accesses against stack pointer restore. */
28158 else if (cfun->calls_alloca
28159 || offset_below_red_zone_p (-info->total_size))
28160 rs6000_emit_stack_tie (frame_reg_rtx, true);
28161
28162 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28163 GEN_INT (info->total_size)));
28164 frame_off = 0;
28165 }
28166 else if (info->push_p
28167 && DEFAULT_ABI != ABI_V4
28168 && epilogue_type != EPILOGUE_TYPE_EH_RETURN)
28169 {
28170 /* Prevent reordering memory accesses against stack pointer restore. */
28171 if (cfun->calls_alloca
28172 || offset_below_red_zone_p (-info->total_size))
28173 rs6000_emit_stack_tie (frame_reg_rtx, false);
28174 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28175 GEN_INT (info->total_size)));
28176 frame_off = 0;
28177 }
28178 if (insn && frame_reg_rtx == sp_reg_rtx)
28179 {
28180 if (cfa_restores)
28181 {
28182 REG_NOTES (insn) = cfa_restores;
28183 cfa_restores = NULL_RTX;
28184 }
28185 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28186 RTX_FRAME_RELATED_P (insn) = 1;
28187 }
28188
28189 /* Restore AltiVec registers if we have not done so already. */
28190 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28191 && info->altivec_size != 0
28192 && (DEFAULT_ABI == ABI_V4
28193 || !offset_below_red_zone_p (info->altivec_save_offset)))
28194 {
28195 int i;
28196
28197 if ((strategy & REST_INLINE_VRS) == 0)
28198 {
28199 int end_save = info->altivec_save_offset + info->altivec_size;
28200 int ptr_off;
28201 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28202 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28203 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28204
28205 if (end_save + frame_off != 0)
28206 {
28207 rtx offset = GEN_INT (end_save + frame_off);
28208
28209 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28210 }
28211 else
28212 emit_move_insn (ptr_reg, frame_reg_rtx);
28213
28214 ptr_off = -end_save;
28215 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28216 info->altivec_save_offset + ptr_off,
28217 0, V4SImode, SAVRES_VR);
28218 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28219 {
28220 /* Frame reg was clobbered by out-of-line save. Restore it
28221 from ptr_reg, and if we are calling out-of-line gpr or
28222 fpr restore set up the correct pointer and offset. */
28223 unsigned newptr_regno = 1;
28224 if (!restoring_GPRs_inline)
28225 {
28226 bool lr = info->gp_save_offset + info->gp_size == 0;
28227 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28228 newptr_regno = ptr_regno_for_savres (sel);
28229 end_save = info->gp_save_offset + info->gp_size;
28230 }
28231 else if (!restoring_FPRs_inline)
28232 {
28233 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28234 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28235 newptr_regno = ptr_regno_for_savres (sel);
28236 end_save = info->fp_save_offset + info->fp_size;
28237 }
28238
28239 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28240 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28241
28242 if (end_save + ptr_off != 0)
28243 {
28244 rtx offset = GEN_INT (end_save + ptr_off);
28245
28246 frame_off = -end_save;
28247 if (TARGET_32BIT)
28248 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28249 ptr_reg, offset));
28250 else
28251 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28252 ptr_reg, offset));
28253 }
28254 else
28255 {
28256 frame_off = ptr_off;
28257 emit_move_insn (frame_reg_rtx, ptr_reg);
28258 }
28259 }
28260 }
28261 else
28262 {
28263 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28264 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28265 {
28266 rtx addr, areg, mem, insn;
28267 rtx reg = gen_rtx_REG (V4SImode, i);
28268 HOST_WIDE_INT offset
28269 = (info->altivec_save_offset + frame_off
28270 + 16 * (i - info->first_altivec_reg_save));
28271
28272 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28273 {
28274 mem = gen_frame_mem (V4SImode,
28275 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28276 GEN_INT (offset)));
28277 insn = gen_rtx_SET (reg, mem);
28278 }
28279 else
28280 {
28281 areg = gen_rtx_REG (Pmode, 0);
28282 emit_move_insn (areg, GEN_INT (offset));
28283
28284 /* AltiVec addressing mode is [reg+reg]. */
28285 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28286 mem = gen_frame_mem (V4SImode, addr);
28287
28288 /* Rather than emitting a generic move, force use of the
28289 lvx instruction, which we always want. In particular we
28290 don't want lxvd2x/xxpermdi for little endian. */
28291 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28292 }
28293
28294 (void) emit_insn (insn);
28295 }
28296 }
28297
28298 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28299 if (((strategy & REST_INLINE_VRS) == 0
28300 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28301 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28302 && save_reg_p (i))
28303 {
28304 rtx reg = gen_rtx_REG (V4SImode, i);
28305 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28306 }
28307 }
28308
28309 /* Restore VRSAVE if we have not done so already. */
28310 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28311 && info->vrsave_size != 0
28312 && (DEFAULT_ABI == ABI_V4
28313 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28314 {
28315 rtx reg;
28316
28317 reg = gen_rtx_REG (SImode, 12);
28318 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28319 info->vrsave_save_offset + frame_off));
28320
28321 emit_insn (generate_set_vrsave (reg, info, 1));
28322 }
28323
28324 /* If we exit by an out-of-line restore function on ABI_V4 then that
28325 function will deallocate the stack, so we don't need to worry
28326 about the unwinder restoring cr from an invalid stack frame
28327 location. */
28328 bool exit_func = (!restoring_FPRs_inline
28329 || (!restoring_GPRs_inline
28330 && info->first_fp_reg_save == 64));
28331
28332 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28333 *separate* slots if the routine calls __builtin_eh_return, so
28334 that they can be independently restored by the unwinder. */
28335 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28336 {
28337 int i, cr_off = info->ehcr_offset;
28338
28339 for (i = 0; i < 8; i++)
28340 if (!call_used_regs[CR0_REGNO + i])
28341 {
28342 rtx reg = gen_rtx_REG (SImode, 0);
28343 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28344 cr_off + frame_off));
28345
28346 insn = emit_insn (gen_movsi_to_cr_one
28347 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28348
28349 if (!exit_func && flag_shrink_wrap)
28350 {
28351 add_reg_note (insn, REG_CFA_RESTORE,
28352 gen_rtx_REG (SImode, CR0_REGNO + i));
28353
28354 RTX_FRAME_RELATED_P (insn) = 1;
28355 }
28356
28357 cr_off += reg_size;
28358 }
28359 }
28360
28361 /* Get the old lr if we saved it. If we are restoring registers
28362 out-of-line, then the out-of-line routines can do this for us. */
28363 if (restore_lr && restoring_GPRs_inline)
28364 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28365
28366 /* Get the old cr if we saved it. */
28367 if (info->cr_save_p)
28368 {
28369 unsigned cr_save_regno = 12;
28370
28371 if (!restoring_GPRs_inline)
28372 {
28373 /* Ensure we don't use the register used by the out-of-line
28374 gpr register restore below. */
28375 bool lr = info->gp_save_offset + info->gp_size == 0;
28376 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28377 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28378
28379 if (gpr_ptr_regno == 12)
28380 cr_save_regno = 11;
28381 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28382 }
28383 else if (REGNO (frame_reg_rtx) == 12)
28384 cr_save_regno = 11;
28385
28386 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28387 info->cr_save_offset + frame_off,
28388 exit_func);
28389 }
28390
28391 /* Set LR here to try to overlap restores below. */
28392 if (restore_lr && restoring_GPRs_inline)
28393 restore_saved_lr (0, exit_func);
28394
28395 /* Load exception handler data registers, if needed. */
28396 if (epilogue_type == EPILOGUE_TYPE_EH_RETURN)
28397 {
28398 unsigned int i, regno;
28399
28400 if (TARGET_AIX)
28401 {
28402 rtx reg = gen_rtx_REG (reg_mode, 2);
28403 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28404 frame_off + RS6000_TOC_SAVE_SLOT));
28405 }
28406
28407 for (i = 0; ; ++i)
28408 {
28409 rtx mem;
28410
28411 regno = EH_RETURN_DATA_REGNO (i);
28412 if (regno == INVALID_REGNUM)
28413 break;
28414
28415 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28416 info->ehrd_offset + frame_off
28417 + reg_size * (int) i);
28418
28419 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28420 }
28421 }
28422
28423 /* Restore GPRs. This is done as a PARALLEL if we are using
28424 the load-multiple instructions. */
28425 if (!restoring_GPRs_inline)
28426 {
28427 /* We are jumping to an out-of-line function. */
28428 rtx ptr_reg;
28429 int end_save = info->gp_save_offset + info->gp_size;
28430 bool can_use_exit = end_save == 0;
28431 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28432 int ptr_off;
28433
28434 /* Emit stack reset code if we need it. */
28435 ptr_regno = ptr_regno_for_savres (sel);
28436 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28437 if (can_use_exit)
28438 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28439 else if (end_save + frame_off != 0)
28440 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28441 GEN_INT (end_save + frame_off)));
28442 else if (REGNO (frame_reg_rtx) != ptr_regno)
28443 emit_move_insn (ptr_reg, frame_reg_rtx);
28444 if (REGNO (frame_reg_rtx) == ptr_regno)
28445 frame_off = -end_save;
28446
28447 if (can_use_exit && info->cr_save_p)
28448 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28449
28450 ptr_off = -end_save;
28451 rs6000_emit_savres_rtx (info, ptr_reg,
28452 info->gp_save_offset + ptr_off,
28453 info->lr_save_offset + ptr_off,
28454 reg_mode, sel);
28455 }
28456 else if (using_load_multiple)
28457 {
28458 rtvec p;
28459 p = rtvec_alloc (32 - info->first_gp_reg_save);
28460 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28461 RTVEC_ELT (p, i)
28462 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28463 frame_reg_rtx,
28464 info->gp_save_offset + frame_off + reg_size * i);
28465 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28466 }
28467 else
28468 {
28469 int offset = info->gp_save_offset + frame_off;
28470 for (i = info->first_gp_reg_save; i < 32; i++)
28471 {
28472 if (save_reg_p (i)
28473 && !cfun->machine->gpr_is_wrapped_separately[i])
28474 {
28475 rtx reg = gen_rtx_REG (reg_mode, i);
28476 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28477 }
28478
28479 offset += reg_size;
28480 }
28481 }
28482
28483 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28484 {
28485 /* If the frame pointer was used then we can't delay emitting
28486 a REG_CFA_DEF_CFA note. This must happen on the insn that
28487 restores the frame pointer, r31. We may have already emitted
28488 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28489 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28490 be harmless if emitted. */
28491 if (frame_pointer_needed)
28492 {
28493 insn = get_last_insn ();
28494 add_reg_note (insn, REG_CFA_DEF_CFA,
28495 plus_constant (Pmode, frame_reg_rtx, frame_off));
28496 RTX_FRAME_RELATED_P (insn) = 1;
28497 }
28498
28499 /* Set up cfa_restores. We always need these when
28500 shrink-wrapping. If not shrink-wrapping then we only need
28501 the cfa_restore when the stack location is no longer valid.
28502 The cfa_restores must be emitted on or before the insn that
28503 invalidates the stack, and of course must not be emitted
28504 before the insn that actually does the restore. The latter
28505 is why it is a bad idea to emit the cfa_restores as a group
28506 on the last instruction here that actually does a restore:
28507 That insn may be reordered with respect to others doing
28508 restores. */
28509 if (flag_shrink_wrap
28510 && !restoring_GPRs_inline
28511 && info->first_fp_reg_save == 64)
28512 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28513
28514 for (i = info->first_gp_reg_save; i < 32; i++)
28515 if (save_reg_p (i)
28516 && !cfun->machine->gpr_is_wrapped_separately[i])
28517 {
28518 rtx reg = gen_rtx_REG (reg_mode, i);
28519 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28520 }
28521 }
28522
28523 if (!restoring_GPRs_inline
28524 && info->first_fp_reg_save == 64)
28525 {
28526 /* We are jumping to an out-of-line function. */
28527 if (cfa_restores)
28528 emit_cfa_restores (cfa_restores);
28529 return;
28530 }
28531
28532 if (restore_lr && !restoring_GPRs_inline)
28533 {
28534 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28535 restore_saved_lr (0, exit_func);
28536 }
28537
28538 /* Restore fpr's if we need to do it without calling a function. */
28539 if (restoring_FPRs_inline)
28540 {
28541 int offset = info->fp_save_offset + frame_off;
28542 for (i = info->first_fp_reg_save; i < 64; i++)
28543 {
28544 if (save_reg_p (i)
28545 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28546 {
28547 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28548 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28549 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28550 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28551 cfa_restores);
28552 }
28553
28554 offset += fp_reg_size;
28555 }
28556 }
28557
28558 /* If we saved cr, restore it here. Just those that were used. */
28559 if (info->cr_save_p)
28560 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28561
28562 /* If this is V.4, unwind the stack pointer after all of the loads
28563 have been done, or set up r11 if we are restoring fp out of line. */
28564 ptr_regno = 1;
28565 if (!restoring_FPRs_inline)
28566 {
28567 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28568 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28569 ptr_regno = ptr_regno_for_savres (sel);
28570 }
28571
28572 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28573 if (REGNO (frame_reg_rtx) == ptr_regno)
28574 frame_off = 0;
28575
28576 if (insn && restoring_FPRs_inline)
28577 {
28578 if (cfa_restores)
28579 {
28580 REG_NOTES (insn) = cfa_restores;
28581 cfa_restores = NULL_RTX;
28582 }
28583 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28584 RTX_FRAME_RELATED_P (insn) = 1;
28585 }
28586
28587 if (epilogue_type == EPILOGUE_TYPE_EH_RETURN)
28588 {
28589 rtx sa = EH_RETURN_STACKADJ_RTX;
28590 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28591 }
28592
28593 if (epilogue_type != EPILOGUE_TYPE_SIBCALL && restoring_FPRs_inline)
28594 {
28595 if (cfa_restores)
28596 {
28597 /* We can't hang the cfa_restores off a simple return,
28598 since the shrink-wrap code sometimes uses an existing
28599 return. This means there might be a path from
28600 pre-prologue code to this return, and dwarf2cfi code
28601 wants the eh_frame unwinder state to be the same on
28602 all paths to any point. So we need to emit the
28603 cfa_restores before the return. For -m64 we really
28604 don't need epilogue cfa_restores at all, except for
28605 this irritating dwarf2cfi with shrink-wrap
28606 requirement; The stack red-zone means eh_frame info
28607 from the prologue telling the unwinder to restore
28608 from the stack is perfectly good right to the end of
28609 the function. */
28610 emit_insn (gen_blockage ());
28611 emit_cfa_restores (cfa_restores);
28612 cfa_restores = NULL_RTX;
28613 }
28614
28615 emit_jump_insn (targetm.gen_simple_return ());
28616 }
28617
28618 if (epilogue_type != EPILOGUE_TYPE_SIBCALL && !restoring_FPRs_inline)
28619 {
28620 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28621 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28622 int elt = 0;
28623 RTVEC_ELT (p, elt++) = ret_rtx;
28624 if (lr)
28625 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28626
28627 /* We have to restore more than two FP registers, so branch to the
28628 restore function. It will return to our caller. */
28629 int i;
28630 int reg;
28631 rtx sym;
28632
28633 if (flag_shrink_wrap)
28634 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28635
28636 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28637 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28638 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28639 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28640
28641 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28642 {
28643 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28644
28645 RTVEC_ELT (p, elt++)
28646 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28647 if (flag_shrink_wrap
28648 && save_reg_p (info->first_fp_reg_save + i))
28649 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28650 }
28651
28652 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28653 }
28654
28655 if (cfa_restores)
28656 {
28657 if (epilogue_type == EPILOGUE_TYPE_SIBCALL)
28658 /* Ensure the cfa_restores are hung off an insn that won't
28659 be reordered above other restores. */
28660 emit_insn (gen_blockage ());
28661
28662 emit_cfa_restores (cfa_restores);
28663 }
28664 }
28665
28666 /* Write function epilogue. */
28667
28668 static void
28669 rs6000_output_function_epilogue (FILE *file)
28670 {
28671 #if TARGET_MACHO
28672 macho_branch_islands ();
28673
28674 {
28675 rtx_insn *insn = get_last_insn ();
28676 rtx_insn *deleted_debug_label = NULL;
28677
28678 /* Mach-O doesn't support labels at the end of objects, so if
28679 it looks like we might want one, take special action.
28680
28681 First, collect any sequence of deleted debug labels. */
28682 while (insn
28683 && NOTE_P (insn)
28684 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28685 {
28686 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28687 notes only, instead set their CODE_LABEL_NUMBER to -1,
28688 otherwise there would be code generation differences
28689 in between -g and -g0. */
28690 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28691 deleted_debug_label = insn;
28692 insn = PREV_INSN (insn);
28693 }
28694
28695 /* Second, if we have:
28696 label:
28697 barrier
28698 then this needs to be detected, so skip past the barrier. */
28699
28700 if (insn && BARRIER_P (insn))
28701 insn = PREV_INSN (insn);
28702
28703 /* Up to now we've only seen notes or barriers. */
28704 if (insn)
28705 {
28706 if (LABEL_P (insn)
28707 || (NOTE_P (insn)
28708 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28709 /* Trailing label: <barrier>. */
28710 fputs ("\tnop\n", file);
28711 else
28712 {
28713 /* Lastly, see if we have a completely empty function body. */
28714 while (insn && ! INSN_P (insn))
28715 insn = PREV_INSN (insn);
28716 /* If we don't find any insns, we've got an empty function body;
28717 i.e. completely empty, without a return or branch. This is
28718 taken as the case where a function body has been removed
28719 because it contains an inline __builtin_unreachable(). GCC
28720 states that reaching __builtin_unreachable() means UB so we're
28721 not obliged to do anything special; however, we want
28722 non-zero-sized function bodies. To meet this, and help the
28723 user out, let's trap the case. */
28724 if (insn == NULL)
28725 fputs ("\ttrap\n", file);
28726 }
28727 }
28728 else if (deleted_debug_label)
28729 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28730 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28731 CODE_LABEL_NUMBER (insn) = -1;
28732 }
28733 #endif
28734
28735 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28736 on its format.
28737
28738 We don't output a traceback table if -finhibit-size-directive was
28739 used. The documentation for -finhibit-size-directive reads
28740 ``don't output a @code{.size} assembler directive, or anything
28741 else that would cause trouble if the function is split in the
28742 middle, and the two halves are placed at locations far apart in
28743 memory.'' The traceback table has this property, since it
28744 includes the offset from the start of the function to the
28745 traceback table itself.
28746
28747 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28748 different traceback table. */
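/* For orientation, the fixed part emitted below has this shape (the
byte values here are made up, not from a real function):

.long 0 # all-zero word marking the tbtab start
.byte 0,0,32,65,0,1,0,2 # format, language, flag bytes,
# fpr/gpr counts, parm counts

with the optional fields following only when optional_tbtab. */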
28749 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28750 && ! flag_inhibit_size_directive
28751 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28752 {
28753 const char *fname = NULL;
28754 const char *language_string = lang_hooks.name;
28755 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28756 int i;
28757 int optional_tbtab;
28758 rs6000_stack_t *info = rs6000_stack_info ();
28759
28760 if (rs6000_traceback == traceback_full)
28761 optional_tbtab = 1;
28762 else if (rs6000_traceback == traceback_part)
28763 optional_tbtab = 0;
28764 else
28765 optional_tbtab = !optimize_size && !TARGET_ELF;
28766
28767 if (optional_tbtab)
28768 {
28769 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28770 while (*fname == '.') /* V.4 encodes . in the name */
28771 fname++;
28772
28773 /* Need label immediately before tbtab, so we can compute
28774 its offset from the function start. */
28775 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28776 ASM_OUTPUT_LABEL (file, fname);
28777 }
28778
28779 /* The .tbtab pseudo-op can only be used for the first eight
28780 expressions, since it can't handle the possibly variable
28781 length fields that follow. However, if you omit the optional
28782 fields, the assembler outputs zeros for all optional fields
28783 anyway, giving each variable length field its minimum length
28784 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28785 pseudo-op at all. */
28786
28787 /* An all-zero word flags the start of the tbtab, for debuggers
28788 that have to find it by searching forward from the entry
28789 point or from the current pc. */
28790 fputs ("\t.long 0\n", file);
28791
28792 /* Tbtab format type. Use format type 0. */
28793 fputs ("\t.byte 0,", file);
28794
28795 /* Language type. Unfortunately, there does not seem to be any
28796 official way to discover the language being compiled, so we
28797 use language_string.
28798 C is 0. Fortran is 1. Ada is 3. C++ is 9.
28799 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28800 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
28801 numbers either, so for now use 0. */
28802 if (lang_GNU_C ()
28803 || ! strcmp (language_string, "GNU GIMPLE")
28804 || ! strcmp (language_string, "GNU Go")
28805 || ! strcmp (language_string, "GNU D")
28806 || ! strcmp (language_string, "libgccjit"))
28807 i = 0;
28808 else if (! strcmp (language_string, "GNU F77")
28809 || lang_GNU_Fortran ())
28810 i = 1;
28811 else if (! strcmp (language_string, "GNU Ada"))
28812 i = 3;
28813 else if (lang_GNU_CXX ()
28814 || ! strcmp (language_string, "GNU Objective-C++"))
28815 i = 9;
28816 else if (! strcmp (language_string, "GNU Java"))
28817 i = 13;
28818 else if (! strcmp (language_string, "GNU Objective-C"))
28819 i = 14;
28820 else
28821 gcc_unreachable ();
28822 fprintf (file, "%d,", i);
28823
28824 /* 8 single bit fields: global linkage (not set for C extern linkage,
28825 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28826 from start of procedure stored in tbtab, internal function, function
28827 has controlled storage, function has no toc, function uses fp,
28828 function logs/aborts fp operations. */
28829 /* Assume that fp operations are used if any fp reg must be saved. */
28830 fprintf (file, "%d,",
28831 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28832
28833 /* 6 bitfields: function is interrupt handler, name present in
28834 proc table, function calls alloca, on condition directives
28835 (controls stack walks, 3 bits), saves condition reg, saves
28836 link reg. */
28837 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28838 set up as a frame pointer, even when there is no alloca call. */
28839 fprintf (file, "%d,",
28840 ((optional_tbtab << 6)
28841 | ((optional_tbtab & frame_pointer_needed) << 5)
28842 | (info->cr_save_p << 1)
28843 | (info->lr_save_p)));
28844
28845 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28846 (6 bits). */
28847 fprintf (file, "%d,",
28848 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28849
28850 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28851 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28852
28853 if (optional_tbtab)
28854 {
28855 /* Compute the parameter info from the function decl argument
28856 list. */
28857 tree decl;
28858 int next_parm_info_bit = 31;
28859
28860 for (decl = DECL_ARGUMENTS (current_function_decl);
28861 decl; decl = DECL_CHAIN (decl))
28862 {
28863 rtx parameter = DECL_INCOMING_RTL (decl);
28864 machine_mode mode = GET_MODE (parameter);
28865
28866 if (REG_P (parameter))
28867 {
28868 if (SCALAR_FLOAT_MODE_P (mode))
28869 {
28870 int bits;
28871
28872 float_parms++;
28873
28874 switch (mode)
28875 {
28876 case E_SFmode:
28877 case E_SDmode:
28878 bits = 0x2;
28879 break;
28880
28881 case E_DFmode:
28882 case E_DDmode:
28883 case E_TFmode:
28884 case E_TDmode:
28885 case E_IFmode:
28886 case E_KFmode:
28887 bits = 0x3;
28888 break;
28889
28890 default:
28891 gcc_unreachable ();
28892 }
28893
28894 /* If only one bit will fit, don't or in this entry. */
28895 if (next_parm_info_bit > 0)
28896 parm_info |= (bits << (next_parm_info_bit - 1));
28897 next_parm_info_bit -= 2;
28898 }
28899 else
28900 {
28901 fixed_parms += ((GET_MODE_SIZE (mode)
28902 + (UNITS_PER_WORD - 1))
28903 / UNITS_PER_WORD);
28904 next_parm_info_bit -= 1;
28905 }
28906 }
28907 }
28908 }
28909
28910   /* Number of fixed point parameters.  */
28911   /* This is actually the number of words of fixed point parameters; thus
28912      an 8-byte struct counts as 2, and the maximum value is 8.  */
28913 fprintf (file, "%d,", fixed_parms);
28914
28915 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28916 all on stack. */
28917 /* This is actually the number of fp registers that hold parameters;
28918 and thus the maximum value is 13. */
28919 /* Set parameters on stack bit if parameters are not in their original
28920 registers, regardless of whether they are on the stack? Xlc
28921 seems to set the bit when not optimizing. */
28922 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28923
28924 if (optional_tbtab)
28925 {
28926 /* Optional fields follow. Some are variable length. */
28927
28928 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28929 float, 11 double float. */
28930 /* There is an entry for each parameter in a register, in the order
28931 that they occur in the parameter list. Any intervening arguments
28932 on the stack are ignored. If the list overflows a long (max
28933 possible length 34 bits) then completely leave off all elements
28934 that don't fit. */
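      /* A worked example (a hypothetical signature, not from the
	 source): for int f (int, double, float) with all three
	 arguments in registers, the int leaves bit 31 as 0 (fixed),
	 the double sets bits 30-29 to 11, and the float sets bits
	 28-27 to 10, so parm_info = 0x70000000 with fixed_parms = 1
	 and float_parms = 2.  */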
28935 /* Only emit this long if there was at least one parameter. */
28936 if (fixed_parms || float_parms)
28937 fprintf (file, "\t.long %d\n", parm_info);
28938
28939 /* Offset from start of code to tb table. */
28940 fputs ("\t.long ", file);
28941 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28942 RS6000_OUTPUT_BASENAME (file, fname);
28943 putc ('-', file);
28944 rs6000_output_function_entry (file, fname);
28945 putc ('\n', file);
28946
28947 /* Interrupt handler mask. */
28948 /* Omit this long, since we never set the interrupt handler bit
28949 above. */
28950
28951 /* Number of CTL (controlled storage) anchors. */
28952 /* Omit this long, since the has_ctl bit is never set above. */
28953
28954 /* Displacement into stack of each CTL anchor. */
28955 /* Omit this list of longs, because there are no CTL anchors. */
28956
28957 /* Length of function name. */
28958 if (*fname == '*')
28959 ++fname;
28960 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28961
28962 /* Function name. */
28963 assemble_string (fname, strlen (fname));
28964
28965 /* Register for alloca automatic storage; this is always reg 31.
28966 Only emit this if the alloca bit was set above. */
28967 if (frame_pointer_needed)
28968 fputs ("\t.byte 31\n", file);
28969
28970 fputs ("\t.align 2\n", file);
28971 }
28972 }
28973
28974 /* Arrange to define .LCTOC1 label, if not already done. */
28975 if (need_toc_init)
28976 {
28977 need_toc_init = 0;
28978 if (!toc_initialized)
28979 {
28980 switch_to_section (toc_section);
28981 switch_to_section (current_function_section ());
28982 }
28983 }
28984 }
28985
28986 /* -fsplit-stack support. */
28987
28988 /* A SYMBOL_REF for __morestack. */
28989 static GTY(()) rtx morestack_ref;
28990
28991 static rtx
28992 gen_add3_const (rtx rt, rtx ra, long c)
28993 {
28994 if (TARGET_64BIT)
28995 return gen_adddi3 (rt, ra, GEN_INT (c));
28996 else
28997 return gen_addsi3 (rt, ra, GEN_INT (c));
28998 }
28999
29000 /* Emit -fsplit-stack prologue, which goes before the regular function
29001 prologue (at local entry point in the case of ELFv2). */
29002
29003 void
29004 rs6000_expand_split_stack_prologue (void)
29005 {
29006 rs6000_stack_t *info = rs6000_stack_info ();
29007 unsigned HOST_WIDE_INT allocate;
29008 long alloc_hi, alloc_lo;
29009 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29010 rtx_insn *insn;
29011
29012 gcc_assert (flag_split_stack && reload_completed);
29013
29014 if (!info->push_p)
29015 return;
29016
29017 if (global_regs[29])
29018 {
29019 error ("%qs uses register r29", "%<-fsplit-stack%>");
29020 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29021 "conflicts with %qD", global_regs_decl[29]);
29022 }
29023
29024 allocate = info->total_size;
29025 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29026 {
29027 sorry ("Stack frame larger than 2G is not supported for "
29028 "%<-fsplit-stack%>");
29029 return;
29030 }
29031 if (morestack_ref == NULL_RTX)
29032 {
29033 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29034 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29035 | SYMBOL_FLAG_FUNCTION);
29036 }
29037
29038 r0 = gen_rtx_REG (Pmode, 0);
29039 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29040 r12 = gen_rtx_REG (Pmode, 12);
29041 emit_insn (gen_load_split_stack_limit (r0));
29042 /* Always emit two insns here to calculate the requested stack,
29043 so that the linker can edit them when adjusting size for calling
29044 non-split-stack code. */
29045 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29046 alloc_lo = -allocate - alloc_hi;
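  /* A worked example: for allocate = 0x12345,
     alloc_hi = (-0x12345 + 0x8000) & ~0xffff = -0x10000 and
     alloc_lo = -0x2345, so r12 = r1 - 0x10000 - 0x2345.  By
     construction alloc_lo always fits in the signed 16-bit
     immediate of addi.  */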
29047 if (alloc_hi != 0)
29048 {
29049 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29050 if (alloc_lo != 0)
29051 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29052 else
29053 emit_insn (gen_nop ());
29054 }
29055 else
29056 {
29057 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29058 emit_insn (gen_nop ());
29059 }
29060
29061 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29062 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29063 ok_label = gen_label_rtx ();
29064 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29065 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29066 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29067 pc_rtx);
29068 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29069 JUMP_LABEL (insn) = ok_label;
29070 /* Mark the jump as very likely to be taken. */
29071 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29072
29073 lr = gen_rtx_REG (Pmode, LR_REGNO);
29074 insn = emit_move_insn (r0, lr);
29075 RTX_FRAME_RELATED_P (insn) = 1;
29076 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29077 RTX_FRAME_RELATED_P (insn) = 1;
29078
29079 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29080 const0_rtx, const0_rtx));
29081 call_fusage = NULL_RTX;
29082 use_reg (&call_fusage, r12);
29083 /* Say the call uses r0, even though it doesn't, to stop regrename
29084 from twiddling with the insns saving lr, trashing args for cfun.
29085 The insns restoring lr are similarly protected by making
29086 split_stack_return use r0. */
29087 use_reg (&call_fusage, r0);
29088 add_function_usage_to (insn, call_fusage);
29089 /* Indicate that this function can't jump to non-local gotos. */
29090 make_reg_eh_region_note_nothrow_nononlocal (insn);
29091 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29092 insn = emit_move_insn (lr, r0);
29093 add_reg_note (insn, REG_CFA_RESTORE, lr);
29094 RTX_FRAME_RELATED_P (insn) = 1;
29095 emit_insn (gen_split_stack_return ());
29096
29097 emit_label (ok_label);
29098 LABEL_NUSES (ok_label) = 1;
29099 }
29100
29101 /* Return the internal arg pointer used for function incoming
29102    arguments.  When -fsplit-stack is in effect, the arg pointer is r12, so we need
29103 to copy it to a pseudo in order for it to be preserved over calls
29104 and suchlike. We'd really like to use a pseudo here for the
29105 internal arg pointer but data-flow analysis is not prepared to
29106 accept pseudos as live at the beginning of a function. */
29107
29108 static rtx
29109 rs6000_internal_arg_pointer (void)
29110 {
29111 if (flag_split_stack
29112 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29113 == NULL))
29115 {
29116 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29117 {
29118 rtx pat;
29119
29120 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29121 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29122
29123 /* Put the pseudo initialization right after the note at the
29124 beginning of the function. */
29125 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29126 gen_rtx_REG (Pmode, 12));
29127 push_topmost_sequence ();
29128 emit_insn_after (pat, get_insns ());
29129 pop_topmost_sequence ();
29130 }
29131 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29132 FIRST_PARM_OFFSET (current_function_decl));
29133 return copy_to_reg (ret);
29134 }
29135 return virtual_incoming_args_rtx;
29136 }
29137
29138 /* We may have to tell the dataflow pass that the split stack prologue
29139 is initializing a register. */
29140
29141 static void
29142 rs6000_live_on_entry (bitmap regs)
29143 {
29144 if (flag_split_stack)
29145 bitmap_set_bit (regs, 12);
29146 }
29147
29148 /* Emit -fsplit-stack dynamic stack allocation space check. */
29149
29150 void
29151 rs6000_split_stack_space_check (rtx size, rtx label)
29152 {
29153 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29154 rtx limit = gen_reg_rtx (Pmode);
29155 rtx requested = gen_reg_rtx (Pmode);
29156 rtx cmp = gen_reg_rtx (CCUNSmode);
29157 rtx jump;
29158
29159 emit_insn (gen_load_split_stack_limit (limit));
29160 if (CONST_INT_P (size))
29161 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29162 else
29163 {
29164 size = force_reg (Pmode, size);
29165 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29166 }
29167 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29168 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29169 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29170 gen_rtx_LABEL_REF (VOIDmode, label),
29171 pc_rtx);
29172 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29173 JUMP_LABEL (jump) = label;
29174 }
29175 \f
29176 /* A C compound statement that outputs the assembler code for a thunk
29177 function, used to implement C++ virtual function calls with
29178 multiple inheritance. The thunk acts as a wrapper around a virtual
29179 function, adjusting the implicit object parameter before handing
29180 control off to the real function.
29181
29182 First, emit code to add the integer DELTA to the location that
29183 contains the incoming first argument. Assume that this argument
29184 contains a pointer, and is the one used to pass the `this' pointer
29185 in C++. This is the incoming argument *before* the function
29186 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29187 values of all other incoming arguments.
29188
29189 After the addition, emit code to jump to FUNCTION, which is a
29190 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29191 not touch the return address. Hence returning from FUNCTION will
29192 return to whoever called the current `thunk'.
29193
29194 The effect must be as if FUNCTION had been called directly with the
29195 adjusted first argument. This macro is responsible for emitting
29196 all of the code for a thunk function; output_function_prologue()
29197 and output_function_epilogue() are not invoked.
29198
29199 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29200 been extracted from it.) It might possibly be useful on some
29201 targets, but probably not.
29202
29203 If you do not define this macro, the target-independent code in the
29204 C++ frontend will generate a less efficient heavyweight thunk that
29205 calls FUNCTION instead of jumping to it. The generic approach does
29206 not support varargs. */
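/* As an illustrative sketch (not verbatim compiler output), a thunk
   with a small DELTA and no VCALL_OFFSET reduces on a 64-bit target
   to something like:

	addi 3,3,DELTA
	b FUNCTION

   i.e. adjust the incoming `this' pointer in r3, then tail-call the
   real function.  */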
29207
29208 static void
29209 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29210 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29211 tree function)
29212 {
29213 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
29214 rtx this_rtx, funexp;
29215 rtx_insn *insn;
29216
29217 reload_completed = 1;
29218 epilogue_completed = 1;
29219
29220 /* Mark the end of the (empty) prologue. */
29221 emit_note (NOTE_INSN_PROLOGUE_END);
29222
29223 /* Find the "this" pointer. If the function returns a structure,
29224 the structure return pointer is in r3. */
29225 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29226 this_rtx = gen_rtx_REG (Pmode, 4);
29227 else
29228 this_rtx = gen_rtx_REG (Pmode, 3);
29229
29230 /* Apply the constant offset, if required. */
29231 if (delta)
29232 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29233
29234 /* Apply the offset from the vtable, if required. */
29235 if (vcall_offset)
29236 {
29237 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29238 rtx tmp = gen_rtx_REG (Pmode, 12);
29239
29240 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29241 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29242 {
29243 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29244 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29245 }
29246 else
29247 {
29248 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29249
29250 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29251 }
29252 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29253 }
29254
29255 /* Generate a tail call to the target function. */
29256 if (!TREE_USED (function))
29257 {
29258 assemble_external (function);
29259 TREE_USED (function) = 1;
29260 }
29261 funexp = XEXP (DECL_RTL (function), 0);
29262 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29263
29264 #if TARGET_MACHO
29265 if (MACHOPIC_INDIRECT)
29266 funexp = machopic_indirect_call_target (funexp);
29267 #endif
29268
29269 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29270 generate sibcall RTL explicitly. */
29271 insn = emit_call_insn (
29272 gen_rtx_PARALLEL (VOIDmode,
29273 gen_rtvec (3,
29274 gen_rtx_CALL (VOIDmode,
29275 funexp, const0_rtx),
29276 gen_rtx_USE (VOIDmode, const0_rtx),
29277 simple_return_rtx)));
29278 SIBLING_CALL_P (insn) = 1;
29279 emit_barrier ();
29280
29281 /* Run just enough of rest_of_compilation to get the insns emitted.
29282 There's not really enough bulk here to make other passes such as
29283 instruction scheduling worth while. Note that use_thunk calls
29284 assemble_start_function and assemble_end_function. */
29285 insn = get_insns ();
29286 shorten_branches (insn);
29287 assemble_start_function (thunk_fndecl, fnname);
29288 final_start_function (insn, file, 1);
29289 final (insn, file, 1);
29290 final_end_function ();
29291 assemble_end_function (thunk_fndecl, fnname);
29292
29293 reload_completed = 0;
29294 epilogue_completed = 0;
29295 }
29296 \f
29297 /* A quick summary of the various types of 'constant-pool tables'
29298 under PowerPC:
29299
29300 Target Flags Name One table per
29301 AIX (none) AIX TOC object file
29302 AIX -mfull-toc AIX TOC object file
29303 AIX -mminimal-toc AIX minimal TOC translation unit
29304 SVR4/EABI (none) SVR4 SDATA object file
29305 SVR4/EABI -fpic SVR4 pic object file
29306 SVR4/EABI -fPIC SVR4 PIC translation unit
29307 SVR4/EABI -mrelocatable EABI TOC function
29308 SVR4/EABI -maix AIX TOC object file
29309 SVR4/EABI -maix -mminimal-toc
29310 AIX minimal TOC translation unit
29311
29312 Name Reg. Set by entries contains:
29313 made by addrs? fp? sum?
29314
29315 AIX TOC 2 crt0 as Y option option
29316 AIX minimal TOC 30 prolog gcc Y Y option
29317 SVR4 SDATA 13 crt0 gcc N Y N
29318 SVR4 pic 30 prolog ld Y not yet N
29319 SVR4 PIC 30 prolog gcc Y option option
29320 EABI TOC 30 prolog gcc Y option option
29321
29322 */
29323
29324 /* Hash functions for the hash table. */
29325
29326 static unsigned
29327 rs6000_hash_constant (rtx k)
29328 {
29329 enum rtx_code code = GET_CODE (k);
29330 machine_mode mode = GET_MODE (k);
29331 unsigned result = (code << 3) ^ mode;
29332 const char *format;
29333 int flen, fidx;
29334
29335 format = GET_RTX_FORMAT (code);
29336 flen = strlen (format);
29337 fidx = 0;
29338
29339 switch (code)
29340 {
29341 case LABEL_REF:
29342 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29343
29344 case CONST_WIDE_INT:
29345 {
29346 int i;
29347 flen = CONST_WIDE_INT_NUNITS (k);
29348 for (i = 0; i < flen; i++)
29349 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29350 return result;
29351 }
29352
29353 case CONST_DOUBLE:
29354 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29355
29356 case CODE_LABEL:
29357 fidx = 3;
29358 break;
29359
29360 default:
29361 break;
29362 }
29363
29364 for (; fidx < flen; fidx++)
29365 switch (format[fidx])
29366 {
29367 case 's':
29368 {
29369 unsigned i, len;
29370 const char *str = XSTR (k, fidx);
29371 len = strlen (str);
29372 result = result * 613 + len;
29373 for (i = 0; i < len; i++)
29374 result = result * 613 + (unsigned) str[i];
29375 break;
29376 }
29377 case 'u':
29378 case 'e':
29379 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29380 break;
29381 case 'i':
29382 case 'n':
29383 result = result * 613 + (unsigned) XINT (k, fidx);
29384 break;
29385 case 'w':
29386 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29387 result = result * 613 + (unsigned) XWINT (k, fidx);
29388 else
29389 {
29390 size_t i;
29391 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29392 result = result * 613 + (unsigned) (XWINT (k, fidx)
29393 >> CHAR_BIT * i);
29394 }
29395 break;
29396 case '0':
29397 break;
29398 default:
29399 gcc_unreachable ();
29400 }
29401
29402 return result;
29403 }
29404
29405 hashval_t
29406 toc_hasher::hash (toc_hash_struct *thc)
29407 {
29408 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29409 }
29410
29411 /* Compare H1 and H2 for equivalence. */
29412
29413 bool
29414 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29415 {
29416 rtx r1 = h1->key;
29417 rtx r2 = h2->key;
29418
29419 if (h1->key_mode != h2->key_mode)
29420 return 0;
29421
29422 return rtx_equal_p (r1, r2);
29423 }
29424
29425 /* These are the names given by the C++ front-end to vtables and
29426    vtable-like objects.
29427 instead, there should be some programmatic way of inquiring as
29428 to whether or not an object is a vtable. */
29429
29430 #define VTABLE_NAME_P(NAME) \
29431 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29432 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29433 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29434 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29435 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
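/* For example, under the Itanium C++ ABI "_ZTV7Derived" names the
   vtable for a class Derived; "_ZTI" prefixes typeinfo objects,
   "_ZTT" VTTs, and "_ZTC" construction vtables, while "_vt." is the
   old GNU mangling for vtables.  */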
29436
29437 #ifdef NO_DOLLAR_IN_LABEL
29438 /* Return a GGC-allocated character string translating dollar signs in
29439    input NAME to underscores, e.g. "foo$bar" becomes "foo_bar".
   Used by XCOFF ASM_OUTPUT_LABELREF.  */
29440
29441 const char *
29442 rs6000_xcoff_strip_dollar (const char *name)
29443 {
29444 char *strip, *p;
29445 const char *q;
29446 size_t len;
29447
29448 q = (const char *) strchr (name, '$');
29449
29450 if (q == 0 || q == name)
29451 return name;
29452
29453 len = strlen (name);
29454 strip = XALLOCAVEC (char, len + 1);
29455 strcpy (strip, name);
29456 p = strip + (q - name);
29457 while (p)
29458 {
29459 *p = '_';
29460 p = strchr (p + 1, '$');
29461 }
29462
29463 return ggc_alloc_string (strip, len);
29464 }
29465 #endif
29466
29467 void
29468 rs6000_output_symbol_ref (FILE *file, rtx x)
29469 {
29470 const char *name = XSTR (x, 0);
29471
29472 /* Currently C++ toc references to vtables can be emitted before it
29473 is decided whether the vtable is public or private. If this is
29474 the case, then the linker will eventually complain that there is
29475 a reference to an unknown section. Thus, for vtables only,
29476 we emit the TOC reference to reference the identifier and not the
29477 symbol. */
29478 if (VTABLE_NAME_P (name))
29479 {
29480 RS6000_OUTPUT_BASENAME (file, name);
29481 }
29482 else
29483 assemble_name (file, name);
29484 }
29485
29486 /* Output a TOC entry. We derive the entry name from what is being
29487 written. */
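/* As a rough illustration (a sketch, not verbatim output): on 64-bit
   AIX without -mminimal-toc, the double constant 1.0 (bit pattern
   0x3ff0000000000000) is emitted as

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   while on 64-bit ELF, or with a minimal TOC, only the value after
   DOUBLE_INT_ASM_OP is emitted under the internal LC label.  */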
29488
29489 void
29490 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29491 {
29492 char buf[256];
29493 const char *name = buf;
29494 rtx base = x;
29495 HOST_WIDE_INT offset = 0;
29496
29497 gcc_assert (!TARGET_NO_TOC);
29498
29499 /* When the linker won't eliminate them, don't output duplicate
29500 TOC entries (this happens on AIX if there is any kind of TOC,
29501 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29502 CODE_LABELs. */
29503 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29504 {
29505 struct toc_hash_struct *h;
29506
29507 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29508 time because GGC is not initialized at that point. */
29509 if (toc_hash_table == NULL)
29510 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29511
29512 h = ggc_alloc<toc_hash_struct> ();
29513 h->key = x;
29514 h->key_mode = mode;
29515 h->labelno = labelno;
29516
29517 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29518 if (*found == NULL)
29519 *found = h;
29520 else /* This is indeed a duplicate.
29521 Set this label equal to that label. */
29522 {
29523 fputs ("\t.set ", file);
29524 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29525 fprintf (file, "%d,", labelno);
29526 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29527 fprintf (file, "%d\n", ((*found)->labelno));
29528
29529 #ifdef HAVE_AS_TLS
29530 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29531 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29532 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29533 {
29534 fputs ("\t.set ", file);
29535 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29536 fprintf (file, "%d,", labelno);
29537 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29538 fprintf (file, "%d\n", ((*found)->labelno));
29539 }
29540 #endif
29541 return;
29542 }
29543 }
29544
29545 /* If we're going to put a double constant in the TOC, make sure it's
29546 aligned properly when strict alignment is on. */
29547 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29548 && STRICT_ALIGNMENT
29549 && GET_MODE_BITSIZE (mode) >= 64
29550       && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29551     ASM_OUTPUT_ALIGN (file, 3);
29553
29554 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29555
29556 /* Handle FP constants specially. Note that if we have a minimal
29557 TOC, things we put here aren't actually in the TOC, so we can allow
29558 FP constants. */
29559 if (CONST_DOUBLE_P (x)
29560 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29561 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29562 {
29563 long k[4];
29564
29565 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29566 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29567 else
29568 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29569
29570 if (TARGET_64BIT)
29571 {
29572 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29573 fputs (DOUBLE_INT_ASM_OP, file);
29574 else
29575 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29576 k[0] & 0xffffffff, k[1] & 0xffffffff,
29577 k[2] & 0xffffffff, k[3] & 0xffffffff);
29578 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29579 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29580 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29581 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29582 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29583 return;
29584 }
29585 else
29586 {
29587 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29588 fputs ("\t.long ", file);
29589 else
29590 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29591 k[0] & 0xffffffff, k[1] & 0xffffffff,
29592 k[2] & 0xffffffff, k[3] & 0xffffffff);
29593 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29594 k[0] & 0xffffffff, k[1] & 0xffffffff,
29595 k[2] & 0xffffffff, k[3] & 0xffffffff);
29596 return;
29597 }
29598 }
29599 else if (CONST_DOUBLE_P (x)
29600 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29601 {
29602 long k[2];
29603
29604 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29605 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29606 else
29607 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29608
29609 if (TARGET_64BIT)
29610 {
29611 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29612 fputs (DOUBLE_INT_ASM_OP, file);
29613 else
29614 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29615 k[0] & 0xffffffff, k[1] & 0xffffffff);
29616 fprintf (file, "0x%lx%08lx\n",
29617 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29618 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29619 return;
29620 }
29621 else
29622 {
29623 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29624 fputs ("\t.long ", file);
29625 else
29626 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29627 k[0] & 0xffffffff, k[1] & 0xffffffff);
29628 fprintf (file, "0x%lx,0x%lx\n",
29629 k[0] & 0xffffffff, k[1] & 0xffffffff);
29630 return;
29631 }
29632 }
29633 else if (CONST_DOUBLE_P (x)
29634 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29635 {
29636 long l;
29637
29638 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29639 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29640 else
29641 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29642
29643 if (TARGET_64BIT)
29644 {
29645 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29646 fputs (DOUBLE_INT_ASM_OP, file);
29647 else
29648 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29649 if (WORDS_BIG_ENDIAN)
29650 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29651 else
29652 fprintf (file, "0x%lx\n", l & 0xffffffff);
29653 return;
29654 }
29655 else
29656 {
29657 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29658 fputs ("\t.long ", file);
29659 else
29660 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29661 fprintf (file, "0x%lx\n", l & 0xffffffff);
29662 return;
29663 }
29664 }
29665 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
29666 {
29667 unsigned HOST_WIDE_INT low;
29668 HOST_WIDE_INT high;
29669
29670 low = INTVAL (x) & 0xffffffff;
29671 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29672
29673 	 /* TOC entries are always Pmode-sized, so on big-endian targets
29674 	    smaller integer constants in the TOC need to be padded.
29675 (This is still a win over putting the constants in
29676 a separate constant pool, because then we'd have
29677 to have both a TOC entry _and_ the actual constant.)
29678
29679 For a 32-bit target, CONST_INT values are loaded and shifted
29680 entirely within `low' and can be stored in one TOC entry. */
29681
29682 /* It would be easy to make this work, but it doesn't now. */
29683 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29684
29685 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29686 {
29687 low |= high << 32;
29688 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29689 high = (HOST_WIDE_INT) low >> 32;
29690 low &= 0xffffffff;
29691 }
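	  /* Worked example: an SImode constant 0x1234 on a 64-bit
	     big-endian target becomes low = 0x1234 << 32, after which
	     high = 0x1234 and low = 0, so the entry is printed as
	     0x123400000000, i.e. the value left-justified in the
	     Pmode-sized TOC slot.  */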
29692
29693 if (TARGET_64BIT)
29694 {
29695 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29696 fputs (DOUBLE_INT_ASM_OP, file);
29697 else
29698 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29699 (long) high & 0xffffffff, (long) low & 0xffffffff);
29700 fprintf (file, "0x%lx%08lx\n",
29701 (long) high & 0xffffffff, (long) low & 0xffffffff);
29702 return;
29703 }
29704 else
29705 {
29706 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29707 {
29708 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29709 fputs ("\t.long ", file);
29710 else
29711 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29712 (long) high & 0xffffffff, (long) low & 0xffffffff);
29713 fprintf (file, "0x%lx,0x%lx\n",
29714 (long) high & 0xffffffff, (long) low & 0xffffffff);
29715 }
29716 else
29717 {
29718 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29719 fputs ("\t.long ", file);
29720 else
29721 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29722 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29723 }
29724 return;
29725 }
29726 }
29727
29728 if (GET_CODE (x) == CONST)
29729 {
29730 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29731 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
29732
29733 base = XEXP (XEXP (x, 0), 0);
29734 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29735 }
29736
29737 switch (GET_CODE (base))
29738 {
29739 case SYMBOL_REF:
29740 name = XSTR (base, 0);
29741 break;
29742
29743 case LABEL_REF:
29744 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29745 CODE_LABEL_NUMBER (XEXP (base, 0)));
29746 break;
29747
29748 case CODE_LABEL:
29749 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29750 break;
29751
29752 default:
29753 gcc_unreachable ();
29754 }
29755
29756 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29757 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29758 else
29759 {
29760 fputs ("\t.tc ", file);
29761 RS6000_OUTPUT_BASENAME (file, name);
29762
29763 if (offset < 0)
29764 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29765 else if (offset)
29766 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29767
29768 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29769 after other TOC symbols, reducing overflow of small TOC access
29770 to [TC] symbols. */
29771 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29772 ? "[TE]," : "[TC],", file);
29773 }
29774
29775 /* Currently C++ toc references to vtables can be emitted before it
29776 is decided whether the vtable is public or private. If this is
29777 the case, then the linker will eventually complain that there is
29778 a TOC reference to an unknown section. Thus, for vtables only,
29779 we emit the TOC reference to reference the symbol and not the
29780 section. */
29781 if (VTABLE_NAME_P (name))
29782 {
29783 RS6000_OUTPUT_BASENAME (file, name);
29784 if (offset < 0)
29785 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29786 else if (offset > 0)
29787 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29788 }
29789 else
29790 output_addr_const (file, x);
29791
29792 #if HAVE_AS_TLS
29793 if (TARGET_XCOFF && SYMBOL_REF_P (base))
29794 {
29795 switch (SYMBOL_REF_TLS_MODEL (base))
29796 {
29797 case 0:
29798 break;
29799 case TLS_MODEL_LOCAL_EXEC:
29800 fputs ("@le", file);
29801 break;
29802 case TLS_MODEL_INITIAL_EXEC:
29803 fputs ("@ie", file);
29804 break;
29805 /* Use global-dynamic for local-dynamic. */
29806 case TLS_MODEL_GLOBAL_DYNAMIC:
29807 case TLS_MODEL_LOCAL_DYNAMIC:
29808 putc ('\n', file);
29809 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29810 fputs ("\t.tc .", file);
29811 RS6000_OUTPUT_BASENAME (file, name);
29812 fputs ("[TC],", file);
29813 output_addr_const (file, x);
29814 fputs ("@m", file);
29815 break;
29816 default:
29817 gcc_unreachable ();
29818 }
29819 }
29820 #endif
29821
29822 putc ('\n', file);
29823 }
29824 \f
29825 /* Output an assembler pseudo-op to write an ASCII string of N characters
29826 starting at P to FILE.
29827
29828 On the RS/6000, we have to do this using the .byte operation and
29829 write out special characters outside the quoted string.
29830 Also, the assembler is broken; very long strings are truncated,
29831 so we must artificially break them up early. */
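/* For example, the three input bytes "Hi\n" come out as

	.byte "Hi"
	.byte 10

   printable characters are collected into quoted strings, while
   everything else is emitted as a decimal .byte.  */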
29832
29833 void
29834 output_ascii (FILE *file, const char *p, int n)
29835 {
29836 char c;
29837 int i, count_string;
29838 const char *for_string = "\t.byte \"";
29839 const char *for_decimal = "\t.byte ";
29840 const char *to_close = NULL;
29841
29842 count_string = 0;
29843 for (i = 0; i < n; i++)
29844 {
29845 c = *p++;
29846 if (c >= ' ' && c < 0177)
29847 {
29848 if (for_string)
29849 fputs (for_string, file);
29850 putc (c, file);
29851
29852 /* Write two quotes to get one. */
29853 if (c == '"')
29854 {
29855 putc (c, file);
29856 ++count_string;
29857 }
29858
29859 for_string = NULL;
29860 for_decimal = "\"\n\t.byte ";
29861 to_close = "\"\n";
29862 ++count_string;
29863
29864 if (count_string >= 512)
29865 {
29866 fputs (to_close, file);
29867
29868 for_string = "\t.byte \"";
29869 for_decimal = "\t.byte ";
29870 to_close = NULL;
29871 count_string = 0;
29872 }
29873 }
29874 else
29875 {
29876 if (for_decimal)
29877 fputs (for_decimal, file);
29878 fprintf (file, "%d", c);
29879
29880 for_string = "\n\t.byte \"";
29881 for_decimal = ", ";
29882 to_close = "\n";
29883 count_string = 0;
29884 }
29885 }
29886
29887 /* Now close the string if we have written one. Then end the line. */
29888 if (to_close)
29889 fputs (to_close, file);
29890 }
29891 \f
29892 /* Generate a unique section name for FILENAME for a section type
29893 represented by SECTION_DESC. Output goes into BUF.
29894
29895 SECTION_DESC can be any string, as long as it is different for each
29896 possible section type.
29897
29898 We name the section in the same manner as xlc. The name begins with an
29899 underscore followed by the filename (after stripping any leading directory
29900 names) with the last period replaced by the string SECTION_DESC. If
29901 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29902 the name. */
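/* For example, FILENAME "subdir/foo.c" with SECTION_DESC "bss" yields
   "_foobss": the directory part is stripped, non-alphanumeric
   characters are dropped, and the last period and everything after
   it are replaced by SECTION_DESC.  */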
29903
29904 void
29905 rs6000_gen_section_name (char **buf, const char *filename,
29906 const char *section_desc)
29907 {
29908 const char *q, *after_last_slash, *last_period = 0;
29909 char *p;
29910 int len;
29911
29912 after_last_slash = filename;
29913 for (q = filename; *q; q++)
29914 {
29915 if (*q == '/')
29916 after_last_slash = q + 1;
29917 else if (*q == '.')
29918 last_period = q;
29919 }
29920
29921 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29922 *buf = (char *) xmalloc (len);
29923
29924 p = *buf;
29925 *p++ = '_';
29926
29927 for (q = after_last_slash; *q; q++)
29928 {
29929 if (q == last_period)
29930 {
29931 strcpy (p, section_desc);
29932 p += strlen (section_desc);
29933 break;
29934 }
29935
29936 else if (ISALNUM (*q))
29937 *p++ = *q;
29938 }
29939
29940 if (last_period == 0)
29941 strcpy (p, section_desc);
29942 else
29943 *p = '\0';
29944 }
29945 \f
29946 /* Emit profile function. */
29947
29948 void
29949 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29950 {
29951 /* Non-standard profiling for kernels, which just saves LR then calls
29952 _mcount without worrying about arg saves. The idea is to change
29953 the function prologue as little as possible as it isn't easy to
29954 account for arg save/restore code added just for _mcount. */
29955 if (TARGET_PROFILE_KERNEL)
29956 return;
29957
29958 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29959 {
29960 #ifndef NO_PROFILE_COUNTERS
29961 # define NO_PROFILE_COUNTERS 0
29962 #endif
29963 if (NO_PROFILE_COUNTERS)
29964 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29965 LCT_NORMAL, VOIDmode);
29966 else
29967 {
29968 char buf[30];
29969 const char *label_name;
29970 rtx fun;
29971
29972 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29973 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29974 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29975
29976 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29977 LCT_NORMAL, VOIDmode, fun, Pmode);
29978 }
29979 }
29980 else if (DEFAULT_ABI == ABI_DARWIN)
29981 {
29982 const char *mcount_name = RS6000_MCOUNT;
29983 int caller_addr_regno = LR_REGNO;
29984
29985 /* Be conservative and always set this, at least for now. */
29986 crtl->uses_pic_offset_table = 1;
29987
29988 #if TARGET_MACHO
29989 /* For PIC code, set up a stub and collect the caller's address
29990 from r0, which is where the prologue puts it. */
29991 if (MACHOPIC_INDIRECT
29992 && crtl->uses_pic_offset_table)
29993 caller_addr_regno = 0;
29994 #endif
29995 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29996 LCT_NORMAL, VOIDmode,
29997 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29998 }
29999 }
30000
30001 /* Write function profiler code. */
30002
30003 void
30004 output_function_profiler (FILE *file, int labelno)
30005 {
30006 char buf[100];
30007
30008 switch (DEFAULT_ABI)
30009 {
30010 default:
30011 gcc_unreachable ();
30012
30013 case ABI_V4:
30014 if (!TARGET_32BIT)
30015 {
30016 warning (0, "no profiling of 64-bit code for this ABI");
30017 return;
30018 }
30019 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30020 fprintf (file, "\tmflr %s\n", reg_names[0]);
30021 if (NO_PROFILE_COUNTERS)
30022 {
30023 asm_fprintf (file, "\tstw %s,4(%s)\n",
30024 reg_names[0], reg_names[1]);
30025 }
30026 else if (TARGET_SECURE_PLT && flag_pic)
30027 {
30028 if (TARGET_LINK_STACK)
30029 {
30030 char name[32];
30031 get_ppc476_thunk_name (name);
30032 asm_fprintf (file, "\tbl %s\n", name);
30033 }
30034 else
30035 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30036 asm_fprintf (file, "\tstw %s,4(%s)\n",
30037 reg_names[0], reg_names[1]);
30038 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30039 asm_fprintf (file, "\taddis %s,%s,",
30040 reg_names[12], reg_names[12]);
30041 assemble_name (file, buf);
30042 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30043 assemble_name (file, buf);
30044 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30045 }
30046 else if (flag_pic == 1)
30047 {
30048 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30049 asm_fprintf (file, "\tstw %s,4(%s)\n",
30050 reg_names[0], reg_names[1]);
30051 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30052 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30053 assemble_name (file, buf);
30054 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30055 }
30056 else if (flag_pic > 1)
30057 {
30058 asm_fprintf (file, "\tstw %s,4(%s)\n",
30059 reg_names[0], reg_names[1]);
30060 /* Now, we need to get the address of the label. */
30061 if (TARGET_LINK_STACK)
30062 {
30063 char name[32];
30064 get_ppc476_thunk_name (name);
30065 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30066 assemble_name (file, buf);
30067 fputs ("-.\n1:", file);
30068 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30069 asm_fprintf (file, "\taddi %s,%s,4\n",
30070 reg_names[11], reg_names[11]);
30071 }
30072 else
30073 {
30074 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30075 assemble_name (file, buf);
30076 fputs ("-.\n1:", file);
30077 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30078 }
30079 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30080 reg_names[0], reg_names[11]);
30081 asm_fprintf (file, "\tadd %s,%s,%s\n",
30082 reg_names[0], reg_names[0], reg_names[11]);
30083 }
30084 else
30085 {
30086 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30087 assemble_name (file, buf);
30088 fputs ("@ha\n", file);
30089 asm_fprintf (file, "\tstw %s,4(%s)\n",
30090 reg_names[0], reg_names[1]);
30091 asm_fprintf (file, "\tla %s,", reg_names[0]);
30092 assemble_name (file, buf);
30093 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30094 }
30095
30096 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30097 fprintf (file, "\tbl %s%s\n",
30098 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30099 break;
30100
30101 case ABI_AIX:
30102 case ABI_ELFv2:
30103 case ABI_DARWIN:
30104 /* Don't do anything, done in output_profile_hook (). */
30105 break;
30106 }
30107 }
30108
30109 \f
30110
30111 /* The following variable holds the last issued insn.  */
30112
30113 static rtx_insn *last_scheduled_insn;
30114
30115 /* The following variable helps to balance issuing of load and
30116    store instructions.  */
30117
30118 static int load_store_pendulum;
30119
30120 /* The following variable helps pair divide insns during scheduling. */
30121 static int divide_cnt;
30122 /* The following variable helps pair and alternate vector and vector load
30123 insns during scheduling. */
30124 static int vec_pairing;
30125
30126
30127 /* Power4 load update and store update instructions are cracked into a
30128 load or store and an integer insn which are executed in the same cycle.
30129 Branches have their own dispatch slot which does not count against the
30130 GCC issue rate, but it changes the program flow so there are no other
30131 instructions to issue in this cycle. */
30132
30133 static int
30134 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30135 {
30136 last_scheduled_insn = insn;
30137 if (GET_CODE (PATTERN (insn)) == USE
30138 || GET_CODE (PATTERN (insn)) == CLOBBER)
30139 {
30140 cached_can_issue_more = more;
30141 return cached_can_issue_more;
30142 }
30143
30144 if (insn_terminates_group_p (insn, current_group))
30145 {
30146 cached_can_issue_more = 0;
30147 return cached_can_issue_more;
30148 }
30149
30150   /* If the insn has no reservation but we reach here anyway, return MORE unchanged.  */
30151 if (recog_memoized (insn) < 0)
30152 return more;
30153
30154 if (rs6000_sched_groups)
30155 {
30156 if (is_microcoded_insn (insn))
30157 cached_can_issue_more = 0;
30158 else if (is_cracked_insn (insn))
30159 cached_can_issue_more = more > 2 ? more - 2 : 0;
30160 else
30161 cached_can_issue_more = more - 1;
30162
30163 return cached_can_issue_more;
30164 }
30165
30166 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30167 return 0;
30168
30169 cached_can_issue_more = more - 1;
30170 return cached_can_issue_more;
30171 }
30172
30173 static int
30174 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30175 {
30176 int r = rs6000_variable_issue_1 (insn, more);
30177 if (verbose)
30178 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30179 return r;
30180 }
30181
30182 /* Adjust the cost of a scheduling dependency.  Return the new cost
30183    of INSN's dependency on DEP_INSN.  COST is the current cost.  */
30184
30185 static int
30186 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30187 unsigned int)
30188 {
30189 enum attr_type attr_type;
30190
30191 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30192 return cost;
30193
30194 switch (dep_type)
30195 {
30196 case REG_DEP_TRUE:
30197 {
30198 /* Data dependency; DEP_INSN writes a register that INSN reads
30199 some cycles later. */
30200
30201 /* Separate a load from a narrower, dependent store. */
30202 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9
30203 || rs6000_tune == PROCESSOR_FUTURE)
30204 && GET_CODE (PATTERN (insn)) == SET
30205 && GET_CODE (PATTERN (dep_insn)) == SET
30206 && MEM_P (XEXP (PATTERN (insn), 1))
30207 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30208 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30209 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30210 return cost + 14;
30211
30212 attr_type = get_attr_type (insn);
30213
30214 switch (attr_type)
30215 {
30216 case TYPE_JMPREG:
30217 /* Tell the first scheduling pass about the latency between
30218 a mtctr and bctr (and mtlr and br/blr). The first
30219 scheduling pass will not know about this latency since
30220 the mtctr instruction, which has the latency associated
30221 to it, will be generated by reload. */
30222 return 4;
30223 case TYPE_BRANCH:
30224 /* Leave some extra cycles between a compare and its
30225 dependent branch, to inhibit expensive mispredicts. */
30226 if ((rs6000_tune == PROCESSOR_PPC603
30227 || rs6000_tune == PROCESSOR_PPC604
30228 || rs6000_tune == PROCESSOR_PPC604e
30229 || rs6000_tune == PROCESSOR_PPC620
30230 || rs6000_tune == PROCESSOR_PPC630
30231 || rs6000_tune == PROCESSOR_PPC750
30232 || rs6000_tune == PROCESSOR_PPC7400
30233 || rs6000_tune == PROCESSOR_PPC7450
30234 || rs6000_tune == PROCESSOR_PPCE5500
30235 || rs6000_tune == PROCESSOR_PPCE6500
30236 || rs6000_tune == PROCESSOR_POWER4
30237 || rs6000_tune == PROCESSOR_POWER5
30238 || rs6000_tune == PROCESSOR_POWER7
30239 || rs6000_tune == PROCESSOR_POWER8
30240 || rs6000_tune == PROCESSOR_POWER9
30241 || rs6000_tune == PROCESSOR_FUTURE
30242 || rs6000_tune == PROCESSOR_CELL)
30243 && recog_memoized (dep_insn)
30244 && (INSN_CODE (dep_insn) >= 0))
30246 switch (get_attr_type (dep_insn))
30247 {
30248 case TYPE_CMP:
30249 case TYPE_FPCOMPARE:
30250 case TYPE_CR_LOGICAL:
30251 return cost + 2;
30252 case TYPE_EXTS:
30253 case TYPE_MUL:
30254 if (get_attr_dot (dep_insn) == DOT_YES)
30255 return cost + 2;
30256 else
30257 break;
30258 case TYPE_SHIFT:
30259 if (get_attr_dot (dep_insn) == DOT_YES
30260 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30261 return cost + 2;
30262 else
30263 break;
30264 default:
30265 break;
30266 }
30267 break;
30268
30269 case TYPE_STORE:
30270 case TYPE_FPSTORE:
30271 if ((rs6000_tune == PROCESSOR_POWER6)
30272 && recog_memoized (dep_insn)
30273 && (INSN_CODE (dep_insn) >= 0))
30274 {
30276 if (GET_CODE (PATTERN (insn)) != SET)
30277 /* If this happens, we have to extend this to schedule
30278 optimally. Return default for now. */
30279 return cost;
30280
30281 /* Adjust the cost for the case where the value written
30282 by a fixed point operation is used as the address
30283 gen value on a store. */
30284 switch (get_attr_type (dep_insn))
30285 {
30286 case TYPE_LOAD:
30287 case TYPE_CNTLZ:
30288 {
30289 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30290 return get_attr_sign_extend (dep_insn)
30291 == SIGN_EXTEND_YES ? 6 : 4;
30292 break;
30293 }
30294 case TYPE_SHIFT:
30295 {
30296 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30297 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30298 6 : 3;
30299 break;
30300 }
30301 case TYPE_INTEGER:
30302 case TYPE_ADD:
30303 case TYPE_LOGICAL:
30304 case TYPE_EXTS:
30305 case TYPE_INSERT:
30306 {
30307 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30308 return 3;
30309 break;
30310 }
30311 case TYPE_STORE:
30312 case TYPE_FPLOAD:
30313 case TYPE_FPSTORE:
30314 {
30315 if (get_attr_update (dep_insn) == UPDATE_YES
30316 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30317 return 3;
30318 break;
30319 }
30320 case TYPE_MUL:
30321 {
30322 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30323 return 17;
30324 break;
30325 }
30326 case TYPE_DIV:
30327 {
30328 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30329 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30330 break;
30331 }
30332 default:
30333 break;
30334 }
30335 }
30336 break;
30337
30338 case TYPE_LOAD:
30339 if ((rs6000_tune == PROCESSOR_POWER6)
30340 && recog_memoized (dep_insn)
30341 && (INSN_CODE (dep_insn) >= 0))
30342 {
30344 /* Adjust the cost for the case where the value written
30345 by a fixed point instruction is used within the address
30346 gen portion of a subsequent load(u)(x) */
30347 switch (get_attr_type (dep_insn))
30348 {
30349 case TYPE_LOAD:
30350 case TYPE_CNTLZ:
30351 {
30352 if (set_to_load_agen (dep_insn, insn))
30353 return get_attr_sign_extend (dep_insn)
30354 == SIGN_EXTEND_YES ? 6 : 4;
30355 break;
30356 }
30357 case TYPE_SHIFT:
30358 {
30359 if (set_to_load_agen (dep_insn, insn))
30360 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30361 6 : 3;
30362 break;
30363 }
30364 case TYPE_INTEGER:
30365 case TYPE_ADD:
30366 case TYPE_LOGICAL:
30367 case TYPE_EXTS:
30368 case TYPE_INSERT:
30369 {
30370 if (set_to_load_agen (dep_insn, insn))
30371 return 3;
30372 break;
30373 }
30374 case TYPE_STORE:
30375 case TYPE_FPLOAD:
30376 case TYPE_FPSTORE:
30377 {
30378 if (get_attr_update (dep_insn) == UPDATE_YES
30379 && set_to_load_agen (dep_insn, insn))
30380 return 3;
30381 break;
30382 }
30383 case TYPE_MUL:
30384 {
30385 if (set_to_load_agen (dep_insn, insn))
30386 return 17;
30387 break;
30388 }
30389 case TYPE_DIV:
30390 {
30391 if (set_to_load_agen (dep_insn, insn))
30392 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30393 break;
30394 }
30395 default:
30396 break;
30397 }
30398 }
30399 break;
30400
30401 case TYPE_FPLOAD:
30402 if ((rs6000_tune == PROCESSOR_POWER6)
30403 && get_attr_update (insn) == UPDATE_NO
30404 && recog_memoized (dep_insn)
30405 && (INSN_CODE (dep_insn) >= 0)
30406 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30407 return 2;
30408
30409 default:
30410 break;
30411 }
30412
30413 /* Fall out to return default cost. */
30414 }
30415 break;
30416
30417 case REG_DEP_OUTPUT:
30418 /* Output dependency; DEP_INSN writes a register that INSN writes some
30419 cycles later. */
30420 if ((rs6000_tune == PROCESSOR_POWER6)
30421 && recog_memoized (dep_insn)
30422 && (INSN_CODE (dep_insn) >= 0))
30423 {
30424 attr_type = get_attr_type (insn);
30425
30426 switch (attr_type)
30427 {
30428 case TYPE_FP:
30429 case TYPE_FPSIMPLE:
30430 if (get_attr_type (dep_insn) == TYPE_FP
30431 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30432 return 1;
30433 break;
30434 case TYPE_FPLOAD:
30435 if (get_attr_update (insn) == UPDATE_NO
30436 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30437 return 2;
30438 break;
30439 default:
30440 break;
30441 }
30442 }
30443 /* Fall through, no cost for output dependency. */
30444 /* FALLTHRU */
30445
30446 case REG_DEP_ANTI:
30447 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30448 cycles later. */
30449 return 0;
30450
30451 default:
30452 gcc_unreachable ();
30453 }
30454
30455 return cost;
30456 }
30457
30458 /* Debug version of rs6000_adjust_cost. */
30459
30460 static int
30461 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30462 int cost, unsigned int dw)
30463 {
30464 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30465
30466 if (ret != cost)
30467 {
30468 const char *dep;
30469
30470 switch (dep_type)
30471 {
30472 	default: dep = "unknown dependency"; break;
30473 	case REG_DEP_TRUE: dep = "data dependency"; break;
30474 	case REG_DEP_OUTPUT: dep = "output dependency"; break;
30475 	case REG_DEP_ANTI: dep = "anti dependency"; break;
30476 }
30477
30478 fprintf (stderr,
30479 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30480 "%s, insn:\n", ret, cost, dep);
30481
30482 debug_rtx (insn);
30483 }
30484
30485 return ret;
30486 }
30487
30488 /* Return true if INSN is microcoded.  Return false otherwise.  */
30490
30491 static bool
30492 is_microcoded_insn (rtx_insn *insn)
30493 {
30494 if (!insn || !NONDEBUG_INSN_P (insn)
30495 || GET_CODE (PATTERN (insn)) == USE
30496 || GET_CODE (PATTERN (insn)) == CLOBBER)
30497 return false;
30498
30499 if (rs6000_tune == PROCESSOR_CELL)
30500 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30501
30502 if (rs6000_sched_groups
30503 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30504 {
30505 enum attr_type type = get_attr_type (insn);
30506 if ((type == TYPE_LOAD
30507 && get_attr_update (insn) == UPDATE_YES
30508 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30509 || ((type == TYPE_LOAD || type == TYPE_STORE)
30510 && get_attr_update (insn) == UPDATE_YES
30511 && get_attr_indexed (insn) == INDEXED_YES)
30512 || type == TYPE_MFCR)
30513 return true;
30514 }
30515
30516 return false;
30517 }
30518
30519 /* The function returns true if INSN is cracked into 2 instructions
30520 by the processor (and therefore occupies 2 issue slots). */
30521
30522 static bool
30523 is_cracked_insn (rtx_insn *insn)
30524 {
30525 if (!insn || !NONDEBUG_INSN_P (insn)
30526 || GET_CODE (PATTERN (insn)) == USE
30527 || GET_CODE (PATTERN (insn)) == CLOBBER)
30528 return false;
30529
30530 if (rs6000_sched_groups
30531 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30532 {
30533 enum attr_type type = get_attr_type (insn);
30534 if ((type == TYPE_LOAD
30535 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30536 && get_attr_update (insn) == UPDATE_NO)
30537 || (type == TYPE_LOAD
30538 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30539 && get_attr_update (insn) == UPDATE_YES
30540 && get_attr_indexed (insn) == INDEXED_NO)
30541 || (type == TYPE_STORE
30542 && get_attr_update (insn) == UPDATE_YES
30543 && get_attr_indexed (insn) == INDEXED_NO)
30544 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30545 && get_attr_update (insn) == UPDATE_YES)
30546 || (type == TYPE_CR_LOGICAL
30547 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30548 || (type == TYPE_EXTS
30549 && get_attr_dot (insn) == DOT_YES)
30550 || (type == TYPE_SHIFT
30551 && get_attr_dot (insn) == DOT_YES
30552 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30553 || (type == TYPE_MUL
30554 && get_attr_dot (insn) == DOT_YES)
30555 || type == TYPE_DIV
30556 || (type == TYPE_INSERT
30557 && get_attr_size (insn) == SIZE_32))
30558 return true;
30559 }
30560
30561 return false;
30562 }
30563
30564 /* The function returns true if INSN can be issued only from
30565 the branch slot. */
30566
30567 static bool
30568 is_branch_slot_insn (rtx_insn *insn)
30569 {
30570 if (!insn || !NONDEBUG_INSN_P (insn)
30571 || GET_CODE (PATTERN (insn)) == USE
30572 || GET_CODE (PATTERN (insn)) == CLOBBER)
30573 return false;
30574
30575 if (rs6000_sched_groups)
30576 {
30577 enum attr_type type = get_attr_type (insn);
30578 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30579 return true;
30580 return false;
30581 }
30582
30583 return false;
30584 }
30585
30586 /* Return true if OUT_INSN sets a value that is used in the address
30587    generation computation of IN_INSN.  */
30588 static bool
30589 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30590 {
30591 rtx out_set, in_set;
30592
30593 /* For performance reasons, only handle the simple case where
30594 both loads are a single_set. */
30595 out_set = single_set (out_insn);
30596 if (out_set)
30597 {
30598 in_set = single_set (in_insn);
30599 if (in_set)
30600 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30601 }
30602
30603 return false;
30604 }
30605
30606 /* Try to determine base/offset/size parts of the given MEM.
30607    Return true if successful, false if any of the values could not
30608    be determined.
30609
30610 This function only looks for REG or REG+CONST address forms.
30611 REG+REG address form will return false. */
30612
30613 static bool
30614 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30615 HOST_WIDE_INT *size)
30616 {
30617 rtx addr_rtx;
30618   if (MEM_SIZE_KNOWN_P (mem))
30619 *size = MEM_SIZE (mem);
30620 else
30621 return false;
30622
30623 addr_rtx = (XEXP (mem, 0));
30624 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30625 addr_rtx = XEXP (addr_rtx, 1);
30626
30627 *offset = 0;
30628 while (GET_CODE (addr_rtx) == PLUS
30629 && CONST_INT_P (XEXP (addr_rtx, 1)))
30630 {
30631 *offset += INTVAL (XEXP (addr_rtx, 1));
30632 addr_rtx = XEXP (addr_rtx, 0);
30633 }
30634 if (!REG_P (addr_rtx))
30635 return false;
30636
30637 *base = addr_rtx;
30638 return true;
30639 }
30640
30641 /* Return true if the target storage location of MEM1 is adjacent
30642    to the target storage location of MEM2.  */
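/* For example, (mem:SI (plus (reg 9) (const_int 8))) and
   (mem:SI (plus (reg 9) (const_int 12))) are adjacent: they share
   the base register and 8 + 4 == 12.  */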
30644
30645 static bool
30646 adjacent_mem_locations (rtx mem1, rtx mem2)
30647 {
30648 rtx reg1, reg2;
30649 HOST_WIDE_INT off1, size1, off2, size2;
30650
30651 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30652 && get_memref_parts (mem2, &reg2, &off2, &size2))
30653 return ((REGNO (reg1) == REGNO (reg2))
30654 && ((off1 + size1 == off2)
30655 || (off2 + size2 == off1)));
30656
30657 return false;
30658 }
30659
30660 /* This function returns true if it can be determined that the two MEM
30661 locations overlap by at least 1 byte based on base reg/offset/size. */
30662
30663 static bool
30664 mem_locations_overlap (rtx mem1, rtx mem2)
30665 {
30666 rtx reg1, reg2;
30667 HOST_WIDE_INT off1, size1, off2, size2;
30668
30669 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30670 && get_memref_parts (mem2, &reg2, &off2, &size2))
30671 return ((REGNO (reg1) == REGNO (reg2))
30672 && (((off1 <= off2) && (off1 + size1 > off2))
30673 || ((off2 <= off1) && (off2 + size2 > off1))));
30674
30675 return false;
30676 }
30677
30678 /* A C statement (sans semicolon) to update the integer scheduling
30679 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30680 INSN earlier, reduce the priority to execute INSN later. Do not
30681 define this macro if you do not need to adjust the scheduling
30682 priorities of insns. */
30683
30684 static int
30685 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30686 {
30687 rtx load_mem, str_mem;
30688 /* On machines (like the 750) which have asymmetric integer units,
30689 where one integer unit can do multiply and divides and the other
30690 can't, reduce the priority of multiply/divide so it is scheduled
30691 before other integer operations. */
30692
30693 #if 0
30694 if (! INSN_P (insn))
30695 return priority;
30696
30697 if (GET_CODE (PATTERN (insn)) == USE)
30698 return priority;
30699
30700 switch (rs6000_tune) {
30701 case PROCESSOR_PPC750:
30702 switch (get_attr_type (insn))
30703 {
30704 default:
30705 break;
30706
30707 case TYPE_MUL:
30708 case TYPE_DIV:
30709 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30710 priority, priority);
30711 if (priority >= 0 && priority < 0x01000000)
30712 priority >>= 3;
30713 break;
30714 }
30715 }
30716 #endif
30717
30718 if (insn_must_be_first_in_group (insn)
30719 && reload_completed
30720 && current_sched_info->sched_max_insns_priority
30721 && rs6000_sched_restricted_insns_priority)
30722 {
30724 /* Prioritize insns that can be dispatched only in the first
30725 dispatch slot. */
30726 if (rs6000_sched_restricted_insns_priority == 1)
30727 /* Attach highest priority to insn. This means that in
30728 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30729 precede 'priority' (critical path) considerations. */
30730 return current_sched_info->sched_max_insns_priority;
30731 else if (rs6000_sched_restricted_insns_priority == 2)
30732 /* Increase priority of insn by a minimal amount. This means that in
30733 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30734 considerations precede dispatch-slot restriction considerations. */
30735 return (priority + 1);
30736 }
30737
30738 if (rs6000_tune == PROCESSOR_POWER6
30739 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30740 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
/* Attach highest priority to insn if the scheduler has just issued two
   stores and this instruction is a load, or two loads and this
   instruction is a store.  Power6 wants loads and stores scheduled
   alternately when possible.  */
30745 return current_sched_info->sched_max_insns_priority;
30746
30747 return priority;
30748 }
30749
30750 /* Return true if the instruction is nonpipelined on the Cell. */
30751 static bool
30752 is_nonpipeline_insn (rtx_insn *insn)
30753 {
30754 enum attr_type type;
30755 if (!insn || !NONDEBUG_INSN_P (insn)
30756 || GET_CODE (PATTERN (insn)) == USE
30757 || GET_CODE (PATTERN (insn)) == CLOBBER)
30758 return false;
30759
30760 type = get_attr_type (insn);
30761 if (type == TYPE_MUL
30762 || type == TYPE_DIV
30763 || type == TYPE_SDIV
30764 || type == TYPE_DDIV
30765 || type == TYPE_SSQRT
30766 || type == TYPE_DSQRT
30767 || type == TYPE_MFCR
30768 || type == TYPE_MFCRF
30769 || type == TYPE_MFJMPR)
30770 {
30771 return true;
30772 }
30773 return false;
30774 }
30775
30776
30777 /* Return how many instructions the machine can issue per cycle. */
30778
30779 static int
30780 rs6000_issue_rate (void)
30781 {
/* Unless scheduling for register pressure, use an issue rate of 1 for
   the first scheduling pass to decrease degradation.  */
30784 if (!reload_completed && !flag_sched_pressure)
30785 return 1;
30786
switch (rs6000_tune)
  {
30788 case PROCESSOR_RS64A:
30789 case PROCESSOR_PPC601: /* ? */
30790 case PROCESSOR_PPC7450:
30791 return 3;
30792 case PROCESSOR_PPC440:
30793 case PROCESSOR_PPC603:
30794 case PROCESSOR_PPC750:
30795 case PROCESSOR_PPC7400:
30796 case PROCESSOR_PPC8540:
30797 case PROCESSOR_PPC8548:
30798 case PROCESSOR_CELL:
30799 case PROCESSOR_PPCE300C2:
30800 case PROCESSOR_PPCE300C3:
30801 case PROCESSOR_PPCE500MC:
30802 case PROCESSOR_PPCE500MC64:
30803 case PROCESSOR_PPCE5500:
30804 case PROCESSOR_PPCE6500:
30805 case PROCESSOR_TITAN:
30806 return 2;
30807 case PROCESSOR_PPC476:
30808 case PROCESSOR_PPC604:
30809 case PROCESSOR_PPC604e:
30810 case PROCESSOR_PPC620:
30811 case PROCESSOR_PPC630:
30812 return 4;
30813 case PROCESSOR_POWER4:
30814 case PROCESSOR_POWER5:
30815 case PROCESSOR_POWER6:
30816 case PROCESSOR_POWER7:
30817 return 5;
30818 case PROCESSOR_POWER8:
30819 return 7;
30820 case PROCESSOR_POWER9:
30821 case PROCESSOR_FUTURE:
30822 return 6;
30823 default:
30824 return 1;
30825 }
30826 }
30827
30828 /* Return how many instructions to look ahead for better insn
30829 scheduling. */
30830
30831 static int
30832 rs6000_use_sched_lookahead (void)
30833 {
30834 switch (rs6000_tune)
30835 {
30836 case PROCESSOR_PPC8540:
30837 case PROCESSOR_PPC8548:
30838 return 4;
30839
30840 case PROCESSOR_CELL:
30841 return (reload_completed ? 8 : 0);
30842
30843 default:
30844 return 0;
30845 }
30846 }
30847
30848 /* We are choosing insn from the ready queue. Return zero if INSN can be
30849 chosen. */
30850 static int
30851 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30852 {
30853 if (ready_index == 0)
30854 return 0;
30855
30856 if (rs6000_tune != PROCESSOR_CELL)
30857 return 0;
30858
30859 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30860
30861 if (!reload_completed
30862 || is_nonpipeline_insn (insn)
30863 || is_microcoded_insn (insn))
30864 return 1;
30865
30866 return 0;
30867 }
30868
30869 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30870 and return true. */
30871
30872 static bool
30873 find_mem_ref (rtx pat, rtx *mem_ref)
30874 {
30875 const char * fmt;
30876 int i, j;
30877
30878 /* stack_tie does not produce any real memory traffic. */
30879 if (tie_operand (pat, VOIDmode))
30880 return false;
30881
30882 if (MEM_P (pat))
30883 {
30884 *mem_ref = pat;
30885 return true;
30886 }
30887
30888 /* Recursively process the pattern. */
30889 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30890
30891 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30892 {
30893 if (fmt[i] == 'e')
30894 {
30895 if (find_mem_ref (XEXP (pat, i), mem_ref))
30896 return true;
30897 }
30898 else if (fmt[i] == 'E')
30899 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30900 {
30901 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30902 return true;
30903 }
30904 }
30905
30906 return false;
30907 }
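/* For instance (illustrative only), given
   PAT = (set (reg:DI 3) (plus:DI (mem:DI (reg:DI 4)) (reg:DI 5))),
   the recursion above descends through the 'e' operands until it
   reaches the inner MEM, which it stores in *MEM_REF.  */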
30908
30909 /* Determine if PAT is a PATTERN of a load insn. */
30910
30911 static bool
30912 is_load_insn1 (rtx pat, rtx *load_mem)
30913 {
if (!pat)
30915 return false;
30916
30917 if (GET_CODE (pat) == SET)
30918 return find_mem_ref (SET_SRC (pat), load_mem);
30919
30920 if (GET_CODE (pat) == PARALLEL)
30921 {
30922 int i;
30923
30924 for (i = 0; i < XVECLEN (pat, 0); i++)
30925 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30926 return true;
30927 }
30928
30929 return false;
30930 }
30931
30932 /* Determine if INSN loads from memory. */
30933
30934 static bool
30935 is_load_insn (rtx insn, rtx *load_mem)
30936 {
30937 if (!insn || !INSN_P (insn))
30938 return false;
30939
30940 if (CALL_P (insn))
30941 return false;
30942
30943 return is_load_insn1 (PATTERN (insn), load_mem);
30944 }
30945
30946 /* Determine if PAT is a PATTERN of a store insn. */
30947
30948 static bool
30949 is_store_insn1 (rtx pat, rtx *str_mem)
30950 {
if (!pat)
30952 return false;
30953
30954 if (GET_CODE (pat) == SET)
30955 return find_mem_ref (SET_DEST (pat), str_mem);
30956
30957 if (GET_CODE (pat) == PARALLEL)
30958 {
30959 int i;
30960
30961 for (i = 0; i < XVECLEN (pat, 0); i++)
30962 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30963 return true;
30964 }
30965
30966 return false;
30967 }
30968
30969 /* Determine if INSN stores to memory. */
30970
30971 static bool
30972 is_store_insn (rtx insn, rtx *str_mem)
30973 {
30974 if (!insn || !INSN_P (insn))
30975 return false;
30976
30977 return is_store_insn1 (PATTERN (insn), str_mem);
30978 }
30979
30980 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30981
30982 static bool
30983 is_power9_pairable_vec_type (enum attr_type type)
30984 {
30985 switch (type)
30986 {
30987 case TYPE_VECSIMPLE:
30988 case TYPE_VECCOMPLEX:
30989 case TYPE_VECDIV:
30990 case TYPE_VECCMP:
30991 case TYPE_VECPERM:
30992 case TYPE_VECFLOAT:
30993 case TYPE_VECFDIV:
30994 case TYPE_VECDOUBLE:
30995 return true;
30996 default:
30997 break;
30998 }
30999 return false;
31000 }
31001
31002 /* Returns whether the dependence between INSN and NEXT is considered
31003 costly by the given target. */
31004
31005 static bool
31006 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31007 {
31008 rtx insn;
31009 rtx next;
31010 rtx load_mem, str_mem;
31011
/* If the flag is not enabled, no dependence is considered costly;
   allow all dependent insns in the same group.
   This is the most aggressive option.  */
31015 if (rs6000_sched_costly_dep == no_dep_costly)
31016 return false;
31017
/* If the flag is set to 1, a dependence is always considered costly;
   do not allow dependent instructions in the same group.
   This is the most conservative option.  */
31021 if (rs6000_sched_costly_dep == all_deps_costly)
31022 return true;
31023
31024 insn = DEP_PRO (dep);
31025 next = DEP_CON (dep);
31026
31027 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31028 && is_load_insn (next, &load_mem)
31029 && is_store_insn (insn, &str_mem))
31030 /* Prevent load after store in the same group. */
31031 return true;
31032
31033 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31034 && is_load_insn (next, &load_mem)
31035 && is_store_insn (insn, &str_mem)
31036 && DEP_TYPE (dep) == REG_DEP_TRUE
31037 && mem_locations_overlap(str_mem, load_mem))
31038 /* Prevent load after store in the same group if it is a true
31039 dependence. */
31040 return true;
31041
31042 /* The flag is set to X; dependences with latency >= X are considered costly,
31043 and will not be scheduled in the same group. */
31044 if (rs6000_sched_costly_dep <= max_dep_latency
31045 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31046 return true;
31047
31048 return false;
31049 }
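/* Worked example (hypothetical numbers): with -msched-costly-dep=3,
   i.e. rs6000_sched_costly_dep == 3, a dependence of cost 4 between
   insns at distance 0 is costly (4 - 0 >= 3), while the same
   dependence at distance 2 is not (4 - 2 < 3).  */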
31050
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL if no such insn is found.  */
31054
31055 static rtx_insn *
31056 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31057 {
31058 if (insn == NULL_RTX || insn == tail)
31059 return NULL;
31060
31061 while (1)
31062 {
31063 insn = NEXT_INSN (insn);
31064 if (insn == NULL_RTX || insn == tail)
31065 return NULL;
31066
31067 if (CALL_P (insn)
31068 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31069 || (NONJUMP_INSN_P (insn)
31070 && GET_CODE (PATTERN (insn)) != USE
31071 && GET_CODE (PATTERN (insn)) != CLOBBER
31072 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31073 break;
31074 }
31075 return insn;
31076 }
31077
31078 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31079
31080 static int
31081 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31082 {
31083 int pos;
31084 int i;
31085 rtx_insn *tmp;
31086 enum attr_type type, type2;
31087
31088 type = get_attr_type (last_scheduled_insn);
31089
31090 /* Try to issue fixed point divides back-to-back in pairs so they will be
31091 routed to separate execution units and execute in parallel. */
31092 if (type == TYPE_DIV && divide_cnt == 0)
31093 {
31094 /* First divide has been scheduled. */
31095 divide_cnt = 1;
31096
/* Scan the ready list looking for another divide; if found, move it
   to the end of the list so it is chosen next.  */
31099 pos = lastpos;
31100 while (pos >= 0)
31101 {
31102 if (recog_memoized (ready[pos]) >= 0
31103 && get_attr_type (ready[pos]) == TYPE_DIV)
31104 {
31105 tmp = ready[pos];
31106 for (i = pos; i < lastpos; i++)
31107 ready[i] = ready[i + 1];
31108 ready[lastpos] = tmp;
31109 break;
31110 }
31111 pos--;
31112 }
31113 }
31114 else
31115 {
/* Last insn was the 2nd divide or not a divide; reset the counter.  */
31117 divide_cnt = 0;
31118
31119 /* The best dispatch throughput for vector and vector load insns can be
31120 achieved by interleaving a vector and vector load such that they'll
31121 dispatch to the same superslice. If this pairing cannot be achieved
31122 then it is best to pair vector insns together and vector load insns
31123 together.
31124
31125 To aid in this pairing, vec_pairing maintains the current state with
31126 the following values:
31127
31128 0 : Initial state, no vecload/vector pairing has been started.
31129
31130 1 : A vecload or vector insn has been issued and a candidate for
31131 pairing has been found and moved to the end of the ready
31132 list. */
31133 if (type == TYPE_VECLOAD)
31134 {
31135 /* Issued a vecload. */
31136 if (vec_pairing == 0)
31137 {
31138 int vecload_pos = -1;
31139 /* We issued a single vecload, look for a vector insn to pair it
31140 with. If one isn't found, try to pair another vecload. */
31141 pos = lastpos;
31142 while (pos >= 0)
31143 {
31144 if (recog_memoized (ready[pos]) >= 0)
31145 {
31146 type2 = get_attr_type (ready[pos]);
31147 if (is_power9_pairable_vec_type (type2))
31148 {
31149 /* Found a vector insn to pair with, move it to the
31150 end of the ready list so it is scheduled next. */
31151 tmp = ready[pos];
31152 for (i = pos; i < lastpos; i++)
31153 ready[i] = ready[i + 1];
31154 ready[lastpos] = tmp;
31155 vec_pairing = 1;
31156 return cached_can_issue_more;
31157 }
31158 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31159 /* Remember position of first vecload seen. */
31160 vecload_pos = pos;
31161 }
31162 pos--;
31163 }
31164 if (vecload_pos >= 0)
31165 {
31166 /* Didn't find a vector to pair with but did find a vecload,
31167 move it to the end of the ready list. */
31168 tmp = ready[vecload_pos];
31169 for (i = vecload_pos; i < lastpos; i++)
31170 ready[i] = ready[i + 1];
31171 ready[lastpos] = tmp;
31172 vec_pairing = 1;
31173 return cached_can_issue_more;
31174 }
31175 }
31176 }
31177 else if (is_power9_pairable_vec_type (type))
31178 {
31179 /* Issued a vector operation. */
31180 if (vec_pairing == 0)
31181 {
31182 int vec_pos = -1;
31183 /* We issued a single vector insn, look for a vecload to pair it
31184 with. If one isn't found, try to pair another vector. */
31185 pos = lastpos;
31186 while (pos >= 0)
31187 {
31188 if (recog_memoized (ready[pos]) >= 0)
31189 {
31190 type2 = get_attr_type (ready[pos]);
31191 if (type2 == TYPE_VECLOAD)
31192 {
31193 /* Found a vecload insn to pair with, move it to the
31194 end of the ready list so it is scheduled next. */
31195 tmp = ready[pos];
31196 for (i = pos; i < lastpos; i++)
31197 ready[i] = ready[i + 1];
31198 ready[lastpos] = tmp;
31199 vec_pairing = 1;
31200 return cached_can_issue_more;
31201 }
31202 else if (is_power9_pairable_vec_type (type2)
31203 && vec_pos == -1)
31204 /* Remember position of first vector insn seen. */
31205 vec_pos = pos;
31206 }
31207 pos--;
31208 }
31209 if (vec_pos >= 0)
31210 {
31211 /* Didn't find a vecload to pair with but did find a vector
31212 insn, move it to the end of the ready list. */
31213 tmp = ready[vec_pos];
31214 for (i = vec_pos; i < lastpos; i++)
31215 ready[i] = ready[i + 1];
31216 ready[lastpos] = tmp;
31217 vec_pairing = 1;
31218 return cached_can_issue_more;
31219 }
31220 }
31221 }
31222
/* We've either finished a vec/vecload pair, couldn't find an insn to
   continue the current pair, or the last insn had nothing to do with
   pairing.  In any case, reset the state.  */
31226 vec_pairing = 0;
31227 }
31228
31229 return cached_can_issue_more;
31230 }
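/* Sketch of the rotation used above (assumed ready list contents,
   illustration only): with ready = {A, B, C, D}, where ready[lastpos]
   = D is issued first, finding a pairable insn at pos 1 (B) shifts C
   and D down one slot and places B at the end, giving {A, C, D, B} so
   that B is scheduled next.  */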
31231
31232 /* We are about to begin issuing insns for this clock cycle. */
31233
31234 static int
31235 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31236 rtx_insn **ready ATTRIBUTE_UNUSED,
31237 int *pn_ready ATTRIBUTE_UNUSED,
31238 int clock_var ATTRIBUTE_UNUSED)
31239 {
31240 int n_ready = *pn_ready;
31241
31242 if (sched_verbose)
31243 fprintf (dump, "// rs6000_sched_reorder :\n");
31244
/* Reorder the ready list, if the next insn to be issued (the last
   ready insn) is a nonpipelined insn.  */
31247 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31248 {
31249 if (is_nonpipeline_insn (ready[n_ready - 1])
31250 && (recog_memoized (ready[n_ready - 2]) > 0))
31251 /* Simply swap first two insns. */
31252 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31253 }
31254
31255 if (rs6000_tune == PROCESSOR_POWER6)
31256 load_store_pendulum = 0;
31257
31258 return rs6000_issue_rate ();
31259 }
31260
31261 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31262
31263 static int
31264 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31265 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31266 {
31267 if (sched_verbose)
31268 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31269
31270 /* For Power6, we need to handle some special cases to try and keep the
31271 store queue from overflowing and triggering expensive flushes.
31272
31273 This code monitors how load and store instructions are being issued
31274 and skews the ready list one way or the other to increase the likelihood
31275 that a desired instruction is issued at the proper time.
31276
31277 A couple of things are done. First, we maintain a "load_store_pendulum"
31278 to track the current state of load/store issue.
31279
31280 - If the pendulum is at zero, then no loads or stores have been
31281 issued in the current cycle so we do nothing.
31282
31283 - If the pendulum is 1, then a single load has been issued in this
31284 cycle and we attempt to locate another load in the ready list to
31285 issue with it.
31286
31287 - If the pendulum is -2, then two stores have already been
31288 issued in this cycle, so we increase the priority of the first load
in the ready list to increase its likelihood of being chosen first
31290 in the next cycle.
31291
31292 - If the pendulum is -1, then a single store has been issued in this
31293 cycle and we attempt to locate another store in the ready list to
31294 issue with it, preferring a store to an adjacent memory location to
31295 facilitate store pairing in the store queue.
31296
31297 - If the pendulum is 2, then two loads have already been
31298 issued in this cycle, so we increase the priority of the first store
in the ready list to increase its likelihood of being chosen first
31300 in the next cycle.
31301
31302 - If the pendulum < -2 or > 2, then do nothing.
31303
Note: This code covers the most common scenarios.  There exist
non-load/store instructions which make use of the LSU and which
31306 would need to be accounted for to strictly model the behavior
31307 of the machine. Those instructions are currently unaccounted
31308 for to help minimize compile time overhead of this code.
31309 */
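/* For example (illustrative trace): issuing store, store in one cycle
   leaves the pendulum at -2, so the first load on the ready list gets
   a priority boost below; issuing load, load leaves it at +2, and the
   first store is boosted instead.  */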
31310 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31311 {
31312 int pos;
31313 int i;
31314 rtx_insn *tmp;
31315 rtx load_mem, str_mem;
31316
31317 if (is_store_insn (last_scheduled_insn, &str_mem))
/* Issuing a store, swing the load_store_pendulum to the left.  */
31319 load_store_pendulum--;
31320 else if (is_load_insn (last_scheduled_insn, &load_mem))
/* Issuing a load, swing the load_store_pendulum to the right.  */
31322 load_store_pendulum++;
31323 else
31324 return cached_can_issue_more;
31325
31326 /* If the pendulum is balanced, or there is only one instruction on
31327 the ready list, then all is well, so return. */
31328 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31329 return cached_can_issue_more;
31330
31331 if (load_store_pendulum == 1)
31332 {
/* A load has been issued in this cycle.  Scan the ready list
   for another load to issue with it.  */
pos = *pn_ready - 1;
31336
31337 while (pos >= 0)
31338 {
31339 if (is_load_insn (ready[pos], &load_mem))
31340 {
/* Found a load.  Move it to the head of the ready list,
   and adjust its priority so that it is more likely to
   stay there.  */
tmp = ready[pos];
for (i = pos; i < *pn_ready - 1; i++)
ready[i] = ready[i + 1];
ready[*pn_ready - 1] = tmp;
31348
31349 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31350 INSN_PRIORITY (tmp)++;
31351 break;
31352 }
31353 pos--;
31354 }
31355 }
31356 else if (load_store_pendulum == -2)
31357 {
31358 /* Two stores have been issued in this cycle. Increase the
31359 priority of the first load in the ready list to favor it for
31360 issuing in the next cycle. */
pos = *pn_ready - 1;
31362
31363 while (pos >= 0)
31364 {
31365 if (is_load_insn (ready[pos], &load_mem)
31366 && !sel_sched_p ()
31367 && INSN_PRIORITY_KNOWN (ready[pos]))
31368 {
31369 INSN_PRIORITY (ready[pos])++;
31370
/* Adjust the pendulum to account for the fact that a load
   was found and increased in priority.  This is to prevent
   increasing the priority of multiple loads.  */
31374 load_store_pendulum--;
31375
31376 break;
31377 }
31378 pos--;
31379 }
31380 }
31381 else if (load_store_pendulum == -1)
31382 {
/* A store has been issued in this cycle.  Scan the ready list for
   another store to issue with it, preferring a store to an adjacent
   memory location.  */
int first_store_pos = -1;

pos = *pn_ready - 1;
31389
31390 while (pos >= 0)
31391 {
31392 if (is_store_insn (ready[pos], &str_mem))
31393 {
31394 rtx str_mem2;
31395 /* Maintain the index of the first store found on the
31396 list */
31397 if (first_store_pos == -1)
31398 first_store_pos = pos;
31399
31400 if (is_store_insn (last_scheduled_insn, &str_mem2)
31401 && adjacent_mem_locations (str_mem, str_mem2))
31402 {
/* Found an adjacent store.  Move it to the head of the
   ready list, and adjust its priority so that it is
   more likely to stay there.  */
tmp = ready[pos];
for (i = pos; i < *pn_ready - 1; i++)
ready[i] = ready[i + 1];
ready[*pn_ready - 1] = tmp;
31410
31411 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31412 INSN_PRIORITY (tmp)++;
31413
31414 first_store_pos = -1;
31415
31416 break;
}
31418 }
31419 pos--;
31420 }
31421
31422 if (first_store_pos >= 0)
31423 {
31424 /* An adjacent store wasn't found, but a non-adjacent store was,
31425 so move the non-adjacent store to the front of the ready
31426 list, and adjust its priority so that it is more likely to
31427 stay there. */
tmp = ready[first_store_pos];
for (i = first_store_pos; i < *pn_ready - 1; i++)
ready[i] = ready[i + 1];
ready[*pn_ready - 1] = tmp;
31432 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31433 INSN_PRIORITY (tmp)++;
31434 }
31435 }
31436 else if (load_store_pendulum == 2)
31437 {
31438 /* Two loads have been issued in this cycle. Increase the priority
31439 of the first store in the ready list to favor it for issuing in
31440 the next cycle. */
pos = *pn_ready - 1;
31442
31443 while (pos >= 0)
31444 {
31445 if (is_store_insn (ready[pos], &str_mem)
31446 && !sel_sched_p ()
31447 && INSN_PRIORITY_KNOWN (ready[pos]))
31448 {
31449 INSN_PRIORITY (ready[pos])++;
31450
/* Adjust the pendulum to account for the fact that a store
   was found and increased in priority.  This is to prevent
   increasing the priority of multiple stores.  */
31454 load_store_pendulum++;
31455
31456 break;
31457 }
31458 pos--;
31459 }
31460 }
31461 }
31462
31463 /* Do Power9 dependent reordering if necessary. */
31464 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31465 && recog_memoized (last_scheduled_insn) >= 0)
31466 return power9_sched_reorder2 (ready, *pn_ready - 1);
31467
31468 return cached_can_issue_more;
31469 }
31470
31471 /* Return whether the presence of INSN causes a dispatch group termination
31472 of group WHICH_GROUP.
31473
31474 If WHICH_GROUP == current_group, this function will return true if INSN
causes the termination of the current group (i.e., the dispatch group to
31476 which INSN belongs). This means that INSN will be the last insn in the
31477 group it belongs to.
31478
31479 If WHICH_GROUP == previous_group, this function will return true if INSN
causes the termination of the previous group (i.e., the dispatch group that
precedes the group to which INSN belongs).  This means that INSN will be
the first insn in the group it belongs to.  */
31483
31484 static bool
31485 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31486 {
31487 bool first, last;
31488
31489 if (! insn)
31490 return false;
31491
31492 first = insn_must_be_first_in_group (insn);
31493 last = insn_must_be_last_in_group (insn);
31494
31495 if (first && last)
31496 return true;
31497
31498 if (which_group == current_group)
31499 return last;
31500 else if (which_group == previous_group)
31501 return first;
31502
31503 return false;
31504 }
31505
31506
31507 static bool
31508 insn_must_be_first_in_group (rtx_insn *insn)
31509 {
31510 enum attr_type type;
31511
31512 if (!insn
31513 || NOTE_P (insn)
31514 || DEBUG_INSN_P (insn)
31515 || GET_CODE (PATTERN (insn)) == USE
31516 || GET_CODE (PATTERN (insn)) == CLOBBER)
31517 return false;
31518
31519 switch (rs6000_tune)
31520 {
31521 case PROCESSOR_POWER5:
31522 if (is_cracked_insn (insn))
31523 return true;
31524 /* FALLTHRU */
31525 case PROCESSOR_POWER4:
31526 if (is_microcoded_insn (insn))
31527 return true;
31528
31529 if (!rs6000_sched_groups)
31530 return false;
31531
31532 type = get_attr_type (insn);
31533
31534 switch (type)
31535 {
31536 case TYPE_MFCR:
31537 case TYPE_MFCRF:
31538 case TYPE_MTCR:
31539 case TYPE_CR_LOGICAL:
31540 case TYPE_MTJMPR:
31541 case TYPE_MFJMPR:
31542 case TYPE_DIV:
31543 case TYPE_LOAD_L:
31544 case TYPE_STORE_C:
31545 case TYPE_ISYNC:
31546 case TYPE_SYNC:
31547 return true;
31548 default:
31549 break;
31550 }
31551 break;
31552 case PROCESSOR_POWER6:
31553 type = get_attr_type (insn);
31554
31555 switch (type)
31556 {
31557 case TYPE_EXTS:
31558 case TYPE_CNTLZ:
31559 case TYPE_TRAP:
31560 case TYPE_MUL:
31561 case TYPE_INSERT:
31562 case TYPE_FPCOMPARE:
31563 case TYPE_MFCR:
31564 case TYPE_MTCR:
31565 case TYPE_MFJMPR:
31566 case TYPE_MTJMPR:
31567 case TYPE_ISYNC:
31568 case TYPE_SYNC:
31569 case TYPE_LOAD_L:
31570 case TYPE_STORE_C:
31571 return true;
31572 case TYPE_SHIFT:
31573 if (get_attr_dot (insn) == DOT_NO
31574 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31575 return true;
31576 else
31577 break;
31578 case TYPE_DIV:
31579 if (get_attr_size (insn) == SIZE_32)
31580 return true;
31581 else
31582 break;
31583 case TYPE_LOAD:
31584 case TYPE_STORE:
31585 case TYPE_FPLOAD:
31586 case TYPE_FPSTORE:
31587 if (get_attr_update (insn) == UPDATE_YES)
31588 return true;
31589 else
31590 break;
31591 default:
31592 break;
31593 }
31594 break;
31595 case PROCESSOR_POWER7:
31596 type = get_attr_type (insn);
31597
31598 switch (type)
31599 {
31600 case TYPE_CR_LOGICAL:
31601 case TYPE_MFCR:
31602 case TYPE_MFCRF:
31603 case TYPE_MTCR:
31604 case TYPE_DIV:
31605 case TYPE_ISYNC:
31606 case TYPE_LOAD_L:
31607 case TYPE_STORE_C:
31608 case TYPE_MFJMPR:
31609 case TYPE_MTJMPR:
31610 return true;
31611 case TYPE_MUL:
31612 case TYPE_SHIFT:
31613 case TYPE_EXTS:
31614 if (get_attr_dot (insn) == DOT_YES)
31615 return true;
31616 else
31617 break;
31618 case TYPE_LOAD:
31619 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31620 || get_attr_update (insn) == UPDATE_YES)
31621 return true;
31622 else
31623 break;
31624 case TYPE_STORE:
31625 case TYPE_FPLOAD:
31626 case TYPE_FPSTORE:
31627 if (get_attr_update (insn) == UPDATE_YES)
31628 return true;
31629 else
31630 break;
31631 default:
31632 break;
31633 }
31634 break;
31635 case PROCESSOR_POWER8:
31636 type = get_attr_type (insn);
31637
31638 switch (type)
31639 {
31640 case TYPE_CR_LOGICAL:
31641 case TYPE_MFCR:
31642 case TYPE_MFCRF:
31643 case TYPE_MTCR:
31644 case TYPE_SYNC:
31645 case TYPE_ISYNC:
31646 case TYPE_LOAD_L:
31647 case TYPE_STORE_C:
31648 case TYPE_VECSTORE:
31649 case TYPE_MFJMPR:
31650 case TYPE_MTJMPR:
31651 return true;
31652 case TYPE_SHIFT:
31653 case TYPE_EXTS:
31654 case TYPE_MUL:
31655 if (get_attr_dot (insn) == DOT_YES)
31656 return true;
31657 else
31658 break;
31659 case TYPE_LOAD:
31660 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31661 || get_attr_update (insn) == UPDATE_YES)
31662 return true;
31663 else
31664 break;
31665 case TYPE_STORE:
31666 if (get_attr_update (insn) == UPDATE_YES
31667 && get_attr_indexed (insn) == INDEXED_YES)
31668 return true;
31669 else
31670 break;
31671 default:
31672 break;
31673 }
31674 break;
31675 default:
31676 break;
31677 }
31678
31679 return false;
31680 }
31681
31682 static bool
31683 insn_must_be_last_in_group (rtx_insn *insn)
31684 {
31685 enum attr_type type;
31686
31687 if (!insn
31688 || NOTE_P (insn)
31689 || DEBUG_INSN_P (insn)
31690 || GET_CODE (PATTERN (insn)) == USE
31691 || GET_CODE (PATTERN (insn)) == CLOBBER)
31692 return false;
31693
switch (rs6000_tune)
  {
31695 case PROCESSOR_POWER4:
31696 case PROCESSOR_POWER5:
31697 if (is_microcoded_insn (insn))
31698 return true;
31699
31700 if (is_branch_slot_insn (insn))
31701 return true;
31702
31703 break;
31704 case PROCESSOR_POWER6:
31705 type = get_attr_type (insn);
31706
31707 switch (type)
31708 {
31709 case TYPE_EXTS:
31710 case TYPE_CNTLZ:
31711 case TYPE_TRAP:
31712 case TYPE_MUL:
31713 case TYPE_FPCOMPARE:
31714 case TYPE_MFCR:
31715 case TYPE_MTCR:
31716 case TYPE_MFJMPR:
31717 case TYPE_MTJMPR:
31718 case TYPE_ISYNC:
31719 case TYPE_SYNC:
31720 case TYPE_LOAD_L:
31721 case TYPE_STORE_C:
31722 return true;
31723 case TYPE_SHIFT:
31724 if (get_attr_dot (insn) == DOT_NO
31725 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31726 return true;
31727 else
31728 break;
31729 case TYPE_DIV:
31730 if (get_attr_size (insn) == SIZE_32)
31731 return true;
31732 else
31733 break;
31734 default:
31735 break;
31736 }
31737 break;
31738 case PROCESSOR_POWER7:
31739 type = get_attr_type (insn);
31740
31741 switch (type)
31742 {
31743 case TYPE_ISYNC:
31744 case TYPE_SYNC:
31745 case TYPE_LOAD_L:
31746 case TYPE_STORE_C:
31747 return true;
31748 case TYPE_LOAD:
31749 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31750 && get_attr_update (insn) == UPDATE_YES)
31751 return true;
31752 else
31753 break;
31754 case TYPE_STORE:
31755 if (get_attr_update (insn) == UPDATE_YES
31756 && get_attr_indexed (insn) == INDEXED_YES)
31757 return true;
31758 else
31759 break;
31760 default:
31761 break;
31762 }
31763 break;
31764 case PROCESSOR_POWER8:
31765 type = get_attr_type (insn);
31766
31767 switch (type)
31768 {
31769 case TYPE_MFCR:
31770 case TYPE_MTCR:
31771 case TYPE_ISYNC:
31772 case TYPE_SYNC:
31773 case TYPE_LOAD_L:
31774 case TYPE_STORE_C:
31775 return true;
31776 case TYPE_LOAD:
31777 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31778 && get_attr_update (insn) == UPDATE_YES)
31779 return true;
31780 else
31781 break;
31782 case TYPE_STORE:
31783 if (get_attr_update (insn) == UPDATE_YES
31784 && get_attr_indexed (insn) == INDEXED_YES)
31785 return true;
31786 else
31787 break;
31788 default:
31789 break;
31790 }
31791 break;
31792 default:
31793 break;
31794 }
31795
31796 return false;
31797 }
31798
31799 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31800 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31801
31802 static bool
31803 is_costly_group (rtx *group_insns, rtx next_insn)
31804 {
31805 int i;
31806 int issue_rate = rs6000_issue_rate ();
31807
31808 for (i = 0; i < issue_rate; i++)
31809 {
31810 sd_iterator_def sd_it;
31811 dep_t dep;
31812 rtx insn = group_insns[i];
31813
31814 if (!insn)
31815 continue;
31816
31817 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31818 {
31819 rtx next = DEP_CON (dep);
31820
31821 if (next == next_insn
31822 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31823 return true;
31824 }
31825 }
31826
31827 return false;
31828 }
31829
/* Utility function used by redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert-sched-nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
   in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (whether a group has just ended, how many vacant issue slots
   remain in the last group, and how many dispatch groups were encountered
   so far).  */
31842
31843 static int
31844 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31845 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31846 int *group_count)
31847 {
31848 rtx nop;
31849 bool force;
31850 int issue_rate = rs6000_issue_rate ();
31851 bool end = *group_end;
31852 int i;
31853
31854 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31855 return can_issue_more;
31856
31857 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31858 return can_issue_more;
31859
31860 force = is_costly_group (group_insns, next_insn);
31861 if (!force)
31862 return can_issue_more;
31863
31864 if (sched_verbose > 6)
fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
*group_count, can_issue_more);
31867
31868 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31869 {
31870 if (*group_end)
31871 can_issue_more = 0;
31872
31873 /* Since only a branch can be issued in the last issue_slot, it is
31874 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31875 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31876 in this case the last nop will start a new group and the branch
31877 will be forced to the new group. */
31878 if (can_issue_more && !is_branch_slot_insn (next_insn))
31879 can_issue_more--;
31880
31881 /* Do we have a special group ending nop? */
31882 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31883 || rs6000_tune == PROCESSOR_POWER8)
31884 {
31885 nop = gen_group_ending_nop ();
31886 emit_insn_before (nop, next_insn);
31887 can_issue_more = 0;
31888 }
31889 else
31890 while (can_issue_more > 0)
31891 {
31892 nop = gen_nop ();
31893 emit_insn_before (nop, next_insn);
31894 can_issue_more--;
31895 }
31896
31897 *group_end = true;
31898 return 0;
31899 }
31900
31901 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31902 {
31903 int n_nops = rs6000_sched_insert_nops;
31904
31905 /* Nops can't be issued from the branch slot, so the effective
31906 issue_rate for nops is 'issue_rate - 1'. */
31907 if (can_issue_more == 0)
31908 can_issue_more = issue_rate;
31909 can_issue_more--;
31910 if (can_issue_more == 0)
31911 {
31912 can_issue_more = issue_rate - 1;
31913 (*group_count)++;
31914 end = true;
31915 for (i = 0; i < issue_rate; i++)
31916 {
31917 group_insns[i] = 0;
31918 }
31919 }
31920
31921 while (n_nops > 0)
31922 {
31923 nop = gen_nop ();
31924 emit_insn_before (nop, next_insn);
31925 if (can_issue_more == issue_rate - 1) /* new group begins */
31926 end = false;
31927 can_issue_more--;
31928 if (can_issue_more == 0)
31929 {
31930 can_issue_more = issue_rate - 1;
31931 (*group_count)++;
31932 end = true;
31933 for (i = 0; i < issue_rate; i++)
31934 {
31935 group_insns[i] = 0;
31936 }
31937 }
31938 n_nops--;
31939 }
31940
31941 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31942 can_issue_more++;
31943
31944 /* Is next_insn going to start a new group? */
31945 *group_end
31946 = (end
31947 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31948 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31949 || (can_issue_more < issue_rate &&
31950 insn_terminates_group_p (next_insn, previous_group)));
31951 if (*group_end && end)
31952 (*group_count)--;
31953
31954 if (sched_verbose > 6)
31955 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31956 *group_count, can_issue_more);
31957 return can_issue_more;
31958 }
31959
31960 return can_issue_more;
31961 }
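/* Worked example for scheme (2) above (hypothetical numbers): with
   issue_rate = 4 and -minsert-sched-nops=2, two nops are emitted
   before NEXT_INSN; each emission decrements can_issue_more, and
   whenever it reaches 0 a group boundary is recorded and the count
   restarts at issue_rate - 1, since a nop cannot occupy the branch
   slot.  */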
31962
/* This function tries to synchronize the dispatch groups that the compiler
   "sees" with the dispatch groups that the processor dispatcher is expected
   to form in practice.  It tries to achieve this synchronization by forcing
   the estimated processor grouping on the compiler (as opposed to the
   function 'pad_groups', which tries to force the scheduler's grouping on
   the processor).
31968
31969 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31970 examines the (estimated) dispatch groups that will be formed by the processor
31971 dispatcher. It marks these group boundaries to reflect the estimated
31972 processor grouping, overriding the grouping that the scheduler had marked.
31973 Depending on the value of the flag '-minsert-sched-nops' this function can
31974 force certain insns into separate groups or force a certain distance between
31975 them by inserting nops, for example, if there exists a "costly dependence"
31976 between the insns.
31977
31978 The function estimates the group boundaries that the processor will form as
31979 follows: It keeps track of how many vacant issue slots are available after
31980 each insn. A subsequent insn will start a new group if one of the following
31981 4 cases applies:
31982 - no more vacant issue slots remain in the current dispatch group.
31983 - only the last issue slot, which is the branch slot, is vacant, but the next
31984 insn is not a branch.
- only the last 2 or fewer issue slots, including the branch slot, are vacant,
  which means that a cracked insn (which occupies two issue slots) can't be
  issued in this group.
- fewer than 'issue_rate' slots are vacant, and the next insn always needs to
  start a new group.  */
31990
31991 static int
31992 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31993 rtx_insn *tail)
31994 {
31995 rtx_insn *insn, *next_insn;
31996 int issue_rate;
31997 int can_issue_more;
31998 int slot, i;
31999 bool group_end;
32000 int group_count = 0;
32001 rtx *group_insns;
32002
32003 /* Initialize. */
32004 issue_rate = rs6000_issue_rate ();
32005 group_insns = XALLOCAVEC (rtx, issue_rate);
32006 for (i = 0; i < issue_rate; i++)
32007 {
32008 group_insns[i] = 0;
32009 }
32010 can_issue_more = issue_rate;
32011 slot = 0;
32012 insn = get_next_active_insn (prev_head_insn, tail);
32013 group_end = false;
32014
32015 while (insn != NULL_RTX)
32016 {
32017 slot = (issue_rate - can_issue_more);
32018 group_insns[slot] = insn;
32019 can_issue_more =
32020 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32021 if (insn_terminates_group_p (insn, current_group))
32022 can_issue_more = 0;
32023
32024 next_insn = get_next_active_insn (insn, tail);
32025 if (next_insn == NULL_RTX)
32026 return group_count + 1;
32027
32028 /* Is next_insn going to start a new group? */
32029 group_end
32030 = (can_issue_more == 0
32031 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32032 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32033 || (can_issue_more < issue_rate &&
32034 insn_terminates_group_p (next_insn, previous_group)));
32035
32036 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32037 next_insn, &group_end, can_issue_more,
32038 &group_count);
32039
32040 if (group_end)
32041 {
32042 group_count++;
32043 can_issue_more = 0;
32044 for (i = 0; i < issue_rate; i++)
32045 {
32046 group_insns[i] = 0;
32047 }
32048 }
32049
32050 if (GET_MODE (next_insn) == TImode && can_issue_more)
32051 PUT_MODE (next_insn, VOIDmode);
32052 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32053 PUT_MODE (next_insn, TImode);
32054
32055 insn = next_insn;
32056 if (can_issue_more == 0)
32057 can_issue_more = issue_rate;
32058 } /* while */
32059
32060 return group_count;
32061 }
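/* Note on the TImode marking above (the scheduler's convention): an
   insn whose mode is TImode begins a new dispatch group, so the loop
   clears or sets that mark so the recorded boundaries match the
   estimated processor grouping.  */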
32062
32063 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32064 dispatch group boundaries that the scheduler had marked. Pad with nops
32065 any dispatch groups which have vacant issue slots, in order to force the
32066 scheduler's grouping on the processor dispatcher. The function
32067 returns the number of dispatch groups found. */
32068
32069 static int
32070 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32071 rtx_insn *tail)
32072 {
32073 rtx_insn *insn, *next_insn;
32074 rtx nop;
32075 int issue_rate;
32076 int can_issue_more;
32077 int group_end;
32078 int group_count = 0;
32079
32080 /* Initialize issue_rate. */
32081 issue_rate = rs6000_issue_rate ();
32082 can_issue_more = issue_rate;
32083
32084 insn = get_next_active_insn (prev_head_insn, tail);
32085 next_insn = get_next_active_insn (insn, tail);
32086
32087 while (insn != NULL_RTX)
32088 {
32089 can_issue_more =
32090 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32091
32092 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32093
32094 if (next_insn == NULL_RTX)
32095 break;
32096
32097 if (group_end)
32098 {
32099 /* If the scheduler had marked group termination at this location
32100 (between insn and next_insn), and neither insn nor next_insn will
32101 force group termination, pad the group with nops to force group
32102 termination. */
32103 if (can_issue_more
32104 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32105 && !insn_terminates_group_p (insn, current_group)
32106 && !insn_terminates_group_p (next_insn, previous_group))
32107 {
32108 if (!is_branch_slot_insn (next_insn))
32109 can_issue_more--;
32110
32111 while (can_issue_more)
32112 {
32113 nop = gen_nop ();
32114 emit_insn_before (nop, next_insn);
32115 can_issue_more--;
32116 }
32117 }
32118
32119 can_issue_more = issue_rate;
32120 group_count++;
32121 }
32122
32123 insn = next_insn;
32124 next_insn = get_next_active_insn (insn, tail);
32125 }
32126
32127 return group_count;
32128 }
32129
32130 /* We're beginning a new block. Initialize data structures as necessary. */
32131
32132 static void
32133 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32134 int sched_verbose ATTRIBUTE_UNUSED,
32135 int max_ready ATTRIBUTE_UNUSED)
32136 {
32137 last_scheduled_insn = NULL;
32138 load_store_pendulum = 0;
32139 divide_cnt = 0;
32140 vec_pairing = 0;
32141 }
32142
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops to enforce insn group bundling.  */
32145
32146 static void
32147 rs6000_sched_finish (FILE *dump, int sched_verbose)
32148 {
32149 int n_groups;
32150
32151 if (sched_verbose)
32152 fprintf (dump, "=== Finishing schedule.\n");
32153
32154 if (reload_completed && rs6000_sched_groups)
32155 {
32156 /* Do not run sched_finish hook when selective scheduling enabled. */
32157 if (sel_sched_p ())
32158 return;
32159
32160 if (rs6000_sched_insert_nops == sched_finish_none)
32161 return;
32162
32163 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32164 n_groups = pad_groups (dump, sched_verbose,
32165 current_sched_info->prev_head,
32166 current_sched_info->next_tail);
32167 else
32168 n_groups = redefine_groups (dump, sched_verbose,
32169 current_sched_info->prev_head,
32170 current_sched_info->next_tail);
32171
32172 if (sched_verbose >= 6)
32173 {
32174 fprintf (dump, "ngroups = %d\n", n_groups);
32175 print_rtl (dump, current_sched_info->prev_head);
32176 fprintf (dump, "Done finish_sched\n");
32177 }
32178 }
32179 }
32180
32181 struct rs6000_sched_context
32182 {
32183 short cached_can_issue_more;
32184 rtx_insn *last_scheduled_insn;
32185 int load_store_pendulum;
32186 int divide_cnt;
32187 int vec_pairing;
32188 };
32189
32190 typedef struct rs6000_sched_context rs6000_sched_context_def;
32191 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32192
32193 /* Allocate store for new scheduling context. */
32194 static void *
32195 rs6000_alloc_sched_context (void)
32196 {
32197 return xmalloc (sizeof (rs6000_sched_context_def));
32198 }
32199
/* If CLEAN_P is true, initialize _SC with clean data; otherwise,
   initialize it from the global context.  */
32202 static void
32203 rs6000_init_sched_context (void *_sc, bool clean_p)
32204 {
32205 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32206
32207 if (clean_p)
32208 {
32209 sc->cached_can_issue_more = 0;
32210 sc->last_scheduled_insn = NULL;
32211 sc->load_store_pendulum = 0;
32212 sc->divide_cnt = 0;
32213 sc->vec_pairing = 0;
32214 }
32215 else
32216 {
32217 sc->cached_can_issue_more = cached_can_issue_more;
32218 sc->last_scheduled_insn = last_scheduled_insn;
32219 sc->load_store_pendulum = load_store_pendulum;
32220 sc->divide_cnt = divide_cnt;
32221 sc->vec_pairing = vec_pairing;
32222 }
32223 }
32224
32225 /* Sets the global scheduling context to the one pointed to by _SC. */
32226 static void
32227 rs6000_set_sched_context (void *_sc)
32228 {
32229 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32230
32231 gcc_assert (sc != NULL);
32232
32233 cached_can_issue_more = sc->cached_can_issue_more;
32234 last_scheduled_insn = sc->last_scheduled_insn;
32235 load_store_pendulum = sc->load_store_pendulum;
32236 divide_cnt = sc->divide_cnt;
32237 vec_pairing = sc->vec_pairing;
32238 }
32239
32240 /* Free _SC. */
32241 static void
32242 rs6000_free_sched_context (void *_sc)
32243 {
32244 gcc_assert (_sc != NULL);
32245
32246 free (_sc);
32247 }
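/* A minimal sketch of how a caller (in practice the selective
   scheduler) is expected to drive the four context hooks above.
   Illustrative call sequence only, not part of the build.  */
#if 0
{
  void *ctx = rs6000_alloc_sched_context ();
  rs6000_init_sched_context (ctx, true);  /* Start from a clean state.  */
  /* ... schedule one region ...  */
  rs6000_set_sched_context (ctx);         /* Restore the saved state.  */
  rs6000_free_sched_context (ctx);
}
#endif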
32248
32249 static bool
32250 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32251 {
32252 switch (get_attr_type (insn))
32253 {
32254 case TYPE_DIV:
32255 case TYPE_SDIV:
32256 case TYPE_DDIV:
32257 case TYPE_VECDIV:
32258 case TYPE_SSQRT:
32259 case TYPE_DSQRT:
32260 return false;
32261
32262 default:
32263 return true;
32264 }
32265 }
32266 \f
32267 /* Length in units of the trampoline for entering a nested function. */
32268
32269 int
32270 rs6000_trampoline_size (void)
32271 {
32272 int ret = 0;
32273
32274 switch (DEFAULT_ABI)
32275 {
32276 default:
32277 gcc_unreachable ();
32278
32279 case ABI_AIX:
32280 ret = (TARGET_32BIT) ? 12 : 24;
32281 break;
32282
32283 case ABI_ELFv2:
32284 gcc_assert (!TARGET_32BIT);
32285 ret = 32;
32286 break;
32287
32288 case ABI_DARWIN:
32289 case ABI_V4:
32290 ret = (TARGET_32BIT) ? 40 : 48;
32291 break;
32292 }
32293
32294 return ret;
32295 }
32296
32297 /* Emit RTL insns to initialize the variable parts of a trampoline.
32298 FNADDR is an RTX for the address of the function's pure code.
32299 CXT is an RTX for the static chain value for the function. */
32300
32301 static void
32302 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32303 {
32304 int regsize = (TARGET_32BIT) ? 4 : 8;
32305 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32306 rtx ctx_reg = force_reg (Pmode, cxt);
32307 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32308
32309 switch (DEFAULT_ABI)
32310 {
32311 default:
32312 gcc_unreachable ();
32313
/* Under AIX, just build the 3-word function descriptor.  */
32315 case ABI_AIX:
32316 {
32317 rtx fnmem, fn_reg, toc_reg;
32318
32319 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32320 error ("you cannot take the address of a nested function if you use "
32321 "the %qs option", "-mno-pointers-to-nested-functions");
32322
32323 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32324 fn_reg = gen_reg_rtx (Pmode);
32325 toc_reg = gen_reg_rtx (Pmode);
32326
32327 /* Macro to shorten the code expansions below. */
32328 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32329
32330 m_tramp = replace_equiv_address (m_tramp, addr);
32331
32332 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32333 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32334 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32335 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32336 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32337
32338 # undef MEM_PLUS
32339 }
32340 break;
32341
32342 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32343 case ABI_ELFv2:
32344 case ABI_DARWIN:
32345 case ABI_V4:
32346 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32347 LCT_NORMAL, VOIDmode,
32348 addr, Pmode,
32349 GEN_INT (rs6000_trampoline_size ()), SImode,
32350 fnaddr, Pmode,
32351 ctx_reg, Pmode);
32352 break;
32353 }
32354 }
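/* For the ABI_AIX case above, the initialized trampoline is itself a
   function descriptor of three pointer-sized words (illustrative
   layout): word 0 holds the entry address copied from FNADDR's
   descriptor, word 1 the TOC pointer, and word 2 the static chain
   value CXT.  */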
32355
32356 \f
32357 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32358 identifier as an argument, so the front end shouldn't look it up. */
32359
32360 static bool
32361 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32362 {
32363 return is_attribute_p ("altivec", attr_id);
32364 }
32365
32366 /* Handle the "altivec" attribute. The attribute may have
32367 arguments as follows:
32368
32369 __attribute__((altivec(vector__)))
32370 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32371 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32372
32373 and may appear more than once (e.g., 'vector bool char') in a
32374 given declaration. */
32375
32376 static tree
32377 rs6000_handle_altivec_attribute (tree *node,
32378 tree name ATTRIBUTE_UNUSED,
32379 tree args,
32380 int flags ATTRIBUTE_UNUSED,
32381 bool *no_add_attrs)
32382 {
32383 tree type = *node, result = NULL_TREE;
32384 machine_mode mode;
32385 int unsigned_p;
32386 char altivec_type
32387 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32388 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32389 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32390 : '?');
32391
32392 while (POINTER_TYPE_P (type)
32393 || TREE_CODE (type) == FUNCTION_TYPE
32394 || TREE_CODE (type) == METHOD_TYPE
32395 || TREE_CODE (type) == ARRAY_TYPE)
32396 type = TREE_TYPE (type);
32397
32398 mode = TYPE_MODE (type);
32399
32400 /* Check for invalid AltiVec type qualifiers. */
32401 if (type == long_double_type_node)
32402 error ("use of %<long double%> in AltiVec types is invalid");
32403 else if (type == boolean_type_node)
32404 error ("use of boolean types in AltiVec types is invalid");
32405 else if (TREE_CODE (type) == COMPLEX_TYPE)
32406 error ("use of %<complex%> in AltiVec types is invalid");
32407 else if (DECIMAL_FLOAT_MODE_P (mode))
32408 error ("use of decimal floating point types in AltiVec types is invalid");
32409 else if (!TARGET_VSX)
32410 {
32411 if (type == long_unsigned_type_node || type == long_integer_type_node)
32412 {
32413 if (TARGET_64BIT)
32414 error ("use of %<long%> in AltiVec types is invalid for "
32415 "64-bit code without %qs", "-mvsx");
32416 else if (rs6000_warn_altivec_long)
32417 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32418 "use %<int%>");
32419 }
32420 else if (type == long_long_unsigned_type_node
32421 || type == long_long_integer_type_node)
32422 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32423 "-mvsx");
32424 else if (type == double_type_node)
32425 error ("use of %<double%> in AltiVec types is invalid without %qs",
32426 "-mvsx");
32427 }
32428
32429 switch (altivec_type)
32430 {
32431 case 'v':
32432 unsigned_p = TYPE_UNSIGNED (type);
32433 switch (mode)
32434 {
32435 case E_TImode:
32436 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32437 break;
32438 case E_DImode:
32439 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32440 break;
32441 case E_SImode:
32442 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32443 break;
32444 case E_HImode:
32445 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32446 break;
32447 case E_QImode:
32448 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32449 break;
32450 case E_SFmode: result = V4SF_type_node; break;
32451 case E_DFmode: result = V2DF_type_node; break;
32452 /* If the user says 'vector int bool', we may be handed the 'bool'
32453 attribute _before_ the 'vector' attribute, and so select the
32454 proper type in the 'b' case below. */
32455 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32456 case E_V2DImode: case E_V2DFmode:
32457 result = type;
32458 default: break;
32459 }
32460 break;
32461 case 'b':
32462 switch (mode)
32463 {
32464 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32465 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32466 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32467 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32468 default: break;
32469 }
32470 break;
32471 case 'p':
32472 switch (mode)
32473 {
32474 case E_V8HImode: result = pixel_V8HI_type_node;
32475 default: break;
32476 }
32477 default: break;
32478 }
32479
32480 /* Propagate qualifiers attached to the element type
32481 onto the vector type. */
32482 if (result && result != type && TYPE_QUALS (type))
32483 result = build_qualified_type (result, TYPE_QUALS (type));
32484
32485 *no_add_attrs = true; /* No need to hang on to the attribute. */
32486
32487 if (result)
32488 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32489
32490 return NULL_TREE;
32491 }
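/* For instance (illustrative only): a source-level declaration
     vector unsigned short v;
   reaches this handler as the altivec(vector__) attribute on an
   'unsigned short' type, and the E_HImode case above rewrites the
   declaration to use unsigned_V8HI_type_node.  */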
32492
32493 /* AltiVec defines five built-in scalar types that serve as vector
32494 elements; we must teach the compiler how to mangle them. The 128-bit
32495 floating point mangling is target-specific as well. */
32496
32497 static const char *
32498 rs6000_mangle_type (const_tree type)
32499 {
32500 type = TYPE_MAIN_VARIANT (type);
32501
32502 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32503 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32504 return NULL;
32505
32506 if (type == bool_char_type_node) return "U6__boolc";
32507 if (type == bool_short_type_node) return "U6__bools";
32508 if (type == pixel_type_node) return "u7__pixel";
32509 if (type == bool_int_type_node) return "U6__booli";
32510 if (type == bool_long_long_type_node) return "U6__boolx";
32511
32512 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32513 return "g";
32514 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32515 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32516
32517 /* For all other types, use the default mangling. */
32518 return NULL;
32519 }
32520
32521 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32522 struct attribute_spec.handler. */
32523
32524 static tree
32525 rs6000_handle_longcall_attribute (tree *node, tree name,
32526 tree args ATTRIBUTE_UNUSED,
32527 int flags ATTRIBUTE_UNUSED,
32528 bool *no_add_attrs)
32529 {
32530 if (TREE_CODE (*node) != FUNCTION_TYPE
32531 && TREE_CODE (*node) != FIELD_DECL
32532 && TREE_CODE (*node) != TYPE_DECL)
32533 {
32534 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32535 name);
32536 *no_add_attrs = true;
32537 }
32538
32539 return NULL_TREE;
32540 }
32541
32542 /* Set longcall attributes on all functions declared when
32543 rs6000_default_long_calls is true. */
32544 static void
32545 rs6000_set_default_type_attributes (tree type)
32546 {
32547 if (rs6000_default_long_calls
32548 && (TREE_CODE (type) == FUNCTION_TYPE
32549 || TREE_CODE (type) == METHOD_TYPE))
32550 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32551 NULL_TREE,
32552 TYPE_ATTRIBUTES (type));
32553
32554 #if TARGET_MACHO
32555 darwin_set_default_type_attributes (type);
32556 #endif
32557 }
32558
32559 /* Return a reference suitable for calling a function with the
32560 longcall attribute. */
32561
32562 static rtx
32563 rs6000_longcall_ref (rtx call_ref, rtx arg)
32564 {
/* System V adds '.' to the internal name, so skip any leading dots.  */
32566 const char *call_name = XSTR (call_ref, 0);
32567 if (*call_name == '.')
32568 {
32569 while (*call_name == '.')
32570 call_name++;
32571
32572 tree node = get_identifier (call_name);
32573 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32574 }
32575
32576 if (TARGET_PLTSEQ)
32577 {
32578 rtx base = const0_rtx;
32579 int regno;
32580 if (DEFAULT_ABI == ABI_ELFv2)
32581 {
32582 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32583 regno = 12;
32584 }
32585 else
32586 {
32587 if (flag_pic)
32588 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32589 regno = 11;
32590 }
32591 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32592 may be used by a function global entry point. For SysV4, r11
32593 is used by __glink_PLTresolve lazy resolver entry. */
32594 rtx reg = gen_rtx_REG (Pmode, regno);
32595 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32596 UNSPEC_PLT16_HA);
32597 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32598 UNSPEC_PLT16_LO);
32599 emit_insn (gen_rtx_SET (reg, hi));
32600 emit_insn (gen_rtx_SET (reg, lo));
32601 return reg;
32602 }
32603
32604 return force_reg (Pmode, call_ref);
32605 }
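/* Illustration (typical ELFv2 output; the exact registers, mnemonics
   and relocations vary by ABI and assembler support): the HA/LO SET
   pair above assembles roughly to
     addis 12,2,func@plt@ha
     ld 12,func@plt@l(12)
   leaving the address of func's PLT entry in r12 for the indirect
   call sequence.  */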
32606 \f
32607 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32608 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32609 #endif
32610
32611 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32612 struct attribute_spec.handler. */
32613 static tree
32614 rs6000_handle_struct_attribute (tree *node, tree name,
32615 tree args ATTRIBUTE_UNUSED,
32616 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32617 {
32618 tree *type = NULL;
32619 if (DECL_P (*node))
32620 {
32621 if (TREE_CODE (*node) == TYPE_DECL)
32622 type = &TREE_TYPE (*node);
32623 }
32624 else
32625 type = node;
32626
32627 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32628 || TREE_CODE (*type) == UNION_TYPE)))
32629 {
32630 warning (OPT_Wattributes, "%qE attribute ignored", name);
32631 *no_add_attrs = true;
32632 }
32633
32634 else if ((is_attribute_p ("ms_struct", name)
32635 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32636 || ((is_attribute_p ("gcc_struct", name)
32637 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32638 {
32639 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32640 name);
32641 *no_add_attrs = true;
32642 }
32643
32644 return NULL_TREE;
32645 }
32646
32647 static bool
32648 rs6000_ms_bitfield_layout_p (const_tree record_type)
32649 {
32650 return ((TARGET_USE_MS_BITFIELD_LAYOUT
32651 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32652 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32653 }
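
/* A usage sketch (illustrative tag names, not part of this file): the
   attribute handler and predicate above let one translation unit mix
   both layouts:

	struct ms  { char a : 4; int b : 10; } __attribute__ ((ms_struct));
	struct gnu { char a : 4; int b : 10; } __attribute__ ((gcc_struct));

   Under the Microsoft rules A and B do not share a storage unit because
   their declared types differ, so sizeof (struct ms) is typically 8
   where sizeof (struct gnu) is 4.  */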
32654 \f
32655 #ifdef USING_ELFOS_H
32656
32657 /* A get_unnamed_section callback, used for switching to toc_section. */
32658
32659 static void
32660 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32661 {
32662 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32663 && TARGET_MINIMAL_TOC)
32664 {
32665 if (!toc_initialized)
32666 {
32667 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32668 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32669 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32670 fprintf (asm_out_file, "\t.tc ");
32671 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32672 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32673 fprintf (asm_out_file, "\n");
32674
32675 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32676 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32677 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32678 fprintf (asm_out_file, " = .+32768\n");
32679 toc_initialized = 1;
32680 }
32681 else
32682 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32683 }
32684 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32685 {
32686 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32687 if (!toc_initialized)
32688 {
32689 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32690 toc_initialized = 1;
32691 }
32692 }
32693 else
32694 {
32695 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32696 if (!toc_initialized)
32697 {
32698 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32699 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32700 fprintf (asm_out_file, " = .+32768\n");
32701 toc_initialized = 1;
32702 }
32703 }
32704 }
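
/* A hedged sketch of the first-time -mminimal-toc output above for
   64-bit ELF; the exact directive spellings come from TOC_SECTION_ASM_OP,
   MINIMAL_TOC_SECTION_ASM_OP and ASM_OUTPUT_ALIGN in the subtarget
   headers, so they vary:

	.section	".toc","aw"
	.align 3
	.LCTOC0:
	.tc .LCTOC1[TC],.LCTOC1
	.section	".toc1","aw"
	.align 3
	.LCTOC1 = .+32768

   The ".+32768" bias points .LCTOC1 at the middle of the minimal TOC so
   that signed 16-bit offsets can address the whole 64K region.  */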
32705
32706 /* Implement TARGET_ASM_INIT_SECTIONS. */
32707
32708 static void
32709 rs6000_elf_asm_init_sections (void)
32710 {
32711 toc_section
32712 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32713
32714 sdata2_section
32715 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32716 SDATA2_SECTION_ASM_OP);
32717 }
32718
32719 /* Implement TARGET_SELECT_RTX_SECTION. */
32720
32721 static section *
32722 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32723 unsigned HOST_WIDE_INT align)
32724 {
32725 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32726 return toc_section;
32727 else
32728 return default_elf_select_rtx_section (mode, x, align);
32729 }
32730 \f
32731 /* For a SYMBOL_REF, set generic flags and then perform some
32732 target-specific processing.
32733
32734 When the AIX ABI is requested on a non-AIX system, replace the
32735 function name with the real name (with a leading .) rather than the
32736 function descriptor name. This saves a lot of overriding code to
32737 read the prefixes. */
32738
32739 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32740 static void
32741 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32742 {
32743 default_encode_section_info (decl, rtl, first);
32744
32745 if (first
32746 && TREE_CODE (decl) == FUNCTION_DECL
32747 && !TARGET_AIX
32748 && DEFAULT_ABI == ABI_AIX)
32749 {
32750 rtx sym_ref = XEXP (rtl, 0);
32751 size_t len = strlen (XSTR (sym_ref, 0));
32752 char *str = XALLOCAVEC (char, len + 2);
32753 str[0] = '.';
32754 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32755 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32756 }
32757 }
32758
32759 static inline bool
32760 compare_section_name (const char *section, const char *templ)
32761 {
32762 int len;
32763
32764 len = strlen (templ);
32765 return (strncmp (section, templ, len) == 0
32766 && (section[len] == 0 || section[len] == '.'));
32767 }
32768
32769 bool
32770 rs6000_elf_in_small_data_p (const_tree decl)
32771 {
32772 if (rs6000_sdata == SDATA_NONE)
32773 return false;
32774
32775 /* We want to merge strings, so we never consider them small data. */
32776 if (TREE_CODE (decl) == STRING_CST)
32777 return false;
32778
32779 /* Functions are never in the small data area. */
32780 if (TREE_CODE (decl) == FUNCTION_DECL)
32781 return false;
32782
32783 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32784 {
32785 const char *section = DECL_SECTION_NAME (decl);
32786 if (compare_section_name (section, ".sdata")
32787 || compare_section_name (section, ".sdata2")
32788 || compare_section_name (section, ".gnu.linkonce.s")
32789 || compare_section_name (section, ".sbss")
32790 || compare_section_name (section, ".sbss2")
32791 || compare_section_name (section, ".gnu.linkonce.sb")
32792 || strcmp (section, ".PPC.EMB.sdata0") == 0
32793 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32794 return true;
32795 }
32796 else
32797 {
32798 /* If we are told not to put readonly data in sdata, then don't. */
32799 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32800 && !rs6000_readonly_in_sdata)
32801 return false;
32802
32803 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32804
32805 if (size > 0
32806 && size <= g_switch_value
32807 /* If it's not public, and we're not going to reference it there,
32808 there's no need to put it in the small data section. */
32809 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32810 return true;
32811 }
32812
32813 return false;
32814 }
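
/* A hedged example of the policy above (variable names illustrative):
   with -msdata and the default -G 8 threshold,

	int counter;		// 4 bytes: goes to .sdata/.sbss
	char big[64];		// over the threshold: stays in .data/.bss

   COUNTER is then reachable with one small-data-base-relative access,
   while BIG needs a full address.  An explicit placement such as
   __attribute__ ((section (".sdata2"))) is also treated as small data
   via the section-name checks above.  */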
32815
32816 #endif /* USING_ELFOS_H */
32817 \f
32818 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32819
32820 static bool
32821 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32822 {
32823 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32824 }
32825
32826 /* Do not place thread-local symbol refs in the object blocks. */
32827
32828 static bool
32829 rs6000_use_blocks_for_decl_p (const_tree decl)
32830 {
32831 return !DECL_THREAD_LOCAL_P (decl);
32832 }
32833 \f
32834 /* Return a REG that occurs in ADDR with coefficient 1.
32835 ADDR can be effectively incremented by incrementing REG.
32836
32837 r0 is special and we must not select it as an address
32838 register by this routine since our caller will try to
32839 increment the returned register via an "la" instruction. */
32840
32841 rtx
32842 find_addr_reg (rtx addr)
32843 {
32844 while (GET_CODE (addr) == PLUS)
32845 {
32846 if (REG_P (XEXP (addr, 0))
32847 && REGNO (XEXP (addr, 0)) != 0)
32848 addr = XEXP (addr, 0);
32849 else if (REG_P (XEXP (addr, 1))
32850 && REGNO (XEXP (addr, 1)) != 0)
32851 addr = XEXP (addr, 1);
32852 else if (CONSTANT_P (XEXP (addr, 0)))
32853 addr = XEXP (addr, 1);
32854 else if (CONSTANT_P (XEXP (addr, 1)))
32855 addr = XEXP (addr, 0);
32856 else
32857 gcc_unreachable ();
32858 }
32859 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
32860 return addr;
32861 }
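
/* For example, an ADDR of (plus (reg 9) (const_int 16)) yields (reg 9).
   r0 is rejected because "la" (addi) reads RA=0 as the constant zero
   rather than as a base register, so incrementing through r0 would not
   work.  */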
32862
32863 void
32864 rs6000_fatal_bad_address (rtx op)
32865 {
32866 fatal_insn ("bad address", op);
32867 }
32868
32869 #if TARGET_MACHO
32870
32871 typedef struct branch_island_d {
32872 tree function_name;
32873 tree label_name;
32874 int line_number;
32875 } branch_island;
32876
32877
32878 static vec<branch_island, va_gc> *branch_islands;
32879
32880 /* Remember to generate a branch island for far calls to the given
32881 function. */
32882
32883 static void
32884 add_compiler_branch_island (tree label_name, tree function_name,
32885 int line_number)
32886 {
32887 branch_island bi = {function_name, label_name, line_number};
32888 vec_safe_push (branch_islands, bi);
32889 }
32890
32891 /* Generate far-jump branch islands for everything recorded in
32892 branch_islands. Invoked immediately after the last instruction of
32893 the epilogue has been emitted; the branch islands must be appended
32894 to, and contiguous with, the function body. Mach-O stubs are
32895 generated in machopic_output_stub(). */
32896
32897 static void
32898 macho_branch_islands (void)
32899 {
32900 char tmp_buf[512];
32901
32902 while (!vec_safe_is_empty (branch_islands))
32903 {
32904 branch_island *bi = &branch_islands->last ();
32905 const char *label = IDENTIFIER_POINTER (bi->label_name);
32906 const char *name = IDENTIFIER_POINTER (bi->function_name);
32907 char name_buf[512];
32908 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32909 if (name[0] == '*' || name[0] == '&')
32910 strcpy (name_buf, name+1);
32911 else
32912 {
32913 name_buf[0] = '_';
32914 strcpy (name_buf+1, name);
32915 }
32916 strcpy (tmp_buf, "\n");
32917 strcat (tmp_buf, label);
32918 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32919 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32920 dbxout_stabd (N_SLINE, bi->line_number);
32921 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32922 if (flag_pic)
32923 {
32924 if (TARGET_LINK_STACK)
32925 {
32926 char name[32];
32927 get_ppc476_thunk_name (name);
32928 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32929 strcat (tmp_buf, name);
32930 strcat (tmp_buf, "\n");
32931 strcat (tmp_buf, label);
32932 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32933 }
32934 else
32935 {
32936 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32937 strcat (tmp_buf, label);
32938 strcat (tmp_buf, "_pic\n");
32939 strcat (tmp_buf, label);
32940 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32941 }
32942
32943 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32944 strcat (tmp_buf, name_buf);
32945 strcat (tmp_buf, " - ");
32946 strcat (tmp_buf, label);
32947 strcat (tmp_buf, "_pic)\n");
32948
32949 strcat (tmp_buf, "\tmtlr r0\n");
32950
32951 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32952 strcat (tmp_buf, name_buf);
32953 strcat (tmp_buf, " - ");
32954 strcat (tmp_buf, label);
32955 strcat (tmp_buf, "_pic)\n");
32956
32957 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32958 }
32959 else
32960 {
32961 strcat (tmp_buf, ":\n\tlis r12,hi16(");
32962 strcat (tmp_buf, name_buf);
32963 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32964 strcat (tmp_buf, name_buf);
32965 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32966 }
32967 output_asm_insn (tmp_buf, 0);
32968 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32969 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32970 dbxout_stabd (N_SLINE, bi->line_number);
32971 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32972 branch_islands->pop ();
32973 }
32974 }
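
/* A hedged rendering of one island emitted above (PIC, no link stack;
   ISLAND and _foo stand in for the recorded label and target):

	ISLAND:		mflr r0
			bcl 20,31,ISLAND_pic
	ISLAND_pic:	mflr r11
			addis r11,r11,ha16(_foo - ISLAND_pic)
			mtlr r0
			addi r12,r11,lo16(_foo - ISLAND_pic)
			mtctr r12
			bctr

   The bcl/mflr pair materializes the island's own address, letting the
   short "bl" that reached the island be extended into a full 32-bit
   transfer through the count register.  */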
32975
32976 /* NO_PREVIOUS_DEF checks whether the function name is already present
32977 in the list of recorded branch islands. */
32978
32979 static int
32980 no_previous_def (tree function_name)
32981 {
32982 branch_island *bi;
32983 unsigned ix;
32984
32985 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32986 if (function_name == bi->function_name)
32987 return 0;
32988 return 1;
32989 }
32990
32991 /* GET_PREV_LABEL gets the label name from the previous definition of
32992 the function. */
32993
32994 static tree
32995 get_prev_label (tree function_name)
32996 {
32997 branch_island *bi;
32998 unsigned ix;
32999
33000 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33001 if (function_name == bi->function_name)
33002 return bi->label_name;
33003 return NULL_TREE;
33004 }
33005
33006 /* Generate PIC and indirect symbol stubs. */
33007
33008 void
33009 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33010 {
33011 unsigned int length;
33012 char *symbol_name, *lazy_ptr_name;
33013 char *local_label_0;
33014 static unsigned label = 0;
33015
33016 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33017 symb = (*targetm.strip_name_encoding) (symb);
33018
33020 length = strlen (symb);
33021 symbol_name = XALLOCAVEC (char, length + 32);
33022 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33023
33024 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33025 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33026
33027 if (flag_pic == 2)
33028 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33029 else
33030 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33031
33032 if (flag_pic == 2)
33033 {
33034 fprintf (file, "\t.align 5\n");
33035
33036 fprintf (file, "%s:\n", stub);
33037 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33038
33039 label++;
33040 local_label_0 = XALLOCAVEC (char, 16);
33041 sprintf (local_label_0, "L%u$spb", label);
33042
33043 fprintf (file, "\tmflr r0\n");
33044 if (TARGET_LINK_STACK)
33045 {
33046 char name[32];
33047 get_ppc476_thunk_name (name);
33048 fprintf (file, "\tbl %s\n", name);
33049 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33050 }
33051 else
33052 {
33053 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33054 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33055 }
33056 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33057 lazy_ptr_name, local_label_0);
33058 fprintf (file, "\tmtlr r0\n");
33059 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33060 (TARGET_64BIT ? "ldu" : "lwzu"),
33061 lazy_ptr_name, local_label_0);
33062 fprintf (file, "\tmtctr r12\n");
33063 fprintf (file, "\tbctr\n");
33064 }
33065 else
33066 {
33067 fprintf (file, "\t.align 4\n");
33068
33069 fprintf (file, "%s:\n", stub);
33070 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33071
33072 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33073 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33074 (TARGET_64BIT ? "ldu" : "lwzu"),
33075 lazy_ptr_name);
33076 fprintf (file, "\tmtctr r12\n");
33077 fprintf (file, "\tbctr\n");
33078 }
33079
33080 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33081 fprintf (file, "%s:\n", lazy_ptr_name);
33082 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33083 fprintf (file, "%sdyld_stub_binding_helper\n",
33084 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33085 }
33086
33087 /* Legitimize PIC addresses. If the address is already
33088 position-independent, we return ORIG. Newly generated
33089 position-independent addresses go into a reg. This is REG if
33090 nonzero; otherwise we allocate register(s) as necessary. */
33091
33092 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
33093
33094 rtx
33095 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33096 rtx reg)
33097 {
33098 rtx base, offset;
33099
33100 if (reg == NULL && !reload_completed)
33101 reg = gen_reg_rtx (Pmode);
33102
33103 if (GET_CODE (orig) == CONST)
33104 {
33105 rtx reg_temp;
33106
33107 if (GET_CODE (XEXP (orig, 0)) == PLUS
33108 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33109 return orig;
33110
33111 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33112
33113 /* Use a different reg for the intermediate value, as
33114 it will be marked UNCHANGING. */
33115 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33116 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33117 Pmode, reg_temp);
33118 offset =
33119 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33120 Pmode, reg);
33121
33122 if (CONST_INT_P (offset))
33123 {
33124 if (SMALL_INT (offset))
33125 return plus_constant (Pmode, base, INTVAL (offset));
33126 else if (!reload_completed)
33127 offset = force_reg (Pmode, offset);
33128 else
33129 {
33130 rtx mem = force_const_mem (Pmode, orig);
33131 return machopic_legitimize_pic_address (mem, Pmode, reg);
33132 }
33133 }
33134 return gen_rtx_PLUS (Pmode, base, offset);
33135 }
33136
33137 /* Fall back on generic machopic code. */
33138 return machopic_legitimize_pic_address (orig, mode, reg);
33139 }
33140
33141 /* Output a .machine directive for the Darwin assembler, and call
33142 the generic start_file routine. */
33143
33144 static void
33145 rs6000_darwin_file_start (void)
33146 {
33147 static const struct
33148 {
33149 const char *arg;
33150 const char *name;
33151 HOST_WIDE_INT if_set;
33152 } mapping[] = {
33153 { "ppc64", "ppc64", MASK_64BIT },
33154 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33155 { "power4", "ppc970", 0 },
33156 { "G5", "ppc970", 0 },
33157 { "7450", "ppc7450", 0 },
33158 { "7400", "ppc7400", MASK_ALTIVEC },
33159 { "G4", "ppc7400", 0 },
33160 { "750", "ppc750", 0 },
33161 { "740", "ppc750", 0 },
33162 { "G3", "ppc750", 0 },
33163 { "604e", "ppc604e", 0 },
33164 { "604", "ppc604", 0 },
33165 { "603e", "ppc603", 0 },
33166 { "603", "ppc603", 0 },
33167 { "601", "ppc601", 0 },
33168 { NULL, "ppc", 0 } };
33169 const char *cpu_id = "";
33170 size_t i;
33171
33172 rs6000_file_start ();
33173 darwin_file_start ();
33174
33175 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33176
33177 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33178 cpu_id = rs6000_default_cpu;
33179
33180 if (global_options_set.x_rs6000_cpu_index)
33181 cpu_id = processor_target_table[rs6000_cpu_index].name;
33182
33183 /* Look through the mapping array. Pick the first name that either
33184 matches the argument, has a bit set in IF_SET that is also set
33185 in the target flags, or has a NULL name. */
33186
33187 i = 0;
33188 while (mapping[i].arg != NULL
33189 && strcmp (mapping[i].arg, cpu_id) != 0
33190 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33191 i++;
33192
33193 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33194 }
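
/* For example, -mcpu=G5 in a 32-bit compile matches the "G5" row (or the
   "970" row through its ISA bits) and emits ".machine ppc970"; with -m64
   the first row fires via MASK_64BIT and emits ".machine ppc64"; with no
   match at all the terminating NULL row gives ".machine ppc".  */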
33195
33196 #endif /* TARGET_MACHO */
33197
33198 #if TARGET_ELF
33199 static int
33200 rs6000_elf_reloc_rw_mask (void)
33201 {
33202 if (flag_pic)
33203 return 3;
33204 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33205 return 2;
33206 else
33207 return 0;
33208 }
33209
33210 /* Record an element in the table of global constructors. SYMBOL is
33211 a SYMBOL_REF of the function to be called; PRIORITY is a number
33212 between 0 and MAX_INIT_PRIORITY.
33213
33214 This differs from default_named_section_asm_out_constructor in
33215 that we have special handling for -mrelocatable. */
33216
33217 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33218 static void
33219 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33220 {
33221 const char *section = ".ctors";
33222 char buf[18];
33223
33224 if (priority != DEFAULT_INIT_PRIORITY)
33225 {
33226 sprintf (buf, ".ctors.%.5u",
33227 /* Invert the numbering so the linker puts us in the proper
33228 order; constructors are run from right to left, and the
33229 linker sorts in increasing order. */
33230 MAX_INIT_PRIORITY - priority);
33231 section = buf;
33232 }
33233
33234 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33235 assemble_align (POINTER_SIZE);
33236
33237 if (DEFAULT_ABI == ABI_V4
33238 && (TARGET_RELOCATABLE || flag_pic > 1))
33239 {
33240 fputs ("\t.long (", asm_out_file);
33241 output_addr_const (asm_out_file, symbol);
33242 fputs (")@fixup\n", asm_out_file);
33243 }
33244 else
33245 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33246 }
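
/* A worked example of the inversion above: with MAX_INIT_PRIORITY of
   65535, __attribute__ ((constructor (100))) selects ".ctors.65435".
   The linker sorts the .ctors.* sections in increasing numeric order and
   the runtime walks .ctors from right to left, so the low-numbered
   (high-priority) constructor still runs first.  */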
33247
33248 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33249 static void
33250 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33251 {
33252 const char *section = ".dtors";
33253 char buf[18];
33254
33255 if (priority != DEFAULT_INIT_PRIORITY)
33256 {
33257 sprintf (buf, ".dtors.%.5u",
33258 /* Invert the numbering so the linker puts us in the proper
33259 order; destructors are run from left to right, and the
33260 linker sorts in increasing order. */
33261 MAX_INIT_PRIORITY - priority);
33262 section = buf;
33263 }
33264
33265 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33266 assemble_align (POINTER_SIZE);
33267
33268 if (DEFAULT_ABI == ABI_V4
33269 && (TARGET_RELOCATABLE || flag_pic > 1))
33270 {
33271 fputs ("\t.long (", asm_out_file);
33272 output_addr_const (asm_out_file, symbol);
33273 fputs (")@fixup\n", asm_out_file);
33274 }
33275 else
33276 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33277 }
33278
33279 void
33280 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33281 {
33282 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33283 {
33284 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33285 ASM_OUTPUT_LABEL (file, name);
33286 fputs (DOUBLE_INT_ASM_OP, file);
33287 rs6000_output_function_entry (file, name);
33288 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33289 if (DOT_SYMBOLS)
33290 {
33291 fputs ("\t.size\t", file);
33292 assemble_name (file, name);
33293 fputs (",24\n\t.type\t.", file);
33294 assemble_name (file, name);
33295 fputs (",@function\n", file);
33296 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33297 {
33298 fputs ("\t.globl\t.", file);
33299 assemble_name (file, name);
33300 putc ('\n', file);
33301 }
33302 }
33303 else
33304 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33305 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33306 rs6000_output_function_entry (file, name);
33307 fputs (":\n", file);
33308 return;
33309 }
33310
33311 int uses_toc;
33312 if (DEFAULT_ABI == ABI_V4
33313 && (TARGET_RELOCATABLE || flag_pic > 1)
33314 && !TARGET_SECURE_PLT
33315 && (!constant_pool_empty_p () || crtl->profile)
33316 && (uses_toc = uses_TOC ()))
33317 {
33318 char buf[256];
33319
33320 if (uses_toc == 2)
33321 switch_to_other_text_partition ();
33322 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33323
33324 fprintf (file, "\t.long ");
33325 assemble_name (file, toc_label_name);
33326 need_toc_init = 1;
33327 putc ('-', file);
33328 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33329 assemble_name (file, buf);
33330 putc ('\n', file);
33331 if (uses_toc == 2)
33332 switch_to_other_text_partition ();
33333 }
33334
33335 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33336 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33337
33338 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33339 {
33340 char buf[256];
33341
33342 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33343
33344 fprintf (file, "\t.quad .TOC.-");
33345 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33346 assemble_name (file, buf);
33347 putc ('\n', file);
33348 }
33349
33350 if (DEFAULT_ABI == ABI_AIX)
33351 {
33352 const char *desc_name, *orig_name;
33353
33354 orig_name = (*targetm.strip_name_encoding) (name);
33355 desc_name = orig_name;
33356 while (*desc_name == '.')
33357 desc_name++;
33358
33359 if (TREE_PUBLIC (decl))
33360 fprintf (file, "\t.globl %s\n", desc_name);
33361
33362 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33363 fprintf (file, "%s:\n", desc_name);
33364 fprintf (file, "\t.long %s\n", orig_name);
33365 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33366 fputs ("\t.long 0\n", file);
33367 fprintf (file, "\t.previous\n");
33368 }
33369 ASM_OUTPUT_LABEL (file, name);
33370 }
33371
33372 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33373 static void
33374 rs6000_elf_file_end (void)
33375 {
33376 #ifdef HAVE_AS_GNU_ATTRIBUTE
33377 /* ??? The value emitted depends on options active at file end.
33378 Assume anyone using #pragma or attributes that might change
33379 options knows what they are doing. */
33380 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33381 && rs6000_passes_float)
33382 {
33383 int fp;
33384
33385 if (TARGET_HARD_FLOAT)
33386 fp = 1;
33387 else
33388 fp = 2;
33389 if (rs6000_passes_long_double)
33390 {
33391 if (!TARGET_LONG_DOUBLE_128)
33392 fp |= 2 * 4;
33393 else if (TARGET_IEEEQUAD)
33394 fp |= 3 * 4;
33395 else
33396 fp |= 1 * 4;
33397 }
33398 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33399 }
33400 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33401 {
33402 if (rs6000_passes_vector)
33403 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33404 (TARGET_ALTIVEC_ABI ? 2 : 1));
33405 if (rs6000_returns_struct)
33406 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33407 aix_struct_return ? 2 : 1);
33408 }
33409 #endif
33410 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33411 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33412 file_end_indicate_exec_stack ();
33413 #endif
33414
33415 if (flag_split_stack)
33416 file_end_indicate_split_stack ();
33417
33418 if (cpu_builtin_p)
33419 {
33420 /* We have expanded a CPU builtin, so we need to emit a reference to
33421 the special symbol that LIBC uses to declare that it supports the
33422 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. */
33423 switch_to_section (data_section);
33424 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33425 fprintf (asm_out_file, "\t%s %s\n",
33426 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33427 }
33428 }
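
/* As a worked example of the FP attribute above: a hard-float compile
   whose calls pass IBM 128-bit long double emits ".gnu_attribute 4, 5"
   (1 | 1*4), while -mabi=ieeelongdouble gives ".gnu_attribute 4, 13"
   (1 | 3*4); the linker compares these tags to diagnose links that mix
   incompatible long-double ABIs.  */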
33429 #endif
33430
33431 #if TARGET_XCOFF
33432
33433 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33434 #define HAVE_XCOFF_DWARF_EXTRAS 0
33435 #endif
33436
33437 static enum unwind_info_type
33438 rs6000_xcoff_debug_unwind_info (void)
33439 {
33440 return UI_NONE;
33441 }
33442
33443 static void
33444 rs6000_xcoff_asm_output_anchor (rtx symbol)
33445 {
33446 char buffer[100];
33447
33448 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33449 SYMBOL_REF_BLOCK_OFFSET (symbol));
33450 fprintf (asm_out_file, "%s", SET_ASM_OP);
33451 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33452 fprintf (asm_out_file, ",");
33453 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33454 fprintf (asm_out_file, "\n");
33455 }
33456
33457 static void
33458 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33459 {
33460 fputs (GLOBAL_ASM_OP, stream);
33461 RS6000_OUTPUT_BASENAME (stream, name);
33462 putc ('\n', stream);
33463 }
33464
33465 /* A get_unnamed_section callback, used for read-only sections.
33466 DIRECTIVE points to the section string variable. */
33467
33468 static void
33469 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33470 {
33471 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33472 *(const char *const *) directive,
33473 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33474 }
33475
33476 /* Likewise for read-write sections. */
33477
33478 static void
33479 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33480 {
33481 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33482 *(const char *const *) directive,
33483 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33484 }
33485
33486 static void
33487 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33488 {
33489 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33490 *(const char *const *) directive,
33491 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33492 }
33493
33494 /* A get_unnamed_section callback, used for switching to toc_section. */
33495
33496 static void
33497 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33498 {
33499 if (TARGET_MINIMAL_TOC)
33500 {
33501 /* toc_section is always selected at least once from
33502 rs6000_xcoff_file_start, so this is guaranteed to be
33503 defined exactly once in each file. */
33504 if (!toc_initialized)
33505 {
33506 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33507 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33508 toc_initialized = 1;
33509 }
33510 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33511 (TARGET_32BIT ? "" : ",3"));
33512 }
33513 else
33514 fputs ("\t.toc\n", asm_out_file);
33515 }
33516
33517 /* Implement TARGET_ASM_INIT_SECTIONS. */
33518
33519 static void
33520 rs6000_xcoff_asm_init_sections (void)
33521 {
33522 read_only_data_section
33523 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33524 &xcoff_read_only_section_name);
33525
33526 private_data_section
33527 = get_unnamed_section (SECTION_WRITE,
33528 rs6000_xcoff_output_readwrite_section_asm_op,
33529 &xcoff_private_data_section_name);
33530
33531 read_only_private_data_section
33532 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33533 &xcoff_private_rodata_section_name);
33534
33535 tls_data_section
33536 = get_unnamed_section (SECTION_TLS,
33537 rs6000_xcoff_output_tls_section_asm_op,
33538 &xcoff_tls_data_section_name);
33539
33540 tls_private_data_section
33541 = get_unnamed_section (SECTION_TLS,
33542 rs6000_xcoff_output_tls_section_asm_op,
33543 &xcoff_private_data_section_name);
33544
33545 toc_section
33546 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33547
33548 readonly_data_section = read_only_data_section;
33549 }
33550
33551 static int
33552 rs6000_xcoff_reloc_rw_mask (void)
33553 {
33554 return 3;
33555 }
33556
33557 static void
33558 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33559 tree decl ATTRIBUTE_UNUSED)
33560 {
33561 int smclass;
33562 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33563
33564 if (flags & SECTION_EXCLUDE)
33565 smclass = 4;
33566 else if (flags & SECTION_DEBUG)
33567 {
33568 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33569 return;
33570 }
33571 else if (flags & SECTION_CODE)
33572 smclass = 0;
33573 else if (flags & SECTION_TLS)
33574 smclass = 3;
33575 else if (flags & SECTION_WRITE)
33576 smclass = 2;
33577 else
33578 smclass = 1;
33579
33580 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33581 (flags & SECTION_CODE) ? "." : "",
33582 name, suffix[smclass], flags & SECTION_ENTSIZE);
33583 }
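
/* For example, a writable named section "foo" whose ENTSIZE bits encode
   2**3 alignment is emitted as

	.csect foo[RW],3

   and a code section gains a leading dot, e.g. ".csect .text_fn[PR],2".
   The SECTION_ENTSIZE bits carry the log2 alignment computed by
   rs6000_xcoff_section_type_flags below; the section names here are
   illustrative.  */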
33584
33585 #define IN_NAMED_SECTION(DECL) \
33586 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33587 && DECL_SECTION_NAME (DECL) != NULL)
33588
33589 static section *
33590 rs6000_xcoff_select_section (tree decl, int reloc,
33591 unsigned HOST_WIDE_INT align)
33592 {
33593 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33594 a named section. */
33595 if (align > BIGGEST_ALIGNMENT)
33596 {
33597 resolve_unique_section (decl, reloc, true);
33598 if (IN_NAMED_SECTION (decl))
33599 return get_named_section (decl, NULL, reloc);
33600 }
33601
33602 if (decl_readonly_section (decl, reloc))
33603 {
33604 if (TREE_PUBLIC (decl))
33605 return read_only_data_section;
33606 else
33607 return read_only_private_data_section;
33608 }
33609 else
33610 {
33611 #if HAVE_AS_TLS
33612 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33613 {
33614 if (TREE_PUBLIC (decl))
33615 return tls_data_section;
33616 else if (bss_initializer_p (decl))
33617 {
33618 /* Convert to COMMON to emit in BSS. */
33619 DECL_COMMON (decl) = 1;
33620 return tls_comm_section;
33621 }
33622 else
33623 return tls_private_data_section;
33624 }
33625 else
33626 #endif
33627 if (TREE_PUBLIC (decl))
33628 return data_section;
33629 else
33630 return private_data_section;
33631 }
33632 }
33633
33634 static void
33635 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33636 {
33637 const char *name;
33638
33639 /* Use select_section for private data and uninitialized data with
33640 alignment <= BIGGEST_ALIGNMENT. */
33641 if (!TREE_PUBLIC (decl)
33642 || DECL_COMMON (decl)
33643 || (DECL_INITIAL (decl) == NULL_TREE
33644 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33645 || DECL_INITIAL (decl) == error_mark_node
33646 || (flag_zero_initialized_in_bss
33647 && initializer_zerop (DECL_INITIAL (decl))))
33648 return;
33649
33650 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33651 name = (*targetm.strip_name_encoding) (name);
33652 set_decl_section_name (decl, name);
33653 }
33654
33655 /* Select section for constant in constant pool.
33656
33657 On RS/6000, all constants are in the private read-only data area.
33658 However, if this is being placed in the TOC it must be output as a
33659 TOC entry. */
33660
33661 static section *
33662 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33663 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33664 {
33665 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33666 return toc_section;
33667 else
33668 return read_only_private_data_section;
33669 }
33670
33671 /* Remove any trailing [DS] or the like from the symbol name. */
33672
33673 static const char *
33674 rs6000_xcoff_strip_name_encoding (const char *name)
33675 {
33676 size_t len;
33677 if (*name == '*')
33678 name++;
33679 len = strlen (name);
33680 if (name[len - 1] == ']')
33681 return ggc_alloc_string (name, len - 4);
33682 else
33683 return name;
33684 }
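
/* E.g. "foo[DS]" and "*bar[RW]" (illustrative names) come back as the
   bare "foo" and "bar".  Dropping the final four characters relies on
   the mapping classes appended in this file always being two letters
   wide.  */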
33685
33686 /* Section attributes. AIX is always PIC. */
33687
33688 static unsigned int
33689 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33690 {
33691 unsigned int align;
33692 unsigned int flags = default_section_type_flags (decl, name, reloc);
33693
33694 /* Align to at least UNIT size. */
33695 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33696 align = MIN_UNITS_PER_WORD;
33697 else
33698 /* Increase alignment of large objects if not already stricter. */
33699 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33700 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33701 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33702
33703 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33704 }
33705
33706 /* Output at beginning of assembler file.
33707
33708 Initialize the section names for the RS/6000 at this point.
33709
33710 Specify filename, including full path, to assembler.
33711
33712 We want to go into the TOC section so at least one .toc will be emitted.
33713 Also, in order to output proper .bs/.es pairs, we need at least one static
33714 [RW] section emitted.
33715
33716 Finally, declare mcount when profiling to make the assembler happy. */
33717
33718 static void
33719 rs6000_xcoff_file_start (void)
33720 {
33721 rs6000_gen_section_name (&xcoff_bss_section_name,
33722 main_input_filename, ".bss_");
33723 rs6000_gen_section_name (&xcoff_private_data_section_name,
33724 main_input_filename, ".rw_");
33725 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
33726 main_input_filename, ".rop_");
33727 rs6000_gen_section_name (&xcoff_read_only_section_name,
33728 main_input_filename, ".ro_");
33729 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33730 main_input_filename, ".tls_");
33731 rs6000_gen_section_name (&xcoff_tbss_section_name,
33732 main_input_filename, ".tbss_[UL]");
33733
33734 fputs ("\t.file\t", asm_out_file);
33735 output_quoted_string (asm_out_file, main_input_filename);
33736 fputc ('\n', asm_out_file);
33737 if (write_symbols != NO_DEBUG)
33738 switch_to_section (private_data_section);
33739 switch_to_section (toc_section);
33740 switch_to_section (text_section);
33741 if (profile_flag)
33742 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33743 rs6000_file_start ();
33744 }
33745
33746 /* Output at end of assembler file.
33747 On the RS/6000, referencing data should automatically pull in text. */
33748
33749 static void
33750 rs6000_xcoff_file_end (void)
33751 {
33752 switch_to_section (text_section);
33753 fputs ("_section_.text:\n", asm_out_file);
33754 switch_to_section (data_section);
33755 fputs (TARGET_32BIT
33756 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33757 asm_out_file);
33758 }
33759
33760 struct declare_alias_data
33761 {
33762 FILE *file;
33763 bool function_descriptor;
33764 };
33765
33766 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33767
33768 static bool
33769 rs6000_declare_alias (struct symtab_node *n, void *d)
33770 {
33771 struct declare_alias_data *data = (struct declare_alias_data *)d;
33772 /* The main symbol is output specially, because the varasm machinery does
33773 part of the job for us; we do not need to declare .globl/.lglobl and such. */
33774 if (!n->alias || n->weakref)
33775 return false;
33776
33777 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33778 return false;
33779
33780 /* Prevent assemble_alias from trying to use the .set pseudo-op,
33781 which does not behave as the middle-end expects. */
33782 TREE_ASM_WRITTEN (n->decl) = true;
33783
33784 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33785 char *buffer = (char *) alloca (strlen (name) + 2);
33786 char *p;
33787 int dollar_inside = 0;
33788
33789 strcpy (buffer, name);
33790 p = strchr (buffer, '$');
33791 while (p) {
33792 *p = '_';
33793 dollar_inside++;
33794 p = strchr (p + 1, '$');
33795 }
33796 if (TREE_PUBLIC (n->decl))
33797 {
33798 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33799 {
33800 if (dollar_inside) {
33801 if (data->function_descriptor)
33802 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33803 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33804 }
33805 if (data->function_descriptor)
33806 {
33807 fputs ("\t.globl .", data->file);
33808 RS6000_OUTPUT_BASENAME (data->file, buffer);
33809 putc ('\n', data->file);
33810 }
33811 fputs ("\t.globl ", data->file);
33812 RS6000_OUTPUT_BASENAME (data->file, buffer);
33813 putc ('\n', data->file);
33814 }
33815 #ifdef ASM_WEAKEN_DECL
33816 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33817 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33818 #endif
33819 }
33820 else
33821 {
33822 if (dollar_inside)
33823 {
33824 if (data->function_descriptor)
33825 fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33826 fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33827 }
33828 if (data->function_descriptor)
33829 {
33830 fputs ("\t.lglobl .", data->file);
33831 RS6000_OUTPUT_BASENAME (data->file, buffer);
33832 putc ('\n', data->file);
33833 }
33834 fputs ("\t.lglobl ", data->file);
33835 RS6000_OUTPUT_BASENAME (data->file, buffer);
33836 putc ('\n', data->file);
33837 }
33838 if (data->function_descriptor)
33839 fputs (".", data->file);
33840 RS6000_OUTPUT_BASENAME (data->file, buffer);
33841 fputs (":\n", data->file);
33842 return false;
33843 }
33844
33845
33846 #ifdef HAVE_GAS_HIDDEN
33847 /* Helper function to calculate visibility of a DECL
33848 and return the value as a const string. */
33849
33850 static const char *
33851 rs6000_xcoff_visibility (tree decl)
33852 {
33853 static const char * const visibility_types[] = {
33854 "", ",protected", ",hidden", ",internal"
33855 };
33856
33857 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33858 return visibility_types[vis];
33859 }
33860 #endif
33861
33862
33863 /* This macro produces the initial definition of a function name.
33864 On the RS/6000, we need to place an extra '.' in the function name and
33865 output the function descriptor.
33866 Dollar signs are converted to underscores.
33867
33868 The csect for the function will have already been created when
33869 text_section was selected. We do have to go back to that csect, however.
33870
33871 The third and fourth parameters to the .function pseudo-op (16 and 044)
33872 are placeholders which no longer have any use.
33873
33874 Because AIX assembler's .set command has unexpected semantics, we output
33875 all aliases as alternative labels in front of the definition. */
33876
33877 void
33878 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33879 {
33880 char *buffer = (char *) alloca (strlen (name) + 1);
33881 char *p;
33882 int dollar_inside = 0;
33883 struct declare_alias_data data = {file, false};
33884
33885 strcpy (buffer, name);
33886 p = strchr (buffer, '$');
33887 while (p) {
33888 *p = '_';
33889 dollar_inside++;
33890 p = strchr (p + 1, '$');
33891 }
33892 if (TREE_PUBLIC (decl))
33893 {
33894 if (!RS6000_WEAK || !DECL_WEAK (decl))
33895 {
33896 if (dollar_inside) {
33897 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
33898 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
33899 }
33900 fputs ("\t.globl .", file);
33901 RS6000_OUTPUT_BASENAME (file, buffer);
33902 #ifdef HAVE_GAS_HIDDEN
33903 fputs (rs6000_xcoff_visibility (decl), file);
33904 #endif
33905 putc ('\n', file);
33906 }
33907 }
33908 else
33909 {
33910 if (dollar_inside) {
33911 fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
33912 fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
33913 }
33914 fputs ("\t.lglobl .", file);
33915 RS6000_OUTPUT_BASENAME (file, buffer);
33916 putc ('\n', file);
33917 }
33918 fputs ("\t.csect ", file);
33919 RS6000_OUTPUT_BASENAME (file, buffer);
33920 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33921 RS6000_OUTPUT_BASENAME (file, buffer);
33922 fputs (":\n", file);
33923 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33924 &data, true);
33925 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33926 RS6000_OUTPUT_BASENAME (file, buffer);
33927 fputs (", TOC[tc0], 0\n", file);
33928 in_section = NULL;
33929 switch_to_section (function_section (decl));
33930 putc ('.', file);
33931 RS6000_OUTPUT_BASENAME (file, buffer);
33932 fputs (":\n", file);
33933 data.function_descriptor = true;
33934 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33935 &data, true);
33936 if (!DECL_IGNORED_P (decl))
33937 {
33938 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33939 xcoffout_declare_function (file, decl, buffer);
33940 else if (write_symbols == DWARF2_DEBUG)
33941 {
33942 name = (*targetm.strip_name_encoding) (name);
33943 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33944 }
33945 }
33946 return;
33947 }
33948
33949
33950 /* Output assembly language to globalize a symbol from a DECL,
33951 possibly with visibility. */
33952
33953 void
33954 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33955 {
33956 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33957 fputs (GLOBAL_ASM_OP, stream);
33958 RS6000_OUTPUT_BASENAME (stream, name);
33959 #ifdef HAVE_GAS_HIDDEN
33960 fputs (rs6000_xcoff_visibility (decl), stream);
33961 #endif
33962 putc ('\n', stream);
33963 }
33964
33965 /* Output assembly language to define a symbol as COMMON from a DECL,
33966 possibly with visibility. */
33967
33968 void
33969 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33970 tree decl ATTRIBUTE_UNUSED,
33971 const char *name,
33972 unsigned HOST_WIDE_INT size,
33973 unsigned HOST_WIDE_INT align)
33974 {
33975 unsigned HOST_WIDE_INT align2 = 2;
33976
33977 if (align > 32)
33978 align2 = floor_log2 (align / BITS_PER_UNIT);
33979 else if (size > 4)
33980 align2 = 3;
33981
33982 fputs (COMMON_ASM_OP, stream);
33983 RS6000_OUTPUT_BASENAME (stream, name);
33984
33985 fprintf (stream,
33986 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33987 size, align2);
33988
33989 #ifdef HAVE_GAS_HIDDEN
33990 if (decl != NULL)
33991 fputs (rs6000_xcoff_visibility (decl), stream);
33992 #endif
33993 putc ('\n', stream);
33994 }
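
/* For example (illustrative name), an 8-byte common object with only
   32-bit alignment is emitted as

	.comm	sym,8,3

   through the SIZE > 4 branch, while a 64-byte-aligned object gets ",6"
   from the floor_log2 path; the third operand is a log2 alignment, as
   the AIX .comm/.lcomm directives expect.  */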
33995
33996 /* This macro produces the initial definition of an object (variable) name.
33997 Because AIX assembler's .set command has unexpected semantics, we output
33998 all aliases as alternative labels in front of the definition. */
33999
34000 void
34001 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34002 {
34003 struct declare_alias_data data = {file, false};
34004 RS6000_OUTPUT_BASENAME (file, name);
34005 fputs (":\n", file);
34006 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34007 &data, true);
34008 }
34009
34010 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34011
34012 void
34013 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34014 {
34015 fputs (integer_asm_op (size, FALSE), file);
34016 assemble_name (file, label);
34017 fputs ("-$", file);
34018 }
34019
34020 /* Output a symbol offset relative to the dbase for the current object.
34021 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34022 signed offsets.
34023
34024 __gcc_unwind_dbase is embedded in all executables/libraries through
34025 libgcc/config/rs6000/crtdbase.S. */
34026
34027 void
34028 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34029 {
34030 fputs (integer_asm_op (size, FALSE), file);
34031 assemble_name (file, label);
34032 fputs("-__gcc_unwind_dbase", file);
34033 }
34034
34035 #ifdef HAVE_AS_TLS
34036 static void
34037 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34038 {
34039 rtx symbol;
34040 int flags;
34041 const char *symname;
34042
34043 default_encode_section_info (decl, rtl, first);
34044
34045 /* Careful not to prod global register variables. */
34046 if (!MEM_P (rtl))
34047 return;
34048 symbol = XEXP (rtl, 0);
34049 if (!SYMBOL_REF_P (symbol))
34050 return;
34051
34052 flags = SYMBOL_REF_FLAGS (symbol);
34053
34054 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34055 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34056
34057 SYMBOL_REF_FLAGS (symbol) = flags;
34058
34059 /* Append mapping class to extern decls. */
34060 symname = XSTR (symbol, 0);
34061 if (decl /* sync condition with assemble_external () */
34062 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34063 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34064 || TREE_CODE (decl) == FUNCTION_DECL)
34065 && symname[strlen (symname) - 1] != ']')
34066 {
34067 char *newname = (char *) alloca (strlen (symname) + 5);
34068 strcpy (newname, symname);
34069 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34070 ? "[DS]" : "[UA]"));
34071 XSTR (symbol, 0) = ggc_strdup (newname);
34072 }
34073 }
34074 #endif /* HAVE_AS_TLS */
34075 #endif /* TARGET_XCOFF */
34076
34077 void
34078 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34079 const char *name, const char *val)
34080 {
34081 fputs ("\t.weak\t", stream);
34082 RS6000_OUTPUT_BASENAME (stream, name);
34083 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34084 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34085 {
34086 if (TARGET_XCOFF)
34087 fputs ("[DS]", stream);
34088 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34089 if (TARGET_XCOFF)
34090 fputs (rs6000_xcoff_visibility (decl), stream);
34091 #endif
34092 fputs ("\n\t.weak\t.", stream);
34093 RS6000_OUTPUT_BASENAME (stream, name);
34094 }
34095 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34096 if (TARGET_XCOFF)
34097 fputs (rs6000_xcoff_visibility (decl), stream);
34098 #endif
34099 fputc ('\n', stream);
34100 if (val)
34101 {
34102 #ifdef ASM_OUTPUT_DEF
34103 ASM_OUTPUT_DEF (stream, name, val);
34104 #endif
34105 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34106 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34107 {
34108 fputs ("\t.set\t.", stream);
34109 RS6000_OUTPUT_BASENAME (stream, name);
34110 fputs (",.", stream);
34111 RS6000_OUTPUT_BASENAME (stream, val);
34112 fputc ('\n', stream);
34113 }
34114 }
34115 }
34116
34117
34118 /* Return true if INSN should not be copied. */
34119
34120 static bool
34121 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34122 {
34123 return recog_memoized (insn) >= 0
34124 && get_attr_cannot_copy (insn);
34125 }
34126
34127 /* Compute a (partial) cost for rtx X. Return true if the complete
34128 cost has been computed, and false if subexpressions should be
34129 scanned. In either case, *TOTAL contains the cost result. */
34130
34131 static bool
34132 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34133 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34134 {
34135 int code = GET_CODE (x);
34136
34137 switch (code)
34138 {
34139 /* On the RS/6000, if it is valid in the insn, it is free. */
34140 case CONST_INT:
34141 if (((outer_code == SET
34142 || outer_code == PLUS
34143 || outer_code == MINUS)
34144 && (satisfies_constraint_I (x)
34145 || satisfies_constraint_L (x)))
34146 || (outer_code == AND
34147 && (satisfies_constraint_K (x)
34148 || (mode == SImode
34149 ? satisfies_constraint_L (x)
34150 : satisfies_constraint_J (x))))
34151 || ((outer_code == IOR || outer_code == XOR)
34152 && (satisfies_constraint_K (x)
34153 || (mode == SImode
34154 ? satisfies_constraint_L (x)
34155 : satisfies_constraint_J (x))))
34156 || outer_code == ASHIFT
34157 || outer_code == ASHIFTRT
34158 || outer_code == LSHIFTRT
34159 || outer_code == ROTATE
34160 || outer_code == ROTATERT
34161 || outer_code == ZERO_EXTRACT
34162 || (outer_code == MULT
34163 && satisfies_constraint_I (x))
34164 || ((outer_code == DIV || outer_code == UDIV
34165 || outer_code == MOD || outer_code == UMOD)
34166 && exact_log2 (INTVAL (x)) >= 0)
34167 || (outer_code == COMPARE
34168 && (satisfies_constraint_I (x)
34169 || satisfies_constraint_K (x)))
34170 || ((outer_code == EQ || outer_code == NE)
34171 && (satisfies_constraint_I (x)
34172 || satisfies_constraint_K (x)
34173 || (mode == SImode
34174 ? satisfies_constraint_L (x)
34175 : satisfies_constraint_J (x))))
34176 || (outer_code == GTU
34177 && satisfies_constraint_I (x))
34178 || (outer_code == LTU
34179 && satisfies_constraint_P (x)))
34180 {
34181 *total = 0;
34182 return true;
34183 }
34184 else if ((outer_code == PLUS
34185 && reg_or_add_cint_operand (x, VOIDmode))
34186 || (outer_code == MINUS
34187 && reg_or_sub_cint_operand (x, VOIDmode))
34188 || ((outer_code == SET
34189 || outer_code == IOR
34190 || outer_code == XOR)
34191 && (INTVAL (x)
34192 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34193 {
34194 *total = COSTS_N_INSNS (1);
34195 return true;
34196 }
34197 /* FALLTHRU */
34198
34199 case CONST_DOUBLE:
34200 case CONST_WIDE_INT:
34201 case CONST:
34202 case HIGH:
34203 case SYMBOL_REF:
34204 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34205 return true;
34206
34207 case MEM:
34208 /* When optimizing for size, MEM should be slightly more expensive
34209 than generating the address, e.g., (plus (reg) (const)).
34210 L1 cache latency is about two instructions. */
34211 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34212 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34213 *total += COSTS_N_INSNS (100);
34214 return true;
34215
34216 case LABEL_REF:
34217 *total = 0;
34218 return true;
34219
34220 case PLUS:
34221 case MINUS:
34222 if (FLOAT_MODE_P (mode))
34223 *total = rs6000_cost->fp;
34224 else
34225 *total = COSTS_N_INSNS (1);
34226 return false;
34227
34228 case MULT:
34229 if (CONST_INT_P (XEXP (x, 1))
34230 && satisfies_constraint_I (XEXP (x, 1)))
34231 {
34232 if (INTVAL (XEXP (x, 1)) >= -256
34233 && INTVAL (XEXP (x, 1)) <= 255)
34234 *total = rs6000_cost->mulsi_const9;
34235 else
34236 *total = rs6000_cost->mulsi_const;
34237 }
34238 else if (mode == SFmode)
34239 *total = rs6000_cost->fp;
34240 else if (FLOAT_MODE_P (mode))
34241 *total = rs6000_cost->dmul;
34242 else if (mode == DImode)
34243 *total = rs6000_cost->muldi;
34244 else
34245 *total = rs6000_cost->mulsi;
34246 return false;
34247
34248 case FMA:
34249 if (mode == SFmode)
34250 *total = rs6000_cost->fp;
34251 else
34252 *total = rs6000_cost->dmul;
34253 break;
34254
34255 case DIV:
34256 case MOD:
34257 if (FLOAT_MODE_P (mode))
34258 {
34259 *total = mode == DFmode ? rs6000_cost->ddiv
34260 : rs6000_cost->sdiv;
34261 return false;
34262 }
34263 /* FALLTHRU */
34264
34265 case UDIV:
34266 case UMOD:
34267 if (CONST_INT_P (XEXP (x, 1))
34268 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34269 {
34270 if (code == DIV || code == MOD)
34271 /* Shift, addze */
34272 *total = COSTS_N_INSNS (2);
34273 else
34274 /* Shift */
34275 *total = COSTS_N_INSNS (1);
34276 }
34277 else
34278 {
34279 if (GET_MODE (XEXP (x, 1)) == DImode)
34280 *total = rs6000_cost->divdi;
34281 else
34282 *total = rs6000_cost->divsi;
34283 }
34284 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34285 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34286 *total += COSTS_N_INSNS (2);
34287 return false;
34288
34289 case CTZ:
34290 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34291 return false;
34292
34293 case FFS:
34294 *total = COSTS_N_INSNS (4);
34295 return false;
34296
34297 case POPCOUNT:
34298 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34299 return false;
34300
34301 case PARITY:
34302 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34303 return false;
34304
34305 case NOT:
34306 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34307 *total = 0;
34308 else
34309 *total = COSTS_N_INSNS (1);
34310 return false;
34311
34312 case AND:
34313 if (CONST_INT_P (XEXP (x, 1)))
34314 {
34315 rtx left = XEXP (x, 0);
34316 rtx_code left_code = GET_CODE (left);
34317
34318 /* rotate-and-mask: 1 insn. */
34319 if ((left_code == ROTATE
34320 || left_code == ASHIFT
34321 || left_code == LSHIFTRT)
34322 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34323 {
34324 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34325 if (!CONST_INT_P (XEXP (left, 1)))
34326 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34327 *total += COSTS_N_INSNS (1);
34328 return true;
34329 }
34330
34331 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34332 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34333 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34334 || (val & 0xffff) == val
34335 || (val & 0xffff0000) == val
34336 || ((val & 0xffff) == 0 && mode == SImode))
34337 {
34338 *total = rtx_cost (left, mode, AND, 0, speed);
34339 *total += COSTS_N_INSNS (1);
34340 return true;
34341 }
34342
34343 /* 2 insns. */
34344 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34345 {
34346 *total = rtx_cost (left, mode, AND, 0, speed);
34347 *total += COSTS_N_INSNS (2);
34348 return true;
34349 }
34350 }
34351
34352 *total = COSTS_N_INSNS (1);
34353 return false;
34354
34355 case IOR:
34356 /* FIXME */
34357 *total = COSTS_N_INSNS (1);
34358 return true;
34359
34360 case CLZ:
34361 case XOR:
34362 case ZERO_EXTRACT:
34363 *total = COSTS_N_INSNS (1);
34364 return false;
34365
34366 case ASHIFT:
34367 /* The EXTSWSLI instruction is a combined instruction. Don't count
34368 the sign extension and the shift separately within the insn. */
34369 if (TARGET_EXTSWSLI && mode == DImode
34370 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34371 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34372 {
34373 *total = 0;
34374 return false;
34375 }
34376 /* fall through */
34377
34378 case ASHIFTRT:
34379 case LSHIFTRT:
34380 case ROTATE:
34381 case ROTATERT:
34382 /* Handle mul_highpart. */
34383 if (outer_code == TRUNCATE
34384 && GET_CODE (XEXP (x, 0)) == MULT)
34385 {
34386 if (mode == DImode)
34387 *total = rs6000_cost->muldi;
34388 else
34389 *total = rs6000_cost->mulsi;
34390 return true;
34391 }
34392 else if (outer_code == AND)
34393 *total = 0;
34394 else
34395 *total = COSTS_N_INSNS (1);
34396 return false;
34397
34398 case SIGN_EXTEND:
34399 case ZERO_EXTEND:
34400 if (MEM_P (XEXP (x, 0)))
34401 *total = 0;
34402 else
34403 *total = COSTS_N_INSNS (1);
34404 return false;
34405
34406 case COMPARE:
34407 case NEG:
34408 case ABS:
34409 if (!FLOAT_MODE_P (mode))
34410 {
34411 *total = COSTS_N_INSNS (1);
34412 return false;
34413 }
34414 /* FALLTHRU */
34415
34416 case FLOAT:
34417 case UNSIGNED_FLOAT:
34418 case FIX:
34419 case UNSIGNED_FIX:
34420 case FLOAT_TRUNCATE:
34421 *total = rs6000_cost->fp;
34422 return false;
34423
34424 case FLOAT_EXTEND:
34425 if (mode == DFmode)
34426 *total = rs6000_cost->sfdf_convert;
34427 else
34428 *total = rs6000_cost->fp;
34429 return false;
34430
34431 case UNSPEC:
34432 switch (XINT (x, 1))
34433 {
34434 case UNSPEC_FRSP:
34435 *total = rs6000_cost->fp;
34436 return true;
34437
34438 default:
34439 break;
34440 }
34441 break;
34442
34443 case CALL:
34444 case IF_THEN_ELSE:
34445 if (!speed)
34446 {
34447 *total = COSTS_N_INSNS (1);
34448 return true;
34449 }
34450 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34451 {
34452 *total = rs6000_cost->fp;
34453 return false;
34454 }
34455 break;
34456
34457 case NE:
34458 case EQ:
34459 case GTU:
34460 case LTU:
34461 /* Carry bit requires mode == Pmode.
34462 NEG or PLUS already counted so only add one. */
34463 if (mode == Pmode
34464 && (outer_code == NEG || outer_code == PLUS))
34465 {
34466 *total = COSTS_N_INSNS (1);
34467 return true;
34468 }
34469 /* FALLTHRU */
34470
34471 case GT:
34472 case LT:
34473 case UNORDERED:
34474 if (outer_code == SET)
34475 {
34476 if (XEXP (x, 1) == const0_rtx)
34477 {
34478 *total = COSTS_N_INSNS (2);
34479 return true;
34480 }
34481 else
34482 {
34483 *total = COSTS_N_INSNS (3);
34484 return false;
34485 }
34486 }
34487 /* CC COMPARE. */
34488 if (outer_code == COMPARE)
34489 {
34490 *total = 0;
34491 return true;
34492 }
34493 break;
34494
34495 default:
34496 break;
34497 }
34498
34499 return false;
34500 }
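
/* Two hedged examples of the costing above: in (plus (reg) (const_int
   12)) the constant satisfies the 16-bit "I" constraint, so it is
   reported as free and the addition folds into a single addi; a MEM
   costed with !SPEED returns COSTS_N_INSNS (1) + 1, keeping a load
   slightly dearer than recomputing its address when optimizing for
   size.  */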
34501
34502 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34503
34504 static bool
34505 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34506 int opno, int *total, bool speed)
34507 {
34508 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34509
34510 fprintf (stderr,
34511 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34512 "opno = %d, total = %d, speed = %s, x:\n",
34513 ret ? "complete" : "scan inner",
34514 GET_MODE_NAME (mode),
34515 GET_RTX_NAME (outer_code),
34516 opno,
34517 *total,
34518 speed ? "true" : "false");
34519
34520 debug_rtx (x);
34521
34522 return ret;
34523 }
34524
34525 static int
34526 rs6000_insn_cost (rtx_insn *insn, bool speed)
34527 {
34528 if (recog_memoized (insn) < 0)
34529 return 0;
34530
34531 if (!speed)
34532 return get_attr_length (insn);
34533
34534 int cost = get_attr_cost (insn);
34535 if (cost > 0)
34536 return cost;
34537
34538 int n = get_attr_length (insn) / 4;
34539 enum attr_type type = get_attr_type (insn);
34540
34541 switch (type)
34542 {
34543 case TYPE_LOAD:
34544 case TYPE_FPLOAD:
34545 case TYPE_VECLOAD:
34546 cost = COSTS_N_INSNS (n + 1);
34547 break;
34548
34549 case TYPE_MUL:
34550 switch (get_attr_size (insn))
34551 {
34552 case SIZE_8:
34553 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34554 break;
34555 case SIZE_16:
34556 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34557 break;
34558 case SIZE_32:
34559 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34560 break;
34561 case SIZE_64:
34562 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34563 break;
34564 default:
34565 gcc_unreachable ();
34566 }
34567 break;
34568 case TYPE_DIV:
34569 switch (get_attr_size (insn))
34570 {
34571 case SIZE_32:
34572 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34573 break;
34574 case SIZE_64:
34575 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34576 break;
34577 default:
34578 gcc_unreachable ();
34579 }
34580 break;
34581
34582 case TYPE_FP:
34583 cost = n * rs6000_cost->fp;
34584 break;
34585 case TYPE_DMUL:
34586 cost = n * rs6000_cost->dmul;
34587 break;
34588 case TYPE_SDIV:
34589 cost = n * rs6000_cost->sdiv;
34590 break;
34591 case TYPE_DDIV:
34592 cost = n * rs6000_cost->ddiv;
34593 break;
34594
34595 case TYPE_SYNC:
34596 case TYPE_LOAD_L:
34597 case TYPE_MFCR:
34598 case TYPE_MFCRF:
34599 cost = COSTS_N_INSNS (n + 2);
34600 break;
34601
34602 default:
34603 cost = COSTS_N_INSNS (n);
34604 }
34605
34606 return cost;
34607 }
34608
34609 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34610
34611 static int
34612 rs6000_debug_address_cost (rtx x, machine_mode mode,
34613 addr_space_t as, bool speed)
34614 {
34615 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34616
34617 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34618 ret, speed ? "true" : "false");
34619 debug_rtx (x);
34620
34621 return ret;
34622 }
34623
34624
34625 /* A C expression returning the cost of moving data from a register of class
34626 CLASS1 to one of CLASS2. */
34627
34628 static int
34629 rs6000_register_move_cost (machine_mode mode,
34630 reg_class_t from, reg_class_t to)
34631 {
34632 int ret;
34633 reg_class_t rclass;
34634
34635 if (TARGET_DEBUG_COST)
34636 dbg_cost_ctrl++;
34637
34638 /* If we have VSX, we can easily move between FPR or Altivec registers,
34639 otherwise we can only easily move within classes.
34640 Do this first so we give best-case answers for union classes
34641 containing both gprs and vsx regs. */
34642 HARD_REG_SET to_vsx, from_vsx;
34643 COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
34644 AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
34645 COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
34646 AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
34647 if (!hard_reg_set_empty_p (to_vsx)
34648 && !hard_reg_set_empty_p (from_vsx)
34649 && (TARGET_VSX
34650 || hard_reg_set_intersect_p (to_vsx, from_vsx)))
34651 {
34652 int reg = FIRST_FPR_REGNO;
34653 if (TARGET_VSX
34654 || (TEST_HARD_REG_BIT (to_vsx, FIRST_ALTIVEC_REGNO)
34655 && TEST_HARD_REG_BIT (from_vsx, FIRST_ALTIVEC_REGNO)))
34656 reg = FIRST_ALTIVEC_REGNO;
34657 ret = 2 * hard_regno_nregs (reg, mode);
34658 }
34659
34660 /* Moves from/to GENERAL_REGS. */
34661 else if ((rclass = from, reg_classes_intersect_p (to, GENERAL_REGS))
34662 || (rclass = to, reg_classes_intersect_p (from, GENERAL_REGS)))
34663 {
34664 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34665 {
34666 if (TARGET_DIRECT_MOVE)
34667 {
34668 /* Keep the cost for direct moves above that for within
34669 a register class even if the actual processor cost is
34670 comparable. We do this because a direct move insn
34671 can't be a nop, whereas with ideal register
34672 allocation a move within the same class might turn
34673 out to be a nop. */
34674 if (rs6000_tune == PROCESSOR_POWER9
34675 || rs6000_tune == PROCESSOR_FUTURE)
34676 ret = 3 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34677 else
34678 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34679 /* SFmode requires a conversion when moving between gprs
34680 and vsx. */
34681 if (mode == SFmode)
34682 ret += 2;
34683 }
34684 else
34685 ret = (rs6000_memory_move_cost (mode, rclass, false)
34686 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34687 }
34688
34689 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34690 shift. */
34691 else if (rclass == CR_REGS)
34692 ret = 4;
34693
34694 /* For those processors that have slow LR/CTR moves, make them more
34695 expensive than memory in order to bias spills to memory. */
34696 else if ((rs6000_tune == PROCESSOR_POWER6
34697 || rs6000_tune == PROCESSOR_POWER7
34698 || rs6000_tune == PROCESSOR_POWER8
34699 || rs6000_tune == PROCESSOR_POWER9)
34700 && reg_class_subset_p (rclass, SPECIAL_REGS))
34701 ret = 6 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34702
34703 else
34704 /* A move will cost one instruction per GPR moved. */
34705 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34706 }
34707
34708 /* Everything else has to go through GENERAL_REGS. */
34709 else
34710 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34711 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34712
34713 if (TARGET_DEBUG_COST)
34714 {
34715 if (dbg_cost_ctrl == 1)
34716 fprintf (stderr,
34717 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
34718 ret, GET_MODE_NAME (mode), reg_class_names[from],
34719 reg_class_names[to]);
34720 dbg_cost_ctrl--;
34721 }
34722
34723 return ret;
34724 }
34725
34726 /* A C expression returning the cost of moving data of MODE from a register to
34727 or from memory. */
34728
34729 static int
34730 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34731 bool in ATTRIBUTE_UNUSED)
34732 {
34733 int ret;
34734
34735 if (TARGET_DEBUG_COST)
34736 dbg_cost_ctrl++;
34737
34738 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34739 ret = 4 * hard_regno_nregs (0, mode);
34740 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34741 || reg_classes_intersect_p (rclass, VSX_REGS)))
34742 ret = 4 * hard_regno_nregs (32, mode);
34743 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34744 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34745 else
34746 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34747
34748 if (TARGET_DEBUG_COST)
34749 {
34750 if (dbg_cost_ctrl == 1)
34751 fprintf (stderr,
34752 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34753 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34754 dbg_cost_ctrl--;
34755 }
34756
34757 return ret;
34758 }
34759
34760 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
34761
34762 The register allocator chooses GEN_OR_VSX_REGS for the allocno
34763 class if GENERAL_REGS and VSX_REGS cost is lower than the memory
34764 cost. This happens a lot when TARGET_DIRECT_MOVE makes the register
34765 move cost between GENERAL_REGS and VSX_REGS low.
34766
34767 It might seem reasonable to use a union class. After all, if usage
34768 of vsr is low and gpr high, it might make sense to spill gpr to vsr
34769 rather than memory. However, in cases where register pressure of
34770 both is high, like the cactus_adm spec test, allowing
34771 GEN_OR_VSX_REGS as the allocno class results in bad decisions in
34772 the first scheduling pass. This is partly due to an allocno of
34773 GEN_OR_VSX_REGS wrongly contributing to the GENERAL_REGS pressure
34774 class, which gives too high a pressure for GENERAL_REGS and too low
34775 for VSX_REGS. So, force a choice of the subclass here.
34776
34777 The best class is also the union if GENERAL_REGS and VSX_REGS have
34778 the same cost. In that case we do use GEN_OR_VSX_REGS as the
34779 allocno class, since trying to narrow down the class by regno mode
34780 is prone to error. For example, SImode is allowed in VSX regs and
34781 in some cases (e.g. gcc.target/powerpc/p9-xxbr-3.c do_bswap32_vect)
34782 it would be wrong to choose an allocno of GENERAL_REGS based on
34783 SImode. */
34784
34785 static reg_class_t
34786 rs6000_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
34787 reg_class_t allocno_class,
34788 reg_class_t best_class)
34789 {
34790 switch (allocno_class)
34791 {
34792 case GEN_OR_VSX_REGS:
34793 /* best_class must be a subset of allocno_class. */
34794 gcc_checking_assert (best_class == GEN_OR_VSX_REGS
34795 || best_class == GEN_OR_FLOAT_REGS
34796 || best_class == VSX_REGS
34797 || best_class == ALTIVEC_REGS
34798 || best_class == FLOAT_REGS
34799 || best_class == GENERAL_REGS
34800 || best_class == BASE_REGS);
34801 /* Use best_class but choose wider classes when copying from the
34802 wider class to best_class is cheap. This mimics IRA choice
34803 of allocno class. */
34804 if (best_class == BASE_REGS)
34805 return GENERAL_REGS;
34806 if (TARGET_VSX
34807 && (best_class == FLOAT_REGS || best_class == ALTIVEC_REGS))
34808 return VSX_REGS;
34809 return best_class;
34810
34811 default:
34812 break;
34813 }
34814
34815 return allocno_class;
34816 }
34817
34818 /* Returns a code for a target-specific builtin that implements
34819 reciprocal of the function, or NULL_TREE if not available. */
34820
34821 static tree
34822 rs6000_builtin_reciprocal (tree fndecl)
34823 {
34824 switch (DECL_FUNCTION_CODE (fndecl))
34825 {
34826 case VSX_BUILTIN_XVSQRTDP:
34827 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34828 return NULL_TREE;
34829
34830 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34831
34832 case VSX_BUILTIN_XVSQRTSP:
34833 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34834 return NULL_TREE;
34835
34836 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34837
34838 default:
34839 return NULL_TREE;
34840 }
34841 }
34842
34843 /* Load up a constant. If the mode is a vector mode, splat the value across
34844 all of the vector elements. */
34845
34846 static rtx
34847 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34848 {
34849 rtx reg;
34850
34851 if (mode == SFmode || mode == DFmode)
34852 {
34853 rtx d = const_double_from_real_value (dconst, mode);
34854 reg = force_reg (mode, d);
34855 }
34856 else if (mode == V4SFmode)
34857 {
34858 rtx d = const_double_from_real_value (dconst, SFmode);
34859 rtvec v = gen_rtvec (4, d, d, d, d);
34860 reg = gen_reg_rtx (mode);
34861 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34862 }
34863 else if (mode == V2DFmode)
34864 {
34865 rtx d = const_double_from_real_value (dconst, DFmode);
34866 rtvec v = gen_rtvec (2, d, d);
34867 reg = gen_reg_rtx (mode);
34868 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34869 }
34870 else
34871 gcc_unreachable ();
34872
34873 return reg;
34874 }
34875
34876 /* Generate an FMA instruction. */
34877
34878 static void
34879 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34880 {
34881 machine_mode mode = GET_MODE (target);
34882 rtx dst;
34883
34884 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34885 gcc_assert (dst != NULL);
34886
34887 if (dst != target)
34888 emit_move_insn (target, dst);
34889 }
34890
34891 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34892
34893 static void
34894 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34895 {
34896 machine_mode mode = GET_MODE (dst);
34897 rtx r;
34898
34899 /* This is a tad more complicated, since the fnma_optab is for
34900 a different expression: fma(-m1, m2, a), which is the same
34901 thing except in the case of signed zeros.
34902
34903 Fortunately we know that if FMA is supported, then FNMSUB is
34904 also supported in the ISA. Just expand it directly. */
34905
34906 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34907
34908 r = gen_rtx_NEG (mode, a);
34909 r = gen_rtx_FMA (mode, m1, m2, r);
34910 r = gen_rtx_NEG (mode, r);
34911 emit_insn (gen_rtx_SET (dst, r));
34912 }
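
/* Illustrative sketch, not part of GCC: the RTL emitted above computes
   -fma (m1, m2, -a), i.e. a - m1*m2 with a single rounding.  A scalar C
   equivalent using C99 fma() from <math.h>, kept under "#if 0" because
   it is explanatory only.  */
#if 0
#include <math.h>

static double
nmsub_sketch (double m1, double m2, double a)
{
  return -fma (m1, m2, -a);	/* FNMSUB: a - m1*m2, fused.  */
}
#endif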
34913
34914 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34915 add a reg_note saying that this was a division. Support both scalar and
34916 vector divide. Assumes no trapping math and finite arguments. */
34917
34918 void
34919 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34920 {
34921 machine_mode mode = GET_MODE (dst);
34922 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34923 int i;
34924
34925 /* Low precision estimates guarantee 5 bits of accuracy. High
34926 precision estimates guarantee 14 bits of accuracy. SFmode
34927 requires 23 bits of accuracy. DFmode requires 52 bits of
34928 accuracy. Each pass at least doubles the accuracy, leading
34929 to the following. */
34930 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34931 if (mode == DFmode || mode == V2DFmode)
34932 passes++;
34933
34934 enum insn_code code = optab_handler (smul_optab, mode);
34935 insn_gen_fn gen_mul = GEN_FCN (code);
34936
34937 gcc_assert (code != CODE_FOR_nothing);
34938
34939 one = rs6000_load_constant_and_splat (mode, dconst1);
34940
34941 /* x0 = 1./d estimate */
34942 x0 = gen_reg_rtx (mode);
34943 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34944 UNSPEC_FRES)));
34945
34946 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34947 if (passes > 1) {
34948
34949 /* e0 = 1. - d * x0 */
34950 e0 = gen_reg_rtx (mode);
34951 rs6000_emit_nmsub (e0, d, x0, one);
34952
34953 /* x1 = x0 + e0 * x0 */
34954 x1 = gen_reg_rtx (mode);
34955 rs6000_emit_madd (x1, e0, x0, x0);
34956
34957 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34958 ++i, xprev = xnext, eprev = enext) {
34959
34960 /* enext = eprev * eprev */
34961 enext = gen_reg_rtx (mode);
34962 emit_insn (gen_mul (enext, eprev, eprev));
34963
34964 /* xnext = xprev + enext * xprev */
34965 xnext = gen_reg_rtx (mode);
34966 rs6000_emit_madd (xnext, enext, xprev, xprev);
34967 }
34968
34969 } else
34970 xprev = x0;
34971
34972 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34973
34974 /* u = n * xprev */
34975 u = gen_reg_rtx (mode);
34976 emit_insn (gen_mul (u, n, xprev));
34977
34978 /* v = n - (d * u) */
34979 v = gen_reg_rtx (mode);
34980 rs6000_emit_nmsub (v, d, u, n);
34981
34982 /* dst = (v * xprev) + u */
34983 rs6000_emit_madd (dst, v, xprev, u);
34984
34985 if (note_p)
34986 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34987 }
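
/* Illustrative sketch, not part of GCC, of the Newton-Raphson scheme above
   in scalar C for SFmode (3 passes).  recip_estimate() is a hypothetical
   stand-in for the FRES instruction.  Squaring the error term is
   equivalent to recomputing it, since (1 - d*x)^2 == 1 - d*(x + e*x).  */
#if 0
static float
swdiv_sketch (float n, float d)
{
  float x = recip_estimate (d);	/* ~5-bit estimate of 1/d (FRES).  */
  float e = 1.0f - d * x;	/* error term (FNMSUB).  */
  x = x + e * x;		/* pass 1 (FMADD).  */
  e = e * e;			/* squared error.  */
  x = x + e * x;		/* pass 2.  */

  /* Final pass folds in the numerator: n * x * (2 - d * x).  */
  float u = n * x;
  float v = n - d * u;		/* FNMSUB.  */
  return v * x + u;		/* FMADD.  */
}
#endif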
34988
34989 /* Goldschmidt's Algorithm for single/double-precision floating point
34990 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34991
34992 void
34993 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34994 {
34995 machine_mode mode = GET_MODE (src);
34996 rtx e = gen_reg_rtx (mode);
34997 rtx g = gen_reg_rtx (mode);
34998 rtx h = gen_reg_rtx (mode);
34999
35000 /* Low precision estimates guarantee 5 bits of accuracy. High
35001 precision estimates guarantee 14 bits of accuracy. SFmode
35002 requires 23 bits of accuracy. DFmode requires 52 bits of
35003 accuracy. Each pass at least doubles the accuracy, leading
35004 to the following. */
35005 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35006 if (mode == DFmode || mode == V2DFmode)
35007 passes++;
35008
35009 int i;
35010 rtx mhalf;
35011 enum insn_code code = optab_handler (smul_optab, mode);
35012 insn_gen_fn gen_mul = GEN_FCN (code);
35013
35014 gcc_assert (code != CODE_FOR_nothing);
35015
35016 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35017
35018 /* e = rsqrt estimate */
35019 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35020 UNSPEC_RSQRT)));
35021
35022 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35023 if (!recip)
35024 {
35025 rtx zero = force_reg (mode, CONST0_RTX (mode));
35026
35027 if (mode == SFmode)
35028 {
35029 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35030 e, zero, mode, 0);
35031 if (target != e)
35032 emit_move_insn (e, target);
35033 }
35034 else
35035 {
35036 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35037 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35038 }
35039 }
35040
35041 /* g = sqrt estimate. */
35042 emit_insn (gen_mul (g, e, src));
35043 /* h = 1/(2*sqrt) estimate. */
35044 emit_insn (gen_mul (h, e, mhalf));
35045
35046 if (recip)
35047 {
35048 if (passes == 1)
35049 {
35050 rtx t = gen_reg_rtx (mode);
35051 rs6000_emit_nmsub (t, g, h, mhalf);
35052 /* Apply correction directly to the rsqrt estimate. */
35053 rs6000_emit_madd (dst, e, t, e);
35054 }
35055 else
35056 {
35057 for (i = 0; i < passes; i++)
35058 {
35059 rtx t1 = gen_reg_rtx (mode);
35060 rtx g1 = gen_reg_rtx (mode);
35061 rtx h1 = gen_reg_rtx (mode);
35062
35063 rs6000_emit_nmsub (t1, g, h, mhalf);
35064 rs6000_emit_madd (g1, g, t1, g);
35065 rs6000_emit_madd (h1, h, t1, h);
35066
35067 g = g1;
35068 h = h1;
35069 }
35070 /* Multiply by 2 for 1/rsqrt. */
35071 emit_insn (gen_add3_insn (dst, h, h));
35072 }
35073 }
35074 else
35075 {
35076 rtx t = gen_reg_rtx (mode);
35077 rs6000_emit_nmsub (t, g, h, mhalf);
35078 rs6000_emit_madd (dst, g, t, g);
35079 }
35080
35081 return;
35082 }
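
/* Illustrative sketch, not part of GCC, of one Goldschmidt pass as emitted
   above, in scalar C.  rsqrt_estimate() is a hypothetical stand-in for
   FRSQRTE.  The invariants are g ~= sqrt(src) and h ~= 1/(2*sqrt(src)),
   so t = 1/2 - g*h measures the remaining error.  */
#if 0
static float
swsqrt_sketch (float src)
{
  float e = rsqrt_estimate (src);	/* ~5-bit 1/sqrt estimate.  */
  float g = e * src;			/* ~sqrt(src).  */
  float h = e * 0.5f;			/* ~1/(2*sqrt(src)).  */

  float t = 0.5f - g * h;	/* FNMSUB; ~0 once g and h are exact.  */
  g = g + g * t;		/* refined sqrt (FMADD).  */
  h = h + h * t;		/* refined 1/(2*sqrt) (FMADD).  */

  return g;			/* For rsqrt, return h + h instead.  */
}
#endif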
35083
35084 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35085 (Power7) targets. DST is the target, and SRC is the argument operand. */
35086
35087 void
35088 rs6000_emit_popcount (rtx dst, rtx src)
35089 {
35090 machine_mode mode = GET_MODE (dst);
35091 rtx tmp1, tmp2;
35092
35093 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35094 if (TARGET_POPCNTD)
35095 {
35096 if (mode == SImode)
35097 emit_insn (gen_popcntdsi2 (dst, src));
35098 else
35099 emit_insn (gen_popcntddi2 (dst, src));
35100 return;
35101 }
35102
35103 tmp1 = gen_reg_rtx (mode);
35104
35105 if (mode == SImode)
35106 {
35107 emit_insn (gen_popcntbsi2 (tmp1, src));
35108 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35109 NULL_RTX, 0);
35110 tmp2 = force_reg (SImode, tmp2);
35111 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35112 }
35113 else
35114 {
35115 emit_insn (gen_popcntbdi2 (tmp1, src));
35116 tmp2 = expand_mult (DImode, tmp1,
35117 GEN_INT ((HOST_WIDE_INT)
35118 0x01010101 << 32 | 0x01010101),
35119 NULL_RTX, 0);
35120 tmp2 = force_reg (DImode, tmp2);
35121 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35122 }
35123 }
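
/* Illustrative sketch, not part of GCC, of the popcntb fallback above for
   SImode.  popcntb() is a hypothetical stand-in for the instruction that
   leaves the popcount of each byte in that byte.  */
#if 0
static unsigned int
popcount_sketch (unsigned int src)
{
  unsigned int t = popcntb (src);	/* Per-byte counts, each <= 8.  */
  return (t * 0x01010101u) >> 24;	/* Sum all bytes into the top byte.  */
}
#endif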
35124
35125
35126 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35127 target, and SRC is the argument operand. */
35128
35129 void
35130 rs6000_emit_parity (rtx dst, rtx src)
35131 {
35132 machine_mode mode = GET_MODE (dst);
35133 rtx tmp;
35134
35135 tmp = gen_reg_rtx (mode);
35136
35137 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35138 if (TARGET_CMPB)
35139 {
35140 if (mode == SImode)
35141 {
35142 emit_insn (gen_popcntbsi2 (tmp, src));
35143 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35144 }
35145 else
35146 {
35147 emit_insn (gen_popcntbdi2 (tmp, src));
35148 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35149 }
35150 return;
35151 }
35152
35153 if (mode == SImode)
35154 {
35155 /* Is mult+shift >= shift+xor+shift+xor? */
35156 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35157 {
35158 rtx tmp1, tmp2, tmp3, tmp4;
35159
35160 tmp1 = gen_reg_rtx (SImode);
35161 emit_insn (gen_popcntbsi2 (tmp1, src));
35162
35163 tmp2 = gen_reg_rtx (SImode);
35164 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35165 tmp3 = gen_reg_rtx (SImode);
35166 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35167
35168 tmp4 = gen_reg_rtx (SImode);
35169 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35170 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35171 }
35172 else
35173 rs6000_emit_popcount (tmp, src);
35174 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35175 }
35176 else
35177 {
35178 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35179 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35180 {
35181 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35182
35183 tmp1 = gen_reg_rtx (DImode);
35184 emit_insn (gen_popcntbdi2 (tmp1, src));
35185
35186 tmp2 = gen_reg_rtx (DImode);
35187 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35188 tmp3 = gen_reg_rtx (DImode);
35189 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35190
35191 tmp4 = gen_reg_rtx (DImode);
35192 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35193 tmp5 = gen_reg_rtx (DImode);
35194 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35195
35196 tmp6 = gen_reg_rtx (DImode);
35197 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35198 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35199 }
35200 else
35201 rs6000_emit_popcount (tmp, src);
35202 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35203 }
35204 }
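
/* Illustrative sketch, not part of GCC, of the SImode shift+xor variant
   above.  The parity of a sum equals the xor of the addends' parities, so
   folding the per-byte counts with xor preserves bit 0.  popcntb() is a
   hypothetical stand-in for the instruction.  */
#if 0
static unsigned int
parity_sketch (unsigned int src)
{
  unsigned int t = popcntb (src);	/* Per-byte popcounts.  */
  t ^= t >> 16;				/* Fold high half into low half.  */
  t ^= t >> 8;				/* Fold the remaining two bytes.  */
  return t & 1;				/* Bit 0 is the parity.  */
}
#endif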
35205
35206 /* Expand an Altivec constant permutation for little endian mode.
35207 OP0 and OP1 are the input vectors and TARGET is the output vector.
35208 SEL specifies the constant permutation vector.
35209
35210 There are two issues: First, the two input operands must be
35211 swapped so that together they form a double-wide array in LE
35212 order. Second, the vperm instruction has surprising behavior
35213 in LE mode: it interprets the elements of the source vectors
35214 in BE mode ("left to right") and interprets the elements of
35215 the destination vector in LE mode ("right to left"). To
35216 correct for this, we must subtract each element of the permute
35217 control vector from 31.
35218
35219 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35220 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35221 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35222 serve as the permute control vector. Then, in BE mode,
35223
35224 vperm 9,10,11,12
35225
35226 places the desired result in vr9. However, in LE mode the
35227 vector contents will be
35228
35229 vr10 = 00000003 00000002 00000001 00000000
35230 vr11 = 00000007 00000006 00000005 00000004
35231
35232 The result of the vperm using the same permute control vector is
35233
35234 vr9 = 05000000 07000000 01000000 03000000
35235
35236 That is, the leftmost 4 bytes of vr10 are interpreted as the
35237 source for the rightmost 4 bytes of vr9, and so on.
35238
35239 If we change the permute control vector to
35240
35241 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35242
35243 and issue
35244
35245 vperm 9,11,10,12
35246
35247 we get the desired
35248
35249 vr9 = 00000006 00000004 00000002 00000000. */
35250
35251 static void
35252 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35253 const vec_perm_indices &sel)
35254 {
35255 unsigned int i;
35256 rtx perm[16];
35257 rtx constv, unspec;
35258
35259 /* Unpack and adjust the constant selector. */
35260 for (i = 0; i < 16; ++i)
35261 {
35262 unsigned int elt = 31 - (sel[i] & 31);
35263 perm[i] = GEN_INT (elt);
35264 }
35265
35266 /* Expand to a permute, swapping the inputs and using the
35267 adjusted selector. */
35268 if (!REG_P (op0))
35269 op0 = force_reg (V16QImode, op0);
35270 if (!REG_P (op1))
35271 op1 = force_reg (V16QImode, op1);
35272
35273 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35274 constv = force_reg (V16QImode, constv);
35275 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35276 UNSPEC_VPERM);
35277 if (!REG_P (target))
35278 {
35279 rtx tmp = gen_reg_rtx (V16QImode);
35280 emit_move_insn (tmp, unspec);
35281 unspec = tmp;
35282 }
35283
35284 emit_move_insn (target, unspec);
35285 }
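
/* Illustrative sketch, not part of GCC, of the selector adjustment above:
   applied to the worked example in the earlier comment,
   {0,1,2,3,8,9,10,11,...} becomes {31,30,29,28,23,22,21,20,...}.  */
#if 0
static void
adjust_selector_sketch (const unsigned char sel[16], unsigned char out[16])
{
  for (int i = 0; i < 16; i++)
    out[i] = 31 - (sel[i] & 31);	/* Subtract each element from 31.  */
}
#endif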
35286
35287 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35288 permute control vector. But here it's not a constant, so we must
35289 generate a vector NAND or NOR to do the adjustment. */
35290
35291 void
35292 altivec_expand_vec_perm_le (rtx operands[4])
35293 {
35294 rtx notx, iorx, unspec;
35295 rtx target = operands[0];
35296 rtx op0 = operands[1];
35297 rtx op1 = operands[2];
35298 rtx sel = operands[3];
35299 rtx tmp = target;
35300 rtx norreg = gen_reg_rtx (V16QImode);
35301 machine_mode mode = GET_MODE (target);
35302
35303 /* Get everything in regs so the pattern matches. */
35304 if (!REG_P (op0))
35305 op0 = force_reg (mode, op0);
35306 if (!REG_P (op1))
35307 op1 = force_reg (mode, op1);
35308 if (!REG_P (sel))
35309 sel = force_reg (V16QImode, sel);
35310 if (!REG_P (target))
35311 tmp = gen_reg_rtx (mode);
35312
35313 if (TARGET_P9_VECTOR)
35314 {
35315 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35316 UNSPEC_VPERMR);
35317 }
35318 else
35319 {
35320 /* Invert the selector with a VNAND if available, else a VNOR.
35321 The VNAND is preferred for future fusion opportunities. */
35322 notx = gen_rtx_NOT (V16QImode, sel);
35323 iorx = (TARGET_P8_VECTOR
35324 ? gen_rtx_IOR (V16QImode, notx, notx)
35325 : gen_rtx_AND (V16QImode, notx, notx));
35326 emit_insn (gen_rtx_SET (norreg, iorx));
35327
35328 /* Permute with operands reversed and adjusted selector. */
35329 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35330 UNSPEC_VPERM);
35331 }
35332
35333 /* Copy into target, possibly by way of a register. */
35334 if (!REG_P (target))
35335 {
35336 emit_move_insn (tmp, unspec);
35337 unspec = tmp;
35338 }
35339
35340 emit_move_insn (target, unspec);
35341 }
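
/* Why a bytewise NOT performs the same adjustment for a non-constant
   selector: vperm reads only the low five bits of each selector byte, and
   (~b) & 31 == 31 - (b & 31).  A sketch, not part of GCC:  */
#if 0
static unsigned char
invert_selector_byte_sketch (unsigned char b)
{
  return (unsigned char) ~b;	/* Low 5 bits become 31 - (b & 31).  */
}
#endif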
35342
35343 /* Expand an Altivec constant permutation. Return true if we match
35344 an efficient implementation; false to fall back to VPERM.
35345
35346 OP0 and OP1 are the input vectors and TARGET is the output vector.
35347 SEL specifies the constant permutation vector. */
35348
35349 static bool
35350 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35351 const vec_perm_indices &sel)
35352 {
35353 struct altivec_perm_insn {
35354 HOST_WIDE_INT mask;
35355 enum insn_code impl;
35356 unsigned char perm[16];
35357 };
35358 static const struct altivec_perm_insn patterns[] = {
35359 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35360 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35361 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35362 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35363 { OPTION_MASK_ALTIVEC,
35364 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35365 : CODE_FOR_altivec_vmrglb_direct),
35366 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35367 { OPTION_MASK_ALTIVEC,
35368 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35369 : CODE_FOR_altivec_vmrglh_direct),
35370 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35371 { OPTION_MASK_ALTIVEC,
35372 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35373 : CODE_FOR_altivec_vmrglw_direct),
35374 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35375 { OPTION_MASK_ALTIVEC,
35376 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35377 : CODE_FOR_altivec_vmrghb_direct),
35378 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35379 { OPTION_MASK_ALTIVEC,
35380 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35381 : CODE_FOR_altivec_vmrghh_direct),
35382 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35383 { OPTION_MASK_ALTIVEC,
35384 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35385 : CODE_FOR_altivec_vmrghw_direct),
35386 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35387 { OPTION_MASK_P8_VECTOR,
35388 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35389 : CODE_FOR_p8_vmrgow_v4sf_direct),
35390 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35391 { OPTION_MASK_P8_VECTOR,
35392 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35393 : CODE_FOR_p8_vmrgew_v4sf_direct),
35394 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35395 };
35396
35397 unsigned int i, j, elt, which;
35398 unsigned char perm[16];
35399 rtx x;
35400 bool one_vec;
35401
35402 /* Unpack the constant selector. */
35403 for (i = which = 0; i < 16; ++i)
35404 {
35405 elt = sel[i] & 31;
35406 which |= (elt < 16 ? 1 : 2);
35407 perm[i] = elt;
35408 }
35409
35410 /* Simplify the constant selector based on operands. */
35411 switch (which)
35412 {
35413 default:
35414 gcc_unreachable ();
35415
35416 case 3:
35417 one_vec = false;
35418 if (!rtx_equal_p (op0, op1))
35419 break;
35420 /* FALLTHRU */
35421
35422 case 2:
35423 for (i = 0; i < 16; ++i)
35424 perm[i] &= 15;
35425 op0 = op1;
35426 one_vec = true;
35427 break;
35428
35429 case 1:
35430 op1 = op0;
35431 one_vec = true;
35432 break;
35433 }
35434
35435 /* Look for splat patterns. */
35436 if (one_vec)
35437 {
35438 elt = perm[0];
35439
35440 for (i = 0; i < 16; ++i)
35441 if (perm[i] != elt)
35442 break;
35443 if (i == 16)
35444 {
35445 if (!BYTES_BIG_ENDIAN)
35446 elt = 15 - elt;
35447 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35448 return true;
35449 }
35450
35451 if (elt % 2 == 0)
35452 {
35453 for (i = 0; i < 16; i += 2)
35454 if (perm[i] != elt || perm[i + 1] != elt + 1)
35455 break;
35456 if (i == 16)
35457 {
35458 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35459 x = gen_reg_rtx (V8HImode);
35460 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35461 GEN_INT (field)));
35462 emit_move_insn (target, gen_lowpart (V16QImode, x));
35463 return true;
35464 }
35465 }
35466
35467 if (elt % 4 == 0)
35468 {
35469 for (i = 0; i < 16; i += 4)
35470 if (perm[i] != elt
35471 || perm[i + 1] != elt + 1
35472 || perm[i + 2] != elt + 2
35473 || perm[i + 3] != elt + 3)
35474 break;
35475 if (i == 16)
35476 {
35477 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35478 x = gen_reg_rtx (V4SImode);
35479 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35480 GEN_INT (field)));
35481 emit_move_insn (target, gen_lowpart (V16QImode, x));
35482 return true;
35483 }
35484 }
35485 }
35486
35487 /* Look for merge and pack patterns. */
35488 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35489 {
35490 bool swapped;
35491
35492 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35493 continue;
35494
35495 elt = patterns[j].perm[0];
35496 if (perm[0] == elt)
35497 swapped = false;
35498 else if (perm[0] == elt + 16)
35499 swapped = true;
35500 else
35501 continue;
35502 for (i = 1; i < 16; ++i)
35503 {
35504 elt = patterns[j].perm[i];
35505 if (swapped)
35506 elt = (elt >= 16 ? elt - 16 : elt + 16);
35507 else if (one_vec && elt >= 16)
35508 elt -= 16;
35509 if (perm[i] != elt)
35510 break;
35511 }
35512 if (i == 16)
35513 {
35514 enum insn_code icode = patterns[j].impl;
35515 machine_mode omode = insn_data[icode].operand[0].mode;
35516 machine_mode imode = insn_data[icode].operand[1].mode;
35517
35518 /* For little-endian, don't use vpkuwum and vpkuhum if the
35519 underlying vector type is not V4SI and V8HI, respectively.
35520 For example, using vpkuwum with a V8HI picks up the even
35521 halfwords (BE numbering) when the even halfwords (LE
35522 numbering) are what we need. */
35523 if (!BYTES_BIG_ENDIAN
35524 && icode == CODE_FOR_altivec_vpkuwum_direct
35525 && ((REG_P (op0)
35526 && GET_MODE (op0) != V4SImode)
35527 || (SUBREG_P (op0)
35528 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35529 continue;
35530 if (!BYTES_BIG_ENDIAN
35531 && icode == CODE_FOR_altivec_vpkuhum_direct
35532 && ((REG_P (op0)
35533 && GET_MODE (op0) != V8HImode)
35534 || (SUBREG_P (op0)
35535 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35536 continue;
35537
35538 /* For little-endian, the two input operands must be swapped
35539 (or swapped back) to ensure proper right-to-left numbering
35540 from 0 to 2N-1. */
35541 if (swapped ^ !BYTES_BIG_ENDIAN)
35542 std::swap (op0, op1);
35543 if (imode != V16QImode)
35544 {
35545 op0 = gen_lowpart (imode, op0);
35546 op1 = gen_lowpart (imode, op1);
35547 }
35548 if (omode == V16QImode)
35549 x = target;
35550 else
35551 x = gen_reg_rtx (omode);
35552 emit_insn (GEN_FCN (icode) (x, op0, op1));
35553 if (omode != V16QImode)
35554 emit_move_insn (target, gen_lowpart (V16QImode, x));
35555 return true;
35556 }
35557 }
35558
35559 if (!BYTES_BIG_ENDIAN)
35560 {
35561 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35562 return true;
35563 }
35564
35565 return false;
35566 }
35567
35568 /* Expand a VSX Permute Doubleword constant permutation.
35569 Return true if we match an efficient implementation. */
35570
35571 static bool
35572 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35573 unsigned char perm0, unsigned char perm1)
35574 {
35575 rtx x;
35576
35577 /* If both selectors come from the same operand, fold to single op. */
35578 if ((perm0 & 2) == (perm1 & 2))
35579 {
35580 if (perm0 & 2)
35581 op0 = op1;
35582 else
35583 op1 = op0;
35584 }
35585 /* If both operands are equal, fold to simpler permutation. */
35586 if (rtx_equal_p (op0, op1))
35587 {
35588 perm0 = perm0 & 1;
35589 perm1 = (perm1 & 1) + 2;
35590 }
35591 /* If the first selector comes from the second operand, swap. */
35592 else if (perm0 & 2)
35593 {
35594 if (perm1 & 2)
35595 return false;
35596 perm0 -= 2;
35597 perm1 += 2;
35598 std::swap (op0, op1);
35599 }
35600 /* If the second selector does not come from the second operand, fail. */
35601 else if ((perm1 & 2) == 0)
35602 return false;
35603
35604 /* Success! */
35605 if (target != NULL)
35606 {
35607 machine_mode vmode, dmode;
35608 rtvec v;
35609
35610 vmode = GET_MODE (target);
35611 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35612 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35613 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35614 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35615 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35616 emit_insn (gen_rtx_SET (target, x));
35617 }
35618 return true;
35619 }
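
/* Illustrative sketch, not part of GCC, of the xxpermdi selection the
   VEC_CONCAT/VEC_SELECT pair above describes, shown for V2DF: PERM0 and
   PERM1 index the four doublewords of the concatenation {op0, op1}.  */
#if 0
static void
xxpermdi_sketch (double dst[2], const double op0[2], const double op1[2],
		 unsigned int perm0, unsigned int perm1)
{
  const double *cat[4] = { &op0[0], &op0[1], &op1[0], &op1[1] };
  dst[0] = *cat[perm0 & 3];	/* 0-1 select from op0, 2-3 from op1.  */
  dst[1] = *cat[perm1 & 3];
}
#endif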
35620
35621 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35622
35623 static bool
35624 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35625 rtx op1, const vec_perm_indices &sel)
35626 {
35627 bool testing_p = !target;
35628
35629 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35630 if (TARGET_ALTIVEC && testing_p)
35631 return true;
35632
35633 /* Check for ps_merge* or xxpermdi insns. */
35634 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35635 {
35636 if (testing_p)
35637 {
35638 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35639 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35640 }
35641 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35642 return true;
35643 }
35644
35645 if (TARGET_ALTIVEC)
35646 {
35647 /* Force the target-independent code to lower to V16QImode. */
35648 if (vmode != V16QImode)
35649 return false;
35650 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35651 return true;
35652 }
35653
35654 return false;
35655 }
35656
35657 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35658 OP0 and OP1 are the input vectors and TARGET is the output vector.
35659 PERM specifies the constant permutation vector. */
35660
35661 static void
35662 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35663 machine_mode vmode, const vec_perm_builder &perm)
35664 {
35665 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35666 if (x != target)
35667 emit_move_insn (target, x);
35668 }
35669
35670 /* Expand an extract even operation. */
35671
35672 void
35673 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35674 {
35675 machine_mode vmode = GET_MODE (target);
35676 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35677 vec_perm_builder perm (nelt, nelt, 1);
35678
35679 for (i = 0; i < nelt; i++)
35680 perm.quick_push (i * 2);
35681
35682 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35683 }
35684
35685 /* Expand a vector interleave operation. */
35686
35687 void
35688 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35689 {
35690 machine_mode vmode = GET_MODE (target);
35691 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35692 vec_perm_builder perm (nelt, nelt, 1);
35693
35694 high = (highp ? 0 : nelt / 2);
35695 for (i = 0; i < nelt / 2; i++)
35696 {
35697 perm.quick_push (i + high);
35698 perm.quick_push (i + nelt + high);
35699 }
35700
35701 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35702 }
35703
35704 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
35705 void
35706 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35707 {
35708 HOST_WIDE_INT hwi_scale (scale);
35709 REAL_VALUE_TYPE r_pow;
35710 rtvec v = rtvec_alloc (2);
35711 rtx elt;
35712 rtx scale_vec = gen_reg_rtx (V2DFmode);
35713 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35714 elt = const_double_from_real_value (r_pow, DFmode);
35715 RTVEC_ELT (v, 0) = elt;
35716 RTVEC_ELT (v, 1) = elt;
35717 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35718 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35719 }
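
/* Semantics sketch, not part of GCC: each doubleword lane of SRC is
   multiplied by 2**SCALE, e.g. scale = 3 multiplies each lane by 8.0.  */
#if 0
#include <math.h>

static void
scale_v2df_sketch (double tgt[2], const double src[2], int scale)
{
  double f = ldexp (1.0, scale);	/* 2**scale.  */
  tgt[0] = src[0] * f;
  tgt[1] = src[1] * f;
}
#endif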
35720
35721 /* Return an RTX representing where to find the function value of a
35722 function returning MODE. */
35723 static rtx
35724 rs6000_complex_function_value (machine_mode mode)
35725 {
35726 unsigned int regno;
35727 rtx r1, r2;
35728 machine_mode inner = GET_MODE_INNER (mode);
35729 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35730
35731 if (TARGET_FLOAT128_TYPE
35732 && (mode == KCmode
35733 || (mode == TCmode && TARGET_IEEEQUAD)))
35734 regno = ALTIVEC_ARG_RETURN;
35735
35736 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35737 regno = FP_ARG_RETURN;
35738
35739 else
35740 {
35741 regno = GP_ARG_RETURN;
35742
35743 /* 32-bit is OK since it'll go in r3/r4. */
35744 if (TARGET_32BIT && inner_bytes >= 4)
35745 return gen_rtx_REG (mode, regno);
35746 }
35747
35748 if (inner_bytes >= 8)
35749 return gen_rtx_REG (mode, regno);
35750
35751 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35752 const0_rtx);
35753 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35754 GEN_INT (inner_bytes));
35755 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35756 }
35757
35758 /* Return an rtx describing a return value of MODE as a PARALLEL
35759 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35760 stride REG_STRIDE. */
35761
35762 static rtx
35763 rs6000_parallel_return (machine_mode mode,
35764 int n_elts, machine_mode elt_mode,
35765 unsigned int regno, unsigned int reg_stride)
35766 {
35767 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35768
35769 int i;
35770 for (i = 0; i < n_elts; i++)
35771 {
35772 rtx r = gen_rtx_REG (elt_mode, regno);
35773 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35774 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35775 regno += reg_stride;
35776 }
35777
35778 return par;
35779 }
35780
35781 /* Target hook for TARGET_FUNCTION_VALUE.
35782
35783 An integer value is in r3 and a floating-point value is in fp1,
35784 unless -msoft-float. */
35785
35786 static rtx
35787 rs6000_function_value (const_tree valtype,
35788 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35789 bool outgoing ATTRIBUTE_UNUSED)
35790 {
35791 machine_mode mode;
35792 unsigned int regno;
35793 machine_mode elt_mode;
35794 int n_elts;
35795
35796 /* Special handling for structs in darwin64. */
35797 if (TARGET_MACHO
35798 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35799 {
35800 CUMULATIVE_ARGS valcum;
35801 rtx valret;
35802
35803 valcum.words = 0;
35804 valcum.fregno = FP_ARG_MIN_REG;
35805 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35806 /* Do a trial code generation as if this were going to be passed as
35807 an argument; if any part goes in memory, we return NULL. */
35808 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35809 if (valret)
35810 return valret;
35811 /* Otherwise fall through to standard ABI rules. */
35812 }
35813
35814 mode = TYPE_MODE (valtype);
35815
35816 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35817 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35818 {
35819 int first_reg, n_regs;
35820
35821 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35822 {
35823 /* _Decimal128 must use even/odd register pairs. */
35824 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35825 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35826 }
35827 else
35828 {
35829 first_reg = ALTIVEC_ARG_RETURN;
35830 n_regs = 1;
35831 }
35832
35833 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35834 }
35835
35836 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
35837 if (TARGET_32BIT && TARGET_POWERPC64)
35838 switch (mode)
35839 {
35840 default:
35841 break;
35842 case E_DImode:
35843 case E_SCmode:
35844 case E_DCmode:
35845 case E_TCmode:
35846 int count = GET_MODE_SIZE (mode) / 4;
35847 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35848 }
35849
35850 if ((INTEGRAL_TYPE_P (valtype)
35851 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35852 || POINTER_TYPE_P (valtype))
35853 mode = TARGET_32BIT ? SImode : DImode;
35854
35855 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35856 /* _Decimal128 must use an even/odd register pair. */
35857 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35858 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35859 && !FLOAT128_VECTOR_P (mode))
35860 regno = FP_ARG_RETURN;
35861 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35862 && targetm.calls.split_complex_arg)
35863 return rs6000_complex_function_value (mode);
35864 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35865 return register is used in both cases, and we won't see V2DImode/V2DFmode
35866 for pure altivec, combine the two cases. */
35867 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35868 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35869 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35870 regno = ALTIVEC_ARG_RETURN;
35871 else
35872 regno = GP_ARG_RETURN;
35873
35874 return gen_rtx_REG (mode, regno);
35875 }
35876
35877 /* Define how to find the value returned by a library function
35878 assuming the value has mode MODE. */
35879 rtx
35880 rs6000_libcall_value (machine_mode mode)
35881 {
35882 unsigned int regno;
35883
35884 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
35885 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35886 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35887
35888 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35889 /* _Decimal128 must use an even/odd register pair. */
35890 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35891 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35892 regno = FP_ARG_RETURN;
35893 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35894 return register is used in both cases, and we won't see V2DImode/V2DFmode
35895 for pure altivec, combine the two cases. */
35896 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35897 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35898 regno = ALTIVEC_ARG_RETURN;
35899 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35900 return rs6000_complex_function_value (mode);
35901 else
35902 regno = GP_ARG_RETURN;
35903
35904 return gen_rtx_REG (mode, regno);
35905 }
35906
35907 /* Compute register pressure classes. We implement the target hook to avoid
35908 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
35909 lead to incorrect estimates of the number of available registers and therefore
35910 increased register pressure/spill. */
35911 static int
35912 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35913 {
35914 int n;
35915
35916 n = 0;
35917 pressure_classes[n++] = GENERAL_REGS;
35918 if (TARGET_VSX)
35919 pressure_classes[n++] = VSX_REGS;
35920 else
35921 {
35922 if (TARGET_ALTIVEC)
35923 pressure_classes[n++] = ALTIVEC_REGS;
35924 if (TARGET_HARD_FLOAT)
35925 pressure_classes[n++] = FLOAT_REGS;
35926 }
35927 pressure_classes[n++] = CR_REGS;
35928 pressure_classes[n++] = SPECIAL_REGS;
35929
35930 return n;
35931 }
35932
35933 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35934 Frame pointer elimination is automatically handled.
35935
35936 For the RS/6000, if frame pointer elimination is being done, we would like
35937 to convert ap into fp, not sp.
35938
35939 We need r30 if -mminimal-toc was specified, and there are constant pool
35940 references. */
35941
35942 static bool
35943 rs6000_can_eliminate (const int from, const int to)
35944 {
35945 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35946 ? ! frame_pointer_needed
35947 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35948 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35949 || constant_pool_empty_p ()
35950 : true);
35951 }
35952
35953 /* Define the offset between two registers, FROM to be eliminated and its
35954 replacement TO, at the start of a routine. */
35955 HOST_WIDE_INT
35956 rs6000_initial_elimination_offset (int from, int to)
35957 {
35958 rs6000_stack_t *info = rs6000_stack_info ();
35959 HOST_WIDE_INT offset;
35960
35961 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35962 offset = info->push_p ? 0 : -info->total_size;
35963 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35964 {
35965 offset = info->push_p ? 0 : -info->total_size;
35966 if (FRAME_GROWS_DOWNWARD)
35967 offset += info->fixed_size + info->vars_size + info->parm_size;
35968 }
35969 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35970 offset = FRAME_GROWS_DOWNWARD
35971 ? info->fixed_size + info->vars_size + info->parm_size
35972 : 0;
35973 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35974 offset = info->total_size;
35975 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35976 offset = info->push_p ? info->total_size : 0;
35977 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35978 offset = 0;
35979 else
35980 gcc_unreachable ();
35981
35982 return offset;
35983 }
35984
35985 /* Fill in sizes of registers used by unwinder. */
35986
35987 static void
35988 rs6000_init_dwarf_reg_sizes_extra (tree address)
35989 {
35990 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35991 {
35992 int i;
35993 machine_mode mode = TYPE_MODE (char_type_node);
35994 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35995 rtx mem = gen_rtx_MEM (BLKmode, addr);
35996 rtx value = gen_int_mode (16, mode);
35997
35998 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35999 The unwinder still needs to know the size of Altivec registers. */
36000
36001 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36002 {
36003 int column = DWARF_REG_TO_UNWIND_COLUMN
36004 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36005 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36006
36007 emit_move_insn (adjust_address (mem, mode, offset), value);
36008 }
36009 }
36010 }
36011
36012 /* Map internal gcc register numbers to debug format register numbers.
36013 FORMAT specifies the type of debug register number to use:
36014 0 -- debug information, except for frame-related sections
36015 1 -- DWARF .debug_frame section
36016 2 -- DWARF .eh_frame section */
36017
36018 unsigned int
36019 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36020 {
36021 /* On some platforms, we use the standard DWARF register
36022 numbering for .debug_info and .debug_frame. */
36023 if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
36024 {
36025 #ifdef RS6000_USE_DWARF_NUMBERING
36026 if (regno <= 31)
36027 return regno;
36028 if (FP_REGNO_P (regno))
36029 return regno - FIRST_FPR_REGNO + 32;
36030 if (ALTIVEC_REGNO_P (regno))
36031 return regno - FIRST_ALTIVEC_REGNO + 1124;
36032 if (regno == LR_REGNO)
36033 return 108;
36034 if (regno == CTR_REGNO)
36035 return 109;
36036 if (regno == CA_REGNO)
36037 return 101; /* XER */
36038 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36039 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36040 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36041 to the DWARF reg for CR. */
36042 if (format == 1 && regno == CR2_REGNO)
36043 return 64;
36044 if (CR_REGNO_P (regno))
36045 return regno - CR0_REGNO + 86;
36046 if (regno == VRSAVE_REGNO)
36047 return 356;
36048 if (regno == VSCR_REGNO)
36049 return 67;
36050
36051 /* These do not make much sense. */
36052 if (regno == FRAME_POINTER_REGNUM)
36053 return 111;
36054 if (regno == ARG_POINTER_REGNUM)
36055 return 67;
36056 if (regno == 64)
36057 return 100;
36058
36059 gcc_unreachable ();
36060 #endif
36061 }
36062
36063 /* We use the GCC 7 (and before) internal number for non-DWARF debug
36064 information, and also for .eh_frame. */
36065 /* Translate the regnos to their numbers in GCC 7 (and before). */
36066 if (regno <= 31)
36067 return regno;
36068 if (FP_REGNO_P (regno))
36069 return regno - FIRST_FPR_REGNO + 32;
36070 if (ALTIVEC_REGNO_P (regno))
36071 return regno - FIRST_ALTIVEC_REGNO + 77;
36072 if (regno == LR_REGNO)
36073 return 65;
36074 if (regno == CTR_REGNO)
36075 return 66;
36076 if (regno == CA_REGNO)
36077 return 76; /* XER */
36078 if (CR_REGNO_P (regno))
36079 return regno - CR0_REGNO + 68;
36080 if (regno == VRSAVE_REGNO)
36081 return 109;
36082 if (regno == VSCR_REGNO)
36083 return 110;
36084
36085 if (regno == FRAME_POINTER_REGNUM)
36086 return 111;
36087 if (regno == ARG_POINTER_REGNUM)
36088 return 67;
36089 if (regno == 64)
36090 return 64;
36091
36092 gcc_unreachable ();
36093 }
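
/* Hypothetical self-check of the mapping above, assuming a platform that
   defines RS6000_USE_DWARF_NUMBERING: the link register appears as column
   108 in .debug_frame (format 1) but as 65 in .eh_frame (format 2).  */
#if 0
static void
debug_regno_sketch (void)
{
  gcc_assert (rs6000_dbx_register_number (LR_REGNO, 1) == 108);
  gcc_assert (rs6000_dbx_register_number (LR_REGNO, 2) == 65);
}
#endif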
36094
36095 /* target hook eh_return_filter_mode */
36096 static scalar_int_mode
36097 rs6000_eh_return_filter_mode (void)
36098 {
36099 return TARGET_32BIT ? SImode : word_mode;
36100 }
36101
36102 /* Target hook for translate_mode_attribute. */
36103 static machine_mode
36104 rs6000_translate_mode_attribute (machine_mode mode)
36105 {
36106 if ((FLOAT128_IEEE_P (mode)
36107 && ieee128_float_type_node == long_double_type_node)
36108 || (FLOAT128_IBM_P (mode)
36109 && ibm128_float_type_node == long_double_type_node))
36110 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36111 return mode;
36112 }
36113
36114 /* Target hook for scalar_mode_supported_p. */
36115 static bool
36116 rs6000_scalar_mode_supported_p (scalar_mode mode)
36117 {
36118 /* -m32 does not support TImode. This is the default, from
36119 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36120 same ABI as for -m32. But default_scalar_mode_supported_p allows
36121 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36122 for -mpowerpc64. */
36123 if (TARGET_32BIT && mode == TImode)
36124 return false;
36125
36126 if (DECIMAL_FLOAT_MODE_P (mode))
36127 return default_decimal_float_supported_p ();
36128 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36129 return true;
36130 else
36131 return default_scalar_mode_supported_p (mode);
36132 }
36133
36134 /* Target hook for vector_mode_supported_p. */
36135 static bool
36136 rs6000_vector_mode_supported_p (machine_mode mode)
36137 {
36138 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36139 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36140 double-double. */
36141 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36142 return true;
36143
36144 else
36145 return false;
36146 }
36147
36148 /* Target hook for floatn_mode. */
36149 static opt_scalar_float_mode
36150 rs6000_floatn_mode (int n, bool extended)
36151 {
36152 if (extended)
36153 {
36154 switch (n)
36155 {
36156 case 32:
36157 return DFmode;
36158
36159 case 64:
36160 if (TARGET_FLOAT128_TYPE)
36161 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36162 else
36163 return opt_scalar_float_mode ();
36164
36165 case 128:
36166 return opt_scalar_float_mode ();
36167
36168 default:
36169 /* Those are the only valid _FloatNx types. */
36170 gcc_unreachable ();
36171 }
36172 }
36173 else
36174 {
36175 switch (n)
36176 {
36177 case 32:
36178 return SFmode;
36179
36180 case 64:
36181 return DFmode;
36182
36183 case 128:
36184 if (TARGET_FLOAT128_TYPE)
36185 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36186 else
36187 return opt_scalar_float_mode ();
36188
36189 default:
36190 return opt_scalar_float_mode ();
36191 }
36192 }
36193
36194 }
36195
36196 /* Target hook for c_mode_for_suffix. */
36197 static machine_mode
36198 rs6000_c_mode_for_suffix (char suffix)
36199 {
36200 if (TARGET_FLOAT128_TYPE)
36201 {
36202 if (suffix == 'q' || suffix == 'Q')
36203 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36204
36205 /* At the moment, we are not defining a suffix for IBM extended double.
36206 If/when the default for -mabi=ieeelongdouble is changed, and we want
36207 to support __ibm128 constants in legacy library code, we may need to
36208 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36209 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36210 __float80 constants. */
36211 }
36212
36213 return VOIDmode;
36214 }
36215
36216 /* Target hook for invalid_arg_for_unprototyped_fn. */
36217 static const char *
36218 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36219 {
36220 return (!rs6000_darwin64_abi
36221 && typelist == 0
36222 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36223 && (funcdecl == NULL_TREE
36224 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36225 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36226 ? N_("AltiVec argument passed to unprototyped function")
36227 : NULL;
36228 }
36229
36230 /* For TARGET_SECURE_PLT 32-bit PIC code we can save the PIC register
36231 setup by using the __stack_chk_fail_local hidden function instead of
36232 calling __stack_chk_fail directly. Otherwise it is better to call
36233 __stack_chk_fail directly. */
36234
36235 static tree ATTRIBUTE_UNUSED
36236 rs6000_stack_protect_fail (void)
36237 {
36238 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36239 ? default_hidden_stack_protect_fail ()
36240 : default_external_stack_protect_fail ();
36241 }
36242
36243 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36244
36245 #if TARGET_ELF
36246 static unsigned HOST_WIDE_INT
36247 rs6000_asan_shadow_offset (void)
36248 {
36249 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36250 }
36251 #endif
36252 \f
36253 /* Mask options that we want to support inside of attribute((target)) and
36254 #pragma GCC target operations. Note, we do not include things like
36255 64/32-bit, endianness, hard/soft floating point, etc. that would have
36256 different calling sequences. */
36257
36258 struct rs6000_opt_mask {
36259 const char *name; /* option name */
36260 HOST_WIDE_INT mask; /* mask to set */
36261 bool invert; /* invert sense of mask */
36262 bool valid_target; /* option is a target option */
36263 };
36264
36265 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36266 {
36267 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36268 { "cmpb", OPTION_MASK_CMPB, false, true },
36269 { "crypto", OPTION_MASK_CRYPTO, false, true },
36270 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36271 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36272 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36273 false, true },
36274 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36275 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36276 { "fprnd", OPTION_MASK_FPRND, false, true },
36277 { "future", OPTION_MASK_FUTURE, false, true },
36278 { "hard-dfp", OPTION_MASK_DFP, false, true },
36279 { "htm", OPTION_MASK_HTM, false, true },
36280 { "isel", OPTION_MASK_ISEL, false, true },
36281 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36282 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36283 { "modulo", OPTION_MASK_MODULO, false, true },
36284 { "mulhw", OPTION_MASK_MULHW, false, true },
36285 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36286 { "pcrel", OPTION_MASK_PCREL, false, true },
36287 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36288 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36289 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36290 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36291 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36292 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36293 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36294 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36295 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36296 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36297 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36298 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36299 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36300 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36301 { "string", 0, false, true },
36302 { "update", OPTION_MASK_NO_UPDATE, true , true },
36303 { "vsx", OPTION_MASK_VSX, false, true },
36304 #ifdef OPTION_MASK_64BIT
36305 #if TARGET_AIX_OS
36306 { "aix64", OPTION_MASK_64BIT, false, false },
36307 { "aix32", OPTION_MASK_64BIT, true, false },
36308 #else
36309 { "64", OPTION_MASK_64BIT, false, false },
36310 { "32", OPTION_MASK_64BIT, true, false },
36311 #endif
36312 #endif
36313 #ifdef OPTION_MASK_EABI
36314 { "eabi", OPTION_MASK_EABI, false, false },
36315 #endif
36316 #ifdef OPTION_MASK_LITTLE_ENDIAN
36317 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36318 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36319 #endif
36320 #ifdef OPTION_MASK_RELOCATABLE
36321 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36322 #endif
36323 #ifdef OPTION_MASK_STRICT_ALIGN
36324 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36325 #endif
36326 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36327 { "string", 0, false, false },
36328 };
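
/* As an illustration, the names in the table above are what users write in
   the target attribute and pragma, optionally prefixed with "no-" (the "no-"
   prefix is recognized by the parser below together with the INVERT field,
   rather than by separate table entries).  For example, with foo a
   placeholder name:

     #pragma GCC target ("no-vsx,htm")
     __attribute__((target ("power9-vector"))) void foo (void);  */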
36329
36330 /* Builtin mask mapping for printing the flags. */
36331 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36332 {
36333 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36334 { "vsx", RS6000_BTM_VSX, false, false },
36335 { "fre", RS6000_BTM_FRE, false, false },
36336 { "fres", RS6000_BTM_FRES, false, false },
36337 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36338 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36339 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36340 { "cell", RS6000_BTM_CELL, false, false },
36341 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36342 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36343 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36344 { "crypto", RS6000_BTM_CRYPTO, false, false },
36345 { "htm", RS6000_BTM_HTM, false, false },
36346 { "hard-dfp", RS6000_BTM_DFP, false, false },
36347 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36348 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36349 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36350 { "float128", RS6000_BTM_FLOAT128, false, false },
36351 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36352 };
36353
36354 /* Option variables that we want to support inside attribute((target)) and
36355 #pragma GCC target operations. */
36356
36357 struct rs6000_opt_var {
36358 const char *name; /* option name */
36359 size_t global_offset; /* offset of the option in global_options. */
36360 size_t target_offset; /* offset of the option in target options. */
36361 };
36362
36363 static struct rs6000_opt_var const rs6000_opt_vars[] =
36364 {
36365 { "friz",
36366 offsetof (struct gcc_options, x_TARGET_FRIZ),
36367 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36368 { "avoid-indexed-addresses",
36369 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36370 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36371 { "longcall",
36372 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36373 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36374 { "optimize-swaps",
36375 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36376 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36377 { "allow-movmisalign",
36378 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36379 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36380 { "sched-groups",
36381 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36382 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36383 { "always-hint",
36384 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36385 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36386 { "align-branch-targets",
36387 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36388 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36389 { "tls-markers",
36390 offsetof (struct gcc_options, x_tls_markers),
36391 offsetof (struct cl_target_option, x_tls_markers), },
36392 { "sched-prolog",
36393 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36394 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36395 { "sched-epilog",
36396 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36397 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36398 { "speculate-indirect-jumps",
36399 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36400 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36401 };
36402
36403 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36404 parsing. Return true if there were no errors. */
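/* As a sketch, assuming the named CPUs are known to rs6000_cpu_name_lookup,
   each comma-separated token of the argument string is handled as follows:

     "cpu=power9"    sets rs6000_cpu_index
     "tune=power8"   sets rs6000_tune_index
     "no-altivec"    clears OPTION_MASK_ALTIVEC in rs6000_isa_flags
     "friz"          sets the rs6000_opt_vars variable for -mfriz  */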
36405
36406 static bool
36407 rs6000_inner_target_options (tree args, bool attr_p)
36408 {
36409 bool ret = true;
36410
36411 if (args == NULL_TREE)
36412 ;
36413
36414 else if (TREE_CODE (args) == STRING_CST)
36415 {
36416 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36417 char *q;
36418
36419 while ((q = strtok (p, ",")) != NULL)
36420 {
36421 bool error_p = false;
36422 bool not_valid_p = false;
36423 const char *cpu_opt = NULL;
36424
36425 p = NULL;
36426 if (strncmp (q, "cpu=", 4) == 0)
36427 {
36428 int cpu_index = rs6000_cpu_name_lookup (q+4);
36429 if (cpu_index >= 0)
36430 rs6000_cpu_index = cpu_index;
36431 else
36432 {
36433 error_p = true;
36434 cpu_opt = q+4;
36435 }
36436 }
36437 else if (strncmp (q, "tune=", 5) == 0)
36438 {
36439 int tune_index = rs6000_cpu_name_lookup (q+5);
36440 if (tune_index >= 0)
36441 rs6000_tune_index = tune_index;
36442 else
36443 {
36444 error_p = true;
36445 cpu_opt = q+5;
36446 }
36447 }
36448 else
36449 {
36450 size_t i;
36451 bool invert = false;
36452 char *r = q;
36453
36454 error_p = true;
36455 if (strncmp (r, "no-", 3) == 0)
36456 {
36457 invert = true;
36458 r += 3;
36459 }
36460
36461 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36462 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36463 {
36464 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36465
36466 if (!rs6000_opt_masks[i].valid_target)
36467 not_valid_p = true;
36468 else
36469 {
36470 error_p = false;
36471 rs6000_isa_flags_explicit |= mask;
36472
36473 /* VSX needs altivec, so -mvsx automagically sets
36474 altivec and disables -mavoid-indexed-addresses. */
36475 if (!invert)
36476 {
36477 if (mask == OPTION_MASK_VSX)
36478 {
36479 mask |= OPTION_MASK_ALTIVEC;
36480 TARGET_AVOID_XFORM = 0;
36481 }
36482 }
36483
36484 if (rs6000_opt_masks[i].invert)
36485 invert = !invert;
36486
36487 if (invert)
36488 rs6000_isa_flags &= ~mask;
36489 else
36490 rs6000_isa_flags |= mask;
36491 }
36492 break;
36493 }
36494
36495 if (error_p && !not_valid_p)
36496 {
36497 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36498 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36499 {
36500 size_t j = rs6000_opt_vars[i].global_offset;
36501 *((int *) ((char *)&global_options + j)) = !invert;
36502 error_p = false;
36503 not_valid_p = false;
36504 break;
36505 }
36506 }
36507 }
36508
36509 if (error_p)
36510 {
36511 const char *eprefix, *esuffix;
36512
36513 ret = false;
36514 if (attr_p)
36515 {
36516 eprefix = "__attribute__((__target__(";
36517 esuffix = ")))";
36518 }
36519 else
36520 {
36521 eprefix = "#pragma GCC target ";
36522 esuffix = "";
36523 }
36524
36525 if (cpu_opt)
36526 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36527 q, esuffix);
36528 else if (not_valid_p)
36529 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36530 else
36531 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36532 }
36533 }
36534 }
36535
36536 else if (TREE_CODE (args) == TREE_LIST)
36537 {
36538 do
36539 {
36540 tree value = TREE_VALUE (args);
36541 if (value)
36542 {
36543 bool ret2 = rs6000_inner_target_options (value, attr_p);
36544 if (!ret2)
36545 ret = false;
36546 }
36547 args = TREE_CHAIN (args);
36548 }
36549 while (args != NULL_TREE);
36550 }
36551
36552 else
36553 {
36554 error ("attribute %<target%> argument not a string");
36555 return false;
36556 }
36557
36558 return ret;
36559 }
36560
36561 /* Print out the target options as a list for -mdebug=target. */
36562
36563 static void
36564 rs6000_debug_target_options (tree args, const char *prefix)
36565 {
36566 if (args == NULL_TREE)
36567 fprintf (stderr, "%s<NULL>", prefix);
36568
36569 else if (TREE_CODE (args) == STRING_CST)
36570 {
36571 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36572 char *q;
36573
36574 while ((q = strtok (p, ",")) != NULL)
36575 {
36576 p = NULL;
36577 fprintf (stderr, "%s\"%s\"", prefix, q);
36578 prefix = ", ";
36579 }
36580 }
36581
36582 else if (TREE_CODE (args) == TREE_LIST)
36583 {
36584 do
36585 {
36586 tree value = TREE_VALUE (args);
36587 if (value)
36588 {
36589 rs6000_debug_target_options (value, prefix);
36590 prefix = ", ";
36591 }
36592 args = TREE_CHAIN (args);
36593 }
36594 while (args != NULL_TREE);
36595 }
36596
36597 else
36598 gcc_unreachable ();
36599
36600 return;
36601 }
36602
36603 \f
36604 /* Hook to validate attribute((target("..."))). */
36605
36606 static bool
36607 rs6000_valid_attribute_p (tree fndecl,
36608 tree ARG_UNUSED (name),
36609 tree args,
36610 int flags)
36611 {
36612 struct cl_target_option cur_target;
36613 bool ret;
36614 tree old_optimize;
36615 tree new_target, new_optimize;
36616 tree func_optimize;
36617
36618 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36619
36620 if (TARGET_DEBUG_TARGET)
36621 {
36622 tree tname = DECL_NAME (fndecl);
36623 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36624 if (tname)
36625 fprintf (stderr, "function: %.*s\n",
36626 (int) IDENTIFIER_LENGTH (tname),
36627 IDENTIFIER_POINTER (tname));
36628 else
36629 fprintf (stderr, "function: unknown\n");
36630
36631 fprintf (stderr, "args:");
36632 rs6000_debug_target_options (args, " ");
36633 fprintf (stderr, "\n");
36634
36635 if (flags)
36636 fprintf (stderr, "flags: 0x%x\n", flags);
36637
36638 fprintf (stderr, "--------------------\n");
36639 }
36640
36641 /* attribute((target("default"))) does nothing, beyond
36642 affecting multi-versioning. */
36643 if (TREE_VALUE (args)
36644 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36645 && TREE_CHAIN (args) == NULL_TREE
36646 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36647 return true;
36648
36649 old_optimize = build_optimization_node (&global_options);
36650 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36651
36652 /* If the function changed the optimization levels as well as setting target
36653 options, start with the optimizations specified. */
36654 if (func_optimize && func_optimize != old_optimize)
36655 cl_optimization_restore (&global_options,
36656 TREE_OPTIMIZATION (func_optimize));
36657
36658 /* The target attributes may also change some optimization flags, so update
36659 the optimization options if necessary. */
36660 cl_target_option_save (&cur_target, &global_options);
36661 rs6000_cpu_index = rs6000_tune_index = -1;
36662 ret = rs6000_inner_target_options (args, true);
36663
36664 /* Set up any additional state. */
36665 if (ret)
36666 {
36667 ret = rs6000_option_override_internal (false);
36668 new_target = build_target_option_node (&global_options);
36669 }
36670 else
36671 new_target = NULL;
36672
36673 new_optimize = build_optimization_node (&global_options);
36674
36675 if (!new_target)
36676 ret = false;
36677
36678 else if (fndecl)
36679 {
36680 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36681
36682 if (old_optimize != new_optimize)
36683 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36684 }
36685
36686 cl_target_option_restore (&global_options, &cur_target);
36687
36688 if (old_optimize != new_optimize)
36689 cl_optimization_restore (&global_options,
36690 TREE_OPTIMIZATION (old_optimize));
36691
36692 return ret;
36693 }
36694
36695 \f
36696 /* Hook to validate the current #pragma GCC target and set the state, and
36697 update the macros based on what was changed. If ARGS is NULL, then
36698 POP_TARGET is used to reset the options. */
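/* Typical use from C code (an illustrative sketch, with fast_path a
   placeholder name):

     #pragma GCC push_options
     #pragma GCC target ("cpu=power9,vsx")
     void fast_path (void);
     #pragma GCC pop_options

   Functions between the target and pop_options pragmas are compiled with
   the new flags; the pop reaches this hook with ARGS == NULL and restores
   the previously pushed target option node.  */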
36699
36700 bool
36701 rs6000_pragma_target_parse (tree args, tree pop_target)
36702 {
36703 tree prev_tree = build_target_option_node (&global_options);
36704 tree cur_tree;
36705 struct cl_target_option *prev_opt, *cur_opt;
36706 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36707 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36708
36709 if (TARGET_DEBUG_TARGET)
36710 {
36711 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36712 fprintf (stderr, "args:");
36713 rs6000_debug_target_options (args, " ");
36714 fprintf (stderr, "\n");
36715
36716 if (pop_target)
36717 {
36718 fprintf (stderr, "pop_target:\n");
36719 debug_tree (pop_target);
36720 }
36721 else
36722 fprintf (stderr, "pop_target: <NULL>\n");
36723
36724 fprintf (stderr, "--------------------\n");
36725 }
36726
36727 if (! args)
36728 {
36729 cur_tree = ((pop_target)
36730 ? pop_target
36731 : target_option_default_node);
36732 cl_target_option_restore (&global_options,
36733 TREE_TARGET_OPTION (cur_tree));
36734 }
36735 else
36736 {
36737 rs6000_cpu_index = rs6000_tune_index = -1;
36738 if (!rs6000_inner_target_options (args, false)
36739 || !rs6000_option_override_internal (false)
36740 || (cur_tree = build_target_option_node (&global_options))
36741 == NULL_TREE)
36742 {
36743 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36744 fprintf (stderr, "invalid pragma\n");
36745
36746 return false;
36747 }
36748 }
36749
36750 target_option_current_node = cur_tree;
36751 rs6000_activate_target_options (target_option_current_node);
36752
36753 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36754 change the macros that are defined. */
36755 if (rs6000_target_modify_macros_ptr)
36756 {
36757 prev_opt = TREE_TARGET_OPTION (prev_tree);
36758 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36759 prev_flags = prev_opt->x_rs6000_isa_flags;
36760
36761 cur_opt = TREE_TARGET_OPTION (cur_tree);
36762 cur_flags = cur_opt->x_rs6000_isa_flags;
36763 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36764
36765 diff_bumask = (prev_bumask ^ cur_bumask);
36766 diff_flags = (prev_flags ^ cur_flags);
36767
36768 if ((diff_flags != 0) || (diff_bumask != 0))
36769 {
36770 /* Delete old macros. */
36771 rs6000_target_modify_macros_ptr (false,
36772 prev_flags & diff_flags,
36773 prev_bumask & diff_bumask);
36774
36775 /* Define new macros. */
36776 rs6000_target_modify_macros_ptr (true,
36777 cur_flags & diff_flags,
36778 cur_bumask & diff_bumask);
36779 }
36780 }
36781
36782 return true;
36783 }
36784
36785 \f
36786 /* Remember the last target of rs6000_set_current_function. */
36787 static GTY(()) tree rs6000_previous_fndecl;
36788
36789 /* Restore target's globals from NEW_TREE and invalidate the
36790 rs6000_previous_fndecl cache. */
36791
36792 void
36793 rs6000_activate_target_options (tree new_tree)
36794 {
36795 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36796 if (TREE_TARGET_GLOBALS (new_tree))
36797 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36798 else if (new_tree == target_option_default_node)
36799 restore_target_globals (&default_target_globals);
36800 else
36801 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36802 rs6000_previous_fndecl = NULL_TREE;
36803 }
36804
36805 /* Establish appropriate back-end context for processing the function
36806 FNDECL. The argument might be NULL to indicate processing at top
36807 level, outside of any function scope. */
36808 static void
36809 rs6000_set_current_function (tree fndecl)
36810 {
36811 if (TARGET_DEBUG_TARGET)
36812 {
36813 fprintf (stderr, "\n==================== rs6000_set_current_function");
36814
36815 if (fndecl)
36816 fprintf (stderr, ", fndecl %s (%p)",
36817 (DECL_NAME (fndecl)
36818 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36819 : "<unknown>"), (void *)fndecl);
36820
36821 if (rs6000_previous_fndecl)
36822 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36823
36824 fprintf (stderr, "\n");
36825 }
36826
36827 /* Only change the context if the function changes. This hook is called
36828 several times in the course of compiling a function, and we don't want to
36829 slow things down too much or call target_reinit when it isn't safe. */
36830 if (fndecl == rs6000_previous_fndecl)
36831 return;
36832
36833 tree old_tree;
36834 if (rs6000_previous_fndecl == NULL_TREE)
36835 old_tree = target_option_current_node;
36836 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36837 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36838 else
36839 old_tree = target_option_default_node;
36840
36841 tree new_tree;
36842 if (fndecl == NULL_TREE)
36843 {
36844 if (old_tree != target_option_current_node)
36845 new_tree = target_option_current_node;
36846 else
36847 new_tree = NULL_TREE;
36848 }
36849 else
36850 {
36851 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36852 if (new_tree == NULL_TREE)
36853 new_tree = target_option_default_node;
36854 }
36855
36856 if (TARGET_DEBUG_TARGET)
36857 {
36858 if (new_tree)
36859 {
36860 fprintf (stderr, "\nnew fndecl target specific options:\n");
36861 debug_tree (new_tree);
36862 }
36863
36864 if (old_tree)
36865 {
36866 fprintf (stderr, "\nold fndecl target specific options:\n");
36867 debug_tree (old_tree);
36868 }
36869
36870 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36871 fprintf (stderr, "--------------------\n");
36872 }
36873
36874 if (new_tree && old_tree != new_tree)
36875 rs6000_activate_target_options (new_tree);
36876
36877 if (fndecl)
36878 rs6000_previous_fndecl = fndecl;
36879 }
36880
36881 \f
36882 /* Save the current options */
36883
36884 static void
36885 rs6000_function_specific_save (struct cl_target_option *ptr,
36886 struct gcc_options *opts)
36887 {
36888 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36889 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36890 }
36891
36892 /* Restore the current options */
36893
36894 static void
36895 rs6000_function_specific_restore (struct gcc_options *opts,
36896 struct cl_target_option *ptr)
36897
36898 {
36899 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36900 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36901 (void) rs6000_option_override_internal (false);
36902 }
36903
36904 /* Print the current options */
36905
36906 static void
36907 rs6000_function_specific_print (FILE *file, int indent,
36908 struct cl_target_option *ptr)
36909 {
36910 rs6000_print_isa_options (file, indent, "Isa options set",
36911 ptr->x_rs6000_isa_flags);
36912
36913 rs6000_print_isa_options (file, indent, "Isa options explicit",
36914 ptr->x_rs6000_isa_flags_explicit);
36915 }
36916
36917 /* Helper function to print the current isa or misc options on a line. */
36918
36919 static void
36920 rs6000_print_options_internal (FILE *file,
36921 int indent,
36922 const char *string,
36923 HOST_WIDE_INT flags,
36924 const char *prefix,
36925 const struct rs6000_opt_mask *opts,
36926 size_t num_elements)
36927 {
36928 size_t i;
36929 size_t start_column = 0;
36930 size_t cur_column;
36931 size_t max_column = 120;
36932 size_t prefix_len = strlen (prefix);
36933 size_t comma_len = 0;
36934 const char *comma = "";
36935
36936 if (indent)
36937 start_column += fprintf (file, "%*s", indent, "");
36938
36939 if (!flags)
36940 {
36941 fprintf (file, DEBUG_FMT_S, string, "<none>");
36942 return;
36943 }
36944
36945 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36946
36947 /* Print the various mask options. */
36948 cur_column = start_column;
36949 for (i = 0; i < num_elements; i++)
36950 {
36951 bool invert = opts[i].invert;
36952 const char *name = opts[i].name;
36953 const char *no_str = "";
36954 HOST_WIDE_INT mask = opts[i].mask;
36955 size_t len = comma_len + prefix_len + strlen (name);
36956
36957 if (!invert)
36958 {
36959 if ((flags & mask) == 0)
36960 {
36961 no_str = "no-";
36962 len += sizeof ("no-") - 1;
36963 }
36964
36965 flags &= ~mask;
36966 }
36967
36968 else
36969 {
36970 if ((flags & mask) != 0)
36971 {
36972 no_str = "no-";
36973 len += sizeof ("no-") - 1;
36974 }
36975
36976 flags |= mask;
36977 }
36978
36979 cur_column += len;
36980 if (cur_column > max_column)
36981 {
36982 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
36983 cur_column = start_column + len;
36984 comma = "";
36985 }
36986
36987 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36988 comma = ", ";
36989 comma_len = sizeof (", ") - 1;
36990 }
36991
36992 fputs ("\n", file);
36993 }
36994
36995 /* Helper function to print the current isa options on a line. */
36996
36997 static void
36998 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36999 HOST_WIDE_INT flags)
37000 {
37001 rs6000_print_options_internal (file, indent, string, flags, "-m",
37002 &rs6000_opt_masks[0],
37003 ARRAY_SIZE (rs6000_opt_masks));
37004 }
37005
37006 static void
37007 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37008 HOST_WIDE_INT flags)
37009 {
37010 rs6000_print_options_internal (file, indent, string, flags, "",
37011 &rs6000_builtin_mask_names[0],
37012 ARRAY_SIZE (rs6000_builtin_mask_names));
37013 }
37014
37015 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37016 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37017 -mupper-regs-df, etc.).
37018
37019 If the user used -mno-power8-vector, we need to turn off all of the implicit
37020 ISA 2.07 and 3.0 options that relate to the vector unit.
37021
37022 If the user used -mno-power9-vector, we need to turn off all of the implicit
37023 ISA 3.0 options that relate to the vector unit.
37024
37025 This function does not handle explicit options such as the user specifying
37026 -mdirect-move. These are handled in rs6000_option_override_internal, and
37027 the appropriate error is given if needed.
37028
37029 We return a mask of all of the implicit options that should not be enabled
37030 by default. */
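/* As an illustrative sketch, compiling with "-mno-vsx -mpower8-vector" makes
   the loop below report

     error: '-mno-vsx' turns off '-mpower8-vector'

   because -mpower8-vector depends on VSX, whose mask is therefore assumed
   to be included in OTHER_VSX_VECTOR_MASKS.  */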
37031
37032 static HOST_WIDE_INT
37033 rs6000_disable_incompatible_switches (void)
37034 {
37035 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37036 size_t i, j;
37037
37038 static const struct {
37039 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37040 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37041 const char *const name; /* name of the switch. */
37042 } flags[] = {
37043 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37044 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37045 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37046 };
37047
37048 for (i = 0; i < ARRAY_SIZE (flags); i++)
37049 {
37050 HOST_WIDE_INT no_flag = flags[i].no_flag;
37051
37052 if ((rs6000_isa_flags & no_flag) == 0
37053 && (rs6000_isa_flags_explicit & no_flag) != 0)
37054 {
37055 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37056 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37057 & rs6000_isa_flags
37058 & dep_flags);
37059
37060 if (set_flags)
37061 {
37062 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37063 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37064 {
37065 set_flags &= ~rs6000_opt_masks[j].mask;
37066 error ("%<-mno-%s%> turns off %<-m%s%>",
37067 flags[i].name,
37068 rs6000_opt_masks[j].name);
37069 }
37070
37071 gcc_assert (!set_flags);
37072 }
37073
37074 rs6000_isa_flags &= ~dep_flags;
37075 ignore_masks |= no_flag | dep_flags;
37076 }
37077 }
37078
37079 return ignore_masks;
37080 }
37081
37082 \f
37083 /* Helper function for printing the function name when debugging. */
37084
37085 static const char *
37086 get_decl_name (tree fn)
37087 {
37088 tree name;
37089
37090 if (!fn)
37091 return "<null>";
37092
37093 name = DECL_NAME (fn);
37094 if (!name)
37095 return "<no-name>";
37096
37097 return IDENTIFIER_POINTER (name);
37098 }
37099
37100 /* Return the clone id of the target we are compiling code for in a target
37101 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37102 the priority list for the target clones (ordered from lowest to
37103 highest). */
37104
37105 static int
37106 rs6000_clone_priority (tree fndecl)
37107 {
37108 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37109 HOST_WIDE_INT isa_masks;
37110 int ret = CLONE_DEFAULT;
37111 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37112 const char *attrs_str = NULL;
37113
37114 attrs = TREE_VALUE (TREE_VALUE (attrs));
37115 attrs_str = TREE_STRING_POINTER (attrs);
37116
37117 /* Return priority zero for the default function. Return the ISA needed for the
37118 function if it is not the default. */
37119 if (strcmp (attrs_str, "default") != 0)
37120 {
37121 if (fn_opts == NULL_TREE)
37122 fn_opts = target_option_default_node;
37123
37124 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37125 isa_masks = rs6000_isa_flags;
37126 else
37127 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37128
37129 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37130 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37131 break;
37132 }
37133
37134 if (TARGET_DEBUG_TARGET)
37135 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37136 get_decl_name (fndecl), ret);
37137
37138 return ret;
37139 }
37140
37141 /* This compares the priority of target features in function DECL1 and DECL2.
37142 It returns positive value if DECL1 is higher priority, negative value if
37143 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37144 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37145
37146 static int
37147 rs6000_compare_version_priority (tree decl1, tree decl2)
37148 {
37149 int priority1 = rs6000_clone_priority (decl1);
37150 int priority2 = rs6000_clone_priority (decl2);
37151 int ret = priority1 - priority2;
37152
37153 if (TARGET_DEBUG_TARGET)
37154 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37155 get_decl_name (decl1), get_decl_name (decl2), ret);
37156
37157 return ret;
37158 }
37159
37160 /* Make a dispatcher declaration for the multi-versioned function DECL.
37161 Calls to DECL function will be replaced with calls to the dispatcher
37162 by the front-end. Returns the decl of the dispatcher function. */
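/* For example (an illustrative sketch, with mod3 a placeholder name):

     __attribute__ ((target_clones ("cpu=power9", "default")))
     long mod3 (long a) { return a % 3; }

   Calls to mod3 are then routed through the ifunc dispatcher created below,
   which selects a version at run time.  */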
37163
37164 static tree
37165 rs6000_get_function_versions_dispatcher (void *decl)
37166 {
37167 tree fn = (tree) decl;
37168 struct cgraph_node *node = NULL;
37169 struct cgraph_node *default_node = NULL;
37170 struct cgraph_function_version_info *node_v = NULL;
37171 struct cgraph_function_version_info *first_v = NULL;
37172
37173 tree dispatch_decl = NULL;
37174
37175 struct cgraph_function_version_info *default_version_info = NULL;
37176 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37177
37178 if (TARGET_DEBUG_TARGET)
37179 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37180 get_decl_name (fn));
37181
37182 node = cgraph_node::get (fn);
37183 gcc_assert (node != NULL);
37184
37185 node_v = node->function_version ();
37186 gcc_assert (node_v != NULL);
37187
37188 if (node_v->dispatcher_resolver != NULL)
37189 return node_v->dispatcher_resolver;
37190
37191 /* Find the default version and make it the first node. */
37192 first_v = node_v;
37193 /* Go to the beginning of the chain. */
37194 while (first_v->prev != NULL)
37195 first_v = first_v->prev;
37196
37197 default_version_info = first_v;
37198 while (default_version_info != NULL)
37199 {
37200 const tree decl2 = default_version_info->this_node->decl;
37201 if (is_function_default_version (decl2))
37202 break;
37203 default_version_info = default_version_info->next;
37204 }
37205
37206 /* If there is no default node, just return NULL. */
37207 if (default_version_info == NULL)
37208 return NULL;
37209
37210 /* Make default info the first node. */
37211 if (first_v != default_version_info)
37212 {
37213 default_version_info->prev->next = default_version_info->next;
37214 if (default_version_info->next)
37215 default_version_info->next->prev = default_version_info->prev;
37216 first_v->prev = default_version_info;
37217 default_version_info->next = first_v;
37218 default_version_info->prev = NULL;
37219 }
37220
37221 default_node = default_version_info->this_node;
37222
37223 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37224 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37225 "%<target_clones%> attribute needs GLIBC (2.23 and newer) that "
37226 "exports hardware capability bits");
37227 #else
37228
37229 if (targetm.has_ifunc_p ())
37230 {
37231 struct cgraph_function_version_info *it_v = NULL;
37232 struct cgraph_node *dispatcher_node = NULL;
37233 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37234
37235 /* Right now, the dispatching is done via ifunc. */
37236 dispatch_decl = make_dispatcher_decl (default_node->decl);
37237
37238 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37239 gcc_assert (dispatcher_node != NULL);
37240 dispatcher_node->dispatcher_function = 1;
37241 dispatcher_version_info
37242 = dispatcher_node->insert_new_function_version ();
37243 dispatcher_version_info->next = default_version_info;
37244 dispatcher_node->definition = 1;
37245
37246 /* Set the dispatcher for all the versions. */
37247 it_v = default_version_info;
37248 while (it_v != NULL)
37249 {
37250 it_v->dispatcher_resolver = dispatch_decl;
37251 it_v = it_v->next;
37252 }
37253 }
37254 else
37255 {
37256 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37257 "multiversioning needs ifunc which is not supported "
37258 "on this target");
37259 }
37260 #endif
37261
37262 return dispatch_decl;
37263 }
37264
37265 /* Make the resolver function decl to dispatch the versions of a multi-
37266 versioned function, DEFAULT_DECL. Create an empty basic block in the
37267 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37268 function. */
37269
37270 static tree
37271 make_resolver_func (const tree default_decl,
37272 const tree dispatch_decl,
37273 basic_block *empty_bb)
37274 {
37275 /* Make the resolver function static. The resolver function returns
37276 void *. */
37277 tree decl_name = clone_function_name (default_decl, "resolver");
37278 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37279 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37280 tree decl = build_fn_decl (resolver_name, type);
37281 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37282
37283 DECL_NAME (decl) = decl_name;
37284 TREE_USED (decl) = 1;
37285 DECL_ARTIFICIAL (decl) = 1;
37286 DECL_IGNORED_P (decl) = 0;
37287 TREE_PUBLIC (decl) = 0;
37288 DECL_UNINLINABLE (decl) = 1;
37289
37290 /* Resolver is not external, body is generated. */
37291 DECL_EXTERNAL (decl) = 0;
37292 DECL_EXTERNAL (dispatch_decl) = 0;
37293
37294 DECL_CONTEXT (decl) = NULL_TREE;
37295 DECL_INITIAL (decl) = make_node (BLOCK);
37296 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37297
37298 /* Build result decl and add to function_decl. */
37299 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37300 DECL_CONTEXT (t) = decl;
37301 DECL_ARTIFICIAL (t) = 1;
37302 DECL_IGNORED_P (t) = 1;
37303 DECL_RESULT (decl) = t;
37304
37305 gimplify_function_tree (decl);
37306 push_cfun (DECL_STRUCT_FUNCTION (decl));
37307 *empty_bb = init_lowered_empty_function (decl, false,
37308 profile_count::uninitialized ());
37309
37310 cgraph_node::add_new_function (decl, true);
37311 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37312
37313 pop_cfun ();
37314
37315 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37316 DECL_ATTRIBUTES (dispatch_decl)
37317 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37318
37319 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37320
37321 return decl;
37322 }
37323
37324 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37325 return a pointer to VERSION_DECL if we are running on a machine that
37326 supports the hardware architecture bits indexed by CLONE_ISA. This function
37327 be called during version dispatch to decide which function version to
37328 execute. It returns the basic block at the end, to which more conditions
37329 can be added. */
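/* Roughly, for a non-default clone the code added to NEW_BB behaves like
   this pseudo-C sketch:

     if (__builtin_cpu_supports (rs6000_clone_map[clone_isa].name))
       return (void *) version_decl;

   and otherwise falls through to the condition for the next clone.  */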
37330
37331 static basic_block
37332 add_condition_to_bb (tree function_decl, tree version_decl,
37333 int clone_isa, basic_block new_bb)
37334 {
37335 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37336
37337 gcc_assert (new_bb != NULL);
37338 gimple_seq gseq = bb_seq (new_bb);
37339
37340
37341 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37342 build_fold_addr_expr (version_decl));
37343 tree result_var = create_tmp_var (ptr_type_node);
37344 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37345 gimple *return_stmt = gimple_build_return (result_var);
37346
37347 if (clone_isa == CLONE_DEFAULT)
37348 {
37349 gimple_seq_add_stmt (&gseq, convert_stmt);
37350 gimple_seq_add_stmt (&gseq, return_stmt);
37351 set_bb_seq (new_bb, gseq);
37352 gimple_set_bb (convert_stmt, new_bb);
37353 gimple_set_bb (return_stmt, new_bb);
37354 pop_cfun ();
37355 return new_bb;
37356 }
37357
37358 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37359 tree cond_var = create_tmp_var (bool_int_type_node);
37360 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37361 const char *arg_str = rs6000_clone_map[clone_isa].name;
37362 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37363 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37364 gimple_call_set_lhs (call_cond_stmt, cond_var);
37365
37366 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37367 gimple_set_bb (call_cond_stmt, new_bb);
37368 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37369
37370 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37371 NULL_TREE, NULL_TREE);
37372 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37373 gimple_set_bb (if_else_stmt, new_bb);
37374 gimple_seq_add_stmt (&gseq, if_else_stmt);
37375
37376 gimple_seq_add_stmt (&gseq, convert_stmt);
37377 gimple_seq_add_stmt (&gseq, return_stmt);
37378 set_bb_seq (new_bb, gseq);
37379
37380 basic_block bb1 = new_bb;
37381 edge e12 = split_block (bb1, if_else_stmt);
37382 basic_block bb2 = e12->dest;
37383 e12->flags &= ~EDGE_FALLTHRU;
37384 e12->flags |= EDGE_TRUE_VALUE;
37385
37386 edge e23 = split_block (bb2, return_stmt);
37387 gimple_set_bb (convert_stmt, bb2);
37388 gimple_set_bb (return_stmt, bb2);
37389
37390 basic_block bb3 = e23->dest;
37391 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37392
37393 remove_edge (e23);
37394 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37395
37396 pop_cfun ();
37397 return bb3;
37398 }
37399
37400 /* This function generates the dispatch function for multi-versioned functions.
37401 DISPATCH_DECL is the function which will contain the dispatch logic.
37402 FNDECLS is the set of function choices for dispatch; it is passed as a vector.
37403 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37404 code is generated. */
37405
37406 static int
37407 dispatch_function_versions (tree dispatch_decl,
37408 void *fndecls_p,
37409 basic_block *empty_bb)
37410 {
37411 int ix;
37412 tree ele;
37413 vec<tree> *fndecls;
37414 tree clones[CLONE_MAX];
37415
37416 if (TARGET_DEBUG_TARGET)
37417 fputs ("dispatch_function_versions, top\n", stderr);
37418
37419 gcc_assert (dispatch_decl != NULL
37420 && fndecls_p != NULL
37421 && empty_bb != NULL);
37422
37423 /* fndecls_p is actually a vector. */
37424 fndecls = static_cast<vec<tree> *> (fndecls_p);
37425
37426 /* At least one more version other than the default. */
37427 gcc_assert (fndecls->length () >= 2);
37428
37429 /* The first version in the vector is the default decl. */
37430 memset ((void *) clones, '\0', sizeof (clones));
37431 clones[CLONE_DEFAULT] = (*fndecls)[0];
37432
37433 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37434 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37435 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37436 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37437 to insert the code here to do the call. */
37438
37439 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37440 {
37441 int priority = rs6000_clone_priority (ele);
37442 if (!clones[priority])
37443 clones[priority] = ele;
37444 }
37445
37446 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37447 if (clones[ix])
37448 {
37449 if (TARGET_DEBUG_TARGET)
37450 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37451 ix, get_decl_name (clones[ix]));
37452
37453 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37454 *empty_bb);
37455 }
37456
37457 return 0;
37458 }
37459
37460 /* Generate the dispatching code body to dispatch multi-versioned function
37461 DECL. The target hook is called to process the "target" attributes and
37462 provide the code to dispatch the right function at run-time. NODE points
37463 to the dispatcher decl whose body will be created. */
37464
37465 static tree
37466 rs6000_generate_version_dispatcher_body (void *node_p)
37467 {
37468 tree resolver;
37469 basic_block empty_bb;
37470 struct cgraph_node *node = (cgraph_node *) node_p;
37471 struct cgraph_function_version_info *ninfo = node->function_version ();
37472
37473 if (ninfo->dispatcher_resolver)
37474 return ninfo->dispatcher_resolver;
37475
37476 /* node is going to be an alias, so remove the finalized bit. */
37477 node->definition = false;
37478
37479 /* The first version in the chain corresponds to the default version. */
37480 ninfo->dispatcher_resolver = resolver
37481 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37482
37483 if (TARGET_DEBUG_TARGET)
37484 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37485 get_decl_name (resolver));
37486
37487 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37488 auto_vec<tree, 2> fn_ver_vec;
37489
37490 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37491 vinfo;
37492 vinfo = vinfo->next)
37493 {
37494 struct cgraph_node *version = vinfo->this_node;
37495 /* Check for virtual functions here again, as by this time it should
37496 have been determined if this function needs a vtable index or
37497 not. This happens for methods in derived classes that override
37498 virtual methods in base classes but are not explicitly marked as
37499 virtual. */
37500 if (DECL_VINDEX (version->decl))
37501 sorry ("Virtual function multiversioning not supported");
37502
37503 fn_ver_vec.safe_push (version->decl);
37504 }
37505
37506 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37507 cgraph_edge::rebuild_edges ();
37508 pop_cfun ();
37509 return resolver;
37510 }
37511
37512 \f
37513 /* Hook to determine if one function can safely inline another. */
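/* For example, a caller compiled with -mcpu=power9 (which enables VSX and
   Altivec) may inline a callee declared with
   __attribute__((target ("altivec"))), but a caller built without VSX may
   not inline a callee that requires it: the callee's ISA flags must be a
   subset of the caller's.  */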
37514
37515 static bool
37516 rs6000_can_inline_p (tree caller, tree callee)
37517 {
37518 bool ret = false;
37519 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37520 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37521
37522 /* If callee has no option attributes, then it is ok to inline. */
37523 if (!callee_tree)
37524 ret = true;
37525
37526 /* If caller has no option attributes, but callee does then it is not ok to
37527 inline. */
37528 else if (!caller_tree)
37529 ret = false;
37530
37531 else
37532 {
37533 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37534 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37535
37536 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37537 can inline an altivec function but a non-vsx function can't inline a
37538 vsx function. */
37539 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37540 == callee_opts->x_rs6000_isa_flags)
37541 ret = true;
37542 }
37543
37544 if (TARGET_DEBUG_TARGET)
37545 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37546 get_decl_name (caller), get_decl_name (callee),
37547 (ret ? "can" : "cannot"));
37548
37549 return ret;
37550 }
37551 \f
37552 /* Allocate a stack temp and fix up the address so it meets the particular
37553 memory requirements (either offsettable or REG+REG addressing). */
37554
37555 rtx
37556 rs6000_allocate_stack_temp (machine_mode mode,
37557 bool offsettable_p,
37558 bool reg_reg_p)
37559 {
37560 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37561 rtx addr = XEXP (stack, 0);
37562 int strict_p = reload_completed;
37563
37564 if (!legitimate_indirect_address_p (addr, strict_p))
37565 {
37566 if (offsettable_p
37567 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37568 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37569
37570 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37571 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37572 }
37573
37574 return stack;
37575 }
37576
37577 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37578 convert to such a form to deal with memory reference instructions
37579 like STFIWX and LDBRX that only take reg+reg addressing. */
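/* For instance, given a PRE_INC address such as (mem (pre_inc (reg R))),
   the code below emits an explicit R = R + size addition and then uses the
   plain (mem (reg R)) form, which reg+reg/indirect instructions accept.  */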
37580
37581 rtx
37582 rs6000_force_indexed_or_indirect_mem (rtx x)
37583 {
37584 machine_mode mode = GET_MODE (x);
37585
37586 gcc_assert (MEM_P (x));
37587 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37588 {
37589 rtx addr = XEXP (x, 0);
37590 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37591 {
37592 rtx reg = XEXP (addr, 0);
37593 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37594 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37595 gcc_assert (REG_P (reg));
37596 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37597 addr = reg;
37598 }
37599 else if (GET_CODE (addr) == PRE_MODIFY)
37600 {
37601 rtx reg = XEXP (addr, 0);
37602 rtx expr = XEXP (addr, 1);
37603 gcc_assert (REG_P (reg));
37604 gcc_assert (GET_CODE (expr) == PLUS);
37605 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37606 addr = reg;
37607 }
37608
37609 x = replace_equiv_address (x, force_reg (Pmode, addr));
37610 }
37611
37612 return x;
37613 }
37614
37615 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37616
37617 On the RS/6000, all integer constants are acceptable, though most won't be
37618 valid for particular insns. Only easy FP constants are acceptable. */
37619
37620 static bool
37621 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37622 {
37623 if (TARGET_ELF && tls_referenced_p (x))
37624 return false;
37625
37626 if (CONST_DOUBLE_P (x))
37627 return easy_fp_constant (x, mode);
37628
37629 if (GET_CODE (x) == CONST_VECTOR)
37630 return easy_vector_constant (x, mode);
37631
37632 return true;
37633 }
37634
37635 \f
37636 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37637
37638 static bool
37639 chain_already_loaded (rtx_insn *last)
37640 {
37641 for (; last != NULL; last = PREV_INSN (last))
37642 {
37643 if (NONJUMP_INSN_P (last))
37644 {
37645 rtx patt = PATTERN (last);
37646
37647 if (GET_CODE (patt) == SET)
37648 {
37649 rtx lhs = XEXP (patt, 0);
37650
37651 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37652 return true;
37653 }
37654 }
37655 }
37656 return false;
37657 }
37658
37659 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37660
37661 void
37662 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37663 {
37664 rtx func = func_desc;
37665 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37666 rtx toc_load = NULL_RTX;
37667 rtx toc_restore = NULL_RTX;
37668 rtx func_addr;
37669 rtx abi_reg = NULL_RTX;
37670 rtx call[4];
37671 int n_call;
37672 rtx insn;
37673 bool is_pltseq_longcall;
37674
37675 if (global_tlsarg)
37676 tlsarg = global_tlsarg;
37677
37678 /* Handle longcall attributes. */
37679 is_pltseq_longcall = false;
37680 if ((INTVAL (cookie) & CALL_LONG) != 0
37681 && GET_CODE (func_desc) == SYMBOL_REF)
37682 {
37683 func = rs6000_longcall_ref (func_desc, tlsarg);
37684 if (TARGET_PLTSEQ)
37685 is_pltseq_longcall = true;
37686 }
37687
37688 /* Handle indirect calls. */
37689 if (!SYMBOL_REF_P (func)
37690 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37691 {
37692 /* Save the TOC into its reserved slot before the call,
37693 and prepare to restore it after the call. */
37694 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37695 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37696 gen_rtvec (1, stack_toc_offset),
37697 UNSPEC_TOCSLOT);
37698 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37699
37700 /* Can we optimize saving the TOC in the prologue or
37701 do we need to do it at every call? */
37702 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37703 cfun->machine->save_toc_in_prologue = true;
37704 else
37705 {
37706 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37707 rtx stack_toc_mem = gen_frame_mem (Pmode,
37708 gen_rtx_PLUS (Pmode, stack_ptr,
37709 stack_toc_offset));
37710 MEM_VOLATILE_P (stack_toc_mem) = 1;
37711 if (is_pltseq_longcall)
37712 {
37713 /* Use UNSPEC_PLTSEQ here to emit every instruction in an
37714 inline PLT call sequence with a reloc, enabling the
37715 linker to edit the sequence back to a direct call
37716 when that makes sense. */
37717 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37718 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37719 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37720 }
37721 else
37722 emit_move_insn (stack_toc_mem, toc_reg);
37723 }
37724
37725 if (DEFAULT_ABI == ABI_ELFv2)
37726 {
37727 /* A function pointer in the ELFv2 ABI is just a plain address, but
37728 the ABI requires it to be loaded into r12 before the call. */
37729 func_addr = gen_rtx_REG (Pmode, 12);
37730 if (!rtx_equal_p (func_addr, func))
37731 emit_move_insn (func_addr, func);
37732 abi_reg = func_addr;
37733 /* Indirect calls via CTR are strongly preferred over indirect
37734 calls via LR, so move the address there. Needed to mark
37735 this insn for linker plt sequence editing too. */
37736 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37737 if (is_pltseq_longcall)
37738 {
37739 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37740 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37741 emit_insn (gen_rtx_SET (func_addr, mark_func));
37742 v = gen_rtvec (2, func_addr, func_desc);
37743 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37744 }
37745 else
37746 emit_move_insn (func_addr, abi_reg);
37747 }
37748 else
37749 {
37750 /* A function pointer under AIX is a pointer to a data area whose
37751 first word contains the actual address of the function, whose
37752 second word contains a pointer to its TOC, and whose third word
37753 contains a value to place in the static chain register (r11).
37754 Note that if we load the static chain, our "trampoline" need
37755 not have any executable code. */
37756
37757 /* Load up address of the actual function. */
37758 func = force_reg (Pmode, func);
37759 func_addr = gen_reg_rtx (Pmode);
37760 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37761
37762 /* Indirect calls via CTR are strongly preferred over indirect
37763 calls via LR, so move the address there. */
37764 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37765 emit_move_insn (ctr_reg, func_addr);
37766 func_addr = ctr_reg;
37767
37768 /* Prepare to load the TOC of the called function. Note that the
37769 TOC load must happen immediately before the actual call so
37770 that unwinding the TOC registers works correctly. See the
37771 comment in frob_update_context. */
37772 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37773 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37774 gen_rtx_PLUS (Pmode, func,
37775 func_toc_offset));
37776 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37777
37778 /* If we have a static chain, load it up. But, if the call was
37779 originally direct, the 3rd word has not been written since no
37780 trampoline has been built, so we ought not to load it, lest we
37781 override a static chain value. */
37782 if (!(GET_CODE (func_desc) == SYMBOL_REF
37783 && SYMBOL_REF_FUNCTION_P (func_desc))
37784 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37785 && !chain_already_loaded (get_current_sequence ()->next->last))
37786 {
37787 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37788 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37789 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37790 gen_rtx_PLUS (Pmode, func,
37791 func_sc_offset));
37792 emit_move_insn (sc_reg, func_sc_mem);
37793 abi_reg = sc_reg;
37794 }
37795 }
37796 }
37797 else
37798 {
37799 /* Direct calls use the TOC: for local calls, the callee will
37800 assume the TOC register is set; for non-local calls, the
37801 PLT stub needs the TOC register. */
37802 abi_reg = toc_reg;
37803 func_addr = func;
37804 }
37805
37806 /* Create the call. */
37807 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37808 if (value != NULL_RTX)
37809 call[0] = gen_rtx_SET (value, call[0]);
37810 n_call = 1;
37811
37812 if (toc_load)
37813 call[n_call++] = toc_load;
37814 if (toc_restore)
37815 call[n_call++] = toc_restore;
37816
37817 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37818
37819 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37820 insn = emit_call_insn (insn);
37821
37822 /* Mention all registers defined by the ABI to hold information
37823 as uses in CALL_INSN_FUNCTION_USAGE. */
37824 if (abi_reg)
37825 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37826 }
37827
37828 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37829
37830 void
37831 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37832 {
37833 rtx call[2];
37834 rtx insn;
37835
37836 gcc_assert (INTVAL (cookie) == 0);
37837
37838 if (global_tlsarg)
37839 tlsarg = global_tlsarg;
37840
37841 /* Create the call. */
37842 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37843 if (value != NULL_RTX)
37844 call[0] = gen_rtx_SET (value, call[0]);
37845
37846 call[1] = simple_return_rtx;
37847
37848 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37849 insn = emit_call_insn (insn);
37850
37851 /* Note use of the TOC register. */
37852 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37853 }
37854
37855 /* Expand code to perform a call under the SYSV4 ABI. */
37856
37857 void
37858 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37859 {
37860 rtx func = func_desc;
37861 rtx func_addr;
37862 rtx call[4];
37863 rtx insn;
37864 rtx abi_reg = NULL_RTX;
37865 int n;
37866
37867 if (global_tlsarg)
37868 tlsarg = global_tlsarg;
37869
37870 /* Handle longcall attributes. */
37871 if ((INTVAL (cookie) & CALL_LONG) != 0
37872 && GET_CODE (func_desc) == SYMBOL_REF)
37873 {
37874 func = rs6000_longcall_ref (func_desc, tlsarg);
37875 /* If the longcall was implemented as an inline PLT call using
37876 PLT unspecs then func will be REG:r11. If not, func will be
37877 a pseudo reg. The inline PLT call sequence supports lazy
37878 linking (and longcalls to functions in dlopen'd libraries).
37879 The other style of longcalls don't. The lazy linking entry
37880 to the dynamic symbol resolver requires r11 be the function
37881 address (as it is for linker generated PLT stubs). Ensure
37882 r11 stays valid to the bctrl by marking r11 used by the call. */
37883 if (TARGET_PLTSEQ)
37884 abi_reg = func;
37885 }
37886
37887 /* Handle indirect calls. */
37888 if (GET_CODE (func) != SYMBOL_REF)
37889 {
37890 func = force_reg (Pmode, func);
37891
37892 /* Indirect calls via CTR are strongly preferred over indirect
37893 calls via LR, so move the address there. That can't be left
37894 to reload because we want to mark every instruction in an
37895 inline PLT call sequence with a reloc, enabling the linker to
37896 edit the sequence back to a direct call when that makes sense. */
37897 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37898 if (abi_reg)
37899 {
37900 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37901 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37902 emit_insn (gen_rtx_SET (func_addr, mark_func));
37903 v = gen_rtvec (2, func_addr, func_desc);
37904 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37905 }
37906 else
37907 emit_move_insn (func_addr, func);
37908 }
37909 else
37910 func_addr = func;
37911
37912 /* Create the call. */
37913 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37914 if (value != NULL_RTX)
37915 call[0] = gen_rtx_SET (value, call[0]);
37916
37917 call[1] = gen_rtx_USE (VOIDmode, cookie);
37918 n = 2;
37919 if (TARGET_SECURE_PLT
37920 && flag_pic
37921 && GET_CODE (func_addr) == SYMBOL_REF
37922 && !SYMBOL_REF_LOCAL_P (func_addr))
37923 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
37924
37925 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37926
37927 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
37928 insn = emit_call_insn (insn);
37929 if (abi_reg)
37930 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37931 }
37932
37933 /* Expand code to perform a sibling call under the SysV4 ABI. */
37934
37935 void
37936 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37937 {
37938 rtx func = func_desc;
37939 rtx func_addr;
37940 rtx call[3];
37941 rtx insn;
37942 rtx abi_reg = NULL_RTX;
37943
37944 if (global_tlsarg)
37945 tlsarg = global_tlsarg;
37946
37947 /* Handle longcall attributes. */
37948 if ((INTVAL (cookie) & CALL_LONG) != 0
37949 && GET_CODE (func_desc) == SYMBOL_REF)
37950 {
37951 func = rs6000_longcall_ref (func_desc, tlsarg);
37952 /* If the longcall was implemented as an inline PLT call using
37953 PLT unspecs then func will be REG:r11. If not, func will be
37954 a pseudo reg. The inline PLT call sequence supports lazy
37955 linking (and longcalls to functions in dlopen'd libraries).
37956 The other style of longcalls don't. The lazy linking entry
37957 to the dynamic symbol resolver requires r11 be the function
37958 address (as it is for linker generated PLT stubs). Ensure
37959 r11 stays valid to the bctr by marking r11 used by the call. */
37960 if (TARGET_PLTSEQ)
37961 abi_reg = func;
37962 }
37963
37964 /* Handle indirect calls. */
37965 if (GET_CODE (func) != SYMBOL_REF)
37966 {
37967 func = force_reg (Pmode, func);
37968
37969 /* Indirect sibcalls must go via CTR. That can't be left to
37970 reload because we want to mark every instruction in an inline
37971 PLT call sequence with a reloc, enabling the linker to edit
37972 the sequence back to a direct call when that makes sense. */
37973 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37974 if (abi_reg)
37975 {
37976 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37977 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37978 emit_insn (gen_rtx_SET (func_addr, mark_func));
37979 v = gen_rtvec (2, func_addr, func_desc);
37980 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37981 }
37982 else
37983 emit_move_insn (func_addr, func);
37984 }
37985 else
37986 func_addr = func;
37987
37988 /* Create the call. */
37989 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37990 if (value != NULL_RTX)
37991 call[0] = gen_rtx_SET (value, call[0]);
37992
37993 call[1] = gen_rtx_USE (VOIDmode, cookie);
37994 call[2] = simple_return_rtx;
37995
37996 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
37997 insn = emit_call_insn (insn);
37998 if (abi_reg)
37999 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38000 }
38001
38002 #if TARGET_MACHO
38003
38004 /* Expand code to perform a call under the Darwin ABI.
38005 Modulo handling of mlongcall, this is much the same as sysv.
38006 If/when the longcall optimisation is removed, we could drop this
38007 code and use the sysv case (taking care to avoid the tls stuff).
38008
38009 We can use this for sibcalls too, if needed. */
38010
38011 void
38012 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38013 rtx cookie, bool sibcall)
38014 {
38015 rtx func = func_desc;
38016 rtx func_addr;
38017 rtx call[3];
38018 rtx insn;
38019 int cookie_val = INTVAL (cookie);
38020 bool make_island = false;
38021
38024 38022 /* Handle longcall attributes; there are two cases for Darwin:
38025 38023 1) Newer linkers are capable of synthesising any branch islands needed.
38026 38024 2) We need a helper branch island synthesised by the compiler.
38027 38025 The second case has mostly been retired and we don't use it for m64.
38028 38026 In fact, it is only an optimisation; we could just indirect as sysv
38029 38027 does, but we keep it for backwards compatibility for now.
38030 38028 If we're going to use this, then we need to keep the CALL_LONG bit set,
38031 38029 so that we can pick up the special insn form later. */
38030 if ((cookie_val & CALL_LONG) != 0
38031 && GET_CODE (func_desc) == SYMBOL_REF)
38032 {
38033 if (darwin_emit_branch_islands && TARGET_32BIT)
38034 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38035 else
38036 {
38037 /* The linker is capable of doing this, but the user explicitly
38038 asked for -mlongcall, so we'll do the 'normal' version. */
38039 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38040 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38041 }
38042 }
38043
38044 /* Handle indirect calls. */
38045 if (GET_CODE (func) != SYMBOL_REF)
38046 {
38047 func = force_reg (Pmode, func);
38048
38049 /* Indirect calls via CTR are strongly preferred over indirect
38050 calls via LR, and are required for indirect sibcalls, so move
38051 the address there. */
38052 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38053 emit_move_insn (func_addr, func);
38054 }
38055 else
38056 func_addr = func;
38057
38058 /* Create the call. */
38059 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38060 if (value != NULL_RTX)
38061 call[0] = gen_rtx_SET (value, call[0]);
38062
38063 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38064
38065 if (sibcall)
38066 call[2] = simple_return_rtx;
38067 else
38068 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38069
38070 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38071 insn = emit_call_insn (insn);
38072 /* Now that we have the debug info in the insn, we can set up the branch
38073 island if we're using one. */
38074 if (make_island)
38075 {
38076 tree funname = get_identifier (XSTR (func_desc, 0));
38077
38078 if (no_previous_def (funname))
38079 {
38080 rtx label_rtx = gen_label_rtx ();
38081 char *label_buf, temp_buf[256];
38082 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38083 CODE_LABEL_NUMBER (label_rtx));
38084 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38085 tree labelname = get_identifier (label_buf);
38086 add_compiler_branch_island (labelname, funname,
38087 insn_line ((const rtx_insn*)insn));
38088 }
38089 }
38090 }
38091 #endif
38092
38093 void
38094 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38095 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38096 {
38097 #if TARGET_MACHO
38098 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38099 #else
38100 gcc_unreachable ();
38101 #endif
38102 }
38103
38104
38105 void
38106 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38107 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38108 {
38109 #if TARGET_MACHO
38110 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38111 #else
38112 gcc_unreachable ();
38113 #endif
38114 }
38115
38116
38117 /* Return whether we need to always update the saved TOC pointer when we update
38118 the stack pointer. */
38119
38120 static bool
38121 rs6000_save_toc_in_prologue_p (void)
38122 {
38123 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38124 }
38125
38126 /* Return whether we should generate PC-relative code for FNDECL. */
38127 bool
38128 rs6000_fndecl_pcrel_p (const_tree fndecl)
38129 {
38130 if (DEFAULT_ABI != ABI_ELFv2)
38131 return false;
38132
38133 struct cl_target_option *opts = target_opts_for_fn (fndecl);
38134
38135 return ((opts->x_rs6000_isa_flags & OPTION_MASK_PCREL) != 0
38136 && TARGET_CMODEL == CMODEL_MEDIUM);
38137 }
38138
38139 /* Return whether we should generate PC-relative code for *FN. */
38140 bool
38141 rs6000_pcrel_p (struct function *fn)
38142 {
38143 if (DEFAULT_ABI != ABI_ELFv2)
38144 return false;
38145
38146 /* Optimize usual case. */
38147 if (fn == cfun)
38148 return ((rs6000_isa_flags & OPTION_MASK_PCREL) != 0
38149 && TARGET_CMODEL == CMODEL_MEDIUM);
38150
38151 return rs6000_fndecl_pcrel_p (fn->decl);
38152 }
38153
38154 #ifdef HAVE_GAS_HIDDEN
38155 # define USE_HIDDEN_LINKONCE 1
38156 #else
38157 # define USE_HIDDEN_LINKONCE 0
38158 #endif
38159
38160 /* Fills in the label name that should be used for a 476 link stack thunk. */
38161
38162 void
38163 get_ppc476_thunk_name (char name[32])
38164 {
38165 gcc_assert (TARGET_LINK_STACK);
38166
38167 if (USE_HIDDEN_LINKONCE)
38168 sprintf (name, "__ppc476.get_thunk");
38169 else
38170 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38171 }
38172
38173 /* This function emits the simple thunk routine that is used to preserve
38174 the link stack on the 476 cpu. */
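/* A sketch of the expected output (directives are illustrative and vary
   with the target; this assumes the USE_HIDDEN_LINKONCE path below):

       .weak   __ppc476.get_thunk
       .hidden __ppc476.get_thunk
   __ppc476.get_thunk:
       blr

   Callers use a matched "bl __ppc476.get_thunk"/"blr" pair, which keeps
   the 476 link stack balanced while exposing the return address in LR.  */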
38175
38176 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38177 static void
38178 rs6000_code_end (void)
38179 {
38180 char name[32];
38181 tree decl;
38182
38183 if (!TARGET_LINK_STACK)
38184 return;
38185
38186 get_ppc476_thunk_name (name);
38187
38188 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38189 build_function_type_list (void_type_node, NULL_TREE));
38190 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38191 NULL_TREE, void_type_node);
38192 TREE_PUBLIC (decl) = 1;
38193 TREE_STATIC (decl) = 1;
38194
38195 #if RS6000_WEAK
38196 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38197 {
38198 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38199 targetm.asm_out.unique_section (decl, 0);
38200 switch_to_section (get_named_section (decl, NULL, 0));
38201 DECL_WEAK (decl) = 1;
38202 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38203 targetm.asm_out.globalize_label (asm_out_file, name);
38204 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38205 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38206 }
38207 else
38208 #endif
38209 {
38210 switch_to_section (text_section);
38211 ASM_OUTPUT_LABEL (asm_out_file, name);
38212 }
38213
38214 DECL_INITIAL (decl) = make_node (BLOCK);
38215 current_function_decl = decl;
38216 allocate_struct_function (decl, false);
38217 init_function_start (decl);
38218 first_function_block_is_cold = false;
38219 /* Make sure unwind info is emitted for the thunk if needed. */
38220 final_start_function (emit_barrier (), asm_out_file, 1);
38221
38222 fputs ("\tblr\n", asm_out_file);
38223
38224 final_end_function ();
38225 init_insn_lengths ();
38226 free_after_compilation (cfun);
38227 set_cfun (NULL);
38228 current_function_decl = NULL;
38229 }
38230
38231 /* Add r30 to hard reg set if the prologue sets it up and it is not
38232 pic_offset_table_rtx. */
38233
38234 static void
38235 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38236 {
38237 if (!TARGET_SINGLE_PIC_BASE
38238 && TARGET_TOC
38239 && TARGET_MINIMAL_TOC
38240 && !constant_pool_empty_p ())
38241 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38242 if (cfun->machine->split_stack_argp_used)
38243 add_to_hard_reg_set (&set->set, Pmode, 12);
38244
38245 /* Make sure the hard reg set doesn't include r2, which was possibly added
38246 via PIC_OFFSET_TABLE_REGNUM. */
38247 if (TARGET_TOC)
38248 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38249 }
38250
38251 \f
38252 /* Helper function for rs6000_split_logical to emit a logical instruction after
38253 splitting the operation into single GPR registers.
38254
38255 DEST is the destination register.
38256 OP1 and OP2 are the input source registers.
38257 CODE is the base operation (AND, IOR, XOR, NOT).
38258 MODE is the machine mode.
38259 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38260 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38261 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
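/* Worked examples (a sketch; the mnemonics are what the md patterns are
   expected to print): AND with const0_rtx collapses to "li dest,0"; AND
   with constm1_rtx becomes a plain move "mr dest,op1"; and the inverted
   forms map onto andc/orc/nand/nor/eqv when one of the COMPLEMENT_*
   flags is set.  */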
38262
38263 static void
38264 rs6000_split_logical_inner (rtx dest,
38265 rtx op1,
38266 rtx op2,
38267 enum rtx_code code,
38268 machine_mode mode,
38269 bool complement_final_p,
38270 bool complement_op1_p,
38271 bool complement_op2_p)
38272 {
38273 rtx bool_rtx;
38274
38275 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38276 if (op2 && CONST_INT_P (op2)
38277 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38278 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38279 {
38280 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38281 HOST_WIDE_INT value = INTVAL (op2) & mask;
38282
38283 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38284 if (code == AND)
38285 {
38286 if (value == 0)
38287 {
38288 emit_insn (gen_rtx_SET (dest, const0_rtx));
38289 return;
38290 }
38291
38292 else if (value == mask)
38293 {
38294 if (!rtx_equal_p (dest, op1))
38295 emit_insn (gen_rtx_SET (dest, op1));
38296 return;
38297 }
38298 }
38299
38300 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38301 into separate ORI/ORIS or XORI/XORIS instructions. */
38302 else if (code == IOR || code == XOR)
38303 {
38304 if (value == 0)
38305 {
38306 if (!rtx_equal_p (dest, op1))
38307 emit_insn (gen_rtx_SET (dest, op1));
38308 return;
38309 }
38310 }
38311 }
38312
38313 if (code == AND && mode == SImode
38314 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38315 {
38316 emit_insn (gen_andsi3 (dest, op1, op2));
38317 return;
38318 }
38319
38320 if (complement_op1_p)
38321 op1 = gen_rtx_NOT (mode, op1);
38322
38323 if (complement_op2_p)
38324 op2 = gen_rtx_NOT (mode, op2);
38325
38326 /* For canonical RTL, if only one arm is inverted it is the first. */
38327 if (!complement_op1_p && complement_op2_p)
38328 std::swap (op1, op2);
38329
38330 bool_rtx = ((code == NOT)
38331 ? gen_rtx_NOT (mode, op1)
38332 : gen_rtx_fmt_ee (code, mode, op1, op2));
38333
38334 if (complement_final_p)
38335 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38336
38337 emit_insn (gen_rtx_SET (dest, bool_rtx));
38338 }
38339
38340 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38341 operations are split immediately during RTL generation to allow for more
38342 optimizations of the AND/IOR/XOR.
38343
38344 OPERANDS is an array containing the destination and two input operands.
38345 CODE is the base operation (AND, IOR, XOR, NOT).
38346 MODE is the machine mode.
38347 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38348 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38349 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
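/* A worked example with hypothetical values: on 32-bit, a DImode IOR with
   the constant 0x12345678 splits into two SImode halves.  The high half
   (IOR with 0) becomes a simple move, while 0x12345678 is not a 16-bit
   logical constant, so the low half is further split into an "oris" of
   0x1234 followed by an "ori" of 0x5678.  */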
38352
38353 static void
38354 rs6000_split_logical_di (rtx operands[3],
38355 enum rtx_code code,
38356 bool complement_final_p,
38357 bool complement_op1_p,
38358 bool complement_op2_p)
38359 {
38360 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C (0xffffffff);
38361 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38362 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C (0x80000000);
38363 enum hi_lo { hi = 0, lo = 1 };
38364 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38365 size_t i;
38366
38367 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38368 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38369 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38370 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38371
38372 if (code == NOT)
38373 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38374 else
38375 {
38376 if (!CONST_INT_P (operands[2]))
38377 {
38378 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38379 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38380 }
38381 else
38382 {
38383 HOST_WIDE_INT value = INTVAL (operands[2]);
38384 HOST_WIDE_INT value_hi_lo[2];
38385
38386 gcc_assert (!complement_final_p);
38387 gcc_assert (!complement_op1_p);
38388 gcc_assert (!complement_op2_p);
38389
38390 value_hi_lo[hi] = value >> 32;
38391 value_hi_lo[lo] = value & lower_32bits;
38392
38393 for (i = 0; i < 2; i++)
38394 {
38395 HOST_WIDE_INT sub_value = value_hi_lo[i];
38396
38397 if (sub_value & sign_bit)
38398 sub_value |= upper_32bits;
38399
38400 op2_hi_lo[i] = GEN_INT (sub_value);
38401
38402 /* If this is an AND instruction, check to see if we need to load
38403 the value in a register. */
38404 if (code == AND && sub_value != -1 && sub_value != 0
38405 && !and_operand (op2_hi_lo[i], SImode))
38406 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38407 }
38408 }
38409 }
38410
38411 for (i = 0; i < 2; i++)
38412 {
38413 /* Split large IOR/XOR operations. */
38414 if ((code == IOR || code == XOR)
38415 && CONST_INT_P (op2_hi_lo[i])
38416 && !complement_final_p
38417 && !complement_op1_p
38418 && !complement_op2_p
38419 && !logical_const_operand (op2_hi_lo[i], SImode))
38420 {
38421 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38422 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C (0xffff0000);
38423 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C (0x0000ffff);
38424 rtx tmp = gen_reg_rtx (SImode);
38425
38426 /* Make sure the constant is sign extended. */
38427 if ((hi_16bits & sign_bit) != 0)
38428 hi_16bits |= upper_32bits;
38429
38430 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38431 code, SImode, false, false, false);
38432
38433 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38434 code, SImode, false, false, false);
38435 }
38436 else
38437 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38438 code, SImode, complement_final_p,
38439 complement_op1_p, complement_op2_p);
38440 }
38441
38442 return;
38443 }
38444
38445 /* Split the insns that make up boolean operations operating on multiple GPR
38446 registers. The boolean MD patterns ensure that the inputs either are
38447 exactly the same as the output registers, or there is no overlap.
38448
38449 OPERANDS is an array containing the destination and two input operands.
38450 CODE is the base operation (AND, IOR, XOR, NOT).
38451 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38452 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38453 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38454
38455 void
38456 rs6000_split_logical (rtx operands[3],
38457 enum rtx_code code,
38458 bool complement_final_p,
38459 bool complement_op1_p,
38460 bool complement_op2_p)
38461 {
38462 machine_mode mode = GET_MODE (operands[0]);
38463 machine_mode sub_mode;
38464 rtx op0, op1, op2;
38465 int sub_size, regno0, regno1, nregs, i;
38466
38467 /* If this is DImode, use the specialized version that can run before
38468 register allocation. */
38469 if (mode == DImode && !TARGET_POWERPC64)
38470 {
38471 rs6000_split_logical_di (operands, code, complement_final_p,
38472 complement_op1_p, complement_op2_p);
38473 return;
38474 }
38475
38476 op0 = operands[0];
38477 op1 = operands[1];
38478 op2 = (code == NOT) ? NULL_RTX : operands[2];
38479 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38480 sub_size = GET_MODE_SIZE (sub_mode);
38481 regno0 = REGNO (op0);
38482 regno1 = REGNO (op1);
38483
38484 gcc_assert (reload_completed);
38485 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38486 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38487
38488 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38489 gcc_assert (nregs > 1);
38490
38491 if (op2 && REG_P (op2))
38492 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38493
38494 for (i = 0; i < nregs; i++)
38495 {
38496 int offset = i * sub_size;
38497 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38498 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38499 rtx sub_op2 = ((code == NOT)
38500 ? NULL_RTX
38501 : simplify_subreg (sub_mode, op2, mode, offset));
38502
38503 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38504 complement_final_p, complement_op1_p,
38505 complement_op2_p);
38506 }
38507
38508 return;
38509 }
38510
38511 \f
38512 /* Return true if the peephole2 can combine a load involving a combination of
38513 an addis instruction and a load with an offset that can be fused together on
38514 a power8. */
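/* For instance (an illustrative sketch), the peephole2 can fuse the pair

       addis 9,2,.LC0@toc@ha
       lwz 9,.LC0@toc@l(9)

   since the load's base register is the addis result and both set the
   same register, letting the two insns be emitted back to back for
   power8 fusion.  */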
38515
38516 bool
38517 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38518 rtx addis_value, /* addis value. */
38519 rtx target, /* target register that is loaded. */
38520 rtx mem) /* bottom part of the memory addr. */
38521 {
38522 rtx addr;
38523 rtx base_reg;
38524
38525 /* Validate arguments. */
38526 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38527 return false;
38528
38529 if (!base_reg_operand (target, GET_MODE (target)))
38530 return false;
38531
38532 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38533 return false;
38534
38535 /* Allow sign/zero extension. */
38536 if (GET_CODE (mem) == ZERO_EXTEND
38537 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38538 mem = XEXP (mem, 0);
38539
38540 if (!MEM_P (mem))
38541 return false;
38542
38543 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38544 return false;
38545
38546 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38547 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38548 return false;
38549
38550 /* Validate that the register used to load the high value is either the
38551 register being loaded, or we can safely replace its use.
38552
38553 This function is only called from the peephole2 pass and we assume that
38554 there are 2 instructions in the peephole (addis and load), so we want to
38555 check that the target register is not used in the memory address and that
38556 the register holding the addis result is dead after the peephole. */
38557 if (REGNO (addis_reg) != REGNO (target))
38558 {
38559 if (reg_mentioned_p (target, mem))
38560 return false;
38561
38562 if (!peep2_reg_dead_p (2, addis_reg))
38563 return false;
38564
38565 /* If the target register being loaded is the stack pointer, we must
38566 avoid loading any other value into it, even temporarily. */
38567 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38568 return false;
38569 }
38570
38571 base_reg = XEXP (addr, 0);
38572 return REGNO (addis_reg) == REGNO (base_reg);
38573 }
38574
38575 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38576 sequence. We adjust the addis register to use the target register. If the
38577 load sign extends, we adjust the code to do a zero-extending load followed
38578 by an explicit sign extension, since the fusion only covers zero-extending
38579 loads.
38580
38581 The operands are:
38582 operands[0] register set with addis (to be replaced with target)
38583 operands[1] value set via addis
38584 operands[2] target register being loaded
38585 operands[3] D-form memory reference using operands[0]. */
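/* A sketch of the RTL this expands to (modes are illustrative):

       (set (reg:DI target)
            (unspec:DI [(zero_extend:DI (mem:SI (plus ...)))]
                       UNSPEC_FUSION_GPR))

   followed, for sign-extending loads, by a separate
   (set (reg:DI target) (sign_extend:DI ...)) of the loaded value.  */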
38586
38587 void
38588 expand_fusion_gpr_load (rtx *operands)
38589 {
38590 rtx addis_value = operands[1];
38591 rtx target = operands[2];
38592 rtx orig_mem = operands[3];
38593 rtx new_addr, new_mem, orig_addr, offset;
38594 enum rtx_code plus_or_lo_sum;
38595 machine_mode target_mode = GET_MODE (target);
38596 machine_mode extend_mode = target_mode;
38597 machine_mode ptr_mode = Pmode;
38598 enum rtx_code extend = UNKNOWN;
38599
38600 if (GET_CODE (orig_mem) == ZERO_EXTEND
38601 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38602 {
38603 extend = GET_CODE (orig_mem);
38604 orig_mem = XEXP (orig_mem, 0);
38605 target_mode = GET_MODE (orig_mem);
38606 }
38607
38608 gcc_assert (MEM_P (orig_mem));
38609
38610 orig_addr = XEXP (orig_mem, 0);
38611 plus_or_lo_sum = GET_CODE (orig_addr);
38612 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38613
38614 offset = XEXP (orig_addr, 1);
38615 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38616 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38617
38618 if (extend != UNKNOWN)
38619 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38620
38621 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38622 UNSPEC_FUSION_GPR);
38623 emit_insn (gen_rtx_SET (target, new_mem));
38624
38625 if (extend == SIGN_EXTEND)
38626 {
38627 int sub_off = ((BYTES_BIG_ENDIAN)
38628 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38629 : 0);
38630 rtx sign_reg
38631 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38632
38633 emit_insn (gen_rtx_SET (target,
38634 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38635 }
38636
38637 return;
38638 }
38639
38640 /* Emit the addis instruction that will be part of a fused instruction
38641 sequence. */
38642
38643 void
38644 emit_fusion_addis (rtx target, rtx addis_value)
38645 {
38646 rtx fuse_ops[10];
38647 const char *addis_str = NULL;
38648
38649 /* Emit the addis instruction. */
38650 fuse_ops[0] = target;
38651 if (satisfies_constraint_L (addis_value))
38652 {
38653 fuse_ops[1] = addis_value;
38654 addis_str = "lis %0,%v1";
38655 }
38656
38657 else if (GET_CODE (addis_value) == PLUS)
38658 {
38659 rtx op0 = XEXP (addis_value, 0);
38660 rtx op1 = XEXP (addis_value, 1);
38661
38662 if (REG_P (op0) && CONST_INT_P (op1)
38663 && satisfies_constraint_L (op1))
38664 {
38665 fuse_ops[1] = op0;
38666 fuse_ops[2] = op1;
38667 addis_str = "addis %0,%1,%v2";
38668 }
38669 }
38670
38671 else if (GET_CODE (addis_value) == HIGH)
38672 {
38673 rtx value = XEXP (addis_value, 0);
38674 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38675 {
38676 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38677 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38678 if (TARGET_ELF)
38679 addis_str = "addis %0,%2,%1@toc@ha";
38680
38681 else if (TARGET_XCOFF)
38682 addis_str = "addis %0,%1@u(%2)";
38683
38684 else
38685 gcc_unreachable ();
38686 }
38687
38688 else if (GET_CODE (value) == PLUS)
38689 {
38690 rtx op0 = XEXP (value, 0);
38691 rtx op1 = XEXP (value, 1);
38692
38693 if (GET_CODE (op0) == UNSPEC
38694 && XINT (op0, 1) == UNSPEC_TOCREL
38695 && CONST_INT_P (op1))
38696 {
38697 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38698 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38699 fuse_ops[3] = op1;
38700 if (TARGET_ELF)
38701 addis_str = "addis %0,%2,%1+%3@toc@ha";
38702
38703 else if (TARGET_XCOFF)
38704 addis_str = "addis %0,%1+%3@u(%2)";
38705
38706 else
38707 gcc_unreachable ();
38708 }
38709 }
38710
38711 else if (satisfies_constraint_L (value))
38712 {
38713 fuse_ops[1] = value;
38714 addis_str = "lis %0,%v1";
38715 }
38716
38717 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38718 {
38719 fuse_ops[1] = value;
38720 addis_str = "lis %0,%1@ha";
38721 }
38722 }
38723
38724 if (!addis_str)
38725 fatal_insn ("Could not generate addis value for fusion", addis_value);
38726
38727 output_asm_insn (addis_str, fuse_ops);
38728 }
38729
38730 /* Emit a D-form load or store instruction that is the second instruction
38731 of a fusion sequence. */
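/* For example (illustrative): with a plain 16-bit offset this prints
   "lwz %0,%2(%1)", and with an ELF TOC-relative offset it prints
   "lwz %0,%2@toc@l(%1)", the operands being filled from fuse_ops.  */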
38732
38733 static void
38734 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38735 {
38736 rtx fuse_ops[10];
38737 char insn_template[80];
38738
38739 fuse_ops[0] = load_reg;
38740 fuse_ops[1] = addis_reg;
38741
38742 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38743 {
38744 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38745 fuse_ops[2] = offset;
38746 output_asm_insn (insn_template, fuse_ops);
38747 }
38748
38749 else if (GET_CODE (offset) == UNSPEC
38750 && XINT (offset, 1) == UNSPEC_TOCREL)
38751 {
38752 if (TARGET_ELF)
38753 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38754
38755 else if (TARGET_XCOFF)
38756 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38757
38758 else
38759 gcc_unreachable ();
38760
38761 fuse_ops[2] = XVECEXP (offset, 0, 0);
38762 output_asm_insn (insn_template, fuse_ops);
38763 }
38764
38765 else if (GET_CODE (offset) == PLUS
38766 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38767 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38768 && CONST_INT_P (XEXP (offset, 1)))
38769 {
38770 rtx tocrel_unspec = XEXP (offset, 0);
38771 if (TARGET_ELF)
38772 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38773
38774 else if (TARGET_XCOFF)
38775 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38776
38777 else
38778 gcc_unreachable ();
38779
38780 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38781 fuse_ops[3] = XEXP (offset, 1);
38782 output_asm_insn (insn_template, fuse_ops);
38783 }
38784
38785 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38786 {
38787 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38788
38789 fuse_ops[2] = offset;
38790 output_asm_insn (insn_template, fuse_ops);
38791 }
38792
38793 else
38794 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38795
38796 return;
38797 }
38798
38799 /* Given an address, convert it into the addis and load offset parts. Addresses
38800 created during the peephole2 process look like:
38801 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38802 (unspec [(...)] UNSPEC_TOCREL)) */
38803
38804 static void
38805 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38806 {
38807 rtx hi, lo;
38808
38809 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38810 {
38811 hi = XEXP (addr, 0);
38812 lo = XEXP (addr, 1);
38813 }
38814 else
38815 gcc_unreachable ();
38816
38817 *p_hi = hi;
38818 *p_lo = lo;
38819 }
38820
38821 /* Return a string to fuse an addis instruction with a GPR load into the same
38822 register that the addis instruction set up. The address that is used
38823 is the logical address that was formed during peephole2:
38824 (lo_sum (high) (low-part))
38825
38826 The code is complicated, so we call output_asm_insn directly, and just
38827 return "". */
38828
38829 const char *
38830 emit_fusion_gpr_load (rtx target, rtx mem)
38831 {
38832 rtx addis_value;
38833 rtx addr;
38834 rtx load_offset;
38835 const char *load_str = NULL;
38836 machine_mode mode;
38837
38838 if (GET_CODE (mem) == ZERO_EXTEND)
38839 mem = XEXP (mem, 0);
38840
38841 gcc_assert (REG_P (target) && MEM_P (mem));
38842
38843 addr = XEXP (mem, 0);
38844 fusion_split_address (addr, &addis_value, &load_offset);
38845
38846 /* Now emit the load instruction to the same register. */
38847 mode = GET_MODE (mem);
38848 switch (mode)
38849 {
38850 case E_QImode:
38851 load_str = "lbz";
38852 break;
38853
38854 case E_HImode:
38855 load_str = "lhz";
38856 break;
38857
38858 case E_SImode:
38859 case E_SFmode:
38860 load_str = "lwz";
38861 break;
38862
38863 case E_DImode:
38864 case E_DFmode:
38865 gcc_assert (TARGET_POWERPC64);
38866 load_str = "ld";
38867 break;
38868
38869 default:
38870 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38871 }
38872
38873 /* Emit the addis instruction. */
38874 emit_fusion_addis (target, addis_value);
38875
38876 /* Emit the D-form load instruction. */
38877 emit_fusion_load (target, target, load_offset, load_str);
38878
38879 return "";
38880 }
38881 \f
38882
38883 #ifdef RS6000_GLIBC_ATOMIC_FENV
38884 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38885 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38886 #endif
38887
38888 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38889
38890 static void
38891 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38892 {
38893 if (!TARGET_HARD_FLOAT)
38894 {
38895 #ifdef RS6000_GLIBC_ATOMIC_FENV
38896 if (atomic_hold_decl == NULL_TREE)
38897 {
38898 atomic_hold_decl
38899 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38900 get_identifier ("__atomic_feholdexcept"),
38901 build_function_type_list (void_type_node,
38902 double_ptr_type_node,
38903 NULL_TREE));
38904 TREE_PUBLIC (atomic_hold_decl) = 1;
38905 DECL_EXTERNAL (atomic_hold_decl) = 1;
38906 }
38907
38908 if (atomic_clear_decl == NULL_TREE)
38909 {
38910 atomic_clear_decl
38911 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38912 get_identifier ("__atomic_feclearexcept"),
38913 build_function_type_list (void_type_node,
38914 NULL_TREE));
38915 TREE_PUBLIC (atomic_clear_decl) = 1;
38916 DECL_EXTERNAL (atomic_clear_decl) = 1;
38917 }
38918
38919 tree const_double = build_qualified_type (double_type_node,
38920 TYPE_QUAL_CONST);
38921 tree const_double_ptr = build_pointer_type (const_double);
38922 if (atomic_update_decl == NULL_TREE)
38923 {
38924 atomic_update_decl
38925 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38926 get_identifier ("__atomic_feupdateenv"),
38927 build_function_type_list (void_type_node,
38928 const_double_ptr,
38929 NULL_TREE));
38930 TREE_PUBLIC (atomic_update_decl) = 1;
38931 DECL_EXTERNAL (atomic_update_decl) = 1;
38932 }
38933
38934 tree fenv_var = create_tmp_var_raw (double_type_node);
38935 TREE_ADDRESSABLE (fenv_var) = 1;
38936 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38937
38938 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38939 *clear = build_call_expr (atomic_clear_decl, 0);
38940 *update = build_call_expr (atomic_update_decl, 1,
38941 fold_convert (const_double_ptr, fenv_addr));
38942 #endif
38943 return;
38944 }
38945
38946 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38947 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38948 tree call_mffs = build_call_expr (mffs, 0);
38949
38950 /* Generates the equivalent of feholdexcept (&fenv_var):
38951
38952 fenv_var = __builtin_mffs ();
38953 double fenv_hold;
38954 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
38955 __builtin_mtfsf (0xff, fenv_hold); */
38956
38957 /* Mask to clear everything except for the rounding modes and non-IEEE
38958 arithmetic flag. */
38959 const unsigned HOST_WIDE_INT hold_exception_mask =
38960 HOST_WIDE_INT_C (0xffffffff00000007);
38961
38962 tree fenv_var = create_tmp_var_raw (double_type_node);
38963
38964 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38965
38966 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38967 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38968 build_int_cst (uint64_type_node,
38969 hold_exception_mask));
38970
38971 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38972 fenv_llu_and);
38973
38974 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38975 build_int_cst (unsigned_type_node, 0xff),
38976 fenv_hold_mtfsf);
38977
38978 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38979
38980 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38981
38982 double fenv_clear = __builtin_mffs ();
38983 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38984 __builtin_mtfsf (0xff, fenv_clear); */
38985
38986 /* Mask to clear the entire lower word of the FPSCR image, i.e. all the
38987 exception flags, enable bits, and rounding modes. */
38988 const unsigned HOST_WIDE_INT clear_exception_mask =
38989 HOST_WIDE_INT_C (0xffffffff00000000);
38990
38991 tree fenv_clear = create_tmp_var_raw (double_type_node);
38992
38993 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38994
38995 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38996 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38997 fenv_clean_llu,
38998 build_int_cst (uint64_type_node,
38999 clear_exception_mask));
39000
39001 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39002 fenv_clear_llu_and);
39003
39004 tree clear_mtfsf = build_call_expr (mtfsf, 2,
39005 build_int_cst (unsigned_type_node, 0xff),
39006 fenv_clear_mtfsf);
39007
39008 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
39009
39010 /* Generates the equivalent of feupdateenv (&fenv_var):
39011
39012 double old_fenv = __builtin_mffs ();
39013 double fenv_update;
39014 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39015 (*(uint64_t*)&fenv_var & 0x1ff80fff);
39016 __builtin_mtfsf (0xff, fenv_update); */
39017
39018 const unsigned HOST_WIDE_INT update_exception_mask =
39019 HOST_WIDE_INT_C (0xffffffff1fffff00);
39020 const unsigned HOST_WIDE_INT new_exception_mask =
39021 HOST_WIDE_INT_C (0x1ff80fff);
39022
39023 tree old_fenv = create_tmp_var_raw (double_type_node);
39024 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39025
39026 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39027 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39028 build_int_cst (uint64_type_node,
39029 update_exception_mask));
39030
39031 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39032 build_int_cst (uint64_type_node,
39033 new_exception_mask));
39034
39035 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39036 old_llu_and, new_llu_and);
39037
39038 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39039 new_llu_mask);
39040
39041 tree update_mtfsf = build_call_expr (mtfsf, 2,
39042 build_int_cst (unsigned_type_node, 0xff),
39043 fenv_update_mtfsf);
39044
39045 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39046 }
39047
39048 void
39049 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39050 {
39051 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39052
39053 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39054 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39055
39056 /* The destination of the vmrgew instruction layout is:
39057 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39058 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39059 vmrgew instruction will be correct. */
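/* A sketch of the data flow: with src1 = {a0, a1} and src2 = {b0, b1},
   the xxpermdi pairs give rtx_tmp0 = {a0, b0} and rtx_tmp1 = {a1, b1};
   after conversion to single precision, the vmrgew merge is expected to
   yield dst = {a0, a1, b0, b1}.  */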
39060 if (BYTES_BIG_ENDIAN)
39061 {
39062 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39063 GEN_INT (0)));
39064 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39065 GEN_INT (3)));
39066 }
39067 else
39068 {
39069 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39070 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39071 }
39072
39073 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39074 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39075
39076 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39077 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39078
39079 if (BYTES_BIG_ENDIAN)
39080 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39081 else
39082 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39083 }
39084
39085 void
39086 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39087 {
39088 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39089
39090 rtx_tmp0 = gen_reg_rtx (V2DImode);
39091 rtx_tmp1 = gen_reg_rtx (V2DImode);
39092
39093 /* The destination of the vmrgew instruction layout is:
39094 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39095 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39096 vmrgew instruction will be correct. */
39097 if (BYTES_BIG_ENDIAN)
39098 {
39099 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39100 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39101 }
39102 else
39103 {
39104 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39105 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39106 }
39107
39108 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39109 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39110
39111 if (signed_convert)
39112 {
39113 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39114 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39115 }
39116 else
39117 {
39118 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39119 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39120 }
39121
39122 if (BYTES_BIG_ENDIAN)
39123 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39124 else
39125 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39126 }
39127
39128 void
39129 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39130 rtx src2)
39131 {
39132 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39133
39134 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39135 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39136
39137 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39138 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39139
39140 rtx_tmp2 = gen_reg_rtx (V4SImode);
39141 rtx_tmp3 = gen_reg_rtx (V4SImode);
39142
39143 if (signed_convert)
39144 {
39145 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39146 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39147 }
39148 else
39149 {
39150 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39151 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39152 }
39153
39154 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39155 }
39156
39157 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39158
39159 static bool
39160 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39161 optimization_type opt_type)
39162 {
39163 switch (op)
39164 {
39165 case rsqrt_optab:
39166 return (opt_type == OPTIMIZE_FOR_SPEED
39167 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39168
39169 default:
39170 return true;
39171 }
39172 }
39173
39174 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39175
39176 static HOST_WIDE_INT
39177 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39178 {
39179 if (TREE_CODE (exp) == STRING_CST
39180 && (STRICT_ALIGNMENT || !optimize_size))
39181 return MAX (align, BITS_PER_WORD);
39182 return align;
39183 }
39184
39185 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39186
39187 static HOST_WIDE_INT
39188 rs6000_starting_frame_offset (void)
39189 {
39190 if (FRAME_GROWS_DOWNWARD)
39191 return 0;
39192 return RS6000_STARTING_FRAME_OFFSET;
39193 }
39194 \f
39195
39196 /* Create an alias for a mangled name where we have changed the mangling (in
39197 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39198 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
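/* For example (the mangled names are illustrative): for a C++ function
   "void foo (__float128)", the current mangling gives _Z3foou9__ieee128
   while GCC 8.1 produced _Z3fooU10__float128, so we would emit

       .weak _Z3fooU10__float128
       .set  _Z3fooU10__float128,_Z3foou9__ieee128

   so that objects built against the old mangling continue to link.  */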
39199
39200 #if TARGET_ELF && RS6000_WEAK
39201 static void
39202 rs6000_globalize_decl_name (FILE * stream, tree decl)
39203 {
39204 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39205
39206 targetm.asm_out.globalize_label (stream, name);
39207
39208 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39209 {
39210 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39211 const char *old_name;
39212
39213 ieee128_mangling_gcc_8_1 = true;
39214 lang_hooks.set_decl_assembler_name (decl);
39215 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39216 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39217 ieee128_mangling_gcc_8_1 = false;
39218
39219 if (strcmp (name, old_name) != 0)
39220 {
39221 fprintf (stream, "\t.weak %s\n", old_name);
39222 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39223 }
39224 }
39225 }
39226 #endif
39227
39228 \f
39229 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39230 function names from <foo>l to <foo>f128 if the default long double type is
39231 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39232 include file switches the names on systems that support long double as IEEE
39233 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39234 In the future, glibc will export names like __ieee128_sinf128 and we can
39235 switch to using those instead of using sinf128, which pollutes the user's
39236 namespace.
39237
39238 This will switch the names for Fortran math functions as well (which doesn't
39239 use math.h). However, Fortran needs other changes to the compiler and
39240 library before you can switch the real*16 type at compile time.
39241
39242 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39243 only do this if the default is that long double is IBM extended double, and
39244 the user asked for IEEE 128-bit. */
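/* For example, with -mabi=ieeelongdouble on a target whose default long
   double is IBM extended double, a call to __builtin_sinl has the
   assembler name "sinl"; the code below rewrites its trailing 'l' to
   produce "sinf128".  */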
39245
39246 static tree
39247 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39248 {
39249 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39250 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39251 {
39252 size_t len = IDENTIFIER_LENGTH (id);
39253 const char *name = IDENTIFIER_POINTER (id);
39254
39255 if (name[len - 1] == 'l')
39256 {
39257 bool uses_ieee128_p = false;
39258 tree type = TREE_TYPE (decl);
39259 machine_mode ret_mode = TYPE_MODE (type);
39260
39261 /* See if the function returns an IEEE 128-bit floating point type or
39262 complex type. */
39263 if (ret_mode == TFmode || ret_mode == TCmode)
39264 uses_ieee128_p = true;
39265 else
39266 {
39267 function_args_iterator args_iter;
39268 tree arg;
39269
39270 /* See if the function passes an IEEE 128-bit floating point type
39271 or complex type. */
39272 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39273 {
39274 machine_mode arg_mode = TYPE_MODE (arg);
39275 if (arg_mode == TFmode || arg_mode == TCmode)
39276 {
39277 uses_ieee128_p = true;
39278 break;
39279 }
39280 }
39281 }
39282
39283 /* If we passed or returned an IEEE 128-bit floating point type,
39284 change the name. */
39285 if (uses_ieee128_p)
39286 {
39287 char *name2 = (char *) alloca (len + 4);
39288 memcpy (name2, name, len - 1);
39289 strcpy (name2 + len - 1, "f128");
39290 id = get_identifier (name2);
39291 }
39292 }
39293 }
39294
39295 return id;
39296 }
39297
39298 \f
39299 struct gcc_target targetm = TARGET_INITIALIZER;
39300
39301 #include "gt-rs6000.h"