darwin, ppc - improve debug for mdebug-stack
gcc/config/rs6000/rs6000.c
1 /* Subroutines used for code generation on IBM RS/6000.
2 Copyright (C) 1991-2019 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define IN_TARGET_CODE 1
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "memmodel.h"
30 #include "gimple.h"
31 #include "cfghooks.h"
32 #include "cfgloop.h"
33 #include "df.h"
34 #include "tm_p.h"
35 #include "stringpool.h"
36 #include "expmed.h"
37 #include "optabs.h"
38 #include "regs.h"
39 #include "ira.h"
40 #include "recog.h"
41 #include "cgraph.h"
42 #include "diagnostic-core.h"
43 #include "insn-attr.h"
44 #include "flags.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "attribs.h"
48 #include "stor-layout.h"
49 #include "calls.h"
50 #include "print-tree.h"
51 #include "varasm.h"
52 #include "explow.h"
53 #include "expr.h"
54 #include "output.h"
55 #include "dbxout.h"
56 #include "common/common-target.h"
57 #include "langhooks.h"
58 #include "reload.h"
59 #include "sched-int.h"
60 #include "gimplify.h"
61 #include "gimple-fold.h"
62 #include "gimple-iterator.h"
63 #include "gimple-ssa.h"
64 #include "gimple-walk.h"
65 #include "intl.h"
66 #include "params.h"
67 #include "tm-constrs.h"
68 #include "tree-vectorizer.h"
69 #include "target-globals.h"
70 #include "builtins.h"
71 #include "tree-vector-builder.h"
72 #include "context.h"
73 #include "tree-pass.h"
74 #include "except.h"
75 #if TARGET_XCOFF
76 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
77 #endif
78 #if TARGET_MACHO
79 #include "gstab.h" /* for N_SLINE */
80 #endif
81 #include "case-cfn-macros.h"
82 #include "ppc-auxv.h"
83 #include "tree-ssa-propagate.h"
84 #include "tree-vrp.h"
85 #include "tree-ssanames.h"
86
87 /* This file should be included last. */
88 #include "target-def.h"
89
90 #ifndef TARGET_NO_PROTOTYPE
91 #define TARGET_NO_PROTOTYPE 0
92 #endif
93
94 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
95 systems will also set long double to be IEEE 128-bit. AIX and Darwin
96 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
97 those systems will not pick up this default. This needs to be after all
98 of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
99 properly defined. */
100 #ifndef TARGET_IEEEQUAD_DEFAULT
101 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
102 #define TARGET_IEEEQUAD_DEFAULT 1
103 #else
104 #define TARGET_IEEEQUAD_DEFAULT 0
105 #endif
106 #endif
107
108 static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
109
110 /* Structure used to define the rs6000 stack */
111 typedef struct rs6000_stack {
112 int reload_completed; /* stack info won't change from here on */
113 int first_gp_reg_save; /* first callee saved GP register used */
114 int first_fp_reg_save; /* first callee saved FP register used */
115 int first_altivec_reg_save; /* first callee saved AltiVec register used */
116 int lr_save_p; /* true if the link reg needs to be saved */
117 int cr_save_p; /* true if the CR reg needs to be saved */
118 unsigned int vrsave_mask; /* mask of vec registers to save */
119 int push_p; /* true if we need to allocate stack space */
120 int calls_p; /* true if the function makes any calls */
121 int world_save_p; /* true if we're saving *everything*:
122 r13-r31, cr, f14-f31, vrsave, v20-v31 */
123 enum rs6000_abi abi; /* which ABI to use */
124 int gp_save_offset; /* offset to save GP regs from initial SP */
125 int fp_save_offset; /* offset to save FP regs from initial SP */
126 int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
127 int lr_save_offset; /* offset to save LR from initial SP */
128 int cr_save_offset; /* offset to save CR from initial SP */
129 int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
130 int varargs_save_offset; /* offset to save the varargs registers */
131 int ehrd_offset; /* offset to EH return data */
132 int ehcr_offset; /* offset to EH CR field data */
133 int reg_size; /* register size (4 or 8) */
134 HOST_WIDE_INT vars_size; /* variable save area size */
135 int parm_size; /* outgoing parameter size */
136 int save_size; /* save area size */
137 int fixed_size; /* fixed size of stack frame */
138 int gp_size; /* size of saved GP registers */
139 int fp_size; /* size of saved FP registers */
140 int altivec_size; /* size of saved AltiVec registers */
141 int cr_size; /* size to hold CR if not in fixed area */
142 int vrsave_size; /* size to hold VRSAVE */
143 int altivec_padding_size; /* size of altivec alignment padding */
144 HOST_WIDE_INT total_size; /* total bytes allocated for stack */
145 int savres_strategy;
146 } rs6000_stack_t;
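/* A sketch of typical use (hypothetical caller, not code from this
   excerpt): the prologue/epilogue code obtains a filled-in copy from
   rs6000_stack_info (), declared further below, and consults fields
   such as:

     rs6000_stack_t *info = rs6000_stack_info ();
     if (info->lr_save_p)
       ... save LR at frame offset info->lr_save_offset ...  */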
147
148 /* A C structure for machine-specific, per-function data.
149 This is added to the cfun structure. */
150 typedef struct GTY(()) machine_function
151 {
152 /* Flags if __builtin_return_address (n) with n >= 1 was used. */
153 int ra_needs_full_frame;
154 /* Flags if __builtin_return_address (0) was used. */
155 int ra_need_lr;
156 /* Cache lr_save_p after expansion of builtin_eh_return. */
157 int lr_save_state;
158 /* Whether we need to save the TOC to the reserved stack location in the
159 function prologue. */
160 bool save_toc_in_prologue;
161 /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
162 varargs save area. */
163 HOST_WIDE_INT varargs_save_offset;
164 /* Alternative internal arg pointer for -fsplit-stack. */
165 rtx split_stack_arg_pointer;
166 bool split_stack_argp_used;
167 /* Flag if r2 setup is needed with ELFv2 ABI. */
168 bool r2_setup_needed;
169 /* The number of components we use for separate shrink-wrapping. */
170 int n_components;
171 /* The components already handled by separate shrink-wrapping, which should
172 not be considered by the prologue and epilogue. */
173 bool gpr_is_wrapped_separately[32];
174 bool fpr_is_wrapped_separately[32];
175 bool lr_is_wrapped_separately;
176 bool toc_is_wrapped_separately;
177 } machine_function;
178
179 /* Support targetm.vectorize.builtin_mask_for_load. */
180 static GTY(()) tree altivec_builtin_mask_for_load;
181
182 /* Set to nonzero once AIX common-mode calls have been defined. */
183 static GTY(()) int common_mode_defined;
184
185 /* Label number of the label created for -mrelocatable, which we call to
186 get the address of the GOT section. */
187 static int rs6000_pic_labelno;
188
189 #ifdef USING_ELFOS_H
190 /* Counter for labels which are to be placed in .fixup. */
191 int fixuplabelno = 0;
192 #endif
193
194 /* Whether to use variant of AIX ABI for PowerPC64 Linux. */
195 int dot_symbols;
196
197 /* Specify the machine mode that pointers have. After generation of rtl, the
198 compiler makes no further distinction between pointers and any other objects
199 of this machine mode. */
200 scalar_int_mode rs6000_pmode;
201
202 #if TARGET_ELF
203 /* Note whether IEEE 128-bit floating point was passed or returned, either as
204 the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
205 floating point. We changed the default C++ mangling for these types and we
206 may want to generate a weak alias of the old mangling (U10__float128) to the
207 new mangling (u9__ieee128). */
208 static bool rs6000_passes_ieee128;
209 #endif
210
211 /* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
212 name used in current releases (i.e. u9__ieee128). */
213 static bool ieee128_mangling_gcc_8_1;
214
215 /* Width in bits of a pointer. */
216 unsigned rs6000_pointer_size;
217
218 #ifdef HAVE_AS_GNU_ATTRIBUTE
219 # ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
220 # define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
221 # endif
222 /* Flag whether floating point values have been passed/returned.
223 Note that this doesn't say whether fprs are used, since the
224 Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
225 should be set for soft-float values passed in gprs and ieee128
226 values passed in vsx registers. */
227 static bool rs6000_passes_float;
228 static bool rs6000_passes_long_double;
229 /* Flag whether vector values have been passed/returned. */
230 static bool rs6000_passes_vector;
231 /* Flag whether small (<= 8 byte) structures have been returned. */
232 static bool rs6000_returns_struct;
233 #endif
234
235 /* Value is TRUE if register/mode pair is acceptable. */
236 static bool rs6000_hard_regno_mode_ok_p
237 [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
238
239 /* Maximum number of registers needed for a given register class and mode. */
240 unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];
241
242 /* How many registers are needed for a given register and mode. */
243 unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
244
245 /* Map register number to register class. */
246 enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];
247
248 static int dbg_cost_ctrl;
249
250 /* Built in types. */
251 tree rs6000_builtin_types[RS6000_BTI_MAX];
252 tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
253
254 /* Flag to say the TOC is initialized */
255 int toc_initialized, need_toc_init;
256 char toc_label_name[10];
257
258 /* Cached value of rs6000_variable_issue. This is cached in
259 rs6000_variable_issue hook and returned from rs6000_sched_reorder2. */
260 static short cached_can_issue_more;
261
262 static GTY(()) section *read_only_data_section;
263 static GTY(()) section *private_data_section;
264 static GTY(()) section *tls_data_section;
265 static GTY(()) section *tls_private_data_section;
266 static GTY(()) section *read_only_private_data_section;
267 static GTY(()) section *sdata2_section;
268 static GTY(()) section *toc_section;
269
270 struct builtin_description
271 {
272 const HOST_WIDE_INT mask;
273 const enum insn_code icode;
274 const char *const name;
275 const enum rs6000_builtins code;
276 };
277
278 /* Describe the vector unit used for modes. */
279 enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
280 enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
281
282 /* Register classes for various constraints that are based on the target
283 switches. */
284 enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
285
286 /* Describe the alignment of a vector. */
287 int rs6000_vector_align[NUM_MACHINE_MODES];
288
289 /* Map selected modes to types for builtins. */
290 static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];
291
292 /* What modes to automatically generate reciprocal divide estimate (fre) and
293 reciprocal sqrt (frsqrte) for. */
294 unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
295
296 /* Masks to determine which reciprocal estimate instructions to generate
297 automatically. */
298 enum rs6000_recip_mask {
299 RECIP_SF_DIV = 0x001, /* Use divide estimate */
300 RECIP_DF_DIV = 0x002,
301 RECIP_V4SF_DIV = 0x004,
302 RECIP_V2DF_DIV = 0x008,
303
304 RECIP_SF_RSQRT = 0x010, /* Use reciprocal sqrt estimate. */
305 RECIP_DF_RSQRT = 0x020,
306 RECIP_V4SF_RSQRT = 0x040,
307 RECIP_V2DF_RSQRT = 0x080,
308
309 /* Various combination of flags for -mrecip=xxx. */
310 RECIP_NONE = 0,
311 RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
312 | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
313 | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),
314
315 RECIP_HIGH_PRECISION = RECIP_ALL,
316
317 /* On low precision machines like the power5, don't enable double precision
318 reciprocal square root estimate, since it isn't accurate enough. */
319 RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
320 };
321
322 /* -mrecip options. */
323 static struct
324 {
325 const char *string; /* option name */
326 unsigned int mask; /* mask bits to set */
327 } recip_options[] = {
328 { "all", RECIP_ALL },
329 { "none", RECIP_NONE },
330 { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
331 | RECIP_V2DF_DIV) },
332 { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
333 { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
334 { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
335 | RECIP_V2DF_RSQRT) },
336 { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
337 { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
338 };
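/* For example (a reading of the table above, not code from this
   excerpt): -mrecip=divf,rsqrtd looks up "divf" and "rsqrtd" and
   accumulates
     (RECIP_SF_DIV | RECIP_V4SF_DIV) | (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT)
   into the reciprocal-estimate mask.  */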
339
340 /* Used by __builtin_cpu_is(), mapping from PLATFORM names to values. */
341 static const struct
342 {
343 const char *cpu;
344 unsigned int cpuid;
345 } cpu_is_info[] = {
346 { "power9", PPC_PLATFORM_POWER9 },
347 { "power8", PPC_PLATFORM_POWER8 },
348 { "power7", PPC_PLATFORM_POWER7 },
349 { "power6x", PPC_PLATFORM_POWER6X },
350 { "power6", PPC_PLATFORM_POWER6 },
351 { "power5+", PPC_PLATFORM_POWER5_PLUS },
352 { "power5", PPC_PLATFORM_POWER5 },
353 { "ppc970", PPC_PLATFORM_PPC970 },
354 { "power4", PPC_PLATFORM_POWER4 },
355 { "ppca2", PPC_PLATFORM_PPCA2 },
356 { "ppc476", PPC_PLATFORM_PPC476 },
357 { "ppc464", PPC_PLATFORM_PPC464 },
358 { "ppc440", PPC_PLATFORM_PPC440 },
359 { "ppc405", PPC_PLATFORM_PPC405 },
360 { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
361 };
362
363 /* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks. */
364 static const struct
365 {
366 const char *hwcap;
367 int mask;
368 unsigned int id;
369 } cpu_supports_info[] = {
370 /* AT_HWCAP masks. */
371 { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
372 { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
373 { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
374 { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
375 { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
376 { "booke", PPC_FEATURE_BOOKE, 0 },
377 { "cellbe", PPC_FEATURE_CELL_BE, 0 },
378 { "dfp", PPC_FEATURE_HAS_DFP, 0 },
379 { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
380 { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
381 { "fpu", PPC_FEATURE_HAS_FPU, 0 },
382 { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
383 { "mmu", PPC_FEATURE_HAS_MMU, 0 },
384 { "notb", PPC_FEATURE_NO_TB, 0 },
385 { "pa6t", PPC_FEATURE_PA6T, 0 },
386 { "power4", PPC_FEATURE_POWER4, 0 },
387 { "power5", PPC_FEATURE_POWER5, 0 },
388 { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
389 { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
390 { "ppc32", PPC_FEATURE_32, 0 },
391 { "ppc601", PPC_FEATURE_601_INSTR, 0 },
392 { "ppc64", PPC_FEATURE_64, 0 },
393 { "ppcle", PPC_FEATURE_PPC_LE, 0 },
394 { "smt", PPC_FEATURE_SMT, 0 },
395 { "spe", PPC_FEATURE_HAS_SPE, 0 },
396 { "true_le", PPC_FEATURE_TRUE_LE, 0 },
397 { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
398 { "vsx", PPC_FEATURE_HAS_VSX, 0 },
399
400 /* AT_HWCAP2 masks. */
401 { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
402 { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
403 { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
404 { "htm", PPC_FEATURE2_HAS_HTM, 1 },
405 { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
406 { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
407 { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
408 { "tar", PPC_FEATURE2_HAS_TAR, 1 },
409 { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
410 { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
411 { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
412 { "darn", PPC_FEATURE2_DARN, 1 },
413 { "scv", PPC_FEATURE2_SCV, 1 }
414 };
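/* So __builtin_cpu_supports ("vsx") tests PPC_FEATURE_HAS_VSX against
   AT_HWCAP (id 0), while __builtin_cpu_supports ("arch_3_00") tests
   PPC_FEATURE2_ARCH_3_00 against AT_HWCAP2 (id 1); both values are read
   from the TCB fields advertised by the libc symbol declared below.  */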
415
416 /* On PowerPC, we have a limited number of target clones that we care about
417 which means we can use an array to hold the options, rather than having more
418 elaborate data structures to identify each possible variation. Order the
419 clones from the default to the highest ISA. */
420 enum {
421 CLONE_DEFAULT = 0, /* default clone. */
422 CLONE_ISA_2_05, /* ISA 2.05 (power6). */
423 CLONE_ISA_2_06, /* ISA 2.06 (power7). */
424 CLONE_ISA_2_07, /* ISA 2.07 (power8). */
425 CLONE_ISA_3_00, /* ISA 3.00 (power9). */
426 CLONE_MAX
427 };
428
429 /* Map compiler ISA bits into HWCAP names. */
430 struct clone_map {
431 HOST_WIDE_INT isa_mask; /* rs6000_isa mask */
432 const char *name; /* name to use in __builtin_cpu_supports. */
433 };
434
435 static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
436 { 0, "" }, /* Default options. */
437 { OPTION_MASK_CMPB, "arch_2_05" }, /* ISA 2.05 (power6). */
438 { OPTION_MASK_POPCNTD, "arch_2_06" }, /* ISA 2.06 (power7). */
439 { OPTION_MASK_P8_VECTOR, "arch_2_07" }, /* ISA 2.07 (power8). */
440 { OPTION_MASK_P9_VECTOR, "arch_3_00" }, /* ISA 3.00 (power9). */
441 };
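/* With this map, a function compiled with something like
     __attribute__ ((target_clones ("cpu=power9", "default")))
   gets a CLONE_ISA_3_00 variant whose dispatch test is
   __builtin_cpu_supports ("arch_3_00").  The attribute spelling above is
   illustrative only; see the GCC manual for the exact supported forms.  */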
442
443
444 /* Newer LIBCs explicitly export this symbol to declare that they provide
445 the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. We emit a
446 reference to this symbol whenever we expand a CPU builtin, so that
447 we never link against an old LIBC. */
448 const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";
449
450 /* True if we have expanded a CPU builtin. */
451 bool cpu_builtin_p;
452
453 /* Pointer to function (in rs6000-c.c) that can define or undefine target
454 macros that have changed. Languages that don't support the preprocessor
455 don't link in rs6000-c.c, so we can't call it directly. */
456 void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
457
458 /* Simplify register classes into simpler classifications. We assume
459 GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
460 check for standard register classes (gpr/floating/altivec/vsx) and
461 floating/vector classes (float/altivec/vsx). */
462
463 enum rs6000_reg_type {
464 NO_REG_TYPE,
465 PSEUDO_REG_TYPE,
466 GPR_REG_TYPE,
467 VSX_REG_TYPE,
468 ALTIVEC_REG_TYPE,
469 FPR_REG_TYPE,
470 SPR_REG_TYPE,
471 CR_REG_TYPE
472 };
473
474 /* Map register class to register type. */
475 static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
476
477 /* First/last register type for the 'normal' register types (i.e. general
478 purpose, floating point, altivec, and VSX registers). */
479 #define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)
480
481 #define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
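/* For example, IS_STD_REG_TYPE is true for GPR_REG_TYPE, VSX_REG_TYPE,
   ALTIVEC_REG_TYPE and FPR_REG_TYPE but false for SPR_REG_TYPE and
   CR_REG_TYPE; these range checks are why the enum above must keep
   GPR_REG_TYPE..FPR_REG_TYPE contiguous.  */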
482
483
484 /* Register classes we care about in secondary reload or legitimate-address
485 checking. We only need to worry about GPR, FPR, and Altivec registers
486 here, along with an ANY field that is the OR of the 3 register classes. */
487
488 enum rs6000_reload_reg_type {
489 RELOAD_REG_GPR, /* General purpose registers. */
490 RELOAD_REG_FPR, /* Traditional floating point regs. */
491 RELOAD_REG_VMX, /* Altivec (VMX) registers. */
492 RELOAD_REG_ANY, /* OR of GPR, FPR, Altivec masks. */
493 N_RELOAD_REG
494 };
495
496 /* For setting up register classes, loop through the 3 register classes mapping
497 into real registers, and skip the ANY class, which is just an OR of the
498 bits. */
499 #define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
500 #define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX
501
502 /* Map reload register type to a register in the register class. */
503 struct reload_reg_map_type {
504 const char *name; /* Register class name. */
505 int reg; /* Register in the register class. */
506 };
507
508 static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
509 { "Gpr", FIRST_GPR_REGNO }, /* RELOAD_REG_GPR. */
510 { "Fpr", FIRST_FPR_REGNO }, /* RELOAD_REG_FPR. */
511 { "VMX", FIRST_ALTIVEC_REGNO }, /* RELOAD_REG_VMX. */
512 { "Any", -1 }, /* RELOAD_REG_ANY. */
513 };
514
515 /* Mask bits for each register class, indexed per mode. Historically the
516 compiler has been more restrictive about which modes can do PRE_MODIFY
517 instead of PRE_INC and PRE_DEC, so keep track of separate bits for these two. */
518 typedef unsigned char addr_mask_type;
519
520 #define RELOAD_REG_VALID 0x01 /* Mode valid in register. */
521 #define RELOAD_REG_MULTIPLE 0x02 /* Mode takes multiple registers. */
522 #define RELOAD_REG_INDEXED 0x04 /* Reg+reg addressing. */
523 #define RELOAD_REG_OFFSET 0x08 /* Reg+offset addressing. */
524 #define RELOAD_REG_PRE_INCDEC 0x10 /* PRE_INC/PRE_DEC valid. */
525 #define RELOAD_REG_PRE_MODIFY 0x20 /* PRE_MODIFY valid. */
526 #define RELOAD_REG_AND_M16 0x40 /* AND -16 addressing. */
527 #define RELOAD_REG_QUAD_OFFSET 0x80 /* quad offset is limited. */
528
529 /* Per-mode reload insns and masks of valid addressing modes, by register type. */
530 struct rs6000_reg_addr {
531 enum insn_code reload_load; /* INSN to reload for loading. */
532 enum insn_code reload_store; /* INSN to reload for storing. */
533 enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR. */
534 enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX. */
535 enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR. */
536 addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks. */
537 bool scalar_in_vmx_p; /* Scalar value can go in VMX. */
538 };
539
540 static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
541
542 /* Helper function to say whether a mode supports PRE_INC or PRE_DEC. */
543 static inline bool
544 mode_supports_pre_incdec_p (machine_mode mode)
545 {
546 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
547 != 0);
548 }
549
550 /* Helper function to say whether a mode supports PRE_MODIFY. */
551 static inline bool
552 mode_supports_pre_modify_p (machine_mode mode)
553 {
554 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
555 != 0);
556 }
557
558 /* Return true if we have D-form addressing in altivec registers. */
559 static inline bool
560 mode_supports_vmx_dform (machine_mode mode)
561 {
562 return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
563 }
564
565 /* Return true if we have D-form addressing in VSX registers. This addressing
566 is more limited than normal d-form addressing in that the offset must be
567 aligned on a 16-byte boundary. */
568 static inline bool
569 mode_supports_dq_form (machine_mode mode)
570 {
571 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
572 != 0);
573 }
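/* A hypothetical use of the predicates above (a sketch, not code from
   this file): an address-validation routine might do

     if (GET_CODE (XEXP (mem, 0)) == PRE_INC
         && !mode_supports_pre_incdec_p (mode))
       return false;
     if (mode_supports_dq_form (mode) && (offset & 15) != 0)
       return false;   // DQ-form offsets must be 16-byte aligned

   Note that all of them consult the RELOAD_REG_ANY mask except
   mode_supports_vmx_dform, which asks specifically about the Altivec
   register file.  */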
574
575 /* Given that there exists at least one variable that is set (produced)
576 by OUT_INSN and read (consumed) by IN_INSN, return true iff
577 IN_INSN represents one or more memory store operations and none of
578 the variables set by OUT_INSN is used by IN_INSN as the address of a
579 store operation. If either IN_INSN or OUT_INSN does not represent
580 a "single" RTL SET expression (as loosely defined by the
581 implementation of the single_set function) or a PARALLEL with only
582 SETs, CLOBBERs, and USEs inside, this function returns false.
583
584 This rs6000-specific version of store_data_bypass_p checks for
585 certain conditions that result in assertion failures (and internal
586 compiler errors) in the generic store_data_bypass_p function and
587 returns false rather than calling store_data_bypass_p if one of the
588 problematic conditions is detected. */
589
590 int
591 rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
592 {
593 rtx out_set, in_set;
594 rtx out_pat, in_pat;
595 rtx out_exp, in_exp;
596 int i, j;
597
598 in_set = single_set (in_insn);
599 if (in_set)
600 {
601 if (MEM_P (SET_DEST (in_set)))
602 {
603 out_set = single_set (out_insn);
604 if (!out_set)
605 {
606 out_pat = PATTERN (out_insn);
607 if (GET_CODE (out_pat) == PARALLEL)
608 {
609 for (i = 0; i < XVECLEN (out_pat, 0); i++)
610 {
611 out_exp = XVECEXP (out_pat, 0, i);
612 if ((GET_CODE (out_exp) == CLOBBER)
613 || (GET_CODE (out_exp) == USE))
614 continue;
615 else if (GET_CODE (out_exp) != SET)
616 return false;
617 }
618 }
619 }
620 }
621 }
622 else
623 {
624 in_pat = PATTERN (in_insn);
625 if (GET_CODE (in_pat) != PARALLEL)
626 return false;
627
628 for (i = 0; i < XVECLEN (in_pat, 0); i++)
629 {
630 in_exp = XVECEXP (in_pat, 0, i);
631 if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
632 continue;
633 else if (GET_CODE (in_exp) != SET)
634 return false;
635
636 if (MEM_P (SET_DEST (in_exp)))
637 {
638 out_set = single_set (out_insn);
639 if (!out_set)
640 {
641 out_pat = PATTERN (out_insn);
642 if (GET_CODE (out_pat) != PARALLEL)
643 return false;
644 for (j = 0; j < XVECLEN (out_pat, 0); j++)
645 {
646 out_exp = XVECEXP (out_pat, 0, j);
647 if ((GET_CODE (out_exp) == CLOBBER)
648 || (GET_CODE (out_exp) == USE))
649 continue;
650 else if (GET_CODE (out_exp) != SET)
651 return false;
652 }
653 }
654 }
655 }
656 }
657 return store_data_bypass_p (out_insn, in_insn);
658 }
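/* For example, an IN_INSN of the form

     (parallel [(set (mem:DI ...) (reg:DI 3))
                (clobber (reg:DI 0))])

   is accepted by the loops above (CLOBBERs and USEs are skipped, the MEM
   store is still checked), while a PARALLEL containing anything other
   than SET/CLOBBER/USE makes us return false before consulting the
   generic store_data_bypass_p.  */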
659
660 \f
661 /* Processor costs (relative to an add) */
662
663 const struct processor_costs *rs6000_cost;
664
665 /* Instruction size costs on 32-bit processors. */
666 static const
667 struct processor_costs size32_cost = {
668 COSTS_N_INSNS (1), /* mulsi */
669 COSTS_N_INSNS (1), /* mulsi_const */
670 COSTS_N_INSNS (1), /* mulsi_const9 */
671 COSTS_N_INSNS (1), /* muldi */
672 COSTS_N_INSNS (1), /* divsi */
673 COSTS_N_INSNS (1), /* divdi */
674 COSTS_N_INSNS (1), /* fp */
675 COSTS_N_INSNS (1), /* dmul */
676 COSTS_N_INSNS (1), /* sdiv */
677 COSTS_N_INSNS (1), /* ddiv */
678 32, /* cache line size */
679 0, /* l1 cache */
680 0, /* l2 cache */
681 0, /* streams */
682 0, /* SF->DF convert */
683 };
684
685 /* Instruction size costs on 64-bit processors. */
686 static const
687 struct processor_costs size64_cost = {
688 COSTS_N_INSNS (1), /* mulsi */
689 COSTS_N_INSNS (1), /* mulsi_const */
690 COSTS_N_INSNS (1), /* mulsi_const9 */
691 COSTS_N_INSNS (1), /* muldi */
692 COSTS_N_INSNS (1), /* divsi */
693 COSTS_N_INSNS (1), /* divdi */
694 COSTS_N_INSNS (1), /* fp */
695 COSTS_N_INSNS (1), /* dmul */
696 COSTS_N_INSNS (1), /* sdiv */
697 COSTS_N_INSNS (1), /* ddiv */
698 128, /* cache line size */
699 0, /* l1 cache */
700 0, /* l2 cache */
701 0, /* streams */
702 0, /* SF->DF convert */
703 };
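/* Presumably (the selection logic lives later in this file, outside this
   excerpt) rs6000_cost is pointed at one of the two size*_cost tables
   above when optimizing for size, so every operation weighs one insn and
   only instruction count matters.  */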
704
705 /* Instruction costs on RS64A processors. */
706 static const
707 struct processor_costs rs64a_cost = {
708 COSTS_N_INSNS (20), /* mulsi */
709 COSTS_N_INSNS (12), /* mulsi_const */
710 COSTS_N_INSNS (8), /* mulsi_const9 */
711 COSTS_N_INSNS (34), /* muldi */
712 COSTS_N_INSNS (65), /* divsi */
713 COSTS_N_INSNS (67), /* divdi */
714 COSTS_N_INSNS (4), /* fp */
715 COSTS_N_INSNS (4), /* dmul */
716 COSTS_N_INSNS (31), /* sdiv */
717 COSTS_N_INSNS (31), /* ddiv */
718 128, /* cache line size */
719 128, /* l1 cache */
720 2048, /* l2 cache */
721 1, /* streams */
722 0, /* SF->DF convert */
723 };
724
725 /* Instruction costs on MPCCORE processors. */
726 static const
727 struct processor_costs mpccore_cost = {
728 COSTS_N_INSNS (2), /* mulsi */
729 COSTS_N_INSNS (2), /* mulsi_const */
730 COSTS_N_INSNS (2), /* mulsi_const9 */
731 COSTS_N_INSNS (2), /* muldi */
732 COSTS_N_INSNS (6), /* divsi */
733 COSTS_N_INSNS (6), /* divdi */
734 COSTS_N_INSNS (4), /* fp */
735 COSTS_N_INSNS (5), /* dmul */
736 COSTS_N_INSNS (10), /* sdiv */
737 COSTS_N_INSNS (17), /* ddiv */
738 32, /* cache line size */
739 4, /* l1 cache */
740 16, /* l2 cache */
741 1, /* streams */
742 0, /* SF->DF convert */
743 };
744
745 /* Instruction costs on PPC403 processors. */
746 static const
747 struct processor_costs ppc403_cost = {
748 COSTS_N_INSNS (4), /* mulsi */
749 COSTS_N_INSNS (4), /* mulsi_const */
750 COSTS_N_INSNS (4), /* mulsi_const9 */
751 COSTS_N_INSNS (4), /* muldi */
752 COSTS_N_INSNS (33), /* divsi */
753 COSTS_N_INSNS (33), /* divdi */
754 COSTS_N_INSNS (11), /* fp */
755 COSTS_N_INSNS (11), /* dmul */
756 COSTS_N_INSNS (11), /* sdiv */
757 COSTS_N_INSNS (11), /* ddiv */
758 32, /* cache line size */
759 4, /* l1 cache */
760 16, /* l2 cache */
761 1, /* streams */
762 0, /* SF->DF convert */
763 };
764
765 /* Instruction costs on PPC405 processors. */
766 static const
767 struct processor_costs ppc405_cost = {
768 COSTS_N_INSNS (5), /* mulsi */
769 COSTS_N_INSNS (4), /* mulsi_const */
770 COSTS_N_INSNS (3), /* mulsi_const9 */
771 COSTS_N_INSNS (5), /* muldi */
772 COSTS_N_INSNS (35), /* divsi */
773 COSTS_N_INSNS (35), /* divdi */
774 COSTS_N_INSNS (11), /* fp */
775 COSTS_N_INSNS (11), /* dmul */
776 COSTS_N_INSNS (11), /* sdiv */
777 COSTS_N_INSNS (11), /* ddiv */
778 32, /* cache line size */
779 16, /* l1 cache */
780 128, /* l2 cache */
781 1, /* streams */
782 0, /* SF->DF convert */
783 };
784
785 /* Instruction costs on PPC440 processors. */
786 static const
787 struct processor_costs ppc440_cost = {
788 COSTS_N_INSNS (3), /* mulsi */
789 COSTS_N_INSNS (2), /* mulsi_const */
790 COSTS_N_INSNS (2), /* mulsi_const9 */
791 COSTS_N_INSNS (3), /* muldi */
792 COSTS_N_INSNS (34), /* divsi */
793 COSTS_N_INSNS (34), /* divdi */
794 COSTS_N_INSNS (5), /* fp */
795 COSTS_N_INSNS (5), /* dmul */
796 COSTS_N_INSNS (19), /* sdiv */
797 COSTS_N_INSNS (33), /* ddiv */
798 32, /* cache line size */
799 32, /* l1 cache */
800 256, /* l2 cache */
801 1, /* streams */
802 0, /* SF->DF convert */
803 };
804
805 /* Instruction costs on PPC476 processors. */
806 static const
807 struct processor_costs ppc476_cost = {
808 COSTS_N_INSNS (4), /* mulsi */
809 COSTS_N_INSNS (4), /* mulsi_const */
810 COSTS_N_INSNS (4), /* mulsi_const9 */
811 COSTS_N_INSNS (4), /* muldi */
812 COSTS_N_INSNS (11), /* divsi */
813 COSTS_N_INSNS (11), /* divdi */
814 COSTS_N_INSNS (6), /* fp */
815 COSTS_N_INSNS (6), /* dmul */
816 COSTS_N_INSNS (19), /* sdiv */
817 COSTS_N_INSNS (33), /* ddiv */
818 32, /* l1 cache line size */
819 32, /* l1 cache */
820 512, /* l2 cache */
821 1, /* streams */
822 0, /* SF->DF convert */
823 };
824
825 /* Instruction costs on PPC601 processors. */
826 static const
827 struct processor_costs ppc601_cost = {
828 COSTS_N_INSNS (5), /* mulsi */
829 COSTS_N_INSNS (5), /* mulsi_const */
830 COSTS_N_INSNS (5), /* mulsi_const9 */
831 COSTS_N_INSNS (5), /* muldi */
832 COSTS_N_INSNS (36), /* divsi */
833 COSTS_N_INSNS (36), /* divdi */
834 COSTS_N_INSNS (4), /* fp */
835 COSTS_N_INSNS (5), /* dmul */
836 COSTS_N_INSNS (17), /* sdiv */
837 COSTS_N_INSNS (31), /* ddiv */
838 32, /* cache line size */
839 32, /* l1 cache */
840 256, /* l2 cache */
841 1, /* streams */
842 0, /* SF->DF convert */
843 };
844
845 /* Instruction costs on PPC603 processors. */
846 static const
847 struct processor_costs ppc603_cost = {
848 COSTS_N_INSNS (5), /* mulsi */
849 COSTS_N_INSNS (3), /* mulsi_const */
850 COSTS_N_INSNS (2), /* mulsi_const9 */
851 COSTS_N_INSNS (5), /* muldi */
852 COSTS_N_INSNS (37), /* divsi */
853 COSTS_N_INSNS (37), /* divdi */
854 COSTS_N_INSNS (3), /* fp */
855 COSTS_N_INSNS (4), /* dmul */
856 COSTS_N_INSNS (18), /* sdiv */
857 COSTS_N_INSNS (33), /* ddiv */
858 32, /* cache line size */
859 8, /* l1 cache */
860 64, /* l2 cache */
861 1, /* streams */
862 0, /* SF->DF convert */
863 };
864
865 /* Instruction costs on PPC604 processors. */
866 static const
867 struct processor_costs ppc604_cost = {
868 COSTS_N_INSNS (4), /* mulsi */
869 COSTS_N_INSNS (4), /* mulsi_const */
870 COSTS_N_INSNS (4), /* mulsi_const9 */
871 COSTS_N_INSNS (4), /* muldi */
872 COSTS_N_INSNS (20), /* divsi */
873 COSTS_N_INSNS (20), /* divdi */
874 COSTS_N_INSNS (3), /* fp */
875 COSTS_N_INSNS (3), /* dmul */
876 COSTS_N_INSNS (18), /* sdiv */
877 COSTS_N_INSNS (32), /* ddiv */
878 32, /* cache line size */
879 16, /* l1 cache */
880 512, /* l2 cache */
881 1, /* streams */
882 0, /* SF->DF convert */
883 };
884
885 /* Instruction costs on PPC604e processors. */
886 static const
887 struct processor_costs ppc604e_cost = {
888 COSTS_N_INSNS (2), /* mulsi */
889 COSTS_N_INSNS (2), /* mulsi_const */
890 COSTS_N_INSNS (2), /* mulsi_const9 */
891 COSTS_N_INSNS (2), /* muldi */
892 COSTS_N_INSNS (20), /* divsi */
893 COSTS_N_INSNS (20), /* divdi */
894 COSTS_N_INSNS (3), /* fp */
895 COSTS_N_INSNS (3), /* dmul */
896 COSTS_N_INSNS (18), /* sdiv */
897 COSTS_N_INSNS (32), /* ddiv */
898 32, /* cache line size */
899 32, /* l1 cache */
900 1024, /* l2 cache */
901 1, /* streams */
902 0, /* SF->DF convert */
903 };
904
905 /* Instruction costs on PPC620 processors. */
906 static const
907 struct processor_costs ppc620_cost = {
908 COSTS_N_INSNS (5), /* mulsi */
909 COSTS_N_INSNS (4), /* mulsi_const */
910 COSTS_N_INSNS (3), /* mulsi_const9 */
911 COSTS_N_INSNS (7), /* muldi */
912 COSTS_N_INSNS (21), /* divsi */
913 COSTS_N_INSNS (37), /* divdi */
914 COSTS_N_INSNS (3), /* fp */
915 COSTS_N_INSNS (3), /* dmul */
916 COSTS_N_INSNS (18), /* sdiv */
917 COSTS_N_INSNS (32), /* ddiv */
918 128, /* cache line size */
919 32, /* l1 cache */
920 1024, /* l2 cache */
921 1, /* streams */
922 0, /* SF->DF convert */
923 };
924
925 /* Instruction costs on PPC630 processors. */
926 static const
927 struct processor_costs ppc630_cost = {
928 COSTS_N_INSNS (5), /* mulsi */
929 COSTS_N_INSNS (4), /* mulsi_const */
930 COSTS_N_INSNS (3), /* mulsi_const9 */
931 COSTS_N_INSNS (7), /* muldi */
932 COSTS_N_INSNS (21), /* divsi */
933 COSTS_N_INSNS (37), /* divdi */
934 COSTS_N_INSNS (3), /* fp */
935 COSTS_N_INSNS (3), /* dmul */
936 COSTS_N_INSNS (17), /* sdiv */
937 COSTS_N_INSNS (21), /* ddiv */
938 128, /* cache line size */
939 64, /* l1 cache */
940 1024, /* l2 cache */
941 1, /* streams */
942 0, /* SF->DF convert */
943 };
944
945 /* Instruction costs on Cell processor. */
946 /* COSTS_N_INSNS (1) ~ one add. */
947 static const
948 struct processor_costs ppccell_cost = {
949 COSTS_N_INSNS (9/2)+2, /* mulsi */
950 COSTS_N_INSNS (6/2), /* mulsi_const */
951 COSTS_N_INSNS (6/2), /* mulsi_const9 */
952 COSTS_N_INSNS (15/2)+2, /* muldi */
953 COSTS_N_INSNS (38/2), /* divsi */
954 COSTS_N_INSNS (70/2), /* divdi */
955 COSTS_N_INSNS (10/2), /* fp */
956 COSTS_N_INSNS (10/2), /* dmul */
957 COSTS_N_INSNS (74/2), /* sdiv */
958 COSTS_N_INSNS (74/2), /* ddiv */
959 128, /* cache line size */
960 32, /* l1 cache */
961 512, /* l2 cache */
962 6, /* streams */
963 0, /* SF->DF convert */
964 };
965
966 /* Instruction costs on PPC750 and PPC7400 processors. */
967 static const
968 struct processor_costs ppc750_cost = {
969 COSTS_N_INSNS (5), /* mulsi */
970 COSTS_N_INSNS (3), /* mulsi_const */
971 COSTS_N_INSNS (2), /* mulsi_const9 */
972 COSTS_N_INSNS (5), /* muldi */
973 COSTS_N_INSNS (17), /* divsi */
974 COSTS_N_INSNS (17), /* divdi */
975 COSTS_N_INSNS (3), /* fp */
976 COSTS_N_INSNS (3), /* dmul */
977 COSTS_N_INSNS (17), /* sdiv */
978 COSTS_N_INSNS (31), /* ddiv */
979 32, /* cache line size */
980 32, /* l1 cache */
981 512, /* l2 cache */
982 1, /* streams */
983 0, /* SF->DF convert */
984 };
985
986 /* Instruction costs on PPC7450 processors. */
987 static const
988 struct processor_costs ppc7450_cost = {
989 COSTS_N_INSNS (4), /* mulsi */
990 COSTS_N_INSNS (3), /* mulsi_const */
991 COSTS_N_INSNS (3), /* mulsi_const9 */
992 COSTS_N_INSNS (4), /* muldi */
993 COSTS_N_INSNS (23), /* divsi */
994 COSTS_N_INSNS (23), /* divdi */
995 COSTS_N_INSNS (5), /* fp */
996 COSTS_N_INSNS (5), /* dmul */
997 COSTS_N_INSNS (21), /* sdiv */
998 COSTS_N_INSNS (35), /* ddiv */
999 32, /* cache line size */
1000 32, /* l1 cache */
1001 1024, /* l2 cache */
1002 1, /* streams */
1003 0, /* SF->DF convert */
1004 };
1005
1006 /* Instruction costs on PPC8540 processors. */
1007 static const
1008 struct processor_costs ppc8540_cost = {
1009 COSTS_N_INSNS (4), /* mulsi */
1010 COSTS_N_INSNS (4), /* mulsi_const */
1011 COSTS_N_INSNS (4), /* mulsi_const9 */
1012 COSTS_N_INSNS (4), /* muldi */
1013 COSTS_N_INSNS (19), /* divsi */
1014 COSTS_N_INSNS (19), /* divdi */
1015 COSTS_N_INSNS (4), /* fp */
1016 COSTS_N_INSNS (4), /* dmul */
1017 COSTS_N_INSNS (29), /* sdiv */
1018 COSTS_N_INSNS (29), /* ddiv */
1019 32, /* cache line size */
1020 32, /* l1 cache */
1021 256, /* l2 cache */
1022 1, /* prefetch streams */
1023 0, /* SF->DF convert */
1024 };
1025
1026 /* Instruction costs on E300C2 and E300C3 cores. */
1027 static const
1028 struct processor_costs ppce300c2c3_cost = {
1029 COSTS_N_INSNS (4), /* mulsi */
1030 COSTS_N_INSNS (4), /* mulsi_const */
1031 COSTS_N_INSNS (4), /* mulsi_const9 */
1032 COSTS_N_INSNS (4), /* muldi */
1033 COSTS_N_INSNS (19), /* divsi */
1034 COSTS_N_INSNS (19), /* divdi */
1035 COSTS_N_INSNS (3), /* fp */
1036 COSTS_N_INSNS (4), /* dmul */
1037 COSTS_N_INSNS (18), /* sdiv */
1038 COSTS_N_INSNS (33), /* ddiv */
1039 32, /* cache line size */
1040 16, /* l1 cache */
1041 16, /* l2 cache */
1042 1, /* prefetch streams */
1043 0, /* SF->DF convert */
1044 };
1045
1046 /* Instruction costs on PPCE500MC processors. */
1047 static const
1048 struct processor_costs ppce500mc_cost = {
1049 COSTS_N_INSNS (4), /* mulsi */
1050 COSTS_N_INSNS (4), /* mulsi_const */
1051 COSTS_N_INSNS (4), /* mulsi_const9 */
1052 COSTS_N_INSNS (4), /* muldi */
1053 COSTS_N_INSNS (14), /* divsi */
1054 COSTS_N_INSNS (14), /* divdi */
1055 COSTS_N_INSNS (8), /* fp */
1056 COSTS_N_INSNS (10), /* dmul */
1057 COSTS_N_INSNS (36), /* sdiv */
1058 COSTS_N_INSNS (66), /* ddiv */
1059 64, /* cache line size */
1060 32, /* l1 cache */
1061 128, /* l2 cache */
1062 1, /* prefetch streams */
1063 0, /* SF->DF convert */
1064 };
1065
1066 /* Instruction costs on PPCE500MC64 processors. */
1067 static const
1068 struct processor_costs ppce500mc64_cost = {
1069 COSTS_N_INSNS (4), /* mulsi */
1070 COSTS_N_INSNS (4), /* mulsi_const */
1071 COSTS_N_INSNS (4), /* mulsi_const9 */
1072 COSTS_N_INSNS (4), /* muldi */
1073 COSTS_N_INSNS (14), /* divsi */
1074 COSTS_N_INSNS (14), /* divdi */
1075 COSTS_N_INSNS (4), /* fp */
1076 COSTS_N_INSNS (10), /* dmul */
1077 COSTS_N_INSNS (36), /* sdiv */
1078 COSTS_N_INSNS (66), /* ddiv */
1079 64, /* cache line size */
1080 32, /* l1 cache */
1081 128, /* l2 cache */
1082 1, /* prefetch streams */
1083 0, /* SF->DF convert */
1084 };
1085
1086 /* Instruction costs on PPCE5500 processors. */
1087 static const
1088 struct processor_costs ppce5500_cost = {
1089 COSTS_N_INSNS (5), /* mulsi */
1090 COSTS_N_INSNS (5), /* mulsi_const */
1091 COSTS_N_INSNS (4), /* mulsi_const9 */
1092 COSTS_N_INSNS (5), /* muldi */
1093 COSTS_N_INSNS (14), /* divsi */
1094 COSTS_N_INSNS (14), /* divdi */
1095 COSTS_N_INSNS (7), /* fp */
1096 COSTS_N_INSNS (10), /* dmul */
1097 COSTS_N_INSNS (36), /* sdiv */
1098 COSTS_N_INSNS (66), /* ddiv */
1099 64, /* cache line size */
1100 32, /* l1 cache */
1101 128, /* l2 cache */
1102 1, /* prefetch streams */
1103 0, /* SF->DF convert */
1104 };
1105
1106 /* Instruction costs on PPCE6500 processors. */
1107 static const
1108 struct processor_costs ppce6500_cost = {
1109 COSTS_N_INSNS (5), /* mulsi */
1110 COSTS_N_INSNS (5), /* mulsi_const */
1111 COSTS_N_INSNS (4), /* mulsi_const9 */
1112 COSTS_N_INSNS (5), /* muldi */
1113 COSTS_N_INSNS (14), /* divsi */
1114 COSTS_N_INSNS (14), /* divdi */
1115 COSTS_N_INSNS (7), /* fp */
1116 COSTS_N_INSNS (10), /* dmul */
1117 COSTS_N_INSNS (36), /* sdiv */
1118 COSTS_N_INSNS (66), /* ddiv */
1119 64, /* cache line size */
1120 32, /* l1 cache */
1121 128, /* l2 cache */
1122 1, /* prefetch streams */
1123 0, /* SF->DF convert */
1124 };
1125
1126 /* Instruction costs on AppliedMicro Titan processors. */
1127 static const
1128 struct processor_costs titan_cost = {
1129 COSTS_N_INSNS (5), /* mulsi */
1130 COSTS_N_INSNS (5), /* mulsi_const */
1131 COSTS_N_INSNS (5), /* mulsi_const9 */
1132 COSTS_N_INSNS (5), /* muldi */
1133 COSTS_N_INSNS (18), /* divsi */
1134 COSTS_N_INSNS (18), /* divdi */
1135 COSTS_N_INSNS (10), /* fp */
1136 COSTS_N_INSNS (10), /* dmul */
1137 COSTS_N_INSNS (46), /* sdiv */
1138 COSTS_N_INSNS (72), /* ddiv */
1139 32, /* cache line size */
1140 32, /* l1 cache */
1141 512, /* l2 cache */
1142 1, /* prefetch streams */
1143 0, /* SF->DF convert */
1144 };
1145
1146 /* Instruction costs on POWER4 and POWER5 processors. */
1147 static const
1148 struct processor_costs power4_cost = {
1149 COSTS_N_INSNS (3), /* mulsi */
1150 COSTS_N_INSNS (2), /* mulsi_const */
1151 COSTS_N_INSNS (2), /* mulsi_const9 */
1152 COSTS_N_INSNS (4), /* muldi */
1153 COSTS_N_INSNS (18), /* divsi */
1154 COSTS_N_INSNS (34), /* divdi */
1155 COSTS_N_INSNS (3), /* fp */
1156 COSTS_N_INSNS (3), /* dmul */
1157 COSTS_N_INSNS (17), /* sdiv */
1158 COSTS_N_INSNS (17), /* ddiv */
1159 128, /* cache line size */
1160 32, /* l1 cache */
1161 1024, /* l2 cache */
1162 8, /* prefetch streams */
1163 0, /* SF->DF convert */
1164 };
1165
1166 /* Instruction costs on POWER6 processors. */
1167 static const
1168 struct processor_costs power6_cost = {
1169 COSTS_N_INSNS (8), /* mulsi */
1170 COSTS_N_INSNS (8), /* mulsi_const */
1171 COSTS_N_INSNS (8), /* mulsi_const9 */
1172 COSTS_N_INSNS (8), /* muldi */
1173 COSTS_N_INSNS (22), /* divsi */
1174 COSTS_N_INSNS (28), /* divdi */
1175 COSTS_N_INSNS (3), /* fp */
1176 COSTS_N_INSNS (3), /* dmul */
1177 COSTS_N_INSNS (13), /* sdiv */
1178 COSTS_N_INSNS (16), /* ddiv */
1179 128, /* cache line size */
1180 64, /* l1 cache */
1181 2048, /* l2 cache */
1182 16, /* prefetch streams */
1183 0, /* SF->DF convert */
1184 };
1185
1186 /* Instruction costs on POWER7 processors. */
1187 static const
1188 struct processor_costs power7_cost = {
1189 COSTS_N_INSNS (2), /* mulsi */
1190 COSTS_N_INSNS (2), /* mulsi_const */
1191 COSTS_N_INSNS (2), /* mulsi_const9 */
1192 COSTS_N_INSNS (2), /* muldi */
1193 COSTS_N_INSNS (18), /* divsi */
1194 COSTS_N_INSNS (34), /* divdi */
1195 COSTS_N_INSNS (3), /* fp */
1196 COSTS_N_INSNS (3), /* dmul */
1197 COSTS_N_INSNS (13), /* sdiv */
1198 COSTS_N_INSNS (16), /* ddiv */
1199 128, /* cache line size */
1200 32, /* l1 cache */
1201 256, /* l2 cache */
1202 12, /* prefetch streams */
1203 COSTS_N_INSNS (3), /* SF->DF convert */
1204 };
1205
1206 /* Instruction costs on POWER8 processors. */
1207 static const
1208 struct processor_costs power8_cost = {
1209 COSTS_N_INSNS (3), /* mulsi */
1210 COSTS_N_INSNS (3), /* mulsi_const */
1211 COSTS_N_INSNS (3), /* mulsi_const9 */
1212 COSTS_N_INSNS (3), /* muldi */
1213 COSTS_N_INSNS (19), /* divsi */
1214 COSTS_N_INSNS (35), /* divdi */
1215 COSTS_N_INSNS (3), /* fp */
1216 COSTS_N_INSNS (3), /* dmul */
1217 COSTS_N_INSNS (14), /* sdiv */
1218 COSTS_N_INSNS (17), /* ddiv */
1219 128, /* cache line size */
1220 32, /* l1 cache */
1221 256, /* l2 cache */
1222 12, /* prefetch streams */
1223 COSTS_N_INSNS (3), /* SF->DF convert */
1224 };
1225
1226 /* Instruction costs on POWER9 processors. */
1227 static const
1228 struct processor_costs power9_cost = {
1229 COSTS_N_INSNS (3), /* mulsi */
1230 COSTS_N_INSNS (3), /* mulsi_const */
1231 COSTS_N_INSNS (3), /* mulsi_const9 */
1232 COSTS_N_INSNS (3), /* muldi */
1233 COSTS_N_INSNS (8), /* divsi */
1234 COSTS_N_INSNS (12), /* divdi */
1235 COSTS_N_INSNS (3), /* fp */
1236 COSTS_N_INSNS (3), /* dmul */
1237 COSTS_N_INSNS (13), /* sdiv */
1238 COSTS_N_INSNS (18), /* ddiv */
1239 128, /* cache line size */
1240 32, /* l1 cache */
1241 512, /* l2 cache */
1242 8, /* prefetch streams */
1243 COSTS_N_INSNS (3), /* SF->DF convert */
1244 };
1245
1246 /* Instruction costs on POWER A2 processors. */
1247 static const
1248 struct processor_costs ppca2_cost = {
1249 COSTS_N_INSNS (16), /* mulsi */
1250 COSTS_N_INSNS (16), /* mulsi_const */
1251 COSTS_N_INSNS (16), /* mulsi_const9 */
1252 COSTS_N_INSNS (16), /* muldi */
1253 COSTS_N_INSNS (22), /* divsi */
1254 COSTS_N_INSNS (28), /* divdi */
1255 COSTS_N_INSNS (3), /* fp */
1256 COSTS_N_INSNS (3), /* dmul */
1257 COSTS_N_INSNS (59), /* sdiv */
1258 COSTS_N_INSNS (72), /* ddiv */
1259 64, /* cache line size */
1260 16, /* l1 cache */
1261 2048, /* l2 cache */
1262 16, /* prefetch streams */
1263 0, /* SF->DF convert */
1264 };
1265
1266 \f
1267 /* Table that classifies rs6000 builtin functions (pure, const, etc.). */
1268 #undef RS6000_BUILTIN_0
1269 #undef RS6000_BUILTIN_1
1270 #undef RS6000_BUILTIN_2
1271 #undef RS6000_BUILTIN_3
1272 #undef RS6000_BUILTIN_A
1273 #undef RS6000_BUILTIN_D
1274 #undef RS6000_BUILTIN_H
1275 #undef RS6000_BUILTIN_P
1276 #undef RS6000_BUILTIN_X
1277
1278 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
1279 { NAME, ICODE, MASK, ATTR },
1280
1281 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
1282 { NAME, ICODE, MASK, ATTR },
1283
1284 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
1285 { NAME, ICODE, MASK, ATTR },
1286
1287 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
1288 { NAME, ICODE, MASK, ATTR },
1289
1290 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
1291 { NAME, ICODE, MASK, ATTR },
1292
1293 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
1294 { NAME, ICODE, MASK, ATTR },
1295
1296 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
1297 { NAME, ICODE, MASK, ATTR },
1298
1299 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
1300 { NAME, ICODE, MASK, ATTR },
1301
1302 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
1303 { NAME, ICODE, MASK, ATTR },
1304
1305 struct rs6000_builtin_info_type {
1306 const char *name;
1307 const enum insn_code icode;
1308 const HOST_WIDE_INT mask;
1309 const unsigned attr;
1310 };
1311
1312 static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
1313 {
1314 #include "rs6000-builtin.def"
1315 };
1316
1317 #undef RS6000_BUILTIN_0
1318 #undef RS6000_BUILTIN_1
1319 #undef RS6000_BUILTIN_2
1320 #undef RS6000_BUILTIN_3
1321 #undef RS6000_BUILTIN_A
1322 #undef RS6000_BUILTIN_D
1323 #undef RS6000_BUILTIN_H
1324 #undef RS6000_BUILTIN_P
1325 #undef RS6000_BUILTIN_X
1326
1327 /* Support for -mveclibabi=<xxx> to control which vector library to use. */
1328 static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
1329
1330 \f
1331 static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
1332 static struct machine_function * rs6000_init_machine_status (void);
1333 static int rs6000_ra_ever_killed (void);
1334 static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
1335 static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
1336 static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
1337 static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
1338 static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
1339 static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
1340 static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
1341 static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
1342 bool);
1343 static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
1344 unsigned int);
1345 static bool is_microcoded_insn (rtx_insn *);
1346 static bool is_nonpipeline_insn (rtx_insn *);
1347 static bool is_cracked_insn (rtx_insn *);
1348 static bool is_load_insn (rtx, rtx *);
1349 static bool is_store_insn (rtx, rtx *);
1350 static bool set_to_load_agen (rtx_insn *,rtx_insn *);
1351 static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
1352 static bool insn_must_be_first_in_group (rtx_insn *);
1353 static bool insn_must_be_last_in_group (rtx_insn *);
1354 static void altivec_init_builtins (void);
1355 static tree builtin_function_type (machine_mode, machine_mode,
1356 machine_mode, machine_mode,
1357 enum rs6000_builtins, const char *name);
1358 static void rs6000_common_init_builtins (void);
1359 static void htm_init_builtins (void);
1360 static rs6000_stack_t *rs6000_stack_info (void);
1361 static void is_altivec_return_reg (rtx, void *);
1362 int easy_vector_constant (rtx, machine_mode);
1363 static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
1364 static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
1365 static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
1366 bool, bool);
1367 #if TARGET_MACHO
1368 static void macho_branch_islands (void);
1369 static tree get_prev_label (tree);
1370 #endif
1371 static bool rs6000_mode_dependent_address (const_rtx);
1372 static bool rs6000_debug_mode_dependent_address (const_rtx);
1373 static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
1374 static enum reg_class rs6000_secondary_reload_class (enum reg_class,
1375 machine_mode, rtx);
1376 static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
1377 machine_mode,
1378 rtx);
1379 static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
1380 static enum reg_class rs6000_debug_preferred_reload_class (rtx,
1381 enum reg_class);
1382 static bool rs6000_debug_secondary_memory_needed (machine_mode,
1383 reg_class_t,
1384 reg_class_t);
1385 static bool rs6000_debug_can_change_mode_class (machine_mode,
1386 machine_mode,
1387 reg_class_t);
1388 static bool rs6000_save_toc_in_prologue_p (void);
1389 static rtx rs6000_internal_arg_pointer (void);
1390
1391 static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
1392 = rs6000_mode_dependent_address;
1393
1394 enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
1395 machine_mode, rtx)
1396 = rs6000_secondary_reload_class;
1397
1398 enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
1399 = rs6000_preferred_reload_class;
1400
1401 const int INSN_NOT_AVAILABLE = -1;
1402
1403 static void rs6000_print_isa_options (FILE *, int, const char *,
1404 HOST_WIDE_INT);
1405 static void rs6000_print_builtin_options (FILE *, int, const char *,
1406 HOST_WIDE_INT);
1407 static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);
1408
1409 static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
1410 static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
1411 enum rs6000_reg_type,
1412 machine_mode,
1413 secondary_reload_info *,
1414 bool);
1415 rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
1416 static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
1417 static tree rs6000_fold_builtin (tree, int, tree *, bool);
1418
1419 /* Hash table stuff for keeping track of TOC entries. */
1420
1421 struct GTY((for_user)) toc_hash_struct
1422 {
1423 /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
1424 ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
1425 rtx key;
1426 machine_mode key_mode;
1427 int labelno;
1428 };
1429
1430 struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
1431 {
1432 static hashval_t hash (toc_hash_struct *);
1433 static bool equal (toc_hash_struct *, toc_hash_struct *);
1434 };
1435
1436 static GTY (()) hash_table<toc_hasher> *toc_hash_table;
1437
1438 /* Hash table to keep track of the argument types for builtin functions. */
1439
1440 struct GTY((for_user)) builtin_hash_struct
1441 {
1442 tree type;
1443 machine_mode mode[4]; /* return value + 3 arguments. */
1444 unsigned char uns_p[4]; /* and whether the types are unsigned. */
1445 };
1446
1447 struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
1448 {
1449 static hashval_t hash (builtin_hash_struct *);
1450 static bool equal (builtin_hash_struct *, builtin_hash_struct *);
1451 };
1452
1453 static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
1454
1455 \f
1456 /* Default register names. */
1457 char rs6000_reg_names[][8] =
1458 {
1459 /* GPRs */
1460 "0", "1", "2", "3", "4", "5", "6", "7",
1461 "8", "9", "10", "11", "12", "13", "14", "15",
1462 "16", "17", "18", "19", "20", "21", "22", "23",
1463 "24", "25", "26", "27", "28", "29", "30", "31",
1464 /* FPRs */
1465 "0", "1", "2", "3", "4", "5", "6", "7",
1466 "8", "9", "10", "11", "12", "13", "14", "15",
1467 "16", "17", "18", "19", "20", "21", "22", "23",
1468 "24", "25", "26", "27", "28", "29", "30", "31",
1469 /* VRs */
1470 "0", "1", "2", "3", "4", "5", "6", "7",
1471 "8", "9", "10", "11", "12", "13", "14", "15",
1472 "16", "17", "18", "19", "20", "21", "22", "23",
1473 "24", "25", "26", "27", "28", "29", "30", "31",
1474 /* lr ctr ca ap */
1475 "lr", "ctr", "ca", "ap",
1476 /* cr0..cr7 */
1477 "0", "1", "2", "3", "4", "5", "6", "7",
1478 /* vrsave vscr sfp */
1479 "vrsave", "vscr", "sfp",
1480 };
1481
1482 #ifdef TARGET_REGNAMES
1483 static const char alt_reg_names[][8] =
1484 {
1485 /* GPRs */
1486 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
1487 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
1488 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
1489 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
1490 /* FPRs */
1491 "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
1492 "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
1493 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
1494 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
1495 /* VRs */
1496 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
1497 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
1498 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
1499 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
1500 /* lr ctr ca ap */
1501 "lr", "ctr", "ca", "ap",
1502 /* cr0..cr7 */
1503 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
1504 /* vrsave vscr sfp */
1505 "vrsave", "vscr", "sfp",
1506 };
1507 #endif
1508
1509 /* Table of valid machine attributes. */
1510
1511 static const struct attribute_spec rs6000_attribute_table[] =
1512 {
1513 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
1514 affects_type_identity, handler, exclude } */
1515 { "altivec", 1, 1, false, true, false, false,
1516 rs6000_handle_altivec_attribute, NULL },
1517 { "longcall", 0, 0, false, true, true, false,
1518 rs6000_handle_longcall_attribute, NULL },
1519 { "shortcall", 0, 0, false, true, true, false,
1520 rs6000_handle_longcall_attribute, NULL },
1521 { "ms_struct", 0, 0, false, false, false, false,
1522 rs6000_handle_struct_attribute, NULL },
1523 { "gcc_struct", 0, 0, false, false, false, false,
1524 rs6000_handle_struct_attribute, NULL },
1525 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1526 SUBTARGET_ATTRIBUTE_TABLE,
1527 #endif
1528 { NULL, 0, 0, false, false, false, false, NULL, NULL }
1529 };
1530 \f
1531 #ifndef TARGET_PROFILE_KERNEL
1532 #define TARGET_PROFILE_KERNEL 0
1533 #endif
1534
1535 /* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
1536 #define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
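/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0)
   and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001 (%v31),
   matching the bit order described above.  */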
1537 \f
1538 /* Initialize the GCC target structure. */
1539 #undef TARGET_ATTRIBUTE_TABLE
1540 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1541 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1542 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1543 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1544 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1545
1546 #undef TARGET_ASM_ALIGNED_DI_OP
1547 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1548
1549 /* Default unaligned ops are only provided for ELF. Find the ops needed
1550 for non-ELF systems. */
1551 #ifndef OBJECT_FORMAT_ELF
1552 #if TARGET_XCOFF
1553 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1554 64-bit targets. */
1555 #undef TARGET_ASM_UNALIGNED_HI_OP
1556 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1557 #undef TARGET_ASM_UNALIGNED_SI_OP
1558 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1559 #undef TARGET_ASM_UNALIGNED_DI_OP
1560 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1561 #else
1562 /* For Darwin. */
1563 #undef TARGET_ASM_UNALIGNED_HI_OP
1564 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1565 #undef TARGET_ASM_UNALIGNED_SI_OP
1566 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1567 #undef TARGET_ASM_UNALIGNED_DI_OP
1568 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1569 #undef TARGET_ASM_ALIGNED_DI_OP
1570 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1571 #endif
1572 #endif
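/* So, for instance, a 4-byte unaligned integer becomes
       .vbyte  4,<value>
   on XCOFF, while the Darwin branch above falls back to the native
   .short/.long/.quad directives.  */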
1573
1574 /* This hook deals with fixups for relocatable code and DI-mode objects
1575 in 64-bit code. */
1576 #undef TARGET_ASM_INTEGER
1577 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1578
1579 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
1580 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1581 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1582 #endif
1583
1584 #undef TARGET_SET_UP_BY_PROLOGUE
1585 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1586
1587 #undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
1588 #define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
1589 #undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
1590 #define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
1591 #undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
1592 #define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
1593 #undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
1594 #define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
1595 #undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
1596 #define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
1597 #undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
1598 #define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components
1599
1600 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1601 #define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry
1602
1603 #undef TARGET_INTERNAL_ARG_POINTER
1604 #define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer
1605
1606 #undef TARGET_HAVE_TLS
1607 #define TARGET_HAVE_TLS HAVE_AS_TLS
1608
1609 #undef TARGET_CANNOT_FORCE_CONST_MEM
1610 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1611
1612 #undef TARGET_DELEGITIMIZE_ADDRESS
1613 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1614
1615 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1616 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1617
1618 #undef TARGET_LEGITIMATE_COMBINED_INSN
1619 #define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn
1620
1621 #undef TARGET_ASM_FUNCTION_PROLOGUE
1622 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1623 #undef TARGET_ASM_FUNCTION_EPILOGUE
1624 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1625
1626 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1627 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1628
1629 #undef TARGET_LEGITIMIZE_ADDRESS
1630 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1631
1632 #undef TARGET_SCHED_VARIABLE_ISSUE
1633 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1634
1635 #undef TARGET_SCHED_ISSUE_RATE
1636 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1637 #undef TARGET_SCHED_ADJUST_COST
1638 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1639 #undef TARGET_SCHED_ADJUST_PRIORITY
1640 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1641 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1642 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1643 #undef TARGET_SCHED_INIT
1644 #define TARGET_SCHED_INIT rs6000_sched_init
1645 #undef TARGET_SCHED_FINISH
1646 #define TARGET_SCHED_FINISH rs6000_sched_finish
1647 #undef TARGET_SCHED_REORDER
1648 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1649 #undef TARGET_SCHED_REORDER2
1650 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1651
1652 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1653 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1654
1655 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1656 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1657
1658 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1659 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1660 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1661 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1662 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1663 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1664 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1665 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1666
1667 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1668 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1669
1670 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1671 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1672 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1673 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1674 rs6000_builtin_support_vector_misalignment
1675 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1676 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1677 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1678 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1679 rs6000_builtin_vectorization_cost
1680 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1681 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1682 rs6000_preferred_simd_mode
1683 #undef TARGET_VECTORIZE_INIT_COST
1684 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1685 #undef TARGET_VECTORIZE_ADD_STMT_COST
1686 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1687 #undef TARGET_VECTORIZE_FINISH_COST
1688 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1689 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1690 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1691
1692 #undef TARGET_INIT_BUILTINS
1693 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1694 #undef TARGET_BUILTIN_DECL
1695 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1696
1697 #undef TARGET_FOLD_BUILTIN
1698 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1699 #undef TARGET_GIMPLE_FOLD_BUILTIN
1700 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1701
1702 #undef TARGET_EXPAND_BUILTIN
1703 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1704
1705 #undef TARGET_MANGLE_TYPE
1706 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1707
1708 #undef TARGET_INIT_LIBFUNCS
1709 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1710
1711 #if TARGET_MACHO
1712 #undef TARGET_BINDS_LOCAL_P
1713 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1714 #endif
1715
1716 #undef TARGET_MS_BITFIELD_LAYOUT_P
1717 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1718
1719 #undef TARGET_ASM_OUTPUT_MI_THUNK
1720 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1721
1722 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1723 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1724
1725 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1726 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1727
1728 #undef TARGET_REGISTER_MOVE_COST
1729 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1730 #undef TARGET_MEMORY_MOVE_COST
1731 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1732 #undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
1733 #define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
1734 rs6000_ira_change_pseudo_allocno_class
1735 #undef TARGET_CANNOT_COPY_INSN_P
1736 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1737 #undef TARGET_RTX_COSTS
1738 #define TARGET_RTX_COSTS rs6000_rtx_costs
1739 #undef TARGET_ADDRESS_COST
1740 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1741 #undef TARGET_INSN_COST
1742 #define TARGET_INSN_COST rs6000_insn_cost
1743
1744 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1745 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1746
1747 #undef TARGET_PROMOTE_FUNCTION_MODE
1748 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1749
1750 #undef TARGET_RETURN_IN_MEMORY
1751 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1752
1753 #undef TARGET_RETURN_IN_MSB
1754 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1755
1756 #undef TARGET_SETUP_INCOMING_VARARGS
1757 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1758
1759 /* Always strict argument naming on rs6000. */
1760 #undef TARGET_STRICT_ARGUMENT_NAMING
1761 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1762 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1763 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_SPLIT_COMPLEX_ARG
1765 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1766 #undef TARGET_MUST_PASS_IN_STACK
1767 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1768 #undef TARGET_PASS_BY_REFERENCE
1769 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1770 #undef TARGET_ARG_PARTIAL_BYTES
1771 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1772 #undef TARGET_FUNCTION_ARG_ADVANCE
1773 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1774 #undef TARGET_FUNCTION_ARG
1775 #define TARGET_FUNCTION_ARG rs6000_function_arg
1776 #undef TARGET_FUNCTION_ARG_PADDING
1777 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1778 #undef TARGET_FUNCTION_ARG_BOUNDARY
1779 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1780
1781 #undef TARGET_BUILD_BUILTIN_VA_LIST
1782 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1783
1784 #undef TARGET_EXPAND_BUILTIN_VA_START
1785 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1786
1787 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1788 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1789
1790 #undef TARGET_EH_RETURN_FILTER_MODE
1791 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1792
1793 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1794 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1795
1796 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1797 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1798
1799 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1800 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1801
1802 #undef TARGET_FLOATN_MODE
1803 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1804
1805 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1806 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1807
1808 #undef TARGET_MD_ASM_ADJUST
1809 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1810
1811 #undef TARGET_OPTION_OVERRIDE
1812 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1813
1814 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1815 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1816 rs6000_builtin_vectorized_function
1817
1818 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1819 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1820 rs6000_builtin_md_vectorized_function
1821
1822 #undef TARGET_STACK_PROTECT_GUARD
1823 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1824
1825 #if !TARGET_MACHO
1826 #undef TARGET_STACK_PROTECT_FAIL
1827 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1828 #endif
1829
1830 #ifdef HAVE_AS_TLS
1831 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1832 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1833 #endif
1834
1835 /* Use a 32-bit anchor range. This leads to sequences like:
1836
1837 addis tmp,anchor,high
1838 add dest,tmp,low
1839
1840 where tmp itself acts as an anchor, and can be shared between
1841 accesses to the same 64k page. */
1842 #undef TARGET_MIN_ANCHOR_OFFSET
1843 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1844 #undef TARGET_MAX_ANCHOR_OFFSET
1845 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
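/* Note on the minimum above: it is spelled -0x7fffffff - 1 because the
   literal 0x80000000 does not fit in a 32-bit signed int, so -0x80000000
   would negate an unsigned value instead of yielding INT_MIN.  */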
1846 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1847 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1848 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1849 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1850
1851 #undef TARGET_BUILTIN_RECIPROCAL
1852 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1853
1854 #undef TARGET_SECONDARY_RELOAD
1855 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1856 #undef TARGET_SECONDARY_MEMORY_NEEDED
1857 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1859 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1860
1861 #undef TARGET_LEGITIMATE_ADDRESS_P
1862 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1863
1864 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1865 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1866
1867 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1868 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1869
1870 #undef TARGET_CAN_ELIMINATE
1871 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1872
1873 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1874 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1875
1876 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1877 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1878
1879 #undef TARGET_TRAMPOLINE_INIT
1880 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1881
1882 #undef TARGET_FUNCTION_VALUE
1883 #define TARGET_FUNCTION_VALUE rs6000_function_value
1884
1885 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1886 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1887
1888 #undef TARGET_OPTION_SAVE
1889 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1890
1891 #undef TARGET_OPTION_RESTORE
1892 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1893
1894 #undef TARGET_OPTION_PRINT
1895 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1896
1897 #undef TARGET_CAN_INLINE_P
1898 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1899
1900 #undef TARGET_SET_CURRENT_FUNCTION
1901 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1902
1903 #undef TARGET_LEGITIMATE_CONSTANT_P
1904 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1905
1906 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1907 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1908
1909 #undef TARGET_CAN_USE_DOLOOP_P
1910 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1911
1912 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1913 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1914
1915 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1916 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1917 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1918 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1919 #undef TARGET_UNWIND_WORD_MODE
1920 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1921
1922 #undef TARGET_OFFLOAD_OPTIONS
1923 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1924
1925 #undef TARGET_C_MODE_FOR_SUFFIX
1926 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1927
1928 #undef TARGET_INVALID_BINARY_OP
1929 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1930
1931 #undef TARGET_OPTAB_SUPPORTED_P
1932 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1933
1934 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1935 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1936
1937 #undef TARGET_COMPARE_VERSION_PRIORITY
1938 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1939
1940 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1941 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1942 rs6000_generate_version_dispatcher_body
1943
1944 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1945 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1946 rs6000_get_function_versions_dispatcher
1947
1948 #undef TARGET_OPTION_FUNCTION_VERSIONS
1949 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1950
1951 #undef TARGET_HARD_REGNO_NREGS
1952 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1953 #undef TARGET_HARD_REGNO_MODE_OK
1954 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1955
1956 #undef TARGET_MODES_TIEABLE_P
1957 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1958
1959 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1960 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1961 rs6000_hard_regno_call_part_clobbered
1962
1963 #undef TARGET_SLOW_UNALIGNED_ACCESS
1964 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1965
1966 #undef TARGET_CAN_CHANGE_MODE_CLASS
1967 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1968
1969 #undef TARGET_CONSTANT_ALIGNMENT
1970 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1971
1972 #undef TARGET_STARTING_FRAME_OFFSET
1973 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1974
1975 #if TARGET_ELF && RS6000_WEAK
1976 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1977 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1978 #endif
1979
1980 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1981 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1982
1983 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1984 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1985 \f
1986
1987 /* Processor table. */
1988 struct rs6000_ptt
1989 {
1990 const char *const name; /* Canonical processor name. */
1991 const enum processor_type processor; /* Processor type enum value. */
1992 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1993 };
1994
1995 static struct rs6000_ptt const processor_target_table[] =
1996 {
1997 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1998 #include "rs6000-cpus.def"
1999 #undef RS6000_CPU
2000 };
2001
2002 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2003 name is invalid. */
2004
2005 static int
2006 rs6000_cpu_name_lookup (const char *name)
2007 {
2008 size_t i;
2009
2010 if (name != NULL)
2011 {
2012 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2013 if (! strcmp (name, processor_target_table[i].name))
2014 return (int)i;
2015 }
2016
2017 return -1;
2018 }
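/* Illustrative usage, assuming "power9" is one of the RS6000_CPU entries
   pulled in from rs6000-cpus.def:

     rs6000_cpu_name_lookup ("power9")  => index of the power9 entry in
                                           processor_target_table
     rs6000_cpu_name_lookup ("bogus")   => -1
     rs6000_cpu_name_lookup (NULL)      => -1  */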
2019
2020 \f
2021 /* Return number of consecutive hard regs needed starting at reg REGNO
2022 to hold something of mode MODE.
2023 This is ordinarily the length in words of a value of mode MODE
2024 but can be less for certain modes in special long registers.
2025
2026 POWER and PowerPC GPRs hold 32 bits worth;
2027 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2028
2029 static int
2030 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2031 {
2032 unsigned HOST_WIDE_INT reg_size;
2033
2034 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2035 128-bit floating point that can go in vector registers, which has VSX
2036 memory addressing. */
2037 if (FP_REGNO_P (regno))
2038 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2039 ? UNITS_PER_VSX_WORD
2040 : UNITS_PER_FP_WORD);
2041
2042 else if (ALTIVEC_REGNO_P (regno))
2043 reg_size = UNITS_PER_ALTIVEC_WORD;
2044
2045 else
2046 reg_size = UNITS_PER_WORD;
2047
2048 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2049 }
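/* Worked examples of the round-up division above (illustrative; sizes
   assume a typical configuration):

     V2DFmode (16 bytes) in a VSX register:  (16 + 16 - 1) / 16 = 1
     TImode   (16 bytes) in 64-bit GPRs:     (16 +  8 - 1) /  8 = 2
     DFmode   ( 8 bytes) in 32-bit GPRs:     ( 8 +  4 - 1) /  4 = 2  */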
2050
2051 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2052 MODE. */
2053 static int
2054 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2055 {
2056 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2057
2058 if (COMPLEX_MODE_P (mode))
2059 mode = GET_MODE_INNER (mode);
2060
2061 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2062 register combinations; we use PTImode where we need to deal with such
2063 operations. Don't allow quad words in the argument or frame pointer
2064 registers, just registers 0..31. */
2065 if (mode == PTImode)
2066 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2067 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2068 && ((regno & 1) == 0));
2069
2070 /* The VSX registers that overlap the FPRs are larger than the FPRs on
2071 non-VSX implementations. Don't allow an item to be split between an FP
2072 register and an Altivec register. Allow TImode in all VSX registers if
2073 the user asked for it. */
2074 if (TARGET_VSX && VSX_REGNO_P (regno)
2075 && (VECTOR_MEM_VSX_P (mode)
2076 || FLOAT128_VECTOR_P (mode)
2077 || reg_addr[mode].scalar_in_vmx_p
2078 || mode == TImode
2079 || (TARGET_VADDUQM && mode == V1TImode)))
2080 {
2081 if (FP_REGNO_P (regno))
2082 return FP_REGNO_P (last_regno);
2083
2084 if (ALTIVEC_REGNO_P (regno))
2085 {
2086 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2087 return 0;
2088
2089 return ALTIVEC_REGNO_P (last_regno);
2090 }
2091 }
2092
2093 /* The GPRs can hold any mode, but values bigger than one register
2094 cannot go past R31. */
2095 if (INT_REGNO_P (regno))
2096 return INT_REGNO_P (last_regno);
2097
2098 /* The float registers (except for VSX vector modes) can only hold floating
2099 modes and DImode. */
2100 if (FP_REGNO_P (regno))
2101 {
2102 if (FLOAT128_VECTOR_P (mode))
2103 return false;
2104
2105 if (SCALAR_FLOAT_MODE_P (mode)
2106 && (mode != TDmode || (regno % 2) == 0)
2107 && FP_REGNO_P (last_regno))
2108 return 1;
2109
2110 if (GET_MODE_CLASS (mode) == MODE_INT)
2111 {
2112 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2113 return 1;
2114
2115 if (TARGET_P8_VECTOR && (mode == SImode))
2116 return 1;
2117
2118 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2119 return 1;
2120 }
2121
2122 return 0;
2123 }
2124
2125 /* The CR register can only hold CC modes. */
2126 if (CR_REGNO_P (regno))
2127 return GET_MODE_CLASS (mode) == MODE_CC;
2128
2129 if (CA_REGNO_P (regno))
2130 return mode == Pmode || mode == SImode;
2131
2132 /* AltiVec modes only in AltiVec registers. */
2133 if (ALTIVEC_REGNO_P (regno))
2134 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2135 || mode == V1TImode);
2136
2137 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2138 registers, and the value must fit within the register set. */
2139
2140 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2141 }
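/* Some illustrative consequences of the checks above (a sketch, assuming a
   typical power8-style hard-float configuration):

     - DFmode in any FPR:                 OK (scalar float).
     - TDmode in an odd-numbered FPR:     rejected (needs an even register,
                                          since it occupies a pair).
     - PTImode starting at GPR 3:         rejected (needs an even GPR).
     - SFmode in a CR register:           rejected (CRs hold only CC modes).  */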
2142
2143 /* Implement TARGET_HARD_REGNO_NREGS. */
2144
2145 static unsigned int
2146 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2147 {
2148 return rs6000_hard_regno_nregs[mode][regno];
2149 }
2150
2151 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2152
2153 static bool
2154 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2155 {
2156 return rs6000_hard_regno_mode_ok_p[mode][regno];
2157 }
2158
2159 /* Implement TARGET_MODES_TIEABLE_P.
2160
2161 PTImode cannot tie with other modes because PTImode is restricted to even
2162 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2163 57744).
2164
2165 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2166 128-bit floating point on VSX systems ties with other vectors. */
2167
2168 static bool
2169 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2170 {
2171 if (mode1 == PTImode)
2172 return mode2 == PTImode;
2173 if (mode2 == PTImode)
2174 return false;
2175
2176 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2177 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2179 return false;
2180
2181 if (SCALAR_FLOAT_MODE_P (mode1))
2182 return SCALAR_FLOAT_MODE_P (mode2);
2183 if (SCALAR_FLOAT_MODE_P (mode2))
2184 return false;
2185
2186 if (GET_MODE_CLASS (mode1) == MODE_CC)
2187 return GET_MODE_CLASS (mode2) == MODE_CC;
2188 if (GET_MODE_CLASS (mode2) == MODE_CC)
2189 return false;
2190
2191 return true;
2192 }
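/* Illustrative answers (a sketch derived from the checks above):

     rs6000_modes_tieable_p (TImode, PTImode)    => false (PR 57744)
     rs6000_modes_tieable_p (V2DFmode, V4SFmode) => true (both vector modes)
     rs6000_modes_tieable_p (DFmode, SFmode)     => true (both scalar float)
     rs6000_modes_tieable_p (CCmode, CCUNSmode)  => true (both MODE_CC)  */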
2193
2194 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2195
2196 static bool
2197 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2198 unsigned int regno, machine_mode mode)
2199 {
2200 if (TARGET_32BIT
2201 && TARGET_POWERPC64
2202 && GET_MODE_SIZE (mode) > 4
2203 && INT_REGNO_P (regno))
2204 return true;
2205
2206 if (TARGET_VSX
2207 && FP_REGNO_P (regno)
2208 && GET_MODE_SIZE (mode) > 8
2209 && !FLOAT128_2REG_P (mode))
2210 return true;
2211
2212 return false;
2213 }
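/* Example (illustrative): with -m32 -mpowerpc64, a DImode value fits in one
   64-bit GPR, but the 32-bit ABI preserves only the low 32 bits across
   calls, so the register is partially clobbered. Similarly, with VSX a
   128-bit value in an FPR loses its upper 64 bits across calls, since only
   the traditional FPR half is call-saved.  */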
2214
2215 /* Print interesting facts about registers. */
2216 static void
2217 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2218 {
2219 int r, m;
2220
2221 for (r = first_regno; r <= last_regno; ++r)
2222 {
2223 const char *comma = "";
2224 int len;
2225
2226 if (first_regno == last_regno)
2227 fprintf (stderr, "%s:\t", reg_name);
2228 else
2229 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2230
2231 len = 8;
2232 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2233 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2234 {
2235 if (len > 70)
2236 {
2237 fprintf (stderr, ",\n\t");
2238 len = 8;
2239 comma = "";
2240 }
2241
2242 if (rs6000_hard_regno_nregs[m][r] > 1)
2243 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2244 rs6000_hard_regno_nregs[m][r]);
2245 else
2246 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2247
2248 comma = ", ";
2249 }
2250
2251 if (call_used_regs[r])
2252 {
2253 if (len > 70)
2254 {
2255 fprintf (stderr, ",\n\t");
2256 len = 8;
2257 comma = "";
2258 }
2259
2260 len += fprintf (stderr, "%s%s", comma, "call-used");
2261 comma = ", ";
2262 }
2263
2264 if (fixed_regs[r])
2265 {
2266 if (len > 70)
2267 {
2268 fprintf (stderr, ",\n\t");
2269 len = 8;
2270 comma = "";
2271 }
2272
2273 len += fprintf (stderr, "%s%s", comma, "fixed");
2274 comma = ", ";
2275 }
2276
2277 if (len > 70)
2278 {
2279 fprintf (stderr, ",\n\t");
2280 comma = "";
2281 }
2282
2283 len += fprintf (stderr, "%sreg-class = %s", comma,
2284 reg_class_names[(int)rs6000_regno_regclass[r]]);
2285 comma = ", ";
2286
2287 if (len > 70)
2288 {
2289 fprintf (stderr, ",\n\t");
2290 comma = "";
2291 }
2292
2293 fprintf (stderr, "%sregno = %d\n", comma, r);
2294 }
2295 }
2296
2297 static const char *
2298 rs6000_debug_vector_unit (enum rs6000_vector v)
2299 {
2300 const char *ret;
2301
2302 switch (v)
2303 {
2304 case VECTOR_NONE: ret = "none"; break;
2305 case VECTOR_ALTIVEC: ret = "altivec"; break;
2306 case VECTOR_VSX: ret = "vsx"; break;
2307 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2308 default: ret = "unknown"; break;
2309 }
2310
2311 return ret;
2312 }
2313
2314 /* Inner function printing just the address mask for a particular reload
2315 register class. */
2316 DEBUG_FUNCTION char *
2317 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2318 {
2319 static char ret[8];
2320 char *p = ret;
2321
2322 if ((mask & RELOAD_REG_VALID) != 0)
2323 *p++ = 'v';
2324 else if (keep_spaces)
2325 *p++ = ' ';
2326
2327 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2328 *p++ = 'm';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_INDEXED) != 0)
2333 *p++ = 'i';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2338 *p++ = 'O';
2339 else if ((mask & RELOAD_REG_OFFSET) != 0)
2340 *p++ = 'o';
2341 else if (keep_spaces)
2342 *p++ = ' ';
2343
2344 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2345 *p++ = '+';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_AND_M16) != 0)
2355 *p++ = '&';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 *p = '\0';
2360
2361 return ret;
2362 }
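/* For instance (derived from the flag order above): a mask with
   RELOAD_REG_VALID, RELOAD_REG_INDEXED, RELOAD_REG_OFFSET and
   RELOAD_REG_PRE_INCDEC set prints as "vio+" when KEEP_SPACES is false,
   or as "v io+  " (one column per flag) when KEEP_SPACES is true.  */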
2363
2364 /* Print the address masks in a human readable fashion. */
2365 DEBUG_FUNCTION void
2366 rs6000_debug_print_mode (ssize_t m)
2367 {
2368 ssize_t rc;
2369 int spaces = 0;
2370
2371 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2372 for (rc = 0; rc < N_RELOAD_REG; rc++)
2373 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2374 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2375
2376 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2377 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2378 {
2379 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2380 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2381 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2382 spaces = 0;
2383 }
2384 else
2385 spaces += sizeof (" Reload=sl") - 1;
2386
2387 if (reg_addr[m].scalar_in_vmx_p)
2388 {
2389 fprintf (stderr, "%*s Upper=y", spaces, "");
2390 spaces = 0;
2391 }
2392 else
2393 spaces += sizeof (" Upper=y") - 1;
2394
2395 if (rs6000_vector_unit[m] != VECTOR_NONE
2396 || rs6000_vector_mem[m] != VECTOR_NONE)
2397 {
2398 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2399 spaces, "",
2400 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2401 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2402 }
2403
2404 fputs ("\n", stderr);
2405 }
2406
2407 #define DEBUG_FMT_ID "%-32s= "
2408 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2409 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2410 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
2411
2412 /* Print various interesting information with -mdebug=reg. */
2413 static void
2414 rs6000_debug_reg_global (void)
2415 {
2416 static const char *const tf[2] = { "false", "true" };
2417 const char *nl = (const char *)0;
2418 int m;
2419 size_t m1, m2, v;
2420 char costly_num[20];
2421 char nop_num[20];
2422 char flags_buffer[40];
2423 const char *costly_str;
2424 const char *nop_str;
2425 const char *trace_str;
2426 const char *abi_str;
2427 const char *cmodel_str;
2428 struct cl_target_option cl_opts;
2429
2430 /* Modes we want tieable information on. */
2431 static const machine_mode print_tieable_modes[] = {
2432 QImode,
2433 HImode,
2434 SImode,
2435 DImode,
2436 TImode,
2437 PTImode,
2438 SFmode,
2439 DFmode,
2440 TFmode,
2441 IFmode,
2442 KFmode,
2443 SDmode,
2444 DDmode,
2445 TDmode,
2446 V16QImode,
2447 V8HImode,
2448 V4SImode,
2449 V2DImode,
2450 V1TImode,
2451 V32QImode,
2452 V16HImode,
2453 V8SImode,
2454 V4DImode,
2455 V2TImode,
2456 V4SFmode,
2457 V2DFmode,
2458 V8SFmode,
2459 V4DFmode,
2460 CCmode,
2461 CCUNSmode,
2462 CCEQmode,
2463 };
2464
2465 /* Virtual regs we are interested in. */
2466 static const struct {
2467 int regno; /* register number. */
2468 const char *name; /* register name. */
2469 } virtual_regs[] = {
2470 { STACK_POINTER_REGNUM, "stack pointer:" },
2471 { TOC_REGNUM, "toc: " },
2472 { STATIC_CHAIN_REGNUM, "static chain: " },
2473 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2474 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2475 { ARG_POINTER_REGNUM, "arg pointer: " },
2476 { FRAME_POINTER_REGNUM, "frame pointer:" },
2477 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2478 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2479 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2480 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2481 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2482 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2483 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2484 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2485 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2486 };
2487
2488 fputs ("\nHard register information:\n", stderr);
2489 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2490 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2491 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2492 LAST_ALTIVEC_REGNO,
2493 "vs");
2494 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2495 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2496 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2497 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2498 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2499 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2500
2501 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2502 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2503 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2504
2505 fprintf (stderr,
2506 "\n"
2507 "d reg_class = %s\n"
2508 "f reg_class = %s\n"
2509 "v reg_class = %s\n"
2510 "wa reg_class = %s\n"
2511 "wb reg_class = %s\n"
2512 "wd reg_class = %s\n"
2513 "we reg_class = %s\n"
2514 "wf reg_class = %s\n"
2515 "wg reg_class = %s\n"
2516 "wh reg_class = %s\n"
2517 "wi reg_class = %s\n"
2518 "wj reg_class = %s\n"
2519 "wk reg_class = %s\n"
2520 "wl reg_class = %s\n"
2521 "wm reg_class = %s\n"
2522 "wo reg_class = %s\n"
2523 "wp reg_class = %s\n"
2524 "wq reg_class = %s\n"
2525 "wr reg_class = %s\n"
2526 "ws reg_class = %s\n"
2527 "wt reg_class = %s\n"
2528 "wu reg_class = %s\n"
2529 "wv reg_class = %s\n"
2530 "ww reg_class = %s\n"
2531 "wx reg_class = %s\n"
2532 "wy reg_class = %s\n"
2533 "wz reg_class = %s\n"
2534 "wA reg_class = %s\n"
2535 "wH reg_class = %s\n"
2536 "wI reg_class = %s\n"
2537 "wJ reg_class = %s\n"
2538 "wK reg_class = %s\n"
2539 "\n",
2540 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2560 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2561 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2562 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2563 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2564 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2565 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2566 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2567 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2568 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2569 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2570 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2571 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2572
2573 nl = "\n";
2574 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2575 rs6000_debug_print_mode (m);
2576
2577 fputs ("\n", stderr);
2578
2579 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2580 {
2581 machine_mode mode1 = print_tieable_modes[m1];
2582 bool first_time = true;
2583
2584 nl = (const char *)0;
2585 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2586 {
2587 machine_mode mode2 = print_tieable_modes[m2];
2588 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2589 {
2590 if (first_time)
2591 {
2592 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2593 nl = "\n";
2594 first_time = false;
2595 }
2596
2597 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2598 }
2599 }
2600
2601 if (!first_time)
2602 fputs ("\n", stderr);
2603 }
2604
2605 if (nl)
2606 fputs (nl, stderr);
2607
2608 if (rs6000_recip_control)
2609 {
2610 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2611
2612 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2613 if (rs6000_recip_bits[m])
2614 {
2615 fprintf (stderr,
2616 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2617 GET_MODE_NAME (m),
2618 (RS6000_RECIP_AUTO_RE_P (m)
2619 ? "auto"
2620 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2621 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2622 ? "auto"
2623 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2624 }
2625
2626 fputs ("\n", stderr);
2627 }
2628
2629 if (rs6000_cpu_index >= 0)
2630 {
2631 const char *name = processor_target_table[rs6000_cpu_index].name;
2632 HOST_WIDE_INT flags
2633 = processor_target_table[rs6000_cpu_index].target_enable;
2634
2635 sprintf (flags_buffer, "-mcpu=%s flags", name);
2636 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2637 }
2638 else
2639 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2640
2641 if (rs6000_tune_index >= 0)
2642 {
2643 const char *name = processor_target_table[rs6000_tune_index].name;
2644 HOST_WIDE_INT flags
2645 = processor_target_table[rs6000_tune_index].target_enable;
2646
2647 sprintf (flags_buffer, "-mtune=%s flags", name);
2648 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2649 }
2650 else
2651 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2652
2653 cl_target_option_save (&cl_opts, &global_options);
2654 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2655 rs6000_isa_flags);
2656
2657 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2658 rs6000_isa_flags_explicit);
2659
2660 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2661 rs6000_builtin_mask);
2662
2663 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2664
2665 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2666 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2667
2668 switch (rs6000_sched_costly_dep)
2669 {
2670 case max_dep_latency:
2671 costly_str = "max_dep_latency";
2672 break;
2673
2674 case no_dep_costly:
2675 costly_str = "no_dep_costly";
2676 break;
2677
2678 case all_deps_costly:
2679 costly_str = "all_deps_costly";
2680 break;
2681
2682 case true_store_to_load_dep_costly:
2683 costly_str = "true_store_to_load_dep_costly";
2684 break;
2685
2686 case store_to_load_dep_costly:
2687 costly_str = "store_to_load_dep_costly";
2688 break;
2689
2690 default:
2691 costly_str = costly_num;
2692 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2693 break;
2694 }
2695
2696 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2697
2698 switch (rs6000_sched_insert_nops)
2699 {
2700 case sched_finish_regroup_exact:
2701 nop_str = "sched_finish_regroup_exact";
2702 break;
2703
2704 case sched_finish_pad_groups:
2705 nop_str = "sched_finish_pad_groups";
2706 break;
2707
2708 case sched_finish_none:
2709 nop_str = "sched_finish_none";
2710 break;
2711
2712 default:
2713 nop_str = nop_num;
2714 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2715 break;
2716 }
2717
2718 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2719
2720 switch (rs6000_sdata)
2721 {
2722 default:
2723 case SDATA_NONE:
2724 break;
2725
2726 case SDATA_DATA:
2727 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2728 break;
2729
2730 case SDATA_SYSV:
2731 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2732 break;
2733
2734 case SDATA_EABI:
2735 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2736 break;
2737
2738 }
2739
2740 switch (rs6000_traceback)
2741 {
2742 case traceback_default: trace_str = "default"; break;
2743 case traceback_none: trace_str = "none"; break;
2744 case traceback_part: trace_str = "part"; break;
2745 case traceback_full: trace_str = "full"; break;
2746 default: trace_str = "unknown"; break;
2747 }
2748
2749 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2750
2751 switch (rs6000_current_cmodel)
2752 {
2753 case CMODEL_SMALL: cmodel_str = "small"; break;
2754 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2755 case CMODEL_LARGE: cmodel_str = "large"; break;
2756 default: cmodel_str = "unknown"; break;
2757 }
2758
2759 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2760
2761 switch (rs6000_current_abi)
2762 {
2763 case ABI_NONE: abi_str = "none"; break;
2764 case ABI_AIX: abi_str = "aix"; break;
2765 case ABI_ELFv2: abi_str = "ELFv2"; break;
2766 case ABI_V4: abi_str = "V4"; break;
2767 case ABI_DARWIN: abi_str = "darwin"; break;
2768 default: abi_str = "unknown"; break;
2769 }
2770
2771 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2772
2773 if (rs6000_altivec_abi)
2774 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2775
2776 if (rs6000_darwin64_abi)
2777 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2778
2779 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2780 (TARGET_SOFT_FLOAT ? "true" : "false"));
2781
2782 if (TARGET_LINK_STACK)
2783 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2784
2785 if (TARGET_P8_FUSION)
2786 {
2787 char options[80];
2788
2789 strcpy (options, "power8");
2790 if (TARGET_P8_FUSION_SIGN)
2791 strcat (options, ", sign");
2792
2793 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2794 }
2795
2796 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2797 TARGET_SECURE_PLT ? "secure" : "bss");
2798 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2799 aix_struct_return ? "aix" : "sysv");
2800 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2801 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2802 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2803 tf[!!rs6000_align_branch_targets]);
2804 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2805 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2806 rs6000_long_double_type_size);
2807 if (rs6000_long_double_type_size > 64)
2808 {
2809 fprintf (stderr, DEBUG_FMT_S, "long double type",
2810 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2811 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2812 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2813 }
2814 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2815 (int)rs6000_sched_restricted_insns_priority);
2816 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2817 (int)END_BUILTINS);
2818 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2819 (int)RS6000_BUILTIN_COUNT);
2820
2821 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2822 (int)TARGET_FLOAT128_ENABLE_TYPE);
2823
2824 if (TARGET_VSX)
2825 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2826 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2827
2828 if (TARGET_DIRECT_MOVE_128)
2829 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2830 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2831 }
2832
2833 \f
2834 /* Update the addr mask bits in reg_addr to help secondary reload and the
2835 legitimate address support figure out the appropriate addressing to
2836 use. */
2837
2838 static void
2839 rs6000_setup_reg_addr_masks (void)
2840 {
2841 ssize_t rc, reg, m, nregs;
2842 addr_mask_type any_addr_mask, addr_mask;
2843
2844 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2845 {
2846 machine_mode m2 = (machine_mode) m;
2847 bool complex_p = false;
2848 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2849 size_t msize;
2850
2851 if (COMPLEX_MODE_P (m2))
2852 {
2853 complex_p = true;
2854 m2 = GET_MODE_INNER (m2);
2855 }
2856
2857 msize = GET_MODE_SIZE (m2);
2858
2859 /* SDmode is special in that we want to access it only via REG+REG
2860 addressing on power7 and above, since we want to use the LFIWZX and
2861 STFIWX instructions to load and store it. */
2862 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2863
2864 any_addr_mask = 0;
2865 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2866 {
2867 addr_mask = 0;
2868 reg = reload_reg_map[rc].reg;
2869
2870 /* Can mode values go in the GPR/FPR/Altivec registers? */
2871 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2872 {
2873 bool small_int_vsx_p = (small_int_p
2874 && (rc == RELOAD_REG_FPR
2875 || rc == RELOAD_REG_VMX));
2876
2877 nregs = rs6000_hard_regno_nregs[m][reg];
2878 addr_mask |= RELOAD_REG_VALID;
2879
2880 /* Indicate if the mode takes more than 1 physical register. If
2881 it takes a single register, indicate it can do REG+REG
2882 addressing. Small integers in VSX registers can only do
2883 REG+REG addressing. */
2884 if (small_int_vsx_p)
2885 addr_mask |= RELOAD_REG_INDEXED;
2886 else if (nregs > 1 || m == BLKmode || complex_p)
2887 addr_mask |= RELOAD_REG_MULTIPLE;
2888 else
2889 addr_mask |= RELOAD_REG_INDEXED;
2890
2891 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2892 addressing. If we allow scalars into Altivec registers,
2893 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2894
2895 For VSX systems, we don't allow update addressing for
2896 DFmode/SFmode if those registers can go in both the
2897 traditional floating point registers and Altivec registers.
2898 The load/store instructions for the Altivec registers do not
2899 have update forms. If we allowed update addressing, it seems
2900 to break IV-OPT code using floating point if the index type is
2901 int instead of long (PR target/81550 and target/84042). */
2902
2903 if (TARGET_UPDATE
2904 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2905 && msize <= 8
2906 && !VECTOR_MODE_P (m2)
2907 && !FLOAT128_VECTOR_P (m2)
2908 && !complex_p
2909 && (m != E_DFmode || !TARGET_VSX)
2910 && (m != E_SFmode || !TARGET_P8_VECTOR)
2911 && !small_int_vsx_p)
2912 {
2913 addr_mask |= RELOAD_REG_PRE_INCDEC;
2914
2915 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2916 we don't allow PRE_MODIFY for some multi-register
2917 operations. */
2918 switch (m)
2919 {
2920 default:
2921 addr_mask |= RELOAD_REG_PRE_MODIFY;
2922 break;
2923
2924 case E_DImode:
2925 if (TARGET_POWERPC64)
2926 addr_mask |= RELOAD_REG_PRE_MODIFY;
2927 break;
2928
2929 case E_DFmode:
2930 case E_DDmode:
2931 if (TARGET_HARD_FLOAT)
2932 addr_mask |= RELOAD_REG_PRE_MODIFY;
2933 break;
2934 }
2935 }
2936 }
2937
2938 /* GPR and FPR registers can do REG+OFFSET addressing, except
2939 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2940 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2941 if ((addr_mask != 0) && !indexed_only_p
2942 && msize <= 8
2943 && (rc == RELOAD_REG_GPR
2944 || ((msize == 8 || m2 == SFmode)
2945 && (rc == RELOAD_REG_FPR
2946 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2947 addr_mask |= RELOAD_REG_OFFSET;
2948
2949 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2950 instructions are enabled. The offset for 128-bit VSX registers is
2951 only 12 bits. While GPRs can handle the full offset range, VSX
2952 registers can only handle the restricted range. */
2953 else if ((addr_mask != 0) && !indexed_only_p
2954 && msize == 16 && TARGET_P9_VECTOR
2955 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2956 || (m2 == TImode && TARGET_VSX)))
2957 {
2958 addr_mask |= RELOAD_REG_OFFSET;
2959 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2960 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2961 }
2962
2963 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2964 addressing on 128-bit types. */
2965 if (rc == RELOAD_REG_VMX && msize == 16
2966 && (addr_mask & RELOAD_REG_VALID) != 0)
2967 addr_mask |= RELOAD_REG_AND_M16;
2968
2969 reg_addr[m].addr_mask[rc] = addr_mask;
2970 any_addr_mask |= addr_mask;
2971 }
2972
2973 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2974 }
2975 }
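/* A hedged sketch of how these masks get used: code elsewhere in this file
   can ask whether DFmode allows REG+OFFSET addressing in the traditional
   floating point registers with a test like

     (reg_addr[DFmode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_OFFSET) != 0

   while the RELOAD_REG_ANY entry holds the union over all register
   classes, answering whether the form is valid anywhere.  */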
2976
2977 \f
2978 /* Initialize the various global tables that are based on register size. */
2979 static void
2980 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2981 {
2982 ssize_t r, m, c;
2983 int align64;
2984 int align32;
2985
2986 /* Precalculate REGNO_REG_CLASS. */
2987 rs6000_regno_regclass[0] = GENERAL_REGS;
2988 for (r = 1; r < 32; ++r)
2989 rs6000_regno_regclass[r] = BASE_REGS;
2990
2991 for (r = 32; r < 64; ++r)
2992 rs6000_regno_regclass[r] = FLOAT_REGS;
2993
2994 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
2995 rs6000_regno_regclass[r] = NO_REGS;
2996
2997 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2998 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2999
3000 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
3001 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
3002 rs6000_regno_regclass[r] = CR_REGS;
3003
3004 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
3005 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
3006 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
3007 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
3008 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
3009 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
3010 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
3011
3012 /* Precalculate the mapping from register class to simpler reload register
3013 class. We don't need all the register classes that are combinations of
3014 different classes, just the simple ones that have constraint letters. */
3015 for (c = 0; c < N_REG_CLASSES; c++)
3016 reg_class_to_reg_type[c] = NO_REG_TYPE;
3017
3018 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3019 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3020 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3021 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3022 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3023 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3024 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3025 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3026 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3027 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3028
3029 if (TARGET_VSX)
3030 {
3031 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3032 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3033 }
3034 else
3035 {
3036 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3037 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3038 }
3039
3040 /* Precalculate the valid memory formats as well as the vector information;
3041 this must be set up before the rs6000_hard_regno_nregs_internal calls
3042 below. */
3043 gcc_assert ((int)VECTOR_NONE == 0);
3044 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3045 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3046
3047 gcc_assert ((int)CODE_FOR_nothing == 0);
3048 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3049
3050 gcc_assert ((int)NO_REGS == 0);
3051 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3052
3053 /* The VSX hardware allows native alignment for vectors; control whether the
3054 compiler believes it can use native alignment or must use 128-bit alignment. */
3055 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3056 {
3057 align64 = 64;
3058 align32 = 32;
3059 }
3060 else
3061 {
3062 align64 = 128;
3063 align32 = 128;
3064 }
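/* For example (illustrative): when VSX is enabled and TARGET_VSX_ALIGN_128
   is not, V2DFmode gets its natural 64-bit alignment and V4SFmode 32-bit
   alignment; otherwise every vector mode keeps 128-bit alignment.  */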
3065
3066 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3067 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3068 if (TARGET_FLOAT128_TYPE)
3069 {
3070 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3071 rs6000_vector_align[KFmode] = 128;
3072
3073 if (FLOAT128_IEEE_P (TFmode))
3074 {
3075 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3076 rs6000_vector_align[TFmode] = 128;
3077 }
3078 }
3079
3080 /* V2DF mode, VSX only. */
3081 if (TARGET_VSX)
3082 {
3083 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3084 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3085 rs6000_vector_align[V2DFmode] = align64;
3086 }
3087
3088 /* V4SF mode, either VSX or Altivec. */
3089 if (TARGET_VSX)
3090 {
3091 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3092 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3093 rs6000_vector_align[V4SFmode] = align32;
3094 }
3095 else if (TARGET_ALTIVEC)
3096 {
3097 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3098 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3099 rs6000_vector_align[V4SFmode] = align32;
3100 }
3101
3102 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3103 and stores. */
3104 if (TARGET_ALTIVEC)
3105 {
3106 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3107 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3108 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3109 rs6000_vector_align[V4SImode] = align32;
3110 rs6000_vector_align[V8HImode] = align32;
3111 rs6000_vector_align[V16QImode] = align32;
3112
3113 if (TARGET_VSX)
3114 {
3115 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3116 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3117 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3118 }
3119 else
3120 {
3121 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3122 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3123 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3124 }
3125 }
3126
3127 /* V2DImode: full arithmetic support depends on ISA 2.07. Allow it under VSX
3128 for insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3129 if (TARGET_VSX)
3130 {
3131 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3132 rs6000_vector_unit[V2DImode]
3133 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3134 rs6000_vector_align[V2DImode] = align64;
3135
3136 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3137 rs6000_vector_unit[V1TImode]
3138 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3139 rs6000_vector_align[V1TImode] = 128;
3140 }
3141
3142 /* DFmode, see if we want to use the VSX unit. Memory is handled
3143 differently, so don't set rs6000_vector_mem. */
3144 if (TARGET_VSX)
3145 {
3146 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3147 rs6000_vector_align[DFmode] = 64;
3148 }
3149
3150 /* SFmode, see if we want to use the VSX unit. */
3151 if (TARGET_P8_VECTOR)
3152 {
3153 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3154 rs6000_vector_align[SFmode] = 32;
3155 }
3156
3157 /* Allow TImode in VSX registers and set the VSX memory macros. */
3158 if (TARGET_VSX)
3159 {
3160 rs6000_vector_mem[TImode] = VECTOR_VSX;
3161 rs6000_vector_align[TImode] = align64;
3162 }
3163
3164 /* Register class constraints for the constraints that depend on compile
3165 switches. When the VSX code was added, different constraints were added
3166 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3167 of the VSX registers are used. The register classes for scalar floating
3168 point types are set based on whether we allow that type into the upper
3169 (Altivec) registers. GCC has register classes to target the Altivec
3170 registers for load/store operations, to select using a VSX memory
3171 operation instead of the traditional floating point operation. The
3172 constraints are:
3173
3174 d - Register class to use with traditional DFmode instructions.
3175 f - Register class to use with traditional SFmode instructions.
3176 v - Altivec register.
3177 wa - Any VSX register.
3178 wc - Reserved to represent individual CR bits (used in LLVM).
3179 wd - Preferred register class for V2DFmode.
3180 wf - Preferred register class for V4SFmode.
3181 wg - Float register for power6x move insns.
3182 wh - FP register for direct move instructions.
3183 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3184 wj - FP or VSX register to hold 64-bit integers for direct moves.
3185 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3186 wl - Float register if we can do 32-bit signed int loads.
3187 wm - VSX register for ISA 2.07 direct move operations.
3188 wn - always NO_REGS.
3189 wr - GPR if 64-bit mode is permitted.
3190 ws - Register class to do ISA 2.06 DF operations.
3191 wt - VSX register for TImode in VSX registers.
3192 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3193 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3194 ww - Register class to do SF conversions in with VSX operations.
3195 wx - Float register if we can do 32-bit int stores.
3196 wy - Register class to do ISA 2.07 SF operations.
3197 wz - Float register if we can do 32-bit unsigned int loads.
3198 wH - Altivec register if SImode is allowed in VSX registers.
3199 wI - Float register if SImode is allowed in VSX registers.
3200 wJ - Float register if QImode/HImode are allowed in VSX registers.
3201 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
3202
3203 if (TARGET_HARD_FLOAT)
3204 {
3205 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3206 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3207 }
3208
3209 if (TARGET_VSX)
3210 {
3211 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3212 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3213 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3214 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3215 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3216 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3217 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3218 }
3219
3220 /* Add conditional constraints based on various options, to allow us to
3221 collapse multiple insn patterns. */
3222 if (TARGET_ALTIVEC)
3223 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3224
3225 if (TARGET_MFPGPR) /* DFmode */
3226 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3227
3228 if (TARGET_LFIWAX)
3229 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3230
3231 if (TARGET_DIRECT_MOVE)
3232 {
3233 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3234 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3235 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3236 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3237 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3238 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3239 }
3240
3241 if (TARGET_POWERPC64)
3242 {
3243 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3244 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3245 }
3246
3247 if (TARGET_P8_VECTOR) /* SFmode */
3248 {
3249 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3250 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3251 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3252 }
3253 else if (TARGET_VSX)
3254 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3255
3256 if (TARGET_STFIWX)
3257 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3258
3259 if (TARGET_LFIWZX)
3260 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3261
3262 if (TARGET_FLOAT128_TYPE)
3263 {
3264 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3265 if (FLOAT128_IEEE_P (TFmode))
3266 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3267 }
3268
3269 if (TARGET_P9_VECTOR)
3270 {
3271 /* Support for new D-form instructions. */
3272 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3273
3274 /* Support for ISA 3.0 (power9) vectors. */
3275 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3276 }
3277
3278 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3279 if (TARGET_DIRECT_MOVE_128)
3280 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3281
3282 /* Support small integers in VSX registers. */
3283 if (TARGET_P8_VECTOR)
3284 {
3285 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3286 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3287 if (TARGET_P9_VECTOR)
3288 {
3289 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3290 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3291 }
3292 }
3293
3294 /* Set up the reload helper and direct move functions. */
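/* Editor's note (illustrative, not from the original source): the CODE_FOR_*
   values below follow the insn naming convention
   reload_<mode>_<di|si>_<store|load>, where di/si reflects Pmode; e.g.
   CODE_FOR_reload_v16qi_di_store names the expander used to spill a
   V16QImode value when pointers are DImode.  */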
3295 if (TARGET_VSX || TARGET_ALTIVEC)
3296 {
3297 if (TARGET_64BIT)
3298 {
3299 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3300 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3301 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3302 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3303 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3304 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3305 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3306 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3307 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3308 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3309 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3310 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3311 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3312 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3313 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3314 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3315 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3316 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3317 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3318 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3319
3320 if (FLOAT128_VECTOR_P (KFmode))
3321 {
3322 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3323 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3324 }
3325
3326 if (FLOAT128_VECTOR_P (TFmode))
3327 {
3328 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3329 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3330 }
3331
3332 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3333 available. */
3334 if (TARGET_NO_SDMODE_STACK)
3335 {
3336 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3337 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3338 }
3339
3340 if (TARGET_VSX)
3341 {
3342 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3343 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3344 }
3345
3346 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3347 {
3348 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3349 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3350 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3351 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3352 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3353 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3354 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3355 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3356 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3357
3358 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3359 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3360 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3361 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3362 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3363 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3364 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3365 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3366 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3367
3368 if (FLOAT128_VECTOR_P (KFmode))
3369 {
3370 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3371 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3372 }
3373
3374 if (FLOAT128_VECTOR_P (TFmode))
3375 {
3376 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3377 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3378 }
3379 }
3380 }
3381 else
3382 {
3383 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3384 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3385 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3386 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3387 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3388 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3389 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3390 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3391 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3392 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3393 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3394 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3395 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3396 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3397 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3398 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3399 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3400 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3401 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3402 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3403
3404 if (FLOAT128_VECTOR_P (KFmode))
3405 {
3406 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3407 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3408 }
3409
3410 if (FLOAT128_IEEE_P (TFmode))
3411 {
3412 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3413 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3414 }
3415
3416 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3417 available. */
3418 if (TARGET_NO_SDMODE_STACK)
3419 {
3420 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3421 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3422 }
3423
3424 if (TARGET_VSX)
3425 {
3426 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3427 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3428 }
3429
3430 if (TARGET_DIRECT_MOVE)
3431 {
3432 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3433 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3434 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3435 }
3436 }
3437
3438 reg_addr[DFmode].scalar_in_vmx_p = true;
3439 reg_addr[DImode].scalar_in_vmx_p = true;
3440
3441 if (TARGET_P8_VECTOR)
3442 {
3443 reg_addr[SFmode].scalar_in_vmx_p = true;
3444 reg_addr[SImode].scalar_in_vmx_p = true;
3445
3446 if (TARGET_P9_VECTOR)
3447 {
3448 reg_addr[HImode].scalar_in_vmx_p = true;
3449 reg_addr[QImode].scalar_in_vmx_p = true;
3450 }
3451 }
3452 }
3453
3454 /* Precalculate HARD_REGNO_NREGS. */
3455 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3456 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3457 rs6000_hard_regno_nregs[m][r]
3458 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
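/* Worked example (editor's sketch, assuming 32-bit GPRs of 4 bytes): an
   8-byte DFmode value needs two general registers, so
   rs6000_hard_regno_nregs[DFmode][0] would be 2, while the same mode fits
   in a single 16-byte VSX register.  */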
3459
3460 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3461 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3462 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3463 rs6000_hard_regno_mode_ok_p[m][r]
3464 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3465
3466 /* Precalculate CLASS_MAX_NREGS sizes. */
3467 for (c = 0; c < LIM_REG_CLASSES; ++c)
3468 {
3469 int reg_size;
3470
3471 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3472 reg_size = UNITS_PER_VSX_WORD;
3473
3474 else if (c == ALTIVEC_REGS)
3475 reg_size = UNITS_PER_ALTIVEC_WORD;
3476
3477 else if (c == FLOAT_REGS)
3478 reg_size = UNITS_PER_FP_WORD;
3479
3480 else
3481 reg_size = UNITS_PER_WORD;
3482
3483 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3484 {
3485 machine_mode m2 = (machine_mode)m;
3486 int reg_size2 = reg_size;
3487
3488 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3489 in VSX. */
3490 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3491 reg_size2 = UNITS_PER_FP_WORD;
3492
3493 rs6000_class_max_nregs[m][c]
3494 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3495 }
3496 }
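/* Worked example of the rounding above (editor's sketch): for IBM extended
   double (16 bytes) in a VSX register class, FLOAT128_2REG_P forces
   reg_size2 down to UNITS_PER_FP_WORD == 8, so the mode gets
   (16 + 8 - 1) / 8 == 2 registers rather than 1.  */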
3497
3498 /* Calculate which modes to automatically generate code to use the
3499 reciprocal divide and square root instructions. In the future, possibly
3500 automatically generate the instructions even if the user did not specify
3501 -mrecip. The double-precision reciprocal sqrt estimate on older machines
3502 is not accurate enough. */
3503 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3504 if (TARGET_FRES)
3505 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3506 if (TARGET_FRE)
3507 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3508 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3509 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3510 if (VECTOR_UNIT_VSX_P (V2DFmode))
3511 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3512
3513 if (TARGET_FRSQRTES)
3514 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3515 if (TARGET_FRSQRTE)
3516 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3517 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3518 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3519 if (VECTOR_UNIT_VSX_P (V2DFmode))
3520 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3521
3522 if (rs6000_recip_control)
3523 {
3524 if (!flag_finite_math_only)
3525 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math-only",
3526 "-ffast-math");
3527 if (flag_trapping_math)
3528 warning (0, "%qs requires %qs or %qs", "-mrecip",
3529 "-fno-trapping-math", "-ffast-math");
3530 if (!flag_reciprocal_math)
3531 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3532 "-ffast-math");
3533 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3534 {
3535 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3536 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3537 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3538
3539 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3540 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3541 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3542
3543 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3544 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3545 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3546
3547 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3548 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3549 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3550
3551 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3552 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3553 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3554
3555 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3556 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3557 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3558
3559 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3560 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3561 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3562
3563 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3564 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3565 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3566 }
3567 }
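/* Usage sketch (editor's addition): a command line such as
     -mrecip=div,rsqrt -ffast-math
   passes the checks above (-ffast-math implies -ffinite-math-only,
   -fno-trapping-math and -freciprocal-math), so the RS6000_RECIP_MASK_AUTO_*
   bits are set and divisions/square roots may use the estimate insns.  */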
3568
3569 /* Update the addr mask bits in reg_addr to help secondary reload and the
3570 legitimate-address checks figure out the appropriate addressing to
3571 use. */
3572 rs6000_setup_reg_addr_masks ();
3573
3574 if (global_init_p || TARGET_DEBUG_TARGET)
3575 {
3576 if (TARGET_DEBUG_REG)
3577 rs6000_debug_reg_global ();
3578
3579 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3580 fprintf (stderr,
3581 "SImode variable mult cost = %d\n"
3582 "SImode constant mult cost = %d\n"
3583 "SImode short constant mult cost = %d\n"
3584 "DImode multiplication cost = %d\n"
3585 "SImode division cost = %d\n"
3586 "DImode division cost = %d\n"
3587 "Simple fp operation cost = %d\n"
3588 "DFmode multiplication cost = %d\n"
3589 "SFmode division cost = %d\n"
3590 "DFmode division cost = %d\n"
3591 "cache line size = %d\n"
3592 "l1 cache size = %d\n"
3593 "l2 cache size = %d\n"
3594 "simultaneous prefetches = %d\n"
3595 "\n",
3596 rs6000_cost->mulsi,
3597 rs6000_cost->mulsi_const,
3598 rs6000_cost->mulsi_const9,
3599 rs6000_cost->muldi,
3600 rs6000_cost->divsi,
3601 rs6000_cost->divdi,
3602 rs6000_cost->fp,
3603 rs6000_cost->dmul,
3604 rs6000_cost->sdiv,
3605 rs6000_cost->ddiv,
3606 rs6000_cost->cache_line_size,
3607 rs6000_cost->l1_cache_size,
3608 rs6000_cost->l2_cache_size,
3609 rs6000_cost->simultaneous_prefetches);
3610 }
3611 }
3612
3613 #if TARGET_MACHO
3614 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3615
3616 static void
3617 darwin_rs6000_override_options (void)
3618 {
3619 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3620 off. */
3621 rs6000_altivec_abi = 1;
3622 TARGET_ALTIVEC_VRSAVE = 1;
3623 rs6000_current_abi = ABI_DARWIN;
3624
3625 if (DEFAULT_ABI == ABI_DARWIN
3626 && TARGET_64BIT)
3627 darwin_one_byte_bool = 1;
3628
3629 if (TARGET_64BIT && ! TARGET_POWERPC64)
3630 {
3631 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3632 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3633 }
3634 if (flag_mkernel)
3635 {
3636 rs6000_default_long_calls = 1;
3637 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3638 }
3639
3640 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3641 Altivec. */
3642 if (!flag_mkernel && !flag_apple_kext
3643 && TARGET_64BIT
3644 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3645 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3646
3647 /* Unless the user (not the configurer) has explicitly overridden
3648 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to G4,
3649 except when targeting the kernel. */
3650 if (!flag_mkernel
3651 && !flag_apple_kext
3652 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3653 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3654 && ! global_options_set.x_rs6000_cpu_index)
3655 {
3656 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3657 }
3658 }
3659 #endif
3660
3661 /* If not otherwise specified by a target, make 'long double' equivalent to
3662 'double'. */
3663
3664 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3665 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3666 #endif
3667
3668 /* Return the builtin mask of the various options that could affect which
3669 builtins are enabled. In the past we used target_flags, but we've run out
3670 of bits, and some options are no longer in target_flags. */
3671
3672 HOST_WIDE_INT
3673 rs6000_builtin_mask_calculate (void)
3674 {
3675 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3676 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3677 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3678 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3679 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3680 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3681 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3682 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3683 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3684 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3685 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3686 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3687 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3688 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3689 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3690 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3691 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3692 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3693 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3694 | ((TARGET_LONG_DOUBLE_128
3695 && TARGET_HARD_FLOAT
3696 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3697 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3698 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3699 }
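/* Usage sketch (editor's addition, illustrative only): callers can test the
   result for a feature bit before making a builtin available, e.g.

     HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
     bool have_altivec_builtins = (mask & RS6000_BTM_ALTIVEC) != 0;

   The real consumers are the builtin initialization and expansion routines
   elsewhere in this file.  */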
3700
3701 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3702 to clobber the XER[CA] bit because clobbering that bit without telling
3703 the compiler worked just fine with versions of GCC before GCC 5, and
3704 breaking a lot of older code in ways that are hard to track down is
3705 not such a great idea. */
3706
3707 static rtx_insn *
3708 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3709 vec<const char *> &/*constraints*/,
3710 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3711 {
3712 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3713 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3714 return NULL;
3715 }
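/* Example of the problem this avoids (editor's sketch): inline asm such as

     asm ("addic %0,%1,1" : "=r" (x) : "r" (y));

   modifies XER[CA] without declaring it; the implicit clobber pushed above
   keeps such pre-GCC-5 code working.  */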
3716
3717 /* Override command line options.
3718
3719 Combine build-specific configuration information with options
3720 specified on the command line to set various state variables which
3721 influence code generation, optimization, and expansion of built-in
3722 functions. Ensure that command-line configuration preferences are
3723 compatible with each other and with the build configuration; issue
3724 warnings while adjusting configuration or error messages while
3725 rejecting configuration.
3726
3727 Upon entry to this function:
3728
3729 This function is called once at the beginning of
3730 compilation, and then again at the start and end of compiling
3731 each section of code that has a different configuration, as
3732 indicated, for example, by adding the
3733
3734 __attribute__((__target__("cpu=power9")))
3735
3736 qualifier to a function definition or, for example, by bracketing
3737 code between
3738
3739 #pragma GCC target("altivec")
3740
3741 and
3742
3743 #pragma GCC reset_options
3744
3745 directives. Parameter global_init_p is true for the initial
3746 invocation, which initializes global variables, and false for all
3747 subsequent invocations.
3748
3749
3750 Various global state information is assumed to be valid. This
3751 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3752 default CPU specified at build configure time, TARGET_DEFAULT,
3753 representing the default set of option flags for the default
3754 target, and global_options_set.x_rs6000_isa_flags, representing
3755 which options were requested on the command line.
3756
3757 Upon return from this function:
3758
3759 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3760 was set by name on the command line. Additionally, if certain
3761 attributes are automatically enabled or disabled by this function
3762 in order to assure compatibility between options and
3763 configuration, the flags associated with those attributes are
3764 also set. By setting these "explicit bits", we avoid the risk
3765 that other code might accidentally overwrite these particular
3766 attributes with "default values".
3767
3768 The various bits of rs6000_isa_flags are set to indicate the
3769 target options that have been selected for the most current
3770 compilation efforts. This has the effect of also turning on the
3771 associated TARGET_XXX values since these are macros which are
3772 generally defined to test the corresponding bit of the
3773 rs6000_isa_flags variable.
3774
3775 The variable rs6000_builtin_mask is set to represent the target
3776 options for the most current compilation efforts, consistent with
3777 the current contents of rs6000_isa_flags. This variable controls
3778 expansion of built-in functions.
3779
3780 Various other global variables and fields of global structures
3781 (over 50 in all) are initialized to reflect the desired options
3782 for the most current compilation efforts. */
3783
3784 static bool
3785 rs6000_option_override_internal (bool global_init_p)
3786 {
3787 bool ret = true;
3788
3789 HOST_WIDE_INT set_masks;
3790 HOST_WIDE_INT ignore_masks;
3791 int cpu_index = -1;
3792 int tune_index;
3793 struct cl_target_option *main_target_opt
3794 = ((global_init_p || target_option_default_node == NULL)
3795 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3796
3797 /* Print defaults. */
3798 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3799 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3800
3801 /* Remember the explicit arguments. */
3802 if (global_init_p)
3803 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3804
3805 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3806 library functions, so warn about it. The flag may be useful for
3807 performance studies from time to time though, so don't disable it
3808 entirely. */
3809 if (global_options_set.x_rs6000_alignment_flags
3810 && rs6000_alignment_flags == MASK_ALIGN_POWER
3811 && DEFAULT_ABI == ABI_DARWIN
3812 && TARGET_64BIT)
3813 warning (0, "%qs is not supported for 64-bit Darwin;"
3814 " it is incompatible with the installed C and C++ libraries",
3815 "-malign-power");
3816
3817 /* Numerous experiments show that IRA-based loop pressure
3818 calculation works better for RTL loop invariant motion on targets
3819 with enough (>= 32) registers. Since it is an expensive optimization,
3820 it is enabled only when optimizing for peak performance. */
3821 if (optimize >= 3 && global_init_p
3822 && !global_options_set.x_flag_ira_loop_pressure)
3823 flag_ira_loop_pressure = 1;
3824
3825 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3826 for tracebacks to be complete, but not if an -fasynchronous-unwind-tables
3827 option was already specified. */
3828 if (flag_sanitize & SANITIZE_USER_ADDRESS
3829 && !global_options_set.x_flag_asynchronous_unwind_tables)
3830 flag_asynchronous_unwind_tables = 1;
3831
3832 /* Set the pointer size. */
3833 if (TARGET_64BIT)
3834 {
3835 rs6000_pmode = DImode;
3836 rs6000_pointer_size = 64;
3837 }
3838 else
3839 {
3840 rs6000_pmode = SImode;
3841 rs6000_pointer_size = 32;
3842 }
3843
3844 /* Some OSs don't support saving the high part of 64-bit registers on context
3845 switch. Other OSs don't support saving Altivec registers. On those OSs,
3846 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3847 if the user wants either, the user must explicitly specify them and we
3848 won't interfere with the user's specification. */
3849
3850 set_masks = POWERPC_MASKS;
3851 #ifdef OS_MISSING_POWERPC64
3852 if (OS_MISSING_POWERPC64)
3853 set_masks &= ~OPTION_MASK_POWERPC64;
3854 #endif
3855 #ifdef OS_MISSING_ALTIVEC
3856 if (OS_MISSING_ALTIVEC)
3857 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3858 | OTHER_VSX_VECTOR_MASKS);
3859 #endif
3860
3861 /* Don't let the processor default override options given explicitly. */
3862 set_masks &= ~rs6000_isa_flags_explicit;
3863
3864 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3865 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3866
3867 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3868 the cpu in a target attribute or pragma, but did not specify a tuning
3869 option, use the cpu for the tuning option rather than the option specified
3870 with -mtune on the command line. Process a '--with-cpu' configuration
3871 request as an implicit -mcpu. */
3872 if (rs6000_cpu_index >= 0)
3873 cpu_index = rs6000_cpu_index;
3874 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3875 cpu_index = main_target_opt->x_rs6000_cpu_index;
3876 else if (OPTION_TARGET_CPU_DEFAULT)
3877 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3878
3879 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3880 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3881 with those from the cpu, except for options that were explicitly set. If
3882 we don't have a cpu, do not override the target bits set in
3883 TARGET_DEFAULT. */
3884 if (cpu_index >= 0)
3885 {
3886 rs6000_cpu_index = cpu_index;
3887 rs6000_isa_flags &= ~set_masks;
3888 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3889 & set_masks);
3890 }
3891 else
3892 {
3893 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3894 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3895 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. Now that we have
3896 switched to using rs6000_isa_flags, we need to do the initialization here.
3897
3898 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3899 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3900 HOST_WIDE_INT flags;
3901 if (TARGET_DEFAULT)
3902 flags = TARGET_DEFAULT;
3903 else
3904 {
3905 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3906 const char *default_cpu = (!TARGET_POWERPC64
3907 ? "powerpc"
3908 : (BYTES_BIG_ENDIAN
3909 ? "powerpc64"
3910 : "powerpc64le"));
3911 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3912 flags = processor_target_table[default_cpu_index].target_enable;
3913 }
3914 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3915 }
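/* For example (editor's note): a 64-bit little-endian compiler configured
   with no TARGET_DEFAULT falls back to the "powerpc64le" entry of
   processor_target_table, per the BYTES_BIG_ENDIAN test above.  */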
3916
3917 if (rs6000_tune_index >= 0)
3918 tune_index = rs6000_tune_index;
3919 else if (cpu_index >= 0)
3920 rs6000_tune_index = tune_index = cpu_index;
3921 else
3922 {
3923 size_t i;
3924 enum processor_type tune_proc
3925 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3926
3927 tune_index = -1;
3928 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3929 if (processor_target_table[i].processor == tune_proc)
3930 {
3931 tune_index = i;
3932 break;
3933 }
3934 }
3935
3936 if (cpu_index >= 0)
3937 rs6000_cpu = processor_target_table[cpu_index].processor;
3938 else
3939 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3940
3941 gcc_assert (tune_index >= 0);
3942 rs6000_tune = processor_target_table[tune_index].processor;
3943
3944 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3945 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3946 || rs6000_cpu == PROCESSOR_PPCE5500)
3947 {
3948 if (TARGET_ALTIVEC)
3949 error ("AltiVec not supported in this target");
3950 }
3951
3952 /* If we are optimizing big endian systems for space, use the load/store
3953 multiple instructions. */
3954 if (BYTES_BIG_ENDIAN && optimize_size)
3955 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3956
3957 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3958 because the hardware doesn't support the instructions used in little
3959 endian mode and they cause an alignment trap. The 750 does not cause an
3960 alignment trap (except when the target address is unaligned). */
3961
3962 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3963 {
3964 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3965 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3966 warning (0, "%qs is not supported on little endian systems",
3967 "-mmultiple");
3968 }
3969
3970 /* If little-endian, default to -mstrict-align on older processors.
3971 Testing for htm matches power8 and later. */
3972 if (!BYTES_BIG_ENDIAN
3973 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3974 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3975
3976 if (!rs6000_fold_gimple)
3977 fprintf (stderr,
3978 "gimple folding of rs6000 builtins has been disabled.\n");
3979
3980 /* Add some warnings for VSX. */
3981 if (TARGET_VSX)
3982 {
3983 const char *msg = NULL;
3984 if (!TARGET_HARD_FLOAT)
3985 {
3986 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3987 msg = N_("%<-mvsx%> requires hardware floating point");
3988 else
3989 {
3990 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3991 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3992 }
3993 }
3994 else if (TARGET_AVOID_XFORM > 0)
3995 msg = N_("%<-mvsx%> needs indexed addressing");
3996 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3997 & OPTION_MASK_ALTIVEC))
3998 {
3999 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4000 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
4001 else
4002 msg = N_("%<-mno-altivec%> disables vsx");
4003 }
4004
4005 if (msg)
4006 {
4007 warning (0, msg);
4008 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4009 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4010 }
4011 }
4012
4013 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4014 the -mcpu setting to enable options that conflict. */
4015 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4016 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4017 | OPTION_MASK_ALTIVEC
4018 | OPTION_MASK_VSX)) != 0)
4019 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4020 | OPTION_MASK_DIRECT_MOVE)
4021 & ~rs6000_isa_flags_explicit);
4022
4023 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4024 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4025
4026 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
4027 off all of the options that depend on those flags. */
4028 ignore_masks = rs6000_disable_incompatible_switches ();
4029
4030 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4031 unless the user explicitly used the -mno-<option> to disable the code. */
4032 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
4033 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4034 else if (TARGET_P9_MINMAX)
4035 {
4036 if (cpu_index >= 0)
4037 {
4038 if (cpu_index == PROCESSOR_POWER9)
4039 {
4040 /* Legacy behavior: allow -mcpu=power9 with certain
4041 capabilities explicitly disabled. */
4042 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4043 }
4044 else
4045 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4046 "for <xxx> less than power9", "-mcpu");
4047 }
4048 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4049 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4050 & rs6000_isa_flags_explicit))
4051 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4052 were explicitly cleared. */
4053 error ("%qs incompatible with explicitly disabled options",
4054 "-mpower9-minmax");
4055 else
4056 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4057 }
4058 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4059 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4060 else if (TARGET_VSX)
4061 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4062 else if (TARGET_POPCNTD)
4063 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4064 else if (TARGET_DFP)
4065 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4066 else if (TARGET_CMPB)
4067 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4068 else if (TARGET_FPRND)
4069 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4070 else if (TARGET_POPCNTB)
4071 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4072 else if (TARGET_ALTIVEC)
4073 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4074
4075 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4076 {
4077 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4078 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4079 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4080 }
4081
4082 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4083 {
4084 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4085 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4086 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4087 }
4088
4089 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4090 {
4091 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4092 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4093 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4094 }
4095
4096 if (TARGET_P8_VECTOR && !TARGET_VSX)
4097 {
4098 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4099 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4100 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4101 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4102 {
4103 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4104 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4105 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4106 }
4107 else
4108 {
4109 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4110 not explicit. */
4111 rs6000_isa_flags |= OPTION_MASK_VSX;
4112 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4113 }
4114 }
4115
4116 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4117 {
4118 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4119 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4120 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4121 }
4122
4123 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4124 silently turn off quad memory mode. */
4125 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4126 {
4127 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4128 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
4129
4130 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4131 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
4132
4133 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4134 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4135 }
4136
4137 /* Non-atomic quad memory load/store instructions are disabled for little
4138 endian, since the words are reversed, but atomic operations can still be
4139 done by swapping the words. */
4140 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4141 {
4142 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4143 warning (0, N_("%<-mquad-memory%> is not available in little endian "
4144 "mode"));
4145
4146 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4147 }
4148
4149 /* Assume that if the user asked for normal quad memory instructions, they
4150 want the atomic versions as well, unless they explicitly told us not to
4151 use quad word atomic instructions. */
4152 if (TARGET_QUAD_MEMORY
4153 && !TARGET_QUAD_MEMORY_ATOMIC
4154 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4155 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4156
4157 /* If we can shrink-wrap the TOC register save separately, then use
4158 -msave-toc-indirect unless explicitly disabled. */
4159 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4160 && flag_shrink_wrap_separate
4161 && optimize_function_for_speed_p (cfun))
4162 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4163
4164 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4165 generating power8 instructions. Power9 does not optimize power8 fusion
4166 cases. */
4167 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4168 {
4169 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4170 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4171 else
4172 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4173 }
4174
4175 /* Setting additional fusion flags turns on base fusion. */
4176 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4177 {
4178 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4179 {
4180 if (TARGET_P8_FUSION_SIGN)
4181 error ("%qs requires %qs", "-mpower8-fusion-sign",
4182 "-mpower8-fusion");
4183
4184 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4185 }
4186 else
4187 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4188 }
4189
4190 /* Power8 does not fuse sign extended loads with the addis. If we are
4191 optimizing at high levels for speed, convert a sign extended load into a
4192 zero extending load, and an explicit sign extension. */
4193 if (TARGET_P8_FUSION
4194 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4195 && optimize_function_for_speed_p (cfun)
4196 && optimize >= 3)
4197 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
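/* Concretely (editor's note): this turns e.g. a sign-extending lwa, which
   power8 cannot fuse with the preceding addis, into a fusible lwz followed
   by an explicit extsw.  */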
4198
4199 /* ISA 3.0 vector instructions include ISA 2.07. */
4200 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4201 {
4202 /* We prefer not to mention undocumented options in
4203 error messages. However, if users have managed to select
4204 power9-vector without selecting power8-vector, they
4205 already know about undocumented flags. */
4206 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4207 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4208 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4209 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4210 {
4211 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4212 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4213 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4214 }
4215 else
4216 {
4217 /* OPTION_MASK_P9_VECTOR is explicit and
4218 OPTION_MASK_P8_VECTOR is not explicit. */
4219 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4220 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4221 }
4222 }
4223
4224 /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
4225 support. If we only have ISA 2.06 support and the user did not specify
4226 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4227 but the full vectorization support is not. */
4228 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4229 TARGET_ALLOW_MOVMISALIGN = 1;
4230
4231 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4232 {
4233 if (TARGET_ALLOW_MOVMISALIGN > 0
4234 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4235 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4236
4237 TARGET_ALLOW_MOVMISALIGN = 0;
4238 }
4239
4240 /* Determine when unaligned vector accesses are permitted, and when
4241 they are preferred over masked Altivec loads. Note that if
4242 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4243 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4244 not true. */
4245 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4246 {
4247 if (!TARGET_VSX)
4248 {
4249 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4250 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4251
4252 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4253 }
4254
4255 else if (!TARGET_ALLOW_MOVMISALIGN)
4256 {
4257 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4258 error ("%qs requires %qs", "-mefficient-unaligned-vsx",
4259 "-mallow-movmisalign");
4260
4261 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4262 }
4263 }
4264
4265 /* Use the long double size to select the appropriate long double type. We
4266 use TYPE_PRECISION to differentiate the 3 different long double types. We
4267 map 128 into the precision used for TFmode. */
4268 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4269 ? 64
4270 : FLOAT_PRECISION_TFmode);
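/* Editor's note: the three TYPE_PRECISION values distinguish plain 64-bit
   double, IBM extended double-double, and IEEE 128-bit binary128;
   FLOAT_PRECISION_TFmode is the precision used for the 128-bit case.  */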
4271
4272 /* Set long double size before the IEEE 128-bit tests. */
4273 if (!global_options_set.x_rs6000_long_double_type_size)
4274 {
4275 if (main_target_opt != NULL
4276 && (main_target_opt->x_rs6000_long_double_type_size
4277 != default_long_double_size))
4278 error ("target attribute or pragma changes long double size");
4279 else
4280 rs6000_long_double_type_size = default_long_double_size;
4281 }
4282 else if (rs6000_long_double_type_size == 128)
4283 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4284 else if (global_options_set.x_rs6000_ieeequad)
4285 {
4286 if (global_options.x_rs6000_ieeequad)
4287 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4288 else
4289 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4290 }
4291
4292 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4293 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4294 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4295 those systems will not pick up this default. Warn if the user changes the
4296 default unless -Wno-psabi. */
4297 if (!global_options_set.x_rs6000_ieeequad)
4298 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4299
4300 else
4301 {
4302 if (global_options.x_rs6000_ieeequad
4303 && (!TARGET_POPCNTD || !TARGET_VSX))
4304 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4305
4306 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4307 {
4308 static bool warned_change_long_double;
4309 if (!warned_change_long_double)
4310 {
4311 warned_change_long_double = true;
4312 if (TARGET_IEEEQUAD)
4313 warning (OPT_Wpsabi, "Using IEEE extended precision long double");
4314 else
4315 warning (OPT_Wpsabi, "Using IBM extended precision long double");
4316 }
4317 }
4318 }
4319
4320 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4321 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4322 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4323 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4324 the keyword and the type. */
4325 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4326
4327 /* IEEE 128-bit floating point requires VSX support. */
4328 if (TARGET_FLOAT128_KEYWORD)
4329 {
4330 if (!TARGET_VSX)
4331 {
4332 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4333 error ("%qs requires VSX support", "%<-mfloat128%>");
4334
4335 TARGET_FLOAT128_TYPE = 0;
4336 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4337 | OPTION_MASK_FLOAT128_HW);
4338 }
4339 else if (!TARGET_FLOAT128_TYPE)
4340 {
4341 TARGET_FLOAT128_TYPE = 1;
4342 warning (0, "The %<-mfloat128%> option may not be fully supported");
4343 }
4344 }
4345
4346 /* Enable the __float128 keyword under Linux by default. */
4347 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4348 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4349 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4350
4351 /* If we are supporting the float128 type and have full ISA 3.0 support,
4352 enable -mfloat128-hardware by default. However, don't enable it if
4353 -mfloat128-hardware was explicitly turned off. 64-bit mode is needed
4354 because sometimes the compiler wants to put things in an integer
4355 container, and if we don't have __int128 support, that is impossible. */
4356 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4357 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4358 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4359 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4360
4361 if (TARGET_FLOAT128_HW
4362 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4363 {
4364 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4365 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4366
4367 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4368 }
4369
4370 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4371 {
4372 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4373 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4374
4375 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4376 }
4377
4378 /* Print the options after updating the defaults. */
4379 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4380 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4381
4382 /* E500mc does "better" if we inline more aggressively. Respect the
4383 user's opinion, though. */
4384 if (rs6000_block_move_inline_limit == 0
4385 && (rs6000_tune == PROCESSOR_PPCE500MC
4386 || rs6000_tune == PROCESSOR_PPCE500MC64
4387 || rs6000_tune == PROCESSOR_PPCE5500
4388 || rs6000_tune == PROCESSOR_PPCE6500))
4389 rs6000_block_move_inline_limit = 128;
4390
4391 /* store_one_arg depends on expand_block_move to handle at least the
4392 size of reg_parm_stack_space. */
4393 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4394 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4395
4396 if (global_init_p)
4397 {
4398 /* If the appropriate debug option is enabled, replace the target hooks
4399 with debug versions that call the real version and then print
4400 debugging information. */
4401 if (TARGET_DEBUG_COST)
4402 {
4403 targetm.rtx_costs = rs6000_debug_rtx_costs;
4404 targetm.address_cost = rs6000_debug_address_cost;
4405 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4406 }
4407
4408 if (TARGET_DEBUG_ADDR)
4409 {
4410 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4411 targetm.legitimize_address = rs6000_debug_legitimize_address;
4412 rs6000_secondary_reload_class_ptr
4413 = rs6000_debug_secondary_reload_class;
4414 targetm.secondary_memory_needed
4415 = rs6000_debug_secondary_memory_needed;
4416 targetm.can_change_mode_class
4417 = rs6000_debug_can_change_mode_class;
4418 rs6000_preferred_reload_class_ptr
4419 = rs6000_debug_preferred_reload_class;
4420 rs6000_mode_dependent_address_ptr
4421 = rs6000_debug_mode_dependent_address;
4422 }
4423
4424 if (rs6000_veclibabi_name)
4425 {
4426 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4427 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4428 else
4429 {
4430 error ("unknown vectorization library ABI type (%qs) for "
4431 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4432 ret = false;
4433 }
4434 }
4435 }
4436
4437 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4438 target attribute or pragma which automatically enables both options,
4439 unless the altivec ABI was set. The AltiVec ABI is set by default for
4440 64-bit, but not for 32-bit. */
4441 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4442 {
4443 TARGET_FLOAT128_TYPE = 0;
4444 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4445 | OPTION_MASK_FLOAT128_KEYWORD)
4446 & ~rs6000_isa_flags_explicit);
4447 }
4448
4449 /* Enable Altivec ABI for AIX -maltivec. */
4450 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4451 {
4452 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4453 error ("target attribute or pragma changes AltiVec ABI");
4454 else
4455 rs6000_altivec_abi = 1;
4456 }
4457
4458 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4459 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4460 be explicitly overridden in either case. */
4461 if (TARGET_ELF)
4462 {
4463 if (!global_options_set.x_rs6000_altivec_abi
4464 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4465 {
4466 if (main_target_opt != NULL
4467 && !main_target_opt->x_rs6000_altivec_abi)
4468 error ("target attribute or pragma changes AltiVec ABI");
4469 else
4470 rs6000_altivec_abi = 1;
4471 }
4472 }
4473
4474 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4475 So far, the only darwin64 targets are also Mach-O. */
4476 if (TARGET_MACHO
4477 && DEFAULT_ABI == ABI_DARWIN
4478 && TARGET_64BIT)
4479 {
4480 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4481 error ("target attribute or pragma changes darwin64 ABI");
4482 else
4483 {
4484 rs6000_darwin64_abi = 1;
4485 /* Default to natural alignment, for better performance. */
4486 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4487 }
4488 }
4489
4490 /* Place FP constants in the constant pool instead of the TOC
4491 if section anchors are enabled. */
4492 if (flag_section_anchors
4493 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4494 TARGET_NO_FP_IN_TOC = 1;
4495
4496 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4497 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4498
4499 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4500 SUBTARGET_OVERRIDE_OPTIONS;
4501 #endif
4502 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4503 SUBSUBTARGET_OVERRIDE_OPTIONS;
4504 #endif
4505 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4506 SUB3TARGET_OVERRIDE_OPTIONS;
4507 #endif
4508
4509 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4510 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4511
4512 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4513 && rs6000_tune != PROCESSOR_POWER5
4514 && rs6000_tune != PROCESSOR_POWER6
4515 && rs6000_tune != PROCESSOR_POWER7
4516 && rs6000_tune != PROCESSOR_POWER8
4517 && rs6000_tune != PROCESSOR_POWER9
4518 && rs6000_tune != PROCESSOR_PPCA2
4519 && rs6000_tune != PROCESSOR_CELL
4520 && rs6000_tune != PROCESSOR_PPC476);
4521 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4522 || rs6000_tune == PROCESSOR_POWER5
4523 || rs6000_tune == PROCESSOR_POWER7
4524 || rs6000_tune == PROCESSOR_POWER8);
4525 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4526 || rs6000_tune == PROCESSOR_POWER5
4527 || rs6000_tune == PROCESSOR_POWER6
4528 || rs6000_tune == PROCESSOR_POWER7
4529 || rs6000_tune == PROCESSOR_POWER8
4530 || rs6000_tune == PROCESSOR_POWER9
4531 || rs6000_tune == PROCESSOR_PPCE500MC
4532 || rs6000_tune == PROCESSOR_PPCE500MC64
4533 || rs6000_tune == PROCESSOR_PPCE5500
4534 || rs6000_tune == PROCESSOR_PPCE6500);
4535
4536 /* Allow debug switches to override the above settings. These are set to -1
4537 in rs6000.opt to indicate the user hasn't directly set the switch. */
4538 if (TARGET_ALWAYS_HINT >= 0)
4539 rs6000_always_hint = TARGET_ALWAYS_HINT;
4540
4541 if (TARGET_SCHED_GROUPS >= 0)
4542 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4543
4544 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4545 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4546
4547 rs6000_sched_restricted_insns_priority
4548 = (rs6000_sched_groups ? 1 : 0);
4549
4550 /* Handle -msched-costly-dep option. */
4551 rs6000_sched_costly_dep
4552 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4553
4554 if (rs6000_sched_costly_dep_str)
4555 {
4556 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4557 rs6000_sched_costly_dep = no_dep_costly;
4558 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4559 rs6000_sched_costly_dep = all_deps_costly;
4560 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4561 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4562 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4563 rs6000_sched_costly_dep = store_to_load_dep_costly;
4564 else
4565 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4566 atoi (rs6000_sched_costly_dep_str));
4567 }
4568
4569 /* Handle -minsert-sched-nops option. */
4570 rs6000_sched_insert_nops
4571 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4572
4573 if (rs6000_sched_insert_nops_str)
4574 {
4575 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4576 rs6000_sched_insert_nops = sched_finish_none;
4577 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4578 rs6000_sched_insert_nops = sched_finish_pad_groups;
4579 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4580 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4581 else
4582 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4583 atoi (rs6000_sched_insert_nops_str));
4584 }
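/* Usage sketch (editor's addition): both scheduling string options accept
   either a keyword handled above or a raw enum number parsed by the atoi
   fallback, e.g. -msched-costly-dep=store_to_load or
   -minsert-sched-nops=regroup_exact.  */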
4585
4586 /* Handle the stack protector options. */
4587 if (!global_options_set.x_rs6000_stack_protector_guard)
4588 #ifdef TARGET_THREAD_SSP_OFFSET
4589 rs6000_stack_protector_guard = SSP_TLS;
4590 #else
4591 rs6000_stack_protector_guard = SSP_GLOBAL;
4592 #endif
4593
4594 #ifdef TARGET_THREAD_SSP_OFFSET
4595 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4596 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4597 #endif
4598
4599 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4600 {
4601 char *endp;
4602 const char *str = rs6000_stack_protector_guard_offset_str;
4603
4604 errno = 0;
4605 long offset = strtol (str, &endp, 0);
4606 if (!*str || *endp || errno)
4607 error ("%qs is not a valid number in %qs", str,
4608 "-mstack-protector-guard-offset=");
4609
4610 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4611 || (TARGET_64BIT && (offset & 3)))
4612 error ("%qs is not a valid offset in %qs", str,
4613 "-mstack-protector-guard-offset=");
4614
4615 rs6000_stack_protector_guard_offset = offset;
4616 }
4617
4618 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4619 {
4620 const char *str = rs6000_stack_protector_guard_reg_str;
4621 int reg = decode_reg_name (str);
4622
4623 if (!IN_RANGE (reg, 1, 31))
4624 error ("%qs is not a valid base register in %qs", str,
4625 "-mstack-protector-guard-reg=");
4626
4627 rs6000_stack_protector_guard_reg = reg;
4628 }
4629
4630 if (rs6000_stack_protector_guard == SSP_TLS
4631 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4632 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
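/* Usage sketch (editor's addition; the offset shown is illustrative only):
     -mstack-protector-guard=tls -mstack-protector-guard-reg=r13
     -mstack-protector-guard-offset=0x7010
   makes the canary load come from 0x7010(r13) instead of a global symbol,
   subject to the range and alignment checks above.  */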
4633
4634 if (global_init_p)
4635 {
4636 #ifdef TARGET_REGNAMES
4637 /* If the user desires alternate register names, copy in the
4638 alternate names now. */
4639 if (TARGET_REGNAMES)
4640 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4641 #endif
4642
4643 /* Set aix_struct_return last, after the ABI is determined.
4644 If -maix-struct-return or -msvr4-struct-return was explicitly
4645 used, don't override with the ABI default. */
4646 if (!global_options_set.x_aix_struct_return)
4647 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4648
4649 #if 0
4650 /* IBM XL compiler defaults to unsigned bitfields. */
4651 if (TARGET_XL_COMPAT)
4652 flag_signed_bitfields = 0;
4653 #endif
4654
4655 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4656 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4657
4658 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4659
4660 /* We can only guarantee the availability of DI pseudo-ops when
4661 assembling for 64-bit targets. */
4662 if (!TARGET_64BIT)
4663 {
4664 targetm.asm_out.aligned_op.di = NULL;
4665 targetm.asm_out.unaligned_op.di = NULL;
4666 }
4667
4668
4669 /* Set branch target alignment, if not optimizing for size. */
4670 if (!optimize_size)
4671 {
4672 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4673 8-byte aligned to avoid misprediction by the branch predictor. */
4674 if (rs6000_tune == PROCESSOR_TITAN
4675 || rs6000_tune == PROCESSOR_CELL)
4676 {
4677 if (flag_align_functions && !str_align_functions)
4678 str_align_functions = "8";
4679 if (flag_align_jumps && !str_align_jumps)
4680 str_align_jumps = "8";
4681 if (flag_align_loops && !str_align_loops)
4682 str_align_loops = "8";
4683 }
4684 if (rs6000_align_branch_targets)
4685 {
4686 if (flag_align_functions && !str_align_functions)
4687 str_align_functions = "16";
4688 if (flag_align_jumps && !str_align_jumps)
4689 str_align_jumps = "16";
4690 if (flag_align_loops && !str_align_loops)
4691 {
4692 can_override_loop_align = 1;
4693 str_align_loops = "16";
4694 }
4695 }
4696
4697 if (flag_align_jumps && !str_align_jumps)
4698 str_align_jumps = "16";
4699 if (flag_align_loops && !str_align_loops)
4700 str_align_loops = "16";
4701 }
4702
4703 /* Arrange to save and restore machine status around nested functions. */
4704 init_machine_status = rs6000_init_machine_status;
4705
4706 /* We should always be splitting complex arguments, but we can't break
4707 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4708 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4709 targetm.calls.split_complex_arg = NULL;
4710
4711 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4712 if (DEFAULT_ABI == ABI_AIX)
4713 targetm.calls.custom_function_descriptors = 0;
4714 }
4715
4716 /* Initialize rs6000_cost with the appropriate target costs. */
4717 if (optimize_size)
4718 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4719 else
4720 switch (rs6000_tune)
4721 {
4722 case PROCESSOR_RS64A:
4723 rs6000_cost = &rs64a_cost;
4724 break;
4725
4726 case PROCESSOR_MPCCORE:
4727 rs6000_cost = &mpccore_cost;
4728 break;
4729
4730 case PROCESSOR_PPC403:
4731 rs6000_cost = &ppc403_cost;
4732 break;
4733
4734 case PROCESSOR_PPC405:
4735 rs6000_cost = &ppc405_cost;
4736 break;
4737
4738 case PROCESSOR_PPC440:
4739 rs6000_cost = &ppc440_cost;
4740 break;
4741
4742 case PROCESSOR_PPC476:
4743 rs6000_cost = &ppc476_cost;
4744 break;
4745
4746 case PROCESSOR_PPC601:
4747 rs6000_cost = &ppc601_cost;
4748 break;
4749
4750 case PROCESSOR_PPC603:
4751 rs6000_cost = &ppc603_cost;
4752 break;
4753
4754 case PROCESSOR_PPC604:
4755 rs6000_cost = &ppc604_cost;
4756 break;
4757
4758 case PROCESSOR_PPC604e:
4759 rs6000_cost = &ppc604e_cost;
4760 break;
4761
4762 case PROCESSOR_PPC620:
4763 rs6000_cost = &ppc620_cost;
4764 break;
4765
4766 case PROCESSOR_PPC630:
4767 rs6000_cost = &ppc630_cost;
4768 break;
4769
4770 case PROCESSOR_CELL:
4771 rs6000_cost = &ppccell_cost;
4772 break;
4773
4774 case PROCESSOR_PPC750:
4775 case PROCESSOR_PPC7400:
4776 rs6000_cost = &ppc750_cost;
4777 break;
4778
4779 case PROCESSOR_PPC7450:
4780 rs6000_cost = &ppc7450_cost;
4781 break;
4782
4783 case PROCESSOR_PPC8540:
4784 case PROCESSOR_PPC8548:
4785 rs6000_cost = &ppc8540_cost;
4786 break;
4787
4788 case PROCESSOR_PPCE300C2:
4789 case PROCESSOR_PPCE300C3:
4790 rs6000_cost = &ppce300c2c3_cost;
4791 break;
4792
4793 case PROCESSOR_PPCE500MC:
4794 rs6000_cost = &ppce500mc_cost;
4795 break;
4796
4797 case PROCESSOR_PPCE500MC64:
4798 rs6000_cost = &ppce500mc64_cost;
4799 break;
4800
4801 case PROCESSOR_PPCE5500:
4802 rs6000_cost = &ppce5500_cost;
4803 break;
4804
4805 case PROCESSOR_PPCE6500:
4806 rs6000_cost = &ppce6500_cost;
4807 break;
4808
4809 case PROCESSOR_TITAN:
4810 rs6000_cost = &titan_cost;
4811 break;
4812
4813 case PROCESSOR_POWER4:
4814 case PROCESSOR_POWER5:
4815 rs6000_cost = &power4_cost;
4816 break;
4817
4818 case PROCESSOR_POWER6:
4819 rs6000_cost = &power6_cost;
4820 break;
4821
4822 case PROCESSOR_POWER7:
4823 rs6000_cost = &power7_cost;
4824 break;
4825
4826 case PROCESSOR_POWER8:
4827 rs6000_cost = &power8_cost;
4828 break;
4829
4830 case PROCESSOR_POWER9:
4831 rs6000_cost = &power9_cost;
4832 break;
4833
4834 case PROCESSOR_PPCA2:
4835 rs6000_cost = &ppca2_cost;
4836 break;
4837
4838 default:
4839 gcc_unreachable ();
4840 }
4841
4842 if (global_init_p)
4843 {
4844 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4845 rs6000_cost->simultaneous_prefetches,
4846 global_options.x_param_values,
4847 global_options_set.x_param_values);
4848 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4849 global_options.x_param_values,
4850 global_options_set.x_param_values);
4851 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4852 rs6000_cost->cache_line_size,
4853 global_options.x_param_values,
4854 global_options_set.x_param_values);
4855 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4856 global_options.x_param_values,
4857 global_options_set.x_param_values);
4858
4859 /* Increase loop peeling limits based on performance analysis. */
4860 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4861 global_options.x_param_values,
4862 global_options_set.x_param_values);
4863 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4864 global_options.x_param_values,
4865 global_options_set.x_param_values);
4866
4867 /* Use the 'model' -fsched-pressure algorithm by default. */
4868 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4869 SCHED_PRESSURE_MODEL,
4870 global_options.x_param_values,
4871 global_options_set.x_param_values);
4872
4873 /* If using typedef char *va_list, signal that
4874 __builtin_va_start (&ap, 0) can be optimized to
4875 ap = __builtin_next_arg (0). */
4876 if (DEFAULT_ABI != ABI_V4)
4877 targetm.expand_builtin_va_start = NULL;
4878 }
4879
4880 /* If not explicitly specified via option, decide whether to generate indexed
4881 load/store instructions. A value of -1 indicates that the
4882 initial value of this variable has not been overwritten. During
4883 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4884 if (TARGET_AVOID_XFORM == -1)
4885 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4886 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4887 need indexed accesses and the type used is the scalar type of the element
4888 being loaded or stored. */
4889 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4890 && !TARGET_ALTIVEC);
4891
4892 /* Set the -mrecip options. */
4893 if (rs6000_recip_name)
4894 {
4895 char *p = ASTRDUP (rs6000_recip_name);
4896 char *q;
4897 unsigned int mask, i;
4898 bool invert;
4899
4900 while ((q = strtok (p, ",")) != NULL)
4901 {
4902 p = NULL;
4903 if (*q == '!')
4904 {
4905 invert = true;
4906 q++;
4907 }
4908 else
4909 invert = false;
4910
4911 if (!strcmp (q, "default"))
4912 mask = ((TARGET_RECIP_PRECISION)
4913 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4914 else
4915 {
4916 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4917 if (!strcmp (q, recip_options[i].string))
4918 {
4919 mask = recip_options[i].mask;
4920 break;
4921 }
4922
4923 if (i == ARRAY_SIZE (recip_options))
4924 {
4925 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4926 invert = false;
4927 mask = 0;
4928 ret = false;
4929 }
4930 }
4931
4932 if (invert)
4933 rs6000_recip_control &= ~mask;
4934 else
4935 rs6000_recip_control |= mask;
4936 }
4937 }
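
/* For example, -mrecip=all,!rsqrtd enables all reciprocal estimate forms
   except the double-precision reciprocal square root; a leading '!'
   inverts (clears) the named mask bits.  */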
4938
4939 /* Set the builtin mask of the various options used that could affect which
4940 builtins were used. In the past we used target_flags, but we've run out
4941 of bits, and some options are no longer in target_flags. */
4942 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4943 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4944 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4945 rs6000_builtin_mask);
4946
4947 /* Initialize all of the registers. */
4948 rs6000_init_hard_regno_mode_ok (global_init_p);
4949
4950 /* Save the initial options in case the user uses function-specific options. */
4951 if (global_init_p)
4952 target_option_default_node = target_option_current_node
4953 = build_target_option_node (&global_options);
4954
4955 /* If not explicitly specified via option, decide whether to generate the
4956 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4957 if (TARGET_LINK_STACK == -1)
4958 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4959
4960 /* Deprecate use of -mno-speculate-indirect-jumps. */
4961 if (!rs6000_speculate_indirect_jumps)
4962 warning (0, "%qs is deprecated and not recommended in any circumstances",
4963 "-mno-speculate-indirect-jumps");
4964
4965 return ret;
4966 }
4967
4968 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4969 define the target cpu type. */
4970
4971 static void
4972 rs6000_option_override (void)
4973 {
4974 (void) rs6000_option_override_internal (true);
4975 }
4976
4977 \f
4978 /* Implement targetm.vectorize.builtin_mask_for_load. */
4979 static tree
4980 rs6000_builtin_mask_for_load (void)
4981 {
4982 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4983 if ((TARGET_ALTIVEC && !TARGET_VSX)
4984 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4985 return altivec_builtin_mask_for_load;
4986 else
4987 return 0;
4988 }
4989
4990 /* Implement LOOP_ALIGN. */
4991 align_flags
4992 rs6000_loop_align (rtx label)
4993 {
4994 basic_block bb;
4995 int ninsns;
4996
4997 /* Don't override loop alignment if -falign-loops was specified. */
4998 if (!can_override_loop_align)
4999 return align_loops;
5000
5001 bb = BLOCK_FOR_INSN (label);
5002 ninsns = num_loop_insns (bb->loop_father);
5003
5004 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5005 if (ninsns > 4 && ninsns <= 8
5006 && (rs6000_tune == PROCESSOR_POWER4
5007 || rs6000_tune == PROCESSOR_POWER5
5008 || rs6000_tune == PROCESSOR_POWER6
5009 || rs6000_tune == PROCESSOR_POWER7
5010 || rs6000_tune == PROCESSOR_POWER8))
5011 return align_flags (5);
5012 else
5013 return align_loops;
5014 }
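
/* Note that align_flags (5) above requests a 2**5 == 32-byte boundary,
   i.e. one icache sector on the processors listed.  */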
5015
5016 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5017 after applying N iterations. This routine does not determine how many
5018 iterations are required to reach the desired alignment. */
5019
5020 static bool
5021 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5022 {
5023 if (is_packed)
5024 return false;
5025
5026 if (TARGET_32BIT)
5027 {
5028 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5029 return true;
5030
5031 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5032 return true;
5033
5034 return false;
5035 }
5036 else
5037 {
5038 if (TARGET_MACHO)
5039 return false;
5040
5041 /* Assume that all other types are naturally aligned. CHECKME! */
5042 return true;
5043 }
5044 }
5045
5046 /* Return true if the vector misalignment factor is supported by the
5047 target. */
5048 static bool
5049 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5050 const_tree type,
5051 int misalignment,
5052 bool is_packed)
5053 {
5054 if (TARGET_VSX)
5055 {
5056 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5057 return true;
5058
5059 /* Return false if the movmisalign pattern is not supported for this mode. */
5060 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5061 return false;
5062
5063 if (misalignment == -1)
5064 {
5065 /* Misalignment factor is unknown at compile time but we know
5066 it's word aligned. */
5067 if (rs6000_vector_alignment_reachable (type, is_packed))
5068 {
5069 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5070
5071 if (element_size == 64 || element_size == 32)
5072 return true;
5073 }
5074
5075 return false;
5076 }
5077
5078 /* VSX supports word-aligned vectors. */
5079 if (misalignment % 4 == 0)
5080 return true;
5081 }
5082 return false;
5083 }
5084
5085 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5086 static int
5087 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5088 tree vectype, int misalign)
5089 {
5090 unsigned elements;
5091 tree elem_type;
5092
5093 switch (type_of_cost)
5094 {
5095 case scalar_stmt:
5096 case scalar_load:
5097 case scalar_store:
5098 case vector_stmt:
5099 case vector_load:
5100 case vector_store:
5101 case vec_to_scalar:
5102 case scalar_to_vec:
5103 case cond_branch_not_taken:
5104 return 1;
5105
5106 case vec_perm:
5107 if (TARGET_VSX)
5108 return 3;
5109 else
5110 return 1;
5111
5112 case vec_promote_demote:
5113 if (TARGET_VSX)
5114 return 4;
5115 else
5116 return 1;
5117
5118 case cond_branch_taken:
5119 return 3;
5120
5121 case unaligned_load:
5122 case vector_gather_load:
5123 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5124 return 1;
5125
5126 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5127 {
5128 elements = TYPE_VECTOR_SUBPARTS (vectype);
5129 if (elements == 2)
5130 /* Double word aligned. */
5131 return 2;
5132
5133 if (elements == 4)
5134 {
5135 switch (misalign)
5136 {
5137 case 8:
5138 /* Double word aligned. */
5139 return 2;
5140
5141 case -1:
5142 /* Unknown misalignment. */
5143 case 4:
5144 case 12:
5145 /* Word aligned. */
5146 return 22;
5147
5148 default:
5149 gcc_unreachable ();
5150 }
5151 }
5152 }
5153
5154 if (TARGET_ALTIVEC)
5155 /* Misaligned loads are not supported. */
5156 gcc_unreachable ();
5157
5158 return 2;
5159
5160 case unaligned_store:
5161 case vector_scatter_store:
5162 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5163 return 1;
5164
5165 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5166 {
5167 elements = TYPE_VECTOR_SUBPARTS (vectype);
5168 if (elements == 2)
5169 /* Double word aligned. */
5170 return 2;
5171
5172 if (elements == 4)
5173 {
5174 switch (misalign)
5175 {
5176 case 8:
5177 /* Double word aligned. */
5178 return 2;
5179
5180 case -1:
5181 /* Unknown misalignment. */
5182 case 4:
5183 case 12:
5184 /* Word aligned. */
5185 return 23;
5186
5187 default:
5188 gcc_unreachable ();
5189 }
5190 }
5191 }
5192
5193 if (TARGET_ALTIVEC)
5194 /* Misaligned stores are not supported. */
5195 gcc_unreachable ();
5196
5197 return 2;
5198
5199 case vec_construct:
5200 /* This is a rough approximation assuming non-constant elements
5201 constructed into a vector via element insertion. FIXME:
5202 vec_construct is not granular enough for uniformly good
5203 decisions. If the initialization is a splat, this is
5204 cheaper than we estimate. Improve this someday. */
5205 elem_type = TREE_TYPE (vectype);
5206 /* 32-bit vectors loaded into registers are stored as double
5207 precision, so we need 2 permutes, 2 converts, and 1 merge
5208 to construct a vector of short floats from them. */
5209 if (SCALAR_FLOAT_TYPE_P (elem_type)
5210 && TYPE_PRECISION (elem_type) == 32)
5211 return 5;
5212 /* On POWER9, integer vector types are built up in GPRs and then
5213 use a direct move (2 cycles). For POWER8 this is even worse,
5214 as we need two direct moves and a merge, and the direct moves
5215 are five cycles. */
5216 else if (INTEGRAL_TYPE_P (elem_type))
5217 {
5218 if (TARGET_P9_VECTOR)
5219 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5220 else
5221 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5222 }
5223 else
5224 /* V2DFmode doesn't need a direct move. */
5225 return 2;
5226
5227 default:
5228 gcc_unreachable ();
5229 }
5230 }
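
/* As an illustration of the costs above: an unaligned_load of a 4-element
   vector with unknown misalignment on VSX (with misaligned moves allowed)
   costs 22, while the same load known to be doubleword aligned costs 2.  */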
5231
5232 /* Implement targetm.vectorize.preferred_simd_mode. */
5233
5234 static machine_mode
5235 rs6000_preferred_simd_mode (scalar_mode mode)
5236 {
5237 if (TARGET_VSX)
5238 switch (mode)
5239 {
5240 case E_DFmode:
5241 return V2DFmode;
5242 default:;
5243 }
5244 if (TARGET_ALTIVEC || TARGET_VSX)
5245 switch (mode)
5246 {
5247 case E_SFmode:
5248 return V4SFmode;
5249 case E_TImode:
5250 return V1TImode;
5251 case E_DImode:
5252 return V2DImode;
5253 case E_SImode:
5254 return V4SImode;
5255 case E_HImode:
5256 return V8HImode;
5257 case E_QImode:
5258 return V16QImode;
5259 default:;
5260 }
5261 return word_mode;
5262 }
5263
5264 typedef struct _rs6000_cost_data
5265 {
5266 struct loop *loop_info;
5267 unsigned cost[3];
5268 } rs6000_cost_data;
5269
5270 /* Test for likely overcommitment of vector hardware resources. If a
5271 loop iteration is relatively large, and too large a percentage of
5272 instructions in the loop are vectorized, the cost model may not
5273 adequately reflect delays from unavailable vector resources.
5274 Penalize the loop body cost for this case. */
5275
5276 static void
5277 rs6000_density_test (rs6000_cost_data *data)
5278 {
5279 const int DENSITY_PCT_THRESHOLD = 85;
5280 const int DENSITY_SIZE_THRESHOLD = 70;
5281 const int DENSITY_PENALTY = 10;
5282 struct loop *loop = data->loop_info;
5283 basic_block *bbs = get_loop_body (loop);
5284 int nbbs = loop->num_nodes;
5285 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5286 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5287 int i, density_pct;
5288
5289 for (i = 0; i < nbbs; i++)
5290 {
5291 basic_block bb = bbs[i];
5292 gimple_stmt_iterator gsi;
5293
5294 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5295 {
5296 gimple *stmt = gsi_stmt (gsi);
5297 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5298
5299 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5300 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5301 not_vec_cost++;
5302 }
5303 }
5304
5305 free (bbs);
5306 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5307
5308 if (density_pct > DENSITY_PCT_THRESHOLD
5309 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5310 {
5311 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5312 if (dump_enabled_p ())
5313 dump_printf_loc (MSG_NOTE, vect_location,
5314 "density %d%%, cost %d exceeds threshold, penalizing "
5315 "loop body cost by %d%%", density_pct,
5316 vec_cost + not_vec_cost, DENSITY_PENALTY);
5317 }
5318 }
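
/* Worked example: with vec_cost == 90 and not_vec_cost == 10, the density
   is 90% and the total size is 100, so both thresholds are exceeded and
   the body cost is raised to 90 * 110 / 100 == 99.  */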
5319
5320 /* Implement targetm.vectorize.init_cost. */
5321
5322 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5323 instruction is needed by the vectorization. */
5324 static bool rs6000_vect_nonmem;
5325
5326 static void *
5327 rs6000_init_cost (struct loop *loop_info)
5328 {
5329 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5330 data->loop_info = loop_info;
5331 data->cost[vect_prologue] = 0;
5332 data->cost[vect_body] = 0;
5333 data->cost[vect_epilogue] = 0;
5334 rs6000_vect_nonmem = false;
5335 return data;
5336 }
5337
5338 /* Implement targetm.vectorize.add_stmt_cost. */
5339
5340 static unsigned
5341 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5342 struct _stmt_vec_info *stmt_info, int misalign,
5343 enum vect_cost_model_location where)
5344 {
5345 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5346 unsigned retval = 0;
5347
5348 if (flag_vect_cost_model)
5349 {
5350 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5351 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5352 misalign);
5353 /* Statements in an inner loop relative to the loop being
5354 vectorized are weighted more heavily. The value here is
5355 arbitrary and could potentially be improved with analysis. */
5356 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5357 count *= 50; /* FIXME. */
5358
5359 retval = (unsigned) (count * stmt_cost);
5360 cost_data->cost[where] += retval;
5361
5362 /* Check whether we're doing something other than just a copy loop.
5363 Not all such loops may be profitably vectorized; see
5364 rs6000_finish_cost. */
5365 if ((kind == vec_to_scalar || kind == vec_perm
5366 || kind == vec_promote_demote || kind == vec_construct
5367 || kind == scalar_to_vec)
5368 || (where == vect_body && kind == vector_stmt))
5369 rs6000_vect_nonmem = true;
5370 }
5371
5372 return retval;
5373 }
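
/* For example, a single vec_perm statement (cost 3 on VSX) in the body of
   an inner loop relative to the loop being vectorized is recorded as
   50 * 3 == 150 here.  */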
5374
5375 /* Implement targetm.vectorize.finish_cost. */
5376
5377 static void
5378 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5379 unsigned *body_cost, unsigned *epilogue_cost)
5380 {
5381 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5382
5383 if (cost_data->loop_info)
5384 rs6000_density_test (cost_data);
5385
5386 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5387 that require versioning for any reason. The vectorization is at
5388 best a wash inside the loop, and the versioning checks make
5389 profitability highly unlikely and potentially quite harmful. */
5390 if (cost_data->loop_info)
5391 {
5392 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5393 if (!rs6000_vect_nonmem
5394 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5395 && LOOP_REQUIRES_VERSIONING (vec_info))
5396 cost_data->cost[vect_body] += 10000;
5397 }
5398
5399 *prologue_cost = cost_data->cost[vect_prologue];
5400 *body_cost = cost_data->cost[vect_body];
5401 *epilogue_cost = cost_data->cost[vect_epilogue];
5402 }
5403
5404 /* Implement targetm.vectorize.destroy_cost_data. */
5405
5406 static void
5407 rs6000_destroy_cost_data (void *data)
5408 {
5409 free (data);
5410 }
5411
5412 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5413 library with vectorized intrinsics. */
5414
5415 static tree
5416 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5417 tree type_in)
5418 {
5419 char name[32];
5420 const char *suffix = NULL;
5421 tree fntype, new_fndecl, bdecl = NULL_TREE;
5422 int n_args = 1;
5423 const char *bname;
5424 machine_mode el_mode, in_mode;
5425 int n, in_n;
5426
5427 /* Libmass is suitable for unsafe math only as it does not correctly support
5428 parts of IEEE with the required precision such as denormals. Only support
5429 it if we have VSX to use the simd d2 or f4 functions.
5430 XXX: Add variable length support. */
5431 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5432 return NULL_TREE;
5433
5434 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5435 n = TYPE_VECTOR_SUBPARTS (type_out);
5436 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5437 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5438 if (el_mode != in_mode
5439 || n != in_n)
5440 return NULL_TREE;
5441
5442 switch (fn)
5443 {
5444 CASE_CFN_ATAN2:
5445 CASE_CFN_HYPOT:
5446 CASE_CFN_POW:
5447 n_args = 2;
5448 gcc_fallthrough ();
5449
5450 CASE_CFN_ACOS:
5451 CASE_CFN_ACOSH:
5452 CASE_CFN_ASIN:
5453 CASE_CFN_ASINH:
5454 CASE_CFN_ATAN:
5455 CASE_CFN_ATANH:
5456 CASE_CFN_CBRT:
5457 CASE_CFN_COS:
5458 CASE_CFN_COSH:
5459 CASE_CFN_ERF:
5460 CASE_CFN_ERFC:
5461 CASE_CFN_EXP2:
5462 CASE_CFN_EXP:
5463 CASE_CFN_EXPM1:
5464 CASE_CFN_LGAMMA:
5465 CASE_CFN_LOG10:
5466 CASE_CFN_LOG1P:
5467 CASE_CFN_LOG2:
5468 CASE_CFN_LOG:
5469 CASE_CFN_SIN:
5470 CASE_CFN_SINH:
5471 CASE_CFN_SQRT:
5472 CASE_CFN_TAN:
5473 CASE_CFN_TANH:
5474 if (el_mode == DFmode && n == 2)
5475 {
5476 bdecl = mathfn_built_in (double_type_node, fn);
5477 suffix = "d2"; /* pow -> powd2 */
5478 }
5479 else if (el_mode == SFmode && n == 4)
5480 {
5481 bdecl = mathfn_built_in (float_type_node, fn);
5482 suffix = "4"; /* powf -> powf4 */
5483 }
5484 else
5485 return NULL_TREE;
5486 if (!bdecl)
5487 return NULL_TREE;
5488 break;
5489
5490 default:
5491 return NULL_TREE;
5492 }
5493
5494 gcc_assert (suffix != NULL);
5495 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5496 if (!bname)
5497 return NULL_TREE;
5498
5499 strcpy (name, bname + sizeof ("__builtin_") - 1);
5500 strcat (name, suffix);
5501
5502 if (n_args == 1)
5503 fntype = build_function_type_list (type_out, type_in, NULL);
5504 else if (n_args == 2)
5505 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5506 else
5507 gcc_unreachable ();
5508
5509 /* Build a function declaration for the vectorized function. */
5510 new_fndecl = build_decl (BUILTINS_LOCATION,
5511 FUNCTION_DECL, get_identifier (name), fntype);
5512 TREE_PUBLIC (new_fndecl) = 1;
5513 DECL_EXTERNAL (new_fndecl) = 1;
5514 DECL_IS_NOVOPS (new_fndecl) = 1;
5515 TREE_READONLY (new_fndecl) = 1;
5516
5517 return new_fndecl;
5518 }
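
/* For example, a V2DFmode pow becomes a call to powd2, and a V4SFmode powf
   becomes a call to powf4, following the MASS simd naming convention.  */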
5519
5520 /* Returns a function decl for a vectorized version of the builtin function
5521 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5522 if it is not available. */
5523
5524 static tree
5525 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5526 tree type_in)
5527 {
5528 machine_mode in_mode, out_mode;
5529 int in_n, out_n;
5530
5531 if (TARGET_DEBUG_BUILTIN)
5532 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5533 combined_fn_name (combined_fn (fn)),
5534 GET_MODE_NAME (TYPE_MODE (type_out)),
5535 GET_MODE_NAME (TYPE_MODE (type_in)));
5536
5537 if (TREE_CODE (type_out) != VECTOR_TYPE
5538 || TREE_CODE (type_in) != VECTOR_TYPE)
5539 return NULL_TREE;
5540
5541 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5542 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5543 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5544 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5545
5546 switch (fn)
5547 {
5548 CASE_CFN_COPYSIGN:
5549 if (VECTOR_UNIT_VSX_P (V2DFmode)
5550 && out_mode == DFmode && out_n == 2
5551 && in_mode == DFmode && in_n == 2)
5552 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5553 if (VECTOR_UNIT_VSX_P (V4SFmode)
5554 && out_mode == SFmode && out_n == 4
5555 && in_mode == SFmode && in_n == 4)
5556 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5557 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5558 && out_mode == SFmode && out_n == 4
5559 && in_mode == SFmode && in_n == 4)
5560 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5561 break;
5562 CASE_CFN_CEIL:
5563 if (VECTOR_UNIT_VSX_P (V2DFmode)
5564 && out_mode == DFmode && out_n == 2
5565 && in_mode == DFmode && in_n == 2)
5566 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5567 if (VECTOR_UNIT_VSX_P (V4SFmode)
5568 && out_mode == SFmode && out_n == 4
5569 && in_mode == SFmode && in_n == 4)
5570 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5571 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5572 && out_mode == SFmode && out_n == 4
5573 && in_mode == SFmode && in_n == 4)
5574 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5575 break;
5576 CASE_CFN_FLOOR:
5577 if (VECTOR_UNIT_VSX_P (V2DFmode)
5578 && out_mode == DFmode && out_n == 2
5579 && in_mode == DFmode && in_n == 2)
5580 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5581 if (VECTOR_UNIT_VSX_P (V4SFmode)
5582 && out_mode == SFmode && out_n == 4
5583 && in_mode == SFmode && in_n == 4)
5584 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5585 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5586 && out_mode == SFmode && out_n == 4
5587 && in_mode == SFmode && in_n == 4)
5588 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5589 break;
5590 CASE_CFN_FMA:
5591 if (VECTOR_UNIT_VSX_P (V2DFmode)
5592 && out_mode == DFmode && out_n == 2
5593 && in_mode == DFmode && in_n == 2)
5594 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5595 if (VECTOR_UNIT_VSX_P (V4SFmode)
5596 && out_mode == SFmode && out_n == 4
5597 && in_mode == SFmode && in_n == 4)
5598 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5599 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5600 && out_mode == SFmode && out_n == 4
5601 && in_mode == SFmode && in_n == 4)
5602 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5603 break;
5604 CASE_CFN_TRUNC:
5605 if (VECTOR_UNIT_VSX_P (V2DFmode)
5606 && out_mode == DFmode && out_n == 2
5607 && in_mode == DFmode && in_n == 2)
5608 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5609 if (VECTOR_UNIT_VSX_P (V4SFmode)
5610 && out_mode == SFmode && out_n == 4
5611 && in_mode == SFmode && in_n == 4)
5612 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5613 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5614 && out_mode == SFmode && out_n == 4
5615 && in_mode == SFmode && in_n == 4)
5616 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5617 break;
5618 CASE_CFN_NEARBYINT:
5619 if (VECTOR_UNIT_VSX_P (V2DFmode)
5620 && flag_unsafe_math_optimizations
5621 && out_mode == DFmode && out_n == 2
5622 && in_mode == DFmode && in_n == 2)
5623 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5624 if (VECTOR_UNIT_VSX_P (V4SFmode)
5625 && flag_unsafe_math_optimizations
5626 && out_mode == SFmode && out_n == 4
5627 && in_mode == SFmode && in_n == 4)
5628 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5629 break;
5630 CASE_CFN_RINT:
5631 if (VECTOR_UNIT_VSX_P (V2DFmode)
5632 && !flag_trapping_math
5633 && out_mode == DFmode && out_n == 2
5634 && in_mode == DFmode && in_n == 2)
5635 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5636 if (VECTOR_UNIT_VSX_P (V4SFmode)
5637 && !flag_trapping_math
5638 && out_mode == SFmode && out_n == 4
5639 && in_mode == SFmode && in_n == 4)
5640 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5641 break;
5642 default:
5643 break;
5644 }
5645
5646 /* Generate calls to libmass if appropriate. */
5647 if (rs6000_veclib_handler)
5648 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5649
5650 return NULL_TREE;
5651 }
5652
5653 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5654
5655 static tree
5656 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5657 tree type_in)
5658 {
5659 machine_mode in_mode, out_mode;
5660 int in_n, out_n;
5661
5662 if (TARGET_DEBUG_BUILTIN)
5663 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5664 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5665 GET_MODE_NAME (TYPE_MODE (type_out)),
5666 GET_MODE_NAME (TYPE_MODE (type_in)));
5667
5668 if (TREE_CODE (type_out) != VECTOR_TYPE
5669 || TREE_CODE (type_in) != VECTOR_TYPE)
5670 return NULL_TREE;
5671
5672 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5673 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5674 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5675 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5676
5677 enum rs6000_builtins fn
5678 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5679 switch (fn)
5680 {
5681 case RS6000_BUILTIN_RSQRTF:
5682 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5683 && out_mode == SFmode && out_n == 4
5684 && in_mode == SFmode && in_n == 4)
5685 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5686 break;
5687 case RS6000_BUILTIN_RSQRT:
5688 if (VECTOR_UNIT_VSX_P (V2DFmode)
5689 && out_mode == DFmode && out_n == 2
5690 && in_mode == DFmode && in_n == 2)
5691 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5692 break;
5693 case RS6000_BUILTIN_RECIPF:
5694 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5695 && out_mode == SFmode && out_n == 4
5696 && in_mode == SFmode && in_n == 4)
5697 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5698 break;
5699 case RS6000_BUILTIN_RECIP:
5700 if (VECTOR_UNIT_VSX_P (V2DFmode)
5701 && out_mode == DFmode && out_n == 2
5702 && in_mode == DFmode && in_n == 2)
5703 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5704 break;
5705 default:
5706 break;
5707 }
5708 return NULL_TREE;
5709 }
5710 \f
5711 /* Default CPU string for rs6000*_file_start functions. */
5712 static const char *rs6000_default_cpu;
5713
5714 /* Do anything needed at the start of the asm file. */
5715
5716 static void
5717 rs6000_file_start (void)
5718 {
5719 char buffer[80];
5720 const char *start = buffer;
5721 FILE *file = asm_out_file;
5722
5723 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5724
5725 default_file_start ();
5726
5727 if (flag_verbose_asm)
5728 {
5729 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5730
5731 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5732 {
5733 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5734 start = "";
5735 }
5736
5737 if (global_options_set.x_rs6000_cpu_index)
5738 {
5739 fprintf (file, "%s -mcpu=%s", start,
5740 processor_target_table[rs6000_cpu_index].name);
5741 start = "";
5742 }
5743
5744 if (global_options_set.x_rs6000_tune_index)
5745 {
5746 fprintf (file, "%s -mtune=%s", start,
5747 processor_target_table[rs6000_tune_index].name);
5748 start = "";
5749 }
5750
5751 if (PPC405_ERRATUM77)
5752 {
5753 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5754 start = "";
5755 }
5756
5757 #ifdef USING_ELFOS_H
5758 switch (rs6000_sdata)
5759 {
5760 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5761 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5762 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5763 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5764 }
5765
5766 if (rs6000_sdata && g_switch_value)
5767 {
5768 fprintf (file, "%s -G %d", start,
5769 g_switch_value);
5770 start = "";
5771 }
5772 #endif
5773
5774 if (*start == '\0')
5775 putc ('\n', file);
5776 }
5777
5778 #ifdef USING_ELFOS_H
5779 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5780 && !global_options_set.x_rs6000_cpu_index)
5781 {
5782 fputs ("\t.machine ", asm_out_file);
5783 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5784 fputs ("power9\n", asm_out_file);
5785 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5786 fputs ("power8\n", asm_out_file);
5787 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5788 fputs ("power7\n", asm_out_file);
5789 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5790 fputs ("power6\n", asm_out_file);
5791 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5792 fputs ("power5\n", asm_out_file);
5793 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5794 fputs ("power4\n", asm_out_file);
5795 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5796 fputs ("ppc64\n", asm_out_file);
5797 else
5798 fputs ("ppc\n", asm_out_file);
5799 }
5800 #endif
5801
5802 if (DEFAULT_ABI == ABI_ELFv2)
5803 fprintf (file, "\t.abiversion 2\n");
5804 }
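
/* For example, when neither --with-cpu nor -mcpu is given on an ELF target,
   the ".machine" directive above is derived from the ISA flags: power9 if
   modulo is enabled, power8 if direct move is enabled, and so on down to
   plain "ppc".  */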
5805
5806 \f
5807 /* Return nonzero if this function is known to have a null epilogue. */
5808
5809 int
5810 direct_return (void)
5811 {
5812 if (reload_completed)
5813 {
5814 rs6000_stack_t *info = rs6000_stack_info ();
5815
5816 if (info->first_gp_reg_save == 32
5817 && info->first_fp_reg_save == 64
5818 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5819 && ! info->lr_save_p
5820 && ! info->cr_save_p
5821 && info->vrsave_size == 0
5822 && ! info->push_p)
5823 return 1;
5824 }
5825
5826 return 0;
5827 }
5828
5829 /* Helper for num_insns_constant. Calculate number of instructions to
5830 load VALUE to a single gpr using combinations of addi, addis, ori,
5831 oris and sldi instructions. */
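/* For example, 0x7fff is a single addi; 0x12345678 is addis+ori (2 insns);
   on 64-bit targets 0x123456789abcdef0 is built as lis, ori, sldi 32,
   oris, ori (5 insns).  */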
5832
5833 static int
5834 num_insns_constant_gpr (HOST_WIDE_INT value)
5835 {
5836 /* Signed 16-bit constant loadable with a single addi. */
5837 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5838 return 1;
5839
5840 /* 32-bit signed constant with zero low 16 bits, loadable with addis. */
5841 else if ((value & 0xffff) == 0
5842 && (value >> 31 == -1 || value >> 31 == 0))
5843 return 1;
5844
5845 else if (TARGET_POWERPC64)
5846 {
5847 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5848 HOST_WIDE_INT high = value >> 31;
5849
5850 if (high == 0 || high == -1)
5851 return 2;
5852
5853 high >>= 1;
5854
5855 if (low == 0)
5856 return num_insns_constant_gpr (high) + 1;
5857 else if (high == 0)
5858 return num_insns_constant_gpr (low) + 1;
5859 else
5860 return (num_insns_constant_gpr (high)
5861 + num_insns_constant_gpr (low) + 1);
5862 }
5863
5864 else
5865 return 2;
5866 }
5867
5868 /* Helper for num_insns_constant. Allow constants formed by the
5869 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5870 and handle modes that require multiple gprs. */
5871
5872 static int
5873 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5874 {
5875 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5876 int total = 0;
5877 while (nregs-- > 0)
5878 {
5879 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5880 int insns = num_insns_constant_gpr (low);
5881 if (insns > 2
5882 /* We won't get more than 2 from num_insns_constant_gpr
5883 except when TARGET_POWERPC64 and mode is DImode or
5884 wider, so the register mode must be DImode. */
5885 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5886 insns = 2;
5887 total += insns;
5888 value >>= BITS_PER_WORD;
5889 }
5890 return total;
5891 }
5892
5893 /* Return the number of instructions it takes to form a constant in as
5894 many gprs as are needed for MODE. */
5895
5896 int
5897 num_insns_constant (rtx op, machine_mode mode)
5898 {
5899 HOST_WIDE_INT val;
5900
5901 switch (GET_CODE (op))
5902 {
5903 case CONST_INT:
5904 val = INTVAL (op);
5905 break;
5906
5907 case CONST_WIDE_INT:
5908 {
5909 int insns = 0;
5910 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5911 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5912 DImode);
5913 return insns;
5914 }
5915
5916 case CONST_DOUBLE:
5917 {
5918 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5919
5920 if (mode == SFmode || mode == SDmode)
5921 {
5922 long l;
5923
5924 if (mode == SDmode)
5925 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5926 else
5927 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5928 /* See the first define_split in rs6000.md handling a
5929 const_double_operand. */
5930 val = l;
5931 mode = SImode;
5932 }
5933 else if (mode == DFmode || mode == DDmode)
5934 {
5935 long l[2];
5936
5937 if (mode == DDmode)
5938 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5939 else
5940 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5941
5942 /* See the second (32-bit) and third (64-bit) define_split
5943 in rs6000.md handling a const_double_operand. */
5944 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5945 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5946 mode = DImode;
5947 }
5948 else if (mode == TFmode || mode == TDmode
5949 || mode == KFmode || mode == IFmode)
5950 {
5951 long l[4];
5952 int insns;
5953
5954 if (mode == TDmode)
5955 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5956 else
5957 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5958
5959 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5960 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5961 insns = num_insns_constant_multi (val, DImode);
5962 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5963 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5964 insns += num_insns_constant_multi (val, DImode);
5965 return insns;
5966 }
5967 else
5968 gcc_unreachable ();
5969 }
5970 break;
5971
5972 default:
5973 gcc_unreachable ();
5974 }
5975
5976 return num_insns_constant_multi (val, mode);
5977 }
5978
5979 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5980 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5981 corresponding element of the vector, but for V4SFmode, the
5982 corresponding "float" is interpreted as an SImode integer. */
5983
5984 HOST_WIDE_INT
5985 const_vector_elt_as_int (rtx op, unsigned int elt)
5986 {
5987 rtx tmp;
5988
5989 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5990 gcc_assert (GET_MODE (op) != V2DImode
5991 && GET_MODE (op) != V2DFmode);
5992
5993 tmp = CONST_VECTOR_ELT (op, elt);
5994 if (GET_MODE (op) == V4SFmode)
5995 tmp = gen_lowpart (SImode, tmp);
5996 return INTVAL (tmp);
5997 }
5998
5999 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6000 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6001 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6002 all items are set to the same value and contain COPIES replicas of the
6003 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6004 operand and the others are set to the value of the operand's msb. */
6005
6006 static bool
6007 vspltis_constant (rtx op, unsigned step, unsigned copies)
6008 {
6009 machine_mode mode = GET_MODE (op);
6010 machine_mode inner = GET_MODE_INNER (mode);
6011
6012 unsigned i;
6013 unsigned nunits;
6014 unsigned bitsize;
6015 unsigned mask;
6016
6017 HOST_WIDE_INT val;
6018 HOST_WIDE_INT splat_val;
6019 HOST_WIDE_INT msb_val;
6020
6021 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6022 return false;
6023
6024 nunits = GET_MODE_NUNITS (mode);
6025 bitsize = GET_MODE_BITSIZE (inner);
6026 mask = GET_MODE_MASK (inner);
6027
6028 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6029 splat_val = val;
6030 msb_val = val >= 0 ? 0 : -1;
6031
6032 /* Construct the value to be splatted, if possible. If not, return false. */
6033 for (i = 2; i <= copies; i *= 2)
6034 {
6035 HOST_WIDE_INT small_val;
6036 bitsize /= 2;
6037 small_val = splat_val >> bitsize;
6038 mask >>= bitsize;
6039 if (splat_val != ((HOST_WIDE_INT)
6040 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6041 | (small_val & mask)))
6042 return false;
6043 splat_val = small_val;
6044 }
6045
6046 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6047 if (EASY_VECTOR_15 (splat_val))
6048 ;
6049
6050 /* Also check if we can splat, and then add the result to itself. Do so if
6051 the value is positive, or if the splat instruction is using OP's mode;
6052 for splat_val < 0, the splat and the add should use the same mode. */
6053 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6054 && (splat_val >= 0 || (step == 1 && copies == 1)))
6055 ;
6056
6057 /* Also check if we are loading up the most significant bit, which can be done by
6058 loading up -1 and shifting the value left by -1. */
6059 else if (EASY_VECTOR_MSB (splat_val, inner))
6060 ;
6061
6062 else
6063 return false;
6064
6065 /* Check if VAL is present in every STEP-th element, and the
6066 other elements are filled with its most significant bit. */
6067 for (i = 1; i < nunits; ++i)
6068 {
6069 HOST_WIDE_INT desired_val;
6070 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6071 if ((i & (step - 1)) == 0)
6072 desired_val = val;
6073 else
6074 desired_val = msb_val;
6075
6076 if (desired_val != const_vector_elt_as_int (op, elt))
6077 return false;
6078 }
6079
6080 return true;
6081 }
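
/* For example, a V8HImode vector of eight 5s is matched with step 1 and
   copies 1 (a vspltish 5), while a V8HImode vector of eight 0x0505s is
   matched with copies 2, since it is really a vspltisb 5 in disguise.  */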
6082
6083 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6084 instruction, filling in the bottom elements with 0 or -1.
6085
6086 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6087 for the number of zeroes to shift in, or negative for the number of 0xff
6088 bytes to shift in.
6089
6090 OP is a CONST_VECTOR. */
6091
6092 int
6093 vspltis_shifted (rtx op)
6094 {
6095 machine_mode mode = GET_MODE (op);
6096 machine_mode inner = GET_MODE_INNER (mode);
6097
6098 unsigned i, j;
6099 unsigned nunits;
6100 unsigned mask;
6101
6102 HOST_WIDE_INT val;
6103
6104 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6105 return 0;
6106
6107 /* We need to create pseudo registers to do the shift, so don't recognize
6108 shift vector constants after reload. */
6109 if (!can_create_pseudo_p ())
6110 return 0;
6111
6112 nunits = GET_MODE_NUNITS (mode);
6113 mask = GET_MODE_MASK (inner);
6114
6115 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6116
6117 /* Check if the value can really be the operand of a vspltis[bhw]. */
6118 if (EASY_VECTOR_15 (val))
6119 ;
6120
6121 /* Also check if we are loading up the most significant bit which can be done
6122 by loading up -1 and shifting the value left by -1. */
6123 else if (EASY_VECTOR_MSB (val, inner))
6124 ;
6125
6126 else
6127 return 0;
6128
6129 /* Check that VAL is present in each element until we find elements
6130 that are 0 or all 1 bits. */
6131 for (i = 1; i < nunits; ++i)
6132 {
6133 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6134 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6135
6136 /* If the value isn't the splat value, check for the remaining elements
6137 being 0/-1. */
6138 if (val != elt_val)
6139 {
6140 if (elt_val == 0)
6141 {
6142 for (j = i+1; j < nunits; ++j)
6143 {
6144 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6145 if (const_vector_elt_as_int (op, elt2) != 0)
6146 return 0;
6147 }
6148
6149 return (nunits - i) * GET_MODE_SIZE (inner);
6150 }
6151
6152 else if ((elt_val & mask) == mask)
6153 {
6154 for (j = i+1; j < nunits; ++j)
6155 {
6156 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6157 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6158 return 0;
6159 }
6160
6161 return -((nunits - i) * GET_MODE_SIZE (inner));
6162 }
6163
6164 else
6165 return 0;
6166 }
6167 }
6168
6169 /* If all elements are equal, we don't need to do VSLDOI. */
6170 return 0;
6171 }
6172
6173
6174 /* Return true if OP is of the given MODE and can be synthesized
6175 with a vspltisb, vspltish or vspltisw. */
6176
6177 bool
6178 easy_altivec_constant (rtx op, machine_mode mode)
6179 {
6180 unsigned step, copies;
6181
6182 if (mode == VOIDmode)
6183 mode = GET_MODE (op);
6184 else if (mode != GET_MODE (op))
6185 return false;
6186
6187 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6188 constants. */
6189 if (mode == V2DFmode)
6190 return zero_constant (op, mode);
6191
6192 else if (mode == V2DImode)
6193 {
6194 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6195 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6196 return false;
6197
6198 if (zero_constant (op, mode))
6199 return true;
6200
6201 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6202 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6203 return true;
6204
6205 return false;
6206 }
6207
6208 /* V1TImode is a special container for TImode. Ignore for now. */
6209 else if (mode == V1TImode)
6210 return false;
6211
6212 /* Start with a vspltisw. */
6213 step = GET_MODE_NUNITS (mode) / 4;
6214 copies = 1;
6215
6216 if (vspltis_constant (op, step, copies))
6217 return true;
6218
6219 /* Then try with a vspltish. */
6220 if (step == 1)
6221 copies <<= 1;
6222 else
6223 step >>= 1;
6224
6225 if (vspltis_constant (op, step, copies))
6226 return true;
6227
6228 /* And finally a vspltisb. */
6229 if (step == 1)
6230 copies <<= 1;
6231 else
6232 step >>= 1;
6233
6234 if (vspltis_constant (op, step, copies))
6235 return true;
6236
6237 if (vspltis_shifted (op) != 0)
6238 return true;
6239
6240 return false;
6241 }
6242
6243 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6244 result is OP. Abort if it is not possible. */
6245
6246 rtx
6247 gen_easy_altivec_constant (rtx op)
6248 {
6249 machine_mode mode = GET_MODE (op);
6250 int nunits = GET_MODE_NUNITS (mode);
6251 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6252 unsigned step = nunits / 4;
6253 unsigned copies = 1;
6254
6255 /* Start with a vspltisw. */
6256 if (vspltis_constant (op, step, copies))
6257 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6258
6259 /* Then try with a vspltish. */
6260 if (step == 1)
6261 copies <<= 1;
6262 else
6263 step >>= 1;
6264
6265 if (vspltis_constant (op, step, copies))
6266 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6267
6268 /* And finally a vspltisb. */
6269 if (step == 1)
6270 copies <<= 1;
6271 else
6272 step >>= 1;
6273
6274 if (vspltis_constant (op, step, copies))
6275 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6276
6277 gcc_unreachable ();
6278 }
6279
6280 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6281 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6282
6283 Return the number of instructions needed (1 or 2) via the address pointed
6284 to by NUM_INSNS_PTR.
6285
6286 Return the constant that is being split via CONSTANT_PTR. */
6287
6288 bool
6289 xxspltib_constant_p (rtx op,
6290 machine_mode mode,
6291 int *num_insns_ptr,
6292 int *constant_ptr)
6293 {
6294 size_t nunits = GET_MODE_NUNITS (mode);
6295 size_t i;
6296 HOST_WIDE_INT value;
6297 rtx element;
6298
6299 /* Set the returned values to out-of-range values. */
6300 *num_insns_ptr = -1;
6301 *constant_ptr = 256;
6302
6303 if (!TARGET_P9_VECTOR)
6304 return false;
6305
6306 if (mode == VOIDmode)
6307 mode = GET_MODE (op);
6308
6309 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6310 return false;
6311
6312 /* Handle (vec_duplicate <constant>). */
6313 if (GET_CODE (op) == VEC_DUPLICATE)
6314 {
6315 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6316 && mode != V2DImode)
6317 return false;
6318
6319 element = XEXP (op, 0);
6320 if (!CONST_INT_P (element))
6321 return false;
6322
6323 value = INTVAL (element);
6324 if (!IN_RANGE (value, -128, 127))
6325 return false;
6326 }
6327
6328 /* Handle (const_vector [...]). */
6329 else if (GET_CODE (op) == CONST_VECTOR)
6330 {
6331 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6332 && mode != V2DImode)
6333 return false;
6334
6335 element = CONST_VECTOR_ELT (op, 0);
6336 if (!CONST_INT_P (element))
6337 return false;
6338
6339 value = INTVAL (element);
6340 if (!IN_RANGE (value, -128, 127))
6341 return false;
6342
6343 for (i = 1; i < nunits; i++)
6344 {
6345 element = CONST_VECTOR_ELT (op, i);
6346 if (!CONST_INT_P (element))
6347 return false;
6348
6349 if (value != INTVAL (element))
6350 return false;
6351 }
6352 }
6353
6354 /* Handle integer constants being loaded into the upper part of the VSX
6355 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6356 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6357 else if (CONST_INT_P (op))
6358 {
6359 if (!SCALAR_INT_MODE_P (mode))
6360 return false;
6361
6362 value = INTVAL (op);
6363 if (!IN_RANGE (value, -128, 127))
6364 return false;
6365
6366 if (!IN_RANGE (value, -1, 0))
6367 {
6368 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6369 return false;
6370
6371 if (EASY_VECTOR_15 (value))
6372 return false;
6373 }
6374 }
6375
6376 else
6377 return false;
6378
6379 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6380 sign extend. Special case 0/-1 to allow getting any VSX register instead
6381 of an Altivec register. */
6382 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6383 && EASY_VECTOR_15 (value))
6384 return false;
6385
6386 /* Return # of instructions and the constant byte for XXSPLTIB. */
6387 if (mode == V16QImode)
6388 *num_insns_ptr = 1;
6389
6390 else if (IN_RANGE (value, -1, 0))
6391 *num_insns_ptr = 1;
6392
6393 else
6394 *num_insns_ptr = 2;
6395
6396 *constant_ptr = (int) value;
6397 return true;
6398 }
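
/* For example, splatting 0x42 across V16QImode is a single xxspltib, while
   splatting it across V4SImode needs xxspltib plus a vextsb2w sign extend,
   so *NUM_INSNS_PTR is 2 in that case.  */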
6399
6400 const char *
6401 output_vec_const_move (rtx *operands)
6402 {
6403 int shift;
6404 machine_mode mode;
6405 rtx dest, vec;
6406
6407 dest = operands[0];
6408 vec = operands[1];
6409 mode = GET_MODE (dest);
6410
6411 if (TARGET_VSX)
6412 {
6413 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6414 int xxspltib_value = 256;
6415 int num_insns = -1;
6416
6417 if (zero_constant (vec, mode))
6418 {
6419 if (TARGET_P9_VECTOR)
6420 return "xxspltib %x0,0";
6421
6422 else if (dest_vmx_p)
6423 return "vspltisw %0,0";
6424
6425 else
6426 return "xxlxor %x0,%x0,%x0";
6427 }
6428
6429 if (all_ones_constant (vec, mode))
6430 {
6431 if (TARGET_P9_VECTOR)
6432 return "xxspltib %x0,255";
6433
6434 else if (dest_vmx_p)
6435 return "vspltisw %0,-1";
6436
6437 else if (TARGET_P8_VECTOR)
6438 return "xxlorc %x0,%x0,%x0";
6439
6440 else
6441 gcc_unreachable ();
6442 }
6443
6444 if (TARGET_P9_VECTOR
6445 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6446 {
6447 if (num_insns == 1)
6448 {
6449 operands[2] = GEN_INT (xxspltib_value & 0xff);
6450 return "xxspltib %x0,%2";
6451 }
6452
6453 return "#";
6454 }
6455 }
6456
6457 if (TARGET_ALTIVEC)
6458 {
6459 rtx splat_vec;
6460
6461 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6462 if (zero_constant (vec, mode))
6463 return "vspltisw %0,0";
6464
6465 if (all_ones_constant (vec, mode))
6466 return "vspltisw %0,-1";
6467
6468 /* Do we need to construct a value using VSLDOI? */
6469 shift = vspltis_shifted (vec);
6470 if (shift != 0)
6471 return "#";
6472
6473 splat_vec = gen_easy_altivec_constant (vec);
6474 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6475 operands[1] = XEXP (splat_vec, 0);
6476 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6477 return "#";
6478
6479 switch (GET_MODE (splat_vec))
6480 {
6481 case E_V4SImode:
6482 return "vspltisw %0,%1";
6483
6484 case E_V8HImode:
6485 return "vspltish %0,%1";
6486
6487 case E_V16QImode:
6488 return "vspltisb %0,%1";
6489
6490 default:
6491 gcc_unreachable ();
6492 }
6493 }
6494
6495 gcc_unreachable ();
6496 }
6497
6498 /* Initialize vector TARGET to VALS. */
6499
6500 void
6501 rs6000_expand_vector_init (rtx target, rtx vals)
6502 {
6503 machine_mode mode = GET_MODE (target);
6504 machine_mode inner_mode = GET_MODE_INNER (mode);
6505 int n_elts = GET_MODE_NUNITS (mode);
6506 int n_var = 0, one_var = -1;
6507 bool all_same = true, all_const_zero = true;
6508 rtx x, mem;
6509 int i;
6510
6511 for (i = 0; i < n_elts; ++i)
6512 {
6513 x = XVECEXP (vals, 0, i);
6514 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6515 ++n_var, one_var = i;
6516 else if (x != CONST0_RTX (inner_mode))
6517 all_const_zero = false;
6518
6519 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6520 all_same = false;
6521 }
6522
6523 if (n_var == 0)
6524 {
6525 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6526 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6527 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6528 {
6529 /* Zero register. */
6530 emit_move_insn (target, CONST0_RTX (mode));
6531 return;
6532 }
6533 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6534 {
6535 /* Splat immediate. */
6536 emit_insn (gen_rtx_SET (target, const_vec));
6537 return;
6538 }
6539 else
6540 {
6541 /* Load from constant pool. */
6542 emit_move_insn (target, const_vec);
6543 return;
6544 }
6545 }
6546
6547 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6548 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6549 {
6550 rtx op[2];
6551 size_t i;
6552 size_t num_elements = all_same ? 1 : 2;
6553 for (i = 0; i < num_elements; i++)
6554 {
6555 op[i] = XVECEXP (vals, 0, i);
6556 /* Just in case there is a SUBREG with a smaller mode, do a
6557 conversion. */
6558 if (GET_MODE (op[i]) != inner_mode)
6559 {
6560 rtx tmp = gen_reg_rtx (inner_mode);
6561 convert_move (tmp, op[i], 0);
6562 op[i] = tmp;
6563 }
6564 /* Allow load with splat double word. */
6565 else if (MEM_P (op[i]))
6566 {
6567 if (!all_same)
6568 op[i] = force_reg (inner_mode, op[i]);
6569 }
6570 else if (!REG_P (op[i]))
6571 op[i] = force_reg (inner_mode, op[i]);
6572 }
6573
6574 if (all_same)
6575 {
6576 if (mode == V2DFmode)
6577 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6578 else
6579 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6580 }
6581 else
6582 {
6583 if (mode == V2DFmode)
6584 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6585 else
6586 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6587 }
6588 return;
6589 }
6590
6591 /* Special case initializing vector int if we are on 64-bit systems with
6592 direct move or we have the ISA 3.0 instructions. */
6593 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6594 && TARGET_DIRECT_MOVE_64BIT)
6595 {
6596 if (all_same)
6597 {
6598 rtx element0 = XVECEXP (vals, 0, 0);
6599 if (MEM_P (element0))
6600 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6601 else
6602 element0 = force_reg (SImode, element0);
6603
6604 if (TARGET_P9_VECTOR)
6605 emit_insn (gen_vsx_splat_v4si (target, element0));
6606 else
6607 {
6608 rtx tmp = gen_reg_rtx (DImode);
6609 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6610 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6611 }
6612 return;
6613 }
6614 else
6615 {
6616 rtx elements[4];
6617 size_t i;
6618
6619 for (i = 0; i < 4; i++)
6620 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6621
6622 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6623 elements[2], elements[3]));
6624 return;
6625 }
6626 }
6627
6628 /* With single-precision floating point on VSX, single precision is
6629 internally represented as a double; either build 2 V2DF vectors and
6630 convert them to single precision, or do one conversion and splat the
6631 result to the other elements. */
6632 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6633 {
6634 if (all_same)
6635 {
6636 rtx element0 = XVECEXP (vals, 0, 0);
6637
6638 if (TARGET_P9_VECTOR)
6639 {
6640 if (MEM_P (element0))
6641 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6642
6643 emit_insn (gen_vsx_splat_v4sf (target, element0));
6644 }
6645
6646 else
6647 {
6648 rtx freg = gen_reg_rtx (V4SFmode);
6649 rtx sreg = force_reg (SFmode, element0);
6650 rtx cvt = (TARGET_XSCVDPSPN
6651 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6652 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6653
6654 emit_insn (cvt);
6655 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6656 const0_rtx));
6657 }
6658 }
6659 else
6660 {
6661 rtx dbl_even = gen_reg_rtx (V2DFmode);
6662 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6663 rtx flt_even = gen_reg_rtx (V4SFmode);
6664 rtx flt_odd = gen_reg_rtx (V4SFmode);
6665 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6666 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6667 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6668 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6669
6670 /* Use VMRGEW if we can instead of doing a permute. */
6671 if (TARGET_P8_VECTOR)
6672 {
6673 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6674 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6675 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6676 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6677 if (BYTES_BIG_ENDIAN)
6678 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6679 else
6680 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6681 }
6682 else
6683 {
6684 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6685 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6686 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6687 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6688 rs6000_expand_extract_even (target, flt_even, flt_odd);
6689 }
6690 }
6691 return;
6692 }
6693
6694 /* Special case initializing vector short/char that are splats if we are on
6695 64-bit systems with direct move. */
6696 if (all_same && TARGET_DIRECT_MOVE_64BIT
6697 && (mode == V16QImode || mode == V8HImode))
6698 {
6699 rtx op0 = XVECEXP (vals, 0, 0);
6700 rtx di_tmp = gen_reg_rtx (DImode);
6701
6702 if (!REG_P (op0))
6703 op0 = force_reg (GET_MODE_INNER (mode), op0);
6704
6705 if (mode == V16QImode)
6706 {
6707 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6708 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6709 return;
6710 }
6711
6712 if (mode == V8HImode)
6713 {
6714 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6715 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6716 return;
6717 }
6718 }
6719
6720 /* Store value to stack temp. Load vector element. Splat. However, splat
6721 of 64-bit items is not supported on Altivec. */
6722 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6723 {
6724 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6725 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6726 XVECEXP (vals, 0, 0));
6727 x = gen_rtx_UNSPEC (VOIDmode,
6728 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6729 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6730 gen_rtvec (2,
6731 gen_rtx_SET (target, mem),
6732 x)));
6733 x = gen_rtx_VEC_SELECT (inner_mode, target,
6734 gen_rtx_PARALLEL (VOIDmode,
6735 gen_rtvec (1, const0_rtx)));
6736 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6737 return;
6738 }
6739
6740 /* One field is non-constant. Load constant then overwrite
6741 varying field. */
6742 if (n_var == 1)
6743 {
6744 rtx copy = copy_rtx (vals);
6745
6746 /* Load constant part of vector, substitute neighboring value for
6747 varying element. */
6748 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6749 rs6000_expand_vector_init (target, copy);
6750
6751 /* Insert variable. */
6752 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6753 return;
6754 }
6755
6756 /* Construct the vector in memory one field at a time
6757 and load the whole vector. */
6758 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6759 for (i = 0; i < n_elts; i++)
6760 emit_move_insn (adjust_address_nv (mem, inner_mode,
6761 i * GET_MODE_SIZE (inner_mode)),
6762 XVECEXP (vals, 0, i));
6763 emit_move_insn (target, mem);
6764 }
6765
6766 /* Set field ELT of TARGET to VAL. */
6767
6768 void
6769 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6770 {
6771 machine_mode mode = GET_MODE (target);
6772 machine_mode inner_mode = GET_MODE_INNER (mode);
6773 rtx reg = gen_reg_rtx (mode);
6774 rtx mask, mem, x;
6775 int width = GET_MODE_SIZE (inner_mode);
6776 int i;
6777
6778 val = force_reg (GET_MODE (val), val);
6779
6780 if (VECTOR_MEM_VSX_P (mode))
6781 {
6782 rtx insn = NULL_RTX;
6783 rtx elt_rtx = GEN_INT (elt);
6784
6785 if (mode == V2DFmode)
6786 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6787
6788 else if (mode == V2DImode)
6789 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6790
6791 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6792 {
6793 if (mode == V4SImode)
6794 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6795 else if (mode == V8HImode)
6796 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6797 else if (mode == V16QImode)
6798 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6799 else if (mode == V4SFmode)
6800 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6801 }
6802
6803 if (insn)
6804 {
6805 emit_insn (insn);
6806 return;
6807 }
6808 }
6809
6810 /* Simplify setting single element vectors like V1TImode. */
6811 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6812 {
6813 emit_move_insn (target, gen_lowpart (mode, val));
6814 return;
6815 }
6816
6817 /* Load single variable value. */
6818 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6819 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6820 x = gen_rtx_UNSPEC (VOIDmode,
6821 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6822 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6823 gen_rtvec (2,
6824 gen_rtx_SET (reg, mem),
6825 x)));
6826
6827 /* Start with the identity permutation, selecting bytes 0..15. */
6828 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6829 for (i = 0; i < 16; ++i)
6830 XVECEXP (mask, 0, i) = GEN_INT (i);
6831
6832 /* Set permute mask to insert element into target. */
6833 for (i = 0; i < width; ++i)
6834 XVECEXP (mask, 0, elt*width + i)
6835 = GEN_INT (i + 0x10);
6836 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6837
6838 if (BYTES_BIG_ENDIAN)
6839 x = gen_rtx_UNSPEC (mode,
6840 gen_rtvec (3, target, reg,
6841 force_reg (V16QImode, x)),
6842 UNSPEC_VPERM);
6843 else
6844 {
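/* For little endian, swap the vector operands.  With ISA 3.0, VPERMR
   adjusts the selector indexing itself; otherwise the selector must
   be inverted by hand below.  */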
6845 if (TARGET_P9_VECTOR)
6846 x = gen_rtx_UNSPEC (mode,
6847 gen_rtvec (3, reg, target,
6848 force_reg (V16QImode, x)),
6849 UNSPEC_VPERMR);
6850 else
6851 {
6852 /* Invert selector. We prefer to generate VNAND on P8 so
6853 that future fusion opportunities can kick in, but must
6854 generate VNOR elsewhere. */
6855 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6856 rtx iorx = (TARGET_P8_VECTOR
6857 ? gen_rtx_IOR (V16QImode, notx, notx)
6858 : gen_rtx_AND (V16QImode, notx, notx));
6859 rtx tmp = gen_reg_rtx (V16QImode);
6860 emit_insn (gen_rtx_SET (tmp, iorx));
6861
6862 /* Permute with operands reversed and adjusted selector. */
6863 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6864 UNSPEC_VPERM);
6865 }
6866 }
6867
6868 emit_insn (gen_rtx_SET (target, x));
6869 }
6870
6871 /* Extract field ELT from VEC into TARGET. */
6872
6873 void
6874 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6875 {
6876 machine_mode mode = GET_MODE (vec);
6877 machine_mode inner_mode = GET_MODE_INNER (mode);
6878 rtx mem;
6879
6880 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6881 {
6882 switch (mode)
6883 {
6884 default:
6885 break;
6886 case E_V1TImode:
6887 emit_move_insn (target, gen_lowpart (TImode, vec));
6888 return;
6889 case E_V2DFmode:
6890 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6891 return;
6892 case E_V2DImode:
6893 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6894 return;
6895 case E_V4SFmode:
6896 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6897 return;
6898 case E_V16QImode:
6899 if (TARGET_DIRECT_MOVE_64BIT)
6900 {
6901 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6902 return;
6903 }
6904 else
6905 break;
6906 case E_V8HImode:
6907 if (TARGET_DIRECT_MOVE_64BIT)
6908 {
6909 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6910 return;
6911 }
6912 else
6913 break;
6914 case E_V4SImode:
6915 if (TARGET_DIRECT_MOVE_64BIT)
6916 {
6917 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6918 return;
6919 }
6920 break;
6921 }
6922 }
6923 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6924 && TARGET_DIRECT_MOVE_64BIT)
6925 {
6926 if (GET_MODE (elt) != DImode)
6927 {
6928 rtx tmp = gen_reg_rtx (DImode);
6929 convert_move (tmp, elt, 0);
6930 elt = tmp;
6931 }
6932 else if (!REG_P (elt))
6933 elt = force_reg (DImode, elt);
6934
6935 switch (mode)
6936 {
6937 case E_V1TImode:
6938 emit_move_insn (target, gen_lowpart (TImode, vec));
6939 return;
6940
6941 case E_V2DFmode:
6942 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6943 return;
6944
6945 case E_V2DImode:
6946 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6947 return;
6948
6949 case E_V4SFmode:
6950 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6951 return;
6952
6953 case E_V4SImode:
6954 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6955 return;
6956
6957 case E_V8HImode:
6958 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6959 return;
6960
6961 case E_V16QImode:
6962 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6963 return;
6964
6965 default:
6966 gcc_unreachable ();
6967 }
6968 }
6969
6970 /* Allocate mode-sized buffer. */
6971 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6972
6973 emit_move_insn (mem, vec);
6974 if (CONST_INT_P (elt))
6975 {
6976 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6977
6978 /* Add offset to field within buffer matching vector element. */
6979 mem = adjust_address_nv (mem, inner_mode,
6980 modulo_elt * GET_MODE_SIZE (inner_mode));
6981 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6982 }
6983 else
6984 {
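/* Reduce the element number modulo the number of elements, scale it
   by the element size, and add it to the buffer address to form the
   address of the selected element.  */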
6985 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6986 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
6987 rtx new_addr = gen_reg_rtx (Pmode);
6988
6989 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
6990 if (ele_size > 1)
6991 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
6992 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
6993 new_addr = change_address (mem, inner_mode, new_addr);
6994 emit_move_insn (target, new_addr);
6995 }
6996 }
6997
6998 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6999 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7000 temporary (BASE_TMP) to fix up the address. Return the new memory address
7001 that is valid for reads or writes to a given register (SCALAR_REG). */
7002
7003 rtx
7004 rs6000_adjust_vec_address (rtx scalar_reg,
7005 rtx mem,
7006 rtx element,
7007 rtx base_tmp,
7008 machine_mode scalar_mode)
7009 {
7010 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7011 rtx addr = XEXP (mem, 0);
7012 rtx element_offset;
7013 rtx new_addr;
7014 bool valid_addr_p;
7015
7016 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7017 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7018
7019 /* Calculate what we need to add to the address to get the element
7020 address. */
7021 if (CONST_INT_P (element))
7022 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7023 else
7024 {
7025 int byte_shift = exact_log2 (scalar_size);
7026 gcc_assert (byte_shift >= 0);
7027
7028 if (byte_shift == 0)
7029 element_offset = element;
7030
7031 else
7032 {
7033 if (TARGET_POWERPC64)
7034 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7035 else
7036 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7037
7038 element_offset = base_tmp;
7039 }
7040 }
7041
7042 /* Create the new address pointing to the element within the vector. If we
7043 are adding 0, we don't have to change the address. */
7044 if (element_offset == const0_rtx)
7045 new_addr = addr;
7046
7047 /* A simple indirect address can be converted into a reg + offset
7048 address. */
7049 else if (REG_P (addr) || SUBREG_P (addr))
7050 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7051
7052 /* Optimize D-FORM addresses with a constant offset and a constant element
7053 number, folding the element offset into the address directly. */
7054 else if (GET_CODE (addr) == PLUS)
7055 {
7056 rtx op0 = XEXP (addr, 0);
7057 rtx op1 = XEXP (addr, 1);
7058 rtx insn;
7059
7060 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7061 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7062 {
7063 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7064 rtx offset_rtx = GEN_INT (offset);
7065
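/* A D-form offset must fit in a signed 16-bit field; 64-bit scalars
   use DS-form, whose offset must also be a multiple of 4.  */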
7066 if (IN_RANGE (offset, -32768, 32767)
7067 && (scalar_size < 8 || (offset & 0x3) == 0))
7068 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7069 else
7070 {
7071 emit_move_insn (base_tmp, offset_rtx);
7072 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7073 }
7074 }
7075 else
7076 {
7077 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7078 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7079
7080 /* Note, ADDI requires the register being added to be a base
7081 register. If the register was R0, load it up into the temporary
7082 and do the add. */
7083 if (op1_reg_p
7084 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7085 {
7086 insn = gen_add3_insn (base_tmp, op1, element_offset);
7087 gcc_assert (insn != NULL_RTX);
7088 emit_insn (insn);
7089 }
7090
7091 else if (ele_reg_p
7092 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7093 {
7094 insn = gen_add3_insn (base_tmp, element_offset, op1);
7095 gcc_assert (insn != NULL_RTX);
7096 emit_insn (insn);
7097 }
7098
7099 else
7100 {
7101 emit_move_insn (base_tmp, op1);
7102 emit_insn (gen_add2_insn (base_tmp, element_offset));
7103 }
7104
7105 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7106 }
7107 }
7108
7109 else
7110 {
7111 emit_move_insn (base_tmp, addr);
7112 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7113 }
7114
7115 /* If we have a PLUS, we need to see whether the particular register class
7116 allows for D-FORM or X-FORM addressing. */
7117 if (GET_CODE (new_addr) == PLUS)
7118 {
7119 rtx op1 = XEXP (new_addr, 1);
7120 addr_mask_type addr_mask;
7121 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7122
7123 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7124 if (INT_REGNO_P (scalar_regno))
7125 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7126
7127 else if (FP_REGNO_P (scalar_regno))
7128 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7129
7130 else if (ALTIVEC_REGNO_P (scalar_regno))
7131 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7132
7133 else
7134 gcc_unreachable ();
7135
7136 if (REG_P (op1) || SUBREG_P (op1))
7137 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7138 else
7139 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7140 }
7141
7142 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7143 valid_addr_p = true;
7144
7145 else
7146 valid_addr_p = false;
7147
7148 if (!valid_addr_p)
7149 {
7150 emit_move_insn (base_tmp, new_addr);
7151 new_addr = base_tmp;
7152 }
7153
7154 return change_address (mem, scalar_mode, new_addr);
7155 }
7156
7157 /* Split a variable vec_extract operation into the component instructions. */
7158
7159 void
7160 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7161 rtx tmp_altivec)
7162 {
7163 machine_mode mode = GET_MODE (src);
7164 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
7165 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7166 int byte_shift = exact_log2 (scalar_size);
7167
7168 gcc_assert (byte_shift >= 0);
7169
7170 /* If we are given a memory address, optimize to load just the element. We
7171 don't have to adjust the vector element number on little endian
7172 systems. */
7173 if (MEM_P (src))
7174 {
7175 int num_elements = GET_MODE_NUNITS (mode);
7176 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7177
7178 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7179 gcc_assert (REG_P (tmp_gpr));
7180 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7181 tmp_gpr, scalar_mode));
7182 return;
7183 }
7184
7185 else if (REG_P (src) || SUBREG_P (src))
7186 {
7187 int num_elements = GET_MODE_NUNITS (mode);
7188 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7189 int bit_shift = 7 - exact_log2 (num_elements);
7190 rtx element2;
7191 unsigned int dest_regno = reg_or_subregno (dest);
7192 unsigned int src_regno = reg_or_subregno (src);
7193 unsigned int element_regno = reg_or_subregno (element);
7194
7195 gcc_assert (REG_P (tmp_gpr));
7196
7197 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7198 a general purpose register. */
7199 if (TARGET_P9_VECTOR
7200 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7201 && INT_REGNO_P (dest_regno)
7202 && ALTIVEC_REGNO_P (src_regno)
7203 && INT_REGNO_P (element_regno))
7204 {
7205 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7206 rtx element_si = gen_rtx_REG (SImode, element_regno);
7207
7208 if (mode == V16QImode)
7209 emit_insn (BYTES_BIG_ENDIAN
7210 ? gen_vextublx (dest_si, element_si, src)
7211 : gen_vextubrx (dest_si, element_si, src));
7212
7213 else if (mode == V8HImode)
7214 {
7215 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7216 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7217 emit_insn (BYTES_BIG_ENDIAN
7218 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7219 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7220 }
7221
7223 else
7224 {
7225 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7226 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7227 emit_insn (BYTES_BIG_ENDIAN
7228 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7229 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7230 }
7231
7232 return;
7233 }
7234
7236 gcc_assert (REG_P (tmp_altivec));
7237
7238 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7239 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7240 will shift the element into the upper position (adding 3 to convert a
7241 byte shift into a bit shift). */
7242 if (scalar_size == 8)
7243 {
7244 if (!BYTES_BIG_ENDIAN)
7245 {
7246 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7247 element2 = tmp_gpr;
7248 }
7249 else
7250 element2 = element;
7251
7252 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7253 bit. */
7254 emit_insn (gen_rtx_SET (tmp_gpr,
7255 gen_rtx_AND (DImode,
7256 gen_rtx_ASHIFT (DImode,
7257 element2,
7258 GEN_INT (6)),
7259 GEN_INT (64))));
7260 }
7261 else
7262 {
7263 if (!BYTES_BIG_ENDIAN)
7264 {
7265 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7266
7267 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7268 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7269 element2 = tmp_gpr;
7270 }
7271 else
7272 element2 = element;
7273
7274 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7275 }
7276
7277 /* Get the value into the lower byte of the Altivec register where VSLO
7278 expects it. */
7279 if (TARGET_P9_VECTOR)
7280 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7281 else if (can_create_pseudo_p ())
7282 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7283 else
7284 {
7285 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7286 emit_move_insn (tmp_di, tmp_gpr);
7287 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7288 }
7289
7290 /* Do the VSLO to get the value into the final location. */
7291 switch (mode)
7292 {
7293 case E_V2DFmode:
7294 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7295 return;
7296
7297 case E_V2DImode:
7298 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7299 return;
7300
7301 case E_V4SFmode:
7302 {
7303 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7304 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7305 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7306 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7307 tmp_altivec));
7308
7309 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7310 return;
7311 }
7312
7313 case E_V4SImode:
7314 case E_V8HImode:
7315 case E_V16QImode:
7316 {
7317 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7318 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7319 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7320 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7321 tmp_altivec));
7322 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7323 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7324 GEN_INT (64 - bits_in_element)));
7325 return;
7326 }
7327
7328 default:
7329 gcc_unreachable ();
7330 }
7331
7332 return;
7333 }
7334 else
7335 gcc_unreachable ();
7336 }
7337
7338 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7339 selects whether the alignment is ABI-mandated, optional, or
7340 both ABI-mandated and optional. */
7341
7342 unsigned int
7343 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7344 {
7345 if (how != align_opt)
7346 {
7347 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7348 align = 128;
7349 }
7350
7351 if (how != align_abi)
7352 {
7353 if (TREE_CODE (type) == ARRAY_TYPE
7354 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7355 {
7356 if (align < BITS_PER_WORD)
7357 align = BITS_PER_WORD;
7358 }
7359 }
7360
7361 return align;
7362 }
7363
7364 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7365 instructions simply ignore the low bits; VSX memory instructions
7366 are aligned to 4 or 8 bytes. */
7367
7368 static bool
7369 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7370 {
7371 return (STRICT_ALIGNMENT
7372 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7373 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7374 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7375 && (int) align < VECTOR_ALIGN (mode)))));
7376 }
7377
7378 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7379
7380 bool
7381 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7382 {
7383 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7384 {
7385 if (computed != 128)
7386 {
7387 static bool warned;
7388 if (!warned && warn_psabi)
7389 {
7390 warned = true;
7391 inform (input_location,
7392 "the layout of aggregates containing vectors with"
7393 " %d-byte alignment has changed in GCC 5",
7394 computed / BITS_PER_UNIT);
7395 }
7396 }
7397 /* In current GCC there is no special case. */
7398 return false;
7399 }
7400
7401 return false;
7402 }
7403
7404 /* AIX increases natural record alignment to doubleword if the first
7405 field is an FP double while the FP fields remain word aligned. */
7406
7407 unsigned int
7408 rs6000_special_round_type_align (tree type, unsigned int computed,
7409 unsigned int specified)
7410 {
7411 unsigned int align = MAX (computed, specified);
7412 tree field = TYPE_FIELDS (type);
7413
7414 /* Skip all non-field decls. */
7415 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7416 field = DECL_CHAIN (field);
7417
7418 if (field != NULL && field != type)
7419 {
7420 type = TREE_TYPE (field);
7421 while (TREE_CODE (type) == ARRAY_TYPE)
7422 type = TREE_TYPE (type);
7423
7424 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7425 align = MAX (align, 64);
7426 }
7427
7428 return align;
7429 }
7430
7431 /* Darwin increases record alignment to the natural alignment of
7432 the first field. */
7433
7434 unsigned int
7435 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7436 unsigned int specified)
7437 {
7438 unsigned int align = MAX (computed, specified);
7439
7440 if (TYPE_PACKED (type))
7441 return align;
7442
7443 /* Find the first field, looking down into aggregates. */
7444 do {
7445 tree field = TYPE_FIELDS (type);
7446 /* Skip all non-field decls. */
7447 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7448 field = DECL_CHAIN (field);
7449 if (! field)
7450 break;
7451 /* A packed field does not contribute any extra alignment. */
7452 if (DECL_PACKED (field))
7453 return align;
7454 type = TREE_TYPE (field);
7455 while (TREE_CODE (type) == ARRAY_TYPE)
7456 type = TREE_TYPE (type);
7457 } while (AGGREGATE_TYPE_P (type));
7458
7459 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7460 align = MAX (align, TYPE_ALIGN (type));
7461
7462 return align;
7463 }
7464
7465 /* Return 1 for an operand in small memory on V.4/eabi. */
7466
7467 int
7468 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7469 machine_mode mode ATTRIBUTE_UNUSED)
7470 {
7471 #if TARGET_ELF
7472 rtx sym_ref;
7473
7474 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7475 return 0;
7476
7477 if (DEFAULT_ABI != ABI_V4)
7478 return 0;
7479
7480 if (SYMBOL_REF_P (op))
7481 sym_ref = op;
7482
7483 else if (GET_CODE (op) != CONST
7484 || GET_CODE (XEXP (op, 0)) != PLUS
7485 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7486 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7487 return 0;
7488
7489 else
7490 {
7491 rtx sum = XEXP (op, 0);
7492 HOST_WIDE_INT summand;
7493
7494 /* We have to be careful here, because it is the referenced address
7495 that must be 32k from _SDA_BASE_, not just the symbol. */
7496 summand = INTVAL (XEXP (sum, 1));
7497 if (summand < 0 || summand > g_switch_value)
7498 return 0;
7499
7500 sym_ref = XEXP (sum, 0);
7501 }
7502
7503 return SYMBOL_REF_SMALL_P (sym_ref);
7504 #else
7505 return 0;
7506 #endif
7507 }
7508
7509 /* Return true if either operand is a general purpose register. */
7510
7511 bool
7512 gpr_or_gpr_p (rtx op0, rtx op1)
7513 {
7514 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7515 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7516 }
7517
7518 /* Return true if this is a direct-move operation between GPR registers and
7519 floating point/VSX registers. */
7520
7521 bool
7522 direct_move_p (rtx op0, rtx op1)
7523 {
7524 int regno0, regno1;
7525
7526 if (!REG_P (op0) || !REG_P (op1))
7527 return false;
7528
7529 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7530 return false;
7531
7532 regno0 = REGNO (op0);
7533 regno1 = REGNO (op1);
7534 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7535 return false;
7536
7537 if (INT_REGNO_P (regno0))
7538 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7539
7540 else if (INT_REGNO_P (regno1))
7541 {
7542 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7543 return true;
7544
7545 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7546 return true;
7547 }
7548
7549 return false;
7550 }
7551
7552 /* Return true if the OFFSET is valid for the quad address instructions that
7553 use d-form (register + offset) addressing. */
7554
7555 static inline bool
7556 quad_address_offset_p (HOST_WIDE_INT offset)
7557 {
7558 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7559 }
7560
7561 /* Return true if ADDR is an acceptable address for a quad memory
7562 operation of mode MODE (either LQ/STQ for general purpose registers, or
7563 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
7564 address must satisfy the strict RTL rules, i.e. only hard registers are
7565 acceptable as base registers. */
7566
7567 bool
7568 quad_address_p (rtx addr, machine_mode mode, bool strict)
7569 {
7570 rtx op0, op1;
7571
7572 if (GET_MODE_SIZE (mode) != 16)
7573 return false;
7574
7575 if (legitimate_indirect_address_p (addr, strict))
7576 return true;
7577
7578 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7579 return false;
7580
7581 if (GET_CODE (addr) != PLUS)
7582 return false;
7583
7584 op0 = XEXP (addr, 0);
7585 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7586 return false;
7587
7588 op1 = XEXP (addr, 1);
7589 if (!CONST_INT_P (op1))
7590 return false;
7591
7592 return quad_address_offset_p (INTVAL (op1));
7593 }
7594
7595 /* Return true if this is a load or store quad operation. This function does
7596 not handle the atomic quad memory instructions. */
7597
7598 bool
7599 quad_load_store_p (rtx op0, rtx op1)
7600 {
7601 bool ret;
7602
7603 if (!TARGET_QUAD_MEMORY)
7604 ret = false;
7605
7606 else if (REG_P (op0) && MEM_P (op1))
7607 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7608 && quad_memory_operand (op1, GET_MODE (op1))
7609 && !reg_overlap_mentioned_p (op0, op1));
7610
7611 else if (MEM_P (op0) && REG_P (op1))
7612 ret = (quad_memory_operand (op0, GET_MODE (op0))
7613 && quad_int_reg_operand (op1, GET_MODE (op1)));
7614
7615 else
7616 ret = false;
7617
7618 if (TARGET_DEBUG_ADDR)
7619 {
7620 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7621 ret ? "true" : "false");
7622 debug_rtx (gen_rtx_SET (op0, op1));
7623 }
7624
7625 return ret;
7626 }
7627
7628 /* Given an address, return a constant offset term if one exists. */
7629
7630 static rtx
7631 address_offset (rtx op)
7632 {
7633 if (GET_CODE (op) == PRE_INC
7634 || GET_CODE (op) == PRE_DEC)
7635 op = XEXP (op, 0);
7636 else if (GET_CODE (op) == PRE_MODIFY
7637 || GET_CODE (op) == LO_SUM)
7638 op = XEXP (op, 1);
7639
7640 if (GET_CODE (op) == CONST)
7641 op = XEXP (op, 0);
7642
7643 if (GET_CODE (op) == PLUS)
7644 op = XEXP (op, 1);
7645
7646 if (CONST_INT_P (op))
7647 return op;
7648
7649 return NULL_RTX;
7650 }
7651
7652 /* Return true if the MEM operand is a memory operand suitable for use
7653 with a (full width, possibly multiple) gpr load/store. On
7654 powerpc64 this means the offset must be divisible by 4.
7655 Implements 'Y' constraint.
7656
7657 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7658 a constraint function we know the operand has satisfied a suitable
7659 memory predicate.
7660
7661 Offsetting a lo_sum should not be allowed, except where we know by
7662 alignment that a 32k boundary is not crossed. Note that by
7663 "offsetting" here we mean a further offset to access parts of the
7664 MEM. It's fine to have a lo_sum where the inner address is offset
7665 from a sym, since the same sym+offset will appear in the high part
7666 of the address calculation. */
7667
7668 bool
7669 mem_operand_gpr (rtx op, machine_mode mode)
7670 {
7671 unsigned HOST_WIDE_INT offset;
7672 int extra;
7673 rtx addr = XEXP (op, 0);
7674
7675 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7676 if (TARGET_UPDATE
7677 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7678 && mode_supports_pre_incdec_p (mode)
7679 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7680 return true;
7681
7682 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7683 if (!rs6000_offsettable_memref_p (op, mode, false))
7684 return false;
7685
7686 op = address_offset (addr);
7687 if (op == NULL_RTX)
7688 return true;
7689
7690 offset = INTVAL (op);
7691 if (TARGET_POWERPC64 && (offset & 3) != 0)
7692 return false;
7693
7694 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7695 if (extra < 0)
7696 extra = 0;
7697
7698 if (GET_CODE (addr) == LO_SUM)
7699 /* For lo_sum addresses, we must allow any offset except one that
7700 causes a wrap, so test only the low 16 bits. */
7701 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7702
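/* The offset must lie within the signed 16-bit displacement range,
   leaving EXTRA bytes of headroom so that the last word of a
   multi-word access is also reachable.  */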
7703 return offset + 0x8000 < 0x10000u - extra;
7704 }
7705
7706 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7707 enforce an offset divisible by 4 even for 32-bit. */
7708
7709 bool
7710 mem_operand_ds_form (rtx op, machine_mode mode)
7711 {
7712 unsigned HOST_WIDE_INT offset;
7713 int extra;
7714 rtx addr = XEXP (op, 0);
7715
7716 if (!offsettable_address_p (false, mode, addr))
7717 return false;
7718
7719 op = address_offset (addr);
7720 if (op == NULL_RTX)
7721 return true;
7722
7723 offset = INTVAL (op);
7724 if ((offset & 3) != 0)
7725 return false;
7726
7727 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7728 if (extra < 0)
7729 extra = 0;
7730
7731 if (GET_CODE (addr) == LO_SUM)
7732 /* For lo_sum addresses, we must allow any offset except one that
7733 causes a wrap, so test only the low 16 bits. */
7734 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7735
7736 return offset + 0x8000 < 0x10000u - extra;
7737 }
7738 \f
7739 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7740
7741 static bool
7742 reg_offset_addressing_ok_p (machine_mode mode)
7743 {
7744 switch (mode)
7745 {
7746 case E_V16QImode:
7747 case E_V8HImode:
7748 case E_V4SFmode:
7749 case E_V4SImode:
7750 case E_V2DFmode:
7751 case E_V2DImode:
7752 case E_V1TImode:
7753 case E_TImode:
7754 case E_TFmode:
7755 case E_KFmode:
7756 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7757 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7758 a vector mode, if we want to use the VSX registers to move it around,
7759 we need to restrict ourselves to reg+reg addressing. Similarly for
7760 IEEE 128-bit floating point that is passed in a single vector
7761 register. */
7762 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7763 return mode_supports_dq_form (mode);
7764 break;
7765
7766 case E_SDmode:
7767 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7768 addressing for the LFIWZX and STFIWX instructions. */
7769 if (TARGET_NO_SDMODE_STACK)
7770 return false;
7771 break;
7772
7773 default:
7774 break;
7775 }
7776
7777 return true;
7778 }
7779
7780 static bool
7781 virtual_stack_registers_memory_p (rtx op)
7782 {
7783 int regnum;
7784
7785 if (REG_P (op))
7786 regnum = REGNO (op);
7787
7788 else if (GET_CODE (op) == PLUS
7789 && REG_P (XEXP (op, 0))
7790 && CONST_INT_P (XEXP (op, 1)))
7791 regnum = REGNO (XEXP (op, 0));
7792
7793 else
7794 return false;
7795
7796 return (regnum >= FIRST_VIRTUAL_REGISTER
7797 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7798 }
7799
7800 /* Return true if a MODE sized memory accesses to OP plus OFFSET
7801 is known to not straddle a 32k boundary. This function is used
7802 to determine whether -mcmodel=medium code can use TOC pointer
7803 relative addressing for OP. This means the alignment of the TOC
7804 pointer must also be taken into account, and unfortunately that is
7805 only 8 bytes. */
7806
7807 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7808 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7809 #endif
7810
7811 static bool
7812 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7813 machine_mode mode)
7814 {
7815 tree decl;
7816 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7817
7818 if (!SYMBOL_REF_P (op))
7819 return false;
7820
7821 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7822 SYMBOL_REF. */
7823 if (mode_supports_dq_form (mode))
7824 return false;
7825
7826 dsize = GET_MODE_SIZE (mode);
7827 decl = SYMBOL_REF_DECL (op);
7828 if (!decl)
7829 {
7830 if (dsize == 0)
7831 return false;
7832
7833 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7834 replacing memory addresses with an anchor plus offset. We
7835 could find the decl by rummaging around in the block->objects
7836 VEC for the given offset but that seems like too much work. */
7837 dalign = BITS_PER_UNIT;
7838 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7839 && SYMBOL_REF_ANCHOR_P (op)
7840 && SYMBOL_REF_BLOCK (op) != NULL)
7841 {
7842 struct object_block *block = SYMBOL_REF_BLOCK (op);
7843
7844 dalign = block->alignment;
7845 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7846 }
7847 else if (CONSTANT_POOL_ADDRESS_P (op))
7848 {
7849 /* It would be nice to have get_pool_align()... */
7850 machine_mode cmode = get_pool_mode (op);
7851
7852 dalign = GET_MODE_ALIGNMENT (cmode);
7853 }
7854 }
7855 else if (DECL_P (decl))
7856 {
7857 dalign = DECL_ALIGN (decl);
7858
7859 if (dsize == 0)
7860 {
7861 /* Allow BLKmode when the entire object is known to not
7862 cross a 32k boundary. */
7863 if (!DECL_SIZE_UNIT (decl))
7864 return false;
7865
7866 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7867 return false;
7868
7869 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7870 if (dsize > 32768)
7871 return false;
7872
7873 dalign /= BITS_PER_UNIT;
7874 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7875 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7876 return dalign >= dsize;
7877 }
7878 }
7879 else
7880 gcc_unreachable ();
7881
7882 /* Find how many bits of the alignment we know for this access. */
7883 dalign /= BITS_PER_UNIT;
7884 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7885 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
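/* OFFSET & -OFFSET isolates the lowest set bit of OFFSET, which
   bounds the guaranteed alignment of SYMBOL + OFFSET; the access
   cannot cross a 32k boundary if that alignment covers DSIZE.  */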
7886 mask = dalign - 1;
7887 lsb = offset & -offset;
7888 mask &= lsb - 1;
7889 dalign = mask + 1;
7890
7891 return dalign >= dsize;
7892 }
7893
7894 static bool
7895 constant_pool_expr_p (rtx op)
7896 {
7897 rtx base, offset;
7898
7899 split_const (op, &base, &offset);
7900 return (SYMBOL_REF_P (base)
7901 && CONSTANT_POOL_ADDRESS_P (base)
7902 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7903 }
7904
7905 /* These are only used to pass through from print_operand/print_operand_address
7906 to rs6000_output_addr_const_extra over the intervening function
7907 output_addr_const which is not target code. */
7908 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7909
7910 /* Return true if OP is a toc pointer relative address (the output
7911 of create_TOC_reference). If STRICT, do not match non-split
7912 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7913 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7914 TOCREL_OFFSET_RET respectively. */
7915
7916 bool
7917 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7918 const_rtx *tocrel_offset_ret)
7919 {
7920 if (!TARGET_TOC)
7921 return false;
7922
7923 if (TARGET_CMODEL != CMODEL_SMALL)
7924 {
7925 /* When strict, ensure we have everything tidy. */
7926 if (strict
7927 && !(GET_CODE (op) == LO_SUM
7928 && REG_P (XEXP (op, 0))
7929 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7930 return false;
7931
7932 /* When not strict, allow non-split TOC addresses and also allow
7933 (lo_sum (high ..)) TOC addresses created during reload. */
7934 if (GET_CODE (op) == LO_SUM)
7935 op = XEXP (op, 1);
7936 }
7937
7938 const_rtx tocrel_base = op;
7939 const_rtx tocrel_offset = const0_rtx;
7940
7941 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7942 {
7943 tocrel_base = XEXP (op, 0);
7944 tocrel_offset = XEXP (op, 1);
7945 }
7946
7947 if (tocrel_base_ret)
7948 *tocrel_base_ret = tocrel_base;
7949 if (tocrel_offset_ret)
7950 *tocrel_offset_ret = tocrel_offset;
7951
7952 return (GET_CODE (tocrel_base) == UNSPEC
7953 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7954 && REG_P (XVECEXP (tocrel_base, 0, 1))
7955 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7956 }
7957
7958 /* Return true if X is a constant pool address, and also for cmodel=medium
7959 if X is a toc-relative address known to be offsettable within MODE. */
7960
7961 bool
7962 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7963 bool strict)
7964 {
7965 const_rtx tocrel_base, tocrel_offset;
7966 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7967 && (TARGET_CMODEL != CMODEL_MEDIUM
7968 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7969 || mode == QImode
7970 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7971 INTVAL (tocrel_offset), mode)));
7972 }
7973
7974 static bool
7975 legitimate_small_data_p (machine_mode mode, rtx x)
7976 {
7977 return (DEFAULT_ABI == ABI_V4
7978 && !flag_pic && !TARGET_TOC
7979 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7980 && small_data_operand (x, mode));
7981 }
7982
7983 bool
7984 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7985 bool strict, bool worst_case)
7986 {
7987 unsigned HOST_WIDE_INT offset;
7988 unsigned int extra;
7989
7990 if (GET_CODE (x) != PLUS)
7991 return false;
7992 if (!REG_P (XEXP (x, 0)))
7993 return false;
7994 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7995 return false;
7996 if (mode_supports_dq_form (mode))
7997 return quad_address_p (x, mode, strict);
7998 if (!reg_offset_addressing_ok_p (mode))
7999 return virtual_stack_registers_memory_p (x);
8000 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8001 return true;
8002 if (!CONST_INT_P (XEXP (x, 1)))
8003 return false;
8004
8005 offset = INTVAL (XEXP (x, 1));
8006 extra = 0;
8007 switch (mode)
8008 {
8009 case E_DFmode:
8010 case E_DDmode:
8011 case E_DImode:
8012 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8013 addressing. */
8014 if (VECTOR_MEM_VSX_P (mode))
8015 return false;
8016
8017 if (!worst_case)
8018 break;
8019 if (!TARGET_POWERPC64)
8020 extra = 4;
8021 else if (offset & 3)
8022 return false;
8023 break;
8024
8025 case E_TFmode:
8026 case E_IFmode:
8027 case E_KFmode:
8028 case E_TDmode:
8029 case E_TImode:
8030 case E_PTImode:
8031 extra = 8;
8032 if (!worst_case)
8033 break;
8034 if (!TARGET_POWERPC64)
8035 extra = 12;
8036 else if (offset & 3)
8037 return false;
8038 break;
8039
8040 default:
8041 break;
8042 }
8043
8044 offset += 0x8000;
8045 return offset < 0x10000 - extra;
8046 }
8047
8048 bool
8049 legitimate_indexed_address_p (rtx x, int strict)
8050 {
8051 rtx op0, op1;
8052
8053 if (GET_CODE (x) != PLUS)
8054 return false;
8055
8056 op0 = XEXP (x, 0);
8057 op1 = XEXP (x, 1);
8058
8059 return (REG_P (op0) && REG_P (op1)
8060 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8061 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8062 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8063 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8064 }
8065
8066 bool
8067 avoiding_indexed_address_p (machine_mode mode)
8068 {
8069 /* Avoid indexed addressing for modes that have non-indexed
8070 load/store instruction forms. */
8071 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8072 }
8073
8074 bool
8075 legitimate_indirect_address_p (rtx x, int strict)
8076 {
8077 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8078 }
8079
8080 bool
8081 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8082 {
8083 if (!TARGET_MACHO || !flag_pic
8084 || mode != SImode || !MEM_P (x))
8085 return false;
8086 x = XEXP (x, 0);
8087
8088 if (GET_CODE (x) != LO_SUM)
8089 return false;
8090 if (!REG_P (XEXP (x, 0)))
8091 return false;
8092 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8093 return false;
8094 x = XEXP (x, 1);
8095
8096 return CONSTANT_P (x);
8097 }
8098
8099 static bool
8100 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8101 {
8102 if (GET_CODE (x) != LO_SUM)
8103 return false;
8104 if (!REG_P (XEXP (x, 0)))
8105 return false;
8106 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8107 return false;
8108 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8109 if (mode_supports_dq_form (mode))
8110 return false;
8111 x = XEXP (x, 1);
8112
8113 if (TARGET_ELF || TARGET_MACHO)
8114 {
8115 bool large_toc_ok;
8116
8117 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8118 return false;
8119 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as that is usually
8120 called via push_reload from old reload pass code.
8121 LEGITIMIZE_RELOAD_ADDRESS recognizes some LO_SUM addresses as
8122 valid although this function says the opposite. In most cases
8123 LRA can, through various transformations, generate correct code
8124 for address reloads; only some LO_SUM cases defeat it, so we
8125 add code here saying that those addresses are still valid. */
8126 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8127 && small_toc_ref (x, VOIDmode));
8128 if (TARGET_TOC && ! large_toc_ok)
8129 return false;
8130 if (GET_MODE_NUNITS (mode) != 1)
8131 return false;
8132 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8133 && !(/* ??? Assume floating point reg based on mode? */
8134 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8135 return false;
8136
8137 return CONSTANT_P (x) || large_toc_ok;
8138 }
8139
8140 return false;
8141 }
8142
8143
8144 /* Try machine-dependent ways of modifying an illegitimate address
8145 to be legitimate. If we find one, return the new, valid address.
8146 This is used from only one place: `memory_address' in explow.c.
8147
8148 OLDX is the address as it was before break_out_memory_refs was
8149 called. In some cases it is useful to look at this to decide what
8150 needs to be done.
8151
8152 It is always safe for this function to do nothing. It exists to
8153 recognize opportunities to optimize the output.
8154
8155 On RS/6000, first check for the sum of a register with a constant
8156 integer that is out of range. If so, generate code to add the
8157 constant with the low-order 16 bits masked to the register and force
8158 this result into another register (this can be done with `cau').
8159 Then generate an address of REG+(CONST&0xffff), allowing for the
8160 possibility of bit 16 being a one.
8161
8162 Then check for the sum of a register and something not constant, try to
8163 load the other things into a register and return the sum. */
8164
8165 static rtx
8166 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8167 machine_mode mode)
8168 {
8169 unsigned int extra;
8170
8171 if (!reg_offset_addressing_ok_p (mode)
8172 || mode_supports_dq_form (mode))
8173 {
8174 if (virtual_stack_registers_memory_p (x))
8175 return x;
8176
8177 /* In theory we should not be seeing addresses of the form reg+0,
8178 but just in case it is generated, optimize it away. */
8179 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8180 return force_reg (Pmode, XEXP (x, 0));
8181
8182 /* For TImode with load/store quad, restrict addresses to just a single
8183 pointer, so it works with both GPRs and VSX registers. */
8184 /* Make sure both operands are registers. */
8185 else if (GET_CODE (x) == PLUS
8186 && (mode != TImode || !TARGET_VSX))
8187 return gen_rtx_PLUS (Pmode,
8188 force_reg (Pmode, XEXP (x, 0)),
8189 force_reg (Pmode, XEXP (x, 1)));
8190 else
8191 return force_reg (Pmode, x);
8192 }
8193 if (SYMBOL_REF_P (x))
8194 {
8195 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8196 if (model != 0)
8197 return rs6000_legitimize_tls_address (x, model);
8198 }
8199
8200 extra = 0;
8201 switch (mode)
8202 {
8203 case E_TFmode:
8204 case E_TDmode:
8205 case E_TImode:
8206 case E_PTImode:
8207 case E_IFmode:
8208 case E_KFmode:
8209 /* As in legitimate_offset_address_p we do not assume
8210 worst-case. The mode here is just a hint as to the registers
8211 used. A TImode is usually in gprs, but may actually be in
8212 fprs. Leave worst-case scenario for reload to handle via
8213 insn constraints. PTImode is only GPRs. */
8214 extra = 8;
8215 break;
8216 default:
8217 break;
8218 }
8219
8220 if (GET_CODE (x) == PLUS
8221 && REG_P (XEXP (x, 0))
8222 && CONST_INT_P (XEXP (x, 1))
8223 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8224 >= 0x10000 - extra))
8225 {
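/* Split the displacement into a sign-extended low 16-bit part and a
   high part that ADDIS can add; if the low part would leave too
   little headroom for EXTRA, fold everything into the high part.  */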
8226 HOST_WIDE_INT high_int, low_int;
8227 rtx sum;
8228 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8229 if (low_int >= 0x8000 - extra)
8230 low_int = 0;
8231 high_int = INTVAL (XEXP (x, 1)) - low_int;
8232 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8233 GEN_INT (high_int)), 0);
8234 return plus_constant (Pmode, sum, low_int);
8235 }
8236 else if (GET_CODE (x) == PLUS
8237 && REG_P (XEXP (x, 0))
8238 && !CONST_INT_P (XEXP (x, 1))
8239 && GET_MODE_NUNITS (mode) == 1
8240 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8241 || (/* ??? Assume floating point reg based on mode? */
8242 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8243 && !avoiding_indexed_address_p (mode))
8244 {
8245 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8246 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8247 }
8248 else if ((TARGET_ELF
8249 #if TARGET_MACHO
8250 || !MACHO_DYNAMIC_NO_PIC_P
8251 #endif
8252 )
8253 && TARGET_32BIT
8254 && TARGET_NO_TOC
8255 && !flag_pic
8256 && !CONST_INT_P (x)
8257 && !CONST_WIDE_INT_P (x)
8258 && !CONST_DOUBLE_P (x)
8259 && CONSTANT_P (x)
8260 && GET_MODE_NUNITS (mode) == 1
8261 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8262 || (/* ??? Assume floating point reg based on mode? */
8263 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8264 {
8265 rtx reg = gen_reg_rtx (Pmode);
8266 if (TARGET_ELF)
8267 emit_insn (gen_elf_high (reg, x));
8268 else
8269 emit_insn (gen_macho_high (reg, x));
8270 return gen_rtx_LO_SUM (Pmode, reg, x);
8271 }
8272 else if (TARGET_TOC
8273 && SYMBOL_REF_P (x)
8274 && constant_pool_expr_p (x)
8275 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8276 return create_TOC_reference (x, NULL_RTX);
8277 else
8278 return x;
8279 }
8280
8281 /* Debug version of rs6000_legitimize_address. */
8282 static rtx
8283 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8284 {
8285 rtx ret;
8286 rtx_insn *insns;
8287
8288 start_sequence ();
8289 ret = rs6000_legitimize_address (x, oldx, mode);
8290 insns = get_insns ();
8291 end_sequence ();
8292
8293 if (ret != x)
8294 {
8295 fprintf (stderr,
8296 "\nrs6000_legitimize_address: mode %s, old code %s, "
8297 "new code %s, modified\n",
8298 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8299 GET_RTX_NAME (GET_CODE (ret)));
8300
8301 fprintf (stderr, "Original address:\n");
8302 debug_rtx (x);
8303
8304 fprintf (stderr, "oldx:\n");
8305 debug_rtx (oldx);
8306
8307 fprintf (stderr, "New address:\n");
8308 debug_rtx (ret);
8309
8310 if (insns)
8311 {
8312 fprintf (stderr, "Insns added:\n");
8313 debug_rtx_list (insns, 20);
8314 }
8315 }
8316 else
8317 {
8318 fprintf (stderr,
8319 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8320 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8321
8322 debug_rtx (x);
8323 }
8324
8325 if (insns)
8326 emit_insn (insns);
8327
8328 return ret;
8329 }
8330
8331 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8332 We need to emit DTP-relative relocations. */
8333
8334 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8335 static void
8336 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8337 {
8338 switch (size)
8339 {
8340 case 4:
8341 fputs ("\t.long\t", file);
8342 break;
8343 case 8:
8344 fputs (DOUBLE_INT_ASM_OP, file);
8345 break;
8346 default:
8347 gcc_unreachable ();
8348 }
8349 output_addr_const (file, x);
8350 if (TARGET_ELF)
8351 fputs ("@dtprel+0x8000", file);
8352 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8353 {
8354 switch (SYMBOL_REF_TLS_MODEL (x))
8355 {
8356 case 0:
8357 break;
8358 case TLS_MODEL_LOCAL_EXEC:
8359 fputs ("@le", file);
8360 break;
8361 case TLS_MODEL_INITIAL_EXEC:
8362 fputs ("@ie", file);
8363 break;
8364 case TLS_MODEL_GLOBAL_DYNAMIC:
8365 case TLS_MODEL_LOCAL_DYNAMIC:
8366 fputs ("@m", file);
8367 break;
8368 default:
8369 gcc_unreachable ();
8370 }
8371 }
8372 }
8373
8374 /* Return true if X is a symbol that refers to real (rather than emulated)
8375 TLS. */
8376
8377 static bool
8378 rs6000_real_tls_symbol_ref_p (rtx x)
8379 {
8380 return (SYMBOL_REF_P (x)
8381 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8382 }
8383
8384 /* In the name of slightly smaller debug output, and to cater to
8385 general assembler lossage, recognize various UNSPEC sequences
8386 and turn them back into a direct symbol reference. */
8387
8388 static rtx
8389 rs6000_delegitimize_address (rtx orig_x)
8390 {
8391 rtx x, y, offset;
8392
8393 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8394 orig_x = XVECEXP (orig_x, 0, 0);
8395
8396 orig_x = delegitimize_mem_from_attrs (orig_x);
8397
8398 x = orig_x;
8399 if (MEM_P (x))
8400 x = XEXP (x, 0);
8401
8402 y = x;
8403 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8404 y = XEXP (y, 1);
8405
8406 offset = NULL_RTX;
8407 if (GET_CODE (y) == PLUS
8408 && GET_MODE (y) == Pmode
8409 && CONST_INT_P (XEXP (y, 1)))
8410 {
8411 offset = XEXP (y, 1);
8412 y = XEXP (y, 0);
8413 }
8414
8415 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8416 {
8417 y = XVECEXP (y, 0, 0);
8418
8419 #ifdef HAVE_AS_TLS
8420 /* Do not associate thread-local symbols with the original
8421 constant pool symbol. */
8422 if (TARGET_XCOFF
8423 && SYMBOL_REF_P (y)
8424 && CONSTANT_POOL_ADDRESS_P (y)
8425 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8426 return orig_x;
8427 #endif
8428
8429 if (offset != NULL_RTX)
8430 y = gen_rtx_PLUS (Pmode, y, offset);
8431 if (!MEM_P (orig_x))
8432 return y;
8433 else
8434 return replace_equiv_address_nv (orig_x, y);
8435 }
8436
8437 if (TARGET_MACHO
8438 && GET_CODE (orig_x) == LO_SUM
8439 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8440 {
8441 y = XEXP (XEXP (orig_x, 1), 0);
8442 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8443 return XVECEXP (y, 0, 0);
8444 }
8445
8446 return orig_x;
8447 }
8448
8449 /* Return true if X shouldn't be emitted into the debug info.
8450 The linker doesn't like .toc section references from
8451 .debug_* sections, so reject .toc section symbols. */
8452
8453 static bool
8454 rs6000_const_not_ok_for_debug_p (rtx x)
8455 {
8456 if (GET_CODE (x) == UNSPEC)
8457 return true;
8458 if (SYMBOL_REF_P (x)
8459 && CONSTANT_POOL_ADDRESS_P (x))
8460 {
8461 rtx c = get_pool_constant (x);
8462 machine_mode cmode = get_pool_mode (x);
8463 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8464 return true;
8465 }
8466
8467 return false;
8468 }
8469
8470 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8471
8472 static bool
8473 rs6000_legitimate_combined_insn (rtx_insn *insn)
8474 {
8475 int icode = INSN_CODE (insn);
8476
8477 /* Reject creating doloop insns. Combine should not be allowed
8478 to create these for a number of reasons:
8479 1) In a nested loop, if combine creates one of these in an
8480 outer loop and the register allocator happens to allocate ctr
8481 to the outer loop insn, then the inner loop can't use ctr.
8482 Inner loops ought to be more highly optimized.
8483 2) Combine often wants to create one of these from what was
8484 originally a three insn sequence, first combining the three
8485 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8486 allocated ctr, the splitter takes us back to the three insn
8487 sequence. It's better to stop combine at the two insn
8488 sequence.
8489 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8490 insns, the register allocator sometimes uses floating point
8491 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8492 jump insn and output reloads are not implemented for jumps,
8493 the ctrsi/ctrdi splitters need to handle all possible cases.
8494 That's a pain, and it gets to be seriously difficult when a
8495 splitter that runs after reload needs memory to transfer from
8496 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8497 for the difficult case. It's better to not create problems
8498 in the first place. */
8499 if (icode != CODE_FOR_nothing
8500 && (icode == CODE_FOR_bdz_si
8501 || icode == CODE_FOR_bdz_di
8502 || icode == CODE_FOR_bdnz_si
8503 || icode == CODE_FOR_bdnz_di
8504 || icode == CODE_FOR_bdztf_si
8505 || icode == CODE_FOR_bdztf_di
8506 || icode == CODE_FOR_bdnztf_si
8507 || icode == CODE_FOR_bdnztf_di))
8508 return false;
8509
8510 return true;
8511 }
8512
8513 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8514
8515 static GTY(()) rtx rs6000_tls_symbol;
8516 static rtx
8517 rs6000_tls_get_addr (void)
8518 {
8519 if (!rs6000_tls_symbol)
8520 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8521
8522 return rs6000_tls_symbol;
8523 }
8524
8525 /* Construct the SYMBOL_REF for TLS GOT references. */
8526
8527 static GTY(()) rtx rs6000_got_symbol;
8528 static rtx
8529 rs6000_got_sym (void)
8530 {
8531 if (!rs6000_got_symbol)
8532 {
8533 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8534 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8535 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8536 }
8537
8538 return rs6000_got_symbol;
8539 }
8540
8541 /* AIX Thread-Local Address support. */
8542
8543 static rtx
8544 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8545 {
8546 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8547 const char *name;
8548 char *tlsname;
8549
8550 name = XSTR (addr, 0);
8551 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8552 or the symbol will be placed in the TLS private data section. */
8553 if (name[strlen (name) - 1] != ']'
8554 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8555 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8556 {
8557 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8558 strcpy (tlsname, name);
8559 strcat (tlsname,
8560 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8561 tlsaddr = copy_rtx (addr);
8562 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8563 }
8564 else
8565 tlsaddr = addr;
8566
8567 /* Place addr into TOC constant pool. */
8568 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8569
8570 /* Output the TOC entry and create the MEM referencing the value. */
8571 if (constant_pool_expr_p (XEXP (sym, 0))
8572 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8573 {
8574 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8575 mem = gen_const_mem (Pmode, tocref);
8576 set_mem_alias_set (mem, get_TOC_alias_set ());
8577 }
8578 else
8579 return sym;
8580
8581 /* Use global-dynamic for local-dynamic. */
8582 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8583 || model == TLS_MODEL_LOCAL_DYNAMIC)
8584 {
8585 /* Create new TOC reference for @m symbol. */
8586 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8587 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8588 strcpy (tlsname, "*LCM");
8589 strcat (tlsname, name + 3);
8590 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8591 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8592 tocref = create_TOC_reference (modaddr, NULL_RTX);
8593 rtx modmem = gen_const_mem (Pmode, tocref);
8594 set_mem_alias_set (modmem, get_TOC_alias_set ());
8595
8596 rtx modreg = gen_reg_rtx (Pmode);
8597 emit_insn (gen_rtx_SET (modreg, modmem));
8598
8599 tmpreg = gen_reg_rtx (Pmode);
8600 emit_insn (gen_rtx_SET (tmpreg, mem));
8601
8602 dest = gen_reg_rtx (Pmode);
8603 if (TARGET_32BIT)
8604 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8605 else
8606 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8607 return dest;
8608 }
8609 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8610 else if (TARGET_32BIT)
8611 {
8612 tlsreg = gen_reg_rtx (SImode);
8613 emit_insn (gen_tls_get_tpointer (tlsreg));
8614 }
8615 else
8616 tlsreg = gen_rtx_REG (DImode, 13);
8617
8618 /* Load the TOC value into temporary register. */
8619 tmpreg = gen_reg_rtx (Pmode);
8620 emit_insn (gen_rtx_SET (tmpreg, mem));
8621 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8622 gen_rtx_MINUS (Pmode, addr, tlsreg));
8623
8624 /* Add TOC symbol value to TLS pointer. */
8625 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8626
8627 return dest;
8628 }
8629
8630 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8631 __tls_get_addr call. */
8632
8633 void
8634 rs6000_output_tlsargs (rtx *operands)
8635 {
8636 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8637 rtx op[3];
8638
8639 /* The set dest of the call, ie. r3, which is also the first arg reg. */
8640 op[0] = operands[0];
8641 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8642 op[1] = XVECEXP (operands[2], 0, 0);
8643 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8644 {
8645 /* The GOT register. */
8646 op[2] = XVECEXP (operands[2], 0, 1);
8647 if (TARGET_CMODEL != CMODEL_SMALL)
8648 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8649 "addi %0,%0,%1@got@tlsgd@l", op);
8650 else
8651 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8652 }
8653 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8654 {
8655 if (TARGET_CMODEL != CMODEL_SMALL)
8656 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8657 "addi %0,%0,%&@got@tlsld@l", op);
8658 else
8659 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8660 }
8661 else
8662 gcc_unreachable ();
8663 }
8664
8665 /* Passes the TLS arg value from the global-dynamic and local-dynamic
8666 emit_library_call_value calls in rs6000_legitimize_tls_address
8667 through to rs6000_call_aix and rs6000_call_sysv. This is used to
8668 emit the marker relocs put on __tls_get_addr calls. */
8669 static rtx global_tlsarg;
8670
8671 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8672 this (thread-local) address. */
8673
8674 static rtx
8675 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8676 {
8677 rtx dest, insn;
8678
8679 if (TARGET_XCOFF)
8680 return rs6000_legitimize_tls_address_aix (addr, model);
8681
8682 dest = gen_reg_rtx (Pmode);
8683 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8684 {
8685 rtx tlsreg;
8686
8687 if (TARGET_64BIT)
8688 {
8689 tlsreg = gen_rtx_REG (Pmode, 13);
8690 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8691 }
8692 else
8693 {
8694 tlsreg = gen_rtx_REG (Pmode, 2);
8695 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8696 }
8697 emit_insn (insn);
8698 }
8699 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8700 {
8701 rtx tlsreg, tmp;
8702
8703 tmp = gen_reg_rtx (Pmode);
8704 if (TARGET_64BIT)
8705 {
8706 tlsreg = gen_rtx_REG (Pmode, 13);
8707 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8708 }
8709 else
8710 {
8711 tlsreg = gen_rtx_REG (Pmode, 2);
8712 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8713 }
8714 emit_insn (insn);
8715 if (TARGET_64BIT)
8716 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8717 else
8718 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8719 emit_insn (insn);
8720 }
8721 else
8722 {
8723 rtx got, tga, tmp1, tmp2;
8724
8725 /* We currently use relocations like @got@tlsgd for tls, which
8726 means the linker will handle allocation of tls entries, placing
8727 them in the .got section. So use a pointer to the .got section,
8728 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8729 or to secondary GOT sections used by 32-bit -fPIC. */
8730 if (TARGET_64BIT)
8731 got = gen_rtx_REG (Pmode, 2);
8732 else
8733 {
8734 if (flag_pic == 1)
8735 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8736 else
8737 {
8738 rtx gsym = rs6000_got_sym ();
8739 got = gen_reg_rtx (Pmode);
8740 if (flag_pic == 0)
8741 rs6000_emit_move (got, gsym, Pmode);
8742 else
8743 {
8744 rtx mem, lab;
8745
8746 tmp1 = gen_reg_rtx (Pmode);
8747 tmp2 = gen_reg_rtx (Pmode);
8748 mem = gen_const_mem (Pmode, tmp1);
8749 lab = gen_label_rtx ();
8750 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8751 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8752 if (TARGET_LINK_STACK)
8753 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8754 emit_move_insn (tmp2, mem);
8755 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8756 set_unique_reg_note (last, REG_EQUAL, gsym);
8757 }
8758 }
8759 }
8760
8761 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8762 {
8763 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8764 UNSPEC_TLSGD);
8765 tga = rs6000_tls_get_addr ();
8766 global_tlsarg = arg;
8767 if (TARGET_TLS_MARKERS)
8768 {
8769 rtx argreg = gen_rtx_REG (Pmode, 3);
8770 emit_insn (gen_rtx_SET (argreg, arg));
8771 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8772 argreg, Pmode);
8773 }
8774 else
8775 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8776 global_tlsarg = NULL_RTX;
8777
8778 /* Make a note so that the result of this call can be CSEd. */
8779 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8780 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8781 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8782 }
8783 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8784 {
8785 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8786 tga = rs6000_tls_get_addr ();
8787 tmp1 = gen_reg_rtx (Pmode);
8788 global_tlsarg = arg;
8789 if (TARGET_TLS_MARKERS)
8790 {
8791 rtx argreg = gen_rtx_REG (Pmode, 3);
8792 emit_insn (gen_rtx_SET (argreg, arg));
8793 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8794 argreg, Pmode);
8795 }
8796 else
8797 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8798 global_tlsarg = NULL_RTX;
8799
8800 /* Make a note so that the result of this call can be CSEd. */
8801 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8802 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8803 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8804
8805 if (rs6000_tls_size == 16)
8806 {
8807 if (TARGET_64BIT)
8808 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8809 else
8810 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8811 }
8812 else if (rs6000_tls_size == 32)
8813 {
8814 tmp2 = gen_reg_rtx (Pmode);
8815 if (TARGET_64BIT)
8816 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8817 else
8818 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8819 emit_insn (insn);
8820 if (TARGET_64BIT)
8821 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8822 else
8823 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8824 }
8825 else
8826 {
8827 tmp2 = gen_reg_rtx (Pmode);
8828 if (TARGET_64BIT)
8829 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8830 else
8831 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8832 emit_insn (insn);
8833 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8834 }
8835 emit_insn (insn);
8836 }
8837 else
8838 {
8839 /* IE, or 64-bit offset LE. */
8840 tmp2 = gen_reg_rtx (Pmode);
8841 if (TARGET_64BIT)
8842 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8843 else
8844 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8845 emit_insn (insn);
8846 if (TARGET_64BIT)
8847 insn = gen_tls_tls_64 (dest, tmp2, addr);
8848 else
8849 insn = gen_tls_tls_32 (dest, tmp2, addr);
8850 emit_insn (insn);
8851 }
8852 }
8853
8854 return dest;
8855 }
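
/* Source-level illustration (hypothetical code, not used here): for

     extern __thread int t;       /- typically global-dynamic when PIC -/
     static __thread int s;       /- can relax to local-exec -/

   a read of `t' arrives with ADDR being its SYMBOL_REF and MODEL
   chosen by the compiler from -ftls-model and symbol visibility; the
   local-exec rs6000_tls_size == 16 case above then reduces the access
   to a single tprel addition off the thread pointer.  */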
8856
8857 /* Only create the global variable for the stack protect guard if we are using
8858 the global flavor of that guard. */
8859 static tree
8860 rs6000_init_stack_protect_guard (void)
8861 {
8862 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8863 return default_stack_protect_guard ();
8864
8865 return NULL_TREE;
8866 }
8867
8868 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8869
8870 static bool
8871 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8872 {
8873 if (GET_CODE (x) == HIGH
8874 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8875 return true;
8876
8877 /* A TLS symbol in the TOC cannot contain a sum. */
8878 if (GET_CODE (x) == CONST
8879 && GET_CODE (XEXP (x, 0)) == PLUS
8880 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8881 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8882 return true;
8883
8884 /* Do not place an ELF TLS symbol in the constant pool. */
8885 return TARGET_ELF && tls_referenced_p (x);
8886 }
8887
8888 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8889 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8890 can be addressed relative to the toc pointer. */
8891
8892 static bool
8893 use_toc_relative_ref (rtx sym, machine_mode mode)
8894 {
8895 return ((constant_pool_expr_p (sym)
8896 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8897 get_pool_mode (sym)))
8898 || (TARGET_CMODEL == CMODEL_MEDIUM
8899 && SYMBOL_REF_LOCAL_P (sym)
8900 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8901 }
8902
8903 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8904 that is a valid memory address for an instruction.
8905 The MODE argument is the machine mode for the MEM expression
8906 that wants to use this address.
8907
8908 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
8909 refers to a constant pool entry of an address (or the sum of it
8910 plus a constant), a short (16-bit signed) constant plus a register,
8911 the sum of two registers, or a register indirect, possibly with an
8912 auto-increment. For DFmode, DDmode and DImode with a constant plus
8913 register, we must ensure that both words are addressable, or that we
8914 are on PowerPC64 with the offset word-aligned.
8915
8916 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8917 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8918 because adjacent memory cells are accessed by adding word-sized offsets
8919 during assembly output. */
8920 static bool
8921 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8922 {
8923 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8924 bool quad_offset_p = mode_supports_dq_form (mode);
8925
8926 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8927 if (VECTOR_MEM_ALTIVEC_P (mode)
8928 && GET_CODE (x) == AND
8929 && CONST_INT_P (XEXP (x, 1))
8930 && INTVAL (XEXP (x, 1)) == -16)
8931 x = XEXP (x, 0);
8932
8933 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8934 return 0;
8935 if (legitimate_indirect_address_p (x, reg_ok_strict))
8936 return 1;
8937 if (TARGET_UPDATE
8938 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8939 && mode_supports_pre_incdec_p (mode)
8940 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8941 return 1;
8942 /* Handle restricted vector d-form offsets in ISA 3.0. */
8943 if (quad_offset_p)
8944 {
8945 if (quad_address_p (x, mode, reg_ok_strict))
8946 return 1;
8947 }
8948 else if (virtual_stack_registers_memory_p (x))
8949 return 1;
8950
8951 else if (reg_offset_p)
8952 {
8953 if (legitimate_small_data_p (mode, x))
8954 return 1;
8955 if (legitimate_constant_pool_address_p (x, mode,
8956 reg_ok_strict || lra_in_progress))
8957 return 1;
8958 }
8959
8960 /* For TImode, if we have TImode in VSX registers, only allow register
8961 indirect addresses. This will allow the values to go in either GPRs
8962 or VSX registers without reloading. The vector types would tend to
8963 go into VSX registers, so we allow REG+REG, while TImode seems
8964 somewhat split, in that some uses are GPR based, and some VSX based. */
8965 /* FIXME: We could loosen this by changing the following to
8966 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
8967 but currently we cannot allow REG+REG addressing for TImode. See
8968 PR72827 for complete details on how this ends up hoodwinking DSE. */
8969 if (mode == TImode && TARGET_VSX)
8970 return 0;
8971 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
8972 if (! reg_ok_strict
8973 && reg_offset_p
8974 && GET_CODE (x) == PLUS
8975 && REG_P (XEXP (x, 0))
8976 && (XEXP (x, 0) == virtual_stack_vars_rtx
8977 || XEXP (x, 0) == arg_pointer_rtx)
8978 && CONST_INT_P (XEXP (x, 1)))
8979 return 1;
8980 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8981 return 1;
8982 if (!FLOAT128_2REG_P (mode)
8983 && (TARGET_HARD_FLOAT
8984 || TARGET_POWERPC64
8985 || (mode != DFmode && mode != DDmode))
8986 && (TARGET_POWERPC64 || mode != DImode)
8987 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8988 && mode != PTImode
8989 && !avoiding_indexed_address_p (mode)
8990 && legitimate_indexed_address_p (x, reg_ok_strict))
8991 return 1;
8992 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8993 && mode_supports_pre_modify_p (mode)
8994 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8995 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8996 reg_ok_strict, false)
8997 || (!avoiding_indexed_address_p (mode)
8998 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8999 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9000 return 1;
9001 if (reg_offset_p && !quad_offset_p
9002 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9003 return 1;
9004 return 0;
9005 }
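
/* A few concrete DImode examples of what the predicate above accepts
   (a sketch assuming 64-bit code and reg_ok_strict clear):

     (reg:DI 3)                                ; register indirect
     (plus:DI (reg:DI 3) (const_int 16))       ; reg + 16-bit offset
     (plus:DI (reg:DI 3) (reg:DI 4))           ; indexed, reg + reg
     (pre_inc:DI (reg:DI 3))                   ; with TARGET_UPDATE

   whereas (plus:DI (reg:DI 3) (const_int 0x10000)) is rejected
   because the offset does not fit a signed 16-bit displacement.  */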
9006
9007 /* Debug version of rs6000_legitimate_address_p. */
9008 static bool
9009 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9010 bool reg_ok_strict)
9011 {
9012 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9013 fprintf (stderr,
9014 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9015 "strict = %d, reload = %s, code = %s\n",
9016 ret ? "true" : "false",
9017 GET_MODE_NAME (mode),
9018 reg_ok_strict,
9019 (reload_completed ? "after" : "before"),
9020 GET_RTX_NAME (GET_CODE (x)));
9021 debug_rtx (x);
9022
9023 return ret;
9024 }
9025
9026 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9027
9028 static bool
9029 rs6000_mode_dependent_address_p (const_rtx addr,
9030 addr_space_t as ATTRIBUTE_UNUSED)
9031 {
9032 return rs6000_mode_dependent_address_ptr (addr);
9033 }
9034
9035 /* Go to LABEL if ADDR (a legitimate address expression)
9036 has an effect that depends on the machine mode it is used for.
9037
9038 On the RS/6000 this is true of all integral offsets (since AltiVec
9039 and VSX modes don't allow them) and of pre-increment and pre-decrement addresses.
9040
9041 ??? Except that due to conceptual problems in offsettable_address_p
9042 we can't really report the problems of integral offsets. So leave
9043 this assuming that the adjustable offset must be valid for the
9044 sub-words of a TFmode operand, which is what we had before. */
9045
9046 static bool
9047 rs6000_mode_dependent_address (const_rtx addr)
9048 {
9049 switch (GET_CODE (addr))
9050 {
9051 case PLUS:
9052 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9053 is considered a legitimate address before reload, so there
9054 are no offset restrictions in that case. Note that this
9055 condition is safe in strict mode because any address involving
9056 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9057 been rejected as illegitimate. */
9058 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9059 && XEXP (addr, 0) != arg_pointer_rtx
9060 && CONST_INT_P (XEXP (addr, 1)))
9061 {
9062 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9063 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9064 }
9065 break;
9066
9067 case LO_SUM:
9068 /* Anything in the constant pool is sufficiently aligned that
9069 all bytes have the same high part address. */
9070 return !legitimate_constant_pool_address_p (addr, QImode, false);
9071
9072 /* Auto-increment cases are now treated generically in recog.c. */
9073 case PRE_MODIFY:
9074 return TARGET_UPDATE;
9075
9076 /* AND is only allowed in Altivec loads. */
9077 case AND:
9078 return true;
9079
9080 default:
9081 break;
9082 }
9083
9084 return false;
9085 }
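
/* Worked example of the PLUS case above: with TARGET_POWERPC64 an
   offset VAL is mode-independent iff VAL + 0x8000 < 0x10000 - 8,
   i.e. -32768 <= VAL <= 32759, which leaves room for the +8 access
   to the second doubleword of a 16-byte operand.  So an offset of
   32752 is fine, while 32760 makes the address mode-dependent (the
   32-bit bound is 12, for the last word of a four-word TFmode).  */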
9086
9087 /* Debug version of rs6000_mode_dependent_address. */
9088 static bool
9089 rs6000_debug_mode_dependent_address (const_rtx addr)
9090 {
9091 bool ret = rs6000_mode_dependent_address (addr);
9092
9093 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9094 ret ? "true" : "false");
9095 debug_rtx (addr);
9096
9097 return ret;
9098 }
9099
9100 /* Implement FIND_BASE_TERM. */
9101
9102 rtx
9103 rs6000_find_base_term (rtx op)
9104 {
9105 rtx base;
9106
9107 base = op;
9108 if (GET_CODE (base) == CONST)
9109 base = XEXP (base, 0);
9110 if (GET_CODE (base) == PLUS)
9111 base = XEXP (base, 0);
9112 if (GET_CODE (base) == UNSPEC)
9113 switch (XINT (base, 1))
9114 {
9115 case UNSPEC_TOCREL:
9116 case UNSPEC_MACHOPIC_OFFSET:
9117 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9118 for aliasing purposes. */
9119 return XVECEXP (base, 0, 0);
9120 }
9121
9122 return op;
9123 }
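
/* For example, a TOC-relative reference such as

     (const (plus (unspec [(symbol_ref "x") (reg 2)] UNSPEC_TOCREL)
                  (const_int 8)))

   is stripped down to the inner UNSPEC and yields (symbol_ref "x"),
   so alias analysis treats `x' itself as the base term.  */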
9124
9125 /* More elaborate version of recog's offsettable_memref_p predicate
9126 that works around the ??? note of rs6000_mode_dependent_address.
9127 In particular it accepts
9128
9129 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9130
9131 in 32-bit mode, which the recog predicate rejects. */
9132
9133 static bool
9134 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9135 {
9136 bool worst_case;
9137
9138 if (!MEM_P (op))
9139 return false;
9140
9141 /* First mimic offsettable_memref_p. */
9142 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9143 return true;
9144
9145 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9146 the latter predicate knows nothing about the mode of the memory
9147 reference and, therefore, assumes that it is the largest supported
9148 mode (TFmode). As a consequence, legitimate offsettable memory
9149 references are rejected. rs6000_legitimate_offset_address_p contains
9150 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9151 at least with a little bit of help here given that we know the
9152 actual registers used. */
9153 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9154 || GET_MODE_SIZE (reg_mode) == 4);
9155 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9156 strict, worst_case);
9157 }
9158
9159 /* Determine the reassociation width to be used in reassociate_bb.
9160 This takes into account how many parallel operations we
9161 can actually do of a given type, and also the latency.
9162 P8:
9163 int add/sub 6/cycle
9164 mul 2/cycle
9165 vect add/sub/mul 2/cycle
9166 fp add/sub/mul 2/cycle
9167 dfp 1/cycle
9168 */
9169
9170 static int
9171 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9172 machine_mode mode)
9173 {
9174 switch (rs6000_tune)
9175 {
9176 case PROCESSOR_POWER8:
9177 case PROCESSOR_POWER9:
9178 if (DECIMAL_FLOAT_MODE_P (mode))
9179 return 1;
9180 if (VECTOR_MODE_P (mode))
9181 return 4;
9182 if (INTEGRAL_MODE_P (mode))
9183 return 1;
9184 if (FLOAT_MODE_P (mode))
9185 return 4;
9186 break;
9187 default:
9188 break;
9189 }
9190 return 1;
9191 }
9192
9193 /* Change register usage conditional on target flags. */
9194 static void
9195 rs6000_conditional_register_usage (void)
9196 {
9197 int i;
9198
9199 if (TARGET_DEBUG_TARGET)
9200 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9201
9202 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9203 if (TARGET_64BIT)
9204 fixed_regs[13] = call_used_regs[13]
9205 = call_really_used_regs[13] = 1;
9206
9207 /* Conditionally disable FPRs. */
9208 if (TARGET_SOFT_FLOAT)
9209 for (i = 32; i < 64; i++)
9210 fixed_regs[i] = call_used_regs[i]
9211 = call_really_used_regs[i] = 1;
9212
9213 /* The TOC register is not killed across calls in a way that is
9214 visible to the compiler. */
9215 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9216 call_really_used_regs[2] = 0;
9217
9218 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9219 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9220
9221 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9222 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9223 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9224 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9225
9226 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9227 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9228 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9229 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9230
9231 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9232 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9233 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9234
9235 if (!TARGET_ALTIVEC && !TARGET_VSX)
9236 {
9237 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9238 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9239 call_really_used_regs[VRSAVE_REGNO] = 1;
9240 }
9241
9242 if (TARGET_ALTIVEC || TARGET_VSX)
9243 global_regs[VSCR_REGNO] = 1;
9244
9245 if (TARGET_ALTIVEC_ABI)
9246 {
9247 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9248 call_used_regs[i] = call_really_used_regs[i] = 1;
9249
9250 /* AIX reserves VR20:31 in non-extended ABI mode. */
9251 if (TARGET_XCOFF)
9252 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9253 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9254 }
9255 }
9256
9257 \f
9258 /* Output insns to set DEST equal to the constant SOURCE as a series of
9259 lis, ori and shl instructions and return TRUE. */
9260
9261 bool
9262 rs6000_emit_set_const (rtx dest, rtx source)
9263 {
9264 machine_mode mode = GET_MODE (dest);
9265 rtx temp, set;
9266 rtx_insn *insn;
9267 HOST_WIDE_INT c;
9268
9269 gcc_checking_assert (CONST_INT_P (source));
9270 c = INTVAL (source);
9271 switch (mode)
9272 {
9273 case E_QImode:
9274 case E_HImode:
9275 emit_insn (gen_rtx_SET (dest, source));
9276 return true;
9277
9278 case E_SImode:
9279 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9280
9281 emit_insn (gen_rtx_SET (copy_rtx (temp),
9282 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9283 emit_insn (gen_rtx_SET (dest,
9284 gen_rtx_IOR (SImode, copy_rtx (temp),
9285 GEN_INT (c & 0xffff))));
9286 break;
9287
9288 case E_DImode:
9289 if (!TARGET_POWERPC64)
9290 {
9291 rtx hi, lo;
9292
9293 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9294 DImode);
9295 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9296 DImode);
9297 emit_move_insn (hi, GEN_INT (c >> 32));
9298 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9299 emit_move_insn (lo, GEN_INT (c));
9300 }
9301 else
9302 rs6000_emit_set_long_const (dest, c);
9303 break;
9304
9305 default:
9306 gcc_unreachable ();
9307 }
9308
9309 insn = get_last_insn ();
9310 set = single_set (insn);
9311 if (! CONSTANT_P (SET_SRC (set)))
9312 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9313
9314 return true;
9315 }
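
/* Worked SImode example: for C = 0x12345678 the code above emits

     temp <- 0x12340000            ; lis temp,0x1234
     dest <- temp | 0x5678         ; ori dest,temp,0x5678

   and attaches REG_EQUAL (const_int 0x12345678) to the final insn so
   later passes still see the full constant.  */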
9316
9317 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9318 Output insns to set DEST equal to the constant C as a series of
9319 lis, ori and shl instructions. */
9320
9321 static void
9322 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9323 {
9324 rtx temp;
9325 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9326
9327 ud1 = c & 0xffff;
9328 c = c >> 16;
9329 ud2 = c & 0xffff;
9330 c = c >> 16;
9331 ud3 = c & 0xffff;
9332 c = c >> 16;
9333 ud4 = c & 0xffff;
9334
9335 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9336 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9337 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9338
9339 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9340 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9341 {
9342 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9343
9344 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9345 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9346 if (ud1 != 0)
9347 emit_move_insn (dest,
9348 gen_rtx_IOR (DImode, copy_rtx (temp),
9349 GEN_INT (ud1)));
9350 }
9351 else if (ud3 == 0 && ud4 == 0)
9352 {
9353 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9354
9355 gcc_assert (ud2 & 0x8000);
9356 emit_move_insn (copy_rtx (temp),
9357 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9358 if (ud1 != 0)
9359 emit_move_insn (copy_rtx (temp),
9360 gen_rtx_IOR (DImode, copy_rtx (temp),
9361 GEN_INT (ud1)));
9362 emit_move_insn (dest,
9363 gen_rtx_ZERO_EXTEND (DImode,
9364 gen_lowpart (SImode,
9365 copy_rtx (temp))));
9366 }
9367 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9368 || (ud4 == 0 && ! (ud3 & 0x8000)))
9369 {
9370 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9371
9372 emit_move_insn (copy_rtx (temp),
9373 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9374 if (ud2 != 0)
9375 emit_move_insn (copy_rtx (temp),
9376 gen_rtx_IOR (DImode, copy_rtx (temp),
9377 GEN_INT (ud2)));
9378 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9379 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9380 GEN_INT (16)));
9381 if (ud1 != 0)
9382 emit_move_insn (dest,
9383 gen_rtx_IOR (DImode, copy_rtx (temp),
9384 GEN_INT (ud1)));
9385 }
9386 else
9387 {
9388 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9389
9390 emit_move_insn (copy_rtx (temp),
9391 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9392 if (ud3 != 0)
9393 emit_move_insn (copy_rtx (temp),
9394 gen_rtx_IOR (DImode, copy_rtx (temp),
9395 GEN_INT (ud3)));
9396
9397 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9398 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9399 GEN_INT (32)));
9400 if (ud2 != 0)
9401 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9402 gen_rtx_IOR (DImode, copy_rtx (temp),
9403 GEN_INT (ud2 << 16)));
9404 if (ud1 != 0)
9405 emit_move_insn (dest,
9406 gen_rtx_IOR (DImode, copy_rtx (temp),
9407 GEN_INT (ud1)));
9408 }
9409 }
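
/* Worked example of the general (final) case above: the constant
   C = 0x123456789abcdef0 splits into ud4:ud3:ud2:ud1 =
   0x1234:0x5678:0x9abc:0xdef0 and is synthesized in five steps,
   which typically assemble as

     lis   tmp,0x1234         ; tmp = 0x12340000, sign-extended
     ori   tmp,tmp,0x5678     ; tmp |= ud3
     sldi  tmp,tmp,32         ; move the built value to the high half
     oris  tmp,tmp,0x9abc     ; tmp |= ud2 << 16
     ori   dest,tmp,0xdef0    ; dest = tmp | ud1  */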
9410
9411 /* Helper for the following. Get rid of [r+r] memory refs
9412 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9413
9414 static void
9415 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9416 {
9417 if (MEM_P (operands[0])
9418 && !REG_P (XEXP (operands[0], 0))
9419 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9420 GET_MODE (operands[0]), false))
9421 operands[0]
9422 = replace_equiv_address (operands[0],
9423 copy_addr_to_reg (XEXP (operands[0], 0)));
9424
9425 if (MEM_P (operands[1])
9426 && !REG_P (XEXP (operands[1], 0))
9427 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9428 GET_MODE (operands[1]), false))
9429 operands[1]
9430 = replace_equiv_address (operands[1],
9431 copy_addr_to_reg (XEXP (operands[1], 0)));
9432 }
9433
9434 /* Generate a vector of constants to permute MODE for a little-endian
9435 storage operation by swapping the two halves of a vector. */
9436 static rtvec
9437 rs6000_const_vec (machine_mode mode)
9438 {
9439 int i, subparts;
9440 rtvec v;
9441
9442 switch (mode)
9443 {
9444 case E_V1TImode:
9445 subparts = 1;
9446 break;
9447 case E_V2DFmode:
9448 case E_V2DImode:
9449 subparts = 2;
9450 break;
9451 case E_V4SFmode:
9452 case E_V4SImode:
9453 subparts = 4;
9454 break;
9455 case E_V8HImode:
9456 subparts = 8;
9457 break;
9458 case E_V16QImode:
9459 subparts = 16;
9460 break;
9461 default:
9462 gcc_unreachable ();
9463 }
9464
9465 v = rtvec_alloc (subparts);
9466
9467 for (i = 0; i < subparts / 2; ++i)
9468 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9469 for (i = subparts / 2; i < subparts; ++i)
9470 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9471
9472 return v;
9473 }
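
/* For V4SImode, for instance, this builds the selector { 2, 3, 0, 1 }:
   the two doubleword halves of the vector swap places while the
   element order within each half is preserved.  */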
9474
9475 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9476 store operation. */
9477 void
9478 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9479 {
9480 /* Scalar permutations are easier to express in integer modes than in
9481 floating-point modes, so cast them here. We use V1TImode instead
9482 of TImode to ensure that the values don't go through GPRs. */
9483 if (FLOAT128_VECTOR_P (mode))
9484 {
9485 dest = gen_lowpart (V1TImode, dest);
9486 source = gen_lowpart (V1TImode, source);
9487 mode = V1TImode;
9488 }
9489
9490 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9491 scalar. */
9492 if (mode == TImode || mode == V1TImode)
9493 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9494 GEN_INT (64))));
9495 else
9496 {
9497 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9498 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9499 }
9500 }
9501
9502 /* Emit a little-endian load from vector memory location SOURCE to VSX
9503 register DEST in mode MODE. The load is done with two permuting
9504 insns that represent an lxvd2x and an xxpermdi. */
9505 void
9506 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9507 {
9508 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9509 V1TImode). */
9510 if (mode == TImode || mode == V1TImode)
9511 {
9512 mode = V2DImode;
9513 dest = gen_lowpart (V2DImode, dest);
9514 source = adjust_address (source, V2DImode, 0);
9515 }
9516
9517 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9518 rs6000_emit_le_vsx_permute (tmp, source, mode);
9519 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9520 }
9521
9522 /* Emit a little-endian store to vector memory location DEST from VSX
9523 register SOURCE in mode MODE. The store is done with two permuting
9524 insns that represent an xxpermdi and an stxvd2x. */
9525 void
9526 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9527 {
9528 /* This should never be called during or after LRA, because it does
9529 not re-permute the source register. It is intended only for use
9530 during expand. */
9531 gcc_assert (!lra_in_progress && !reload_completed);
9532
9533 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9534 V1TImode). */
9535 if (mode == TImode || mode == V1TImode)
9536 {
9537 mode = V2DImode;
9538 dest = adjust_address (dest, V2DImode, 0);
9539 source = gen_lowpart (V2DImode, source);
9540 }
9541
9542 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9543 rs6000_emit_le_vsx_permute (tmp, source, mode);
9544 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9545 }
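
/* Worked V4SImode sketch of the two routines above: if memory holds
   elements { 0, 1, 2, 3 }, the lxvd2x/stxvd2x half of the pair
   produces the doubleword-swapped { 2, 3, 0, 1 } and the xxpermdi
   half swaps the doublewords back, so the RTL pair is a no-op on
   element order; it exists to model the byte swapping those
   instructions perform on little-endian targets.  */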
9546
9547 /* Emit a sequence representing a little-endian VSX load or store,
9548 moving data from SOURCE to DEST in mode MODE. This is done
9549 separately from rs6000_emit_move to ensure it is called only
9550 during expand. LE VSX loads and stores introduced later are
9551 handled with a split. The expand-time RTL generation allows
9552 us to optimize away redundant pairs of register-permutes. */
9553 void
9554 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9555 {
9556 gcc_assert (!BYTES_BIG_ENDIAN
9557 && VECTOR_MEM_VSX_P (mode)
9558 && !TARGET_P9_VECTOR
9559 && !gpr_or_gpr_p (dest, source)
9560 && (MEM_P (source) ^ MEM_P (dest)));
9561
9562 if (MEM_P (source))
9563 {
9564 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9565 rs6000_emit_le_vsx_load (dest, source, mode);
9566 }
9567 else
9568 {
9569 if (!REG_P (source))
9570 source = force_reg (mode, source);
9571 rs6000_emit_le_vsx_store (dest, source, mode);
9572 }
9573 }
9574
9575 /* Return whether an SFmode or SImode move can be done without converting one
9576 mode to another. This arises when we have:
9577
9578 (SUBREG:SF (REG:SI ...))
9579 (SUBREG:SI (REG:SF ...))
9580
9581 and one of the values is in a floating point/vector register, where SFmode
9582 scalars are stored in DFmode format. */
9583
9584 bool
9585 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9586 {
9587 if (TARGET_ALLOW_SF_SUBREG)
9588 return true;
9589
9590 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9591 return true;
9592
9593 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9594 return true;
9595
9596 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9597 if (SUBREG_P (dest))
9598 {
9599 rtx dest_subreg = SUBREG_REG (dest);
9600 rtx src_subreg = SUBREG_REG (src);
9601 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9602 }
9603
9604 return false;
9605 }
9606
9607
9608 /* Helper function to change moves with:
9609
9610 (SUBREG:SF (REG:SI)) and
9611 (SUBREG:SI (REG:SF))
9612
9613 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9614 values are stored as DFmode values in the VSX registers. We need to convert
9615 the bits before we can use a direct move or operate on the bits in the
9616 vector register as an integer type.
9617
9618 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9619
9620 static bool
9621 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9622 {
9623 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9624 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9625 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9626 {
9627 rtx inner_source = SUBREG_REG (source);
9628 machine_mode inner_mode = GET_MODE (inner_source);
9629
9630 if (mode == SImode && inner_mode == SFmode)
9631 {
9632 emit_insn (gen_movsi_from_sf (dest, inner_source));
9633 return true;
9634 }
9635
9636 if (mode == SFmode && inner_mode == SImode)
9637 {
9638 emit_insn (gen_movsf_from_si (dest, inner_source));
9639 return true;
9640 }
9641 }
9642
9643 return false;
9644 }
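
/* E.g. a move like (set (reg:SI r) (subreg:SI (reg:SF f) 0)) is
   rewritten here to the movsi_from_sf pattern, which first converts
   the DFmode-format scalar held in the VSX register to its
   single-precision bit pattern, rather than emitting a plain subreg
   copy that would misinterpret the register contents.  */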
9645
9646 /* Emit a move from SOURCE to DEST in mode MODE. */
9647 void
9648 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9649 {
9650 rtx operands[2];
9651 operands[0] = dest;
9652 operands[1] = source;
9653
9654 if (TARGET_DEBUG_ADDR)
9655 {
9656 fprintf (stderr,
9657 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9658 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9659 GET_MODE_NAME (mode),
9660 lra_in_progress,
9661 reload_completed,
9662 can_create_pseudo_p ());
9663 debug_rtx (dest);
9664 fprintf (stderr, "source:\n");
9665 debug_rtx (source);
9666 }
9667
9668 /* Check that we get CONST_WIDE_INT only when we should. */
9669 if (CONST_WIDE_INT_P (operands[1])
9670 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9671 gcc_unreachable ();
9672
9673 #ifdef HAVE_AS_GNU_ATTRIBUTE
9674 /* If we use a long double type, set the flags in .gnu_attribute that say
9675 what the long double type is. This is to allow the linker's warning
9676 message for the wrong long double type to be useful, even if the function
9677 does not do a call (for example, doing a 128-bit add on power9 if the long
9678 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9679 are used when they aren't the default long double type. */
9680 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9681 {
9682 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9683 rs6000_passes_float = rs6000_passes_long_double = true;
9684
9685 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9686 rs6000_passes_float = rs6000_passes_long_double = true;
9687 }
9688 #endif
9689
9690 /* See if we need to special case SImode/SFmode SUBREG moves. */
9691 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9692 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9693 return;
9694
9695 /* Check if GCC is setting up a block move that will end up using FP
9696 registers as temporaries. We must make sure this is acceptable. */
9697 if (MEM_P (operands[0])
9698 && MEM_P (operands[1])
9699 && mode == DImode
9700 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9701 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9702 && ! (rs6000_slow_unaligned_access (SImode,
9703 (MEM_ALIGN (operands[0]) > 32
9704 ? 32 : MEM_ALIGN (operands[0])))
9705 || rs6000_slow_unaligned_access (SImode,
9706 (MEM_ALIGN (operands[1]) > 32
9707 ? 32 : MEM_ALIGN (operands[1]))))
9708 && ! MEM_VOLATILE_P (operands [0])
9709 && ! MEM_VOLATILE_P (operands [1]))
9710 {
9711 emit_move_insn (adjust_address (operands[0], SImode, 0),
9712 adjust_address (operands[1], SImode, 0));
9713 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9714 adjust_address (copy_rtx (operands[1]), SImode, 4));
9715 return;
9716 }
9717
9718 if (can_create_pseudo_p () && MEM_P (operands[0])
9719 && !gpc_reg_operand (operands[1], mode))
9720 operands[1] = force_reg (mode, operands[1]);
9721
9722 /* Recognize the case where operand[1] is a reference to thread-local
9723 data and load its address to a register. */
9724 if (tls_referenced_p (operands[1]))
9725 {
9726 enum tls_model model;
9727 rtx tmp = operands[1];
9728 rtx addend = NULL;
9729
9730 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9731 {
9732 addend = XEXP (XEXP (tmp, 0), 1);
9733 tmp = XEXP (XEXP (tmp, 0), 0);
9734 }
9735
9736 gcc_assert (SYMBOL_REF_P (tmp));
9737 model = SYMBOL_REF_TLS_MODEL (tmp);
9738 gcc_assert (model != 0);
9739
9740 tmp = rs6000_legitimize_tls_address (tmp, model);
9741 if (addend)
9742 {
9743 tmp = gen_rtx_PLUS (mode, tmp, addend);
9744 tmp = force_operand (tmp, operands[0]);
9745 }
9746 operands[1] = tmp;
9747 }
9748
9749 /* 128-bit constant floating-point values on Darwin should really be loaded
9750 as two parts. However, this premature splitting is a problem when DFmode
9751 values can go into Altivec registers. */
9752 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
9753 && !reg_addr[DFmode].scalar_in_vmx_p)
9754 {
9755 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9756 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9757 DFmode);
9758 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9759 GET_MODE_SIZE (DFmode)),
9760 simplify_gen_subreg (DFmode, operands[1], mode,
9761 GET_MODE_SIZE (DFmode)),
9762 DFmode);
9763 return;
9764 }
9765
9766 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9767 p1:SD) if p1 is not of floating point class and p0 is spilled as
9768 we can have no analogous movsd_store for this. */
9769 if (lra_in_progress && mode == DDmode
9770 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9771 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9772 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
9773 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9774 {
9775 enum reg_class cl;
9776 int regno = REGNO (SUBREG_REG (operands[1]));
9777
9778 if (!HARD_REGISTER_NUM_P (regno))
9779 {
9780 cl = reg_preferred_class (regno);
9781 regno = reg_renumber[regno];
9782 if (regno < 0)
9783 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9784 }
9785 if (regno >= 0 && ! FP_REGNO_P (regno))
9786 {
9787 mode = SDmode;
9788 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9789 operands[1] = SUBREG_REG (operands[1]);
9790 }
9791 }
9792 if (lra_in_progress
9793 && mode == SDmode
9794 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9795 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9796 && (REG_P (operands[1])
9797 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
9798 {
9799 int regno = reg_or_subregno (operands[1]);
9800 enum reg_class cl;
9801
9802 if (!HARD_REGISTER_NUM_P (regno))
9803 {
9804 cl = reg_preferred_class (regno);
9805 gcc_assert (cl != NO_REGS);
9806 regno = reg_renumber[regno];
9807 if (regno < 0)
9808 regno = ira_class_hard_regs[cl][0];
9809 }
9810 if (FP_REGNO_P (regno))
9811 {
9812 if (GET_MODE (operands[0]) != DDmode)
9813 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9814 emit_insn (gen_movsd_store (operands[0], operands[1]));
9815 }
9816 else if (INT_REGNO_P (regno))
9817 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9818 else
9819 gcc_unreachable ();
9820 return;
9821 }
9822 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9823 p:DD)) if p0 is not of floating point class and p1 is spilled as
9824 we can have no analogous movsd_load for this. */
9825 if (lra_in_progress && mode == DDmode
9826 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
9827 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9828 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9829 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9830 {
9831 enum reg_class cl;
9832 int regno = REGNO (SUBREG_REG (operands[0]));
9833
9834 if (!HARD_REGISTER_NUM_P (regno))
9835 {
9836 cl = reg_preferred_class (regno);
9837 regno = reg_renumber[regno];
9838 if (regno < 0)
9839 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9840 }
9841 if (regno >= 0 && ! FP_REGNO_P (regno))
9842 {
9843 mode = SDmode;
9844 operands[0] = SUBREG_REG (operands[0]);
9845 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9846 }
9847 }
9848 if (lra_in_progress
9849 && mode == SDmode
9850 && (REG_P (operands[0])
9851 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
9852 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9853 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9854 {
9855 int regno = reg_or_subregno (operands[0]);
9856 enum reg_class cl;
9857
9858 if (!HARD_REGISTER_NUM_P (regno))
9859 {
9860 cl = reg_preferred_class (regno);
9861 gcc_assert (cl != NO_REGS);
9862 regno = reg_renumber[regno];
9863 if (regno < 0)
9864 regno = ira_class_hard_regs[cl][0];
9865 }
9866 if (FP_REGNO_P (regno))
9867 {
9868 if (GET_MODE (operands[1]) != DDmode)
9869 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9870 emit_insn (gen_movsd_load (operands[0], operands[1]));
9871 }
9872 else if (INT_REGNO_P (regno))
9873 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9874 else
9875 gcc_unreachable ();
9876 return;
9877 }
9878
9879 /* FIXME: In the long term, this switch statement should go away
9880 and be replaced by a sequence of tests based on things like
9881 mode == Pmode. */
9882 switch (mode)
9883 {
9884 case E_HImode:
9885 case E_QImode:
9886 if (CONSTANT_P (operands[1])
9887 && !CONST_INT_P (operands[1]))
9888 operands[1] = force_const_mem (mode, operands[1]);
9889 break;
9890
9891 case E_TFmode:
9892 case E_TDmode:
9893 case E_IFmode:
9894 case E_KFmode:
9895 if (FLOAT128_2REG_P (mode))
9896 rs6000_eliminate_indexed_memrefs (operands);
9897 /* fall through */
9898
9899 case E_DFmode:
9900 case E_DDmode:
9901 case E_SFmode:
9902 case E_SDmode:
9903 if (CONSTANT_P (operands[1])
9904 && ! easy_fp_constant (operands[1], mode))
9905 operands[1] = force_const_mem (mode, operands[1]);
9906 break;
9907
9908 case E_V16QImode:
9909 case E_V8HImode:
9910 case E_V4SFmode:
9911 case E_V4SImode:
9912 case E_V2DFmode:
9913 case E_V2DImode:
9914 case E_V1TImode:
9915 if (CONSTANT_P (operands[1])
9916 && !easy_vector_constant (operands[1], mode))
9917 operands[1] = force_const_mem (mode, operands[1]);
9918 break;
9919
9920 case E_SImode:
9921 case E_DImode:
9922 /* Use the default pattern for the address of ELF small data. */
9923 if (TARGET_ELF
9924 && mode == Pmode
9925 && DEFAULT_ABI == ABI_V4
9926 && (SYMBOL_REF_P (operands[1])
9927 || GET_CODE (operands[1]) == CONST)
9928 && small_data_operand (operands[1], mode))
9929 {
9930 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9931 return;
9932 }
9933
9934 if (DEFAULT_ABI == ABI_V4
9935 && mode == Pmode && mode == SImode
9936 && flag_pic == 1 && got_operand (operands[1], mode))
9937 {
9938 emit_insn (gen_movsi_got (operands[0], operands[1]));
9939 return;
9940 }
9941
9942 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9943 && TARGET_NO_TOC
9944 && ! flag_pic
9945 && mode == Pmode
9946 && CONSTANT_P (operands[1])
9947 && GET_CODE (operands[1]) != HIGH
9948 && !CONST_INT_P (operands[1]))
9949 {
9950 rtx target = (!can_create_pseudo_p ()
9951 ? operands[0]
9952 : gen_reg_rtx (mode));
9953
9954 /* If this is a function address on -mcall-aixdesc,
9955 convert it to the address of the descriptor. */
9956 if (DEFAULT_ABI == ABI_AIX
9957 && SYMBOL_REF_P (operands[1])
9958 && XSTR (operands[1], 0)[0] == '.')
9959 {
9960 const char *name = XSTR (operands[1], 0);
9961 rtx new_ref;
9962 while (*name == '.')
9963 name++;
9964 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9965 CONSTANT_POOL_ADDRESS_P (new_ref)
9966 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9967 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9968 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9969 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9970 operands[1] = new_ref;
9971 }
9972
9973 if (DEFAULT_ABI == ABI_DARWIN)
9974 {
9975 #if TARGET_MACHO
9976 if (MACHO_DYNAMIC_NO_PIC_P)
9977 {
9978 /* Take care of any required data indirection. */
9979 operands[1] = rs6000_machopic_legitimize_pic_address (
9980 operands[1], mode, operands[0]);
9981 if (operands[0] != operands[1])
9982 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9983 return;
9984 }
9985 #endif
9986 emit_insn (gen_macho_high (target, operands[1]));
9987 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9988 return;
9989 }
9990
9991 emit_insn (gen_elf_high (target, operands[1]));
9992 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9993 return;
9994 }
9995
9996 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9997 and we have put it in the TOC, we just need to make a TOC-relative
9998 reference to it. */
9999 if (TARGET_TOC
10000 && SYMBOL_REF_P (operands[1])
10001 && use_toc_relative_ref (operands[1], mode))
10002 operands[1] = create_TOC_reference (operands[1], operands[0]);
10003 else if (mode == Pmode
10004 && CONSTANT_P (operands[1])
10005 && GET_CODE (operands[1]) != HIGH
10006 && ((REG_P (operands[0])
10007 && FP_REGNO_P (REGNO (operands[0])))
10008 || !CONST_INT_P (operands[1])
10009 || (num_insns_constant (operands[1], mode)
10010 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10011 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
10012 && (TARGET_CMODEL == CMODEL_SMALL
10013 || can_create_pseudo_p ()
10014 || (REG_P (operands[0])
10015 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10016 {
10017
10018 #if TARGET_MACHO
10019 /* Darwin uses a special PIC legitimizer. */
10020 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10021 {
10022 operands[1] =
10023 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10024 operands[0]);
10025 if (operands[0] != operands[1])
10026 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10027 return;
10028 }
10029 #endif
10030
10031 /* If we are to limit the number of things we put in the TOC and
10032 this is a symbol plus a constant we can add in one insn,
10033 just put the symbol in the TOC and add the constant. */
10034 if (GET_CODE (operands[1]) == CONST
10035 && TARGET_NO_SUM_IN_TOC
10036 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10037 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10038 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10039 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
10040 && ! side_effects_p (operands[0]))
10041 {
10042 rtx sym =
10043 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10044 rtx other = XEXP (XEXP (operands[1], 0), 1);
10045
10046 sym = force_reg (mode, sym);
10047 emit_insn (gen_add3_insn (operands[0], sym, other));
10048 return;
10049 }
10050
10051 operands[1] = force_const_mem (mode, operands[1]);
10052
10053 if (TARGET_TOC
10054 && SYMBOL_REF_P (XEXP (operands[1], 0))
10055 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10056 {
10057 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10058 operands[0]);
10059 operands[1] = gen_const_mem (mode, tocref);
10060 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10061 }
10062 }
10063 break;
10064
10065 case E_TImode:
10066 if (!VECTOR_MEM_VSX_P (TImode))
10067 rs6000_eliminate_indexed_memrefs (operands);
10068 break;
10069
10070 case E_PTImode:
10071 rs6000_eliminate_indexed_memrefs (operands);
10072 break;
10073
10074 default:
10075 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10076 }
10077
10078 /* Above, we may have called force_const_mem which may have returned
10079 an invalid address. If we can, fix this up; otherwise, reload will
10080 have to deal with it. */
10081 if (MEM_P (operands[1]))
10082 operands[1] = validize_mem (operands[1]);
10083
10084 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10085 }
10086 \f
10087 /* Nonzero if we can use a floating-point register to pass this arg. */
10088 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10089 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10090 && (CUM)->fregno <= FP_ARG_MAX_REG \
10091 && TARGET_HARD_FLOAT)
10092
10093 /* Nonzero if we can use an AltiVec register to pass this arg. */
10094 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10095 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10096 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10097 && TARGET_ALTIVEC_ABI \
10098 && (NAMED))
10099
10100 /* Walk down the type tree of TYPE counting consecutive base elements.
10101 If *MODEP is VOIDmode, then set it to the first valid floating point
10102 or vector type. If a non-floating point or vector type is found, or
10103 if a floating point or vector type that doesn't match a non-VOIDmode
10104 *MODEP is found, then return -1, otherwise return the count in the
10105 sub-tree. */
10106
10107 static int
10108 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10109 {
10110 machine_mode mode;
10111 HOST_WIDE_INT size;
10112
10113 switch (TREE_CODE (type))
10114 {
10115 case REAL_TYPE:
10116 mode = TYPE_MODE (type);
10117 if (!SCALAR_FLOAT_MODE_P (mode))
10118 return -1;
10119
10120 if (*modep == VOIDmode)
10121 *modep = mode;
10122
10123 if (*modep == mode)
10124 return 1;
10125
10126 break;
10127
10128 case COMPLEX_TYPE:
10129 mode = TYPE_MODE (TREE_TYPE (type));
10130 if (!SCALAR_FLOAT_MODE_P (mode))
10131 return -1;
10132
10133 if (*modep == VOIDmode)
10134 *modep = mode;
10135
10136 if (*modep == mode)
10137 return 2;
10138
10139 break;
10140
10141 case VECTOR_TYPE:
10142 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10143 return -1;
10144
10145 /* Use V4SImode as representative of all 128-bit vector types. */
10146 size = int_size_in_bytes (type);
10147 switch (size)
10148 {
10149 case 16:
10150 mode = V4SImode;
10151 break;
10152 default:
10153 return -1;
10154 }
10155
10156 if (*modep == VOIDmode)
10157 *modep = mode;
10158
10159 /* Vector modes are considered to be opaque: two vectors are
10160 equivalent for the purposes of being homogeneous aggregates
10161 if they are the same size. */
10162 if (*modep == mode)
10163 return 1;
10164
10165 break;
10166
10167 case ARRAY_TYPE:
10168 {
10169 int count;
10170 tree index = TYPE_DOMAIN (type);
10171
10172 /* Can't handle incomplete types nor sizes that are not
10173 fixed. */
10174 if (!COMPLETE_TYPE_P (type)
10175 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10176 return -1;
10177
10178 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10179 if (count == -1
10180 || !index
10181 || !TYPE_MAX_VALUE (index)
10182 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10183 || !TYPE_MIN_VALUE (index)
10184 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10185 || count < 0)
10186 return -1;
10187
10188 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10189 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10190
10191 /* There must be no padding. */
10192 if (wi::to_wide (TYPE_SIZE (type))
10193 != count * GET_MODE_BITSIZE (*modep))
10194 return -1;
10195
10196 return count;
10197 }
10198
10199 case RECORD_TYPE:
10200 {
10201 int count = 0;
10202 int sub_count;
10203 tree field;
10204
10205 /* Can't handle incomplete types nor sizes that are not
10206 fixed. */
10207 if (!COMPLETE_TYPE_P (type)
10208 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10209 return -1;
10210
10211 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10212 {
10213 if (TREE_CODE (field) != FIELD_DECL)
10214 continue;
10215
10216 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10217 if (sub_count < 0)
10218 return -1;
10219 count += sub_count;
10220 }
10221
10222 /* There must be no padding. */
10223 if (wi::to_wide (TYPE_SIZE (type))
10224 != count * GET_MODE_BITSIZE (*modep))
10225 return -1;
10226
10227 return count;
10228 }
10229
10230 case UNION_TYPE:
10231 case QUAL_UNION_TYPE:
10232 {
10233 /* These aren't very interesting except in a degenerate case. */
10234 int count = 0;
10235 int sub_count;
10236 tree field;
10237
10238 /* Can't handle incomplete types nor sizes that are not
10239 fixed. */
10240 if (!COMPLETE_TYPE_P (type)
10241 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10242 return -1;
10243
10244 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10245 {
10246 if (TREE_CODE (field) != FIELD_DECL)
10247 continue;
10248
10249 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10250 if (sub_count < 0)
10251 return -1;
10252 count = count > sub_count ? count : sub_count;
10253 }
10254
10255 /* There must be no padding. */
10256 if (wi::to_wide (TYPE_SIZE (type))
10257 != count * GET_MODE_BITSIZE (*modep))
10258 return -1;
10259
10260 return count;
10261 }
10262
10263 default:
10264 break;
10265 }
10266
10267 return -1;
10268 }
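
/* Worked example (hypothetical source):

     struct hfa { _Complex double z[2]; };

   The RECORD_TYPE case recurses into the ARRAY_TYPE, whose element
   hits the COMPLEX_TYPE case (2 DFmode scalars); the array then
   multiplies by its two elements, giving count = 4 with *MODEP =
   DFmode, and the padding check holds since 4 * 64 bits equals the
   structure size.  */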
10269
10270 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10271 float or vector aggregate that shall be passed in FP/vector registers
10272 according to the ELFv2 ABI, return the homogeneous element mode in
10273 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10274
10275 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10276
10277 static bool
10278 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10279 machine_mode *elt_mode,
10280 int *n_elts)
10281 {
10282 /* Note that we do not accept complex types at the top level as
10283 homogeneous aggregates; these types are handled via the
10284 targetm.calls.split_complex_arg mechanism. Complex types
10285 can be elements of homogeneous aggregates, however. */
10286 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10287 && AGGREGATE_TYPE_P (type))
10288 {
10289 machine_mode field_mode = VOIDmode;
10290 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10291
10292 if (field_count > 0)
10293 {
10294 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10295 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10296
10297 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10298 up to AGGR_ARG_NUM_REG registers. */
10299 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10300 {
10301 if (elt_mode)
10302 *elt_mode = field_mode;
10303 if (n_elts)
10304 *n_elts = field_count;
10305 return true;
10306 }
10307 }
10308 }
10309
10310 if (elt_mode)
10311 *elt_mode = mode;
10312 if (n_elts)
10313 *n_elts = 1;
10314 return false;
10315 }
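
/* For instance, under the ELFv2 ABI the hypothetical

     struct pt { double x, y, z; };

   is discovered as a homogeneous aggregate with *ELT_MODE = DFmode
   and *N_ELTS = 3 (3 * 8 bytes fits in AGGR_ARG_NUM_REG registers),
   so it can be passed in floating-point registers; mixing an int
   field into the struct makes rs6000_aggregate_candidate return -1,
   and the normal argument-passing rules apply.  */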
10316
10317 /* Return a nonzero value to say to return the function value in
10318 memory, just as large structures are always returned. TYPE will be
10319 the data type of the value, and FNTYPE will be the type of the
10320 function doing the returning, or @code{NULL} for libcalls.
10321
10322 The AIX ABI for the RS/6000 specifies that all structures are
10323 returned in memory. The Darwin ABI does the same.
10324
10325 For the Darwin 64 Bit ABI, a function result can be returned in
10326 registers or in memory, depending on the size of the return data
10327 type. If it is returned in registers, the value occupies the same
10328 registers as it would if it were the first and only function
10329 argument. Otherwise, the function places its result in memory at
10330 the location pointed to by GPR3.
10331
10332 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10333 but a draft put them in memory, and GCC used to implement the draft
10334 instead of the final standard. Therefore, aix_struct_return
10335 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10336 compatibility can change DRAFT_V4_STRUCT_RET to override the
10337 default, and -m switches get the final word. See
10338 rs6000_option_override_internal for more details.
10339
10340 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10341 long double support is enabled. These values are returned in memory.
10342
10343 int_size_in_bytes returns -1 for variable size objects, which go in
10344 memory always. The cast to unsigned makes -1 > 8. */
10345
10346 static bool
10347 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10348 {
10349 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10350 if (TARGET_MACHO
10351 && rs6000_darwin64_abi
10352 && TREE_CODE (type) == RECORD_TYPE
10353 && int_size_in_bytes (type) > 0)
10354 {
10355 CUMULATIVE_ARGS valcum;
10356 rtx valret;
10357
10358 valcum.words = 0;
10359 valcum.fregno = FP_ARG_MIN_REG;
10360 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10361 /* Do a trial code generation as if this were going to be passed
10362 as an argument; if any part goes in memory, we return NULL. */
10363 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10364 if (valret)
10365 return false;
10366 /* Otherwise fall through to more conventional ABI rules. */
10367 }
10368
10369 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
10370 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10371 NULL, NULL))
10372 return false;
10373
10374 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10375 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10376 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10377 return false;
10378
10379 if (AGGREGATE_TYPE_P (type)
10380 && (aix_struct_return
10381 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10382 return true;
10383
10384 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10385 modes only exist for GCC vector types if -maltivec. */
10386 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10387 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10388 return false;
10389
10390 /* Return synthetic vectors in memory. */
10391 if (TREE_CODE (type) == VECTOR_TYPE
10392 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10393 {
10394 static bool warned_for_return_big_vectors = false;
10395 if (!warned_for_return_big_vectors)
10396 {
10397 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10398 "non-standard ABI extension with no compatibility "
10399 "guarantee");
10400 warned_for_return_big_vectors = true;
10401 }
10402 return true;
10403 }
10404
10405 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10406 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10407 return true;
10408
10409 return false;
10410 }
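
/* Quick examples of the rules above for ELFv2: struct { double a, b; }
   is returned in FPRs (homogeneous aggregate), struct { char c[16]; }
   in GPRs (at most 16 bytes), and struct { char c[24]; } in memory;
   under the AIX rules (aix_struct_return) all three aggregates would
   be returned in memory.  */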
10411
10412 /* Specify whether values returned in registers should be at the most
10413 significant end of a register. We want aggregates returned by
10414 value to match the way aggregates are passed to functions. */
10415
10416 static bool
10417 rs6000_return_in_msb (const_tree valtype)
10418 {
10419 return (DEFAULT_ABI == ABI_ELFv2
10420 && BYTES_BIG_ENDIAN
10421 && AGGREGATE_TYPE_P (valtype)
10422 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10423 == PAD_UPWARD));
10424 }
10425
10426 #ifdef HAVE_AS_GNU_ATTRIBUTE
10427 /* Return TRUE if a call to function FNDECL may affect the function
10428    calling ABI of the object file.  */
10429
10430 static bool
10431 call_ABI_of_interest (tree fndecl)
10432 {
10433 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10434 {
10435 struct cgraph_node *c_node;
10436
10437 /* Libcalls are always interesting. */
10438 if (fndecl == NULL_TREE)
10439 return true;
10440
10441 /* Any call to an external function is interesting. */
10442 if (DECL_EXTERNAL (fndecl))
10443 return true;
10444
10445 /* Interesting functions that we are emitting in this object file. */
10446 c_node = cgraph_node::get (fndecl);
10447 c_node = c_node->ultimate_alias_target ();
10448 return !c_node->only_called_directly_p ();
10449 }
10450 return false;
10451 }
10452 #endif
10453
10454 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10455 for a call to a function whose data type is FNTYPE.
10456    For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10457
10458    For incoming args we set the number of prototype arguments high
10459    enough that we never return a PARALLEL.  */
10460
10461 void
10462 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10463 rtx libname ATTRIBUTE_UNUSED, int incoming,
10464 int libcall, int n_named_args,
10465 tree fndecl,
10466 machine_mode return_mode ATTRIBUTE_UNUSED)
10467 {
10468 static CUMULATIVE_ARGS zero_cumulative;
10469
10470 *cum = zero_cumulative;
10471 cum->words = 0;
10472 cum->fregno = FP_ARG_MIN_REG;
10473 cum->vregno = ALTIVEC_ARG_MIN_REG;
10474 cum->prototype = (fntype && prototype_p (fntype));
10475 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10476 ? CALL_LIBCALL : CALL_NORMAL);
10477 cum->sysv_gregno = GP_ARG_MIN_REG;
10478 cum->stdarg = stdarg_p (fntype);
10479 cum->libcall = libcall;
10480
10481 cum->nargs_prototype = 0;
10482 if (incoming || cum->prototype)
10483 cum->nargs_prototype = n_named_args;
10484
10485 /* Check for a longcall attribute. */
10486 if ((!fntype && rs6000_default_long_calls)
10487 || (fntype
10488 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10489 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10490 cum->call_cookie |= CALL_LONG;
10491 else if (DEFAULT_ABI != ABI_DARWIN)
10492 {
10493 bool is_local = (fndecl
10494 && !DECL_EXTERNAL (fndecl)
10495 && !DECL_WEAK (fndecl)
10496 && (*targetm.binds_local_p) (fndecl));
10497 if (is_local)
10498 ;
10499 else if (flag_plt)
10500 {
10501 if (fntype
10502 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10503 cum->call_cookie |= CALL_LONG;
10504 }
10505 else
10506 {
10507 if (!(fntype
10508 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10509 cum->call_cookie |= CALL_LONG;
10510 }
10511 }
10512
10513 if (TARGET_DEBUG_ARG)
10514 {
10515 fprintf (stderr, "\ninit_cumulative_args:");
10516 if (fntype)
10517 {
10518 tree ret_type = TREE_TYPE (fntype);
10519 fprintf (stderr, " ret code = %s,",
10520 get_tree_code_name (TREE_CODE (ret_type)));
10521 }
10522
10523 if (cum->call_cookie & CALL_LONG)
10524 fprintf (stderr, " longcall,");
10525
10526 fprintf (stderr, " proto = %d, nargs = %d\n",
10527 cum->prototype, cum->nargs_prototype);
10528 }
10529
10530 #ifdef HAVE_AS_GNU_ATTRIBUTE
10531 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10532 {
10533 cum->escapes = call_ABI_of_interest (fndecl);
10534 if (cum->escapes)
10535 {
10536 tree return_type;
10537
10538 if (fntype)
10539 {
10540 return_type = TREE_TYPE (fntype);
10541 return_mode = TYPE_MODE (return_type);
10542 }
10543 else
10544 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10545
10546 if (return_type != NULL)
10547 {
10548 if (TREE_CODE (return_type) == RECORD_TYPE
10549 && TYPE_TRANSPARENT_AGGR (return_type))
10550 {
10551 return_type = TREE_TYPE (first_field (return_type));
10552 return_mode = TYPE_MODE (return_type);
10553 }
10554 if (AGGREGATE_TYPE_P (return_type)
10555 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10556 <= 8))
10557 rs6000_returns_struct = true;
10558 }
10559 if (SCALAR_FLOAT_MODE_P (return_mode))
10560 {
10561 rs6000_passes_float = true;
10562 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10563 && (FLOAT128_IBM_P (return_mode)
10564 || FLOAT128_IEEE_P (return_mode)
10565 || (return_type != NULL
10566 && (TYPE_MAIN_VARIANT (return_type)
10567 == long_double_type_node))))
10568 rs6000_passes_long_double = true;
10569
10570 	      /* Note if we pass or return an IEEE 128-bit type.  We changed
10571 the mangling for these types, and we may need to make an alias
10572 with the old mangling. */
10573 if (FLOAT128_IEEE_P (return_mode))
10574 rs6000_passes_ieee128 = true;
10575 }
10576 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10577 rs6000_passes_vector = true;
10578 }
10579 }
10580 #endif
10581
10582 if (fntype
10583 && !TARGET_ALTIVEC
10584 && TARGET_ALTIVEC_ABI
10585 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10586 {
10587 error ("cannot return value in vector register because"
10588 " altivec instructions are disabled, use %qs"
10589 " to enable them", "-maltivec");
10590 }
10591 }
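
/* Editorial illustration (hypothetical declarations, not compiler
   code): the "longcall" and "shortcall" attributes checked above, as a
   user would write them.  "shortcall" overrides -mlongcall for a
   single function, keeping the direct branch.  */
#if 0
void far_func (void) __attribute__ ((longcall));   /* sets CALL_LONG  */
void near_func (void) __attribute__ ((shortcall)); /* direct branch   */
#endif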
10592 \f
10593 /* The mode the ABI uses for a word. This is not the same as word_mode
10594 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10595
10596 static scalar_int_mode
10597 rs6000_abi_word_mode (void)
10598 {
10599 return TARGET_32BIT ? SImode : DImode;
10600 }
10601
10602 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10603 static char *
10604 rs6000_offload_options (void)
10605 {
10606 if (TARGET_64BIT)
10607 return xstrdup ("-foffload-abi=lp64");
10608 else
10609 return xstrdup ("-foffload-abi=ilp32");
10610 }
10611
10612 /* On rs6000, function arguments are promoted, as are function return
10613 values. */
10614
10615 static machine_mode
10616 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10617 machine_mode mode,
10618 int *punsignedp ATTRIBUTE_UNUSED,
10619 const_tree, int)
10620 {
10621 PROMOTE_MODE (mode, *punsignedp, type);
10622
10623 return mode;
10624 }
10625
10626 /* Return true if TYPE must be passed on the stack and not in registers. */
10627
10628 static bool
10629 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10630 {
10631 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10632 return must_pass_in_stack_var_size (mode, type);
10633 else
10634 return must_pass_in_stack_var_size_or_pad (mode, type);
10635 }
10636
10637 static inline bool
10638 is_complex_IBM_long_double (machine_mode mode)
10639 {
10640 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10641 }
10642
10643 /* Whether ABI_V4 passes MODE args to a function in floating point
10644 registers. */
10645
10646 static bool
10647 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10648 {
10649 if (!TARGET_HARD_FLOAT)
10650 return false;
10651 if (mode == DFmode)
10652 return true;
10653 if (mode == SFmode && named)
10654 return true;
10655 /* ABI_V4 passes complex IBM long double in 8 gprs.
10656 Stupid, but we can't change the ABI now. */
10657 if (is_complex_IBM_long_double (mode))
10658 return false;
10659 if (FLOAT128_2REG_P (mode))
10660 return true;
10661 if (DECIMAL_FLOAT_MODE_P (mode))
10662 return true;
10663 return false;
10664 }
10665
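/* Editorial sketch (hypothetical declarations): argument classification
   by abi_v4_pass_in_fpr on a hard-float ABI_V4 target.  */
#if 0
void f (double d);               /* DFmode: goes in an FPR.  */
void g (float s);                /* SFmode, named: goes in an FPR.  */
void h (_Complex long double z); /* IBM extended complex: 8 GPRs, as
                                    noted in the comment above.  */
#endif
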
10666 /* Implement TARGET_FUNCTION_ARG_PADDING.
10667
10668 For the AIX ABI structs are always stored left shifted in their
10669 argument slot. */
10670
10671 static pad_direction
10672 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10673 {
10674 #ifndef AGGREGATE_PADDING_FIXED
10675 #define AGGREGATE_PADDING_FIXED 0
10676 #endif
10677 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10678 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10679 #endif
10680
10681 if (!AGGREGATE_PADDING_FIXED)
10682 {
10683 /* GCC used to pass structures of the same size as integer types as
10684 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10685 	 i.e. structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10686 passed padded downward, except that -mstrict-align further
10687 muddied the water in that multi-component structures of 2 and 4
10688 bytes in size were passed padded upward.
10689
10690 The following arranges for best compatibility with previous
10691 versions of gcc, but removes the -mstrict-align dependency. */
10692 if (BYTES_BIG_ENDIAN)
10693 {
10694 HOST_WIDE_INT size = 0;
10695
10696 if (mode == BLKmode)
10697 {
10698 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10699 size = int_size_in_bytes (type);
10700 }
10701 else
10702 size = GET_MODE_SIZE (mode);
10703
10704 if (size == 1 || size == 2 || size == 4)
10705 return PAD_DOWNWARD;
10706 }
10707 return PAD_UPWARD;
10708 }
10709
10710 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10711 {
10712 if (type != 0 && AGGREGATE_TYPE_P (type))
10713 return PAD_UPWARD;
10714 }
10715
10716 /* Fall back to the default. */
10717 return default_function_arg_padding (mode, type);
10718 }
10719
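/* Editorial sketch (hypothetical types): with !AGGREGATE_PADDING_FIXED
   on a big-endian target, small structs mimic the old GCC behavior
   described above.  */
#if 0
struct s2 { char c[2]; };  /* size 2: PAD_DOWNWARD (like a short)  */
struct s3 { char c[3]; };  /* size 3: PAD_UPWARD  */
#endif
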
10720 /* If defined, a C expression that gives the alignment boundary, in bits,
10721 of an argument with the specified mode and type. If it is not defined,
10722 PARM_BOUNDARY is used for all arguments.
10723
10724 V.4 wants long longs and doubles to be double word aligned. Just
10725 testing the mode size is a boneheaded way to do this as it means
10726 that other types such as complex int are also double word aligned.
10727 However, we're stuck with this because changing the ABI might break
10728 existing library interfaces.
10729
10730 Quadword align Altivec/VSX vectors.
10731 Quadword align large synthetic vector types. */
10732
10733 static unsigned int
10734 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10735 {
10736 machine_mode elt_mode;
10737 int n_elts;
10738
10739 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10740
10741 if (DEFAULT_ABI == ABI_V4
10742 && (GET_MODE_SIZE (mode) == 8
10743 || (TARGET_HARD_FLOAT
10744 && !is_complex_IBM_long_double (mode)
10745 && FLOAT128_2REG_P (mode))))
10746 return 64;
10747 else if (FLOAT128_VECTOR_P (mode))
10748 return 128;
10749 else if (type && TREE_CODE (type) == VECTOR_TYPE
10750 && int_size_in_bytes (type) >= 8
10751 && int_size_in_bytes (type) < 16)
10752 return 64;
10753 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10754 || (type && TREE_CODE (type) == VECTOR_TYPE
10755 && int_size_in_bytes (type) >= 16))
10756 return 128;
10757
10758 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10759 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10760 -mcompat-align-parm is used. */
10761 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10762 || DEFAULT_ABI == ABI_ELFv2)
10763 && type && TYPE_ALIGN (type) > 64)
10764 {
10765 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10766 or homogeneous float/vector aggregates here. We already handled
10767 vector aggregates above, but still need to check for float here. */
10768 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10769 && !SCALAR_FLOAT_MODE_P (elt_mode));
10770
10771 /* We used to check for BLKmode instead of the above aggregate type
10772 check. Warn when this results in any difference to the ABI. */
10773 if (aggregate_p != (mode == BLKmode))
10774 {
10775 static bool warned;
10776 if (!warned && warn_psabi)
10777 {
10778 warned = true;
10779 inform (input_location,
10780 "the ABI of passing aggregates with %d-byte alignment"
10781 " has changed in GCC 5",
10782 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10783 }
10784 }
10785
10786 if (aggregate_p)
10787 return 128;
10788 }
10789
10790 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10791 implement the "aggregate type" check as a BLKmode check here; this
10792 means certain aggregate types are in fact not aligned. */
10793 if (TARGET_MACHO && rs6000_darwin64_abi
10794 && mode == BLKmode
10795 && type && TYPE_ALIGN (type) > 64)
10796 return 128;
10797
10798 return PARM_BOUNDARY;
10799 }
10800
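/* Editorial sketch (hypothetical declarations) of the boundaries
   computed above, in bits.  The vector example assumes -maltivec.  */
#if 0
void f (long long x);     /* 8-byte scalar, V.4: 64-bit aligned.  */
typedef int v4si __attribute__ ((vector_size (16)));
void g (v4si v);          /* AltiVec vector: 128-bit aligned.  */
struct a32 { int x; } __attribute__ ((aligned (32)));
void h (struct a32 s);    /* ELFv2 aggregate aligned > 64: 128.  */
#endif
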
10801 /* The offset in words to the start of the parameter save area. */
10802
10803 static unsigned int
10804 rs6000_parm_offset (void)
10805 {
10806 return (DEFAULT_ABI == ABI_V4 ? 2
10807 : DEFAULT_ABI == ABI_ELFv2 ? 4
10808 : 6);
10809 }
10810
10811 /* For a function parm of MODE and TYPE, return the starting word in
10812 the parameter area. NWORDS of the parameter area are already used. */
10813
10814 static unsigned int
10815 rs6000_parm_start (machine_mode mode, const_tree type,
10816 unsigned int nwords)
10817 {
10818 unsigned int align;
10819
10820 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10821 return nwords + (-(rs6000_parm_offset () + nwords) & align);
10822 }
10823
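/* Worked example (editorial): under ELFv2 the parameter save area
   starts 4 words into the frame.  For a quadword-aligned argument with
   one word already used, ALIGN == 1, so the argument starts at word
   1 + (-(4 + 1) & 1) == 2; its frame offset is then 6 words (48 bytes),
   which is 16-byte aligned as required.  */
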
10824 /* Compute the size (in words) of a function argument. */
10825
10826 static unsigned long
10827 rs6000_arg_size (machine_mode mode, const_tree type)
10828 {
10829 unsigned long size;
10830
10831 if (mode != BLKmode)
10832 size = GET_MODE_SIZE (mode);
10833 else
10834 size = int_size_in_bytes (type);
10835
10836 if (TARGET_32BIT)
10837 return (size + 3) >> 2;
10838 else
10839 return (size + 7) >> 3;
10840 }
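
/* Worked example (editorial): a 10-byte BLKmode struct occupies
   (10 + 3) >> 2 == 3 words on a 32-bit target and (10 + 7) >> 3 == 2
   doublewords on a 64-bit target.  */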
10841 \f
10842 /* Use this to flush pending int fields. */
10843
10844 static void
10845 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10846 HOST_WIDE_INT bitpos, int final)
10847 {
10848 unsigned int startbit, endbit;
10849 int intregs, intoffset;
10850
10851 /* Handle the situations where a float is taking up the first half
10852 of the GPR, and the other half is empty (typically due to
10853      alignment restrictions).  We can detect this by an 8-byte-aligned
10854 int field, or by seeing that this is the final flush for this
10855 argument. Count the word and continue on. */
10856 if (cum->floats_in_gpr == 1
10857 && (cum->intoffset % 64 == 0
10858 || (cum->intoffset == -1 && final)))
10859 {
10860 cum->words++;
10861 cum->floats_in_gpr = 0;
10862 }
10863
10864 if (cum->intoffset == -1)
10865 return;
10866
10867 intoffset = cum->intoffset;
10868 cum->intoffset = -1;
10869 cum->floats_in_gpr = 0;
10870
10871 if (intoffset % BITS_PER_WORD != 0)
10872 {
10873 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
10874 if (!int_mode_for_size (bits, 0).exists ())
10875 {
10876 /* We couldn't find an appropriate mode, which happens,
10877 e.g., in packed structs when there are 3 bytes to load.
10878 	     Move intoffset back to the beginning of the word in this
10879 case. */
10880 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10881 }
10882 }
10883
10884 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10885 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10886 intregs = (endbit - startbit) / BITS_PER_WORD;
10887 cum->words += intregs;
10888   /* words should be unsigned.  */
10889   if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
10890     {
10891       int pad = (endbit / BITS_PER_WORD) - cum->words;
10892 cum->words += pad;
10893 }
10894 }
10895
10896 /* The darwin64 ABI calls for us to recurse down through structs,
10897 looking for elements passed in registers. Unfortunately, we have
10898 to track int register count here also because of misalignments
10899 in powerpc alignment mode. */
10900
10901 static void
10902 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10903 const_tree type,
10904 HOST_WIDE_INT startbitpos)
10905 {
10906 tree f;
10907
10908 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10909 if (TREE_CODE (f) == FIELD_DECL)
10910 {
10911 HOST_WIDE_INT bitpos = startbitpos;
10912 tree ftype = TREE_TYPE (f);
10913 machine_mode mode;
10914 if (ftype == error_mark_node)
10915 continue;
10916 mode = TYPE_MODE (ftype);
10917
10918 if (DECL_SIZE (f) != 0
10919 && tree_fits_uhwi_p (bit_position (f)))
10920 bitpos += int_bit_position (f);
10921
10922 /* ??? FIXME: else assume zero offset. */
10923
10924 if (TREE_CODE (ftype) == RECORD_TYPE)
10925 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10926 else if (USE_FP_FOR_ARG_P (cum, mode))
10927 {
10928 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10929 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10930 cum->fregno += n_fpregs;
10931 /* Single-precision floats present a special problem for
10932 us, because they are smaller than an 8-byte GPR, and so
10933 the structure-packing rules combined with the standard
10934 varargs behavior mean that we want to pack float/float
10935 and float/int combinations into a single register's
10936 space. This is complicated by the arg advance flushing,
10937 which works on arbitrarily large groups of int-type
10938 fields. */
10939 if (mode == SFmode)
10940 {
10941 if (cum->floats_in_gpr == 1)
10942 {
10943 /* Two floats in a word; count the word and reset
10944 the float count. */
10945 cum->words++;
10946 cum->floats_in_gpr = 0;
10947 }
10948 else if (bitpos % 64 == 0)
10949 {
10950 /* A float at the beginning of an 8-byte word;
10951 count it and put off adjusting cum->words until
10952 		     we see if an arg advance flush is going to do it
10953 for us. */
10954 cum->floats_in_gpr++;
10955 }
10956 else
10957 {
10958 /* The float is at the end of a word, preceded
10959 by integer fields, so the arg advance flush
10960 just above has already set cum->words and
10961 everything is taken care of. */
10962 }
10963 }
10964 else
10965 cum->words += n_fpregs;
10966 }
10967 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10968 {
10969 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10970 cum->vregno++;
10971 cum->words += 2;
10972 }
10973 else if (cum->intoffset == -1)
10974 cum->intoffset = bitpos;
10975 }
10976 }
10977
10978 /* Check for an item that needs to be considered specially under the Darwin
10979    64-bit ABI.  These are record types where the mode is BLKmode or the
10980    structure is 8 bytes in size.  */
10981 static int
10982 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10983 {
10984 return rs6000_darwin64_abi
10985 && ((mode == BLKmode
10986 && TREE_CODE (type) == RECORD_TYPE
10987 && int_size_in_bytes (type) > 0)
10988 || (type && TREE_CODE (type) == RECORD_TYPE
10989 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10990 }
10991
10992 /* Update the data in CUM to advance over an argument
10993 of mode MODE and data type TYPE.
10994 (TYPE is null for libcalls where that information may not be available.)
10995
10996 Note that for args passed by reference, function_arg will be called
10997 with MODE and TYPE set to that of the pointer to the arg, not the arg
10998 itself. */
10999
11000 static void
11001 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11002 const_tree type, bool named, int depth)
11003 {
11004 machine_mode elt_mode;
11005 int n_elts;
11006
11007 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11008
11009 /* Only tick off an argument if we're not recursing. */
11010 if (depth == 0)
11011 cum->nargs_prototype--;
11012
11013 #ifdef HAVE_AS_GNU_ATTRIBUTE
11014 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11015 && cum->escapes)
11016 {
11017 if (SCALAR_FLOAT_MODE_P (mode))
11018 {
11019 rs6000_passes_float = true;
11020 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11021 && (FLOAT128_IBM_P (mode)
11022 || FLOAT128_IEEE_P (mode)
11023 || (type != NULL
11024 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11025 rs6000_passes_long_double = true;
11026
11027 	  /* Note if we pass or return an IEEE 128-bit type.  We changed the
11028 mangling for these types, and we may need to make an alias with
11029 the old mangling. */
11030 if (FLOAT128_IEEE_P (mode))
11031 rs6000_passes_ieee128 = true;
11032 }
11033 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11034 rs6000_passes_vector = true;
11035 }
11036 #endif
11037
11038 if (TARGET_ALTIVEC_ABI
11039 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11040 || (type && TREE_CODE (type) == VECTOR_TYPE
11041 && int_size_in_bytes (type) == 16)))
11042 {
11043 bool stack = false;
11044
11045 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11046 {
11047 cum->vregno += n_elts;
11048
11049 if (!TARGET_ALTIVEC)
11050 error ("cannot pass argument in vector register because"
11051 " altivec instructions are disabled, use %qs"
11052 " to enable them", "-maltivec");
11053
11054 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11055 even if it is going to be passed in a vector register.
11056 Darwin does the same for variable-argument functions. */
11057 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11058 && TARGET_64BIT)
11059 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11060 stack = true;
11061 }
11062 else
11063 stack = true;
11064
11065 if (stack)
11066 {
11067 int align;
11068
11069 /* Vector parameters must be 16-byte aligned. In 32-bit
11070 mode this means we need to take into account the offset
11071 to the parameter save area. In 64-bit mode, they just
11072 have to start on an even word, since the parameter save
11073 area is 16-byte aligned. */
11074 if (TARGET_32BIT)
11075 align = -(rs6000_parm_offset () + cum->words) & 3;
11076 else
11077 align = cum->words & 1;
11078 cum->words += align + rs6000_arg_size (mode, type);
11079
11080 if (TARGET_DEBUG_ARG)
11081 {
11082 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11083 cum->words, align);
11084 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11085 cum->nargs_prototype, cum->prototype,
11086 GET_MODE_NAME (mode));
11087 }
11088 }
11089 }
11090 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11091 {
11092 int size = int_size_in_bytes (type);
11093 /* Variable sized types have size == -1 and are
11094 treated as if consisting entirely of ints.
11095 Pad to 16 byte boundary if needed. */
11096 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11097 && (cum->words % 2) != 0)
11098 cum->words++;
11099 /* For varargs, we can just go up by the size of the struct. */
11100 if (!named)
11101 cum->words += (size + 7) / 8;
11102 else
11103 {
11104 /* It is tempting to say int register count just goes up by
11105 sizeof(type)/8, but this is wrong in a case such as
11106 { int; double; int; } [powerpc alignment]. We have to
11107 grovel through the fields for these too. */
11108 cum->intoffset = 0;
11109 cum->floats_in_gpr = 0;
11110 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11111 rs6000_darwin64_record_arg_advance_flush (cum,
11112 size * BITS_PER_UNIT, 1);
11113 }
11114 if (TARGET_DEBUG_ARG)
11115 {
11116 	  fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d, ",
11117 cum->words, TYPE_ALIGN (type), size);
11118 fprintf (stderr,
11119 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11120 cum->nargs_prototype, cum->prototype,
11121 GET_MODE_NAME (mode));
11122 }
11123 }
11124 else if (DEFAULT_ABI == ABI_V4)
11125 {
11126 if (abi_v4_pass_in_fpr (mode, named))
11127 {
11128 /* _Decimal128 must use an even/odd register pair. This assumes
11129 that the register number is odd when fregno is odd. */
11130 if (mode == TDmode && (cum->fregno % 2) == 1)
11131 cum->fregno++;
11132
11133 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11134 <= FP_ARG_V4_MAX_REG)
11135 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11136 else
11137 {
11138 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11139 if (mode == DFmode || FLOAT128_IBM_P (mode)
11140 || mode == DDmode || mode == TDmode)
11141 cum->words += cum->words & 1;
11142 cum->words += rs6000_arg_size (mode, type);
11143 }
11144 }
11145 else
11146 {
11147 int n_words = rs6000_arg_size (mode, type);
11148 int gregno = cum->sysv_gregno;
11149
11150 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11151 As does any other 2 word item such as complex int due to a
11152 historical mistake. */
11153 if (n_words == 2)
11154 gregno += (1 - gregno) & 1;
11155
11156 /* Multi-reg args are not split between registers and stack. */
11157 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11158 {
11159 /* Long long is aligned on the stack. So are other 2 word
11160 items such as complex int due to a historical mistake. */
11161 if (n_words == 2)
11162 cum->words += cum->words & 1;
11163 cum->words += n_words;
11164 }
11165
11166 	  /* Note: we keep accumulating gregno even after we've started
11167 	     spilling to the stack; expand_builtin_saveregs relies on this
11168 	     to detect that spilling has begun.  */
11169 cum->sysv_gregno = gregno + n_words;
11170 }
11171
11172 if (TARGET_DEBUG_ARG)
11173 {
11174 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11175 cum->words, cum->fregno);
11176 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11177 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11178 fprintf (stderr, "mode = %4s, named = %d\n",
11179 GET_MODE_NAME (mode), named);
11180 }
11181 }
11182 else
11183 {
11184 int n_words = rs6000_arg_size (mode, type);
11185 int start_words = cum->words;
11186 int align_words = rs6000_parm_start (mode, type, start_words);
11187
11188 cum->words = align_words + n_words;
11189
11190 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11191 {
11192 /* _Decimal128 must be passed in an even/odd float register pair.
11193 This assumes that the register number is odd when fregno is
11194 odd. */
11195 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11196 cum->fregno++;
11197 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11198 }
11199
11200 if (TARGET_DEBUG_ARG)
11201 {
11202 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11203 cum->words, cum->fregno);
11204 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11205 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11206 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11207 named, align_words - start_words, depth);
11208 }
11209 }
11210 }
11211
11212 static void
11213 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11214 const_tree type, bool named)
11215 {
11216 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11217 0);
11218 }
11219
11220 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11221 structure between cum->intoffset and bitpos to integer registers. */
11222
11223 static void
11224 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11225 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11226 {
11227 machine_mode mode;
11228 unsigned int regno;
11229 unsigned int startbit, endbit;
11230 int this_regno, intregs, intoffset;
11231 rtx reg;
11232
11233 if (cum->intoffset == -1)
11234 return;
11235
11236 intoffset = cum->intoffset;
11237 cum->intoffset = -1;
11238
11239 /* If this is the trailing part of a word, try to only load that
11240 much into the register. Otherwise load the whole register. Note
11241 that in the latter case we may pick up unwanted bits. It's not a
11242      problem at the moment, but we may wish to revisit this.  */
11243
11244 if (intoffset % BITS_PER_WORD != 0)
11245 {
11246 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11247 if (!int_mode_for_size (bits, 0).exists (&mode))
11248 {
11249 /* We couldn't find an appropriate mode, which happens,
11250 e.g., in packed structs when there are 3 bytes to load.
11251 Back intoffset back to the beginning of the word in this
11252 case. */
11253 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11254 mode = word_mode;
11255 }
11256 }
11257 else
11258 mode = word_mode;
11259
11260 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11261 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11262 intregs = (endbit - startbit) / BITS_PER_WORD;
11263 this_regno = cum->words + intoffset / BITS_PER_WORD;
11264
11265 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11266 cum->use_stack = 1;
11267
11268 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11269 if (intregs <= 0)
11270 return;
11271
11272 intoffset /= BITS_PER_UNIT;
11273 do
11274 {
11275 regno = GP_ARG_MIN_REG + this_regno;
11276 reg = gen_rtx_REG (mode, regno);
11277 rvec[(*k)++] =
11278 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11279
11280 this_regno += 1;
11281 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11282 mode = word_mode;
11283 intregs -= 1;
11284 }
11285 while (intregs > 0);
11286 }
11287
11288 /* Recursive workhorse for the following. */
11289
11290 static void
11291 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11292 HOST_WIDE_INT startbitpos, rtx rvec[],
11293 int *k)
11294 {
11295 tree f;
11296
11297 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11298 if (TREE_CODE (f) == FIELD_DECL)
11299 {
11300 HOST_WIDE_INT bitpos = startbitpos;
11301 tree ftype = TREE_TYPE (f);
11302 machine_mode mode;
11303 if (ftype == error_mark_node)
11304 continue;
11305 mode = TYPE_MODE (ftype);
11306
11307 if (DECL_SIZE (f) != 0
11308 && tree_fits_uhwi_p (bit_position (f)))
11309 bitpos += int_bit_position (f);
11310
11311 /* ??? FIXME: else assume zero offset. */
11312
11313 if (TREE_CODE (ftype) == RECORD_TYPE)
11314 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11315 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11316 {
11317 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11318 #if 0
11319 switch (mode)
11320 {
11321 case E_SCmode: mode = SFmode; break;
11322 case E_DCmode: mode = DFmode; break;
11323 case E_TCmode: mode = TFmode; break;
11324 default: break;
11325 }
11326 #endif
11327 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11328 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11329 {
11330 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11331 && (mode == TFmode || mode == TDmode));
11332 /* Long double or _Decimal128 split over regs and memory. */
11333 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11334 	      cum->use_stack = 1;
11335 }
11336 rvec[(*k)++]
11337 = gen_rtx_EXPR_LIST (VOIDmode,
11338 gen_rtx_REG (mode, cum->fregno++),
11339 GEN_INT (bitpos / BITS_PER_UNIT));
11340 if (FLOAT128_2REG_P (mode))
11341 cum->fregno++;
11342 }
11343 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11344 {
11345 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11346 rvec[(*k)++]
11347 = gen_rtx_EXPR_LIST (VOIDmode,
11348 gen_rtx_REG (mode, cum->vregno++),
11349 GEN_INT (bitpos / BITS_PER_UNIT));
11350 }
11351 else if (cum->intoffset == -1)
11352 cum->intoffset = bitpos;
11353 }
11354 }
11355
11356 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11357 the register(s) to be used for each field and subfield of a struct
11358 being passed by value, along with the offset of where the
11359    register's value may be found in the block.  FP fields go in FP
11360    registers, vector fields go in vector registers, and everything
11361 else goes in int registers, packed as in memory.
11362
11363 This code is also used for function return values. RETVAL indicates
11364 whether this is the case.
11365
11366 Much of this is taken from the SPARC V9 port, which has a similar
11367 calling convention. */
11368
11369 static rtx
11370 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11371 bool named, bool retval)
11372 {
11373 rtx rvec[FIRST_PSEUDO_REGISTER];
11374 int k = 1, kbase = 1;
11375 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11376 /* This is a copy; modifications are not visible to our caller. */
11377 CUMULATIVE_ARGS copy_cum = *orig_cum;
11378 CUMULATIVE_ARGS *cum = &copy_cum;
11379
11380 /* Pad to 16 byte boundary if needed. */
11381 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11382 && (cum->words % 2) != 0)
11383 cum->words++;
11384
11385 cum->intoffset = 0;
11386 cum->use_stack = 0;
11387 cum->named = named;
11388
11389 /* Put entries into rvec[] for individual FP and vector fields, and
11390 for the chunks of memory that go in int regs. Note we start at
11391 element 1; 0 is reserved for an indication of using memory, and
11392 may or may not be filled in below. */
11393 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11394 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11395
11396 /* If any part of the struct went on the stack put all of it there.
11397 This hack is because the generic code for
11398 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11399 parts of the struct are not at the beginning. */
11400 if (cum->use_stack)
11401 {
11402 if (retval)
11403 return NULL_RTX; /* doesn't go in registers at all */
11404 kbase = 0;
11405 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11406 }
11407 if (k > 1 || cum->use_stack)
11408 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11409 else
11410 return NULL_RTX;
11411 }
11412
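/* Editorial sketch (hypothetical type) of the PARALLEL built above on
   a Darwin64 target:  */
#if 0
struct mix { int i; double d; };  /* int chunk -> GPR at byte offset 0,
                                     double -> FPR at byte offset 8; if
                                     any piece spilled, element 0 would
                                     be the NULL_RTX memory marker.  */
#endif
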
11413 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11414
11415 static rtx
11416 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11417 int align_words)
11418 {
11419 int n_units;
11420 int i, k;
11421 rtx rvec[GP_ARG_NUM_REG + 1];
11422
11423 if (align_words >= GP_ARG_NUM_REG)
11424 return NULL_RTX;
11425
11426 n_units = rs6000_arg_size (mode, type);
11427
11428 /* Optimize the simple case where the arg fits in one gpr, except in
11429 the case of BLKmode due to assign_parms assuming that registers are
11430 BITS_PER_WORD wide. */
11431 if (n_units == 0
11432 || (n_units == 1 && mode != BLKmode))
11433 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11434
11435 k = 0;
11436 if (align_words + n_units > GP_ARG_NUM_REG)
11437 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11438 using a magic NULL_RTX component.
11439 This is not strictly correct. Only some of the arg belongs in
11440 memory, not all of it. However, the normal scheme using
11441 function_arg_partial_nregs can result in unusual subregs, eg.
11442 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11443 store the whole arg to memory is often more efficient than code
11444 to store pieces, and we know that space is available in the right
11445 place for the whole arg. */
11446 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11447
11448 i = 0;
11449 do
11450 {
11451 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11452 rtx off = GEN_INT (i++ * 4);
11453 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11454 }
11455 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11456
11457 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11458 }
11459
11460 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11461 but must also be copied into the parameter save area starting at
11462 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11463 to the GPRs and/or memory. Return the number of elements used. */
11464
11465 static int
11466 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11467 int align_words, rtx *rvec)
11468 {
11469 int k = 0;
11470
11471 if (align_words < GP_ARG_NUM_REG)
11472 {
11473 int n_words = rs6000_arg_size (mode, type);
11474
11475 if (align_words + n_words > GP_ARG_NUM_REG
11476 || mode == BLKmode
11477 || (TARGET_32BIT && TARGET_POWERPC64))
11478 {
11479 /* If this is partially on the stack, then we only
11480 include the portion actually in registers here. */
11481 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11482 int i = 0;
11483
11484 if (align_words + n_words > GP_ARG_NUM_REG)
11485 {
11486 /* Not all of the arg fits in gprs. Say that it goes in memory
11487 too, using a magic NULL_RTX component. Also see comment in
11488 rs6000_mixed_function_arg for why the normal
11489 function_arg_partial_nregs scheme doesn't work in this case. */
11490 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11491 }
11492
11493 do
11494 {
11495 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11496 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11497 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11498 }
11499 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11500 }
11501 else
11502 {
11503 /* The whole arg fits in gprs. */
11504 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11505 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11506 }
11507 }
11508 else
11509 {
11510 /* It's entirely in memory. */
11511 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11512 }
11513
11514 return k;
11515 }
11516
11517 /* RVEC is a vector of K components of an argument of mode MODE.
11518 Construct the final function_arg return value from it. */
11519
11520 static rtx
11521 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11522 {
11523 gcc_assert (k >= 1);
11524
11525 /* Avoid returning a PARALLEL in the trivial cases. */
11526 if (k == 1)
11527 {
11528 if (XEXP (rvec[0], 0) == NULL_RTX)
11529 return NULL_RTX;
11530
11531 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11532 return XEXP (rvec[0], 0);
11533 }
11534
11535 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11536 }
11537
11538 /* Determine where to put an argument to a function.
11539 Value is zero to push the argument on the stack,
11540 or a hard register in which to store the argument.
11541
11542 MODE is the argument's machine mode.
11543 TYPE is the data type of the argument (as a tree).
11544 This is null for libcalls where that information may
11545 not be available.
11546 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11547 the preceding args and about the function being called. It is
11548 not modified in this routine.
11549 NAMED is nonzero if this argument is a named parameter
11550 (otherwise it is an extra parameter matching an ellipsis).
11551
11552    On RS/6000 the first eight words of non-FP args are normally in registers
11553 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11554 Under V.4, the first 8 FP args are in registers.
11555
11556 If this is floating-point and no prototype is specified, we use
11557 both an FP and integer register (or possibly FP reg and stack). Library
11558 functions (when CALL_LIBCALL is set) always have the proper types for args,
11559 so we can pass the FP value just in one register. emit_library_function
11560 doesn't support PARALLEL anyway.
11561
11562 Note that for args passed by reference, function_arg will be called
11563 with MODE and TYPE set to that of the pointer to the arg, not the arg
11564 itself. */
11565
11566 static rtx
11567 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11568 const_tree type, bool named)
11569 {
11570 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11571 enum rs6000_abi abi = DEFAULT_ABI;
11572 machine_mode elt_mode;
11573 int n_elts;
11574
11575   /* Return a marker indicating whether the bit in CR1 that V.4 uses to
11576      say FP args were passed in registers needs to be set or cleared.
11577 Assume that we don't need the marker for software floating point,
11578 or compiler generated library calls. */
11579 if (mode == VOIDmode)
11580 {
11581 if (abi == ABI_V4
11582 && (cum->call_cookie & CALL_LIBCALL) == 0
11583 && (cum->stdarg
11584 || (cum->nargs_prototype < 0
11585 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11586 && TARGET_HARD_FLOAT)
11587 return GEN_INT (cum->call_cookie
11588 | ((cum->fregno == FP_ARG_MIN_REG)
11589 ? CALL_V4_SET_FP_ARGS
11590 : CALL_V4_CLEAR_FP_ARGS));
11591
11592 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11593 }
11594
11595 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11596
11597 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11598 {
11599 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11600 if (rslt != NULL_RTX)
11601 return rslt;
11602 /* Else fall through to usual handling. */
11603 }
11604
11605 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11606 {
11607 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11608 rtx r, off;
11609 int i, k = 0;
11610
11611 /* Do we also need to pass this argument in the parameter save area?
11612 Library support functions for IEEE 128-bit are assumed to not need the
11613 value passed both in GPRs and in vector registers. */
11614 if (TARGET_64BIT && !cum->prototype
11615 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11616 {
11617 int align_words = ROUND_UP (cum->words, 2);
11618 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11619 }
11620
11621 /* Describe where this argument goes in the vector registers. */
11622 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11623 {
11624 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11625 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11626 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11627 }
11628
11629 return rs6000_finish_function_arg (mode, rvec, k);
11630 }
11631 else if (TARGET_ALTIVEC_ABI
11632 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11633 || (type && TREE_CODE (type) == VECTOR_TYPE
11634 && int_size_in_bytes (type) == 16)))
11635 {
11636 if (named || abi == ABI_V4)
11637 return NULL_RTX;
11638 else
11639 {
11640 /* Vector parameters to varargs functions under AIX or Darwin
11641 get passed in memory and possibly also in GPRs. */
11642 int align, align_words, n_words;
11643 machine_mode part_mode;
11644
11645 /* Vector parameters must be 16-byte aligned. In 32-bit
11646 mode this means we need to take into account the offset
11647 to the parameter save area. In 64-bit mode, they just
11648 have to start on an even word, since the parameter save
11649 area is 16-byte aligned. */
11650 if (TARGET_32BIT)
11651 align = -(rs6000_parm_offset () + cum->words) & 3;
11652 else
11653 align = cum->words & 1;
11654 align_words = cum->words + align;
11655
11656 /* Out of registers? Memory, then. */
11657 if (align_words >= GP_ARG_NUM_REG)
11658 return NULL_RTX;
11659
11660 if (TARGET_32BIT && TARGET_POWERPC64)
11661 return rs6000_mixed_function_arg (mode, type, align_words);
11662
11663 /* The vector value goes in GPRs. Only the part of the
11664 value in GPRs is reported here. */
11665 part_mode = mode;
11666 n_words = rs6000_arg_size (mode, type);
11667 if (align_words + n_words > GP_ARG_NUM_REG)
11668 /* Fortunately, there are only two possibilities, the value
11669 is either wholly in GPRs or half in GPRs and half not. */
11670 part_mode = DImode;
11671
11672 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11673 }
11674 }
11675
11676 else if (abi == ABI_V4)
11677 {
11678 if (abi_v4_pass_in_fpr (mode, named))
11679 {
11680 /* _Decimal128 must use an even/odd register pair. This assumes
11681 that the register number is odd when fregno is odd. */
11682 if (mode == TDmode && (cum->fregno % 2) == 1)
11683 cum->fregno++;
11684
11685 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11686 <= FP_ARG_V4_MAX_REG)
11687 return gen_rtx_REG (mode, cum->fregno);
11688 else
11689 return NULL_RTX;
11690 }
11691 else
11692 {
11693 int n_words = rs6000_arg_size (mode, type);
11694 int gregno = cum->sysv_gregno;
11695
11696 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11697 As does any other 2 word item such as complex int due to a
11698 historical mistake. */
11699 if (n_words == 2)
11700 gregno += (1 - gregno) & 1;
11701
11702 /* Multi-reg args are not split between registers and stack. */
11703 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11704 return NULL_RTX;
11705
11706 if (TARGET_32BIT && TARGET_POWERPC64)
11707 return rs6000_mixed_function_arg (mode, type,
11708 gregno - GP_ARG_MIN_REG);
11709 return gen_rtx_REG (mode, gregno);
11710 }
11711 }
11712 else
11713 {
11714 int align_words = rs6000_parm_start (mode, type, cum->words);
11715
11716 /* _Decimal128 must be passed in an even/odd float register pair.
11717 This assumes that the register number is odd when fregno is odd. */
11718 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11719 cum->fregno++;
11720
11721 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11722 && !(TARGET_AIX && !TARGET_ELF
11723 && type != NULL && AGGREGATE_TYPE_P (type)))
11724 {
11725 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11726 rtx r, off;
11727 int i, k = 0;
11728 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11729 int fpr_words;
11730
11731 /* Do we also need to pass this argument in the parameter
11732 save area? */
11733 if (type && (cum->nargs_prototype <= 0
11734 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11735 && TARGET_XL_COMPAT
11736 && align_words >= GP_ARG_NUM_REG)))
11737 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11738
11739 /* Describe where this argument goes in the fprs. */
11740 for (i = 0; i < n_elts
11741 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11742 {
11743 /* Check if the argument is split over registers and memory.
11744 This can only ever happen for long double or _Decimal128;
11745 complex types are handled via split_complex_arg. */
11746 machine_mode fmode = elt_mode;
11747 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11748 {
11749 gcc_assert (FLOAT128_2REG_P (fmode));
11750 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11751 }
11752
11753 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11754 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11755 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11756 }
11757
11758 /* If there were not enough FPRs to hold the argument, the rest
11759 usually goes into memory. However, if the current position
11760 is still within the register parameter area, a portion may
11761 actually have to go into GPRs.
11762
11763 Note that it may happen that the portion of the argument
11764 passed in the first "half" of the first GPR was already
11765 passed in the last FPR as well.
11766
11767 For unnamed arguments, we already set up GPRs to cover the
11768 whole argument in rs6000_psave_function_arg, so there is
11769 nothing further to do at this point. */
11770 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11771 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11772 && cum->nargs_prototype > 0)
11773 {
11774 static bool warned;
11775
11776 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11777 int n_words = rs6000_arg_size (mode, type);
11778
11779 align_words += fpr_words;
11780 n_words -= fpr_words;
11781
11782 do
11783 {
11784 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11785 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11786 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11787 }
11788 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11789
11790 if (!warned && warn_psabi)
11791 {
11792 warned = true;
11793 inform (input_location,
11794 "the ABI of passing homogeneous float aggregates"
11795 " has changed in GCC 5");
11796 }
11797 }
11798
11799 return rs6000_finish_function_arg (mode, rvec, k);
11800 }
11801 else if (align_words < GP_ARG_NUM_REG)
11802 {
11803 if (TARGET_32BIT && TARGET_POWERPC64)
11804 return rs6000_mixed_function_arg (mode, type, align_words);
11805
11806 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11807 }
11808 else
11809 return NULL_RTX;
11810 }
11811 }
11812 \f
11813 /* For an arg passed partly in registers and partly in memory, this is
11814 the number of bytes passed in registers. For args passed entirely in
11815 registers or entirely in memory, zero. When an arg is described by a
11816 PARALLEL, perhaps using more than one register type, this function
11817 returns the number of bytes used by the first element of the PARALLEL. */
11818
11819 static int
11820 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11821 tree type, bool named)
11822 {
11823 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11824 bool passed_in_gprs = true;
11825 int ret = 0;
11826 int align_words;
11827 machine_mode elt_mode;
11828 int n_elts;
11829
11830 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11831
11832 if (DEFAULT_ABI == ABI_V4)
11833 return 0;
11834
11835 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11836 {
11837 /* If we are passing this arg in the fixed parameter save area (gprs or
11838 memory) as well as VRs, we do not use the partial bytes mechanism;
11839 instead, rs6000_function_arg will return a PARALLEL including a memory
11840 element as necessary. Library support functions for IEEE 128-bit are
11841 assumed to not need the value passed both in GPRs and in vector
11842 registers. */
11843 if (TARGET_64BIT && !cum->prototype
11844 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11845 return 0;
11846
11847 /* Otherwise, we pass in VRs only. Check for partial copies. */
11848 passed_in_gprs = false;
11849 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11850 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11851 }
11852
11853 /* In this complicated case we just disable the partial_nregs code. */
11854 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11855 return 0;
11856
11857 align_words = rs6000_parm_start (mode, type, cum->words);
11858
11859 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11860 && !(TARGET_AIX && !TARGET_ELF
11861 && type != NULL && AGGREGATE_TYPE_P (type)))
11862 {
11863 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11864
11865 /* If we are passing this arg in the fixed parameter save area
11866 (gprs or memory) as well as FPRs, we do not use the partial
11867 bytes mechanism; instead, rs6000_function_arg will return a
11868 PARALLEL including a memory element as necessary. */
11869 if (type
11870 && (cum->nargs_prototype <= 0
11871 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11872 && TARGET_XL_COMPAT
11873 && align_words >= GP_ARG_NUM_REG)))
11874 return 0;
11875
11876 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11877 passed_in_gprs = false;
11878 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11879 {
11880 /* Compute number of bytes / words passed in FPRs. If there
11881 is still space available in the register parameter area
11882 *after* that amount, a part of the argument will be passed
11883 in GPRs. In that case, the total amount passed in any
11884 registers is equal to the amount that would have been passed
11885 in GPRs if everything were passed there, so we fall back to
11886 the GPR code below to compute the appropriate value. */
11887 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11888 * MIN (8, GET_MODE_SIZE (elt_mode)));
11889 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11890
11891 if (align_words + fpr_words < GP_ARG_NUM_REG)
11892 passed_in_gprs = true;
11893 else
11894 ret = fpr;
11895 }
11896 }
11897
11898 if (passed_in_gprs
11899 && align_words < GP_ARG_NUM_REG
11900 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11901 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11902
11903 if (ret != 0 && TARGET_DEBUG_ARG)
11904 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11905
11906 return ret;
11907 }
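
/* Editorial sketch (hypothetical declaration): on a 64-bit ELFv2
   target, the struct below starts in the last GPR (r10) after seven
   long arguments, so 8 bytes go in r10 and 8 on the stack;
   rs6000_arg_partial_bytes returns 8.  */
#if 0
struct pair { long a, b; };
void f (long a1, long a2, long a3, long a4, long a5, long a6, long a7,
	struct pair p);
#endif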
11908 \f
11909 /* A C expression that indicates when an argument must be passed by
11910 reference. If nonzero for an argument, a copy of that argument is
11911 made in memory and a pointer to the argument is passed instead of
11912 the argument itself. The pointer is passed in whatever way is
11913 appropriate for passing a pointer to that type.
11914
11915 Under V.4, aggregates and long double are passed by reference.
11916
11917 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11918 reference unless the AltiVec vector extension ABI is in force.
11919
11920 As an extension to all ABIs, variable sized types are passed by
11921 reference. */
11922
11923 static bool
11924 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11925 machine_mode mode, const_tree type,
11926 bool named ATTRIBUTE_UNUSED)
11927 {
11928 if (!type)
11929 return 0;
11930
11931 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11932 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11933 {
11934 if (TARGET_DEBUG_ARG)
11935 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11936 return 1;
11937 }
11938
11939 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11940 {
11941 if (TARGET_DEBUG_ARG)
11942 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11943 return 1;
11944 }
11945
11946 if (int_size_in_bytes (type) < 0)
11947 {
11948 if (TARGET_DEBUG_ARG)
11949 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11950 return 1;
11951 }
11952
11953 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11954 modes only exist for GCC vector types if -maltivec. */
11955 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11956 {
11957 if (TARGET_DEBUG_ARG)
11958 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11959 return 1;
11960 }
11961
11962 /* Pass synthetic vectors in memory. */
11963 if (TREE_CODE (type) == VECTOR_TYPE
11964 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11965 {
11966 static bool warned_for_pass_big_vectors = false;
11967 if (TARGET_DEBUG_ARG)
11968 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11969 if (!warned_for_pass_big_vectors)
11970 {
11971 warning (OPT_Wpsabi, "GCC vector passed by reference: "
11972 "non-standard ABI extension with no compatibility "
11973 "guarantee");
11974 warned_for_pass_big_vectors = true;
11975 }
11976 return 1;
11977 }
11978
11979 return 0;
11980 }
11981
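/* Editorial sketch (hypothetical types): arguments passed by invisible
   reference per the rules above.  */
#if 0
struct big { char c[32]; };
void f (struct big b);     /* V.4: aggregates go by reference.  */
typedef int v8si __attribute__ ((vector_size (32)));
void g (v8si v);           /* Synthetic 32-byte vector: by reference
                              on all ABIs.  */
#endif
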
11982 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
11983    already processed.  Return true if the parameter must be passed
11984 (fully or partially) on the stack. */
11985
11986 static bool
11987 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11988 {
11989 machine_mode mode;
11990 int unsignedp;
11991 rtx entry_parm;
11992
11993 /* Catch errors. */
11994 if (type == NULL || type == error_mark_node)
11995 return true;
11996
11997 /* Handle types with no storage requirement. */
11998 if (TYPE_MODE (type) == VOIDmode)
11999 return false;
12000
12001   /* Handle complex types.  Both the real and imaginary parts are
     passed; each call below also advances past one component.  */
12002 if (TREE_CODE (type) == COMPLEX_TYPE)
12003 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12004 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12005
12006 /* Handle transparent aggregates. */
12007 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12008 && TYPE_TRANSPARENT_AGGR (type))
12009 type = TREE_TYPE (first_field (type));
12010
12011 /* See if this arg was passed by invisible reference. */
12012 if (pass_by_reference (get_cumulative_args (args_so_far),
12013 TYPE_MODE (type), type, true))
12014 type = build_pointer_type (type);
12015
12016 /* Find mode as it is passed by the ABI. */
12017 unsignedp = TYPE_UNSIGNED (type);
12018 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
12019
12020 /* If we must pass in stack, we need a stack. */
12021 if (rs6000_must_pass_in_stack (mode, type))
12022 return true;
12023
12024 /* If there is no incoming register, we need a stack. */
12025 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
12026 if (entry_parm == NULL)
12027 return true;
12028
12029 /* Likewise if we need to pass both in registers and on the stack. */
12030 if (GET_CODE (entry_parm) == PARALLEL
12031 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
12032 return true;
12033
12034 /* Also true if we're partially in registers and partially not. */
12035 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12036 return true;
12037
12038 /* Update info on where next arg arrives in registers. */
12039 rs6000_function_arg_advance (args_so_far, mode, type, true);
12040 return false;
12041 }
12042
12043 /* Return true if FUN has no prototype, has a variable argument
12044 list, or passes any parameter in memory. */
12045
12046 static bool
12047 rs6000_function_parms_need_stack (tree fun, bool incoming)
12048 {
12049 tree fntype, result;
12050 CUMULATIVE_ARGS args_so_far_v;
12051 cumulative_args_t args_so_far;
12052
12053 if (!fun)
12054 /* Must be a libcall, all of which only use reg parms. */
12055 return false;
12056
12057 fntype = fun;
12058 if (!TYPE_P (fun))
12059 fntype = TREE_TYPE (fun);
12060
12061 /* Varargs functions need the parameter save area. */
12062 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12063 return true;
12064
12065 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12066 args_so_far = pack_cumulative_args (&args_so_far_v);
12067
12068 /* When incoming, we will have been passed the function decl.
12069 It is necessary to use the decl to handle K&R style functions,
12070 where TYPE_ARG_TYPES may not be available. */
12071 if (incoming)
12072 {
12073 gcc_assert (DECL_P (fun));
12074 result = DECL_RESULT (fun);
12075 }
12076 else
12077 result = TREE_TYPE (fntype);
12078
12079 if (result && aggregate_value_p (result, fntype))
12080 {
12081 if (!TYPE_P (result))
12082 result = TREE_TYPE (result);
12083 result = build_pointer_type (result);
12084 rs6000_parm_needs_stack (args_so_far, result);
12085 }
12086
12087 if (incoming)
12088 {
12089 tree parm;
12090
12091 for (parm = DECL_ARGUMENTS (fun);
12092 parm && parm != void_list_node;
12093 parm = TREE_CHAIN (parm))
12094 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12095 return true;
12096 }
12097 else
12098 {
12099 function_args_iterator args_iter;
12100 tree arg_type;
12101
12102 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12103 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12104 return true;
12105 }
12106
12107 return false;
12108 }
12109
12110 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12111 usually a constant depending on the ABI. However, in the ELFv2 ABI
12112 the register parameter area is optional when calling a function that
12113 has a prototype in scope, has no variable argument list, and passes
12114 all parameters in registers. */
12115
12116 int
12117 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12118 {
12119 int reg_parm_stack_space;
12120
12121 switch (DEFAULT_ABI)
12122 {
12123 default:
12124 reg_parm_stack_space = 0;
12125 break;
12126
12127 case ABI_AIX:
12128 case ABI_DARWIN:
12129 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12130 break;
12131
12132 case ABI_ELFv2:
12133 /* ??? Recomputing this every time is a bit expensive. Is there
12134 a place to cache this information? */
12135 if (rs6000_function_parms_need_stack (fun, incoming))
12136 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12137 else
12138 reg_parm_stack_space = 0;
12139 break;
12140 }
12141
12142 return reg_parm_stack_space;
12143 }
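
/* Illustrative sizing (editorial note, not from the original source):
   under the 64-bit AIX and ELF ABIs the register parameter save area
   covers the eight GPR argument registers r3..r10, i.e. 8 * 8 = 64
   bytes; the 32-bit variants use 8 * 4 = 32 bytes.  So under ELFv2 a
   call to

     int add (int a, int b);   // prototyped, not variadic, all in regs

   needs no save area at all, while an unprototyped or variadic callee
   still gets the full 64 bytes. */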
12144
12145 static void
12146 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12147 {
12148 int i;
12149 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12150
12151 if (nregs == 0)
12152 return;
12153
12154 for (i = 0; i < nregs; i++)
12155 {
12156 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12157 if (reload_completed)
12158 {
12159 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12160 tem = NULL_RTX;
12161 else
12162 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12163 i * GET_MODE_SIZE (reg_mode));
12164 }
12165 else
12166 tem = replace_equiv_address (tem, XEXP (tem, 0));
12167
12168 gcc_assert (tem);
12169
12170 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12171 }
12172 }
12173 \f
12174 /* Perform any actions needed for a function that is receiving a
12175 variable number of arguments.
12176
12177 CUM is as above.
12178
12179 MODE and TYPE are the mode and type of the current parameter.
12180
12181 PRETEND_SIZE is a variable that should be set to the amount of stack
12182 that must be pushed by the prolog to pretend that our caller pushed
12183 it.
12184
12185 Normally, this macro will push all remaining incoming registers on the
12186 stack and set PRETEND_SIZE to the length of the registers pushed. */
12187
12188 static void
12189 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12190 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12191 int no_rtl)
12192 {
12193 CUMULATIVE_ARGS next_cum;
12194 int reg_size = TARGET_32BIT ? 4 : 8;
12195 rtx save_area = NULL_RTX, mem;
12196 int first_reg_offset;
12197 alias_set_type set;
12198
12199 /* Skip the last named argument. */
12200 next_cum = *get_cumulative_args (cum);
12201 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12202
12203 if (DEFAULT_ABI == ABI_V4)
12204 {
12205 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12206
12207 if (! no_rtl)
12208 {
12209 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12210 HOST_WIDE_INT offset = 0;
12211
12212 /* Try to optimize the size of the varargs save area.
12213 The ABI requires that ap.reg_save_area is doubleword
12214 aligned, but we don't need to allocate space for all
12215 the bytes, only for those in which we will actually
12216 save anything. */
12217 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12218 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12219 if (TARGET_HARD_FLOAT
12220 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12221 && cfun->va_list_fpr_size)
12222 {
12223 if (gpr_reg_num)
12224 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12225 * UNITS_PER_FP_WORD;
12226 if (cfun->va_list_fpr_size
12227 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12228 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12229 else
12230 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12231 * UNITS_PER_FP_WORD;
12232 }
12233 if (gpr_reg_num)
12234 {
12235 offset = -((first_reg_offset * reg_size) & ~7);
12236 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12237 {
12238 gpr_reg_num = cfun->va_list_gpr_size;
12239 if (reg_size == 4 && (first_reg_offset & 1))
12240 gpr_reg_num++;
12241 }
12242 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12243 }
12244 else if (fpr_size)
12245 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12246 * UNITS_PER_FP_WORD
12247 - (int) (GP_ARG_NUM_REG * reg_size);
12248
12249 if (gpr_size + fpr_size)
12250 {
12251 rtx reg_save_area
12252 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12253 gcc_assert (MEM_P (reg_save_area));
12254 reg_save_area = XEXP (reg_save_area, 0);
12255 if (GET_CODE (reg_save_area) == PLUS)
12256 {
12257 gcc_assert (XEXP (reg_save_area, 0)
12258 == virtual_stack_vars_rtx);
12259 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12260 offset += INTVAL (XEXP (reg_save_area, 1));
12261 }
12262 else
12263 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12264 }
12265
12266 cfun->machine->varargs_save_offset = offset;
12267 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12268 }
12269 }
12270 else
12271 {
12272 first_reg_offset = next_cum.words;
12273 save_area = crtl->args.internal_arg_pointer;
12274
12275 if (targetm.calls.must_pass_in_stack (mode, type))
12276 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12277 }
12278
12279 set = get_varargs_alias_set ();
12280 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12281 && cfun->va_list_gpr_size)
12282 {
12283 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12284
12285 if (va_list_gpr_counter_field)
12286 /* V4 va_list_gpr_size counts number of registers needed. */
12287 n_gpr = cfun->va_list_gpr_size;
12288 else
12289 /* char * va_list instead counts number of bytes needed. */
12290 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12291
12292 if (nregs > n_gpr)
12293 nregs = n_gpr;
12294
12295 mem = gen_rtx_MEM (BLKmode,
12296 plus_constant (Pmode, save_area,
12297 first_reg_offset * reg_size));
12298 MEM_NOTRAP_P (mem) = 1;
12299 set_mem_alias_set (mem, set);
12300 set_mem_align (mem, BITS_PER_WORD);
12301
12302 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12303 nregs);
12304 }
12305
12306 /* Save FP registers if needed. */
12307 if (DEFAULT_ABI == ABI_V4
12308 && TARGET_HARD_FLOAT
12309 && ! no_rtl
12310 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12311 && cfun->va_list_fpr_size)
12312 {
12313 int fregno = next_cum.fregno, nregs;
12314 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12315 rtx lab = gen_label_rtx ();
12316 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12317 * UNITS_PER_FP_WORD);
12318
12319 emit_jump_insn
12320 (gen_rtx_SET (pc_rtx,
12321 gen_rtx_IF_THEN_ELSE (VOIDmode,
12322 gen_rtx_NE (VOIDmode, cr1,
12323 const0_rtx),
12324 gen_rtx_LABEL_REF (VOIDmode, lab),
12325 pc_rtx)));
12326
12327 for (nregs = 0;
12328 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12329 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12330 {
12331 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12332 plus_constant (Pmode, save_area, off));
12333 MEM_NOTRAP_P (mem) = 1;
12334 set_mem_alias_set (mem, set);
12335 set_mem_align (mem, GET_MODE_ALIGNMENT (
12336 TARGET_HARD_FLOAT ? DFmode : SFmode));
12337 emit_move_insn (mem, gen_rtx_REG (
12338 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12339 }
12340
12341 emit_label (lab);
12342 }
12343 }
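
/* Rough worked example (editorial sketch, 32-bit SVR4, assuming the
   whole va_list may be used and ignoring the FPR portion): for

     void f (int a, ...);

   the named argument consumes r3, so first_reg_offset = 1 and
   gpr_reg_num = 8 - 1 = 7.  The save area for r4..r10 is then
   gpr_size = (7 * 4 + 7) & ~7 = 32 bytes, keeping the block
   doubleword aligned as the ABI requires. */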
12344
12345 /* Create the va_list data type. */
12346
12347 static tree
12348 rs6000_build_builtin_va_list (void)
12349 {
12350 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12351
12352 /* For AIX, prefer 'char *' because that's what the system
12353 header files like. */
12354 if (DEFAULT_ABI != ABI_V4)
12355 return build_pointer_type (char_type_node);
12356
12357 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12358 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12359 get_identifier ("__va_list_tag"), record);
12360
12361 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12362 unsigned_char_type_node);
12363 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12364 unsigned_char_type_node);
12365 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12366 every user file. */
12367 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12368 get_identifier ("reserved"), short_unsigned_type_node);
12369 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12370 get_identifier ("overflow_arg_area"),
12371 ptr_type_node);
12372 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12373 get_identifier ("reg_save_area"),
12374 ptr_type_node);
12375
12376 va_list_gpr_counter_field = f_gpr;
12377 va_list_fpr_counter_field = f_fpr;
12378
12379 DECL_FIELD_CONTEXT (f_gpr) = record;
12380 DECL_FIELD_CONTEXT (f_fpr) = record;
12381 DECL_FIELD_CONTEXT (f_res) = record;
12382 DECL_FIELD_CONTEXT (f_ovf) = record;
12383 DECL_FIELD_CONTEXT (f_sav) = record;
12384
12385 TYPE_STUB_DECL (record) = type_decl;
12386 TYPE_NAME (record) = type_decl;
12387 TYPE_FIELDS (record) = f_gpr;
12388 DECL_CHAIN (f_gpr) = f_fpr;
12389 DECL_CHAIN (f_fpr) = f_res;
12390 DECL_CHAIN (f_res) = f_ovf;
12391 DECL_CHAIN (f_ovf) = f_sav;
12392
12393 layout_type (record);
12394
12395 /* The correct type is an array type of one element. */
12396 return build_array_type (record, build_index_type (size_zero_node));
12397 }
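
/* For reference, the record built above corresponds roughly to the
   following C declaration (editorial sketch; the layout constructed
   above is authoritative):

     typedef struct __va_list_tag
     {
       unsigned char gpr;            // index of next GPR argument slot
       unsigned char fpr;            // index of next FPR argument slot
       unsigned short reserved;      // named padding (see above)
       void *overflow_arg_area;      // next stack-passed argument
       void *reg_save_area;          // base of the register save block
     } __va_list_tag;

     typedef __va_list_tag va_list[1];  */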
12398
12399 /* Implement va_start. */
12400
12401 static void
12402 rs6000_va_start (tree valist, rtx nextarg)
12403 {
12404 HOST_WIDE_INT words, n_gpr, n_fpr;
12405 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12406 tree gpr, fpr, ovf, sav, t;
12407
12408 /* Only SVR4 needs something special. */
12409 if (DEFAULT_ABI != ABI_V4)
12410 {
12411 std_expand_builtin_va_start (valist, nextarg);
12412 return;
12413 }
12414
12415 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12416 f_fpr = DECL_CHAIN (f_gpr);
12417 f_res = DECL_CHAIN (f_fpr);
12418 f_ovf = DECL_CHAIN (f_res);
12419 f_sav = DECL_CHAIN (f_ovf);
12420
12421 valist = build_simple_mem_ref (valist);
12422 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12423 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12424 f_fpr, NULL_TREE);
12425 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12426 f_ovf, NULL_TREE);
12427 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12428 f_sav, NULL_TREE);
12429
12430 /* Count number of gp and fp argument registers used. */
12431 words = crtl->args.info.words;
12432 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12433 GP_ARG_NUM_REG);
12434 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12435 FP_ARG_NUM_REG);
12436
12437 if (TARGET_DEBUG_ARG)
12438 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12439 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12440 words, n_gpr, n_fpr);
12441
12442 if (cfun->va_list_gpr_size)
12443 {
12444 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12445 build_int_cst (NULL_TREE, n_gpr));
12446 TREE_SIDE_EFFECTS (t) = 1;
12447 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12448 }
12449
12450 if (cfun->va_list_fpr_size)
12451 {
12452 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12453 build_int_cst (NULL_TREE, n_fpr));
12454 TREE_SIDE_EFFECTS (t) = 1;
12455 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12456
12457 #ifdef HAVE_AS_GNU_ATTRIBUTE
12458 if (call_ABI_of_interest (cfun->decl))
12459 rs6000_passes_float = true;
12460 #endif
12461 }
12462
12463 /* Find the overflow area. */
12464 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12465 if (words != 0)
12466 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12467 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12468 TREE_SIDE_EFFECTS (t) = 1;
12469 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12470
12471 /* If there were no va_arg invocations, don't set up the register
12472 save area. */
12473 if (!cfun->va_list_gpr_size
12474 && !cfun->va_list_fpr_size
12475 && n_gpr < GP_ARG_NUM_REG
12476 && n_fpr < FP_ARG_V4_MAX_REG)
12477 return;
12478
12479 /* Find the register save area. */
12480 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12481 if (cfun->machine->varargs_save_offset)
12482 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12483 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12484 TREE_SIDE_EFFECTS (t) = 1;
12485 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12486 }
12487
12488 /* Implement va_arg. */
12489
12490 static tree
12491 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12492 gimple_seq *post_p)
12493 {
12494 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12495 tree gpr, fpr, ovf, sav, reg, t, u;
12496 int size, rsize, n_reg, sav_ofs, sav_scale;
12497 tree lab_false, lab_over, addr;
12498 int align;
12499 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12500 int regalign = 0;
12501 gimple *stmt;
12502
12503 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12504 {
12505 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12506 return build_va_arg_indirect_ref (t);
12507 }
12508
12509 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12510 earlier version of gcc, with the property that it always applied alignment
12511 adjustments to the va-args (even for zero-sized types). The cheapest way
12512 to deal with this is to replicate the effect of the part of
12513 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12514 of relevance.
12515 We don't need to check for pass-by-reference because of the test above.
12516 We can return a simplified answer, since we know there's no offset to add. */
12517
12518 if (((TARGET_MACHO
12519 && rs6000_darwin64_abi)
12520 || DEFAULT_ABI == ABI_ELFv2
12521 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12522 && integer_zerop (TYPE_SIZE (type)))
12523 {
12524 unsigned HOST_WIDE_INT align, boundary;
12525 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12526 align = PARM_BOUNDARY / BITS_PER_UNIT;
12527 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12528 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12529 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12530 boundary /= BITS_PER_UNIT;
12531 if (boundary > align)
12532 {
12533 tree t;
12534 /* This updates arg ptr by the amount that would be necessary
12535 to align the zero-sized (but not zero-alignment) item. */
12536 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12537 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12538 gimplify_and_add (t, pre_p);
12539
12540 t = fold_convert (sizetype, valist_tmp);
12541 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12542 fold_convert (TREE_TYPE (valist),
12543 fold_build2 (BIT_AND_EXPR, sizetype, t,
12544 size_int (-boundary))));
12545 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12546 gimplify_and_add (t, pre_p);
12547 }
12548 /* Since it is zero-sized there's no increment for the item itself. */
12549 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12550 return build_va_arg_indirect_ref (valist_tmp);
12551 }
12552
12553 if (DEFAULT_ABI != ABI_V4)
12554 {
12555 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12556 {
12557 tree elem_type = TREE_TYPE (type);
12558 machine_mode elem_mode = TYPE_MODE (elem_type);
12559 int elem_size = GET_MODE_SIZE (elem_mode);
12560
12561 if (elem_size < UNITS_PER_WORD)
12562 {
12563 tree real_part, imag_part;
12564 gimple_seq post = NULL;
12565
12566 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12567 &post);
12568 /* Copy the value into a temporary, lest the formal temporary
12569 be reused out from under us. */
12570 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12571 gimple_seq_add_seq (pre_p, post);
12572
12573 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12574 post_p);
12575
12576 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12577 }
12578 }
12579
12580 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12581 }
12582
12583 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12584 f_fpr = DECL_CHAIN (f_gpr);
12585 f_res = DECL_CHAIN (f_fpr);
12586 f_ovf = DECL_CHAIN (f_res);
12587 f_sav = DECL_CHAIN (f_ovf);
12588
12589 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12590 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12591 f_fpr, NULL_TREE);
12592 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12593 f_ovf, NULL_TREE);
12594 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12595 f_sav, NULL_TREE);
12596
12597 size = int_size_in_bytes (type);
12598 rsize = (size + 3) / 4;
12599 int pad = 4 * rsize - size;
12600 align = 1;
12601
12602 machine_mode mode = TYPE_MODE (type);
12603 if (abi_v4_pass_in_fpr (mode, false))
12604 {
12605 /* FP args go in FP registers, if present. */
12606 reg = fpr;
12607 n_reg = (size + 7) / 8;
12608 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12609 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12610 if (mode != SFmode && mode != SDmode)
12611 align = 8;
12612 }
12613 else
12614 {
12615 /* Otherwise into GP registers. */
12616 reg = gpr;
12617 n_reg = rsize;
12618 sav_ofs = 0;
12619 sav_scale = 4;
12620 if (n_reg == 2)
12621 align = 8;
12622 }
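
  /* Worked example (editorial, V4 with hard float): for a double,
     size = 8 so n_reg = 1, sav_ofs = 8 * 4 = 32 (the FPR save block
     starts after the eight 4-byte GPR slots), sav_scale = 8 and
     align = 8.  For a long long in GPRs, rsize = n_reg = 2 and
     align = 8, matching the even/odd register pairing handled
     below. */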
12623
12624 /* Pull the value out of the saved registers.... */
12625
12626 lab_over = NULL;
12627 addr = create_tmp_var (ptr_type_node, "addr");
12628
12629 /* AltiVec vectors never go in registers when -mabi=altivec. */
12630 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12631 align = 16;
12632 else
12633 {
12634 lab_false = create_artificial_label (input_location);
12635 lab_over = create_artificial_label (input_location);
12636
12637 /* Long long is aligned in the registers. So is any other two-GPR
12638 item, such as complex int, due to a historical mistake. */
12639 u = reg;
12640 if (n_reg == 2 && reg == gpr)
12641 {
12642 regalign = 1;
12643 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12644 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12645 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12646 unshare_expr (reg), u);
12647 }
12648 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12649 reg number is 0 for f1, so we want to make it odd. */
12650 else if (reg == fpr && mode == TDmode)
12651 {
12652 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12653 build_int_cst (TREE_TYPE (reg), 1));
12654 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12655 }
12656
12657 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12658 t = build2 (GE_EXPR, boolean_type_node, u, t);
12659 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12660 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12661 gimplify_and_add (t, pre_p);
12662
12663 t = sav;
12664 if (sav_ofs)
12665 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12666
12667 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12668 build_int_cst (TREE_TYPE (reg), n_reg));
12669 u = fold_convert (sizetype, u);
12670 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12671 t = fold_build_pointer_plus (t, u);
12672
12673 /* _Decimal32 varargs are located in the second word of the 64-bit
12674 FP register for 32-bit binaries. */
12675 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12676 t = fold_build_pointer_plus_hwi (t, size);
12677
12678 /* Args are passed right-aligned. */
12679 if (BYTES_BIG_ENDIAN)
12680 t = fold_build_pointer_plus_hwi (t, pad);
12681
12682 gimplify_assign (addr, t, pre_p);
12683
12684 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12685
12686 stmt = gimple_build_label (lab_false);
12687 gimple_seq_add_stmt (pre_p, stmt);
12688
12689 if ((n_reg == 2 && !regalign) || n_reg > 2)
12690 {
12691 /* Ensure that we don't find any more args in regs.
12692 Alignment has taken care of the special cases. */
12693 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12694 }
12695 }
12696
12697 /* ... otherwise out of the overflow area. */
12698
12699 /* Care for on-stack alignment if needed. */
12700 t = ovf;
12701 if (align != 1)
12702 {
12703 t = fold_build_pointer_plus_hwi (t, align - 1);
12704 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12705 build_int_cst (TREE_TYPE (t), -align));
12706 }
12707
12708 /* Args are passed right-aligned. */
12709 if (BYTES_BIG_ENDIAN)
12710 t = fold_build_pointer_plus_hwi (t, pad);
12711
12712 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12713
12714 gimplify_assign (unshare_expr (addr), t, pre_p);
12715
12716 t = fold_build_pointer_plus_hwi (t, size);
12717 gimplify_assign (unshare_expr (ovf), t, pre_p);
12718
12719 if (lab_over)
12720 {
12721 stmt = gimple_build_label (lab_over);
12722 gimple_seq_add_stmt (pre_p, stmt);
12723 }
12724
12725 if (STRICT_ALIGNMENT
12726 && (TYPE_ALIGN (type)
12727 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12728 {
12729 /* The value (of type complex double, for example) may not be
12730 aligned in memory in the saved registers, so copy via a
12731 temporary. (This is the same code as used for SPARC.) */
12732 tree tmp = create_tmp_var (type, "va_arg_tmp");
12733 tree dest_addr = build_fold_addr_expr (tmp);
12734
12735 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12736 3, dest_addr, addr, size_int (rsize * 4));
12737 TREE_ADDRESSABLE (tmp) = 1;
12738
12739 gimplify_and_add (copy, pre_p);
12740 addr = dest_addr;
12741 }
12742
12743 addr = fold_convert (ptrtype, addr);
12744 return build_va_arg_indirect_ref (addr);
12745 }
12746
12747 /* Builtins. */
12748
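/* Register the builtin NAME with function type TYPE and internal code
   CODE: record its decl in rs6000_builtin_decls and apply the tree
   attributes (const, pure, fp) implied by its RS6000_BTC_*
   classification. */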
12749 static void
12750 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12751 {
12752 tree t;
12753 unsigned classify = rs6000_builtin_info[(int)code].attr;
12754 const char *attr_string = "";
12755
12756 gcc_assert (name != NULL);
12757 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12758
12759 if (rs6000_builtin_decls[(int)code])
12760 fatal_error (input_location,
12761 "internal error: builtin function %qs already processed",
12762 name);
12763
12764 rs6000_builtin_decls[(int)code] = t =
12765 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12766
12767 /* Set any special attributes. */
12768 if ((classify & RS6000_BTC_CONST) != 0)
12769 {
12770 /* const function, function only depends on the inputs. */
12771 TREE_READONLY (t) = 1;
12772 TREE_NOTHROW (t) = 1;
12773 attr_string = ", const";
12774 }
12775 else if ((classify & RS6000_BTC_PURE) != 0)
12776 {
12777 /* pure function, function can read global memory, but does not set any
12778 external state. */
12779 DECL_PURE_P (t) = 1;
12780 TREE_NOTHROW (t) = 1;
12781 attr_string = ", pure";
12782 }
12783 else if ((classify & RS6000_BTC_FP) != 0)
12784 {
12785 /* Function is a math function. If rounding mode is on, then treat the
12786 function as not reading global memory, but it can have arbitrary side
12787 effects. If it is off, then assume the function is a const function.
12788 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12789 builtin-attribute.def that is used for the math functions. */
12790 TREE_NOTHROW (t) = 1;
12791 if (flag_rounding_math)
12792 {
12793 DECL_PURE_P (t) = 1;
12794 DECL_IS_NOVOPS (t) = 1;
12795 attr_string = ", fp, pure";
12796 }
12797 else
12798 {
12799 TREE_READONLY (t) = 1;
12800 attr_string = ", fp, const";
12801 }
12802 }
12803 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12804 gcc_unreachable ();
12805
12806 if (TARGET_DEBUG_BUILTIN)
12807 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12808 (int)code, name, attr_string);
12809 }
12810
12811 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12812
12813 #undef RS6000_BUILTIN_0
12814 #undef RS6000_BUILTIN_1
12815 #undef RS6000_BUILTIN_2
12816 #undef RS6000_BUILTIN_3
12817 #undef RS6000_BUILTIN_A
12818 #undef RS6000_BUILTIN_D
12819 #undef RS6000_BUILTIN_H
12820 #undef RS6000_BUILTIN_P
12821 #undef RS6000_BUILTIN_X
12822
12823 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12824 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12825 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12826 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12827 { MASK, ICODE, NAME, ENUM },
12828
12829 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12830 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12831 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12832 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12833 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12834
12835 static const struct builtin_description bdesc_3arg[] =
12836 {
12837 #include "rs6000-builtin.def"
12838 };
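
/* Editorial note on the pattern used here and below: rs6000-builtin.def
   is an X-macro file, so redefining exactly one RS6000_BUILTIN_* macro
   to emit an initializer selects a single class of builtins for each
   table.  Illustratively (hypothetical entry), a .def line such as

     RS6000_BUILTIN_3 (ALTIVEC_BUILTIN_VMADDFP, "__builtin_altivec_vmaddfp",
                       MASK, ATTR, ICODE)

   expands to { MASK, ICODE, "__builtin_altivec_vmaddfp",
   ALTIVEC_BUILTIN_VMADDFP } in bdesc_3arg and to nothing in the other
   tables. */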
12839
12840 /* DST operations: void foo (void *, const int, const char). */
12841
12842 #undef RS6000_BUILTIN_0
12843 #undef RS6000_BUILTIN_1
12844 #undef RS6000_BUILTIN_2
12845 #undef RS6000_BUILTIN_3
12846 #undef RS6000_BUILTIN_A
12847 #undef RS6000_BUILTIN_D
12848 #undef RS6000_BUILTIN_H
12849 #undef RS6000_BUILTIN_P
12850 #undef RS6000_BUILTIN_X
12851
12852 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12853 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12854 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12855 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12856 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12857 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12858 { MASK, ICODE, NAME, ENUM },
12859
12860 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12861 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12862 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12863
12864 static const struct builtin_description bdesc_dst[] =
12865 {
12866 #include "rs6000-builtin.def"
12867 };
12868
12869 /* Simple binary operations: VECc = foo (VECa, VECb). */
12870
12871 #undef RS6000_BUILTIN_0
12872 #undef RS6000_BUILTIN_1
12873 #undef RS6000_BUILTIN_2
12874 #undef RS6000_BUILTIN_3
12875 #undef RS6000_BUILTIN_A
12876 #undef RS6000_BUILTIN_D
12877 #undef RS6000_BUILTIN_H
12878 #undef RS6000_BUILTIN_P
12879 #undef RS6000_BUILTIN_X
12880
12881 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12882 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12883 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12884 { MASK, ICODE, NAME, ENUM },
12885
12886 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12887 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12888 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12889 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12890 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12891 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12892
12893 static const struct builtin_description bdesc_2arg[] =
12894 {
12895 #include "rs6000-builtin.def"
12896 };
12897
12898 #undef RS6000_BUILTIN_0
12899 #undef RS6000_BUILTIN_1
12900 #undef RS6000_BUILTIN_2
12901 #undef RS6000_BUILTIN_3
12902 #undef RS6000_BUILTIN_A
12903 #undef RS6000_BUILTIN_D
12904 #undef RS6000_BUILTIN_H
12905 #undef RS6000_BUILTIN_P
12906 #undef RS6000_BUILTIN_X
12907
12908 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12909 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12910 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12911 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12912 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12913 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12914 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12915 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12916 { MASK, ICODE, NAME, ENUM },
12917
12918 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12919
12920 /* AltiVec predicates. */
12921
12922 static const struct builtin_description bdesc_altivec_preds[] =
12923 {
12924 #include "rs6000-builtin.def"
12925 };
12926
12927 /* ABS* operations. */
12928
12929 #undef RS6000_BUILTIN_0
12930 #undef RS6000_BUILTIN_1
12931 #undef RS6000_BUILTIN_2
12932 #undef RS6000_BUILTIN_3
12933 #undef RS6000_BUILTIN_A
12934 #undef RS6000_BUILTIN_D
12935 #undef RS6000_BUILTIN_H
12936 #undef RS6000_BUILTIN_P
12937 #undef RS6000_BUILTIN_X
12938
12939 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12940 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12941 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12942 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12943 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
12944 { MASK, ICODE, NAME, ENUM },
12945
12946 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12947 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12948 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12949 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12950
12951 static const struct builtin_description bdesc_abs[] =
12952 {
12953 #include "rs6000-builtin.def"
12954 };
12955
12956 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
12957 foo (VECa). */
12958
12959 #undef RS6000_BUILTIN_0
12960 #undef RS6000_BUILTIN_1
12961 #undef RS6000_BUILTIN_2
12962 #undef RS6000_BUILTIN_3
12963 #undef RS6000_BUILTIN_A
12964 #undef RS6000_BUILTIN_D
12965 #undef RS6000_BUILTIN_H
12966 #undef RS6000_BUILTIN_P
12967 #undef RS6000_BUILTIN_X
12968
12969 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12970 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
12971 { MASK, ICODE, NAME, ENUM },
12972
12973 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12974 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12975 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12976 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12977 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12978 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12979 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12980
12981 static const struct builtin_description bdesc_1arg[] =
12982 {
12983 #include "rs6000-builtin.def"
12984 };
12985
12986 /* Simple no-argument operations: result = __builtin_darn_32 (). */
12987
12988 #undef RS6000_BUILTIN_0
12989 #undef RS6000_BUILTIN_1
12990 #undef RS6000_BUILTIN_2
12991 #undef RS6000_BUILTIN_3
12992 #undef RS6000_BUILTIN_A
12993 #undef RS6000_BUILTIN_D
12994 #undef RS6000_BUILTIN_H
12995 #undef RS6000_BUILTIN_P
12996 #undef RS6000_BUILTIN_X
12997
12998 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
12999 { MASK, ICODE, NAME, ENUM },
13000
13001 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13002 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13003 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13004 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13005 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13006 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13007 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13008 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13009
13010 static const struct builtin_description bdesc_0arg[] =
13011 {
13012 #include "rs6000-builtin.def"
13013 };
13014
13015 /* HTM builtins. */
13016 #undef RS6000_BUILTIN_0
13017 #undef RS6000_BUILTIN_1
13018 #undef RS6000_BUILTIN_2
13019 #undef RS6000_BUILTIN_3
13020 #undef RS6000_BUILTIN_A
13021 #undef RS6000_BUILTIN_D
13022 #undef RS6000_BUILTIN_H
13023 #undef RS6000_BUILTIN_P
13024 #undef RS6000_BUILTIN_X
13025
13026 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13027 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13028 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13029 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13030 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13031 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13032 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
13033 { MASK, ICODE, NAME, ENUM },
13034
13035 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13036 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13037
13038 static const struct builtin_description bdesc_htm[] =
13039 {
13040 #include "rs6000-builtin.def"
13041 };
13042
13043 #undef RS6000_BUILTIN_0
13044 #undef RS6000_BUILTIN_1
13045 #undef RS6000_BUILTIN_2
13046 #undef RS6000_BUILTIN_3
13047 #undef RS6000_BUILTIN_A
13048 #undef RS6000_BUILTIN_D
13049 #undef RS6000_BUILTIN_H
13050 #undef RS6000_BUILTIN_P
13051
13052 /* Return true if a builtin function is overloaded. */
13053 bool
13054 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13055 {
13056 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13057 }
13058
13059 const char *
13060 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13061 {
13062 return rs6000_builtin_info[(int)fncode].name;
13063 }
13064
13065 /* Expand an expression EXP that calls a builtin without arguments. */
13066 static rtx
13067 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13068 {
13069 rtx pat;
13070 machine_mode tmode = insn_data[icode].operand[0].mode;
13071
13072 if (icode == CODE_FOR_nothing)
13073 /* Builtin not supported on this processor. */
13074 return 0;
13075
13076 if (icode == CODE_FOR_rs6000_mffsl
13077 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13078 {
13079 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
13080 return const0_rtx;
13081 }
13082
13083 if (target == 0
13084 || GET_MODE (target) != tmode
13085 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13086 target = gen_reg_rtx (tmode);
13087
13088 pat = GEN_FCN (icode) (target);
13089 if (! pat)
13090 return 0;
13091 emit_insn (pat);
13092
13093 return target;
13094 }
13095
13096
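/* Expand the two-operand __builtin_mtfsf-style builtin in EXP: operand
   0 must be a constant 8-bit FLM field mask, operand 1 is the value
   moved into the FPSCR. */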
13097 static rtx
13098 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13099 {
13100 rtx pat;
13101 tree arg0 = CALL_EXPR_ARG (exp, 0);
13102 tree arg1 = CALL_EXPR_ARG (exp, 1);
13103 rtx op0 = expand_normal (arg0);
13104 rtx op1 = expand_normal (arg1);
13105 machine_mode mode0 = insn_data[icode].operand[0].mode;
13106 machine_mode mode1 = insn_data[icode].operand[1].mode;
13107
13108 if (icode == CODE_FOR_nothing)
13109 /* Builtin not supported on this processor. */
13110 return 0;
13111
13112 /* If we got invalid arguments bail out before generating bad rtl. */
13113 if (arg0 == error_mark_node || arg1 == error_mark_node)
13114 return const0_rtx;
13115
13116 if (!CONST_INT_P (op0)
13117 || INTVAL (op0) > 255
13118 || INTVAL (op0) < 0)
13119 {
13120 error ("argument 1 must be an 8-bit field value");
13121 return const0_rtx;
13122 }
13123
13124 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13125 op0 = copy_to_mode_reg (mode0, op0);
13126
13127 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13128 op1 = copy_to_mode_reg (mode1, op1);
13129
13130 pat = GEN_FCN (icode) (op0, op1);
13131 if (!pat)
13132 return const0_rtx;
13133 emit_insn (pat);
13134
13135 return NULL_RTX;
13136 }
13137
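/* Expand the one-operand __builtin_mtfsb0/__builtin_mtfsb1 builtins in
   EXP; the operand must be a constant FPSCR bit number in 0..31. */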
13138 static rtx
13139 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13140 {
13141 rtx pat;
13142 tree arg0 = CALL_EXPR_ARG (exp, 0);
13143 rtx op0 = expand_normal (arg0);
13144
13145 if (icode == CODE_FOR_nothing)
13146 /* Builtin not supported on this processor. */
13147 return 0;
13148
13149 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13150 {
13151 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13152 "%<-msoft-float%>");
13153 return const0_rtx;
13154 }
13155
13156 /* If we got invalid arguments bail out before generating bad rtl. */
13157 if (arg0 == error_mark_node)
13158 return const0_rtx;
13159
13160 /* Only allow bit numbers 0 to 31. */
13161 if (!u5bit_cint_operand (op0, VOIDmode))
13162 {
13163 error ("argument must be a constant between 0 and 31");
13164 return const0_rtx;
13165 }
13166
13167 pat = GEN_FCN (icode) (op0);
13168 if (!pat)
13169 return const0_rtx;
13170 emit_insn (pat);
13171
13172 return NULL_RTX;
13173 }
13174
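/* Expand __builtin_set_fpscr_rn in EXP, setting the binary rounding
   mode from the two least significant bits of the operand (RN = 0 is
   round-to-nearest, 1 round-toward-zero, 2 toward +infinity, 3 toward
   -infinity). */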
13175 static rtx
13176 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13177 {
13178 rtx pat;
13179 tree arg0 = CALL_EXPR_ARG (exp, 0);
13180 rtx op0 = expand_normal (arg0);
13181 machine_mode mode0 = insn_data[icode].operand[0].mode;
13182
13183 if (icode == CODE_FOR_nothing)
13184 /* Builtin not supported on this processor. */
13185 return 0;
13186
13187 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13188 {
13189 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13190 return const0_rtx;
13191 }
13192
13193 /* If we got invalid arguments bail out before generating bad rtl. */
13194 if (arg0 == error_mark_node)
13195 return const0_rtx;
13196
13197 /* If the argument is a constant, check the range. The argument can only
13198 be a 2-bit value. Unfortunately, we cannot check the range of the value
13199 at compile time if the argument is a variable. The least significant two
13200 bits of the argument, regardless of type, are used to set the rounding
13201 mode. All other bits are ignored. */
13202 if (CONST_INT_P (op0) && !const_0_to_3_operand(op0, VOIDmode))
13203 {
13204 error ("argument must be a value between 0 and 3");
13205 return const0_rtx;
13206 }
13207
13208 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13209 op0 = copy_to_mode_reg (mode0, op0);
13210
13211 pat = GEN_FCN (icode) (op0);
13212 if (!pat)
13213 return const0_rtx;
13214 emit_insn (pat);
13215
13216 return NULL_RTX;
13217 }
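
/* Expand __builtin_set_fpscr_drn in EXP, setting the decimal rounding
   mode from the three least significant bits of the operand; only
   supported in 64-bit mode. */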
13218 static rtx
13219 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13220 {
13221 rtx pat;
13222 tree arg0 = CALL_EXPR_ARG (exp, 0);
13223 rtx op0 = expand_normal (arg0);
13224 machine_mode mode0 = insn_data[icode].operand[0].mode;
13225
13226 if (TARGET_32BIT)
13227 /* Builtin not supported in 32-bit mode. */
13228 fatal_error (input_location,
13229 "%<__builtin_set_fpscr_drn%> is not supported "
13230 "in 32-bit mode");
13231
13232 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13233 {
13234 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13235 return const0_rtx;
13236 }
13237
13238 if (icode == CODE_FOR_nothing)
13239 /* Builtin not supported on this processor. */
13240 return 0;
13241
13242 /* If we got invalid arguments bail out before generating bad rtl. */
13243 if (arg0 == error_mark_node)
13244 return const0_rtx;
13245
13246 /* If the argument is a constant, check the range. The argument can only
13247 be a 3-bit value. Unfortunately, we cannot check the range of the value
13248 at compile time if the argument is a variable. The least significant
13249 three bits of the argument, regardless of type, are used to set the
13250 decimal rounding mode. All other bits are ignored. */
13251 if (CONST_INT_P (op0) && !const_0_to_7_operand(op0, VOIDmode))
13252 {
13253 error ("argument must be a value between 0 and 7");
13254 return const0_rtx;
13255 }
13256
13257 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13258 op0 = copy_to_mode_reg (mode0, op0);
13259
13260 pat = GEN_FCN (icode) (op0);
13261 if (! pat)
13262 return const0_rtx;
13263 emit_insn (pat);
13264
13265 return NULL_RTX;
13266 }
13267
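/* Expand the unary builtin in EXP via the insn pattern ICODE, storing
   the result in TARGET; the vspltis* instructions additionally require
   their operand to be a 5-bit signed literal. */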
13268 static rtx
13269 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13270 {
13271 rtx pat;
13272 tree arg0 = CALL_EXPR_ARG (exp, 0);
13273 rtx op0 = expand_normal (arg0);
13274 machine_mode tmode = insn_data[icode].operand[0].mode;
13275 machine_mode mode0 = insn_data[icode].operand[1].mode;
13276
13277 if (icode == CODE_FOR_nothing)
13278 /* Builtin not supported on this processor. */
13279 return 0;
13280
13281 /* If we got invalid arguments bail out before generating bad rtl. */
13282 if (arg0 == error_mark_node)
13283 return const0_rtx;
13284
13285 if (icode == CODE_FOR_altivec_vspltisb
13286 || icode == CODE_FOR_altivec_vspltish
13287 || icode == CODE_FOR_altivec_vspltisw)
13288 {
13289 /* Only allow 5-bit *signed* literals. */
13290 if (!CONST_INT_P (op0)
13291 || INTVAL (op0) > 15
13292 || INTVAL (op0) < -16)
13293 {
13294 error ("argument 1 must be a 5-bit signed literal");
13295 return CONST0_RTX (tmode);
13296 }
13297 }
13298
13299 if (target == 0
13300 || GET_MODE (target) != tmode
13301 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13302 target = gen_reg_rtx (tmode);
13303
13304 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13305 op0 = copy_to_mode_reg (mode0, op0);
13306
13307 pat = GEN_FCN (icode) (target, op0);
13308 if (! pat)
13309 return 0;
13310 emit_insn (pat);
13311
13312 return target;
13313 }
13314
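/* Expand an AltiVec ABS-class builtin in EXP; these insn patterns take
   two scratch registers in addition to the input and the result. */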
13315 static rtx
13316 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13317 {
13318 rtx pat, scratch1, scratch2;
13319 tree arg0 = CALL_EXPR_ARG (exp, 0);
13320 rtx op0 = expand_normal (arg0);
13321 machine_mode tmode = insn_data[icode].operand[0].mode;
13322 machine_mode mode0 = insn_data[icode].operand[1].mode;
13323
13324 /* If we have invalid arguments, bail out before generating bad rtl. */
13325 if (arg0 == error_mark_node)
13326 return const0_rtx;
13327
13328 if (target == 0
13329 || GET_MODE (target) != tmode
13330 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13331 target = gen_reg_rtx (tmode);
13332
13333 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13334 op0 = copy_to_mode_reg (mode0, op0);
13335
13336 scratch1 = gen_reg_rtx (mode0);
13337 scratch2 = gen_reg_rtx (mode0);
13338
13339 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13340 if (! pat)
13341 return 0;
13342 emit_insn (pat);
13343
13344 return target;
13345 }
13346
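/* Expand the binary builtin in EXP via the insn pattern ICODE, storing
   the result in TARGET.  Several of these instructions take a small
   unsigned literal operand; the checks below enforce the 1- to 7-bit
   ranges they require. */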
13347 static rtx
13348 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13349 {
13350 rtx pat;
13351 tree arg0 = CALL_EXPR_ARG (exp, 0);
13352 tree arg1 = CALL_EXPR_ARG (exp, 1);
13353 rtx op0 = expand_normal (arg0);
13354 rtx op1 = expand_normal (arg1);
13355 machine_mode tmode = insn_data[icode].operand[0].mode;
13356 machine_mode mode0 = insn_data[icode].operand[1].mode;
13357 machine_mode mode1 = insn_data[icode].operand[2].mode;
13358
13359 if (icode == CODE_FOR_nothing)
13360 /* Builtin not supported on this processor. */
13361 return 0;
13362
13363 /* If we got invalid arguments bail out before generating bad rtl. */
13364 if (arg0 == error_mark_node || arg1 == error_mark_node)
13365 return const0_rtx;
13366
13367 if (icode == CODE_FOR_unpackv1ti
13368 || icode == CODE_FOR_unpackkf
13369 || icode == CODE_FOR_unpacktf
13370 || icode == CODE_FOR_unpackif
13371 || icode == CODE_FOR_unpacktd)
13372 {
13373 /* Only allow 1-bit unsigned literals. */
13374 STRIP_NOPS (arg1);
13375 if (TREE_CODE (arg1) != INTEGER_CST
13376 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13377 {
13378 error ("argument 2 must be a 1-bit unsigned literal");
13379 return CONST0_RTX (tmode);
13380 }
13381 }
13382 else if (icode == CODE_FOR_altivec_vspltw)
13383 {
13384 /* Only allow 2-bit unsigned literals. */
13385 STRIP_NOPS (arg1);
13386 if (TREE_CODE (arg1) != INTEGER_CST
13387 || TREE_INT_CST_LOW (arg1) & ~3)
13388 {
13389 error ("argument 2 must be a 2-bit unsigned literal");
13390 return CONST0_RTX (tmode);
13391 }
13392 }
13393 else if (icode == CODE_FOR_altivec_vsplth)
13394 {
13395 /* Only allow 3-bit unsigned literals. */
13396 STRIP_NOPS (arg1);
13397 if (TREE_CODE (arg1) != INTEGER_CST
13398 || TREE_INT_CST_LOW (arg1) & ~7)
13399 {
13400 error ("argument 2 must be a 3-bit unsigned literal");
13401 return CONST0_RTX (tmode);
13402 }
13403 }
13404 else if (icode == CODE_FOR_altivec_vspltb)
13405 {
13406 /* Only allow 4-bit unsigned literals. */
13407 STRIP_NOPS (arg1);
13408 if (TREE_CODE (arg1) != INTEGER_CST
13409 || TREE_INT_CST_LOW (arg1) & ~15)
13410 {
13411 error ("argument 2 must be a 4-bit unsigned literal");
13412 return CONST0_RTX (tmode);
13413 }
13414 }
13415 else if (icode == CODE_FOR_altivec_vcfux
13416 || icode == CODE_FOR_altivec_vcfsx
13417 || icode == CODE_FOR_altivec_vctsxs
13418 || icode == CODE_FOR_altivec_vctuxs)
13419 {
13420 /* Only allow 5-bit unsigned literals. */
13421 STRIP_NOPS (arg1);
13422 if (TREE_CODE (arg1) != INTEGER_CST
13423 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13424 {
13425 error ("argument 2 must be a 5-bit unsigned literal");
13426 return CONST0_RTX (tmode);
13427 }
13428 }
13429 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13430 || icode == CODE_FOR_dfptstsfi_lt_dd
13431 || icode == CODE_FOR_dfptstsfi_gt_dd
13432 || icode == CODE_FOR_dfptstsfi_unordered_dd
13433 || icode == CODE_FOR_dfptstsfi_eq_td
13434 || icode == CODE_FOR_dfptstsfi_lt_td
13435 || icode == CODE_FOR_dfptstsfi_gt_td
13436 || icode == CODE_FOR_dfptstsfi_unordered_td)
13437 {
13438 /* Only allow 6-bit unsigned literals. */
13439 STRIP_NOPS (arg0);
13440 if (TREE_CODE (arg0) != INTEGER_CST
13441 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13442 {
13443 error ("argument 1 must be a 6-bit unsigned literal");
13444 return CONST0_RTX (tmode);
13445 }
13446 }
13447 else if (icode == CODE_FOR_xststdcqp_kf
13448 || icode == CODE_FOR_xststdcqp_tf
13449 || icode == CODE_FOR_xststdcdp
13450 || icode == CODE_FOR_xststdcsp
13451 || icode == CODE_FOR_xvtstdcdp
13452 || icode == CODE_FOR_xvtstdcsp)
13453 {
13454 /* Only allow 7-bit unsigned literals. */
13455 STRIP_NOPS (arg1);
13456 if (TREE_CODE (arg1) != INTEGER_CST
13457 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13458 {
13459 error ("argument 2 must be a 7-bit unsigned literal");
13460 return CONST0_RTX (tmode);
13461 }
13462 }
13463
13464 if (target == 0
13465 || GET_MODE (target) != tmode
13466 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13467 target = gen_reg_rtx (tmode);
13468
13469 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13470 op0 = copy_to_mode_reg (mode0, op0);
13471 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13472 op1 = copy_to_mode_reg (mode1, op1);
13473
13474 pat = GEN_FCN (icode) (target, op0, op1);
13475 if (! pat)
13476 return 0;
13477 emit_insn (pat);
13478
13479 return target;
13480 }
13481
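/* Expand an AltiVec predicate builtin in EXP: argument 0 selects which
   CR6 bit pattern to test after the underlying vector compare runs. */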
13482 static rtx
13483 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13484 {
13485 rtx pat, scratch;
13486 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13487 tree arg0 = CALL_EXPR_ARG (exp, 1);
13488 tree arg1 = CALL_EXPR_ARG (exp, 2);
13489 rtx op0 = expand_normal (arg0);
13490 rtx op1 = expand_normal (arg1);
13491 machine_mode tmode = SImode;
13492 machine_mode mode0 = insn_data[icode].operand[1].mode;
13493 machine_mode mode1 = insn_data[icode].operand[2].mode;
13494 int cr6_form_int;
13495
13496 if (TREE_CODE (cr6_form) != INTEGER_CST)
13497 {
13498 error ("argument 1 of %qs must be a constant",
13499 "__builtin_altivec_predicate");
13500 return const0_rtx;
13501 }
13502 else
13503 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13504
13505 gcc_assert (mode0 == mode1);
13506
13507 /* If we have invalid arguments, bail out before generating bad rtl. */
13508 if (arg0 == error_mark_node || arg1 == error_mark_node)
13509 return const0_rtx;
13510
13511 if (target == 0
13512 || GET_MODE (target) != tmode
13513 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13514 target = gen_reg_rtx (tmode);
13515
13516 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13517 op0 = copy_to_mode_reg (mode0, op0);
13518 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13519 op1 = copy_to_mode_reg (mode1, op1);
13520
13521 /* Note that for many of the relevant operations (e.g. cmpne or
13522 cmpeq) with float or double operands, it would make more sense for
13523 the mode of the allocated scratch register to be a vector of
13524 integers. But the choice to copy the mode of operand 0 was made
13525 long ago and there are no plans to change it. */
13526 scratch = gen_reg_rtx (mode0);
13527
13528 pat = GEN_FCN (icode) (scratch, op0, op1);
13529 if (! pat)
13530 return 0;
13531 emit_insn (pat);
13532
13533 /* The vec_any* and vec_all* predicates use the same opcodes for two
13534 different operations, but the bits in CR6 will be different
13535 depending on what information we want. So we have to play tricks
13536 with CR6 to get the right bits out.
13537
13538 If you think this is disgusting, look at the specs for the
13539 AltiVec predicates. */
13540
13541 switch (cr6_form_int)
13542 {
13543 case 0:
13544 emit_insn (gen_cr6_test_for_zero (target));
13545 break;
13546 case 1:
13547 emit_insn (gen_cr6_test_for_zero_reverse (target));
13548 break;
13549 case 2:
13550 emit_insn (gen_cr6_test_for_lt (target));
13551 break;
13552 case 3:
13553 emit_insn (gen_cr6_test_for_lt_reverse (target));
13554 break;
13555 default:
13556 error ("argument 1 of %qs is out of range",
13557 "__builtin_altivec_predicate");
13558 break;
13559 }
13560
13561 return target;
13562 }
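
/* For reference (editorial sketch following the altivec.h conventions,
   where __CR6_EQ = 0, __CR6_EQ_REV = 1, __CR6_LT = 2 and
   __CR6_LT_REV = 3):

     vec_all_eq (a, b)  =>  __builtin_altivec_vcmpequw_p (__CR6_LT, a, b)
     vec_any_eq (a, b)  =>  __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, a, b)

   and it is those first arguments that feed the switch above. */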
13563
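/* Return a vperm selector, as a V16QImode register, that reverses the
   bytes within each element of a vector of mode MODE.  For example, for
   V4SImode the selector is {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12},
   byte-swapping each 32-bit lane. */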
13564 rtx
13565 swap_endian_selector_for_mode (machine_mode mode)
13566 {
13567 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13568 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13569 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13570 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13571
13572 unsigned int *swaparray, i;
13573 rtx perm[16];
13574
13575 switch (mode)
13576 {
13577 case E_V1TImode:
13578 swaparray = swap1;
13579 break;
13580 case E_V2DFmode:
13581 case E_V2DImode:
13582 swaparray = swap2;
13583 break;
13584 case E_V4SFmode:
13585 case E_V4SImode:
13586 swaparray = swap4;
13587 break;
13588 case E_V8HImode:
13589 swaparray = swap8;
13590 break;
13591 default:
13592 gcc_unreachable ();
13593 }
13594
13595 for (i = 0; i < 16; ++i)
13596 perm[i] = GEN_INT (swaparray[i]);
13597
13598 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13599 gen_rtvec_v (16, perm)));
13600 }
13601
13602 static rtx
13603 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13604 {
13605 rtx pat, addr;
13606 tree arg0 = CALL_EXPR_ARG (exp, 0);
13607 tree arg1 = CALL_EXPR_ARG (exp, 1);
13608 machine_mode tmode = insn_data[icode].operand[0].mode;
13609 machine_mode mode0 = Pmode;
13610 machine_mode mode1 = Pmode;
13611 rtx op0 = expand_normal (arg0);
13612 rtx op1 = expand_normal (arg1);
13613
13614 if (icode == CODE_FOR_nothing)
13615 /* Builtin not supported on this processor. */
13616 return 0;
13617
13618 /* If we got invalid arguments bail out before generating bad rtl. */
13619 if (arg0 == error_mark_node || arg1 == error_mark_node)
13620 return const0_rtx;
13621
13622 if (target == 0
13623 || GET_MODE (target) != tmode
13624 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13625 target = gen_reg_rtx (tmode);
13626
13627 op1 = copy_to_mode_reg (mode1, op1);
13628
13629 /* For LVX, express the RTL accurately by ANDing the address with -16.
13630 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13631 so the raw address is fine. */
13632 if (icode == CODE_FOR_altivec_lvx_v1ti
13633 || icode == CODE_FOR_altivec_lvx_v2df
13634 || icode == CODE_FOR_altivec_lvx_v2di
13635 || icode == CODE_FOR_altivec_lvx_v4sf
13636 || icode == CODE_FOR_altivec_lvx_v4si
13637 || icode == CODE_FOR_altivec_lvx_v8hi
13638 || icode == CODE_FOR_altivec_lvx_v16qi)
13639 {
13640 rtx rawaddr;
13641 if (op0 == const0_rtx)
13642 rawaddr = op1;
13643 else
13644 {
13645 op0 = copy_to_mode_reg (mode0, op0);
13646 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13647 }
13648 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13649 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13650
13651 emit_insn (gen_rtx_SET (target, addr));
13652 }
13653 else
13654 {
13655 if (op0 == const0_rtx)
13656 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13657 else
13658 {
13659 op0 = copy_to_mode_reg (mode0, op0);
13660 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13661 gen_rtx_PLUS (Pmode, op1, op0));
13662 }
13663
13664 pat = GEN_FCN (icode) (target, addr);
13665 if (! pat)
13666 return 0;
13667 emit_insn (pat);
13668 }
13669
13670 return target;
13671 }
13672
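/* Expand a stxvl-style store builtin in EXP: operand 0 is the vector
   value to store, operand 1 the base address and operand 2 the
   length. */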
13673 static rtx
13674 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13675 {
13676 rtx pat;
13677 tree arg0 = CALL_EXPR_ARG (exp, 0);
13678 tree arg1 = CALL_EXPR_ARG (exp, 1);
13679 tree arg2 = CALL_EXPR_ARG (exp, 2);
13680 rtx op0 = expand_normal (arg0);
13681 rtx op1 = expand_normal (arg1);
13682 rtx op2 = expand_normal (arg2);
13683 machine_mode mode0 = insn_data[icode].operand[0].mode;
13684 machine_mode mode1 = insn_data[icode].operand[1].mode;
13685 machine_mode mode2 = insn_data[icode].operand[2].mode;
13686
13687 if (icode == CODE_FOR_nothing)
13688 /* Builtin not supported on this processor. */
13689 return NULL_RTX;
13690
13691 /* If we got invalid arguments bail out before generating bad rtl. */
13692 if (arg0 == error_mark_node
13693 || arg1 == error_mark_node
13694 || arg2 == error_mark_node)
13695 return NULL_RTX;
13696
13697 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13698 op0 = copy_to_mode_reg (mode0, op0);
13699 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13700 op1 = copy_to_mode_reg (mode1, op1);
13701 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13702 op2 = copy_to_mode_reg (mode2, op2);
13703
13704 pat = GEN_FCN (icode) (op0, op1, op2);
13705 if (pat)
13706 emit_insn (pat);
13707
13708 return NULL_RTX;
13709 }
13710
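/* Expand an AltiVec/VSX store builtin in EXP (stvx and friends): the
   vector in argument 0 is stored at the address formed from arguments
   1 and 2, with the plain STVX forms ANDing that address with -16 as
   the hardware does. */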
13711 static rtx
13712 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13713 {
13714 tree arg0 = CALL_EXPR_ARG (exp, 0);
13715 tree arg1 = CALL_EXPR_ARG (exp, 1);
13716 tree arg2 = CALL_EXPR_ARG (exp, 2);
13717 rtx op0 = expand_normal (arg0);
13718 rtx op1 = expand_normal (arg1);
13719 rtx op2 = expand_normal (arg2);
13720 rtx pat, addr, rawaddr;
13721 machine_mode tmode = insn_data[icode].operand[0].mode;
13722 machine_mode smode = insn_data[icode].operand[1].mode;
13723 machine_mode mode1 = Pmode;
13724 machine_mode mode2 = Pmode;
13725
13726 /* Invalid arguments. Bail before doing anything stoopid! */
13727 if (arg0 == error_mark_node
13728 || arg1 == error_mark_node
13729 || arg2 == error_mark_node)
13730 return const0_rtx;
13731
13732 op2 = copy_to_mode_reg (mode2, op2);
13733
13734 /* For STVX, express the RTL accurately by ANDing the address with -16.
13735 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13736 so the raw address is fine. */
13737 if (icode == CODE_FOR_altivec_stvx_v2df
13738 || icode == CODE_FOR_altivec_stvx_v2di
13739 || icode == CODE_FOR_altivec_stvx_v4sf
13740 || icode == CODE_FOR_altivec_stvx_v4si
13741 || icode == CODE_FOR_altivec_stvx_v8hi
13742 || icode == CODE_FOR_altivec_stvx_v16qi)
13743 {
13744 if (op1 == const0_rtx)
13745 rawaddr = op2;
13746 else
13747 {
13748 op1 = copy_to_mode_reg (mode1, op1);
13749 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13750 }
13751
13752 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13753 addr = gen_rtx_MEM (tmode, addr);
13754
13755 op0 = copy_to_mode_reg (tmode, op0);
13756
13757 emit_insn (gen_rtx_SET (addr, op0));
13758 }
13759 else
13760 {
13761 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13762 op0 = copy_to_mode_reg (smode, op0);
13763
13764 if (op1 == const0_rtx)
13765 addr = gen_rtx_MEM (tmode, op2);
13766 else
13767 {
13768 op1 = copy_to_mode_reg (mode1, op1);
13769 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13770 }
13771
13772 pat = GEN_FCN (icode) (addr, op0);
13773 if (pat)
13774 emit_insn (pat);
13775 }
13776
13777 return NULL_RTX;
13778 }
13779
13780 /* Return the appropriate SPR number associated with the given builtin. */
13781 static inline HOST_WIDE_INT
13782 htm_spr_num (enum rs6000_builtins code)
13783 {
13784 if (code == HTM_BUILTIN_GET_TFHAR
13785 || code == HTM_BUILTIN_SET_TFHAR)
13786 return TFHAR_SPR;
13787 else if (code == HTM_BUILTIN_GET_TFIAR
13788 || code == HTM_BUILTIN_SET_TFIAR)
13789 return TFIAR_SPR;
13790 else if (code == HTM_BUILTIN_GET_TEXASR
13791 || code == HTM_BUILTIN_SET_TEXASR)
13792 return TEXASR_SPR;
13793 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13794 || code == HTM_BUILTIN_SET_TEXASRU);
13795 return TEXASRU_SPR;
13796 }
13797
13798 /* Return the correct ICODE value depending on whether we are
13799 setting or reading the HTM SPRs. */
13800 static inline enum insn_code
13801 rs6000_htm_spr_icode (bool nonvoid)
13802 {
13803 if (nonvoid)
13804 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13805 else
13806 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13807 }
13808
13809 /* Expand the HTM builtin in EXP and store the result in TARGET.
13810 Store true in *EXPANDEDP if we found a builtin to expand. */
13811 static rtx
13812 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13813 {
13814 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13815 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13816 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13817 const struct builtin_description *d;
13818 size_t i;
13819
13820 *expandedp = true;
13821
13822 if (!TARGET_POWERPC64
13823 && (fcode == HTM_BUILTIN_TABORTDC
13824 || fcode == HTM_BUILTIN_TABORTDCI))
13825 {
13826 size_t uns_fcode = (size_t)fcode;
13827 const char *name = rs6000_builtin_info[uns_fcode].name;
13828 error ("builtin %qs is only valid in 64-bit mode", name);
13829 return const0_rtx;
13830 }
13831
13832 /* Expand the HTM builtins. */
13833 d = bdesc_htm;
13834 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13835 if (d->code == fcode)
13836 {
13837 rtx op[MAX_HTM_OPERANDS], pat;
13838 int nopnds = 0;
13839 tree arg;
13840 call_expr_arg_iterator iter;
13841 unsigned attr = rs6000_builtin_info[fcode].attr;
13842 enum insn_code icode = d->icode;
13843 const struct insn_operand_data *insn_op;
13844 bool uses_spr = (attr & RS6000_BTC_SPR);
13845 rtx cr = NULL_RTX;
13846
13847 if (uses_spr)
13848 icode = rs6000_htm_spr_icode (nonvoid);
13849 insn_op = &insn_data[icode].operand[0];
13850
13851 if (nonvoid)
13852 {
13853 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
13854 if (!target
13855 || GET_MODE (target) != tmode
13856 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13857 target = gen_reg_rtx (tmode);
13858 if (uses_spr)
13859 op[nopnds++] = target;
13860 }
13861
13862 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13863 {
13864 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13865 return const0_rtx;
13866
13867 insn_op = &insn_data[icode].operand[nopnds];
13868
13869 op[nopnds] = expand_normal (arg);
13870
13871 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13872 {
13873 if (!strcmp (insn_op->constraint, "n"))
13874 {
13875 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13876 if (!CONST_INT_P (op[nopnds]))
13877 error ("argument %d must be an unsigned literal", arg_num);
13878 else
13879 error ("argument %d is an unsigned literal that is "
13880 "out of range", arg_num);
13881 return const0_rtx;
13882 }
13883 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13884 }
13885
13886 nopnds++;
13887 }
13888
13889 /* Handle the builtins for extended mnemonics. These accept
13890 no arguments, but map to builtins that take arguments. */
13891 switch (fcode)
13892 {
13893 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13894 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13895 op[nopnds++] = GEN_INT (1);
13896 if (flag_checking)
13897 attr |= RS6000_BTC_UNARY;
13898 break;
13899 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13900 op[nopnds++] = GEN_INT (0);
13901 if (flag_checking)
13902 attr |= RS6000_BTC_UNARY;
13903 break;
13904 default:
13905 break;
13906 }
13907
13908 /* If this builtin accesses SPRs, then pass in the appropriate
13909 SPR number and SPR regno as the last two operands. */
13910 if (uses_spr)
13911 {
13912 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13913 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13914 }
13915 /* If this builtin accesses a CR, then pass in a scratch
13916 CR as the last operand. */
13917 else if (attr & RS6000_BTC_CR)
13918 	{
13919 	  op[nopnds++] = cr = gen_reg_rtx (CCmode);
13920 	}
13921
13922 if (flag_checking)
13923 {
13924 int expected_nopnds = 0;
13925 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
13926 expected_nopnds = 1;
13927 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
13928 expected_nopnds = 2;
13929 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
13930 expected_nopnds = 3;
13931 if (!(attr & RS6000_BTC_VOID))
13932 expected_nopnds += 1;
13933 if (uses_spr)
13934 expected_nopnds += 1;
13935
13936 gcc_assert (nopnds == expected_nopnds
13937 && nopnds <= MAX_HTM_OPERANDS);
13938 }
13939
13940 switch (nopnds)
13941 {
13942 case 1:
13943 pat = GEN_FCN (icode) (op[0]);
13944 break;
13945 case 2:
13946 pat = GEN_FCN (icode) (op[0], op[1]);
13947 break;
13948 case 3:
13949 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
13950 break;
13951 case 4:
13952 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
13953 break;
13954 default:
13955 gcc_unreachable ();
13956 }
13957 if (!pat)
13958 return NULL_RTX;
13959 emit_insn (pat);
13960
13961 if (attr & RS6000_BTC_CR)
13962 {
13963 if (fcode == HTM_BUILTIN_TBEGIN)
13964 {
13965 	      /* Emit code to set TARGET to true or false depending on
13966 		 whether the tbegin. instruction succeeded or failed to
13967 		 start a transaction.  We do this by placing the 1's
13968 		 complement of CR's EQ bit into TARGET.  */
13969 rtx scratch = gen_reg_rtx (SImode);
13970 emit_insn (gen_rtx_SET (scratch,
13971 gen_rtx_EQ (SImode, cr,
13972 const0_rtx)));
13973 emit_insn (gen_rtx_SET (target,
13974 gen_rtx_XOR (SImode, scratch,
13975 GEN_INT (1))));
13976 }
13977 else
13978 {
13979 /* Emit code to copy the 4-bit condition register field
13980 CR into the least significant end of register TARGET. */
13981 rtx scratch1 = gen_reg_rtx (SImode);
13982 rtx scratch2 = gen_reg_rtx (SImode);
13983 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
13984 emit_insn (gen_movcc (subreg, cr));
13985 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
13986 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
13987 }
13988 }
13989
13990 if (nonvoid)
13991 return target;
13992 return const0_rtx;
13993 }
13994
13995 *expandedp = false;
13996 return NULL_RTX;
13997 }
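
/* A minimal usage sketch (hypothetical user code): because the TBEGIN path
   above stores the complement of CR's EQ bit into TARGET,
   __builtin_tbegin returns nonzero when a transaction starts:

     if (__builtin_tbegin (0))
       {
	 // transactional code
	 __builtin_tend (0);
       }
     else
       handle_failure ();   // hypothetical fallback
*/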
13998
13999 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
14000
14001 static rtx
14002 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
14003 rtx target)
14004 {
14005 /* __builtin_cpu_init () is a nop, so expand to nothing. */
14006 if (fcode == RS6000_BUILTIN_CPU_INIT)
14007 return const0_rtx;
14008
14009 if (target == 0 || GET_MODE (target) != SImode)
14010 target = gen_reg_rtx (SImode);
14011
14012 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
14013 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
14014 	  /* The target_clones attribute creates an ARRAY_REF instead of a
14015 	     STRING_CST; convert it back to a STRING_CST.  */
14016 if (TREE_CODE (arg) == ARRAY_REF
14017 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
14018 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
14019 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
14020 arg = TREE_OPERAND (arg, 0);
14021
14022 if (TREE_CODE (arg) != STRING_CST)
14023 {
14024 error ("builtin %qs only accepts a string argument",
14025 rs6000_builtin_info[(size_t) fcode].name);
14026 return const0_rtx;
14027 }
14028
14029 if (fcode == RS6000_BUILTIN_CPU_IS)
14030 {
14031 const char *cpu = TREE_STRING_POINTER (arg);
14032 rtx cpuid = NULL_RTX;
14033 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14034 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14035 {
14036 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14037 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14038 break;
14039 }
14040 if (cpuid == NULL_RTX)
14041 {
14042 /* Invalid CPU argument. */
14043 error ("cpu %qs is an invalid argument to builtin %qs",
14044 cpu, rs6000_builtin_info[(size_t) fcode].name);
14045 return const0_rtx;
14046 }
14047
14048 rtx platform = gen_reg_rtx (SImode);
14049 rtx tcbmem = gen_const_mem (SImode,
14050 gen_rtx_PLUS (Pmode,
14051 gen_rtx_REG (Pmode, TLS_REGNUM),
14052 GEN_INT (TCB_PLATFORM_OFFSET)));
14053 emit_move_insn (platform, tcbmem);
14054 emit_insn (gen_eqsi3 (target, platform, cpuid));
14055 }
14056 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14057 {
14058 const char *hwcap = TREE_STRING_POINTER (arg);
14059 rtx mask = NULL_RTX;
14060 int hwcap_offset;
14061 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14062 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14063 {
14064 mask = GEN_INT (cpu_supports_info[i].mask);
14065 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14066 break;
14067 }
14068 if (mask == NULL_RTX)
14069 {
14070 /* Invalid HWCAP argument. */
14071 error ("%s %qs is an invalid argument to builtin %qs",
14072 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14073 return const0_rtx;
14074 }
14075
14076 rtx tcb_hwcap = gen_reg_rtx (SImode);
14077 rtx tcbmem = gen_const_mem (SImode,
14078 gen_rtx_PLUS (Pmode,
14079 gen_rtx_REG (Pmode, TLS_REGNUM),
14080 GEN_INT (hwcap_offset)));
14081 emit_move_insn (tcb_hwcap, tcbmem);
14082 rtx scratch1 = gen_reg_rtx (SImode);
14083 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14084 rtx scratch2 = gen_reg_rtx (SImode);
14085 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14086 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14087 }
14088 else
14089 gcc_unreachable ();
14090
14091 /* Record that we have expanded a CPU builtin, so that we can later
14092 emit a reference to the special symbol exported by LIBC to ensure we
14093 do not link against an old LIBC that doesn't support this feature. */
14094 cpu_builtin_p = true;
14095
14096 #else
14097 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14098 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14099
14100 /* For old LIBCs, always return FALSE. */
14101 emit_move_insn (target, GEN_INT (0));
14102 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14103
14104 return target;
14105 }
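
/* A minimal usage sketch (hypothetical user code): with a TCB-capable
   GLIBC, the expansion above reduces each call to a TCB load plus a
   compare or a mask-and-test:

     if (__builtin_cpu_is ("power9"))
       run_power9_path ();             // hypothetical helper
     else if (__builtin_cpu_supports ("vsx"))
       run_vsx_path ();                // hypothetical helper
*/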
14106
14107 static rtx
14108 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14109 {
14110 rtx pat;
14111 tree arg0 = CALL_EXPR_ARG (exp, 0);
14112 tree arg1 = CALL_EXPR_ARG (exp, 1);
14113 tree arg2 = CALL_EXPR_ARG (exp, 2);
14114 	  rtx op0 = expand_normal (arg0);
14115 	  rtx op1 = expand_normal (arg1);
14116 	  rtx op2 = expand_normal (arg2);
14117 	
14118 	  if (icode == CODE_FOR_nothing)
14119 	    /* Builtin not supported on this processor.  */
14120 	    return 0;
14121 	
14122 	  /* If we got invalid arguments bail out before generating bad rtl.  */
14123 	  if (arg0 == error_mark_node || arg1 == error_mark_node
14124 	      || arg2 == error_mark_node)
14125 	    return const0_rtx;
14126 	
14127 	  machine_mode tmode = insn_data[icode].operand[0].mode;
14128 	  machine_mode mode0 = insn_data[icode].operand[1].mode;
14129 	  machine_mode mode1 = insn_data[icode].operand[2].mode;
14130 	  machine_mode mode2 = insn_data[icode].operand[3].mode;
14131
14132 	  /* Check and prepare the arguments, depending on the instruction code.
14133 	
14134 	     Note that a switch statement instead of this sequence of tests
14135 	     would be incorrect, as many of the CODE_FOR values could be
14136 	     CODE_FOR_nothing, which would yield multiple case labels with
14137 	     identical values.  (We never reach here at runtime for such
14138 	     codes anyway.)  */
14139 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14140 || icode == CODE_FOR_altivec_vsldoi_v2df
14141 || icode == CODE_FOR_altivec_vsldoi_v4si
14142 || icode == CODE_FOR_altivec_vsldoi_v8hi
14143 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14144 {
14145 /* Only allow 4-bit unsigned literals. */
14146 STRIP_NOPS (arg2);
14147 if (TREE_CODE (arg2) != INTEGER_CST
14148 || TREE_INT_CST_LOW (arg2) & ~0xf)
14149 {
14150 error ("argument 3 must be a 4-bit unsigned literal");
14151 return CONST0_RTX (tmode);
14152 }
14153 }
14154 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14155 || icode == CODE_FOR_vsx_xxpermdi_v2di
14156 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14157 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14158 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14159 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14160 || icode == CODE_FOR_vsx_xxpermdi_v4si
14161 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14162 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14163 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14164 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14165 || icode == CODE_FOR_vsx_xxsldwi_v4si
14166 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14167 || icode == CODE_FOR_vsx_xxsldwi_v2di
14168 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14169 {
14170 /* Only allow 2-bit unsigned literals. */
14171 STRIP_NOPS (arg2);
14172 if (TREE_CODE (arg2) != INTEGER_CST
14173 || TREE_INT_CST_LOW (arg2) & ~0x3)
14174 {
14175 error ("argument 3 must be a 2-bit unsigned literal");
14176 return CONST0_RTX (tmode);
14177 }
14178 }
14179 else if (icode == CODE_FOR_vsx_set_v2df
14180 || icode == CODE_FOR_vsx_set_v2di
14181 || icode == CODE_FOR_bcdadd
14182 || icode == CODE_FOR_bcdadd_lt
14183 || icode == CODE_FOR_bcdadd_eq
14184 || icode == CODE_FOR_bcdadd_gt
14185 || icode == CODE_FOR_bcdsub
14186 || icode == CODE_FOR_bcdsub_lt
14187 || icode == CODE_FOR_bcdsub_eq
14188 || icode == CODE_FOR_bcdsub_gt)
14189 {
14190 /* Only allow 1-bit unsigned literals. */
14191 STRIP_NOPS (arg2);
14192 if (TREE_CODE (arg2) != INTEGER_CST
14193 || TREE_INT_CST_LOW (arg2) & ~0x1)
14194 {
14195 error ("argument 3 must be a 1-bit unsigned literal");
14196 return CONST0_RTX (tmode);
14197 }
14198 }
14199 else if (icode == CODE_FOR_dfp_ddedpd_dd
14200 || icode == CODE_FOR_dfp_ddedpd_td)
14201 {
14202 	      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
14203 	      STRIP_NOPS (arg0);
14204 	      if (TREE_CODE (arg0) != INTEGER_CST
14205 	  	  || TREE_INT_CST_LOW (arg0) & ~0x3)
14206 {
14207 error ("argument 1 must be 0 or 2");
14208 return CONST0_RTX (tmode);
14209 }
14210 }
14211 else if (icode == CODE_FOR_dfp_denbcd_dd
14212 || icode == CODE_FOR_dfp_denbcd_td)
14213 {
14214 /* Only allow 1-bit unsigned literals. */
14215 STRIP_NOPS (arg0);
14216 if (TREE_CODE (arg0) != INTEGER_CST
14217 || TREE_INT_CST_LOW (arg0) & ~0x1)
14218 {
14219 error ("argument 1 must be a 1-bit unsigned literal");
14220 return CONST0_RTX (tmode);
14221 }
14222 }
14223 else if (icode == CODE_FOR_dfp_dscli_dd
14224 || icode == CODE_FOR_dfp_dscli_td
14225 || icode == CODE_FOR_dfp_dscri_dd
14226 || icode == CODE_FOR_dfp_dscri_td)
14227 {
14228 /* Only allow 6-bit unsigned literals. */
14229 STRIP_NOPS (arg1);
14230 if (TREE_CODE (arg1) != INTEGER_CST
14231 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14232 {
14233 error ("argument 2 must be a 6-bit unsigned literal");
14234 return CONST0_RTX (tmode);
14235 }
14236 }
14237 else if (icode == CODE_FOR_crypto_vshasigmaw
14238 || icode == CODE_FOR_crypto_vshasigmad)
14239 {
14240 /* Check whether the 2nd and 3rd arguments are integer constants and in
14241 range and prepare arguments. */
14242 STRIP_NOPS (arg1);
14243 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14244 {
14245 error ("argument 2 must be 0 or 1");
14246 return CONST0_RTX (tmode);
14247 }
14248
14249 STRIP_NOPS (arg2);
14250 if (TREE_CODE (arg2) != INTEGER_CST
14251 || wi::geu_p (wi::to_wide (arg2), 16))
14252 {
14253 error ("argument 3 must be in the range 0..15");
14254 return CONST0_RTX (tmode);
14255 }
14256 }
14257
14258 if (target == 0
14259 || GET_MODE (target) != tmode
14260 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14261 target = gen_reg_rtx (tmode);
14262
14263 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14264 op0 = copy_to_mode_reg (mode0, op0);
14265 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14266 op1 = copy_to_mode_reg (mode1, op1);
14267 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14268 op2 = copy_to_mode_reg (mode2, op2);
14269
14270 pat = GEN_FCN (icode) (target, op0, op1, op2);
14271 if (! pat)
14272 return 0;
14273 emit_insn (pat);
14274
14275 return target;
14276 }
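
/* An illustrative sketch (hypothetical user code) of the literal checks
   above: vsldoi (vec_sld) takes a 4-bit unsigned literal, so the selector
   must be a compile-time constant in the range 0..15:

     vector int ok  = vec_sld (a, b, 3);   // accepted
     vector int bad = vec_sld (a, b, n);   // rejected: argument 3 must be
                                           // a 4-bit unsigned literal
*/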
14277
14278
14279 /* Expand the dst builtins. */
14280 static rtx
14281 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14282 bool *expandedp)
14283 {
14284 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14285 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14286 tree arg0, arg1, arg2;
14287 machine_mode mode0, mode1;
14288 rtx pat, op0, op1, op2;
14289 const struct builtin_description *d;
14290 size_t i;
14291
14292 *expandedp = false;
14293
14294 /* Handle DST variants. */
14295 d = bdesc_dst;
14296 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14297 if (d->code == fcode)
14298 {
14299 arg0 = CALL_EXPR_ARG (exp, 0);
14300 arg1 = CALL_EXPR_ARG (exp, 1);
14301 arg2 = CALL_EXPR_ARG (exp, 2);
14302 op0 = expand_normal (arg0);
14303 op1 = expand_normal (arg1);
14304 op2 = expand_normal (arg2);
14305 mode0 = insn_data[d->icode].operand[0].mode;
14306 mode1 = insn_data[d->icode].operand[1].mode;
14307
14308 /* Invalid arguments, bail out before generating bad rtl. */
14309 if (arg0 == error_mark_node
14310 || arg1 == error_mark_node
14311 || arg2 == error_mark_node)
14312 return const0_rtx;
14313
14314 *expandedp = true;
14315 STRIP_NOPS (arg2);
14316 if (TREE_CODE (arg2) != INTEGER_CST
14317 || TREE_INT_CST_LOW (arg2) & ~0x3)
14318 {
14319 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14320 return const0_rtx;
14321 }
14322
14323 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14324 op0 = copy_to_mode_reg (Pmode, op0);
14325 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14326 op1 = copy_to_mode_reg (mode1, op1);
14327
14328 pat = GEN_FCN (d->icode) (op0, op1, op2);
14329 if (pat != 0)
14330 emit_insn (pat);
14331
14332 return NULL_RTX;
14333 }
14334
14335 return NULL_RTX;
14336 }
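
/* An illustrative sketch (hypothetical user code): the 2-bit literal check
   above applies to the tag operand of the data-stream builtins:

     vec_dst (p, ctl, 0);   // accepted, tag in 0..3
     vec_dst (p, ctl, 7);   // rejected at compile time
*/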
14337
14338 /* Expand vec_init builtin. */
14339 static rtx
14340 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14341 {
14342 machine_mode tmode = TYPE_MODE (type);
14343 machine_mode inner_mode = GET_MODE_INNER (tmode);
14344 int i, n_elt = GET_MODE_NUNITS (tmode);
14345
14346 gcc_assert (VECTOR_MODE_P (tmode));
14347 gcc_assert (n_elt == call_expr_nargs (exp));
14348
14349 if (!target || !register_operand (target, tmode))
14350 target = gen_reg_rtx (tmode);
14351
14352 	  /* If we have a vector composed of a single element, such as V1TImode, do
14353 	     the initialization directly.  */
14354 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14355 {
14356 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14357 emit_move_insn (target, gen_lowpart (tmode, x));
14358 }
14359 else
14360 {
14361 rtvec v = rtvec_alloc (n_elt);
14362
14363 for (i = 0; i < n_elt; ++i)
14364 {
14365 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14366 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14367 }
14368
14369 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14370 }
14371
14372 return target;
14373 }
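
/* An illustrative sketch: for a four-element init the loop above collects
   the lowpart of each argument into (parallel [a b c d]) and hands it to
   rs6000_expand_vector_init, which at the source level corresponds to

     vector int v = (vector int) { a, b, c, d };

   while the single-element case (e.g. V1TImode) is a plain move.  */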
14374
14375 /* Return the integer constant in ARG. Constrain it to be in the range
14376 of the subparts of VEC_TYPE; issue an error if not. */
14377
14378 static int
14379 get_element_number (tree vec_type, tree arg)
14380 {
14381 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14382
14383 if (!tree_fits_uhwi_p (arg)
14384 || (elt = tree_to_uhwi (arg), elt > max))
14385 {
14386 error ("selector must be an integer constant in the range 0..%wi", max);
14387 return 0;
14388 }
14389
14390 return elt;
14391 }
14392
14393 /* Expand vec_set builtin. */
14394 static rtx
14395 altivec_expand_vec_set_builtin (tree exp)
14396 {
14397 machine_mode tmode, mode1;
14398 tree arg0, arg1, arg2;
14399 int elt;
14400 rtx op0, op1;
14401
14402 arg0 = CALL_EXPR_ARG (exp, 0);
14403 arg1 = CALL_EXPR_ARG (exp, 1);
14404 arg2 = CALL_EXPR_ARG (exp, 2);
14405
14406 tmode = TYPE_MODE (TREE_TYPE (arg0));
14407 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14408 gcc_assert (VECTOR_MODE_P (tmode));
14409
14410 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14411 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14412 elt = get_element_number (TREE_TYPE (arg0), arg2);
14413
14414 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14415 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14416
14417 op0 = force_reg (tmode, op0);
14418 op1 = force_reg (mode1, op1);
14419
14420 rs6000_expand_vector_set (op0, op1, elt);
14421
14422 return op0;
14423 }
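
/* An illustrative sketch (hypothetical user code): this expander requires
   a constant selector within the vector's subparts:

     vector int w = vec_insert (x, v, 2);   // replaces element 2
     vector int e = vec_insert (x, v, 9);   // error: selector must be an
                                            // integer constant in 0..3
*/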
14424
14425 /* Expand vec_ext builtin. */
14426 static rtx
14427 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14428 {
14429 machine_mode tmode, mode0;
14430 tree arg0, arg1;
14431 rtx op0;
14432 rtx op1;
14433
14434 arg0 = CALL_EXPR_ARG (exp, 0);
14435 arg1 = CALL_EXPR_ARG (exp, 1);
14436
14437 op0 = expand_normal (arg0);
14438 op1 = expand_normal (arg1);
14439
14440 if (TREE_CODE (arg1) == INTEGER_CST)
14441 {
14442 unsigned HOST_WIDE_INT elt;
14443 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14444 unsigned int truncated_selector;
14445 	      /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1) returns
14446 		 the low-order bits of the INTEGER_CST, giving modulo indexing.  */
14447 elt = TREE_INT_CST_LOW (arg1);
14448 truncated_selector = elt % size;
14449 op1 = GEN_INT (truncated_selector);
14450 }
14451
14452 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14453 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14454 gcc_assert (VECTOR_MODE_P (mode0));
14455
14456 op0 = force_reg (mode0, op0);
14457
14458 if (optimize || !target || !register_operand (target, tmode))
14459 target = gen_reg_rtx (tmode);
14460
14461 rs6000_expand_vector_extract (target, op0, op1);
14462
14463 return target;
14464 }
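
/* An illustrative sketch (hypothetical user code): constant selectors are
   truncated modulo the number of elements, so for a V4SI operand these
   extract the same element:

     int x = vec_extract (v, 5);   // 5 % 4 == 1
     int y = vec_extract (v, 1);
*/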
14465
14466 /* Expand the builtin in EXP and store the result in TARGET. Store
14467 true in *EXPANDEDP if we found a builtin to expand. */
14468 static rtx
14469 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14470 {
14471 const struct builtin_description *d;
14472 size_t i;
14473 enum insn_code icode;
14474 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14475 tree arg0, arg1, arg2;
14476 rtx op0, pat;
14477 machine_mode tmode, mode0;
14478 enum rs6000_builtins fcode
14479 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14480
14481 if (rs6000_overloaded_builtin_p (fcode))
14482 {
14483 *expandedp = true;
14484 error ("unresolved overload for Altivec builtin %qF", fndecl);
14485
14486 /* Given it is invalid, just generate a normal call. */
14487 return expand_call (exp, target, false);
14488 }
14489
14490 target = altivec_expand_dst_builtin (exp, target, expandedp);
14491 if (*expandedp)
14492 return target;
14493
14494 *expandedp = true;
14495
14496 switch (fcode)
14497 {
14498 case ALTIVEC_BUILTIN_STVX_V2DF:
14499 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14500 case ALTIVEC_BUILTIN_STVX_V2DI:
14501 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14502 case ALTIVEC_BUILTIN_STVX_V4SF:
14503 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14504 case ALTIVEC_BUILTIN_STVX:
14505 case ALTIVEC_BUILTIN_STVX_V4SI:
14506 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14507 case ALTIVEC_BUILTIN_STVX_V8HI:
14508 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14509 case ALTIVEC_BUILTIN_STVX_V16QI:
14510 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14511 case ALTIVEC_BUILTIN_STVEBX:
14512 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14513 case ALTIVEC_BUILTIN_STVEHX:
14514 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14515 case ALTIVEC_BUILTIN_STVEWX:
14516 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14517 case ALTIVEC_BUILTIN_STVXL_V2DF:
14518 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14519 case ALTIVEC_BUILTIN_STVXL_V2DI:
14520 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14521 case ALTIVEC_BUILTIN_STVXL_V4SF:
14522 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14523 case ALTIVEC_BUILTIN_STVXL:
14524 case ALTIVEC_BUILTIN_STVXL_V4SI:
14525 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14526 case ALTIVEC_BUILTIN_STVXL_V8HI:
14527 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14528 case ALTIVEC_BUILTIN_STVXL_V16QI:
14529 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14530
14531 case ALTIVEC_BUILTIN_STVLX:
14532 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14533 case ALTIVEC_BUILTIN_STVLXL:
14534 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14535 case ALTIVEC_BUILTIN_STVRX:
14536 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14537 case ALTIVEC_BUILTIN_STVRXL:
14538 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14539
14540 case P9V_BUILTIN_STXVL:
14541 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14542
14543 case P9V_BUILTIN_XST_LEN_R:
14544 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14545
14546 case VSX_BUILTIN_STXVD2X_V1TI:
14547 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14548 case VSX_BUILTIN_STXVD2X_V2DF:
14549 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14550 case VSX_BUILTIN_STXVD2X_V2DI:
14551 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14552 case VSX_BUILTIN_STXVW4X_V4SF:
14553 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14554 case VSX_BUILTIN_STXVW4X_V4SI:
14555 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14556 case VSX_BUILTIN_STXVW4X_V8HI:
14557 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14558 case VSX_BUILTIN_STXVW4X_V16QI:
14559 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14560
14561 /* For the following on big endian, it's ok to use any appropriate
14562 unaligned-supporting store, so use a generic expander. For
14563 little-endian, the exact element-reversing instruction must
14564 be used. */
14565 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14566 {
14567 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14568 : CODE_FOR_vsx_st_elemrev_v1ti);
14569 return altivec_expand_stv_builtin (code, exp);
14570 }
14571 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14572 {
14573 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14574 : CODE_FOR_vsx_st_elemrev_v2df);
14575 return altivec_expand_stv_builtin (code, exp);
14576 }
14577 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14578 {
14579 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14580 : CODE_FOR_vsx_st_elemrev_v2di);
14581 return altivec_expand_stv_builtin (code, exp);
14582 }
14583 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14584 {
14585 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14586 : CODE_FOR_vsx_st_elemrev_v4sf);
14587 return altivec_expand_stv_builtin (code, exp);
14588 }
14589 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14590 {
14591 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14592 : CODE_FOR_vsx_st_elemrev_v4si);
14593 return altivec_expand_stv_builtin (code, exp);
14594 }
14595 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14596 {
14597 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14598 : CODE_FOR_vsx_st_elemrev_v8hi);
14599 return altivec_expand_stv_builtin (code, exp);
14600 }
14601 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14602 {
14603 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14604 : CODE_FOR_vsx_st_elemrev_v16qi);
14605 return altivec_expand_stv_builtin (code, exp);
14606 }
14607
14608 case ALTIVEC_BUILTIN_MFVSCR:
14609 icode = CODE_FOR_altivec_mfvscr;
14610 tmode = insn_data[icode].operand[0].mode;
14611
14612 if (target == 0
14613 || GET_MODE (target) != tmode
14614 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14615 target = gen_reg_rtx (tmode);
14616
14617 pat = GEN_FCN (icode) (target);
14618 if (! pat)
14619 return 0;
14620 emit_insn (pat);
14621 return target;
14622
14623 case ALTIVEC_BUILTIN_MTVSCR:
14624 icode = CODE_FOR_altivec_mtvscr;
14625 arg0 = CALL_EXPR_ARG (exp, 0);
14626 op0 = expand_normal (arg0);
14627 mode0 = insn_data[icode].operand[0].mode;
14628
14629 /* If we got invalid arguments bail out before generating bad rtl. */
14630 if (arg0 == error_mark_node)
14631 return const0_rtx;
14632
14633 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14634 op0 = copy_to_mode_reg (mode0, op0);
14635
14636 pat = GEN_FCN (icode) (op0);
14637 if (pat)
14638 emit_insn (pat);
14639 return NULL_RTX;
14640
14641 case ALTIVEC_BUILTIN_DSSALL:
14642 emit_insn (gen_altivec_dssall ());
14643 return NULL_RTX;
14644
14645 case ALTIVEC_BUILTIN_DSS:
14646 icode = CODE_FOR_altivec_dss;
14647 arg0 = CALL_EXPR_ARG (exp, 0);
14648 STRIP_NOPS (arg0);
14649 op0 = expand_normal (arg0);
14650 mode0 = insn_data[icode].operand[0].mode;
14651
14652 /* If we got invalid arguments bail out before generating bad rtl. */
14653 if (arg0 == error_mark_node)
14654 return const0_rtx;
14655
14656 if (TREE_CODE (arg0) != INTEGER_CST
14657 || TREE_INT_CST_LOW (arg0) & ~0x3)
14658 {
14659 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14660 return const0_rtx;
14661 }
14662
14663 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14664 op0 = copy_to_mode_reg (mode0, op0);
14665
14666 emit_insn (gen_altivec_dss (op0));
14667 return NULL_RTX;
14668
14669 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14670 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14671 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14672 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14673 case VSX_BUILTIN_VEC_INIT_V2DF:
14674 case VSX_BUILTIN_VEC_INIT_V2DI:
14675 case VSX_BUILTIN_VEC_INIT_V1TI:
14676 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14677
14678 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14679 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14680 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14681 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14682 case VSX_BUILTIN_VEC_SET_V2DF:
14683 case VSX_BUILTIN_VEC_SET_V2DI:
14684 case VSX_BUILTIN_VEC_SET_V1TI:
14685 return altivec_expand_vec_set_builtin (exp);
14686
14687 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14688 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14689 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14690 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14691 case VSX_BUILTIN_VEC_EXT_V2DF:
14692 case VSX_BUILTIN_VEC_EXT_V2DI:
14693 case VSX_BUILTIN_VEC_EXT_V1TI:
14694 return altivec_expand_vec_ext_builtin (exp, target);
14695
14696 case P9V_BUILTIN_VEC_EXTRACT4B:
14697 arg1 = CALL_EXPR_ARG (exp, 1);
14698 STRIP_NOPS (arg1);
14699
14700 /* Generate a normal call if it is invalid. */
14701 if (arg1 == error_mark_node)
14702 return expand_call (exp, target, false);
14703
14704 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14705 {
14706 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14707 return expand_call (exp, target, false);
14708 }
14709 break;
14710
14711 case P9V_BUILTIN_VEC_INSERT4B:
14712 arg2 = CALL_EXPR_ARG (exp, 2);
14713 STRIP_NOPS (arg2);
14714
14715 /* Generate a normal call if it is invalid. */
14716 if (arg2 == error_mark_node)
14717 return expand_call (exp, target, false);
14718
14719 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14720 {
14721 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
14722 return expand_call (exp, target, false);
14723 }
14724 break;
14725
14726 	    default:
14727 	      /* Fall through to the expanders below.  */
14728 	      break;
14729 	    }
14730
14731 /* Expand abs* operations. */
14732 d = bdesc_abs;
14733 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14734 if (d->code == fcode)
14735 return altivec_expand_abs_builtin (d->icode, exp, target);
14736
14737 /* Expand the AltiVec predicates. */
14738 d = bdesc_altivec_preds;
14739 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14740 if (d->code == fcode)
14741 return altivec_expand_predicate_builtin (d->icode, exp, target);
14742
14743 	  /* LV* are funky; we initialized them differently, so handle them here.  */
14744 switch (fcode)
14745 {
14746 case ALTIVEC_BUILTIN_LVSL:
14747 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14748 exp, target, false);
14749 case ALTIVEC_BUILTIN_LVSR:
14750 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14751 exp, target, false);
14752 case ALTIVEC_BUILTIN_LVEBX:
14753 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14754 exp, target, false);
14755 case ALTIVEC_BUILTIN_LVEHX:
14756 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14757 exp, target, false);
14758 case ALTIVEC_BUILTIN_LVEWX:
14759 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14760 exp, target, false);
14761 case ALTIVEC_BUILTIN_LVXL_V2DF:
14762 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14763 exp, target, false);
14764 case ALTIVEC_BUILTIN_LVXL_V2DI:
14765 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14766 exp, target, false);
14767 case ALTIVEC_BUILTIN_LVXL_V4SF:
14768 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14769 exp, target, false);
14770 case ALTIVEC_BUILTIN_LVXL:
14771 case ALTIVEC_BUILTIN_LVXL_V4SI:
14772 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14773 exp, target, false);
14774 case ALTIVEC_BUILTIN_LVXL_V8HI:
14775 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14776 exp, target, false);
14777 case ALTIVEC_BUILTIN_LVXL_V16QI:
14778 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14779 exp, target, false);
14780 case ALTIVEC_BUILTIN_LVX_V1TI:
14781 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14782 exp, target, false);
14783 case ALTIVEC_BUILTIN_LVX_V2DF:
14784 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14785 exp, target, false);
14786 case ALTIVEC_BUILTIN_LVX_V2DI:
14787 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14788 exp, target, false);
14789 case ALTIVEC_BUILTIN_LVX_V4SF:
14790 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14791 exp, target, false);
14792 case ALTIVEC_BUILTIN_LVX:
14793 case ALTIVEC_BUILTIN_LVX_V4SI:
14794 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14795 exp, target, false);
14796 case ALTIVEC_BUILTIN_LVX_V8HI:
14797 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14798 exp, target, false);
14799 case ALTIVEC_BUILTIN_LVX_V16QI:
14800 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
14801 exp, target, false);
14802 case ALTIVEC_BUILTIN_LVLX:
14803 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14804 exp, target, true);
14805 case ALTIVEC_BUILTIN_LVLXL:
14806 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14807 exp, target, true);
14808 case ALTIVEC_BUILTIN_LVRX:
14809 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14810 exp, target, true);
14811 case ALTIVEC_BUILTIN_LVRXL:
14812 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14813 exp, target, true);
14814 case VSX_BUILTIN_LXVD2X_V1TI:
14815 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14816 exp, target, false);
14817 case VSX_BUILTIN_LXVD2X_V2DF:
14818 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14819 exp, target, false);
14820 case VSX_BUILTIN_LXVD2X_V2DI:
14821 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14822 exp, target, false);
14823 case VSX_BUILTIN_LXVW4X_V4SF:
14824 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14825 exp, target, false);
14826 case VSX_BUILTIN_LXVW4X_V4SI:
14827 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14828 exp, target, false);
14829 case VSX_BUILTIN_LXVW4X_V8HI:
14830 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14831 exp, target, false);
14832 case VSX_BUILTIN_LXVW4X_V16QI:
14833 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14834 exp, target, false);
14835 /* For the following on big endian, it's ok to use any appropriate
14836 unaligned-supporting load, so use a generic expander. For
14837 little-endian, the exact element-reversing instruction must
14838 be used. */
14839 case VSX_BUILTIN_LD_ELEMREV_V2DF:
14840 {
14841 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
14842 : CODE_FOR_vsx_ld_elemrev_v2df);
14843 return altivec_expand_lv_builtin (code, exp, target, false);
14844 }
14845 case VSX_BUILTIN_LD_ELEMREV_V1TI:
14846 {
14847 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
14848 : CODE_FOR_vsx_ld_elemrev_v1ti);
14849 return altivec_expand_lv_builtin (code, exp, target, false);
14850 }
14851 case VSX_BUILTIN_LD_ELEMREV_V2DI:
14852 {
14853 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
14854 : CODE_FOR_vsx_ld_elemrev_v2di);
14855 return altivec_expand_lv_builtin (code, exp, target, false);
14856 }
14857 case VSX_BUILTIN_LD_ELEMREV_V4SF:
14858 {
14859 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
14860 : CODE_FOR_vsx_ld_elemrev_v4sf);
14861 return altivec_expand_lv_builtin (code, exp, target, false);
14862 }
14863 case VSX_BUILTIN_LD_ELEMREV_V4SI:
14864 {
14865 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
14866 : CODE_FOR_vsx_ld_elemrev_v4si);
14867 return altivec_expand_lv_builtin (code, exp, target, false);
14868 }
14869 case VSX_BUILTIN_LD_ELEMREV_V8HI:
14870 {
14871 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
14872 : CODE_FOR_vsx_ld_elemrev_v8hi);
14873 return altivec_expand_lv_builtin (code, exp, target, false);
14874 }
14875 case VSX_BUILTIN_LD_ELEMREV_V16QI:
14876 {
14877 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
14878 : CODE_FOR_vsx_ld_elemrev_v16qi);
14879 return altivec_expand_lv_builtin (code, exp, target, false);
14880 }
14881 	
14882 	    default:
14883 	      /* Fall through to the code after the switch.  */
14884 	      break;
14885 	    }
14886
14887 *expandedp = false;
14888 return NULL_RTX;
14889 }
14890
14891 /* Check whether a builtin function is supported in this target
14892 configuration. */
14893 bool
14894 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
14895 {
14896 	  HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
14897 	
14898 	  /* The builtin is supported only if every mask bit it requires is
14899 	     set in the current builtin mask.  */
14900 	  return (fnmask & rs6000_builtin_mask) == fnmask;
14901 	}
14902
14903 /* Raise an error message for a builtin function that is called without the
14904 appropriate target options being set. */
14905
14906 static void
14907 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14908 {
14909 size_t uns_fncode = (size_t) fncode;
14910 const char *name = rs6000_builtin_info[uns_fncode].name;
14911 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14912
14913 gcc_assert (name != NULL);
14914 if ((fnmask & RS6000_BTM_CELL) != 0)
14915 error ("builtin function %qs is only valid for the cell processor", name);
14916 else if ((fnmask & RS6000_BTM_VSX) != 0)
14917 error ("builtin function %qs requires the %qs option", name, "-mvsx");
14918 else if ((fnmask & RS6000_BTM_HTM) != 0)
14919 error ("builtin function %qs requires the %qs option", name, "-mhtm");
14920 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14921 error ("builtin function %qs requires the %qs option", name, "-maltivec");
14922 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14923 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14924 error ("builtin function %qs requires the %qs and %qs options",
14925 name, "-mhard-dfp", "-mpower8-vector");
14926 else if ((fnmask & RS6000_BTM_DFP) != 0)
14927 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
14928 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14929 error ("builtin function %qs requires the %qs option", name,
14930 "-mpower8-vector");
14931 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14932 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14933 error ("builtin function %qs requires the %qs and %qs options",
14934 name, "-mcpu=power9", "-m64");
14935 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
14936 error ("builtin function %qs requires the %qs option", name,
14937 "-mcpu=power9");
14938 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14939 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14940 error ("builtin function %qs requires the %qs and %qs options",
14941 name, "-mcpu=power9", "-m64");
14942 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
14943 error ("builtin function %qs requires the %qs option", name,
14944 "-mcpu=power9");
14945 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
14946 {
14947 if (!TARGET_HARD_FLOAT)
14948 error ("builtin function %qs requires the %qs option", name,
14949 "-mhard-float");
14950 else
14951 error ("builtin function %qs requires the %qs option", name,
14952 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
14953 }
14954 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
14955 error ("builtin function %qs requires the %qs option", name,
14956 "-mhard-float");
14957 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
14958 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
14959 name);
14960 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
14961 error ("builtin function %qs requires the %qs option", name,
14962 "%<-mfloat128%>");
14963 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14964 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14965 error ("builtin function %qs requires the %qs (or newer), and "
14966 "%qs or %qs options",
14967 name, "-mcpu=power7", "-m64", "-mpowerpc64");
14968 else
14969 error ("builtin function %qs is not supported with the current options",
14970 name);
14971 }
14972
14973 /* Target hook for early folding of built-ins, shamelessly stolen
14974 from ia64.c. */
14975
14976 static tree
14977 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
14978 int n_args ATTRIBUTE_UNUSED,
14979 tree *args ATTRIBUTE_UNUSED,
14980 bool ignore ATTRIBUTE_UNUSED)
14981 {
14982 #ifdef SUBTARGET_FOLD_BUILTIN
14983 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
14984 #else
14985 return NULL_TREE;
14986 #endif
14987 }
14988
14989 /* Helper function to sort out which built-ins may be valid without having
14990 a LHS. */
14991 static bool
14992 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
14993 {
14994 switch (fn_code)
14995 {
14996 case ALTIVEC_BUILTIN_STVX_V16QI:
14997 case ALTIVEC_BUILTIN_STVX_V8HI:
14998 case ALTIVEC_BUILTIN_STVX_V4SI:
14999 case ALTIVEC_BUILTIN_STVX_V4SF:
15000 case ALTIVEC_BUILTIN_STVX_V2DI:
15001 case ALTIVEC_BUILTIN_STVX_V2DF:
15002 case VSX_BUILTIN_STXVW4X_V16QI:
15003 case VSX_BUILTIN_STXVW4X_V8HI:
15004 case VSX_BUILTIN_STXVW4X_V4SF:
15005 case VSX_BUILTIN_STXVW4X_V4SI:
15006 case VSX_BUILTIN_STXVD2X_V2DF:
15007 case VSX_BUILTIN_STXVD2X_V2DI:
15008 return true;
15009 default:
15010 return false;
15011 }
15012 }
15013
15014 /* Helper function to handle the gimple folding of a vector compare
15015 operation. This sets up true/false vectors, and uses the
15016 VEC_COND_EXPR operation.
15017 CODE indicates which comparison is to be made. (EQ, GT, ...).
15018 TYPE indicates the type of the result. */
15019 static tree
15020 fold_build_vec_cmp (tree_code code, tree type,
15021 tree arg0, tree arg1)
15022 {
15023 tree cmp_type = build_same_sized_truth_vector_type (type);
15024 tree zero_vec = build_zero_cst (type);
15025 tree minus_one_vec = build_minus_one_cst (type);
15026 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
15027 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
15028 }
15029
15030 /* Helper function to handle the in-between steps for the
15031 vector compare built-ins. */
15032 static void
15033 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15034 {
15035 tree arg0 = gimple_call_arg (stmt, 0);
15036 tree arg1 = gimple_call_arg (stmt, 1);
15037 tree lhs = gimple_call_lhs (stmt);
15038 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15039 gimple *g = gimple_build_assign (lhs, cmp);
15040 gimple_set_location (g, gimple_location (stmt));
15041 gsi_replace (gsi, g, true);
15042 }
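
/* An illustrative sketch, in GIMPLE-like pseudo code, of the folding above
   for a vec_cmpeq on V4SI (names are illustrative):

     cmp = arg0 == arg1;   // boolean vector
     lhs = VEC_COND_EXPR <cmp, { -1, -1, -1, -1 }, { 0, 0, 0, 0 }>;

   so each lane that compares true becomes all-ones and the rest zero.  */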
15043
15044 	/* Map V2DF and V4SF types to their integral equivalents (V2DI, V4SI).  */
15045 	tree
15046 	map_to_integral_tree_type (tree input_tree_type)
15047 {
15048 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15049 return input_tree_type;
15050 else
15051 {
15052 if (types_compatible_p (TREE_TYPE (input_tree_type),
15053 TREE_TYPE (V2DF_type_node)))
15054 return V2DI_type_node;
15055 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15056 TREE_TYPE (V4SF_type_node)))
15057 return V4SI_type_node;
15058 else
15059 gcc_unreachable ();
15060 }
15061 }
15062
15063 	/* Helper function to handle the vector merge[hl] built-ins.  The
15064 	   difference between the h and l versions is in the values used when
15065 	   building the permute vector for a high-word versus a low-word merge;
15066 	   the variance is keyed off the use_high parameter.  */
15067 static void
15068 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15069 {
15070 tree arg0 = gimple_call_arg (stmt, 0);
15071 tree arg1 = gimple_call_arg (stmt, 1);
15072 tree lhs = gimple_call_lhs (stmt);
15073 tree lhs_type = TREE_TYPE (lhs);
15074 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15075 int midpoint = n_elts / 2;
15076 int offset = 0;
15077
15078 if (use_high == 1)
15079 offset = midpoint;
15080
15081 /* The permute_type will match the lhs for integral types. For double and
15082 float types, the permute type needs to map to the V2 or V4 type that
15083 matches size. */
15084 tree permute_type;
15085 permute_type = map_to_integral_tree_type (lhs_type);
15086 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15087
15088 for (int i = 0; i < midpoint; i++)
15089 {
15090 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15091 offset + i));
15092 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15093 offset + n_elts + i));
15094 }
15095
15096 tree permute = elts.build ();
15097
15098 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15099 gimple_set_location (g, gimple_location (stmt));
15100 gsi_replace (gsi, g, true);
15101 }
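
/* A worked example: for V4SI (n_elts == 4, midpoint == 2) the loop above
   pushes { 0, 4, 1, 5 } when use_high is 0 and { 2, 6, 3, 7 } when
   use_high is 1, yielding e.g.

     lhs = VEC_PERM_EXPR <arg0, arg1, { 0, 4, 1, 5 }>;  */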
15102
15103 /* Helper function to handle the vector merge[eo] built-ins. */
15104 static void
15105 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15106 {
15107 tree arg0 = gimple_call_arg (stmt, 0);
15108 tree arg1 = gimple_call_arg (stmt, 1);
15109 tree lhs = gimple_call_lhs (stmt);
15110 tree lhs_type = TREE_TYPE (lhs);
15111 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15112
15113 /* The permute_type will match the lhs for integral types. For double and
15114 float types, the permute type needs to map to the V2 or V4 type that
15115 matches size. */
15116 tree permute_type;
15117 permute_type = map_to_integral_tree_type (lhs_type);
15118
15119 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15120
15121 /* Build the permute vector. */
15122 for (int i = 0; i < n_elts / 2; i++)
15123 {
15124 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15125 2*i + use_odd));
15126 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15127 2*i + use_odd + n_elts));
15128 }
15129
15130 tree permute = elts.build ();
15131
15132 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15133 gimple_set_location (g, gimple_location (stmt));
15134 gsi_replace (gsi, g, true);
15135 }
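
/* A worked example: for V4SI (n_elts == 4) the loop above pushes
   { 0, 4, 2, 6 } when use_odd is 0 and { 1, 5, 3, 7 } when use_odd is 1,
   i.e. the even- and odd-element merge flavors respectively.  */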
15136
15137 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15138 a constant, use rs6000_fold_builtin.) */
15139
15140 bool
15141 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15142 {
15143 gimple *stmt = gsi_stmt (*gsi);
15144 tree fndecl = gimple_call_fndecl (stmt);
15145 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15146 enum rs6000_builtins fn_code
15147 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15148 tree arg0, arg1, lhs, temp;
15149 enum tree_code bcode;
15150 gimple *g;
15151
15152 size_t uns_fncode = (size_t) fn_code;
15153 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15154 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15155 const char *fn_name2 = (icode != CODE_FOR_nothing)
15156 ? get_insn_name ((int) icode)
15157 : "nothing";
15158
15159 if (TARGET_DEBUG_BUILTIN)
15160 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15161 fn_code, fn_name1, fn_name2);
15162
15163 if (!rs6000_fold_gimple)
15164 return false;
15165
15166 /* Prevent gimple folding for code that does not have a LHS, unless it is
15167 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15168 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15169 return false;
15170
15171 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15172 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15173 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15174 if (!func_valid_p)
15175 return false;
15176
15177 switch (fn_code)
15178 {
15179 /* Flavors of vec_add. We deliberately don't expand
15180 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15181 TImode, resulting in much poorer code generation. */
15182 case ALTIVEC_BUILTIN_VADDUBM:
15183 case ALTIVEC_BUILTIN_VADDUHM:
15184 case ALTIVEC_BUILTIN_VADDUWM:
15185 case P8V_BUILTIN_VADDUDM:
15186 case ALTIVEC_BUILTIN_VADDFP:
15187 case VSX_BUILTIN_XVADDDP:
15188 bcode = PLUS_EXPR;
15189 do_binary:
15190 arg0 = gimple_call_arg (stmt, 0);
15191 arg1 = gimple_call_arg (stmt, 1);
15192 lhs = gimple_call_lhs (stmt);
15193 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15194 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15195 {
15196 /* Ensure the binary operation is performed in a type
15197 that wraps if it is integral type. */
15198 gimple_seq stmts = NULL;
15199 tree type = unsigned_type_for (TREE_TYPE (lhs));
15200 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15201 type, arg0);
15202 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15203 type, arg1);
15204 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15205 type, uarg0, uarg1);
15206 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15207 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15208 build1 (VIEW_CONVERT_EXPR,
15209 TREE_TYPE (lhs), res));
15210 gsi_replace (gsi, g, true);
15211 return true;
15212 }
15213 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15214 gimple_set_location (g, gimple_location (stmt));
15215 gsi_replace (gsi, g, true);
15216 return true;
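
/* An illustrative sketch, in GIMPLE-like pseudo code, of the wrap-safe
   path above for a V4SI vec_add (names are illustrative):

     u0  = VIEW_CONVERT_EXPR <vector unsigned int> (arg0);
     u1  = VIEW_CONVERT_EXPR <vector unsigned int> (arg1);
     res = u0 + u1;
     lhs = VIEW_CONVERT_EXPR <vector int> (res);

   performing the addition in an unsigned type so that overflow wraps
   instead of being undefined.  */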
15217 /* Flavors of vec_sub. We deliberately don't expand
15218 P8V_BUILTIN_VSUBUQM. */
15219 case ALTIVEC_BUILTIN_VSUBUBM:
15220 case ALTIVEC_BUILTIN_VSUBUHM:
15221 case ALTIVEC_BUILTIN_VSUBUWM:
15222 case P8V_BUILTIN_VSUBUDM:
15223 case ALTIVEC_BUILTIN_VSUBFP:
15224 case VSX_BUILTIN_XVSUBDP:
15225 bcode = MINUS_EXPR;
15226 goto do_binary;
15227 case VSX_BUILTIN_XVMULSP:
15228 case VSX_BUILTIN_XVMULDP:
15229 arg0 = gimple_call_arg (stmt, 0);
15230 arg1 = gimple_call_arg (stmt, 1);
15231 lhs = gimple_call_lhs (stmt);
15232 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15233 gimple_set_location (g, gimple_location (stmt));
15234 gsi_replace (gsi, g, true);
15235 return true;
15236 /* Even element flavors of vec_mul (signed). */
15237 case ALTIVEC_BUILTIN_VMULESB:
15238 case ALTIVEC_BUILTIN_VMULESH:
15239 case P8V_BUILTIN_VMULESW:
15240 /* Even element flavors of vec_mul (unsigned). */
15241 case ALTIVEC_BUILTIN_VMULEUB:
15242 case ALTIVEC_BUILTIN_VMULEUH:
15243 case P8V_BUILTIN_VMULEUW:
15244 arg0 = gimple_call_arg (stmt, 0);
15245 arg1 = gimple_call_arg (stmt, 1);
15246 lhs = gimple_call_lhs (stmt);
15247 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15248 gimple_set_location (g, gimple_location (stmt));
15249 gsi_replace (gsi, g, true);
15250 return true;
15251 /* Odd element flavors of vec_mul (signed). */
15252 case ALTIVEC_BUILTIN_VMULOSB:
15253 case ALTIVEC_BUILTIN_VMULOSH:
15254 case P8V_BUILTIN_VMULOSW:
15255 /* Odd element flavors of vec_mul (unsigned). */
15256 case ALTIVEC_BUILTIN_VMULOUB:
15257 case ALTIVEC_BUILTIN_VMULOUH:
15258 case P8V_BUILTIN_VMULOUW:
15259 arg0 = gimple_call_arg (stmt, 0);
15260 arg1 = gimple_call_arg (stmt, 1);
15261 lhs = gimple_call_lhs (stmt);
15262 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15263 gimple_set_location (g, gimple_location (stmt));
15264 gsi_replace (gsi, g, true);
15265 return true;
15266 /* Flavors of vec_div (Integer). */
15267 case VSX_BUILTIN_DIV_V2DI:
15268 case VSX_BUILTIN_UDIV_V2DI:
15269 arg0 = gimple_call_arg (stmt, 0);
15270 arg1 = gimple_call_arg (stmt, 1);
15271 lhs = gimple_call_lhs (stmt);
15272 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15273 gimple_set_location (g, gimple_location (stmt));
15274 gsi_replace (gsi, g, true);
15275 return true;
15276 /* Flavors of vec_div (Float). */
15277 case VSX_BUILTIN_XVDIVSP:
15278 case VSX_BUILTIN_XVDIVDP:
15279 arg0 = gimple_call_arg (stmt, 0);
15280 arg1 = gimple_call_arg (stmt, 1);
15281 lhs = gimple_call_lhs (stmt);
15282 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15283 gimple_set_location (g, gimple_location (stmt));
15284 gsi_replace (gsi, g, true);
15285 return true;
15286 /* Flavors of vec_and. */
15287 case ALTIVEC_BUILTIN_VAND:
15288 arg0 = gimple_call_arg (stmt, 0);
15289 arg1 = gimple_call_arg (stmt, 1);
15290 lhs = gimple_call_lhs (stmt);
15291 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15292 gimple_set_location (g, gimple_location (stmt));
15293 gsi_replace (gsi, g, true);
15294 return true;
15295 /* Flavors of vec_andc. */
15296 case ALTIVEC_BUILTIN_VANDC:
15297 arg0 = gimple_call_arg (stmt, 0);
15298 arg1 = gimple_call_arg (stmt, 1);
15299 lhs = gimple_call_lhs (stmt);
15300 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15301 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15302 gimple_set_location (g, gimple_location (stmt));
15303 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15304 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15305 gimple_set_location (g, gimple_location (stmt));
15306 gsi_replace (gsi, g, true);
15307 return true;
15308 /* Flavors of vec_nand. */
15309 case P8V_BUILTIN_VEC_NAND:
15310 case P8V_BUILTIN_NAND_V16QI:
15311 case P8V_BUILTIN_NAND_V8HI:
15312 case P8V_BUILTIN_NAND_V4SI:
15313 case P8V_BUILTIN_NAND_V4SF:
15314 case P8V_BUILTIN_NAND_V2DF:
15315 case P8V_BUILTIN_NAND_V2DI:
15316 arg0 = gimple_call_arg (stmt, 0);
15317 arg1 = gimple_call_arg (stmt, 1);
15318 lhs = gimple_call_lhs (stmt);
15319 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15320 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15321 gimple_set_location (g, gimple_location (stmt));
15322 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15323 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15324 gimple_set_location (g, gimple_location (stmt));
15325 gsi_replace (gsi, g, true);
15326 return true;
15327 /* Flavors of vec_or. */
15328 case ALTIVEC_BUILTIN_VOR:
15329 arg0 = gimple_call_arg (stmt, 0);
15330 arg1 = gimple_call_arg (stmt, 1);
15331 lhs = gimple_call_lhs (stmt);
15332 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15333 gimple_set_location (g, gimple_location (stmt));
15334 gsi_replace (gsi, g, true);
15335 return true;
15336 /* flavors of vec_orc. */
15337 case P8V_BUILTIN_ORC_V16QI:
15338 case P8V_BUILTIN_ORC_V8HI:
15339 case P8V_BUILTIN_ORC_V4SI:
15340 case P8V_BUILTIN_ORC_V4SF:
15341 case P8V_BUILTIN_ORC_V2DF:
15342 case P8V_BUILTIN_ORC_V2DI:
15343 arg0 = gimple_call_arg (stmt, 0);
15344 arg1 = gimple_call_arg (stmt, 1);
15345 lhs = gimple_call_lhs (stmt);
15346 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15347 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15348 gimple_set_location (g, gimple_location (stmt));
15349 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15350 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15351 gimple_set_location (g, gimple_location (stmt));
15352 gsi_replace (gsi, g, true);
15353 return true;
15354 /* Flavors of vec_xor. */
15355 case ALTIVEC_BUILTIN_VXOR:
15356 arg0 = gimple_call_arg (stmt, 0);
15357 arg1 = gimple_call_arg (stmt, 1);
15358 lhs = gimple_call_lhs (stmt);
15359 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15360 gimple_set_location (g, gimple_location (stmt));
15361 gsi_replace (gsi, g, true);
15362 return true;
15363 /* Flavors of vec_nor. */
15364 case ALTIVEC_BUILTIN_VNOR:
15365 arg0 = gimple_call_arg (stmt, 0);
15366 arg1 = gimple_call_arg (stmt, 1);
15367 lhs = gimple_call_lhs (stmt);
15368 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15369 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15370 gimple_set_location (g, gimple_location (stmt));
15371 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15372 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15373 gimple_set_location (g, gimple_location (stmt));
15374 gsi_replace (gsi, g, true);
15375 return true;
15376 /* Flavors of vec_abs. */
15377 case ALTIVEC_BUILTIN_ABS_V16QI:
15378 case ALTIVEC_BUILTIN_ABS_V8HI:
15379 case ALTIVEC_BUILTIN_ABS_V4SI:
15380 case ALTIVEC_BUILTIN_ABS_V4SF:
15381 case P8V_BUILTIN_ABS_V2DI:
15382 case VSX_BUILTIN_XVABSDP:
15383 arg0 = gimple_call_arg (stmt, 0);
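/* Note on the guard below (inferred from the check itself): ABS_EXPR of
   the most negative element would overflow for signed integer types that
   do not wrap, so in that case we punt and leave the builtin to the RTL
   expander instead of folding it. */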
15384 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15385 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15386 return false;
15387 lhs = gimple_call_lhs (stmt);
15388 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15389 gimple_set_location (g, gimple_location (stmt));
15390 gsi_replace (gsi, g, true);
15391 return true;
15392 /* Flavors of vec_min. */
15393 case VSX_BUILTIN_XVMINDP:
15394 case P8V_BUILTIN_VMINSD:
15395 case P8V_BUILTIN_VMINUD:
15396 case ALTIVEC_BUILTIN_VMINSB:
15397 case ALTIVEC_BUILTIN_VMINSH:
15398 case ALTIVEC_BUILTIN_VMINSW:
15399 case ALTIVEC_BUILTIN_VMINUB:
15400 case ALTIVEC_BUILTIN_VMINUH:
15401 case ALTIVEC_BUILTIN_VMINUW:
15402 case ALTIVEC_BUILTIN_VMINFP:
15403 arg0 = gimple_call_arg (stmt, 0);
15404 arg1 = gimple_call_arg (stmt, 1);
15405 lhs = gimple_call_lhs (stmt);
15406 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15407 gimple_set_location (g, gimple_location (stmt));
15408 gsi_replace (gsi, g, true);
15409 return true;
15410 /* Flavors of vec_max. */
15411 case VSX_BUILTIN_XVMAXDP:
15412 case P8V_BUILTIN_VMAXSD:
15413 case P8V_BUILTIN_VMAXUD:
15414 case ALTIVEC_BUILTIN_VMAXSB:
15415 case ALTIVEC_BUILTIN_VMAXSH:
15416 case ALTIVEC_BUILTIN_VMAXSW:
15417 case ALTIVEC_BUILTIN_VMAXUB:
15418 case ALTIVEC_BUILTIN_VMAXUH:
15419 case ALTIVEC_BUILTIN_VMAXUW:
15420 case ALTIVEC_BUILTIN_VMAXFP:
15421 arg0 = gimple_call_arg (stmt, 0);
15422 arg1 = gimple_call_arg (stmt, 1);
15423 lhs = gimple_call_lhs (stmt);
15424 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15425 gimple_set_location (g, gimple_location (stmt));
15426 gsi_replace (gsi, g, true);
15427 return true;
15428 /* Flavors of vec_eqv. */
15429 case P8V_BUILTIN_EQV_V16QI:
15430 case P8V_BUILTIN_EQV_V8HI:
15431 case P8V_BUILTIN_EQV_V4SI:
15432 case P8V_BUILTIN_EQV_V4SF:
15433 case P8V_BUILTIN_EQV_V2DF:
15434 case P8V_BUILTIN_EQV_V2DI:
15435 arg0 = gimple_call_arg (stmt, 0);
15436 arg1 = gimple_call_arg (stmt, 1);
15437 lhs = gimple_call_lhs (stmt);
15438 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15439 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15440 gimple_set_location (g, gimple_location (stmt));
15441 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15442 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15443 gimple_set_location (g, gimple_location (stmt));
15444 gsi_replace (gsi, g, true);
15445 return true;
15446 /* Flavors of vec_rotate_left. */
15447 case ALTIVEC_BUILTIN_VRLB:
15448 case ALTIVEC_BUILTIN_VRLH:
15449 case ALTIVEC_BUILTIN_VRLW:
15450 case P8V_BUILTIN_VRLD:
15451 arg0 = gimple_call_arg (stmt, 0);
15452 arg1 = gimple_call_arg (stmt, 1);
15453 lhs = gimple_call_lhs (stmt);
15454 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15455 gimple_set_location (g, gimple_location (stmt));
15456 gsi_replace (gsi, g, true);
15457 return true;
15458 /* Flavors of vector shift right algebraic.
15459 vec_sra{b,h,w} -> vsra{b,h,w}. */
15460 case ALTIVEC_BUILTIN_VSRAB:
15461 case ALTIVEC_BUILTIN_VSRAH:
15462 case ALTIVEC_BUILTIN_VSRAW:
15463 case P8V_BUILTIN_VSRAD:
15464 {
15465 arg0 = gimple_call_arg (stmt, 0);
15466 arg1 = gimple_call_arg (stmt, 1);
15467 lhs = gimple_call_lhs (stmt);
15468 tree arg1_type = TREE_TYPE (arg1);
15469 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15470 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15471 location_t loc = gimple_location (stmt);
15472 /* Force arg1 into the valid range for the arg0 type. */
15473 /* Build a vector consisting of the max valid bit-size values. */
15474 int n_elts = VECTOR_CST_NELTS (arg1);
15475 tree element_size = build_int_cst (unsigned_element_type,
15476 128 / n_elts);
15477 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15478 for (int i = 0; i < n_elts; i++)
15479 elts.safe_push (element_size);
15480 tree modulo_tree = elts.build ();
15481 /* Modulo the provided shift value against that vector. */
15482 gimple_seq stmts = NULL;
15483 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15484 unsigned_arg1_type, arg1);
15485 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15486 unsigned_arg1_type, unsigned_arg1,
15487 modulo_tree);
15488 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15489 /* And finally, do the shift. */
15490 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15491 gimple_set_location (g, loc);
15492 gsi_replace (gsi, g, true);
15493 return true;
15494 }
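/* Illustration (a sketch): the TRUNC_MOD_EXPR above gives the fold its
   modulo semantics; e.g. with 32-bit elements a shift count of 33 shifts
   by 33 % 32 == 1, matching the vsraw behavior of using only the low-order
   bits of each shift count. */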
15495 /* Flavors of vector shift left.
15496 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15497 case ALTIVEC_BUILTIN_VSLB:
15498 case ALTIVEC_BUILTIN_VSLH:
15499 case ALTIVEC_BUILTIN_VSLW:
15500 case P8V_BUILTIN_VSLD:
15501 {
15502 location_t loc;
15503 gimple_seq stmts = NULL;
15504 arg0 = gimple_call_arg (stmt, 0);
15505 tree arg0_type = TREE_TYPE (arg0);
15506 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15507 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15508 return false;
15509 arg1 = gimple_call_arg (stmt, 1);
15510 tree arg1_type = TREE_TYPE (arg1);
15511 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15512 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15513 loc = gimple_location (stmt);
15514 lhs = gimple_call_lhs (stmt);
15515 /* Force arg1 into the valid range for the arg0 type. */
15516 /* Build a vector consisting of the max valid bit-size values. */
15517 int n_elts = VECTOR_CST_NELTS (arg1);
15518 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15519 * BITS_PER_UNIT;
15520 tree element_size = build_int_cst (unsigned_element_type,
15521 tree_size_in_bits / n_elts);
15522 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15523 for (int i = 0; i < n_elts; i++)
15524 elts.safe_push (element_size);
15525 tree modulo_tree = elts.build ();
15526 /* Modulo the provided shift value against that vector. */
15527 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15528 unsigned_arg1_type, arg1);
15529 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15530 unsigned_arg1_type, unsigned_arg1,
15531 modulo_tree);
15532 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15533 /* And finally, do the shift. */
15534 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15535 gimple_set_location (g, gimple_location (stmt));
15536 gsi_replace (gsi, g, true);
15537 return true;
15538 }
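/* Illustration (a sketch with hypothetical operands; element_bits stands
   in for the per-element bit width): the fold above turns
       vc = vec_sl (va, vb);
   into the GIMPLE equivalent of
       vc = va << (vb % element_bits); */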
15539 /* Flavors of vector shift right. */
15540 case ALTIVEC_BUILTIN_VSRB:
15541 case ALTIVEC_BUILTIN_VSRH:
15542 case ALTIVEC_BUILTIN_VSRW:
15543 case P8V_BUILTIN_VSRD:
15544 {
15545 arg0 = gimple_call_arg (stmt, 0);
15546 arg1 = gimple_call_arg (stmt, 1);
15547 lhs = gimple_call_lhs (stmt);
15548 tree arg1_type = TREE_TYPE (arg1);
15549 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15550 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15551 location_t loc = gimple_location (stmt);
15552 gimple_seq stmts = NULL;
15553 /* Convert arg0 to unsigned. */
15554 tree arg0_unsigned
15555 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15556 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15557 /* Force arg1 into the valid range for the arg0 type. */
15558 /* Build a vector consisting of the max valid bit-size values. */
15559 int n_elts = VECTOR_CST_NELTS (arg1);
15560 tree element_size = build_int_cst (unsigned_element_type,
15561 128 / n_elts);
15562 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15563 for (int i = 0; i < n_elts; i++)
15564 elts.safe_push (element_size);
15565 tree modulo_tree = elts.build ();
15566 /* Modulo the provided shift value against that vector. */
15567 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15568 unsigned_arg1_type, arg1);
15569 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15570 unsigned_arg1_type, unsigned_arg1,
15571 modulo_tree);
15572 /* Do the shift. */
15573 tree res
15574 = gimple_build (&stmts, RSHIFT_EXPR,
15575 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15576 /* Convert result back to the lhs type. */
15577 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15578 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15579 update_call_from_tree (gsi, res);
15580 return true;
15581 }
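/* Note (inferred): the VIEW_CONVERT_EXPRs to unsigned around the
   RSHIFT_EXPR above force a logical (zero-filling) shift, since a right
   shift of a signed operand would be arithmetic; the result is then
   converted back to the type of the lhs. */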
15582 /* Vector loads. */
15583 case ALTIVEC_BUILTIN_LVX_V16QI:
15584 case ALTIVEC_BUILTIN_LVX_V8HI:
15585 case ALTIVEC_BUILTIN_LVX_V4SI:
15586 case ALTIVEC_BUILTIN_LVX_V4SF:
15587 case ALTIVEC_BUILTIN_LVX_V2DI:
15588 case ALTIVEC_BUILTIN_LVX_V2DF:
15589 case ALTIVEC_BUILTIN_LVX_V1TI:
15590 {
15591 arg0 = gimple_call_arg (stmt, 0); // offset
15592 arg1 = gimple_call_arg (stmt, 1); // address
15593 lhs = gimple_call_lhs (stmt);
15594 location_t loc = gimple_location (stmt);
15595 /* Since arg1 may be cast to a different type, just use ptr_type_node
15596 here instead of trying to enforce TBAA on pointer types. */
15597 tree arg1_type = ptr_type_node;
15598 tree lhs_type = TREE_TYPE (lhs);
15599 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15600 the tree using the value from arg0. The resulting type will match
15601 the type of arg1. */
15602 gimple_seq stmts = NULL;
15603 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15604 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15605 arg1_type, arg1, temp_offset);
15606 /* Mask off any lower bits from the address. */
15607 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15608 arg1_type, temp_addr,
15609 build_int_cst (arg1_type, -16));
15610 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15611 if (!is_gimple_mem_ref_addr (aligned_addr))
15612 {
15613 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15614 gimple *g = gimple_build_assign (t, aligned_addr);
15615 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15616 aligned_addr = t;
15617 }
15618 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15619 take an offset, but since we've already incorporated the offset
15620 above, here we just pass in a zero. */
15621 gimple *g
15622 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15623 build_int_cst (arg1_type, 0)));
15624 gimple_set_location (g, loc);
15625 gsi_replace (gsi, g, true);
15626 return true;
15627 }
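/* Note (inferred): masking the address with -16 above mirrors the lvx
   semantics of ignoring the low four bits of the effective address, so
   the MEM_REF always reads from a 16-byte aligned location. */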
15628 /* Vector stores. */
15629 case ALTIVEC_BUILTIN_STVX_V16QI:
15630 case ALTIVEC_BUILTIN_STVX_V8HI:
15631 case ALTIVEC_BUILTIN_STVX_V4SI:
15632 case ALTIVEC_BUILTIN_STVX_V4SF:
15633 case ALTIVEC_BUILTIN_STVX_V2DI:
15634 case ALTIVEC_BUILTIN_STVX_V2DF:
15635 {
15636 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15637 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15638 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15639 location_t loc = gimple_location (stmt);
15640 tree arg0_type = TREE_TYPE (arg0);
15641 /* Use ptr_type_node (no TBAA) for the arg2_type.
15642 FIXME: (Richard) "A proper fix would be to transition this type as
15643 seen from the frontend to GIMPLE, for example in a similar way we
15644 do for MEM_REFs by piggy-backing that on an extra argument, a
15645 constant zero pointer of the alias pointer type to use (which would
15646 also serve as a type indicator of the store itself). I'd use a
15647 target specific internal function for this (not sure if we can have
15648 those target specific, but I guess if it's folded away then that's
15649 fine) and get away with the overload set." */
15650 tree arg2_type = ptr_type_node;
15651 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15652 the tree using the value from arg1. The resulting type will match
15653 the type of arg2. */
15654 gimple_seq stmts = NULL;
15655 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15656 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15657 arg2_type, arg2, temp_offset);
15658 /* Mask off any lower bits from the address. */
15659 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15660 arg2_type, temp_addr,
15661 build_int_cst (arg2_type, -16));
15662 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15663 if (!is_gimple_mem_ref_addr (aligned_addr))
15664 {
15665 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15666 gimple *g = gimple_build_assign (t, aligned_addr);
15667 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15668 aligned_addr = t;
15669 }
15670 /* The desired gimple result should be similar to:
15671 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15672 gimple *g
15673 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15674 build_int_cst (arg2_type, 0)), arg0);
15675 gimple_set_location (g, loc);
15676 gsi_replace (gsi, g, true);
15677 return true;
15678 }
15679
15680 /* Unaligned vector loads. */
15681 case VSX_BUILTIN_LXVW4X_V16QI:
15682 case VSX_BUILTIN_LXVW4X_V8HI:
15683 case VSX_BUILTIN_LXVW4X_V4SF:
15684 case VSX_BUILTIN_LXVW4X_V4SI:
15685 case VSX_BUILTIN_LXVD2X_V2DF:
15686 case VSX_BUILTIN_LXVD2X_V2DI:
15687 {
15688 arg0 = gimple_call_arg (stmt, 0); // offset
15689 arg1 = gimple_call_arg (stmt, 1); // address
15690 lhs = gimple_call_lhs (stmt);
15691 location_t loc = gimple_location (stmt);
15692 /* Since arg1 may be cast to a different type, just use ptr_type_node
15693 here instead of trying to enforce TBAA on pointer types. */
15694 tree arg1_type = ptr_type_node;
15695 tree lhs_type = TREE_TYPE (lhs);
15696 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15697 required alignment (power) is 4 bytes regardless of data type. */
15698 tree align_ltype = build_aligned_type (lhs_type, 4);
15699 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15700 the tree using the value from arg0. The resulting type will match
15701 the type of arg1. */
15702 gimple_seq stmts = NULL;
15703 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15704 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15705 arg1_type, arg1, temp_offset);
15706 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15707 if (!is_gimple_mem_ref_addr (temp_addr))
15708 {
15709 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15710 gimple *g = gimple_build_assign (t, temp_addr);
15711 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15712 temp_addr = t;
15713 }
15714 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15715 take an offset, but since we've already incorporated the offset
15716 above, here we just pass in a zero. */
15717 gimple *g;
15718 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15719 build_int_cst (arg1_type, 0)));
15720 gimple_set_location (g, loc);
15721 gsi_replace (gsi, g, true);
15722 return true;
15723 }
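/* Note (inferred): giving the MEM_REF a 4-byte aligned variant of the lhs
   type tells later passes not to assume the natural 16-byte vector
   alignment, which is what makes this unaligned lxv* load fold safe. */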
15724
15725 /* Unaligned vector stores. */
15726 case VSX_BUILTIN_STXVW4X_V16QI:
15727 case VSX_BUILTIN_STXVW4X_V8HI:
15728 case VSX_BUILTIN_STXVW4X_V4SF:
15729 case VSX_BUILTIN_STXVW4X_V4SI:
15730 case VSX_BUILTIN_STXVD2X_V2DF:
15731 case VSX_BUILTIN_STXVD2X_V2DI:
15732 {
15733 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15734 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15735 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15736 location_t loc = gimple_location (stmt);
15737 tree arg0_type = TREE_TYPE (arg0);
15738 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15739 tree arg2_type = ptr_type_node;
15740 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15741 required alignment (power) is 4 bytes regardless of data type. */
15742 tree align_stype = build_aligned_type (arg0_type, 4);
15743 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15744 the tree using the value from arg1. */
15745 gimple_seq stmts = NULL;
15746 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15747 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15748 arg2_type, arg2, temp_offset);
15749 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15750 if (!is_gimple_mem_ref_addr (temp_addr))
15751 {
15752 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15753 gimple *g = gimple_build_assign (t, temp_addr);
15754 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15755 temp_addr = t;
15756 }
15757 gimple *g;
15758 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15759 build_int_cst (arg2_type, 0)), arg0);
15760 gimple_set_location (g, loc);
15761 gsi_replace (gsi, g, true);
15762 return true;
15763 }
15764
15765 /* Vector Fused multiply-add (fma). */
15766 case ALTIVEC_BUILTIN_VMADDFP:
15767 case VSX_BUILTIN_XVMADDDP:
15768 case ALTIVEC_BUILTIN_VMLADDUHM:
15769 {
15770 arg0 = gimple_call_arg (stmt, 0);
15771 arg1 = gimple_call_arg (stmt, 1);
15772 tree arg2 = gimple_call_arg (stmt, 2);
15773 lhs = gimple_call_lhs (stmt);
15774 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15775 gimple_call_set_lhs (g, lhs);
15776 gimple_call_set_nothrow (g, true);
15777 gimple_set_location (g, gimple_location (stmt));
15778 gsi_replace (gsi, g, true);
15779 return true;
15780 }
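/* Illustration (a sketch with hypothetical operands): the IFN_FMA call
   built above computes a per-element fused multiply-add, i.e.
       vd = vec_madd (va, vb, vc);
   behaves like vd = va * vb + vc, with a single rounding step in the
   floating-point variants. */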
15781
15782 /* Vector compares; EQ, NE, GE, GT, LE. */
15783 case ALTIVEC_BUILTIN_VCMPEQUB:
15784 case ALTIVEC_BUILTIN_VCMPEQUH:
15785 case ALTIVEC_BUILTIN_VCMPEQUW:
15786 case P8V_BUILTIN_VCMPEQUD:
15787 fold_compare_helper (gsi, EQ_EXPR, stmt);
15788 return true;
15789
15790 case P9V_BUILTIN_CMPNEB:
15791 case P9V_BUILTIN_CMPNEH:
15792 case P9V_BUILTIN_CMPNEW:
15793 fold_compare_helper (gsi, NE_EXPR, stmt);
15794 return true;
15795
15796 case VSX_BUILTIN_CMPGE_16QI:
15797 case VSX_BUILTIN_CMPGE_U16QI:
15798 case VSX_BUILTIN_CMPGE_8HI:
15799 case VSX_BUILTIN_CMPGE_U8HI:
15800 case VSX_BUILTIN_CMPGE_4SI:
15801 case VSX_BUILTIN_CMPGE_U4SI:
15802 case VSX_BUILTIN_CMPGE_2DI:
15803 case VSX_BUILTIN_CMPGE_U2DI:
15804 fold_compare_helper (gsi, GE_EXPR, stmt);
15805 return true;
15806
15807 case ALTIVEC_BUILTIN_VCMPGTSB:
15808 case ALTIVEC_BUILTIN_VCMPGTUB:
15809 case ALTIVEC_BUILTIN_VCMPGTSH:
15810 case ALTIVEC_BUILTIN_VCMPGTUH:
15811 case ALTIVEC_BUILTIN_VCMPGTSW:
15812 case ALTIVEC_BUILTIN_VCMPGTUW:
15813 case P8V_BUILTIN_VCMPGTUD:
15814 case P8V_BUILTIN_VCMPGTSD:
15815 fold_compare_helper (gsi, GT_EXPR, stmt);
15816 return true;
15817
15818 case VSX_BUILTIN_CMPLE_16QI:
15819 case VSX_BUILTIN_CMPLE_U16QI:
15820 case VSX_BUILTIN_CMPLE_8HI:
15821 case VSX_BUILTIN_CMPLE_U8HI:
15822 case VSX_BUILTIN_CMPLE_4SI:
15823 case VSX_BUILTIN_CMPLE_U4SI:
15824 case VSX_BUILTIN_CMPLE_2DI:
15825 case VSX_BUILTIN_CMPLE_U2DI:
15826 fold_compare_helper (gsi, LE_EXPR, stmt);
15827 return true;
15828
15829 /* Flavors of vec_splat_[us]{8,16,32}. */
15830 case ALTIVEC_BUILTIN_VSPLTISB:
15831 case ALTIVEC_BUILTIN_VSPLTISH:
15832 case ALTIVEC_BUILTIN_VSPLTISW:
15833 {
15834 arg0 = gimple_call_arg (stmt, 0);
15835 lhs = gimple_call_lhs (stmt);
15836
15837 /* Only fold the vec_splat_*() if the lower bits of arg0 form a
15838 5-bit signed constant in the range -16 to +15. */
15839 if (TREE_CODE (arg0) != INTEGER_CST
15840 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
15841 return false;
15842 gimple_seq stmts = NULL;
15843 location_t loc = gimple_location (stmt);
15844 tree splat_value = gimple_convert (&stmts, loc,
15845 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15846 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15847 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15848 g = gimple_build_assign (lhs, splat_tree);
15849 gimple_set_location (g, gimple_location (stmt));
15850 gsi_replace (gsi, g, true);
15851 return true;
15852 }
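/* Illustration (a sketch): with an in-range constant argument, e.g.
       vc = vec_splat_s32 (5);
   the fold above builds the vector constant { 5, 5, 5, 5 } directly. */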
15853
15854 /* Flavors of vec_splat. */
15855 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
15856 case ALTIVEC_BUILTIN_VSPLTB:
15857 case ALTIVEC_BUILTIN_VSPLTH:
15858 case ALTIVEC_BUILTIN_VSPLTW:
15859 case VSX_BUILTIN_XXSPLTD_V2DI:
15860 case VSX_BUILTIN_XXSPLTD_V2DF:
15861 {
15862 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15863 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
15864 /* Only fold the vec_splat () if arg1 is both a constant value
15865 and a valid index into the arg0 vector. */
15866 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15867 if (TREE_CODE (arg1) != INTEGER_CST
15868 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15869 return false;
15870 lhs = gimple_call_lhs (stmt);
15871 tree lhs_type = TREE_TYPE (lhs);
15872 tree arg0_type = TREE_TYPE (arg0);
15873 tree splat;
15874 if (TREE_CODE (arg0) == VECTOR_CST)
15875 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15876 else
15877 {
15878 /* Determine (in bits) the length and start location of the
15879 splat value for a call to the tree_vec_extract helper. */
15880 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15881 * BITS_PER_UNIT / n_elts;
15882 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
15883 tree len = build_int_cst (bitsizetype, splat_elem_size);
15884 tree start = build_int_cst (bitsizetype, splat_start_bit);
15885 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
15886 len, start);
15887 }
15888 /* And finally, build the new vector. */
15889 tree splat_tree = build_vector_from_val (lhs_type, splat);
15890 g = gimple_build_assign (lhs, splat_tree);
15891 gimple_set_location (g, gimple_location (stmt));
15892 gsi_replace (gsi, g, true);
15893 return true;
15894 }
15895
15896 /* vec_mergel (integrals). */
15897 case ALTIVEC_BUILTIN_VMRGLH:
15898 case ALTIVEC_BUILTIN_VMRGLW:
15899 case VSX_BUILTIN_XXMRGLW_4SI:
15900 case ALTIVEC_BUILTIN_VMRGLB:
15901 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15902 case VSX_BUILTIN_XXMRGLW_4SF:
15903 case VSX_BUILTIN_VEC_MERGEL_V2DF:
15904 fold_mergehl_helper (gsi, stmt, 1);
15905 return true;
15906 /* vec_mergeh (integrals). */
15907 case ALTIVEC_BUILTIN_VMRGHH:
15908 case ALTIVEC_BUILTIN_VMRGHW:
15909 case VSX_BUILTIN_XXMRGHW_4SI:
15910 case ALTIVEC_BUILTIN_VMRGHB:
15911 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15912 case VSX_BUILTIN_XXMRGHW_4SF:
15913 case VSX_BUILTIN_VEC_MERGEH_V2DF:
15914 fold_mergehl_helper (gsi, stmt, 0);
15915 return true;
15916
15917 /* Flavors of vec_mergee. */
15918 case P8V_BUILTIN_VMRGEW_V4SI:
15919 case P8V_BUILTIN_VMRGEW_V2DI:
15920 case P8V_BUILTIN_VMRGEW_V4SF:
15921 case P8V_BUILTIN_VMRGEW_V2DF:
15922 fold_mergeeo_helper (gsi, stmt, 0);
15923 return true;
15924 /* Flavors of vec_mergeo. */
15925 case P8V_BUILTIN_VMRGOW_V4SI:
15926 case P8V_BUILTIN_VMRGOW_V2DI:
15927 case P8V_BUILTIN_VMRGOW_V4SF:
15928 case P8V_BUILTIN_VMRGOW_V2DF:
15929 fold_mergeeo_helper (gsi, stmt, 1);
15930 return true;
15931
15932 /* d = vec_pack (a, b). */
15933 case P8V_BUILTIN_VPKUDUM:
15934 case ALTIVEC_BUILTIN_VPKUHUM:
15935 case ALTIVEC_BUILTIN_VPKUWUM:
15936 {
15937 arg0 = gimple_call_arg (stmt, 0);
15938 arg1 = gimple_call_arg (stmt, 1);
15939 lhs = gimple_call_lhs (stmt);
15940 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
15941 gimple_set_location (g, gimple_location (stmt));
15942 gsi_replace (gsi, g, true);
15943 return true;
15944 }
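/* Illustration (a sketch): VEC_PACK_TRUNC_EXPR narrows each element of
   the two input vectors to half its width and concatenates the results,
   so e.g. two V2DI inputs produce a single V4SI result. */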
15945
15946 /* d = vec_unpackh (a). */
15947 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
15948 in this code is sensitive to endianness, and needs to be inverted to
15949 handle both LE and BE targets. */
15950 case ALTIVEC_BUILTIN_VUPKHSB:
15951 case ALTIVEC_BUILTIN_VUPKHSH:
15952 case P8V_BUILTIN_VUPKHSW:
15953 {
15954 arg0 = gimple_call_arg (stmt, 0);
15955 lhs = gimple_call_lhs (stmt);
15956 if (BYTES_BIG_ENDIAN)
15957 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15958 else
15959 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15960 gimple_set_location (g, gimple_location (stmt));
15961 gsi_replace (gsi, g, true);
15962 return true;
15963 }
15964 /* d = vec_unpackl (a). */
15965 case ALTIVEC_BUILTIN_VUPKLSB:
15966 case ALTIVEC_BUILTIN_VUPKLSH:
15967 case P8V_BUILTIN_VUPKLSW:
15968 {
15969 arg0 = gimple_call_arg (stmt, 0);
15970 lhs = gimple_call_lhs (stmt);
15971 if (BYTES_BIG_ENDIAN)
15972 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15973 else
15974 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15975 gimple_set_location (g, gimple_location (stmt));
15976 gsi_replace (gsi, g, true);
15977 return true;
15978 }
15979 /* There is no gimple type corresponding to pixel, so just return. */
15980 case ALTIVEC_BUILTIN_VUPKHPX:
15981 case ALTIVEC_BUILTIN_VUPKLPX:
15982 return false;
15983
15984 /* vec_perm. */
15985 case ALTIVEC_BUILTIN_VPERM_16QI:
15986 case ALTIVEC_BUILTIN_VPERM_8HI:
15987 case ALTIVEC_BUILTIN_VPERM_4SI:
15988 case ALTIVEC_BUILTIN_VPERM_2DI:
15989 case ALTIVEC_BUILTIN_VPERM_4SF:
15990 case ALTIVEC_BUILTIN_VPERM_2DF:
15991 {
15992 arg0 = gimple_call_arg (stmt, 0);
15993 arg1 = gimple_call_arg (stmt, 1);
15994 tree permute = gimple_call_arg (stmt, 2);
15995 lhs = gimple_call_lhs (stmt);
15996 location_t loc = gimple_location (stmt);
15997 gimple_seq stmts = NULL;
15998 // Convert arg0 and arg1 to match the type of the permute
15999 // for the VEC_PERM_EXPR operation.
16000 tree permute_type = (TREE_TYPE (permute));
16001 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
16002 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
16003 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
16004 permute_type, arg0_ptype, arg1_ptype,
16005 permute);
16006 // Convert the result back to the desired lhs type upon completion.
16007 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
16008 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
16009 g = gimple_build_assign (lhs, temp);
16010 gimple_set_location (g, loc);
16011 gsi_replace (gsi, g, true);
16012 return true;
16013 }
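/* Note (inferred): the conversions above exist because VEC_PERM_EXPR
   wants its two input operands to have the same shape as the (char
   vector) selector; the permuted value is converted back to the type of
   the lhs before the final assignment. */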
16014
16015 default:
16016 if (TARGET_DEBUG_BUILTIN)
16017 fprintf (stderr, "gimple builtin intrinsic not matched: %d %s %s\n",
16018 fn_code, fn_name1, fn_name2);
16019 break;
16020 }
16021
16022 return false;
16023 }
16024
16025 /* Expand an expression EXP that calls a built-in function,
16026 with result going to TARGET if that's convenient
16027 (and in mode MODE if that's convenient).
16028 SUBTARGET may be used as the target for computing one of EXP's operands.
16029 IGNORE is nonzero if the value is to be ignored. */
16030
16031 static rtx
16032 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16033 machine_mode mode ATTRIBUTE_UNUSED,
16034 int ignore ATTRIBUTE_UNUSED)
16035 {
16036 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16037 enum rs6000_builtins fcode
16038 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16039 size_t uns_fcode = (size_t)fcode;
16040 const struct builtin_description *d;
16041 size_t i;
16042 rtx ret;
16043 bool success;
16044 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16045 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16046 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16047
16048 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16049 floating point type, depending on whether long double is the IBM extended
16050 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16051 we only define one variant of the built-in function, and switch the code
16052 when defining it, rather than defining two built-ins and using the
16053 overload table in rs6000-c.c to switch between the two. If we don't have
16054 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16055 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16056 if (FLOAT128_IEEE_P (TFmode))
16057 switch (icode)
16058 {
16059 default:
16060 break;
16061
16062 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16063 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16064 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16065 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16066 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16067 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16068 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16069 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16070 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16071 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16072 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16073 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16074 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16075 }
16076
16077 if (TARGET_DEBUG_BUILTIN)
16078 {
16079 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16080 const char *name2 = (icode != CODE_FOR_nothing)
16081 ? get_insn_name ((int) icode)
16082 : "nothing";
16083 const char *name3;
16084
16085 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16086 {
16087 default: name3 = "unknown"; break;
16088 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16089 case RS6000_BTC_UNARY: name3 = "unary"; break;
16090 case RS6000_BTC_BINARY: name3 = "binary"; break;
16091 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16092 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16093 case RS6000_BTC_ABS: name3 = "abs"; break;
16094 case RS6000_BTC_DST: name3 = "dst"; break;
16095 }
16096
16098 fprintf (stderr,
16099 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16100 (name1) ? name1 : "---", fcode,
16101 (name2) ? name2 : "---", (int) icode,
16102 name3,
16103 func_valid_p ? "" : ", not valid");
16104 }
16105
16106 if (!func_valid_p)
16107 {
16108 rs6000_invalid_builtin (fcode);
16109
16110 /* Given it is invalid, just generate a normal call. */
16111 return expand_call (exp, target, ignore);
16112 }
16113
16114 switch (fcode)
16115 {
16116 case RS6000_BUILTIN_RECIP:
16117 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16118
16119 case RS6000_BUILTIN_RECIPF:
16120 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16121
16122 case RS6000_BUILTIN_RSQRTF:
16123 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16124
16125 case RS6000_BUILTIN_RSQRT:
16126 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16127
16128 case POWER7_BUILTIN_BPERMD:
16129 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16130 ? CODE_FOR_bpermd_di
16131 : CODE_FOR_bpermd_si), exp, target);
16132
16133 case RS6000_BUILTIN_GET_TB:
16134 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16135 target);
16136
16137 case RS6000_BUILTIN_MFTB:
16138 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16139 ? CODE_FOR_rs6000_mftb_di
16140 : CODE_FOR_rs6000_mftb_si),
16141 target);
16142
16143 case RS6000_BUILTIN_MFFS:
16144 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16145
16146 case RS6000_BUILTIN_MTFSB0:
16147 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16148
16149 case RS6000_BUILTIN_MTFSB1:
16150 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16151
16152 case RS6000_BUILTIN_SET_FPSCR_RN:
16153 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16154 exp);
16155
16156 case RS6000_BUILTIN_SET_FPSCR_DRN:
16157 return
16158 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16159 exp);
16160
16161 case RS6000_BUILTIN_MFFSL:
16162 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16163
16164 case RS6000_BUILTIN_MTFSF:
16165 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16166
16167 case RS6000_BUILTIN_CPU_INIT:
16168 case RS6000_BUILTIN_CPU_IS:
16169 case RS6000_BUILTIN_CPU_SUPPORTS:
16170 return cpu_expand_builtin (fcode, exp, target);
16171
16172 case MISC_BUILTIN_SPEC_BARRIER:
16173 {
16174 emit_insn (gen_speculation_barrier ());
16175 return NULL_RTX;
16176 }
16177
16178 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16179 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16180 {
16181 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16182 : (int) CODE_FOR_altivec_lvsl_direct);
16183 machine_mode tmode = insn_data[icode2].operand[0].mode;
16184 machine_mode mode = insn_data[icode2].operand[1].mode;
16185 tree arg;
16186 rtx op, addr, pat;
16187
16188 gcc_assert (TARGET_ALTIVEC);
16189
16190 arg = CALL_EXPR_ARG (exp, 0);
16191 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16192 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16193 addr = memory_address (mode, op);
16194 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16195 op = addr;
16196 else
16197 {
16198 /* For the load case, we need to negate the address. */
16199 op = gen_reg_rtx (GET_MODE (addr));
16200 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16201 }
16202 op = gen_rtx_MEM (mode, op);
16203
16204 if (target == 0
16205 || GET_MODE (target) != tmode
16206 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16207 target = gen_reg_rtx (tmode);
16208
16209 pat = GEN_FCN (icode2) (target, op);
16210 if (!pat)
16211 return 0;
16212 emit_insn (pat);
16213
16214 return target;
16215 }
16216
16217 case ALTIVEC_BUILTIN_VCFUX:
16218 case ALTIVEC_BUILTIN_VCFSX:
16219 case ALTIVEC_BUILTIN_VCTUXS:
16220 case ALTIVEC_BUILTIN_VCTSXS:
16221 /* FIXME: There's got to be a nicer way to handle this case than
16222 constructing a new CALL_EXPR. */
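/* Note (inferred): a call with a single operand gets an explicit zero
   appended as its second (scale) operand, so e.g. vec_ctf (va) is
   handled as vec_ctf (va, 0). */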
16223 if (call_expr_nargs (exp) == 1)
16224 {
16225 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16226 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16227 }
16228 break;
16229
16230 /* For the pack and unpack int128 routines, fix up the builtin so it
16231 uses the correct IBM128 type. */
16232 case MISC_BUILTIN_PACK_IF:
16233 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16234 {
16235 icode = CODE_FOR_packtf;
16236 fcode = MISC_BUILTIN_PACK_TF;
16237 uns_fcode = (size_t)fcode;
16238 }
16239 break;
16240
16241 case MISC_BUILTIN_UNPACK_IF:
16242 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16243 {
16244 icode = CODE_FOR_unpacktf;
16245 fcode = MISC_BUILTIN_UNPACK_TF;
16246 uns_fcode = (size_t)fcode;
16247 }
16248 break;
16249
16250 default:
16251 break;
16252 }
16253
16254 if (TARGET_ALTIVEC)
16255 {
16256 ret = altivec_expand_builtin (exp, target, &success);
16257
16258 if (success)
16259 return ret;
16260 }
16261 if (TARGET_HTM)
16262 {
16263 ret = htm_expand_builtin (exp, target, &success);
16264
16265 if (success)
16266 return ret;
16267 }
16268
16269 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16270 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16271 gcc_assert (attr == RS6000_BTC_UNARY
16272 || attr == RS6000_BTC_BINARY
16273 || attr == RS6000_BTC_TERNARY
16274 || attr == RS6000_BTC_SPECIAL);
16275
16276 /* Handle simple unary operations. */
16277 d = bdesc_1arg;
16278 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16279 if (d->code == fcode)
16280 return rs6000_expand_unop_builtin (icode, exp, target);
16281
16282 /* Handle simple binary operations. */
16283 d = bdesc_2arg;
16284 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16285 if (d->code == fcode)
16286 return rs6000_expand_binop_builtin (icode, exp, target);
16287
16288 /* Handle simple ternary operations. */
16289 d = bdesc_3arg;
16290 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16291 if (d->code == fcode)
16292 return rs6000_expand_ternop_builtin (icode, exp, target);
16293
16294 /* Handle simple no-argument operations. */
16295 d = bdesc_0arg;
16296 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16297 if (d->code == fcode)
16298 return rs6000_expand_zeroop_builtin (icode, target);
16299
16300 gcc_unreachable ();
16301 }
16302
16303 /* Create a builtin vector type with a name, taking care not to give
16304 the canonical type a name. */
16305
16306 static tree
16307 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16308 {
16309 tree result = build_vector_type (elt_type, num_elts);
16310
16311 /* Copy so we don't give the canonical type a name. */
16312 result = build_variant_type_copy (result);
16313
16314 add_builtin_type (name, result);
16315
16316 return result;
16317 }
16318
16319 static void
16320 rs6000_init_builtins (void)
16321 {
16322 tree tdecl;
16323 tree ftype;
16324 machine_mode mode;
16325
16326 if (TARGET_DEBUG_BUILTIN)
16327 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16328 (TARGET_ALTIVEC) ? ", altivec" : "",
16329 (TARGET_VSX) ? ", vsx" : "");
16330
16331 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16332 : "__vector long long",
16333 intDI_type_node, 2);
16334 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16335 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16336 intSI_type_node, 4);
16337 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16338 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16339 intHI_type_node, 8);
16340 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16341 intQI_type_node, 16);
16342
16343 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16344 unsigned_intQI_type_node, 16);
16345 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16346 unsigned_intHI_type_node, 8);
16347 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16348 unsigned_intSI_type_node, 4);
16349 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16350 ? "__vector unsigned long"
16351 : "__vector unsigned long long",
16352 unsigned_intDI_type_node, 2);
16353
16354 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16355
16356 const_str_type_node
16357 = build_pointer_type (build_qualified_type (char_type_node,
16358 TYPE_QUAL_CONST));
16359
16360 /* We use V1TI mode as a special container to hold __int128_t items that
16361 must live in VSX registers. */
16362 if (intTI_type_node)
16363 {
16364 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16365 intTI_type_node, 1);
16366 unsigned_V1TI_type_node
16367 = rs6000_vector_type ("__vector unsigned __int128",
16368 unsigned_intTI_type_node, 1);
16369 }
16370
16371 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16372 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16373 'vector unsigned short'. */
16374
16375 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16376 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16377 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16378 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16379 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16380
16381 long_integer_type_internal_node = long_integer_type_node;
16382 long_unsigned_type_internal_node = long_unsigned_type_node;
16383 long_long_integer_type_internal_node = long_long_integer_type_node;
16384 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16385 intQI_type_internal_node = intQI_type_node;
16386 uintQI_type_internal_node = unsigned_intQI_type_node;
16387 intHI_type_internal_node = intHI_type_node;
16388 uintHI_type_internal_node = unsigned_intHI_type_node;
16389 intSI_type_internal_node = intSI_type_node;
16390 uintSI_type_internal_node = unsigned_intSI_type_node;
16391 intDI_type_internal_node = intDI_type_node;
16392 uintDI_type_internal_node = unsigned_intDI_type_node;
16393 intTI_type_internal_node = intTI_type_node;
16394 uintTI_type_internal_node = unsigned_intTI_type_node;
16395 float_type_internal_node = float_type_node;
16396 double_type_internal_node = double_type_node;
16397 long_double_type_internal_node = long_double_type_node;
16398 dfloat64_type_internal_node = dfloat64_type_node;
16399 dfloat128_type_internal_node = dfloat128_type_node;
16400 void_type_internal_node = void_type_node;
16401
16402 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16403 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16404 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16405 format that uses a pair of doubles, depending on the switches and
16406 defaults.
16407
16408 If we don't support either 128-bit IBM double double or IEEE 128-bit
16409 floating point, we need to make sure the type is non-zero or else the
16410 self-test fails during bootstrap.
16411
16412 Always create __ibm128 as a separate type, even if the current long double
16413 format is IBM extended double.
16414
16415 For IEEE 128-bit floating point, always create the type __ieee128. If the
16416 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16417 __ieee128. */
16418 if (TARGET_FLOAT128_TYPE)
16419 {
16420 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16421 ibm128_float_type_node = long_double_type_node;
16422 else
16423 {
16424 ibm128_float_type_node = make_node (REAL_TYPE);
16425 TYPE_PRECISION (ibm128_float_type_node) = 128;
16426 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16427 layout_type (ibm128_float_type_node);
16428 }
16429
16430 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16431 "__ibm128");
16432
16433 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16434 ieee128_float_type_node = long_double_type_node;
16435 else
16436 ieee128_float_type_node = float128_type_node;
16437
16438 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16439 "__ieee128");
16440 }
16441
16442 else
16443 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16444
16445 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16446 tree type node. */
16447 builtin_mode_to_type[QImode][0] = integer_type_node;
16448 builtin_mode_to_type[HImode][0] = integer_type_node;
16449 builtin_mode_to_type[SImode][0] = intSI_type_node;
16450 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16451 builtin_mode_to_type[DImode][0] = intDI_type_node;
16452 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16453 builtin_mode_to_type[TImode][0] = intTI_type_node;
16454 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16455 builtin_mode_to_type[SFmode][0] = float_type_node;
16456 builtin_mode_to_type[DFmode][0] = double_type_node;
16457 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16458 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16459 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16460 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16461 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16462 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16463 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16464 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16465 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16466 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16467 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16468 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16469 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16470 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16471 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16472 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16473 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16474
16475 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16476 TYPE_NAME (bool_char_type_node) = tdecl;
16477
16478 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16479 TYPE_NAME (bool_short_type_node) = tdecl;
16480
16481 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16482 TYPE_NAME (bool_int_type_node) = tdecl;
16483
16484 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16485 TYPE_NAME (pixel_type_node) = tdecl;
16486
16487 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16488 bool_char_type_node, 16);
16489 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16490 bool_short_type_node, 8);
16491 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16492 bool_int_type_node, 4);
16493 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16494 ? "__vector __bool long"
16495 : "__vector __bool long long",
16496 bool_long_long_type_node, 2);
16497 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16498 pixel_type_node, 8);
16499
16500 /* Create Altivec and VSX builtins on machines with at least the
16501 general purpose extensions (970 and newer) to allow the use of
16502 the target attribute. */
16503 if (TARGET_EXTRA_BUILTINS)
16504 altivec_init_builtins ();
16505 if (TARGET_HTM)
16506 htm_init_builtins ();
16507
16508 if (TARGET_EXTRA_BUILTINS)
16509 rs6000_common_init_builtins ();
16510
16511 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16512 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16513 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16514
16515 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16516 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16517 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16518
16519 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16520 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16521 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16522
16523 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16524 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16525 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16526
16527 mode = (TARGET_64BIT) ? DImode : SImode;
16528 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16529 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16530 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16531
16532 ftype = build_function_type_list (unsigned_intDI_type_node,
16533 NULL_TREE);
16534 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16535
16536 if (TARGET_64BIT)
16537 ftype = build_function_type_list (unsigned_intDI_type_node,
16538 NULL_TREE);
16539 else
16540 ftype = build_function_type_list (unsigned_intSI_type_node,
16541 NULL_TREE);
16542 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16543
16544 ftype = build_function_type_list (double_type_node, NULL_TREE);
16545 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16546
16547 ftype = build_function_type_list (double_type_node, NULL_TREE);
16548 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16549
16550 ftype = build_function_type_list (void_type_node,
16551 intSI_type_node,
16552 NULL_TREE);
16553 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16554
16555 ftype = build_function_type_list (void_type_node,
16556 intSI_type_node,
16557 NULL_TREE);
16558 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16559
16560 ftype = build_function_type_list (void_type_node,
16561 intDI_type_node,
16562 NULL_TREE);
16563 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16564
16565 ftype = build_function_type_list (void_type_node,
16566 intDI_type_node,
16567 NULL_TREE);
16568 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16569
16570 ftype = build_function_type_list (void_type_node,
16571 intSI_type_node, double_type_node,
16572 NULL_TREE);
16573 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16574
16575 ftype = build_function_type_list (void_type_node, NULL_TREE);
16576 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16577 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16578 MISC_BUILTIN_SPEC_BARRIER);
16579
16580 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16581 NULL_TREE);
16582 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16583 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16584
16585 /* AIX libm provides clog as __clog. */
16586 if (TARGET_XCOFF
16587 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16588 set_user_assembler_name (tdecl, "__clog");
16589
16590 #ifdef SUBTARGET_INIT_BUILTINS
16591 SUBTARGET_INIT_BUILTINS;
16592 #endif
16593 }
16594
16595 /* Returns the rs6000 builtin decl for CODE. */
16596
16597 static tree
16598 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16599 {
16600 HOST_WIDE_INT fnmask;
16601
16602 if (code >= RS6000_BUILTIN_COUNT)
16603 return error_mark_node;
16604
16605 fnmask = rs6000_builtin_info[code].mask;
16606 if ((fnmask & rs6000_builtin_mask) != fnmask)
16607 {
16608 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16609 return error_mark_node;
16610 }
16611
16612 return rs6000_builtin_decls[code];
16613 }
16614
16615 static void
16616 altivec_init_builtins (void)
16617 {
16618 const struct builtin_description *d;
16619 size_t i;
16620 tree ftype;
16621 tree decl;
16622 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16623
16624 tree pvoid_type_node = build_pointer_type (void_type_node);
16625
16626 tree pcvoid_type_node
16627 = build_pointer_type (build_qualified_type (void_type_node,
16628 TYPE_QUAL_CONST));
16629
16630 tree int_ftype_opaque
16631 = build_function_type_list (integer_type_node,
16632 opaque_V4SI_type_node, NULL_TREE);
16633 tree opaque_ftype_opaque
16634 = build_function_type_list (integer_type_node, NULL_TREE);
16635 tree opaque_ftype_opaque_int
16636 = build_function_type_list (opaque_V4SI_type_node,
16637 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16638 tree opaque_ftype_opaque_opaque_int
16639 = build_function_type_list (opaque_V4SI_type_node,
16640 opaque_V4SI_type_node, opaque_V4SI_type_node,
16641 integer_type_node, NULL_TREE);
16642 tree opaque_ftype_opaque_opaque_opaque
16643 = build_function_type_list (opaque_V4SI_type_node,
16644 opaque_V4SI_type_node, opaque_V4SI_type_node,
16645 opaque_V4SI_type_node, NULL_TREE);
16646 tree opaque_ftype_opaque_opaque
16647 = build_function_type_list (opaque_V4SI_type_node,
16648 opaque_V4SI_type_node, opaque_V4SI_type_node,
16649 NULL_TREE);
16650 tree int_ftype_int_opaque_opaque
16651 = build_function_type_list (integer_type_node,
16652 integer_type_node, opaque_V4SI_type_node,
16653 opaque_V4SI_type_node, NULL_TREE);
16654 tree int_ftype_int_v4si_v4si
16655 = build_function_type_list (integer_type_node,
16656 integer_type_node, V4SI_type_node,
16657 V4SI_type_node, NULL_TREE);
16658 tree int_ftype_int_v2di_v2di
16659 = build_function_type_list (integer_type_node,
16660 integer_type_node, V2DI_type_node,
16661 V2DI_type_node, NULL_TREE);
16662 tree void_ftype_v4si
16663 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16664 tree v8hi_ftype_void
16665 = build_function_type_list (V8HI_type_node, NULL_TREE);
16666 tree void_ftype_void
16667 = build_function_type_list (void_type_node, NULL_TREE);
16668 tree void_ftype_int
16669 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16670
16671 tree opaque_ftype_long_pcvoid
16672 = build_function_type_list (opaque_V4SI_type_node,
16673 long_integer_type_node, pcvoid_type_node,
16674 NULL_TREE);
16675 tree v16qi_ftype_long_pcvoid
16676 = build_function_type_list (V16QI_type_node,
16677 long_integer_type_node, pcvoid_type_node,
16678 NULL_TREE);
16679 tree v8hi_ftype_long_pcvoid
16680 = build_function_type_list (V8HI_type_node,
16681 long_integer_type_node, pcvoid_type_node,
16682 NULL_TREE);
16683 tree v4si_ftype_long_pcvoid
16684 = build_function_type_list (V4SI_type_node,
16685 long_integer_type_node, pcvoid_type_node,
16686 NULL_TREE);
16687 tree v4sf_ftype_long_pcvoid
16688 = build_function_type_list (V4SF_type_node,
16689 long_integer_type_node, pcvoid_type_node,
16690 NULL_TREE);
16691 tree v2df_ftype_long_pcvoid
16692 = build_function_type_list (V2DF_type_node,
16693 long_integer_type_node, pcvoid_type_node,
16694 NULL_TREE);
16695 tree v2di_ftype_long_pcvoid
16696 = build_function_type_list (V2DI_type_node,
16697 long_integer_type_node, pcvoid_type_node,
16698 NULL_TREE);
16699 tree v1ti_ftype_long_pcvoid
16700 = build_function_type_list (V1TI_type_node,
16701 long_integer_type_node, pcvoid_type_node,
16702 NULL_TREE);
16703
16704 tree void_ftype_opaque_long_pvoid
16705 = build_function_type_list (void_type_node,
16706 opaque_V4SI_type_node, long_integer_type_node,
16707 pvoid_type_node, NULL_TREE);
16708 tree void_ftype_v4si_long_pvoid
16709 = build_function_type_list (void_type_node,
16710 V4SI_type_node, long_integer_type_node,
16711 pvoid_type_node, NULL_TREE);
16712 tree void_ftype_v16qi_long_pvoid
16713 = build_function_type_list (void_type_node,
16714 V16QI_type_node, long_integer_type_node,
16715 pvoid_type_node, NULL_TREE);
16716
16717 tree void_ftype_v16qi_pvoid_long
16718 = build_function_type_list (void_type_node,
16719 V16QI_type_node, pvoid_type_node,
16720 long_integer_type_node, NULL_TREE);
16721
16722 tree void_ftype_v8hi_long_pvoid
16723 = build_function_type_list (void_type_node,
16724 V8HI_type_node, long_integer_type_node,
16725 pvoid_type_node, NULL_TREE);
16726 tree void_ftype_v4sf_long_pvoid
16727 = build_function_type_list (void_type_node,
16728 V4SF_type_node, long_integer_type_node,
16729 pvoid_type_node, NULL_TREE);
16730 tree void_ftype_v2df_long_pvoid
16731 = build_function_type_list (void_type_node,
16732 V2DF_type_node, long_integer_type_node,
16733 pvoid_type_node, NULL_TREE);
16734 tree void_ftype_v1ti_long_pvoid
16735 = build_function_type_list (void_type_node,
16736 V1TI_type_node, long_integer_type_node,
16737 pvoid_type_node, NULL_TREE);
16738 tree void_ftype_v2di_long_pvoid
16739 = build_function_type_list (void_type_node,
16740 V2DI_type_node, long_integer_type_node,
16741 pvoid_type_node, NULL_TREE);
16742 tree int_ftype_int_v8hi_v8hi
16743 = build_function_type_list (integer_type_node,
16744 integer_type_node, V8HI_type_node,
16745 V8HI_type_node, NULL_TREE);
16746 tree int_ftype_int_v16qi_v16qi
16747 = build_function_type_list (integer_type_node,
16748 integer_type_node, V16QI_type_node,
16749 V16QI_type_node, NULL_TREE);
16750 tree int_ftype_int_v4sf_v4sf
16751 = build_function_type_list (integer_type_node,
16752 integer_type_node, V4SF_type_node,
16753 V4SF_type_node, NULL_TREE);
16754 tree int_ftype_int_v2df_v2df
16755 = build_function_type_list (integer_type_node,
16756 integer_type_node, V2DF_type_node,
16757 V2DF_type_node, NULL_TREE);
16758 tree v2di_ftype_v2di
16759 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16760 tree v4si_ftype_v4si
16761 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16762 tree v8hi_ftype_v8hi
16763 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16764 tree v16qi_ftype_v16qi
16765 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16766 tree v4sf_ftype_v4sf
16767 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16768 tree v2df_ftype_v2df
16769 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16770 tree void_ftype_pcvoid_int_int
16771 = build_function_type_list (void_type_node,
16772 pcvoid_type_node, integer_type_node,
16773 integer_type_node, NULL_TREE);
16774
16775 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16776 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16777 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16778 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16779 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16780 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16781 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16782 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16783 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16784 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16785 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16786 ALTIVEC_BUILTIN_LVXL_V2DF);
16787 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16788 ALTIVEC_BUILTIN_LVXL_V2DI);
16789 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16790 ALTIVEC_BUILTIN_LVXL_V4SF);
16791 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16792 ALTIVEC_BUILTIN_LVXL_V4SI);
16793 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16794 ALTIVEC_BUILTIN_LVXL_V8HI);
16795 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16796 ALTIVEC_BUILTIN_LVXL_V16QI);
16797 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16798 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16799 ALTIVEC_BUILTIN_LVX_V1TI);
16800 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16801 ALTIVEC_BUILTIN_LVX_V2DF);
16802 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16803 ALTIVEC_BUILTIN_LVX_V2DI);
16804 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16805 ALTIVEC_BUILTIN_LVX_V4SF);
16806 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16807 ALTIVEC_BUILTIN_LVX_V4SI);
16808 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16809 ALTIVEC_BUILTIN_LVX_V8HI);
16810 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16811 ALTIVEC_BUILTIN_LVX_V16QI);
16812 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16813 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16814 ALTIVEC_BUILTIN_STVX_V2DF);
16815 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16816 ALTIVEC_BUILTIN_STVX_V2DI);
16817 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16818 ALTIVEC_BUILTIN_STVX_V4SF);
16819 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16820 ALTIVEC_BUILTIN_STVX_V4SI);
16821 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16822 ALTIVEC_BUILTIN_STVX_V8HI);
16823 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16824 ALTIVEC_BUILTIN_STVX_V16QI);
16825 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16826 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16827 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16828 ALTIVEC_BUILTIN_STVXL_V2DF);
16829 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16830 ALTIVEC_BUILTIN_STVXL_V2DI);
16831 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16832 ALTIVEC_BUILTIN_STVXL_V4SF);
16833 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16834 ALTIVEC_BUILTIN_STVXL_V4SI);
16835 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16836 ALTIVEC_BUILTIN_STVXL_V8HI);
16837 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16838 ALTIVEC_BUILTIN_STVXL_V16QI);
16839 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16840 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16841 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16842 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16843 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16844 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16845 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16846 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16847 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16848 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16849 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16850 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16851 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16852 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16853 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16854 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16855
16856 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16857 VSX_BUILTIN_LXVD2X_V2DF);
16858 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16859 VSX_BUILTIN_LXVD2X_V2DI);
16860 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16861 VSX_BUILTIN_LXVW4X_V4SF);
16862 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16863 VSX_BUILTIN_LXVW4X_V4SI);
16864 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16865 VSX_BUILTIN_LXVW4X_V8HI);
16866 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16867 VSX_BUILTIN_LXVW4X_V16QI);
16868 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16869 VSX_BUILTIN_STXVD2X_V2DF);
16870 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16871 VSX_BUILTIN_STXVD2X_V2DI);
16872 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16873 VSX_BUILTIN_STXVW4X_V4SF);
16874 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16875 VSX_BUILTIN_STXVW4X_V4SI);
16876 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16877 VSX_BUILTIN_STXVW4X_V8HI);
16878 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16879 VSX_BUILTIN_STXVW4X_V16QI);
16880
16881 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16882 VSX_BUILTIN_LD_ELEMREV_V2DF);
16883 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16884 VSX_BUILTIN_LD_ELEMREV_V2DI);
16885 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16886 VSX_BUILTIN_LD_ELEMREV_V4SF);
16887 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16888 VSX_BUILTIN_LD_ELEMREV_V4SI);
16889 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16890 VSX_BUILTIN_LD_ELEMREV_V8HI);
16891 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16892 VSX_BUILTIN_LD_ELEMREV_V16QI);
16893 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16894 VSX_BUILTIN_ST_ELEMREV_V2DF);
16895 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16896 VSX_BUILTIN_ST_ELEMREV_V1TI);
16897 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16898 VSX_BUILTIN_ST_ELEMREV_V2DI);
16899 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16900 VSX_BUILTIN_ST_ELEMREV_V4SF);
16901 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16902 VSX_BUILTIN_ST_ELEMREV_V4SI);
16903 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16904 VSX_BUILTIN_ST_ELEMREV_V8HI);
16905 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16906 VSX_BUILTIN_ST_ELEMREV_V16QI);
16907
16908 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16909 VSX_BUILTIN_VEC_LD);
16910 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16911 VSX_BUILTIN_VEC_ST);
16912 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16913 VSX_BUILTIN_VEC_XL);
16914 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16915 VSX_BUILTIN_VEC_XL_BE);
16916 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16917 VSX_BUILTIN_VEC_XST);
16918 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16919 VSX_BUILTIN_VEC_XST_BE);
16920
16921 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16922 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16923 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16924
16925 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16926 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16927 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16928 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16929 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16930 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16931 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16932 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16933 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16934 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16935 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16936 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16937
16938 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16939 ALTIVEC_BUILTIN_VEC_ADDE);
16940 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16941 ALTIVEC_BUILTIN_VEC_ADDEC);
16942 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16943 ALTIVEC_BUILTIN_VEC_CMPNE);
16944 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16945 ALTIVEC_BUILTIN_VEC_MUL);
16946 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16947 ALTIVEC_BUILTIN_VEC_SUBE);
16948 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16949 ALTIVEC_BUILTIN_VEC_SUBEC);
16950
16951 /* Cell builtins. */
16952 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16953 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16954 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16955 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16956
16957 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16958 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16959 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16960 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16961
16962 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16963 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16964 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16965 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16966
16967 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16968 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16969 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16970 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16971
16972 if (TARGET_P9_VECTOR)
16973 {
16974 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16975 P9V_BUILTIN_STXVL);
16976 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16977 P9V_BUILTIN_XST_LEN_R);
16978 }
16979
16980 /* Add the DST variants. */
16981 d = bdesc_dst;
16982 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16983 {
16984 HOST_WIDE_INT mask = d->mask;
16985
16986 /* It is expected that these dst built-in functions may have
16987 d->icode equal to CODE_FOR_nothing. */
16988 if ((mask & builtin_mask) != mask)
16989 {
16990 if (TARGET_DEBUG_BUILTIN)
16991 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16992 d->name);
16993 continue;
16994 }
16995 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16996 }
16997
16998 /* Initialize the predicates. */
16999 d = bdesc_altivec_preds;
17000 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
17001 {
17002 machine_mode mode1;
17003 tree type;
17004 HOST_WIDE_INT mask = d->mask;
17005
17006 if ((mask & builtin_mask) != mask)
17007 {
17008 if (TARGET_DEBUG_BUILTIN)
17009 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
17010 d->name);
17011 continue;
17012 }
17013
17014 if (rs6000_overloaded_builtin_p (d->code))
17015 mode1 = VOIDmode;
17016 else
17017 {
17018 /* Cannot define builtin if the instruction is disabled. */
17019 gcc_assert (d->icode != CODE_FOR_nothing);
17020 mode1 = insn_data[d->icode].operand[1].mode;
17021 }
17022
17023 switch (mode1)
17024 {
17025 case E_VOIDmode:
17026 type = int_ftype_int_opaque_opaque;
17027 break;
17028 case E_V2DImode:
17029 type = int_ftype_int_v2di_v2di;
17030 break;
17031 case E_V4SImode:
17032 type = int_ftype_int_v4si_v4si;
17033 break;
17034 case E_V8HImode:
17035 type = int_ftype_int_v8hi_v8hi;
17036 break;
17037 case E_V16QImode:
17038 type = int_ftype_int_v16qi_v16qi;
17039 break;
17040 case E_V4SFmode:
17041 type = int_ftype_int_v4sf_v4sf;
17042 break;
17043 case E_V2DFmode:
17044 type = int_ftype_int_v2df_v2df;
17045 break;
17046 default:
17047 gcc_unreachable ();
17048 }
17049
17050 def_builtin (d->name, type, d->code);
17051 }
17052
17053 /* Initialize the abs* operators. */
17054 d = bdesc_abs;
17055 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17056 {
17057 machine_mode mode0;
17058 tree type;
17059 HOST_WIDE_INT mask = d->mask;
17060
17061 if ((mask & builtin_mask) != mask)
17062 {
17063 if (TARGET_DEBUG_BUILTIN)
17064 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17065 d->name);
17066 continue;
17067 }
17068
17069 /* Cannot define builtin if the instruction is disabled. */
17070 gcc_assert (d->icode != CODE_FOR_nothing);
17071 mode0 = insn_data[d->icode].operand[0].mode;
17072
17073 switch (mode0)
17074 {
17075 case E_V2DImode:
17076 type = v2di_ftype_v2di;
17077 break;
17078 case E_V4SImode:
17079 type = v4si_ftype_v4si;
17080 break;
17081 case E_V8HImode:
17082 type = v8hi_ftype_v8hi;
17083 break;
17084 case E_V16QImode:
17085 type = v16qi_ftype_v16qi;
17086 break;
17087 case E_V4SFmode:
17088 type = v4sf_ftype_v4sf;
17089 break;
17090 case E_V2DFmode:
17091 type = v2df_ftype_v2df;
17092 break;
17093 default:
17094 gcc_unreachable ();
17095 }
17096
17097 def_builtin (d->name, type, d->code);
17098 }
17099
17100 /* Initialize target builtin that implements
17101 targetm.vectorize.builtin_mask_for_load. */
17102
17103 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17104 v16qi_ftype_long_pcvoid,
17105 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17106 BUILT_IN_MD, NULL, NULL_TREE);
17107 TREE_READONLY (decl) = 1;
17108 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17109 altivec_builtin_mask_for_load = decl;
17110
17111 /* Access to the vec_init patterns. */
17112 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17113 integer_type_node, integer_type_node,
17114 integer_type_node, NULL_TREE);
17115 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17116
17117 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17118 short_integer_type_node,
17119 short_integer_type_node,
17120 short_integer_type_node,
17121 short_integer_type_node,
17122 short_integer_type_node,
17123 short_integer_type_node,
17124 short_integer_type_node, NULL_TREE);
17125 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17126
17127 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17128 char_type_node, char_type_node,
17129 char_type_node, char_type_node,
17130 char_type_node, char_type_node,
17131 char_type_node, char_type_node,
17132 char_type_node, char_type_node,
17133 char_type_node, char_type_node,
17134 char_type_node, char_type_node,
17135 char_type_node, NULL_TREE);
17136 def_builtin ("__builtin_vec_init_v16qi", ftype,
17137 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17138
17139 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17140 float_type_node, float_type_node,
17141 float_type_node, NULL_TREE);
17142 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17143
17144 /* VSX builtins. */
17145 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17146 double_type_node, NULL_TREE);
17147 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17148
17149 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17150 intDI_type_node, NULL_TREE);
17151 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17152
17153 /* Access to the vec_set patterns. */
17154 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17155 intSI_type_node,
17156 integer_type_node, NULL_TREE);
17157 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17158
17159 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17160 intHI_type_node,
17161 integer_type_node, NULL_TREE);
17162 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17163
17164 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17165 intQI_type_node,
17166 integer_type_node, NULL_TREE);
17167 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17168
17169 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17170 float_type_node,
17171 integer_type_node, NULL_TREE);
17172 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17173
17174 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17175 double_type_node,
17176 integer_type_node, NULL_TREE);
17177 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17178
17179 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17180 intDI_type_node,
17181 integer_type_node, NULL_TREE);
17182 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17183
17184 /* Access to the vec_extract patterns. */
17185 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17186 integer_type_node, NULL_TREE);
17187 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17188
17189 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17190 integer_type_node, NULL_TREE);
17191 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17192
17193 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17194 integer_type_node, NULL_TREE);
17195 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17196
17197 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17198 integer_type_node, NULL_TREE);
17199 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17200
17201 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17202 integer_type_node, NULL_TREE);
17203 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17204
17205 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17206 integer_type_node, NULL_TREE);
17207 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17208
17210 if (V1TI_type_node)
17211 {
17212 tree v1ti_ftype_long_pcvoid
17213 = build_function_type_list (V1TI_type_node,
17214 long_integer_type_node, pcvoid_type_node,
17215 NULL_TREE);
17216 tree void_ftype_v1ti_long_pvoid
17217 = build_function_type_list (void_type_node,
17218 V1TI_type_node, long_integer_type_node,
17219 pvoid_type_node, NULL_TREE);
17220 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17221 VSX_BUILTIN_LD_ELEMREV_V1TI);
17222 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17223 VSX_BUILTIN_LXVD2X_V1TI);
17224 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17225 VSX_BUILTIN_STXVD2X_V1TI);
17226 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17227 NULL_TREE);
17228 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17229 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17230 intTI_type_node,
17231 integer_type_node, NULL_TREE);
17232 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17233 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17234 integer_type_node, NULL_TREE);
17235 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17236 }
17237
17238 }
17239
17240 static void
17241 htm_init_builtins (void)
17242 {
17243 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17244 const struct builtin_description *d;
17245 size_t i;
17246
17247 d = bdesc_htm;
17248 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17249 {
17250 tree op[MAX_HTM_OPERANDS], type;
17251 HOST_WIDE_INT mask = d->mask;
17252 unsigned attr = rs6000_builtin_info[d->code].attr;
17253 bool void_func = (attr & RS6000_BTC_VOID);
17254 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17255 int nopnds = 0;
17256 tree gpr_type_node;
17257 tree rettype;
17258 tree argtype;
17259
17260 /* It is expected that these htm built-in functions may have
17261 d->icode equal to CODE_FOR_nothing. */
17262
17263 if (TARGET_32BIT && TARGET_POWERPC64)
17264 gpr_type_node = long_long_unsigned_type_node;
17265 else
17266 gpr_type_node = long_unsigned_type_node;
17267
17268 if (attr & RS6000_BTC_SPR)
17269 {
17270 rettype = gpr_type_node;
17271 argtype = gpr_type_node;
17272 }
17273 else if (d->code == HTM_BUILTIN_TABORTDC
17274 || d->code == HTM_BUILTIN_TABORTDCI)
17275 {
17276 rettype = unsigned_type_node;
17277 argtype = gpr_type_node;
17278 }
17279 else
17280 {
17281 rettype = unsigned_type_node;
17282 argtype = unsigned_type_node;
17283 }
17284
17285 if (d->name == 0)
17286 {
17287 if (TARGET_DEBUG_BUILTIN)
17288 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17289 (long unsigned) i);
17290 continue;
17291 }
17292
17293 if ((mask & builtin_mask) != mask)
17294 {
17295 if (TARGET_DEBUG_BUILTIN)
17296 fprintf (stderr, "htm_builtin, skip htm %s\n", d->name);
17297 continue;
17298 }
17299
17300 op[nopnds++] = (void_func) ? void_type_node : rettype;
17301
17302 if (attr_args == RS6000_BTC_UNARY)
17303 op[nopnds++] = argtype;
17304 else if (attr_args == RS6000_BTC_BINARY)
17305 {
17306 op[nopnds++] = argtype;
17307 op[nopnds++] = argtype;
17308 }
17309 else if (attr_args == RS6000_BTC_TERNARY)
17310 {
17311 op[nopnds++] = argtype;
17312 op[nopnds++] = argtype;
17313 op[nopnds++] = argtype;
17314 }
17315
17316 switch (nopnds)
17317 {
17318 case 1:
17319 type = build_function_type_list (op[0], NULL_TREE);
17320 break;
17321 case 2:
17322 type = build_function_type_list (op[0], op[1], NULL_TREE);
17323 break;
17324 case 3:
17325 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17326 break;
17327 case 4:
17328 type = build_function_type_list (op[0], op[1], op[2], op[3],
17329 NULL_TREE);
17330 break;
17331 default:
17332 gcc_unreachable ();
17333 }
17334
17335 def_builtin (d->name, type, d->code);
17336 }
17337 }
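
/* As a concrete reading of the loop above: a binary HTM builtin carrying
   the RS6000_BTC_SPR attribute ends up with the signature
     long long unsigned int f (long long unsigned int, long long unsigned int)
   on a 32-bit target compiled with -mpowerpc64, and with long unsigned int
   in its place elsewhere, matching the GPR width in each case.
   (Illustrative example only, not an exhaustive list of the HTM
   signatures.)  */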
17338
17339 /* Hash function for builtin functions with up to 3 arguments and a return
17340 type. */
17341 hashval_t
17342 builtin_hasher::hash (builtin_hash_struct *bh)
17343 {
17344 unsigned ret = 0;
17345 int i;
17346
17347 for (i = 0; i < 4; i++)
17348 {
17349 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17350 ret = (ret * 2) + bh->uns_p[i];
17351 }
17352
17353 return ret;
17354 }
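
/* A sketch of the scheme above: the accumulator interleaves each mode with
   its uns_p bit, so the signed and unsigned variants of an otherwise
   identical signature, say (V4SI, V4SI, V4SI), feed different bit streams
   into the hash.  Collisions after 32-bit wraparound are harmless, since
   builtin_hasher::equal below compares every mode and signedness flag
   exactly.  */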
17355
17356 /* Compare builtin hash entries H1 and H2 for equivalence. */
17357 bool
17358 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17359 {
17360 return ((p1->mode[0] == p2->mode[0])
17361 && (p1->mode[1] == p2->mode[1])
17362 && (p1->mode[2] == p2->mode[2])
17363 && (p1->mode[3] == p2->mode[3])
17364 && (p1->uns_p[0] == p2->uns_p[0])
17365 && (p1->uns_p[1] == p2->uns_p[1])
17366 && (p1->uns_p[2] == p2->uns_p[2])
17367 && (p1->uns_p[3] == p2->uns_p[3]));
17368 }
17369
17370 /* Map types for builtin functions with an explicit return type and up to 3
17371 arguments. Functions with fewer than 3 arguments pass VOIDmode as the mode
17372 of each unused argument. */
17373 static tree
17374 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17375 machine_mode mode_arg1, machine_mode mode_arg2,
17376 enum rs6000_builtins builtin, const char *name)
17377 {
17378 struct builtin_hash_struct h;
17379 struct builtin_hash_struct *h2;
17380 int num_args = 3;
17381 int i;
17382 tree ret_type = NULL_TREE;
17383 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17384
17385 /* Create builtin_hash_table. */
17386 if (builtin_hash_table == NULL)
17387 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17388
17389 h.type = NULL_TREE;
17390 h.mode[0] = mode_ret;
17391 h.mode[1] = mode_arg0;
17392 h.mode[2] = mode_arg1;
17393 h.mode[3] = mode_arg2;
17394 h.uns_p[0] = 0;
17395 h.uns_p[1] = 0;
17396 h.uns_p[2] = 0;
17397 h.uns_p[3] = 0;
17398
17399 /* If the builtin produces unsigned results or takes unsigned arguments,
17400 and it is returned as a decl for the vectorizer (such as widening
17401 multiplies or permutes), make sure the arguments and return value
17402 are type correct. */
17403 switch (builtin)
17404 {
17405 /* unsigned 1 argument functions. */
17406 case CRYPTO_BUILTIN_VSBOX:
17407 case CRYPTO_BUILTIN_VSBOX_BE:
17408 case P8V_BUILTIN_VGBBD:
17409 case MISC_BUILTIN_CDTBCD:
17410 case MISC_BUILTIN_CBCDTD:
17411 h.uns_p[0] = 1;
17412 h.uns_p[1] = 1;
17413 break;
17414
17415 /* unsigned 2 argument functions. */
17416 case ALTIVEC_BUILTIN_VMULEUB:
17417 case ALTIVEC_BUILTIN_VMULEUH:
17418 case P8V_BUILTIN_VMULEUW:
17419 case ALTIVEC_BUILTIN_VMULOUB:
17420 case ALTIVEC_BUILTIN_VMULOUH:
17421 case P8V_BUILTIN_VMULOUW:
17422 case CRYPTO_BUILTIN_VCIPHER:
17423 case CRYPTO_BUILTIN_VCIPHER_BE:
17424 case CRYPTO_BUILTIN_VCIPHERLAST:
17425 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17426 case CRYPTO_BUILTIN_VNCIPHER:
17427 case CRYPTO_BUILTIN_VNCIPHER_BE:
17428 case CRYPTO_BUILTIN_VNCIPHERLAST:
17429 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17430 case CRYPTO_BUILTIN_VPMSUMB:
17431 case CRYPTO_BUILTIN_VPMSUMH:
17432 case CRYPTO_BUILTIN_VPMSUMW:
17433 case CRYPTO_BUILTIN_VPMSUMD:
17434 case CRYPTO_BUILTIN_VPMSUM:
17435 case MISC_BUILTIN_ADDG6S:
17436 case MISC_BUILTIN_DIVWEU:
17437 case MISC_BUILTIN_DIVDEU:
17438 case VSX_BUILTIN_UDIV_V2DI:
17439 case ALTIVEC_BUILTIN_VMAXUB:
17440 case ALTIVEC_BUILTIN_VMINUB:
17441 case ALTIVEC_BUILTIN_VMAXUH:
17442 case ALTIVEC_BUILTIN_VMINUH:
17443 case ALTIVEC_BUILTIN_VMAXUW:
17444 case ALTIVEC_BUILTIN_VMINUW:
17445 case P8V_BUILTIN_VMAXUD:
17446 case P8V_BUILTIN_VMINUD:
17447 h.uns_p[0] = 1;
17448 h.uns_p[1] = 1;
17449 h.uns_p[2] = 1;
17450 break;
17451
17452 /* unsigned 3 argument functions. */
17453 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17454 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17455 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17456 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17457 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17458 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17459 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17460 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17461 case VSX_BUILTIN_VPERM_16QI_UNS:
17462 case VSX_BUILTIN_VPERM_8HI_UNS:
17463 case VSX_BUILTIN_VPERM_4SI_UNS:
17464 case VSX_BUILTIN_VPERM_2DI_UNS:
17465 case VSX_BUILTIN_XXSEL_16QI_UNS:
17466 case VSX_BUILTIN_XXSEL_8HI_UNS:
17467 case VSX_BUILTIN_XXSEL_4SI_UNS:
17468 case VSX_BUILTIN_XXSEL_2DI_UNS:
17469 case CRYPTO_BUILTIN_VPERMXOR:
17470 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17471 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17472 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17473 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17474 case CRYPTO_BUILTIN_VSHASIGMAW:
17475 case CRYPTO_BUILTIN_VSHASIGMAD:
17476 case CRYPTO_BUILTIN_VSHASIGMA:
17477 h.uns_p[0] = 1;
17478 h.uns_p[1] = 1;
17479 h.uns_p[2] = 1;
17480 h.uns_p[3] = 1;
17481 break;
17482
17483 /* signed permute functions with unsigned char mask. */
17484 case ALTIVEC_BUILTIN_VPERM_16QI:
17485 case ALTIVEC_BUILTIN_VPERM_8HI:
17486 case ALTIVEC_BUILTIN_VPERM_4SI:
17487 case ALTIVEC_BUILTIN_VPERM_4SF:
17488 case ALTIVEC_BUILTIN_VPERM_2DI:
17489 case ALTIVEC_BUILTIN_VPERM_2DF:
17490 case VSX_BUILTIN_VPERM_16QI:
17491 case VSX_BUILTIN_VPERM_8HI:
17492 case VSX_BUILTIN_VPERM_4SI:
17493 case VSX_BUILTIN_VPERM_4SF:
17494 case VSX_BUILTIN_VPERM_2DI:
17495 case VSX_BUILTIN_VPERM_2DF:
17496 h.uns_p[3] = 1;
17497 break;
17498
17499 /* unsigned args, signed return. */
17500 case VSX_BUILTIN_XVCVUXDSP:
17501 case VSX_BUILTIN_XVCVUXDDP_UNS:
17502 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17503 h.uns_p[1] = 1;
17504 break;
17505
17506 /* signed args, unsigned return. */
17507 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17508 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17509 case MISC_BUILTIN_UNPACK_TD:
17510 case MISC_BUILTIN_UNPACK_V1TI:
17511 h.uns_p[0] = 1;
17512 break;
17513
17514 /* unsigned arguments, bool return (compares). */
17515 case ALTIVEC_BUILTIN_VCMPEQUB:
17516 case ALTIVEC_BUILTIN_VCMPEQUH:
17517 case ALTIVEC_BUILTIN_VCMPEQUW:
17518 case P8V_BUILTIN_VCMPEQUD:
17519 case VSX_BUILTIN_CMPGE_U16QI:
17520 case VSX_BUILTIN_CMPGE_U8HI:
17521 case VSX_BUILTIN_CMPGE_U4SI:
17522 case VSX_BUILTIN_CMPGE_U2DI:
17523 case ALTIVEC_BUILTIN_VCMPGTUB:
17524 case ALTIVEC_BUILTIN_VCMPGTUH:
17525 case ALTIVEC_BUILTIN_VCMPGTUW:
17526 case P8V_BUILTIN_VCMPGTUD:
17527 h.uns_p[1] = 1;
17528 h.uns_p[2] = 1;
17529 break;
17530
17531 /* unsigned arguments for 128-bit pack instructions. */
17532 case MISC_BUILTIN_PACK_TD:
17533 case MISC_BUILTIN_PACK_V1TI:
17534 h.uns_p[1] = 1;
17535 h.uns_p[2] = 1;
17536 break;
17537
17538 /* unsigned second arguments (vector shift right). */
17539 case ALTIVEC_BUILTIN_VSRB:
17540 case ALTIVEC_BUILTIN_VSRH:
17541 case ALTIVEC_BUILTIN_VSRW:
17542 case P8V_BUILTIN_VSRD:
17543 h.uns_p[2] = 1;
17544 break;
17545
17546 default:
17547 break;
17548 }
17549
17550 /* Figure out how many args are present. */
17551 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17552 num_args--;
17553
17554 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17555 if (!ret_type && h.uns_p[0])
17556 ret_type = builtin_mode_to_type[h.mode[0]][0];
17557
17558 if (!ret_type)
17559 fatal_error (input_location,
17560 "internal error: builtin function %qs had an unexpected "
17561 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17562
17563 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17564 arg_type[i] = NULL_TREE;
17565
17566 for (i = 0; i < num_args; i++)
17567 {
17568 int m = (int) h.mode[i+1];
17569 int uns_p = h.uns_p[i+1];
17570
17571 arg_type[i] = builtin_mode_to_type[m][uns_p];
17572 if (!arg_type[i] && uns_p)
17573 arg_type[i] = builtin_mode_to_type[m][0];
17574
17575 if (!arg_type[i])
17576 fatal_error (input_location,
17577 "internal error: builtin function %qs, argument %d "
17578 "had unexpected argument type %qs", name, i,
17579 GET_MODE_NAME (m));
17580 }
17581
17582 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17583 if (*found == NULL)
17584 {
17585 h2 = ggc_alloc<builtin_hash_struct> ();
17586 *h2 = h;
17587 *found = h2;
17588
17589 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17590 arg_type[2], NULL_TREE);
17591 }
17592
17593 return (*found)->type;
17594 }
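
/* For instance, a call along the lines of
     builtin_function_type (V4SImode, V4SImode, V4SImode, VOIDmode,
			    code, name)
   with CODE outside the unsigned special cases above yields the cached
   type "V4SI f (V4SI, V4SI)", whereas one of the unsigned two-argument
   codes (ALTIVEC_BUILTIN_VMULEUB, say, with its V8HI/V16QI modes) gets
   unsigned vector types throughout.  The hash table ensures each
   (modes, signedness) combination is built only once.  */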
17595
17596 static void
17597 rs6000_common_init_builtins (void)
17598 {
17599 const struct builtin_description *d;
17600 size_t i;
17601
17602 tree opaque_ftype_opaque = NULL_TREE;
17603 tree opaque_ftype_opaque_opaque = NULL_TREE;
17604 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17605 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17606
17607 /* Create Altivec and VSX builtins on machines with at least the
17608 general purpose extensions (970 and newer) to allow the use of
17609 the target attribute. */
17610
17611 if (TARGET_EXTRA_BUILTINS)
17612 builtin_mask |= RS6000_BTM_COMMON;
17613
17614 /* Add the ternary operators. */
17615 d = bdesc_3arg;
17616 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17617 {
17618 tree type;
17619 HOST_WIDE_INT mask = d->mask;
17620
17621 if ((mask & builtin_mask) != mask)
17622 {
17623 if (TARGET_DEBUG_BUILTIN)
17624 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17625 continue;
17626 }
17627
17628 if (rs6000_overloaded_builtin_p (d->code))
17629 {
17630 if (! (type = opaque_ftype_opaque_opaque_opaque))
17631 type = opaque_ftype_opaque_opaque_opaque
17632 = build_function_type_list (opaque_V4SI_type_node,
17633 opaque_V4SI_type_node,
17634 opaque_V4SI_type_node,
17635 opaque_V4SI_type_node,
17636 NULL_TREE);
17637 }
17638 else
17639 {
17640 enum insn_code icode = d->icode;
17641 if (d->name == 0)
17642 {
17643 if (TARGET_DEBUG_BUILTIN)
17644 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17645 (long unsigned)i);
17646
17647 continue;
17648 }
17649
17650 if (icode == CODE_FOR_nothing)
17651 {
17652 if (TARGET_DEBUG_BUILTIN)
17653 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17654 d->name);
17655
17656 continue;
17657 }
17658
17659 type = builtin_function_type (insn_data[icode].operand[0].mode,
17660 insn_data[icode].operand[1].mode,
17661 insn_data[icode].operand[2].mode,
17662 insn_data[icode].operand[3].mode,
17663 d->code, d->name);
17664 }
17665
17666 def_builtin (d->name, type, d->code);
17667 }
17668
17669 /* Add the binary operators. */
17670 d = bdesc_2arg;
17671 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17672 {
17673 machine_mode mode0, mode1, mode2;
17674 tree type;
17675 HOST_WIDE_INT mask = d->mask;
17676
17677 if ((mask & builtin_mask) != mask)
17678 {
17679 if (TARGET_DEBUG_BUILTIN)
17680 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17681 continue;
17682 }
17683
17684 if (rs6000_overloaded_builtin_p (d->code))
17685 {
17686 if (! (type = opaque_ftype_opaque_opaque))
17687 type = opaque_ftype_opaque_opaque
17688 = build_function_type_list (opaque_V4SI_type_node,
17689 opaque_V4SI_type_node,
17690 opaque_V4SI_type_node,
17691 NULL_TREE);
17692 }
17693 else
17694 {
17695 enum insn_code icode = d->icode;
17696 if (d->name == 0)
17697 {
17698 if (TARGET_DEBUG_BUILTIN)
17699 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17700 (long unsigned)i);
17701
17702 continue;
17703 }
17704
17705 if (icode == CODE_FOR_nothing)
17706 {
17707 if (TARGET_DEBUG_BUILTIN)
17708 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17709 d->name);
17710
17711 continue;
17712 }
17713
17714 mode0 = insn_data[icode].operand[0].mode;
17715 mode1 = insn_data[icode].operand[1].mode;
17716 mode2 = insn_data[icode].operand[2].mode;
17717
17718 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17719 d->code, d->name);
17720 }
17721
17722 def_builtin (d->name, type, d->code);
17723 }
17724
17725 /* Add the simple unary operators. */
17726 d = bdesc_1arg;
17727 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17728 {
17729 machine_mode mode0, mode1;
17730 tree type;
17731 HOST_WIDE_INT mask = d->mask;
17732
17733 if ((mask & builtin_mask) != mask)
17734 {
17735 if (TARGET_DEBUG_BUILTIN)
17736 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17737 continue;
17738 }
17739
17740 if (rs6000_overloaded_builtin_p (d->code))
17741 {
17742 if (! (type = opaque_ftype_opaque))
17743 type = opaque_ftype_opaque
17744 = build_function_type_list (opaque_V4SI_type_node,
17745 opaque_V4SI_type_node,
17746 NULL_TREE);
17747 }
17748 else
17749 {
17750 enum insn_code icode = d->icode;
17751 if (d->name == 0)
17752 {
17753 if (TARGET_DEBUG_BUILTIN)
17754 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17755 (long unsigned)i);
17756
17757 continue;
17758 }
17759
17760 if (icode == CODE_FOR_nothing)
17761 {
17762 if (TARGET_DEBUG_BUILTIN)
17763 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17764 d->name);
17765
17766 continue;
17767 }
17768
17769 mode0 = insn_data[icode].operand[0].mode;
17770 mode1 = insn_data[icode].operand[1].mode;
17771
17772 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17773 d->code, d->name);
17774 }
17775
17776 def_builtin (d->name, type, d->code);
17777 }
17778
17779 /* Add the simple no-argument operators. */
17780 d = bdesc_0arg;
17781 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17782 {
17783 machine_mode mode0;
17784 tree type;
17785 HOST_WIDE_INT mask = d->mask;
17786
17787 if ((mask & builtin_mask) != mask)
17788 {
17789 if (TARGET_DEBUG_BUILTIN)
17790 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17791 continue;
17792 }
17793 if (rs6000_overloaded_builtin_p (d->code))
17794 {
17795 if (!opaque_ftype_opaque)
17796 opaque_ftype_opaque
17797 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17798 type = opaque_ftype_opaque;
17799 }
17800 else
17801 {
17802 enum insn_code icode = d->icode;
17803 if (d->name == 0)
17804 {
17805 if (TARGET_DEBUG_BUILTIN)
17806 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17807 (long unsigned) i);
17808 continue;
17809 }
17810 if (icode == CODE_FOR_nothing)
17811 {
17812 if (TARGET_DEBUG_BUILTIN)
17813 fprintf (stderr,
17814 "rs6000_builtin, skip no-argument %s (no code)\n",
17815 d->name);
17816 continue;
17817 }
17818 mode0 = insn_data[icode].operand[0].mode;
17819 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17820 d->code, d->name);
17821 }
17822 def_builtin (d->name, type, d->code);
17823 }
17824 }
17825
17826 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17827 static void
17828 init_float128_ibm (machine_mode mode)
17829 {
17830 if (!TARGET_XL_COMPAT)
17831 {
17832 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17833 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17834 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17835 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17836
17837 if (!TARGET_HARD_FLOAT)
17838 {
17839 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17840 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17841 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17842 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17843 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17844 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17845 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17846 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17847
17848 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17849 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17850 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17851 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17852 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17853 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17854 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17855 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17856 }
17857 }
17858 else
17859 {
17860 set_optab_libfunc (add_optab, mode, "_xlqadd");
17861 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17862 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17863 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17864 }
17865
17866 /* Add various conversions for IFmode to use the traditional TFmode
17867 names. */
17868 if (mode == IFmode)
17869 {
17870 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17871 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17872 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17873 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17874 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17875 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17876
17877 if (TARGET_POWERPC64)
17878 {
17879 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17880 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17881 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17882 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17883 }
17884 }
17885 }
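
/* The net effect of the mappings above: a long double multiply compiled
   without -mxl-compat expands to a call to __gcc_qmul, and under
   -msoft-float a comparison such as a < b becomes a call to __gcc_qlt;
   with -mxl-compat the arithmetic routines use the _xlq* names instead,
   for compatibility with IBM XL.  */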
17886
17887 /* Create a decl for either complex long double multiply or complex long double
17888 divide when long double is IEEE 128-bit floating point. We can't use
17889 __multc3 and __divtc3 because those names already belong to the original
17890 long double format that uses IBM extended double. The complex multiply/divide
17891 functions are encoded as builtins with a complex result and 4 scalar inputs. */
17892
17893 static void
17894 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17895 {
17896 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17897 name, NULL_TREE);
17898
17899 set_builtin_decl (fncode, fndecl, true);
17900
17901 if (TARGET_DEBUG_BUILTIN)
17902 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17905 }
17906
17907 /* Set up IEEE 128-bit floating point routines. Use different names if the
17908 arguments can be passed in a vector register. The historical PowerPC
17909 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17910 continue to use that if we aren't using vector registers to pass IEEE
17911 128-bit floating point. */
17912
17913 static void
17914 init_float128_ieee (machine_mode mode)
17915 {
17916 if (FLOAT128_VECTOR_P (mode))
17917 {
17918 static bool complex_muldiv_init_p = false;
17919
17920 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
17921 we have clone or target attributes, this will be called a second
17922 time. We want to create the built-in function only once. */
17923 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
17924 {
17925 complex_muldiv_init_p = true;
17926 built_in_function fncode_mul =
17927 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17928 - MIN_MODE_COMPLEX_FLOAT);
17929 built_in_function fncode_div =
17930 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17931 - MIN_MODE_COMPLEX_FLOAT);
17932
17933 tree fntype = build_function_type_list (complex_long_double_type_node,
17934 long_double_type_node,
17935 long_double_type_node,
17936 long_double_type_node,
17937 long_double_type_node,
17938 NULL_TREE);
17939
17940 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17941 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17942 }
17943
17944 set_optab_libfunc (add_optab, mode, "__addkf3");
17945 set_optab_libfunc (sub_optab, mode, "__subkf3");
17946 set_optab_libfunc (neg_optab, mode, "__negkf2");
17947 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17948 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17949 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17950 set_optab_libfunc (abs_optab, mode, "__abskf2");
17951 set_optab_libfunc (powi_optab, mode, "__powikf2");
17952
17953 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17954 set_optab_libfunc (ne_optab, mode, "__nekf2");
17955 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17956 set_optab_libfunc (ge_optab, mode, "__gekf2");
17957 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17958 set_optab_libfunc (le_optab, mode, "__lekf2");
17959 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17960
17961 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17962 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17963 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17964 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17965
17966 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17967 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17968 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17969
17970 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17971 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17972 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17973
17974 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
17975 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
17976 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
17977 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
17978 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
17979 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
17980
17981 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17982 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17983 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17984 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17985
17986 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17987 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17988 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17989 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17990
17991 if (TARGET_POWERPC64)
17992 {
17993 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17994 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17995 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17996 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17997 }
17998 }
17999
18000 else
18001 {
18002 set_optab_libfunc (add_optab, mode, "_q_add");
18003 set_optab_libfunc (sub_optab, mode, "_q_sub");
18004 set_optab_libfunc (neg_optab, mode, "_q_neg");
18005 set_optab_libfunc (smul_optab, mode, "_q_mul");
18006 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18007 if (TARGET_PPC_GPOPT)
18008 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18009
18010 set_optab_libfunc (eq_optab, mode, "_q_feq");
18011 set_optab_libfunc (ne_optab, mode, "_q_fne");
18012 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18013 set_optab_libfunc (ge_optab, mode, "_q_fge");
18014 set_optab_libfunc (lt_optab, mode, "_q_flt");
18015 set_optab_libfunc (le_optab, mode, "_q_fle");
18016
18017 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18018 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18019 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18020 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18021 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18022 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18023 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18024 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18025 }
18026 }
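
/* The upshot: when IEEE 128-bit values are passed in vector registers, a
   TFmode addition under -mabi=ieeelongdouble calls __addkf3 exactly as
   KFmode does; when they are not, the historical _q_add family is used
   instead.  */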
18027
18028 static void
18029 rs6000_init_libfuncs (void)
18030 {
18031 /* __float128 support. */
18032 if (TARGET_FLOAT128_TYPE)
18033 {
18034 init_float128_ibm (IFmode);
18035 init_float128_ieee (KFmode);
18036 }
18037
18038 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18039 if (TARGET_LONG_DOUBLE_128)
18040 {
18041 if (!TARGET_IEEEQUAD)
18042 init_float128_ibm (TFmode);
18043
18044 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18045 else
18046 init_float128_ieee (TFmode);
18047 }
18048 }
18049
18050 /* Emit a potentially record-form instruction, setting DST from SRC.
18051 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18052 signed comparison of DST with zero. If DOT is 1, the generated RTL
18053 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18054 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18055 a separate COMPARE. */
18056
18057 void
18058 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18059 {
18060 if (dot == 0)
18061 {
18062 emit_move_insn (dst, src);
18063 return;
18064 }
18065
18066 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18067 {
18068 emit_move_insn (dst, src);
18069 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18070 return;
18071 }
18072
18073 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18074 if (dot == 1)
18075 {
18076 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18077 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18078 }
18079 else
18080 {
18081 rtx set = gen_rtx_SET (dst, src);
18082 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18083 }
18084 }
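
/* For example, with DOT == 1 and CCREG being CR0 the insn emitted above
   has the shape
     (parallel [(set CCREG (compare:CC SRC (const_int 0)))
		(clobber DST)])
   while DOT == 2 keeps the destination live:
     (parallel [(set CCREG (compare:CC SRC (const_int 0)))
		(set DST SRC)])  */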
18085
18086 \f
18087 /* A validation routine: say whether CODE, a condition code, and MODE
18088 match. The other alternatives either don't make sense or should
18089 never be generated. */
18090
18091 void
18092 validate_condition_mode (enum rtx_code code, machine_mode mode)
18093 {
18094 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18095 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18096 && GET_MODE_CLASS (mode) == MODE_CC);
18097
18098 /* These don't make sense. */
18099 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18100 || mode != CCUNSmode);
18101
18102 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18103 || mode == CCUNSmode);
18104
18105 gcc_assert (mode == CCFPmode
18106 || (code != ORDERED && code != UNORDERED
18107 && code != UNEQ && code != LTGT
18108 && code != UNGT && code != UNLT
18109 && code != UNGE && code != UNLE));
18110
18111 /* These should never be generated except for
18112 flag_finite_math_only. */
18113 gcc_assert (mode != CCFPmode
18114 || flag_finite_math_only
18115 || (code != LE && code != GE
18116 && code != UNEQ && code != LTGT
18117 && code != UNGT && code != UNLT));
18118
18119 /* These are invalid; the information is not there. */
18120 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18121 }
18122
18123 \f
18124 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18125 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18126 non-null, store there the bit offset (counted from the right) where
18127 the single stretch of 1 bits begins; and similarly for B, the bit
18128 offset where it ends. */
18129
18130 bool
18131 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18132 {
18133 unsigned HOST_WIDE_INT val = INTVAL (mask);
18134 unsigned HOST_WIDE_INT bit;
18135 int nb, ne;
18136 int n = GET_MODE_PRECISION (mode);
18137
18138 if (mode != DImode && mode != SImode)
18139 return false;
18140
18141 if (INTVAL (mask) >= 0)
18142 {
18143 bit = val & -val;
18144 ne = exact_log2 (bit);
18145 nb = exact_log2 (val + bit);
18146 }
18147 else if (val + 1 == 0)
18148 {
18149 nb = n;
18150 ne = 0;
18151 }
18152 else if (val & 1)
18153 {
18154 val = ~val;
18155 bit = val & -val;
18156 nb = exact_log2 (bit);
18157 ne = exact_log2 (val + bit);
18158 }
18159 else
18160 {
18161 bit = val & -val;
18162 ne = exact_log2 (bit);
18163 if (val + bit == 0)
18164 nb = n;
18165 else
18166 nb = 0;
18167 }
18168
18169 nb--;
18170
18171 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18172 return false;
18173
18174 if (b)
18175 *b = nb;
18176 if (e)
18177 *e = ne;
18178
18179 return true;
18180 }
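
/* Worked example for the routine above: in DImode the mask 0x00ff0000 is
   a single run of ones from bit 16 through bit 23 (counting from the
   right), so the call succeeds with *E == 16 and *B == 23.  A mask with
   two separate runs, such as 0x0ff00ff0, fails the exact_log2 tests and
   is rejected.  Wrap-around runs like 0xff000000000000ff are accepted
   (*E == 56, *B == 7); callers that cannot use a rotate must reject the
   E > B case themselves, as the functions below do.  */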
18181
18182 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18183 or rldicr instruction, to implement an AND with it in mode MODE. */
18184
18185 bool
18186 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18187 {
18188 int nb, ne;
18189
18190 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18191 return false;
18192
18193 /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
18194 does not wrap. */
18195 if (mode == DImode)
18196 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18197
18198 /* For SImode, rlwinm can do everything. */
18199 if (mode == SImode)
18200 return (nb < 32 && ne < 32);
18201
18202 return false;
18203 }
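
/* E.g. in DImode an AND with 0xff (a run ending at bit 0) maps to rldicl,
   an AND with 0xffffffff00000000 (a run reaching bit 63) maps to rldicr,
   and 0xff0 fits rlwinm because it lies entirely below bit 32; but
   0xff00000000 matches none of the three shapes and is rejected as a
   single-insn AND.  */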
18204
18205 /* Return the instruction template for an AND with mask in mode MODE, with
18206 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18207
18208 const char *
18209 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18210 {
18211 int nb, ne;
18212
18213 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18214 gcc_unreachable ();
18215
18216 if (mode == DImode && ne == 0)
18217 {
18218 operands[3] = GEN_INT (63 - nb);
18219 if (dot)
18220 return "rldicl. %0,%1,0,%3";
18221 return "rldicl %0,%1,0,%3";
18222 }
18223
18224 if (mode == DImode && nb == 63)
18225 {
18226 operands[3] = GEN_INT (63 - ne);
18227 if (dot)
18228 return "rldicr. %0,%1,0,%3";
18229 return "rldicr %0,%1,0,%3";
18230 }
18231
18232 if (nb < 32 && ne < 32)
18233 {
18234 operands[3] = GEN_INT (31 - nb);
18235 operands[4] = GEN_INT (31 - ne);
18236 if (dot)
18237 return "rlwinm. %0,%1,0,%3,%4";
18238 return "rlwinm %0,%1,0,%3,%4";
18239 }
18240
18241 gcc_unreachable ();
18242 }
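
/* Continuing the examples: a DImode AND with 0xff produces
   "rldicl %0,%1,0,56" (keep the low 8 bits), and an SImode AND with
   0xff00 produces "rlwinm %0,%1,0,16,23", using the big-endian MB/ME
   numbering the rotate-and-mask insns expect.  */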
18243
18244 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18245 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18246 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18247
18248 bool
18249 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18250 {
18251 int nb, ne;
18252
18253 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18254 return false;
18255
18256 int n = GET_MODE_PRECISION (mode);
18257 int sh = -1;
18258
18259 if (CONST_INT_P (XEXP (shift, 1)))
18260 {
18261 sh = INTVAL (XEXP (shift, 1));
18262 if (sh < 0 || sh >= n)
18263 return false;
18264 }
18265
18266 rtx_code code = GET_CODE (shift);
18267
18268 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18269 if (sh == 0)
18270 code = ROTATE;
18271
18272 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18273 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18274 code = ASHIFT;
18275 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18276 {
18277 code = LSHIFTRT;
18278 sh = n - sh;
18279 }
18280
18281 /* DImode rotates need rld*. */
18282 if (mode == DImode && code == ROTATE)
18283 return (nb == 63 || ne == 0 || ne == sh);
18284
18285 /* SImode rotates need rlw*. */
18286 if (mode == SImode && code == ROTATE)
18287 return (nb < 32 && ne < 32 && sh < 32);
18288
18289 /* Wrap-around masks are only okay for rotates. */
18290 if (ne > nb)
18291 return false;
18292
18293 /* Variable shifts are only okay for rotates. */
18294 if (sh < 0)
18295 return false;
18296
18297 /* Don't allow ASHIFT if the mask is wrong for that. */
18298 if (code == ASHIFT && ne < sh)
18299 return false;
18300
18301 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18302 if the mask is wrong for that. */
18303 if (nb < 32 && ne < 32 && sh < 32
18304 && !(code == LSHIFTRT && nb >= 32 - sh))
18305 return true;
18306
18307 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18308 if the mask is wrong for that. */
18309 if (code == LSHIFTRT)
18310 sh = 64 - sh;
18311 if (nb == 63 || ne == 0 || ne == sh)
18312 return !(code == LSHIFTRT && nb >= sh);
18313
18314 return false;
18315 }
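
/* As an example of the analysis above: an SImode ASHIFT by 8 under the
   mask 0xffffff00 is accepted, since the mask already clears exactly the
   bits the shift zeroes and the combination is a single rlwinm; the same
   shift under 0xffffffff is rejected (NE == 0 < SH), because that mask
   claims the low bits survive when the shift has discarded them.  */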
18316
18317 /* Return the instruction template for a shift with mask in mode MODE, with
18318 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18319
18320 const char *
18321 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18322 {
18323 int nb, ne;
18324
18325 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18326 gcc_unreachable ();
18327
18328 if (mode == DImode && ne == 0)
18329 {
18330 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18331 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18332 operands[3] = GEN_INT (63 - nb);
18333 if (dot)
18334 return "rld%I2cl. %0,%1,%2,%3";
18335 return "rld%I2cl %0,%1,%2,%3";
18336 }
18337
18338 if (mode == DImode && nb == 63)
18339 {
18340 operands[3] = GEN_INT (63 - ne);
18341 if (dot)
18342 return "rld%I2cr. %0,%1,%2,%3";
18343 return "rld%I2cr %0,%1,%2,%3";
18344 }
18345
18346 if (mode == DImode
18347 && GET_CODE (operands[4]) != LSHIFTRT
18348 && CONST_INT_P (operands[2])
18349 && ne == INTVAL (operands[2]))
18350 {
18351 operands[3] = GEN_INT (63 - nb);
18352 if (dot)
18353 return "rld%I2c. %0,%1,%2,%3";
18354 return "rld%I2c %0,%1,%2,%3";
18355 }
18356
18357 if (nb < 32 && ne < 32)
18358 {
18359 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18360 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18361 operands[3] = GEN_INT (31 - nb);
18362 operands[4] = GEN_INT (31 - ne);
18363 /* This insn can also be a 64-bit rotate with mask that really makes
18364 it just a shift right (with mask); the %h below are to adjust for
18365 that situation (shift count is >= 32 in that case). */
18366 if (dot)
18367 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18368 return "rlw%I2nm %0,%1,%h2,%3,%4";
18369 }
18370
18371 gcc_unreachable ();
18372 }
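
/* For the accepted SImode case above, (x << 8) & 0xffffff00, the operands
   work out to SH == 8, MB == 0, ME == 23, so the emitted template is
   "rlwinm %0,%1,8,0,23".  */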
18373
18374 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18375 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18376 ASHIFT, or LSHIFTRT) in mode MODE. */
18377
18378 bool
18379 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18380 {
18381 int nb, ne;
18382
18383 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18384 return false;
18385
18386 int n = GET_MODE_PRECISION (mode);
18387
18388 int sh = INTVAL (XEXP (shift, 1));
18389 if (sh < 0 || sh >= n)
18390 return false;
18391
18392 rtx_code code = GET_CODE (shift);
18393
18394 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18395 if (sh == 0)
18396 code = ROTATE;
18397
18398 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18399 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18400 code = ASHIFT;
18401 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18402 {
18403 code = LSHIFTRT;
18404 sh = n - sh;
18405 }
18406
18407 /* DImode rotates need rldimi. */
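/* (rldimi's mask always begins at the rotate count, hence ne == sh.) */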
18408 if (mode == DImode && code == ROTATE)
18409 return (ne == sh);
18410
18411 /* SImode rotates need rlwimi. */
18412 if (mode == SImode && code == ROTATE)
18413 return (nb < 32 && ne < 32 && sh < 32);
18414
18415 /* Wrap-around masks are only okay for rotates. */
18416 if (ne > nb)
18417 return false;
18418
18419 /* Don't allow ASHIFT if the mask is wrong for that. */
18420 if (code == ASHIFT && ne < sh)
18421 return false;
18422
18423 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18424 if the mask is wrong for that. */
18425 if (nb < 32 && ne < 32 && sh < 32
18426 && !(code == LSHIFTRT && nb >= 32 - sh))
18427 return true;
18428
18429 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18430 if the mask is wrong for that. */
18431 if (code == LSHIFTRT)
18432 sh = 64 - sh;
18433 if (ne == sh)
18434 return !(code == LSHIFTRT && nb >= sh);
18435
18436 return false;
18437 }
18438
18439 /* Return the instruction template for an insert with mask in mode MODE, with
18440 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18441
18442 const char *
18443 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18444 {
18445 int nb, ne;
18446
18447 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18448 gcc_unreachable ();
18449
18450 /* Prefer rldimi because rlwimi is cracked. */
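/* (A cracked insn is split into two internal operations at decode, making
it more expensive. rldimi. compares the full 64-bit result against zero,
so it cannot stand in for a 32-bit record form; hence !dot || DImode.) */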
18451 if (TARGET_POWERPC64
18452 && (!dot || mode == DImode)
18453 && GET_CODE (operands[4]) != LSHIFTRT
18454 && ne == INTVAL (operands[2]))
18455 {
18456 operands[3] = GEN_INT (63 - nb);
18457 if (dot)
18458 return "rldimi. %0,%1,%2,%3";
18459 return "rldimi %0,%1,%2,%3";
18460 }
18461
18462 if (nb < 32 && ne < 32)
18463 {
18464 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18465 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18466 operands[3] = GEN_INT (31 - nb);
18467 operands[4] = GEN_INT (31 - ne);
18468 if (dot)
18469 return "rlwimi. %0,%1,%2,%3,%4";
18470 return "rlwimi %0,%1,%2,%3,%4";
18471 }
18472
18473 gcc_unreachable ();
18474 }
18475
18476 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18477 using two machine instructions. */
18478
18479 bool
18480 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18481 {
18482 /* There are two kinds of AND we can handle with two insns:
18483 1) those we can do with two rl* insns;
18484 2) ori[s];xori[s].
18485
18486 We do not handle that last case yet. */
18487
18488 /* If there is just one stretch of ones, we can do it. */
18489 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18490 return true;
18491
18492 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18493 one insn, we can do the whole thing with two. */
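/* For example, val = 0x0f0f gives bit1 = 0x0001 (lowest set bit),
bit2 = 0x0010 (lowest bit of the hole), bit3 = 0x0100 (first set bit
above the hole), so val + bit3 - bit2 = 0x0fff, a single run of ones. */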
18494 unsigned HOST_WIDE_INT val = INTVAL (c);
18495 unsigned HOST_WIDE_INT bit1 = val & -val;
18496 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18497 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18498 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18499 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18500 }
18501
18502 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18503 If EXPAND is true, split rotate-and-mask instructions we generate to
18504 their constituent parts as well (this is used during expand); if DOT
18505 is 1, make the last insn a record-form instruction clobbering the
18506 destination GPR and setting the CC reg (from operands[3]); if 2, set
18507 that GPR as well as the CC reg. */
18508
18509 void
18510 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18511 {
18512 gcc_assert (!(expand && dot));
18513
18514 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18515
18516 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18517 shift right. This generates better code than doing the masks without
18518 shifts, or shifting first right and then left. */
18519 int nb, ne;
18520 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18521 {
18522 gcc_assert (mode == DImode);
18523
18524 int shift = 63 - nb;
18525 if (expand)
18526 {
18527 rtx tmp1 = gen_reg_rtx (DImode);
18528 rtx tmp2 = gen_reg_rtx (DImode);
18529 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18530 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18531 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18532 }
18533 else
18534 {
18535 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18536 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18537 emit_move_insn (operands[0], tmp);
18538 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18539 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18540 }
18541 return;
18542 }
18543
18544 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18545 that does the rest. */
18546 unsigned HOST_WIDE_INT bit1 = val & -val;
18547 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18548 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18549 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18550
18551 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18552 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
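/* mask1 is ~(bit3 - bit2), i.e. all bits except the hole, so that
mask1 & mask2 == val. */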
18553
18554 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18555
18556 /* Two "no-rotate"-and-mask instructions, for SImode. */
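/* (E.g. for val = 0x0f0f: mask1 = 0xffffff0f is a wrap-around rlwinm mask
and mask2 = 0x0fff a contiguous one.) */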
18557 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18558 {
18559 gcc_assert (mode == SImode);
18560
18561 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18562 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18563 emit_move_insn (reg, tmp);
18564 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18565 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18566 return;
18567 }
18568
18569 gcc_assert (mode == DImode);
18570
18571 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18572 insns; we have to do the first in SImode, because it wraps. */
18573 if (mask2 <= 0xffffffff
18574 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18575 {
18576 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18577 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18578 GEN_INT (mask1));
18579 rtx reg_low = gen_lowpart (SImode, reg);
18580 emit_move_insn (reg_low, tmp);
18581 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18582 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18583 return;
18584 }
18585
18586 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18587 at the top end), rotate back and clear the other hole. */
18588 int right = exact_log2 (bit3);
18589 int left = 64 - right;
18590
18591 /* Rotate the mask too. */
18592 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
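/* (That is mask1 rotated right by RIGHT bits: the low RIGHT bits of the
old mask1 are exactly bit2 - 1.) */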
18593
18594 if (expand)
18595 {
18596 rtx tmp1 = gen_reg_rtx (DImode);
18597 rtx tmp2 = gen_reg_rtx (DImode);
18598 rtx tmp3 = gen_reg_rtx (DImode);
18599 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18600 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18601 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18602 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18603 }
18604 else
18605 {
18606 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18607 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18608 emit_move_insn (operands[0], tmp);
18609 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18610 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18611 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18612 }
18613 }
18614 \f
18615 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18616 for lfq and stfq insns iff the registers are hard registers. */
18617
18618 int
18619 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18620 {
18621 /* We might have been passed a SUBREG. */
18622 if (!REG_P (reg1) || !REG_P (reg2))
18623 return 0;
18624
18625 /* We might have been passed non-floating-point registers. */
18626 if (!FP_REGNO_P (REGNO (reg1))
18627 || !FP_REGNO_P (REGNO (reg2)))
18628 return 0;
18629
18630 return (REGNO (reg1) == REGNO (reg2) - 1);
18631 }
18632
18633 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18634 addr1 and addr2 must be in consecutive memory locations
18635 (addr2 == addr1 + 8). */
18636
18637 int
18638 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18639 {
18640 rtx addr1, addr2;
18641 unsigned int reg1, reg2;
18642 int offset1, offset2;
18643
18644 /* The mems cannot be volatile. */
18645 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18646 return 0;
18647
18648 addr1 = XEXP (mem1, 0);
18649 addr2 = XEXP (mem2, 0);
18650
18651 /* Extract an offset (if used) from the first addr. */
18652 if (GET_CODE (addr1) == PLUS)
18653 {
18654 /* If not a REG, return zero. */
18655 if (!REG_P (XEXP (addr1, 0)))
18656 return 0;
18657 else
18658 {
18659 reg1 = REGNO (XEXP (addr1, 0));
18660 /* The offset must be constant! */
18661 if (!CONST_INT_P (XEXP (addr1, 1)))
18662 return 0;
18663 offset1 = INTVAL (XEXP (addr1, 1));
18664 }
18665 }
18666 else if (!REG_P (addr1))
18667 return 0;
18668 else
18669 {
18670 reg1 = REGNO (addr1);
18671 /* This was a simple (mem (reg)) expression. Offset is 0. */
18672 offset1 = 0;
18673 }
18674
18675 /* And now for the second addr. */
18676 if (GET_CODE (addr2) == PLUS)
18677 {
18678 /* If not a REG, return zero. */
18679 if (!REG_P (XEXP (addr2, 0)))
18680 return 0;
18681 else
18682 {
18683 reg2 = REGNO (XEXP (addr2, 0));
18684 /* The offset must be constant. */
18685 if (!CONST_INT_P (XEXP (addr2, 1)))
18686 return 0;
18687 offset2 = INTVAL (XEXP (addr2, 1));
18688 }
18689 }
18690 else if (!REG_P (addr2))
18691 return 0;
18692 else
18693 {
18694 reg2 = REGNO (addr2);
18695 /* This was a simple (mem (reg)) expression. Offset is 0. */
18696 offset2 = 0;
18697 }
18698
18699 /* Both of these must have the same base register. */
18700 if (reg1 != reg2)
18701 return 0;
18702
18703 /* The offset for the second addr must be 8 more than the first addr. */
18704 if (offset2 != offset1 + 8)
18705 return 0;
18706
18707 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18708 instructions. */
18709 return 1;
18710 }
18711 \f
18712 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18713 need to use DDmode; in all other cases we can use the same mode. */
18714 static machine_mode
18715 rs6000_secondary_memory_needed_mode (machine_mode mode)
18716 {
18717 if (lra_in_progress && mode == SDmode)
18718 return DDmode;
18719 return mode;
18720 }
18721
18722 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18723 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18724 only work on the traditional altivec registers, note if an altivec register
18725 was chosen. */
18726
18727 static enum rs6000_reg_type
18728 register_to_reg_type (rtx reg, bool *is_altivec)
18729 {
18730 HOST_WIDE_INT regno;
18731 enum reg_class rclass;
18732
18733 if (SUBREG_P (reg))
18734 reg = SUBREG_REG (reg);
18735
18736 if (!REG_P (reg))
18737 return NO_REG_TYPE;
18738
18739 regno = REGNO (reg);
18740 if (!HARD_REGISTER_NUM_P (regno))
18741 {
18742 if (!lra_in_progress && !reload_completed)
18743 return PSEUDO_REG_TYPE;
18744
18745 regno = true_regnum (reg);
18746 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
18747 return PSEUDO_REG_TYPE;
18748 }
18749
18750 gcc_assert (regno >= 0);
18751
18752 if (is_altivec && ALTIVEC_REGNO_P (regno))
18753 *is_altivec = true;
18754
18755 rclass = rs6000_regno_regclass[regno];
18756 return reg_class_to_reg_type[(int)rclass];
18757 }
18758
18759 /* Helper function to return the cost of adding a TOC entry address. */
18760
18761 static inline int
18762 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18763 {
18764 int ret;
18765
18766 if (TARGET_CMODEL != CMODEL_SMALL)
18767 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18768
18769 else
18770 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18771
18772 return ret;
18773 }
18774
18775 /* Helper function for rs6000_secondary_reload to determine whether the memory
18776 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18777 needs reloading. Return negative if the memory is not handled by the memory
18778 helper functions and to try a different reload method, 0 if no additional
18779 instructions are need, and positive to give the extra cost for the
18780 memory. */
18781
18782 static int
18783 rs6000_secondary_reload_memory (rtx addr,
18784 enum reg_class rclass,
18785 machine_mode mode)
18786 {
18787 int extra_cost = 0;
18788 rtx reg, and_arg, plus_arg0, plus_arg1;
18789 addr_mask_type addr_mask;
18790 const char *type = NULL;
18791 const char *fail_msg = NULL;
18792
18793 if (GPR_REG_CLASS_P (rclass))
18794 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18795
18796 else if (rclass == FLOAT_REGS)
18797 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18798
18799 else if (rclass == ALTIVEC_REGS)
18800 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18801
18802 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18803 else if (rclass == VSX_REGS)
18804 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18805 & ~RELOAD_REG_AND_M16);
18806
18807 /* If the register allocator hasn't made up its mind yet on the register
18808 class to use, settle on some defaults. */
18809 else if (rclass == NO_REGS)
18810 {
18811 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18812 & ~RELOAD_REG_AND_M16);
18813
18814 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18815 addr_mask &= ~(RELOAD_REG_INDEXED
18816 | RELOAD_REG_PRE_INCDEC
18817 | RELOAD_REG_PRE_MODIFY);
18818 }
18819
18820 else
18821 addr_mask = 0;
18822
18823 /* If the mode isn't valid in this register class, just return now. */
18824 if ((addr_mask & RELOAD_REG_VALID) == 0)
18825 {
18826 if (TARGET_DEBUG_ADDR)
18827 {
18828 fprintf (stderr,
18829 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18830 "not valid in class\n",
18831 GET_MODE_NAME (mode), reg_class_names[rclass]);
18832 debug_rtx (addr);
18833 }
18834
18835 return -1;
18836 }
18837
18838 switch (GET_CODE (addr))
18839 {
18840 /* Does the register class support auto update forms for this mode? We
18841 don't need a scratch register, since the powerpc only supports
18842 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18843 case PRE_INC:
18844 case PRE_DEC:
18845 reg = XEXP (addr, 0);
18846 if (!base_reg_operand (addr, GET_MODE (reg)))
18847 {
18848 fail_msg = "no base register #1";
18849 extra_cost = -1;
18850 }
18851
18852 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18853 {
18854 extra_cost = 1;
18855 type = "update";
18856 }
18857 break;
18858
18859 case PRE_MODIFY:
18860 reg = XEXP (addr, 0);
18861 plus_arg1 = XEXP (addr, 1);
18862 if (!base_reg_operand (reg, GET_MODE (reg))
18863 || GET_CODE (plus_arg1) != PLUS
18864 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18865 {
18866 fail_msg = "bad PRE_MODIFY";
18867 extra_cost = -1;
18868 }
18869
18870 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18871 {
18872 extra_cost = 1;
18873 type = "update";
18874 }
18875 break;
18876
18877 /* Do we need to simulate AND -16 to clear the bottom address bits used
18878 in VMX load/stores? Only allow the AND for vector sizes. */
18879 case AND:
18880 and_arg = XEXP (addr, 0);
18881 if (GET_MODE_SIZE (mode) != 16
18882 || !CONST_INT_P (XEXP (addr, 1))
18883 || INTVAL (XEXP (addr, 1)) != -16)
18884 {
18885 fail_msg = "bad Altivec AND #1";
18886 extra_cost = -1;
18887 }
18888
18889 if (rclass != ALTIVEC_REGS)
18890 {
18891 if (legitimate_indirect_address_p (and_arg, false))
18892 extra_cost = 1;
18893
18894 else if (legitimate_indexed_address_p (and_arg, false))
18895 extra_cost = 2;
18896
18897 else
18898 {
18899 fail_msg = "bad Altivec AND #2";
18900 extra_cost = -1;
18901 }
18902
18903 type = "and";
18904 }
18905 break;
18906
18907 /* If this is an indirect address, make sure it is a base register. */
18908 case REG:
18909 case SUBREG:
18910 if (!legitimate_indirect_address_p (addr, false))
18911 {
18912 extra_cost = 1;
18913 type = "move";
18914 }
18915 break;
18916
18917 /* If this is an indexed address, make sure the register class can handle
18918 indexed addresses for this mode. */
18919 case PLUS:
18920 plus_arg0 = XEXP (addr, 0);
18921 plus_arg1 = XEXP (addr, 1);
18922
18923 /* (plus (plus (reg) (constant)) (constant)) is generated during
18924 push_reload processing, so handle it now. */
18925 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18926 {
18927 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18928 {
18929 extra_cost = 1;
18930 type = "offset";
18931 }
18932 }
18933
18934 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18935 push_reload processing, so handle it now. */
18936 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18937 {
18938 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18939 {
18940 extra_cost = 1;
18941 type = "indexed #2";
18942 }
18943 }
18944
18945 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18946 {
18947 fail_msg = "no base register #2";
18948 extra_cost = -1;
18949 }
18950
18951 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18952 {
18953 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18954 || !legitimate_indexed_address_p (addr, false))
18955 {
18956 extra_cost = 1;
18957 type = "indexed";
18958 }
18959 }
18960
18961 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18962 && CONST_INT_P (plus_arg1))
18963 {
18964 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18965 {
18966 extra_cost = 1;
18967 type = "vector d-form offset";
18968 }
18969 }
18970
18971 /* Make sure the register class can handle offset addresses. */
18972 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18973 {
18974 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18975 {
18976 extra_cost = 1;
18977 type = "offset #2";
18978 }
18979 }
18980
18981 else
18982 {
18983 fail_msg = "bad PLUS";
18984 extra_cost = -1;
18985 }
18986
18987 break;
18988
18989 case LO_SUM:
18990 /* Quad offsets are restricted and can't handle normal addresses. */
18991 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18992 {
18993 extra_cost = -1;
18994 type = "vector d-form lo_sum";
18995 }
18996
18997 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18998 {
18999 fail_msg = "bad LO_SUM";
19000 extra_cost = -1;
19001 }
19002
19003 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19004 {
19005 extra_cost = 1;
19006 type = "lo_sum";
19007 }
19008 break;
19009
19010 /* Static addresses need to create a TOC entry. */
19011 case CONST:
19012 case SYMBOL_REF:
19013 case LABEL_REF:
19014 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19015 {
19016 extra_cost = -1;
19017 type = "vector d-form lo_sum #2";
19018 }
19019
19020 else
19021 {
19022 type = "address";
19023 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
19024 }
19025 break;
19026
19027 /* TOC references look like offsettable memory. */
19028 case UNSPEC:
19029 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
19030 {
19031 fail_msg = "bad UNSPEC";
19032 extra_cost = -1;
19033 }
19034
19035 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19036 {
19037 extra_cost = -1;
19038 type = "vector d-form lo_sum #3";
19039 }
19040
19041 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19042 {
19043 extra_cost = 1;
19044 type = "toc reference";
19045 }
19046 break;
19047
19048 default:
19049 {
19050 fail_msg = "bad address";
19051 extra_cost = -1;
19052 }
19053 }
19054
19055 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19056 {
19057 if (extra_cost < 0)
19058 fprintf (stderr,
19059 "rs6000_secondary_reload_memory error: mode = %s, "
19060 "class = %s, addr_mask = '%s', %s\n",
19061 GET_MODE_NAME (mode),
19062 reg_class_names[rclass],
19063 rs6000_debug_addr_mask (addr_mask, false),
19064 (fail_msg != NULL) ? fail_msg : "<bad address>");
19065
19066 else
19067 fprintf (stderr,
19068 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19069 "addr_mask = '%s', extra cost = %d, %s\n",
19070 GET_MODE_NAME (mode),
19071 reg_class_names[rclass],
19072 rs6000_debug_addr_mask (addr_mask, false),
19073 extra_cost,
19074 (type) ? type : "<none>");
19075
19076 debug_rtx (addr);
19077 }
19078
19079 return extra_cost;
19080 }
19081
19082 /* Helper function for rs6000_secondary_reload to return true if a move to a
19083 different register class is really a simple move. */
19084
19085 static bool
19086 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19087 enum rs6000_reg_type from_type,
19088 machine_mode mode)
19089 {
19090 int size = GET_MODE_SIZE (mode);
19091
19092 /* Add support for various direct moves available. In this function, we only
19093 look at cases where we don't need any extra registers, and one or more
19094 simple move insns are issued. Originally, small integers were not allowed
19095 in FPR/VSX registers. Single precision binary floating point is not a
19096 simple move because we need to convert to the single precision memory layout.
19097 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19098 need special direct move handling, which we do not support yet. */
19099 if (TARGET_DIRECT_MOVE
19100 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19101 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19102 {
19103 if (TARGET_POWERPC64)
19104 {
19105 /* ISA 2.07: MTVSRD or MFVSRD. */
19106 if (size == 8)
19107 return true;
19108
19109 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19110 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19111 return true;
19112 }
19113
19114 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19115 if (TARGET_P8_VECTOR)
19116 {
19117 if (mode == SImode)
19118 return true;
19119
19120 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19121 return true;
19122 }
19123
19124 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19125 if (mode == SDmode)
19126 return true;
19127 }
19128
19129 /* Power6+: MFTGPR or MFFGPR. */
19130 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19131 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19132 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19133 return true;
19134
19135 /* Move to/from SPR. */
19136 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19137 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19138 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19139 return true;
19140
19141 return false;
19142 }
19143
19144 /* Direct move helper function for rs6000_secondary_reload: handle all of the
19145 special direct moves that involve allocating an extra register. Return
19146 true if there is such a helper function, recording its insn code and cost
19147 in SRI; return false otherwise. */
19148
19149 static bool
19150 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19151 enum rs6000_reg_type from_type,
19152 machine_mode mode,
19153 secondary_reload_info *sri,
19154 bool altivec_p)
19155 {
19156 bool ret = false;
19157 enum insn_code icode = CODE_FOR_nothing;
19158 int cost = 0;
19159 int size = GET_MODE_SIZE (mode);
19160
19161 if (TARGET_POWERPC64 && size == 16)
19162 {
19163 /* Handle moving 128-bit values from GPRs to VSX registers on
19164 ISA 2.07 (power8, power9) when running in 64-bit mode using
19165 XXPERMDI to glue the two 64-bit values back together. */
19166 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19167 {
19168 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19169 icode = reg_addr[mode].reload_vsx_gpr;
19170 }
19171
19172 /* Handle moving 128-bit values from VSX registers to GPRs on
19173 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19174 bottom 64-bit value. */
19175 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19176 {
19177 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19178 icode = reg_addr[mode].reload_gpr_vsx;
19179 }
19180 }
19181
19182 else if (TARGET_POWERPC64 && mode == SFmode)
19183 {
19184 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19185 {
19186 cost = 3; /* xscvdpspn, mfvsrd, and. */
19187 icode = reg_addr[mode].reload_gpr_vsx;
19188 }
19189
19190 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19191 {
19192 cost = 2; /* mtvsrz, xscvspdpn. */
19193 icode = reg_addr[mode].reload_vsx_gpr;
19194 }
19195 }
19196
19197 else if (!TARGET_POWERPC64 && size == 8)
19198 {
19199 /* Handle moving 64-bit values from GPRs to floating point registers on
19200 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19201 32-bit values back together. Altivec register classes must be handled
19202 specially since a different instruction is used, and the secondary
19203 reload support requires a single instruction class in the scratch
19204 register constraint. However, right now TFmode is not allowed in
19205 Altivec registers, so the pattern will never match. */
19206 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19207 {
19208 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19209 icode = reg_addr[mode].reload_fpr_gpr;
19210 }
19211 }
19212
19213 if (icode != CODE_FOR_nothing)
19214 {
19215 ret = true;
19216 if (sri)
19217 {
19218 sri->icode = icode;
19219 sri->extra_cost = cost;
19220 }
19221 }
19222
19223 return ret;
19224 }
19225
19226 /* Return whether a move between two register classes can be done either
19227 directly (simple move) or via a pattern that uses a single extra temporary
19228 (using ISA 2.07's direct move in this case). */
19229
19230 static bool
19231 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19232 enum rs6000_reg_type from_type,
19233 machine_mode mode,
19234 secondary_reload_info *sri,
19235 bool altivec_p)
19236 {
19237 /* Fall back to load/store reloads if either type is not a register. */
19238 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19239 return false;
19240
19241 /* If we haven't allocated registers yet, assume the move can be done for the
19242 standard register types. */
19243 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19244 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19245 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19246 return true;
19247
19248 /* A move within the same set of registers is a simple move for
19249 non-specialized registers. */
19250 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19251 return true;
19252
19253 /* Check whether a simple move can be done directly. */
19254 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19255 {
19256 if (sri)
19257 {
19258 sri->icode = CODE_FOR_nothing;
19259 sri->extra_cost = 0;
19260 }
19261 return true;
19262 }
19263
19264 /* Now check if we can do it in a few steps. */
19265 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19266 altivec_p);
19267 }
19268
19269 /* Inform reload about cases where moving X with a mode MODE to a register in
19270 RCLASS requires an extra scratch or immediate register. Return the class
19271 needed for the immediate register.
19272
19273 For VSX and Altivec, we may need a register to convert sp+offset into
19274 reg+sp.
19275
19276 For misaligned 64-bit gpr loads and stores we need a register to
19277 convert an offset address to indirect. */
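/* For example, "ld" is a DS-form instruction whose displacement must be a
multiple of 4, so a 64-bit load from r4+5 needs the address moved into a
scratch register first. */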
19278
19279 static reg_class_t
19280 rs6000_secondary_reload (bool in_p,
19281 rtx x,
19282 reg_class_t rclass_i,
19283 machine_mode mode,
19284 secondary_reload_info *sri)
19285 {
19286 enum reg_class rclass = (enum reg_class) rclass_i;
19287 reg_class_t ret = ALL_REGS;
19288 enum insn_code icode;
19289 bool default_p = false;
19290 bool done_p = false;
19291
19292 /* Allow subreg of memory before/during reload. */
19293 bool memory_p = (MEM_P (x)
19294 || (!reload_completed && SUBREG_P (x)
19295 && MEM_P (SUBREG_REG (x))));
19296
19297 sri->icode = CODE_FOR_nothing;
19298 sri->t_icode = CODE_FOR_nothing;
19299 sri->extra_cost = 0;
19300 icode = ((in_p)
19301 ? reg_addr[mode].reload_load
19302 : reg_addr[mode].reload_store);
19303
19304 if (REG_P (x) || register_operand (x, mode))
19305 {
19306 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19307 bool altivec_p = (rclass == ALTIVEC_REGS);
19308 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19309
19310 if (!in_p)
19311 std::swap (to_type, from_type);
19312
19313 /* Can we do a direct move of some sort? */
19314 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19315 altivec_p))
19316 {
19317 icode = (enum insn_code)sri->icode;
19318 default_p = false;
19319 done_p = true;
19320 ret = NO_REGS;
19321 }
19322 }
19323
19324 /* Make sure 0.0 is not reloaded or forced into memory. */
19325 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19326 {
19327 ret = NO_REGS;
19328 default_p = false;
19329 done_p = true;
19330 }
19331
19332 /* If this is a scalar floating point value and we want to load it into the
19333 traditional Altivec registers, do it via a move through a traditional
19334 floating point register, unless we have D-form addressing. Also make sure
19335 that non-zero constants use an FPR. */
19336 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19337 && !mode_supports_vmx_dform (mode)
19338 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19339 && (memory_p || CONST_DOUBLE_P (x)))
19340 {
19341 ret = FLOAT_REGS;
19342 default_p = false;
19343 done_p = true;
19344 }
19345
19346 /* Handle reload of load/stores if we have reload helper functions. */
19347 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19348 {
19349 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19350 mode);
19351
19352 if (extra_cost >= 0)
19353 {
19354 done_p = true;
19355 ret = NO_REGS;
19356 if (extra_cost > 0)
19357 {
19358 sri->extra_cost = extra_cost;
19359 sri->icode = icode;
19360 }
19361 }
19362 }
19363
19364 /* Handle unaligned loads and stores of integer registers. */
19365 if (!done_p && TARGET_POWERPC64
19366 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19367 && memory_p
19368 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19369 {
19370 rtx addr = XEXP (x, 0);
19371 rtx off = address_offset (addr);
19372
19373 if (off != NULL_RTX)
19374 {
19375 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19376 unsigned HOST_WIDE_INT offset = INTVAL (off);
19377
19378 /* We need a secondary reload when our legitimate_address_p
19379 says the address is good (as otherwise the entire address
19380 will be reloaded), and the offset is not a multiple of
19381 four or we have an address wrap. Address wrap will only
19382 occur for LO_SUMs since legitimate_offset_address_p
19383 rejects addresses for 16-byte mems that will wrap. */
19384 if (GET_CODE (addr) == LO_SUM
19385 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19386 && ((offset & 3) != 0
19387 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19388 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19389 && (offset & 3) != 0))
19390 {
19391 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19392 if (in_p)
19393 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19394 : CODE_FOR_reload_di_load);
19395 else
19396 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19397 : CODE_FOR_reload_di_store);
19398 sri->extra_cost = 2;
19399 ret = NO_REGS;
19400 done_p = true;
19401 }
19402 else
19403 default_p = true;
19404 }
19405 else
19406 default_p = true;
19407 }
19408
19409 if (!done_p && !TARGET_POWERPC64
19410 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19411 && memory_p
19412 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19413 {
19414 rtx addr = XEXP (x, 0);
19415 rtx off = address_offset (addr);
19416
19417 if (off != NULL_RTX)
19418 {
19419 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19420 unsigned HOST_WIDE_INT offset = INTVAL (off);
19421
19422 /* We need a secondary reload when our legitimate_address_p
19423 says the address is good (as otherwise the entire address
19424 will be reloaded), and we have a wrap.
19425
19426 legitimate_lo_sum_address_p allows LO_SUM addresses to
19427 have any offset so test for wrap in the low 16 bits.
19428
19429 legitimate_offset_address_p checks for the range
19430 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19431 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19432 [0x7ff4,0x7fff] respectively, so test for the
19433 intersection of these ranges, [0x7ffc,0x7fff] and
19434 [0x7ff4,0x7ff7] respectively.
19435
19436 Note that the address we see here may have been
19437 manipulated by legitimize_reload_address. */
19438 if (GET_CODE (addr) == LO_SUM
19439 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19440 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19441 {
19442 if (in_p)
19443 sri->icode = CODE_FOR_reload_si_load;
19444 else
19445 sri->icode = CODE_FOR_reload_si_store;
19446 sri->extra_cost = 2;
19447 ret = NO_REGS;
19448 done_p = true;
19449 }
19450 else
19451 default_p = true;
19452 }
19453 else
19454 default_p = true;
19455 }
19456
19457 if (!done_p)
19458 default_p = true;
19459
19460 if (default_p)
19461 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19462
19463 gcc_assert (ret != ALL_REGS);
19464
19465 if (TARGET_DEBUG_ADDR)
19466 {
19467 fprintf (stderr,
19468 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19469 "mode = %s",
19470 reg_class_names[ret],
19471 in_p ? "true" : "false",
19472 reg_class_names[rclass],
19473 GET_MODE_NAME (mode));
19474
19475 if (reload_completed)
19476 fputs (", after reload", stderr);
19477
19478 if (!done_p)
19479 fputs (", done_p not set", stderr);
19480
19481 if (default_p)
19482 fputs (", default secondary reload", stderr);
19483
19484 if (sri->icode != CODE_FOR_nothing)
19485 fprintf (stderr, ", reload func = %s, extra cost = %d",
19486 insn_data[sri->icode].name, sri->extra_cost);
19487
19488 else if (sri->extra_cost > 0)
19489 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19490
19491 fputs ("\n", stderr);
19492 debug_rtx (x);
19493 }
19494
19495 return ret;
19496 }
19497
19498 /* Better tracing for rs6000_secondary_reload_inner. */
19499
19500 static void
19501 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19502 bool store_p)
19503 {
19504 rtx set, clobber;
19505
19506 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19507
19508 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19509 store_p ? "store" : "load");
19510
19511 if (store_p)
19512 set = gen_rtx_SET (mem, reg);
19513 else
19514 set = gen_rtx_SET (reg, mem);
19515
19516 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19517 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19518 }
19519
19520 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19521 ATTRIBUTE_NORETURN;
19522
19523 static void
19524 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19525 bool store_p)
19526 {
19527 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19528 gcc_unreachable ();
19529 }
19530
19531 /* Fix up reload addresses for values in GPR, FPR, and VMX registers that have
19532 reload helper functions. These were identified in
19533 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19534 reload, it calls the insns:
19535 reload_<RELOAD:mode>_<P:mptrsize>_store
19536 reload_<RELOAD:mode>_<P:mptrsize>_load
19537
19538 which in turn calls this function, to do whatever is necessary to create
19539 valid addresses. */
19540
19541 void
19542 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19543 {
19544 int regno = true_regnum (reg);
19545 machine_mode mode = GET_MODE (reg);
19546 addr_mask_type addr_mask;
19547 rtx addr;
19548 rtx new_addr;
19549 rtx op_reg, op0, op1;
19550 rtx and_op;
19551 rtx cc_clobber;
19552 rtvec rv;
19553
19554 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19555 || !base_reg_operand (scratch, GET_MODE (scratch)))
19556 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19557
19558 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19559 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19560
19561 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19562 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19563
19564 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19565 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19566
19567 else
19568 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19569
19570 /* Make sure the mode is valid in this register class. */
19571 if ((addr_mask & RELOAD_REG_VALID) == 0)
19572 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19573
19574 if (TARGET_DEBUG_ADDR)
19575 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19576
19577 new_addr = addr = XEXP (mem, 0);
19578 switch (GET_CODE (addr))
19579 {
19580 /* Does the register class support auto update forms for this mode? If
19581 not, do the update now. We don't need a scratch register, since the
19582 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19583 case PRE_INC:
19584 case PRE_DEC:
19585 op_reg = XEXP (addr, 0);
19586 if (!base_reg_operand (op_reg, Pmode))
19587 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19588
19589 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19590 {
19591 int delta = GET_MODE_SIZE (mode);
19592 if (GET_CODE (addr) == PRE_DEC)
19593 delta = -delta;
19594 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19595 new_addr = op_reg;
19596 }
19597 break;
19598
19599 case PRE_MODIFY:
19600 op0 = XEXP (addr, 0);
19601 op1 = XEXP (addr, 1);
19602 if (!base_reg_operand (op0, Pmode)
19603 || GET_CODE (op1) != PLUS
19604 || !rtx_equal_p (op0, XEXP (op1, 0)))
19605 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19606
19607 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19608 {
19609 emit_insn (gen_rtx_SET (op0, op1));
19610 new_addr = op0;
19611 }
19612 break;
19613
19614 /* Do we need to simulate AND -16 to clear the bottom address bits used
19615 in VMX load/stores? */
19616 case AND:
19617 op0 = XEXP (addr, 0);
19618 op1 = XEXP (addr, 1);
19619 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19620 {
19621 if (REG_P (op0) || SUBREG_P (op0))
19622 op_reg = op0;
19623
19624 else if (GET_CODE (op1) == PLUS)
19625 {
19626 emit_insn (gen_rtx_SET (scratch, op1));
19627 op_reg = scratch;
19628 }
19629
19630 else
19631 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19632
19633 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19634 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19635 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19636 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19637 new_addr = scratch;
19638 }
19639 break;
19640
19641 /* If this is an indirect address, make sure it is a base register. */
19642 case REG:
19643 case SUBREG:
19644 if (!base_reg_operand (addr, GET_MODE (addr)))
19645 {
19646 emit_insn (gen_rtx_SET (scratch, addr));
19647 new_addr = scratch;
19648 }
19649 break;
19650
19651 /* If this is an indexed address, make sure the register class can handle
19652 indexed addresses for this mode. */
19653 case PLUS:
19654 op0 = XEXP (addr, 0);
19655 op1 = XEXP (addr, 1);
19656 if (!base_reg_operand (op0, Pmode))
19657 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19658
19659 else if (int_reg_operand (op1, Pmode))
19660 {
19661 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19662 {
19663 emit_insn (gen_rtx_SET (scratch, addr));
19664 new_addr = scratch;
19665 }
19666 }
19667
19668 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19669 {
19670 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19671 || !quad_address_p (addr, mode, false))
19672 {
19673 emit_insn (gen_rtx_SET (scratch, addr));
19674 new_addr = scratch;
19675 }
19676 }
19677
19678 /* Make sure the register class can handle offset addresses. */
19679 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19680 {
19681 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19682 {
19683 emit_insn (gen_rtx_SET (scratch, addr));
19684 new_addr = scratch;
19685 }
19686 }
19687
19688 else
19689 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19690
19691 break;
19692
19693 case LO_SUM:
19694 op0 = XEXP (addr, 0);
19695 op1 = XEXP (addr, 1);
19696 if (!base_reg_operand (op0, Pmode))
19697 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19698
19699 else if (int_reg_operand (op1, Pmode))
19700 {
19701 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19702 {
19703 emit_insn (gen_rtx_SET (scratch, addr));
19704 new_addr = scratch;
19705 }
19706 }
19707
19708 /* Quad offsets are restricted and can't handle normal addresses. */
19709 else if (mode_supports_dq_form (mode))
19710 {
19711 emit_insn (gen_rtx_SET (scratch, addr));
19712 new_addr = scratch;
19713 }
19714
19715 /* Make sure the register class can handle offset addresses. */
19716 else if (legitimate_lo_sum_address_p (mode, addr, false))
19717 {
19718 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19719 {
19720 emit_insn (gen_rtx_SET (scratch, addr));
19721 new_addr = scratch;
19722 }
19723 }
19724
19725 else
19726 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19727
19728 break;
19729
19730 case SYMBOL_REF:
19731 case CONST:
19732 case LABEL_REF:
19733 rs6000_emit_move (scratch, addr, Pmode);
19734 new_addr = scratch;
19735 break;
19736
19737 default:
19738 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19739 }
19740
19741 /* Adjust the address if it changed. */
19742 if (addr != new_addr)
19743 {
19744 mem = replace_equiv_address_nv (mem, new_addr);
19745 if (TARGET_DEBUG_ADDR)
19746 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19747 }
19748
19749 /* Now create the move. */
19750 if (store_p)
19751 emit_insn (gen_rtx_SET (mem, reg));
19752 else
19753 emit_insn (gen_rtx_SET (reg, mem));
19754
19755 return;
19756 }
19757
19758 /* Convert reloads involving 64-bit gprs and misaligned offset
19759 addressing, or multiple 32-bit gprs and offsets that are too large,
19760 to use indirect addressing. */
19761
19762 void
19763 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19764 {
19765 int regno = true_regnum (reg);
19766 enum reg_class rclass;
19767 rtx addr;
19768 rtx scratch_or_premodify = scratch;
19769
19770 if (TARGET_DEBUG_ADDR)
19771 {
19772 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19773 store_p ? "store" : "load");
19774 fprintf (stderr, "reg:\n");
19775 debug_rtx (reg);
19776 fprintf (stderr, "mem:\n");
19777 debug_rtx (mem);
19778 fprintf (stderr, "scratch:\n");
19779 debug_rtx (scratch);
19780 }
19781
19782 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
19783 gcc_assert (MEM_P (mem));
19784 rclass = REGNO_REG_CLASS (regno);
19785 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19786 addr = XEXP (mem, 0);
19787
19788 if (GET_CODE (addr) == PRE_MODIFY)
19789 {
19790 gcc_assert (REG_P (XEXP (addr, 0))
19791 && GET_CODE (XEXP (addr, 1)) == PLUS
19792 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19793 scratch_or_premodify = XEXP (addr, 0);
19794 addr = XEXP (addr, 1);
19795 }
19796 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19797
19798 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19799
19800 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19801
19802 /* Now create the move. */
19803 if (store_p)
19804 emit_insn (gen_rtx_SET (mem, reg));
19805 else
19806 emit_insn (gen_rtx_SET (reg, mem));
19807
19808 return;
19809 }
19810
19811 /* Given an rtx X being reloaded into a reg required to be
19812 in class CLASS, return the class of reg to actually use.
19813 In general this is just CLASS; but on some machines
19814 in some cases it is preferable to use a more restrictive class.
19815
19816 On the RS/6000, we have to return NO_REGS when we want to reload a
19817 floating-point CONST_DOUBLE to force it to be copied to memory.
19818
19819 We also don't want to reload integer values into floating-point
19820 registers if we can at all help it. In fact, this can
19821 cause reload to die, if it tries to generate a reload of CTR
19822 into a FP register and discovers it doesn't have the memory location
19823 required.
19824
19825 ??? Would it be a good idea to have reload do the converse, that is
19826 try to reload floating modes into FP registers if possible?
19827 */
19828
19829 static enum reg_class
19830 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19831 {
19832 machine_mode mode = GET_MODE (x);
19833 bool is_constant = CONSTANT_P (x);
19834
19835 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19836 reload class for it. */
19837 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19838 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19839 return NO_REGS;
19840
19841 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19842 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19843 return NO_REGS;
19844
19845 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19846 the reloading of address expressions using PLUS into floating point
19847 registers. */
19848 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19849 {
19850 if (is_constant)
19851 {
19852 /* Zero is always allowed in all VSX registers. */
19853 if (x == CONST0_RTX (mode))
19854 return rclass;
19855
19856 /* If this is a vector constant that can be formed with a few Altivec
19857 instructions, we want altivec registers. */
19858 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19859 return ALTIVEC_REGS;
19860
19861 /* If this is an integer constant that can easily be loaded into
19862 vector registers, allow it. */
19863 if (CONST_INT_P (x))
19864 {
19865 HOST_WIDE_INT value = INTVAL (x);
19866
19867 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19868 2.06 can generate it in the Altivec registers with
19869 VSPLTI<x>. */
19870 if (value == -1)
19871 {
19872 if (TARGET_P8_VECTOR)
19873 return rclass;
19874 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19875 return ALTIVEC_REGS;
19876 else
19877 return NO_REGS;
19878 }
19879
19880 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19881 a sign extend in the Altivec registers. */
19882 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19883 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19884 return ALTIVEC_REGS;
19885 }
19886
19887 /* Force constant to memory. */
19888 return NO_REGS;
19889 }
19890
19891 /* D-form addressing can easily reload the value. */
19892 if (mode_supports_vmx_dform (mode)
19893 || mode_supports_dq_form (mode))
19894 return rclass;
19895
19896 /* If this is a scalar floating point value and we don't have D-form
19897 addressing, prefer the traditional floating point registers so that we
19898 can use D-form (register+offset) addressing. */
19899 if (rclass == VSX_REGS
19900 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19901 return FLOAT_REGS;
19902
19903 /* Prefer the Altivec registers if Altivec is handling the vector
19904 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19905 loads. */
19906 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19907 || mode == V1TImode)
19908 return ALTIVEC_REGS;
19909
19910 return rclass;
19911 }
19912
19913 if (is_constant || GET_CODE (x) == PLUS)
19914 {
19915 if (reg_class_subset_p (GENERAL_REGS, rclass))
19916 return GENERAL_REGS;
19917 if (reg_class_subset_p (BASE_REGS, rclass))
19918 return BASE_REGS;
19919 return NO_REGS;
19920 }
19921
19922 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
19923 return GENERAL_REGS;
19924
19925 return rclass;
19926 }
19927
19928 /* Debug version of rs6000_preferred_reload_class. */
19929 static enum reg_class
19930 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19931 {
19932 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19933
19934 fprintf (stderr,
19935 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19936 "mode = %s, x:\n",
19937 reg_class_names[ret], reg_class_names[rclass],
19938 GET_MODE_NAME (GET_MODE (x)));
19939 debug_rtx (x);
19940
19941 return ret;
19942 }
19943
19944 /* If we are copying between FP or AltiVec registers and anything else, we need
19945 a memory location. The exception is when we are targeting ppc64 and the
19946 direct move instructions between fpr and gpr are available. Also, under VSX, you
19947 can copy vector registers from the FP register set to the Altivec register
19948 set and vice versa. */
19949
19950 static bool
19951 rs6000_secondary_memory_needed (machine_mode mode,
19952 reg_class_t from_class,
19953 reg_class_t to_class)
19954 {
19955 enum rs6000_reg_type from_type, to_type;
19956 bool altivec_p = ((from_class == ALTIVEC_REGS)
19957 || (to_class == ALTIVEC_REGS));
19958
19959 /* If a simple/direct move is available, we don't need secondary memory. */
19960 from_type = reg_class_to_reg_type[(int)from_class];
19961 to_type = reg_class_to_reg_type[(int)to_class];
19962
19963 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19964 (secondary_reload_info *)0, altivec_p))
19965 return false;
19966
19967 /* If we have a floating point or vector register class, we need to use
19968 memory to transfer the data. */
19969 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19970 return true;
19971
19972 return false;
19973 }
19974
19975 /* Debug version of rs6000_secondary_memory_needed. */
19976 static bool
19977 rs6000_debug_secondary_memory_needed (machine_mode mode,
19978 reg_class_t from_class,
19979 reg_class_t to_class)
19980 {
19981 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19982
19983 fprintf (stderr,
19984 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19985 "to_class = %s, mode = %s\n",
19986 ret ? "true" : "false",
19987 reg_class_names[from_class],
19988 reg_class_names[to_class],
19989 GET_MODE_NAME (mode));
19990
19991 return ret;
19992 }
19993
19994 /* Return the register class of a scratch register needed to copy IN into
19995 or out of a register in RCLASS in MODE. If it can be done directly,
19996 NO_REGS is returned. */
19997
19998 static enum reg_class
19999 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
20000 rtx in)
20001 {
20002 int regno;
20003
20004 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
20005 #if TARGET_MACHO
20006 && MACHOPIC_INDIRECT
20007 #endif
20008 ))
20009 {
20010 /* We cannot copy a symbolic operand directly into anything
20011 other than BASE_REGS for TARGET_ELF. So indicate that a
20012 register from BASE_REGS is needed as an intermediate
20013 register.
20014
20015 On Darwin, pic addresses require a load from memory, which
20016 needs a base register. */
20017 if (rclass != BASE_REGS
20018 && (SYMBOL_REF_P (in)
20019 || GET_CODE (in) == HIGH
20020 || GET_CODE (in) == LABEL_REF
20021 || GET_CODE (in) == CONST))
20022 return BASE_REGS;
20023 }
20024
20025 if (REG_P (in))
20026 {
20027 regno = REGNO (in);
20028 if (!HARD_REGISTER_NUM_P (regno))
20029 {
20030 regno = true_regnum (in);
20031 if (!HARD_REGISTER_NUM_P (regno))
20032 regno = -1;
20033 }
20034 }
20035 else if (SUBREG_P (in))
20036 {
20037 regno = true_regnum (in);
20038 if (!HARD_REGISTER_NUM_P (regno))
20039 regno = -1;
20040 }
20041 else
20042 regno = -1;
20043
20044 /* If we have VSX register moves, prefer moving scalar values between
20045 Altivec registers and GPRs by going via an FPR (and then via memory)
20046 instead of reloading the secondary memory address for Altivec moves. */
20047 if (TARGET_VSX
20048 && GET_MODE_SIZE (mode) < 16
20049 && !mode_supports_vmx_dform (mode)
20050 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20051 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20052 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20053 && (regno >= 0 && INT_REGNO_P (regno)))))
20054 return FLOAT_REGS;
20055
20056 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20057 into anything. */
20058 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20059 || (regno >= 0 && INT_REGNO_P (regno)))
20060 return NO_REGS;
20061
20062 /* Constants, memory, and VSX registers can go into VSX registers (both the
20063 traditional floating point and the altivec registers). */
20064 if (rclass == VSX_REGS
20065 && (regno == -1 || VSX_REGNO_P (regno)))
20066 return NO_REGS;
20067
20068 /* Constants, memory, and FP registers can go into FP registers. */
20069 if ((regno == -1 || FP_REGNO_P (regno))
20070 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
20071 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20072
20073 /* Memory and AltiVec registers can go into AltiVec registers. */
20074 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20075 && rclass == ALTIVEC_REGS)
20076 return NO_REGS;
20077
20078 /* We can copy among the CR registers. */
20079 if ((rclass == CR_REGS || rclass == CR0_REGS)
20080 && regno >= 0 && CR_REGNO_P (regno))
20081 return NO_REGS;
20082
20083 /* Otherwise, we need GENERAL_REGS. */
20084 return GENERAL_REGS;
20085 }
20086
20087 /* Debug version of rs6000_secondary_reload_class. */
20088 static enum reg_class
20089 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20090 machine_mode mode, rtx in)
20091 {
20092 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20093 fprintf (stderr,
20094 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20095 "mode = %s, input rtx:\n",
20096 reg_class_names[ret], reg_class_names[rclass],
20097 GET_MODE_NAME (mode));
20098 debug_rtx (in);
20099
20100 return ret;
20101 }
20102
20103 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20104
20105 static bool
20106 rs6000_can_change_mode_class (machine_mode from,
20107 machine_mode to,
20108 reg_class_t rclass)
20109 {
20110 unsigned from_size = GET_MODE_SIZE (from);
20111 unsigned to_size = GET_MODE_SIZE (to);
20112
20113 if (from_size != to_size)
20114 {
20115 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20116
20117 if (reg_classes_intersect_p (xclass, rclass))
20118 {
20119 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20120 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20121 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20122 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20123
20124 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20125 single register under VSX because the scalar part of the register
20126 is in the upper 64-bits, and not the lower 64-bits. Types like
20127 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20128 IEEE floating point can't overlap, and neither can small
20129 values. */
20130
20131 if (to_float128_vector_p && from_float128_vector_p)
20132 return true;
20133
20134 else if (to_float128_vector_p || from_float128_vector_p)
20135 return false;
20136
20137 /* TDmode in floating-point registers must always go into a register
20138 pair with the most significant word in the even-numbered register
20139 to match ISA requirements. In little-endian mode, this does not
20140 match subreg numbering, so we cannot allow subregs. */
20141 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20142 return false;
20143
20144 if (from_size < 8 || to_size < 8)
20145 return false;
20146
20147 if (from_size == 8 && (8 * to_nregs) != to_size)
20148 return false;
20149
20150 if (to_size == 8 && (8 * from_nregs) != from_size)
20151 return false;
20152
20153 return true;
20154 }
20155 else
20156 return true;
20157 }
20158
20159 /* Since the VSX register set includes traditional floating point registers
20160 and altivec registers, just check for the size being different instead of
20161 trying to check whether the modes are vector modes. Otherwise it won't
20162 allow say DF and DI to change classes. For types like TFmode and TDmode
20163 that take 2 64-bit registers, rather than a single 128-bit register, don't
20164 allow subregs of those types to other 128 bit types. */
20165 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20166 {
20167 unsigned num_regs = (from_size + 15) / 16;
20168 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20169 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20170 return false;
20171
20172 return (from_size == 8 || from_size == 16);
20173 }
20174
20175 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20176 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20177 return false;
20178
20179 return true;
20180 }
20181
20182 /* Debug version of rs6000_can_change_mode_class. */
20183 static bool
20184 rs6000_debug_can_change_mode_class (machine_mode from,
20185 machine_mode to,
20186 reg_class_t rclass)
20187 {
20188 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20189
20190 fprintf (stderr,
20191 "rs6000_can_change_mode_class, return %s, from = %s, "
20192 "to = %s, rclass = %s\n",
20193 ret ? "true" : "false",
20194 GET_MODE_NAME (from), GET_MODE_NAME (to),
20195 reg_class_names[rclass]);
20196
20197 return ret;
20198 }
20199 \f
20200 /* Return a string to do a move operation of 128 bits of data. */
20201
20202 const char *
20203 rs6000_output_move_128bit (rtx operands[])
20204 {
20205 rtx dest = operands[0];
20206 rtx src = operands[1];
20207 machine_mode mode = GET_MODE (dest);
20208 int dest_regno;
20209 int src_regno;
20210 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20211 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20212
20213 if (REG_P (dest))
20214 {
20215 dest_regno = REGNO (dest);
20216 dest_gpr_p = INT_REGNO_P (dest_regno);
20217 dest_fp_p = FP_REGNO_P (dest_regno);
20218 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20219 dest_vsx_p = dest_fp_p | dest_vmx_p;
20220 }
20221 else
20222 {
20223 dest_regno = -1;
20224 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20225 }
20226
20227 if (REG_P (src))
20228 {
20229 src_regno = REGNO (src);
20230 src_gpr_p = INT_REGNO_P (src_regno);
20231 src_fp_p = FP_REGNO_P (src_regno);
20232 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20233 src_vsx_p = src_fp_p | src_vmx_p;
20234 }
20235 else
20236 {
20237 src_regno = -1;
20238 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20239 }
20240
20241 /* Register moves. */
20242 if (dest_regno >= 0 && src_regno >= 0)
20243 {
20244 if (dest_gpr_p)
20245 {
20246 if (src_gpr_p)
20247 return "#";
20248
20249 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20250 return (WORDS_BIG_ENDIAN
20251 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20252 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
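/* In the sequence above, mfvsrd copies doubleword 0 (the upper half) of
   the VSX register and mfvsrld (new in ISA 3.0) copies doubleword 1, so
   the GPR destinations are swapped for little-endian to preserve the
   expected word order. */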
20253
20254 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20255 return "#";
20256 }
20257
20258 else if (TARGET_VSX && dest_vsx_p)
20259 {
20260 if (src_vsx_p)
20261 return "xxlor %x0,%x1,%x1";
20262
20263 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20264 return (WORDS_BIG_ENDIAN
20265 ? "mtvsrdd %x0,%1,%L1"
20266 : "mtvsrdd %x0,%L1,%1");
20267
20268 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20269 return "#";
20270 }
20271
20272 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20273 return "vor %0,%1,%1";
20274
20275 else if (dest_fp_p && src_fp_p)
20276 return "#";
20277 }
20278
20279 /* Loads. */
20280 else if (dest_regno >= 0 && MEM_P (src))
20281 {
20282 if (dest_gpr_p)
20283 {
20284 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20285 return "lq %0,%1";
20286 else
20287 return "#";
20288 }
20289
20290 else if (TARGET_ALTIVEC && dest_vmx_p
20291 && altivec_indexed_or_indirect_operand (src, mode))
20292 return "lvx %0,%y1";
20293
20294 else if (TARGET_VSX && dest_vsx_p)
20295 {
20296 if (mode_supports_dq_form (mode)
20297 && quad_address_p (XEXP (src, 0), mode, true))
20298 return "lxv %x0,%1";
20299
20300 else if (TARGET_P9_VECTOR)
20301 return "lxvx %x0,%y1";
20302
20303 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20304 return "lxvw4x %x0,%y1";
20305
20306 else
20307 return "lxvd2x %x0,%y1";
20308 }
20309
20310 else if (TARGET_ALTIVEC && dest_vmx_p)
20311 return "lvx %0,%y1";
20312
20313 else if (dest_fp_p)
20314 return "#";
20315 }
20316
20317 /* Stores. */
20318 else if (src_regno >= 0 && MEM_P (dest))
20319 {
20320 if (src_gpr_p)
20321 {
20322 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20323 return "stq %1,%0";
20324 else
20325 return "#";
20326 }
20327
20328 else if (TARGET_ALTIVEC && src_vmx_p
20329 && altivec_indexed_or_indirect_operand (dest, mode))
20330 return "stvx %1,%y0";
20331
20332 else if (TARGET_VSX && src_vsx_p)
20333 {
20334 if (mode_supports_dq_form (mode)
20335 && quad_address_p (XEXP (dest, 0), mode, true))
20336 return "stxv %x1,%0";
20337
20338 else if (TARGET_P9_VECTOR)
20339 return "stxvx %x1,%y0";
20340
20341 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20342 return "stxvw4x %x1,%y0";
20343
20344 else
20345 return "stxvd2x %x1,%y0";
20346 }
20347
20348 else if (TARGET_ALTIVEC && src_vmx_p)
20349 return "stvx %1,%y0";
20350
20351 else if (src_fp_p)
20352 return "#";
20353 }
20354
20355 /* Constants. */
20356 else if (dest_regno >= 0
20357 && (CONST_INT_P (src)
20358 || CONST_WIDE_INT_P (src)
20359 || CONST_DOUBLE_P (src)
20360 || GET_CODE (src) == CONST_VECTOR))
20361 {
20362 if (dest_gpr_p)
20363 return "#";
20364
20365 else if ((dest_vmx_p && TARGET_ALTIVEC)
20366 || (dest_vsx_p && TARGET_VSX))
20367 return output_vec_const_move (operands);
20368 }
20369
20370 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20371 }
20372
20373 /* Validate a 128-bit move. */
20374 bool
20375 rs6000_move_128bit_ok_p (rtx operands[])
20376 {
20377 machine_mode mode = GET_MODE (operands[0]);
20378 return (gpc_reg_operand (operands[0], mode)
20379 || gpc_reg_operand (operands[1], mode));
20380 }
20381
20382 /* Return true if a 128-bit move needs to be split. */
20383 bool
20384 rs6000_split_128bit_ok_p (rtx operands[])
20385 {
20386 if (!reload_completed)
20387 return false;
20388
20389 if (!gpr_or_gpr_p (operands[0], operands[1]))
20390 return false;
20391
20392 if (quad_load_store_p (operands[0], operands[1]))
20393 return false;
20394
20395 return true;
20396 }
20397
20398 \f
20399 /* Given a comparison operation, return the bit number in CCR to test. We
20400 know this is a valid comparison.
20401
20402 SCC_P is 1 if this is for an scc. That means that %D will have been
20403 used instead of %C, so the bits will be in different places.
20404
20405 Return -1 if OP isn't a valid comparison for some reason. */
20406
20407 int
20408 ccr_bit (rtx op, int scc_p)
20409 {
20410 enum rtx_code code = GET_CODE (op);
20411 machine_mode cc_mode;
20412 int cc_regnum;
20413 int base_bit;
20414 rtx reg;
20415
20416 if (!COMPARISON_P (op))
20417 return -1;
20418
20419 reg = XEXP (op, 0);
20420
20421 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20422 return -1;
20423
20424 cc_mode = GET_MODE (reg);
20425 cc_regnum = REGNO (reg);
20426 base_bit = 4 * (cc_regnum - CR0_REGNO);
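/* Each CR field is 4 bits -- LT, GT, EQ, SO/UN -- so, for example, the
   EQ bit of CR2 is CR bit 4*2 + 2 = 10. */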
20427
20428 validate_condition_mode (code, cc_mode);
20429
20430 /* When generating a sCOND operation, only positive conditions are
20431 allowed. */
20432 if (scc_p)
20433 switch (code)
20434 {
20435 case EQ:
20436 case GT:
20437 case LT:
20438 case UNORDERED:
20439 case GTU:
20440 case LTU:
20441 break;
20442 default:
20443 return -1;
20444 }
20445
20446 switch (code)
20447 {
20448 case NE:
20449 return scc_p ? base_bit + 3 : base_bit + 2;
20450 case EQ:
20451 return base_bit + 2;
20452 case GT: case GTU: case UNLE:
20453 return base_bit + 1;
20454 case LT: case LTU: case UNGE:
20455 return base_bit;
20456 case ORDERED: case UNORDERED:
20457 return base_bit + 3;
20458
20459 case GE: case GEU:
20460 /* If scc, we will have done a cror to put the bit in the
20461 unordered position. So test that bit. For integer, this is ! LT
20462 unless this is an scc insn. */
20463 return scc_p ? base_bit + 3 : base_bit;
20464
20465 case LE: case LEU:
20466 return scc_p ? base_bit + 3 : base_bit + 1;
20467
20468 default:
20469 return -1;
20470 }
20471 }
20472 \f
20473 /* Return the GOT register. */
20474
20475 rtx
20476 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20477 {
20478 /* The second flow pass currently (June 1999) can't update
20479 regs_ever_live without disturbing other parts of the compiler, so
20480 update it here to make the prolog/epilogue code happy. */
20481 if (!can_create_pseudo_p ()
20482 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20483 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20484
20485 crtl->uses_pic_offset_table = 1;
20486
20487 return pic_offset_table_rtx;
20488 }
20489 \f
20490 static rs6000_stack_t stack_info;
20491
20492 /* Function to init struct machine_function.
20493 This will be called, via a pointer variable,
20494 from push_function_context. */
20495
20496 static struct machine_function *
20497 rs6000_init_machine_status (void)
20498 {
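/* Clear the cached stack info so that -mdebug=stack output for the new
   function is recomputed rather than reporting stale data from a
   previously compiled function. */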
20499 stack_info.reload_completed = 0;
20500 return ggc_cleared_alloc<machine_function> ();
20501 }
20502 \f
20503 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20504
20505 /* Write out a function code label. */
20506
20507 void
20508 rs6000_output_function_entry (FILE *file, const char *fname)
20509 {
20510 if (fname[0] != '.')
20511 {
20512 switch (DEFAULT_ABI)
20513 {
20514 default:
20515 gcc_unreachable ();
20516
20517 case ABI_AIX:
20518 if (DOT_SYMBOLS)
20519 putc ('.', file);
20520 else
20521 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20522 break;
20523
20524 case ABI_ELFv2:
20525 case ABI_V4:
20526 case ABI_DARWIN:
20527 break;
20528 }
20529 }
20530
20531 RS6000_OUTPUT_BASENAME (file, fname);
20532 }
20533
20534 /* Print an operand. Recognize special options, documented below. */
20535
20536 #if TARGET_ELF
20537 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20538 only introduced by the linker, when applying the sda21
20539 relocation. */
20540 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20541 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20542 #else
20543 #define SMALL_DATA_RELOC "sda21"
20544 #define SMALL_DATA_REG 0
20545 #endif
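/* As a worked example of the macros above: with -msdata=eabi a
   small-data operand prints as "var@sda21(0)", where register 0 is a
   placeholder that the linker resolves when applying the sda21
   relocation; the sysv flavour instead prints "var@sdarel(13)" against
   the small-data base register r13. */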
20546
20547 void
20548 print_operand (FILE *file, rtx x, int code)
20549 {
20550 int i;
20551 unsigned HOST_WIDE_INT uval;
20552
20553 switch (code)
20554 {
20555 /* %a is output_address. */
20556
20557 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20558 output_operand. */
20559
20560 case 'D':
20561 /* Like 'J' but get to the GT bit only. */
20562 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20563 {
20564 output_operand_lossage ("invalid %%D value");
20565 return;
20566 }
20567
20568 /* Bit 1 is GT bit. */
20569 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20570
20571 /* Add one for shift count in rlinm for scc. */
20572 fprintf (file, "%d", i + 1);
20573 return;
20574
20575 case 'e':
20576 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20577 if (! INT_P (x))
20578 {
20579 output_operand_lossage ("invalid %%e value");
20580 return;
20581 }
20582
20583 uval = INTVAL (x);
20584 if ((uval & 0xffff) == 0 && uval != 0)
20585 putc ('s', file);
20586 return;
20587
20588 case 'E':
20589 /* X is a CR register. Print the number of the EQ bit of the CR. */
20590 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20591 output_operand_lossage ("invalid %%E value");
20592 else
20593 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20594 return;
20595
20596 case 'f':
20597 /* X is a CR register. Print the shift count needed to move it
20598 to the high-order four bits. */
20599 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20600 output_operand_lossage ("invalid %%f value");
20601 else
20602 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20603 return;
20604
20605 case 'F':
20606 /* Similar, but print the count for the rotate in the opposite
20607 direction. */
20608 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20609 output_operand_lossage ("invalid %%F value");
20610 else
20611 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20612 return;
20613
20614 case 'G':
20615 /* X is a constant integer. If it is negative, print "m",
20616 otherwise print "z". This is to make an aze or ame insn. */
20617 if (!CONST_INT_P (x))
20618 output_operand_lossage ("invalid %%G value");
20619 else if (INTVAL (x) >= 0)
20620 putc ('z', file);
20621 else
20622 putc ('m', file);
20623 return;
20624
20625 case 'h':
20626 /* If constant, output low-order five bits. Otherwise, write
20627 normally. */
20628 if (INT_P (x))
20629 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20630 else
20631 print_operand (file, x, 0);
20632 return;
20633
20634 case 'H':
20635 /* If constant, output low-order six bits. Otherwise, write
20636 normally. */
20637 if (INT_P (x))
20638 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20639 else
20640 print_operand (file, x, 0);
20641 return;
20642
20643 case 'I':
20644 /* Print `i' if this is a constant, else nothing. */
20645 if (INT_P (x))
20646 putc ('i', file);
20647 return;
20648
20649 case 'j':
20650 /* Write the bit number in CCR for jump. */
20651 i = ccr_bit (x, 0);
20652 if (i == -1)
20653 output_operand_lossage ("invalid %%j code");
20654 else
20655 fprintf (file, "%d", i);
20656 return;
20657
20658 case 'J':
20659 /* Similar, but add one for shift count in rlinm for scc and pass
20660 scc flag to `ccr_bit'. */
20661 i = ccr_bit (x, 1);
20662 if (i == -1)
20663 output_operand_lossage ("invalid %%J code");
20664 else
20665 /* If we want bit 31, write a shift count of zero, not 32. */
20666 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20667 return;
20668
20669 case 'k':
20670 /* X must be a constant. Write the 1's complement of the
20671 constant. */
20672 if (! INT_P (x))
20673 output_operand_lossage ("invalid %%k value");
20674 else
20675 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20676 return;
20677
20678 case 'K':
20679 /* X must be a symbolic constant on ELF. Write an
20680 expression suitable for an 'addi' that adds in the low 16
20681 bits of the MEM. */
20682 if (GET_CODE (x) == CONST)
20683 {
20684 if (GET_CODE (XEXP (x, 0)) != PLUS
20685 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20686 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20687 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20688 output_operand_lossage ("invalid %%K value");
20689 }
20690 print_operand_address (file, x);
20691 fputs ("@l", file);
20692 return;
20693
20694 /* %l is output_asm_label. */
20695
20696 case 'L':
20697 /* Write second word of DImode or DFmode reference. Works on register
20698 or non-indexed memory only. */
20699 if (REG_P (x))
20700 fputs (reg_names[REGNO (x) + 1], file);
20701 else if (MEM_P (x))
20702 {
20703 machine_mode mode = GET_MODE (x);
20704 /* Handle possible auto-increment. Since it is pre-increment and
20705 we have already done it, we can just use an offset of one word. */
20706 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20707 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20708 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20709 UNITS_PER_WORD));
20710 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20711 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20712 UNITS_PER_WORD));
20713 else
20714 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20715 UNITS_PER_WORD),
20716 0));
20717
20718 if (small_data_operand (x, GET_MODE (x)))
20719 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20720 reg_names[SMALL_DATA_REG]);
20721 }
20722 return;
20723
20724 case 'N': /* Unused */
20725 /* Write the number of elements in the vector times 4. */
20726 if (GET_CODE (x) != PARALLEL)
20727 output_operand_lossage ("invalid %%N value");
20728 else
20729 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20730 return;
20731
20732 case 'O': /* Unused */
20733 /* Similar, but subtract 1 first. */
20734 if (GET_CODE (x) != PARALLEL)
20735 output_operand_lossage ("invalid %%O value");
20736 else
20737 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20738 return;
20739
20740 case 'p':
20741 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20742 if (! INT_P (x)
20743 || INTVAL (x) < 0
20744 || (i = exact_log2 (INTVAL (x))) < 0)
20745 output_operand_lossage ("invalid %%p value");
20746 else
20747 fprintf (file, "%d", i);
20748 return;
20749
20750 case 'P':
20751 /* The operand must be an indirect memory reference. The result
20752 is the register name. */
20753 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
20754 || REGNO (XEXP (x, 0)) >= 32)
20755 output_operand_lossage ("invalid %%P value");
20756 else
20757 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20758 return;
20759
20760 case 'q':
20761 /* This outputs the logical code corresponding to a boolean
20762 expression. The expression may have one or both operands
20763 negated (if one, only the first one). For condition register
20764 logical operations, it will also treat the negated
20765 CR codes as NOTs, but not handle NOTs of them. */
20766 {
20767 const char *const *t = 0;
20768 const char *s;
20769 enum rtx_code code = GET_CODE (x);
20770 static const char * const tbl[3][3] = {
20771 { "and", "andc", "nor" },
20772 { "or", "orc", "nand" },
20773 { "xor", "eqv", "xor" } };
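/* Column 0 is the plain operation, column 1 the "with complement" form
   used when only the first operand is negated (operand order is fixed
   up by the emitting pattern), and column 2 the form for both operands
   negated, via De Morgan: ~a & ~b = nor, ~a | ~b = nand; for xor the
   two complements cancel (~a ^ ~b = a ^ b), while a single negation
   gives eqv = ~(a ^ b). */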
20774
20775 if (code == AND)
20776 t = tbl[0];
20777 else if (code == IOR)
20778 t = tbl[1];
20779 else if (code == XOR)
20780 t = tbl[2];
20781 else
20782 output_operand_lossage ("invalid %%q value");
20783
20784 if (GET_CODE (XEXP (x, 0)) != NOT)
20785 s = t[0];
20786 else
20787 {
20788 if (GET_CODE (XEXP (x, 1)) == NOT)
20789 s = t[2];
20790 else
20791 s = t[1];
20792 }
20793
20794 fputs (s, file);
20795 }
20796 return;
20797
20798 case 'Q':
20799 if (! TARGET_MFCRF)
20800 return;
20801 fputc (',', file);
20802 /* FALLTHRU */
20803
20804 case 'R':
20805 /* X is a CR register. Print the mask for `mtcrf'. */
20806 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20807 output_operand_lossage ("invalid %%R value");
20808 else
20809 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20810 return;
20811
20812 case 's':
20813 /* Low 5 bits of 32 - value */
20814 if (! INT_P (x))
20815 output_operand_lossage ("invalid %%s value");
20816 else
20817 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20818 return;
20819
20820 case 't':
20821 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20822 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20823 {
20824 output_operand_lossage ("invalid %%t value");
20825 return;
20826 }
20827
20828 /* Bit 3 is OV bit. */
20829 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20830
20831 /* If we want bit 31, write a shift count of zero, not 32. */
20832 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20833 return;
20834
20835 case 'T':
20836 /* Print the symbolic name of a branch target register. */
20837 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20838 x = XVECEXP (x, 0, 0);
20839 if (!REG_P (x) || (REGNO (x) != LR_REGNO
20840 && REGNO (x) != CTR_REGNO))
20841 output_operand_lossage ("invalid %%T value");
20842 else if (REGNO (x) == LR_REGNO)
20843 fputs ("lr", file);
20844 else
20845 fputs ("ctr", file);
20846 return;
20847
20848 case 'u':
20849 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20850 for use in unsigned operand. */
20851 if (! INT_P (x))
20852 {
20853 output_operand_lossage ("invalid %%u value");
20854 return;
20855 }
20856
20857 uval = INTVAL (x);
20858 if ((uval & 0xffff) == 0)
20859 uval >>= 16;
20860
20861 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20862 return;
20863
20864 case 'v':
20865 /* High-order 16 bits of constant for use in signed operand. */
20866 if (! INT_P (x))
20867 output_operand_lossage ("invalid %%v value");
20868 else
20869 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20870 (INTVAL (x) >> 16) & 0xffff);
20871 return;
20872
20873 case 'U':
20874 /* Print `u' if this has an auto-increment or auto-decrement. */
20875 if (MEM_P (x)
20876 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20877 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20878 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20879 putc ('u', file);
20880 return;
20881
20882 case 'V':
20883 /* Print the trap code for this operand. */
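/* The numbers in the comments below are the corresponding TO-field
   encodings (LT=16, GT=8, EQ=4, LLT=2, LGT=1); e.g. "ne" is
   LT|GT = 24. */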
20884 switch (GET_CODE (x))
20885 {
20886 case EQ:
20887 fputs ("eq", file); /* 4 */
20888 break;
20889 case NE:
20890 fputs ("ne", file); /* 24 */
20891 break;
20892 case LT:
20893 fputs ("lt", file); /* 16 */
20894 break;
20895 case LE:
20896 fputs ("le", file); /* 20 */
20897 break;
20898 case GT:
20899 fputs ("gt", file); /* 8 */
20900 break;
20901 case GE:
20902 fputs ("ge", file); /* 12 */
20903 break;
20904 case LTU:
20905 fputs ("llt", file); /* 2 */
20906 break;
20907 case LEU:
20908 fputs ("lle", file); /* 6 */
20909 break;
20910 case GTU:
20911 fputs ("lgt", file); /* 1 */
20912 break;
20913 case GEU:
20914 fputs ("lge", file); /* 5 */
20915 break;
20916 default:
20917 output_operand_lossage ("invalid %%V value");
20918 }
20919 break;
20920
20921 case 'w':
20922 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20923 normally. */
20924 if (INT_P (x))
20925 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20926 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20927 else
20928 print_operand (file, x, 0);
20929 return;
20930
20931 case 'x':
20932 /* X is a FPR or Altivec register used in a VSX context. */
20933 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
20934 output_operand_lossage ("invalid %%x value");
20935 else
20936 {
20937 int reg = REGNO (x);
20938 int vsx_reg = (FP_REGNO_P (reg)
20939 ? reg - 32
20940 : reg - FIRST_ALTIVEC_REGNO + 32);
20941
20942 #ifdef TARGET_REGNAMES
20943 if (TARGET_REGNAMES)
20944 fprintf (file, "%%vs%d", vsx_reg);
20945 else
20946 #endif
20947 fprintf (file, "%d", vsx_reg);
20948 }
20949 return;
20950
20951 case 'X':
20952 if (MEM_P (x)
20953 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20954 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20955 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20956 putc ('x', file);
20957 return;
20958
20959 case 'Y':
20960 /* Like 'L', for third word of TImode/PTImode. */
20961 if (REG_P (x))
20962 fputs (reg_names[REGNO (x) + 2], file);
20963 else if (MEM_P (x))
20964 {
20965 machine_mode mode = GET_MODE (x);
20966 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20967 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20968 output_address (mode, plus_constant (Pmode,
20969 XEXP (XEXP (x, 0), 0), 8));
20970 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20971 output_address (mode, plus_constant (Pmode,
20972 XEXP (XEXP (x, 0), 0), 8));
20973 else
20974 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20975 if (small_data_operand (x, GET_MODE (x)))
20976 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20977 reg_names[SMALL_DATA_REG]);
20978 }
20979 return;
20980
20981 case 'z':
20982 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20983 x = XVECEXP (x, 0, 1);
20984 /* X is a SYMBOL_REF. Write out the name preceded by a
20985 period and without any trailing data in brackets. Used for function
20986 names. If we are configured for System V (or the embedded ABI) on
20987 the PowerPC, do not emit the period, since those systems do not use
20988 TOCs and the like. */
20989 if (!SYMBOL_REF_P (x))
20990 {
20991 output_operand_lossage ("invalid %%z value");
20992 return;
20993 }
20994
20995 /* For macho, check to see if we need a stub. */
20996 if (TARGET_MACHO)
20997 {
20998 const char *name = XSTR (x, 0);
20999 #if TARGET_MACHO
21000 if (darwin_emit_branch_islands
21001 && MACHOPIC_INDIRECT
21002 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
21003 name = machopic_indirection_name (x, /*stub_p=*/true);
21004 #endif
21005 assemble_name (file, name);
21006 }
21007 else if (!DOT_SYMBOLS)
21008 assemble_name (file, XSTR (x, 0));
21009 else
21010 rs6000_output_function_entry (file, XSTR (x, 0));
21011 return;
21012
21013 case 'Z':
21014 /* Like 'L', for last word of TImode/PTImode. */
21015 if (REG_P (x))
21016 fputs (reg_names[REGNO (x) + 3], file);
21017 else if (MEM_P (x))
21018 {
21019 machine_mode mode = GET_MODE (x);
21020 if (GET_CODE (XEXP (x, 0)) == PRE_INC
21021 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
21022 output_address (mode, plus_constant (Pmode,
21023 XEXP (XEXP (x, 0), 0), 12));
21024 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21025 output_address (mode, plus_constant (Pmode,
21026 XEXP (XEXP (x, 0), 0), 12));
21027 else
21028 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
21029 if (small_data_operand (x, GET_MODE (x)))
21030 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21031 reg_names[SMALL_DATA_REG]);
21032 }
21033 return;
21034
21035 /* Print AltiVec memory operand. */
21036 case 'y':
21037 {
21038 rtx tmp;
21039
21040 gcc_assert (MEM_P (x));
21041
21042 tmp = XEXP (x, 0);
21043
21044 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21045 && GET_CODE (tmp) == AND
21046 && CONST_INT_P (XEXP (tmp, 1))
21047 && INTVAL (XEXP (tmp, 1)) == -16)
21048 tmp = XEXP (tmp, 0);
21049 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21050 && GET_CODE (tmp) == PRE_MODIFY)
21051 tmp = XEXP (tmp, 1);
21052 if (REG_P (tmp))
21053 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21054 else
21055 {
21056 if (GET_CODE (tmp) != PLUS
21057 || !REG_P (XEXP (tmp, 0))
21058 || !REG_P (XEXP (tmp, 1)))
21059 {
21060 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21061 break;
21062 }
21063
21064 if (REGNO (XEXP (tmp, 0)) == 0)
21065 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21066 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21067 else
21068 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21069 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21070 }
21071 break;
21072 }
21073
21074 case 0:
21075 if (REG_P (x))
21076 fprintf (file, "%s", reg_names[REGNO (x)]);
21077 else if (MEM_P (x))
21078 {
21079 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21080 know the width from the mode. */
21081 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21082 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21083 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21084 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21085 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21086 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21087 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21088 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21089 else
21090 output_address (GET_MODE (x), XEXP (x, 0));
21091 }
21092 else if (toc_relative_expr_p (x, false,
21093 &tocrel_base_oac, &tocrel_offset_oac))
21094 /* This hack along with a corresponding hack in
21095 rs6000_output_addr_const_extra arranges to output addends
21096 where the assembler expects to find them. eg.
21097 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21098 without this hack would be output as "x@toc+4". We
21099 want "x+4@toc". */
21100 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21101 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21102 output_addr_const (file, XVECEXP (x, 0, 0));
21103 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21104 output_addr_const (file, XVECEXP (x, 0, 1));
21105 else
21106 output_addr_const (file, x);
21107 return;
21108
21109 case '&':
21110 if (const char *name = get_some_local_dynamic_name ())
21111 assemble_name (file, name);
21112 else
21113 output_operand_lossage ("'%%&' used without any "
21114 "local dynamic TLS references");
21115 return;
21116
21117 default:
21118 output_operand_lossage ("invalid %%xn code");
21119 }
21120 }
21121 \f
21122 /* Print the address of an operand. */
21123
21124 void
21125 print_operand_address (FILE *file, rtx x)
21126 {
21127 if (REG_P (x))
21128 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21129 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21130 || GET_CODE (x) == LABEL_REF)
21131 {
21132 output_addr_const (file, x);
21133 if (small_data_operand (x, GET_MODE (x)))
21134 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21135 reg_names[SMALL_DATA_REG]);
21136 else
21137 gcc_assert (!TARGET_TOC);
21138 }
21139 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21140 && REG_P (XEXP (x, 1)))
21141 {
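/* In an indexed "RA,RB" address, RA = r0 is read as the literal value
   0 rather than the register, so print the registers swapped when the
   first one is r0. */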
21142 if (REGNO (XEXP (x, 0)) == 0)
21143 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21144 reg_names[ REGNO (XEXP (x, 0)) ]);
21145 else
21146 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21147 reg_names[ REGNO (XEXP (x, 1)) ]);
21148 }
21149 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21150 && CONST_INT_P (XEXP (x, 1)))
21151 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21152 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21153 #if TARGET_MACHO
21154 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21155 && CONSTANT_P (XEXP (x, 1)))
21156 {
21157 fprintf (file, "lo16(");
21158 output_addr_const (file, XEXP (x, 1));
21159 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21160 }
21161 #endif
21162 #if TARGET_ELF
21163 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21164 && CONSTANT_P (XEXP (x, 1)))
21165 {
21166 output_addr_const (file, XEXP (x, 1));
21167 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21168 }
21169 #endif
21170 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21171 {
21172 /* This hack along with a corresponding hack in
21173 rs6000_output_addr_const_extra arranges to output addends
21174 where the assembler expects to find them. E.g.
21175 (lo_sum (reg 9)
21176 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21177 without this hack would be output as "x@toc+8@l(9)". We
21178 want "x+8@toc@l(9)". */
21179 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21180 if (GET_CODE (x) == LO_SUM)
21181 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21182 else
21183 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21184 }
21185 else
21186 output_addr_const (file, x);
21187 }
21188 \f
21189 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21190
21191 static bool
21192 rs6000_output_addr_const_extra (FILE *file, rtx x)
21193 {
21194 if (GET_CODE (x) == UNSPEC)
21195 switch (XINT (x, 1))
21196 {
21197 case UNSPEC_TOCREL:
21198 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21199 && REG_P (XVECEXP (x, 0, 1))
21200 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21201 output_addr_const (file, XVECEXP (x, 0, 0));
21202 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21203 {
21204 if (INTVAL (tocrel_offset_oac) >= 0)
21205 fprintf (file, "+");
21206 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21207 }
21208 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21209 {
21210 putc ('-', file);
21211 assemble_name (file, toc_label_name);
21212 need_toc_init = 1;
21213 }
21214 else if (TARGET_ELF)
21215 fputs ("@toc", file);
21216 return true;
21217
21218 #if TARGET_MACHO
21219 case UNSPEC_MACHOPIC_OFFSET:
21220 output_addr_const (file, XVECEXP (x, 0, 0));
21221 putc ('-', file);
21222 machopic_output_function_base_name (file);
21223 return true;
21224 #endif
21225 }
21226 return false;
21227 }
21228 \f
21229 /* Target hook for assembling integer objects. The PowerPC version has
21230 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21231 is defined. It also needs to handle DI-mode objects on 64-bit
21232 targets. */
21233
21234 static bool
21235 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21236 {
21237 #ifdef RELOCATABLE_NEEDS_FIXUP
21238 /* Special handling for SI values. */
21239 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21240 {
21241 static int recurse = 0;
21242
21243 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21244 the .fixup section. Since the TOC section is already relocated, we
21245 don't need to mark it here. We used to skip the text section, but it
21246 should never be valid for relocated addresses to be placed in the text
21247 section. */
21248 if (DEFAULT_ABI == ABI_V4
21249 && (TARGET_RELOCATABLE || flag_pic > 1)
21250 && in_section != toc_section
21251 && !recurse
21252 && !CONST_SCALAR_INT_P (x)
21253 && CONSTANT_P (x))
21254 {
21255 char buf[256];
21256
21257 recurse = 1;
21258 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21259 fixuplabelno++;
21260 ASM_OUTPUT_LABEL (asm_out_file, buf);
21261 fprintf (asm_out_file, "\t.long\t(");
21262 output_addr_const (asm_out_file, x);
21263 fprintf (asm_out_file, ")@fixup\n");
21264 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21265 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21266 fprintf (asm_out_file, "\t.long\t");
21267 assemble_name (asm_out_file, buf);
21268 fprintf (asm_out_file, "\n\t.previous\n");
21269 recurse = 0;
21270 return true;
21271 }
21272 /* Remove initial .'s to turn a -mcall-aixdesc function
21273 address into the address of the descriptor, not the function
21274 itself. */
21275 else if (SYMBOL_REF_P (x)
21276 && XSTR (x, 0)[0] == '.'
21277 && DEFAULT_ABI == ABI_AIX)
21278 {
21279 const char *name = XSTR (x, 0);
21280 while (*name == '.')
21281 name++;
21282
21283 fprintf (asm_out_file, "\t.long\t%s\n", name);
21284 return true;
21285 }
21286 }
21287 #endif /* RELOCATABLE_NEEDS_FIXUP */
21288 return default_assemble_integer (x, size, aligned_p);
21289 }
21290
21291 /* Return a template string for assembly to emit when making an
21292 external call. FUNOP is the call mem argument operand number. */
21293
21294 static const char *
21295 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21296 {
21297 /* -Wformat-overflow workaround, without which gcc thinks that %u
21298 might produce 10 digits. */
21299 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21300
21301 char arg[12];
21302 arg[0] = 0;
21303 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21304 {
21305 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21306 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21307 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21308 sprintf (arg, "(%%&@tlsld)");
21309 else
21310 gcc_unreachable ();
21311 }
21312
21313 /* The magic 32768 offset here corresponds to the offset of
21314 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21315 char z[11];
21316 sprintf (z, "%%z%u%s", funop,
21317 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21318 ? "+32768" : ""));
21319
21320 static char str[32]; /* 2 spare */
21321 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21322 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21323 sibcall ? "" : "\n\tnop");
21324 else if (DEFAULT_ABI == ABI_V4)
21325 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21326 flag_pic ? "@plt" : "");
21327 #if TARGET_MACHO
21328 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21329 else if (DEFAULT_ABI == ABI_DARWIN)
21330 {
21331 /* The cookie is in operand func+2. */
21332 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21333 int cookie = INTVAL (operands[funop + 2]);
21334 if (cookie & CALL_LONG)
21335 {
21336 tree funname = get_identifier (XSTR (operands[funop], 0));
21337 tree labelname = get_prev_label (funname);
21338 gcc_checking_assert (labelname && !sibcall);
21339
21340 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21341 instruction will reach 'foo', otherwise link as 'bl L42'".
21342 "L42" should be a 'branch island', that will do a far jump to
21343 'foo'. Branch islands are generated in
21344 macho_branch_islands(). */
21345 sprintf (str, "jbsr %%z%u,%.10s", funop,
21346 IDENTIFIER_POINTER (labelname));
21347 }
21348 else
21349 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21350 after the call. */
21351 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21352 }
21353 #endif
21354 else
21355 gcc_unreachable ();
21356 return str;
21357 }
21358
21359 const char *
21360 rs6000_call_template (rtx *operands, unsigned int funop)
21361 {
21362 return rs6000_call_template_1 (operands, funop, false);
21363 }
21364
21365 const char *
21366 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21367 {
21368 return rs6000_call_template_1 (operands, funop, true);
21369 }
21370
21371 /* As above, for indirect calls. */
21372
21373 static const char *
21374 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21375 bool sibcall)
21376 {
21377 /* -Wformat-overflow workaround, without which gcc thinks that %u
21378 might produce 10 digits. Note that -Wformat-overflow will not
21379 currently warn here for str[], so do not rely on a warning to
21380 ensure str[] is correctly sized. */
21381 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21382
21383 /* Currently, funop is either 0 or 1. The maximum string is always
21384 a !speculate 64-bit __tls_get_addr call.
21385
21386 ABI_AIX:
21387 . 9 ld 2,%3\n\t
21388 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21389 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21390 . 9 crset 2\n\t
21391 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21392 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21393 . 10 beq%T1l-\n\t
21394 . 10 ld 2,%4(1)
21395 .---
21396 .151
21397
21398 ABI_ELFv2:
21399 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21400 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21401 . 9 crset 2\n\t
21402 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21403 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21404 . 10 beq%T1l-\n\t
21405 . 10 ld 2,%3(1)
21406 .---
21407 .142
21408
21409 ABI_V4:
21410 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21411 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21412 . 9 crset 2\n\t
21413 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21414 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21415 . 8 beq%T1l-
21416 .---
21417 .141 */
21418 static char str[160]; /* 8 spare */
21419 char *s = str;
21420 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21421
21422 if (DEFAULT_ABI == ABI_AIX)
21423 s += sprintf (s,
21424 "l%s 2,%%%u\n\t",
21425 ptrload, funop + 2);
21426
21427 /* We don't need the extra code to stop indirect call speculation if
21428 calling via LR. */
21429 bool speculate = (TARGET_MACHO
21430 || rs6000_speculate_indirect_jumps
21431 || (REG_P (operands[funop])
21432 && REGNO (operands[funop]) == LR_REGNO));
21433
21434 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21435 {
21436 const char *rel64 = TARGET_64BIT ? "64" : "";
21437 char tls[29];
21438 tls[0] = 0;
21439 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21440 {
21441 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21442 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21443 rel64, funop + 1);
21444 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21445 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21446 rel64);
21447 else
21448 gcc_unreachable ();
21449 }
21450
21451 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21452 && flag_pic == 2 ? "+32768" : "");
21453 if (!speculate)
21454 {
21455 s += sprintf (s,
21456 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21457 tls, rel64, funop, addend);
21458 s += sprintf (s, "crset 2\n\t");
21459 }
21460 s += sprintf (s,
21461 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21462 tls, rel64, funop, addend);
21463 }
21464 else if (!speculate)
21465 s += sprintf (s, "crset 2\n\t");
21466
21467 if (DEFAULT_ABI == ABI_AIX)
21468 {
21469 if (speculate)
21470 sprintf (s,
21471 "b%%T%ul\n\t"
21472 "l%s 2,%%%u(1)",
21473 funop, ptrload, funop + 3);
21474 else
21475 sprintf (s,
21476 "beq%%T%ul-\n\t"
21477 "l%s 2,%%%u(1)",
21478 funop, ptrload, funop + 3);
21479 }
21480 else if (DEFAULT_ABI == ABI_ELFv2)
21481 {
21482 if (speculate)
21483 sprintf (s,
21484 "b%%T%ul\n\t"
21485 "l%s 2,%%%u(1)",
21486 funop, ptrload, funop + 2);
21487 else
21488 sprintf (s,
21489 "beq%%T%ul-\n\t"
21490 "l%s 2,%%%u(1)",
21491 funop, ptrload, funop + 2);
21492 }
21493 else
21494 {
21495 if (speculate)
21496 sprintf (s,
21497 "b%%T%u%s",
21498 funop, sibcall ? "" : "l");
21499 else
21500 sprintf (s,
21501 "beq%%T%u%s-%s",
21502 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21503 }
21504 return str;
21505 }
21506
21507 const char *
21508 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21509 {
21510 return rs6000_indirect_call_template_1 (operands, funop, false);
21511 }
21512
21513 const char *
21514 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21515 {
21516 return rs6000_indirect_call_template_1 (operands, funop, true);
21517 }
21518
21519 #if HAVE_AS_PLTSEQ
21520 /* Output indirect call insns.
21521 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21522 const char *
21523 rs6000_pltseq_template (rtx *operands, int which)
21524 {
21525 const char *rel64 = TARGET_64BIT ? "64" : "";
21526 char tls[28];
21527 tls[0] = 0;
21528 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21529 {
21530 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21531 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21532 rel64);
21533 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21534 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21535 rel64);
21536 else
21537 gcc_unreachable ();
21538 }
21539
21540 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21541 static char str[96]; /* 15 spare */
21542 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
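/* The PLT16_HA/PLT16_LO relocations must apply to the 16-bit immediate
   field of the instruction, which on big-endian storage sits 2 bytes
   into the 4-byte instruction word, hence the "+2". */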
21543 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21544 && flag_pic == 2 ? "+32768" : "");
21545 switch (which)
21546 {
21547 case 0:
21548 sprintf (str,
21549 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21550 "st%s",
21551 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21552 break;
21553 case 1:
21554 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21555 sprintf (str,
21556 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21557 "lis %%0,0",
21558 tls, off, rel64);
21559 else
21560 sprintf (str,
21561 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21562 "addis %%0,%%1,0",
21563 tls, off, rel64, addend);
21564 break;
21565 case 2:
21566 sprintf (str,
21567 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21568 "l%s %%0,0(%%1)",
21569 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21570 TARGET_64BIT ? "d" : "wz");
21571 break;
21572 case 3:
21573 sprintf (str,
21574 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21575 "mtctr %%1",
21576 tls, rel64, addend);
21577 break;
21578 default:
21579 gcc_unreachable ();
21580 }
21581 return str;
21582 }
21583 #endif
21584
21585 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21586 /* Emit an assembler directive to set symbol visibility for DECL to
21587 VISIBILITY_TYPE. */
21588
21589 static void
21590 rs6000_assemble_visibility (tree decl, int vis)
21591 {
21592 if (TARGET_XCOFF)
21593 return;
21594
21595 /* Functions need to have their entry point symbol visibility set as
21596 well as their descriptor symbol visibility. */
21597 if (DEFAULT_ABI == ABI_AIX
21598 && DOT_SYMBOLS
21599 && TREE_CODE (decl) == FUNCTION_DECL)
21600 {
21601 static const char * const visibility_types[] = {
21602 NULL, "protected", "hidden", "internal"
21603 };
21604
21605 const char *name, *type;
21606
21607 name = ((* targetm.strip_name_encoding)
21608 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21609 type = visibility_types[vis];
21610
21611 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21612 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21613 }
21614 else
21615 default_assemble_visibility (decl, vis);
21616 }
21617 #endif
21618 \f
21619 enum rtx_code
21620 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21621 {
21622 /* Reversal of FP compares needs care -- an ordered compare
21623 becomes an unordered compare and vice versa. */
21624 if (mode == CCFPmode
21625 && (!flag_finite_math_only
21626 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21627 || code == UNEQ || code == LTGT))
21628 return reverse_condition_maybe_unordered (code);
21629 else
21630 return reverse_condition (code);
21631 }
21632
21633 /* Generate a compare for CODE. Return a brand-new rtx that
21634 represents the result of the compare. */
21635
21636 static rtx
21637 rs6000_generate_compare (rtx cmp, machine_mode mode)
21638 {
21639 machine_mode comp_mode;
21640 rtx compare_result;
21641 enum rtx_code code = GET_CODE (cmp);
21642 rtx op0 = XEXP (cmp, 0);
21643 rtx op1 = XEXP (cmp, 1);
21644
21645 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21646 comp_mode = CCmode;
21647 else if (FLOAT_MODE_P (mode))
21648 comp_mode = CCFPmode;
21649 else if (code == GTU || code == LTU
21650 || code == GEU || code == LEU)
21651 comp_mode = CCUNSmode;
21652 else if ((code == EQ || code == NE)
21653 && unsigned_reg_p (op0)
21654 && (unsigned_reg_p (op1)
21655 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21656 /* These are unsigned values, perhaps there will be a later
21657 ordering compare that can be shared with this one. */
21658 comp_mode = CCUNSmode;
21659 else
21660 comp_mode = CCmode;
21661
21662 /* If we have an unsigned compare, make sure we don't have a signed value as
21663 an immediate. */
21664 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21665 && INTVAL (op1) < 0)
21666 {
21667 op0 = copy_rtx_if_shared (op0);
21668 op1 = force_reg (GET_MODE (op0), op1);
21669 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21670 }
21671
21672 /* First, the compare. */
21673 compare_result = gen_reg_rtx (comp_mode);
21674
21675 /* IEEE 128-bit support in VSX registers when we do not have hardware
21676 support. */
21677 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21678 {
21679 rtx libfunc = NULL_RTX;
21680 bool check_nan = false;
21681 rtx dest;
21682
21683 switch (code)
21684 {
21685 case EQ:
21686 case NE:
21687 libfunc = optab_libfunc (eq_optab, mode);
21688 break;
21689
21690 case GT:
21691 case GE:
21692 libfunc = optab_libfunc (ge_optab, mode);
21693 break;
21694
21695 case LT:
21696 case LE:
21697 libfunc = optab_libfunc (le_optab, mode);
21698 break;
21699
21700 case UNORDERED:
21701 case ORDERED:
21702 libfunc = optab_libfunc (unord_optab, mode);
21703 code = (code == UNORDERED) ? NE : EQ;
21704 break;
21705
21706 case UNGE:
21707 case UNGT:
21708 check_nan = true;
21709 libfunc = optab_libfunc (ge_optab, mode);
21710 code = (code == UNGE) ? GE : GT;
21711 break;
21712
21713 case UNLE:
21714 case UNLT:
21715 check_nan = true;
21716 libfunc = optab_libfunc (le_optab, mode);
21717 code = (code == UNLE) ? LE : LT;
21718 break;
21719
21720 case UNEQ:
21721 case LTGT:
21722 check_nan = true;
21723 libfunc = optab_libfunc (eq_optab, mode);
21724 code = (code == UNEQ) ? EQ : NE;
21725 break;
21726
21727 default:
21728 gcc_unreachable ();
21729 }
21730
21731 gcc_assert (libfunc);
21732
21733 if (!check_nan)
21734 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21735 SImode, op0, mode, op1, mode);
21736
21737 /* The library signals an exception for signalling NaNs, so we need to
21738 handle isgreater, etc. by first checking isordered. */
21739 else
21740 {
21741 rtx ne_rtx, normal_dest, unord_dest;
21742 rtx unord_func = optab_libfunc (unord_optab, mode);
21743 rtx join_label = gen_label_rtx ();
21744 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21745 rtx unord_cmp = gen_reg_rtx (comp_mode);
21746
21747
21748 /* Test for either value being a NaN. */
21749 gcc_assert (unord_func);
21750 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21751 SImode, op0, mode, op1, mode);
21752
21753 /* Set the result to 1 if either value is a NaN, and jump to the join
21754 label. */
21755 dest = gen_reg_rtx (SImode);
21756 emit_move_insn (dest, const1_rtx);
21757 emit_insn (gen_rtx_SET (unord_cmp,
21758 gen_rtx_COMPARE (comp_mode, unord_dest,
21759 const0_rtx)));
21760
21761 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21762 emit_jump_insn (gen_rtx_SET (pc_rtx,
21763 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21764 join_ref,
21765 pc_rtx)));
21766
21767 /* Do the normal comparison, knowing that the values are not
21768 NaNs. */
21769 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21770 SImode, op0, mode, op1, mode);
21771
21772 emit_insn (gen_cstoresi4 (dest,
21773 gen_rtx_fmt_ee (code, SImode, normal_dest,
21774 const0_rtx),
21775 normal_dest, const0_rtx));
21776
21777 /* Join NaN and non-NaN paths. Compare dest against 0. */
21778 emit_label (join_label);
21779 code = NE;
21780 }
21781
21782 emit_insn (gen_rtx_SET (compare_result,
21783 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21784 }
21785
21786 else
21787 {
21788 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21789 CLOBBERs to match cmptf_internal2 pattern. */
21790 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21791 && FLOAT128_IBM_P (GET_MODE (op0))
21792 && TARGET_HARD_FLOAT)
21793 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21794 gen_rtvec (10,
21795 gen_rtx_SET (compare_result,
21796 gen_rtx_COMPARE (comp_mode, op0, op1)),
21797 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21798 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21799 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21800 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21801 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21802 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21803 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21804 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21805 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21806 else if (GET_CODE (op1) == UNSPEC
21807 && XINT (op1, 1) == UNSPEC_SP_TEST)
21808 {
21809 rtx op1b = XVECEXP (op1, 0, 0);
21810 comp_mode = CCEQmode;
21811 compare_result = gen_reg_rtx (CCEQmode);
21812 if (TARGET_64BIT)
21813 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21814 else
21815 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21816 }
21817 else
21818 emit_insn (gen_rtx_SET (compare_result,
21819 gen_rtx_COMPARE (comp_mode, op0, op1)));
21820 }
21821
21822 /* Some kinds of FP comparisons need an OR operation;
21823 under flag_finite_math_only we don't bother. */
21824 if (FLOAT_MODE_P (mode)
21825 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21826 && !flag_finite_math_only
21827 && (code == LE || code == GE
21828 || code == UNEQ || code == LTGT
21829 || code == UNGT || code == UNLT))
21830 {
21831 enum rtx_code or1, or2;
21832 rtx or1_rtx, or2_rtx, compare2_rtx;
21833 rtx or_result = gen_reg_rtx (CCEQmode);
21834
21835 switch (code)
21836 {
21837 case LE: or1 = LT; or2 = EQ; break;
21838 case GE: or1 = GT; or2 = EQ; break;
21839 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21840 case LTGT: or1 = LT; or2 = GT; break;
21841 case UNGT: or1 = UNORDERED; or2 = GT; break;
21842 case UNLT: or1 = UNORDERED; or2 = LT; break;
21843 default: gcc_unreachable ();
21844 }
21845 validate_condition_mode (or1, comp_mode);
21846 validate_condition_mode (or2, comp_mode);
21847 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21848 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21849 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21850 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21851 const_true_rtx);
21852 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21853
21854 compare_result = or_result;
21855 code = EQ;
21856 }
21857
21858 validate_condition_mode (code, GET_MODE (compare_result));
21859
21860 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21861 }
21862
21863 \f
21864 /* Return the diagnostic message string if the binary operation OP is
21865 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21866
21867 static const char*
21868 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21869 const_tree type1,
21870 const_tree type2)
21871 {
21872 machine_mode mode1 = TYPE_MODE (type1);
21873 machine_mode mode2 = TYPE_MODE (type2);
21874
21875 /* For complex modes, use the inner type. */
21876 if (COMPLEX_MODE_P (mode1))
21877 mode1 = GET_MODE_INNER (mode1);
21878
21879 if (COMPLEX_MODE_P (mode2))
21880 mode2 = GET_MODE_INNER (mode2);
21881
21882 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21883 double to intermix unless -mfloat128-convert. */
21884 if (mode1 == mode2)
21885 return NULL;
21886
21887 if (!TARGET_FLOAT128_CVT)
21888 {
21889 if ((mode1 == KFmode && mode2 == IFmode)
21890 || (mode1 == IFmode && mode2 == KFmode))
21891 return N_("__float128 and __ibm128 cannot be used in the same "
21892 "expression");
21893
21894 if (TARGET_IEEEQUAD
21895 && ((mode1 == IFmode && mode2 == TFmode)
21896 || (mode1 == TFmode && mode2 == IFmode)))
21897 return N_("__ibm128 and long double cannot be used in the same "
21898 "expression");
21899
21900 if (!TARGET_IEEEQUAD
21901 && ((mode1 == KFmode && mode2 == TFmode)
21902 || (mode1 == TFmode && mode2 == KFmode)))
21903 return N_("__float128 and long double cannot be used in the same "
21904 "expression");
21905 }
21906
21907 return NULL;
21908 }
21909
21910 \f
21911 /* Expand floating point conversion to/from __float128 and __ibm128. */
21912
21913 void
21914 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21915 {
21916 machine_mode dest_mode = GET_MODE (dest);
21917 machine_mode src_mode = GET_MODE (src);
21918 convert_optab cvt = unknown_optab;
21919 bool do_move = false;
21920 rtx libfunc = NULL_RTX;
21921 rtx dest2;
21922 typedef rtx (*rtx_2func_t) (rtx, rtx);
21923 rtx_2func_t hw_convert = (rtx_2func_t)0;
21924 size_t kf_or_tf;
21925
21926 struct hw_conv_t {
21927 rtx_2func_t from_df;
21928 rtx_2func_t from_sf;
21929 rtx_2func_t from_si_sign;
21930 rtx_2func_t from_si_uns;
21931 rtx_2func_t from_di_sign;
21932 rtx_2func_t from_di_uns;
21933 rtx_2func_t to_df;
21934 rtx_2func_t to_sf;
21935 rtx_2func_t to_si_sign;
21936 rtx_2func_t to_si_uns;
21937 rtx_2func_t to_di_sign;
21938 rtx_2func_t to_di_uns;
21939 } hw_conversions[2] = {
21940 /* conversions to/from KFmode */
21941 {
21942 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21943 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21944 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21945 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21946 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21947 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21948 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21949 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21950 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21951 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21952 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21953 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21954 },
21955
21956 /* conversions to/from TFmode */
21957 {
21958 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21959 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21960 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21961 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21962 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21963 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21964 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21965 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21966 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21967 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21968 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21969 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21970 },
21971 };
21972
21973 if (dest_mode == src_mode)
21974 gcc_unreachable ();
21975
21976 /* Eliminate memory operations. */
21977 if (MEM_P (src))
21978 src = force_reg (src_mode, src);
21979
21980 if (MEM_P (dest))
21981 {
21982 rtx tmp = gen_reg_rtx (dest_mode);
21983 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21984 rs6000_emit_move (dest, tmp, dest_mode);
21985 return;
21986 }
21987
21988 /* Convert to IEEE 128-bit floating point. */
21989 if (FLOAT128_IEEE_P (dest_mode))
21990 {
21991 if (dest_mode == KFmode)
21992 kf_or_tf = 0;
21993 else if (dest_mode == TFmode)
21994 kf_or_tf = 1;
21995 else
21996 gcc_unreachable ();
21997
21998 switch (src_mode)
21999 {
22000 case E_DFmode:
22001 cvt = sext_optab;
22002 hw_convert = hw_conversions[kf_or_tf].from_df;
22003 break;
22004
22005 case E_SFmode:
22006 cvt = sext_optab;
22007 hw_convert = hw_conversions[kf_or_tf].from_sf;
22008 break;
22009
22010 case E_KFmode:
22011 case E_IFmode:
22012 case E_TFmode:
22013 if (FLOAT128_IBM_P (src_mode))
22014 cvt = sext_optab;
22015 else
22016 do_move = true;
22017 break;
22018
22019 case E_SImode:
22020 if (unsigned_p)
22021 {
22022 cvt = ufloat_optab;
22023 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
22024 }
22025 else
22026 {
22027 cvt = sfloat_optab;
22028 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
22029 }
22030 break;
22031
22032 case E_DImode:
22033 if (unsigned_p)
22034 {
22035 cvt = ufloat_optab;
22036 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22037 }
22038 else
22039 {
22040 cvt = sfloat_optab;
22041 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22042 }
22043 break;
22044
22045 default:
22046 gcc_unreachable ();
22047 }
22048 }
22049
22050 /* Convert from IEEE 128-bit floating point. */
22051 else if (FLOAT128_IEEE_P (src_mode))
22052 {
22053 if (src_mode == KFmode)
22054 kf_or_tf = 0;
22055 else if (src_mode == TFmode)
22056 kf_or_tf = 1;
22057 else
22058 gcc_unreachable ();
22059
22060 switch (dest_mode)
22061 {
22062 case E_DFmode:
22063 cvt = trunc_optab;
22064 hw_convert = hw_conversions[kf_or_tf].to_df;
22065 break;
22066
22067 case E_SFmode:
22068 cvt = trunc_optab;
22069 hw_convert = hw_conversions[kf_or_tf].to_sf;
22070 break;
22071
22072 case E_KFmode:
22073 case E_IFmode:
22074 case E_TFmode:
22075 if (FLOAT128_IBM_P (dest_mode))
22076 cvt = trunc_optab;
22077 else
22078 do_move = true;
22079 break;
22080
22081 case E_SImode:
22082 if (unsigned_p)
22083 {
22084 cvt = ufix_optab;
22085 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22086 }
22087 else
22088 {
22089 cvt = sfix_optab;
22090 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22091 }
22092 break;
22093
22094 case E_DImode:
22095 if (unsigned_p)
22096 {
22097 cvt = ufix_optab;
22098 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22099 }
22100 else
22101 {
22102 cvt = sfix_optab;
22103 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22104 }
22105 break;
22106
22107 default:
22108 gcc_unreachable ();
22109 }
22110 }
22111
22112 /* Both IBM format. */
22113 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22114 do_move = true;
22115
22116 else
22117 gcc_unreachable ();
22118
22119 /* Handle conversion between TFmode/KFmode/IFmode. */
22120 if (do_move)
22121 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22122
22123 /* Handle conversion if we have hardware support. */
22124 else if (TARGET_FLOAT128_HW && hw_convert)
22125 emit_insn ((hw_convert) (dest, src));
22126
22127 /* Call an external function to do the conversion. */
22128 else if (cvt != unknown_optab)
22129 {
22130 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22131 gcc_assert (libfunc != NULL_RTX);
22132
22133 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22134 src, src_mode);
22135
22136 gcc_assert (dest2 != NULL_RTX);
22137 if (!rtx_equal_p (dest, dest2))
22138 emit_move_insn (dest, dest2);
22139 }
22140
22141 else
22142 gcc_unreachable ();
22143
22144 return;
22145 }
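
/* For example, converting a DFmode value to KFmode with TARGET_FLOAT128_HW
   uses the from_df entry above to emit, schematically,

       (set (reg:KF dest) (float_extend:KF (reg:DF src)))

   via extenddfkf2_hw; without hardware support the sext_optab libfunc
   (normally __extenddfkf2 in libgcc) is called instead.  */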
22146
22147 \f
22148 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22149 can be used as that dest register. Return the dest register. */
22150
22151 rtx
22152 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22153 {
22154 if (op2 == const0_rtx)
22155 return op1;
22156
22157 if (GET_CODE (scratch) == SCRATCH)
22158 scratch = gen_reg_rtx (mode);
22159
22160 if (logical_operand (op2, mode))
22161 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22162 else
22163 emit_insn (gen_rtx_SET (scratch,
22164 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22165
22166 return scratch;
22167 }
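
/* For example, with OP2 a logical_operand such as 4, the code above
   reduces the test to a single XOR, schematically

       (set (reg scratch) (xor:SI (reg op1) (const_int 4)))

   so SCRATCH is zero exactly when OP1 == OP2; constants that are not
   logical_operands are handled as OP1 + (-OP2) instead.  */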
22168
22169 void
22170 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22171 {
22172 rtx condition_rtx;
22173 machine_mode op_mode;
22174 enum rtx_code cond_code;
22175 rtx result = operands[0];
22176
22177 condition_rtx = rs6000_generate_compare (operands[1], mode);
22178 cond_code = GET_CODE (condition_rtx);
22179
22180 if (cond_code == NE
22181 || cond_code == GE || cond_code == LE
22182 || cond_code == GEU || cond_code == LEU
22183 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22184 {
22185 rtx not_result = gen_reg_rtx (CCEQmode);
22186 rtx not_op, rev_cond_rtx;
22187 machine_mode cc_mode;
22188
22189 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22190
22191 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22192 SImode, XEXP (condition_rtx, 0), const0_rtx);
22193 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22194 emit_insn (gen_rtx_SET (not_result, not_op));
22195 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22196 }
22197
22198 op_mode = GET_MODE (XEXP (operands[1], 0));
22199 if (op_mode == VOIDmode)
22200 op_mode = GET_MODE (XEXP (operands[1], 1));
22201
22202 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22203 {
22204 PUT_MODE (condition_rtx, DImode);
22205 convert_move (result, condition_rtx, 0);
22206 }
22207 else
22208 {
22209 PUT_MODE (condition_rtx, SImode);
22210 emit_insn (gen_rtx_SET (result, condition_rtx));
22211 }
22212 }
22213
22214 /* Emit a conditional branch to the label in OPERANDS[3], testing the comparison OPERANDS[0]. */
22215
22216 void
22217 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22218 {
22219 rtx condition_rtx, loc_ref;
22220
22221 condition_rtx = rs6000_generate_compare (operands[0], mode);
22222 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22223 emit_jump_insn (gen_rtx_SET (pc_rtx,
22224 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22225 loc_ref, pc_rtx)));
22226 }
22227
22228 /* Return the string to output a conditional branch to LABEL, which is
22229 the operand template of the label, or NULL if the branch is really a
22230 conditional return.
22231
22232 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22233 condition code register and its mode specifies what kind of
22234 comparison we made.
22235
22236 REVERSED is nonzero if we should reverse the sense of the comparison.
22237
22238 INSN is the insn. */
22239
22240 char *
22241 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22242 {
22243 static char string[64];
22244 enum rtx_code code = GET_CODE (op);
22245 rtx cc_reg = XEXP (op, 0);
22246 machine_mode mode = GET_MODE (cc_reg);
22247 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22248 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22249 int really_reversed = reversed ^ need_longbranch;
22250 char *s = string;
22251 const char *ccode;
22252 const char *pred;
22253 rtx note;
22254
22255 validate_condition_mode (code, mode);
22256
22257 /* Work out which way this really branches. We could use
22258 reverse_condition_maybe_unordered here always but this
22259 makes the resulting assembler clearer. */
22260 if (really_reversed)
22261 {
22262 /* Reversal of FP compares takes care -- an ordered compare
22263 becomes an unordered compare and vice versa. */
22264 if (mode == CCFPmode)
22265 code = reverse_condition_maybe_unordered (code);
22266 else
22267 code = reverse_condition (code);
22268 }
22269
22270 switch (code)
22271 {
22272 /* Not all of these are actually distinct opcodes, but
22273 we distinguish them for clarity of the resulting assembler. */
22274 case NE: case LTGT:
22275 ccode = "ne"; break;
22276 case EQ: case UNEQ:
22277 ccode = "eq"; break;
22278 case GE: case GEU:
22279 ccode = "ge"; break;
22280 case GT: case GTU: case UNGT:
22281 ccode = "gt"; break;
22282 case LE: case LEU:
22283 ccode = "le"; break;
22284 case LT: case LTU: case UNLT:
22285 ccode = "lt"; break;
22286 case UNORDERED: ccode = "un"; break;
22287 case ORDERED: ccode = "nu"; break;
22288 case UNGE: ccode = "nl"; break;
22289 case UNLE: ccode = "ng"; break;
22290 default:
22291 gcc_unreachable ();
22292 }
22293
22294 /* Maybe we have a guess as to how likely the branch is. */
22295 pred = "";
22296 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22297 if (note != NULL_RTX)
22298 {
22299 /* PROB is the difference from 50%. */
22300 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22301 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22302
22303 /* Only hint for highly probable/improbable branches on newer cpus when
22304 we have real profile data, as static prediction overrides processor
22305 dynamic prediction. For older cpus we may as well always hint, but
22306 assume not taken for branches that are very close to 50% as a
22307 mispredicted taken branch is more expensive than a
22308 mispredicted not-taken branch. */
22309 if (rs6000_always_hint
22310 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22311 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22312 && br_prob_note_reliable_p (note)))
22313 {
22314 if (abs (prob) > REG_BR_PROB_BASE / 20
22315 && ((prob > 0) ^ need_longbranch))
22316 pred = "+";
22317 else
22318 pred = "-";
22319 }
22320 }
22321
22322 if (label == NULL)
22323 s += sprintf (s, "b%slr%s ", ccode, pred);
22324 else
22325 s += sprintf (s, "b%s%s ", ccode, pred);
22326
22327 /* We need to escape any '%' characters in the reg_names string.
22328 Assume they'd only be the first character.... */
22329 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22330 *s++ = '%';
22331 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22332
22333 if (label != NULL)
22334 {
22335 /* If the branch distance was too far, we may have to use an
22336 unconditional branch to go the distance. */
22337 if (need_longbranch)
22338 s += sprintf (s, ",$+8\n\tb %s", label);
22339 else
22340 s += sprintf (s, ",%s", label);
22341 }
22342
22343 return string;
22344 }
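
/* For example, an EQ branch on cr0 that profile data marks as highly
   likely prints as something like "beq+ 0,.L25", while an out-of-range
   branch is inverted and emitted as "bne 0,$+8" followed by "b .L25".  */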
22345
22346 /* Return insn for VSX or Altivec comparisons. */
22347
22348 static rtx
22349 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22350 {
22351 rtx mask;
22352 machine_mode mode = GET_MODE (op0);
22353
22354 switch (code)
22355 {
22356 default:
22357 break;
22358
22359 case GE:
22360 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22361 return NULL_RTX;
22362 /* FALLTHRU */
22363
22364 case EQ:
22365 case GT:
22366 case GTU:
22367 case ORDERED:
22368 case UNORDERED:
22369 case UNEQ:
22370 case LTGT:
22371 mask = gen_reg_rtx (mode);
22372 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22373 return mask;
22374 }
22375
22376 return NULL_RTX;
22377 }
22378
22379 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22380 DMODE is the expected destination mode. This is a recursive function. */
22381
22382 static rtx
22383 rs6000_emit_vector_compare (enum rtx_code rcode,
22384 rtx op0, rtx op1,
22385 machine_mode dmode)
22386 {
22387 rtx mask;
22388 bool swap_operands = false;
22389 bool try_again = false;
22390
22391 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22392 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22393
22394 /* See if the comparison works as is. */
22395 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22396 if (mask)
22397 return mask;
22398
22399 switch (rcode)
22400 {
22401 case LT:
22402 rcode = GT;
22403 swap_operands = true;
22404 try_again = true;
22405 break;
22406 case LTU:
22407 rcode = GTU;
22408 swap_operands = true;
22409 try_again = true;
22410 break;
22411 case NE:
22412 case UNLE:
22413 case UNLT:
22414 case UNGE:
22415 case UNGT:
22416 /* Invert condition and try again.
22417 e.g., A != B becomes ~(A==B). */
22418 {
22419 enum rtx_code rev_code;
22420 enum insn_code nor_code;
22421 rtx mask2;
22422
22423 rev_code = reverse_condition_maybe_unordered (rcode);
22424 if (rev_code == UNKNOWN)
22425 return NULL_RTX;
22426
22427 nor_code = optab_handler (one_cmpl_optab, dmode);
22428 if (nor_code == CODE_FOR_nothing)
22429 return NULL_RTX;
22430
22431 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22432 if (!mask2)
22433 return NULL_RTX;
22434
22435 mask = gen_reg_rtx (dmode);
22436 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22437 return mask;
22438 }
22439 break;
22440 case GE:
22441 case GEU:
22442 case LE:
22443 case LEU:
22444 /* Try GT/GTU/LT/LTU OR EQ */
22445 {
22446 rtx c_rtx, eq_rtx;
22447 enum insn_code ior_code;
22448 enum rtx_code new_code;
22449
22450 switch (rcode)
22451 {
22452 case GE:
22453 new_code = GT;
22454 break;
22455
22456 case GEU:
22457 new_code = GTU;
22458 break;
22459
22460 case LE:
22461 new_code = LT;
22462 break;
22463
22464 case LEU:
22465 new_code = LTU;
22466 break;
22467
22468 default:
22469 gcc_unreachable ();
22470 }
22471
22472 ior_code = optab_handler (ior_optab, dmode);
22473 if (ior_code == CODE_FOR_nothing)
22474 return NULL_RTX;
22475
22476 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22477 if (!c_rtx)
22478 return NULL_RTX;
22479
22480 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22481 if (!eq_rtx)
22482 return NULL_RTX;
22483
22484 mask = gen_reg_rtx (dmode);
22485 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22486 return mask;
22487 }
22488 break;
22489 default:
22490 return NULL_RTX;
22491 }
22492
22493 if (try_again)
22494 {
22495 if (swap_operands)
22496 std::swap (op0, op1);
22497
22498 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22499 if (mask)
22500 return mask;
22501 }
22502
22503 /* You only get two chances. */
22504 return NULL_RTX;
22505 }
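
/* For example, an integer-vector GE is synthesized above as GT OR EQ,
   an NE as NOT (EQ), and an LT by swapping the operands of a GT, since
   AltiVec/VSX only provide the EQ/GT/GTU comparisons directly.  */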
22506
22507 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22508 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22509 operands for the relation operation COND. */
22510
22511 int
22512 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22513 rtx cond, rtx cc_op0, rtx cc_op1)
22514 {
22515 machine_mode dest_mode = GET_MODE (dest);
22516 machine_mode mask_mode = GET_MODE (cc_op0);
22517 enum rtx_code rcode = GET_CODE (cond);
22518 machine_mode cc_mode = CCmode;
22519 rtx mask;
22520 rtx cond2;
22521 bool invert_move = false;
22522
22523 if (VECTOR_UNIT_NONE_P (dest_mode))
22524 return 0;
22525
22526 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22527 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22528
22529 switch (rcode)
22530 {
22531 /* Swap operands if we can, and fall back to doing the operation as
22532 specified, and doing a NOR to invert the test. */
22533 case NE:
22534 case UNLE:
22535 case UNLT:
22536 case UNGE:
22537 case UNGT:
22538 /* Invert condition and try again.
22539 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22540 invert_move = true;
22541 rcode = reverse_condition_maybe_unordered (rcode);
22542 if (rcode == UNKNOWN)
22543 return 0;
22544 break;
22545
22546 case GE:
22547 case LE:
22548 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22549 {
22550 /* Invert condition to avoid compound test. */
22551 invert_move = true;
22552 rcode = reverse_condition (rcode);
22553 }
22554 break;
22555
22556 case GTU:
22557 case GEU:
22558 case LTU:
22559 case LEU:
22560 /* Mark unsigned tests with CCUNSmode. */
22561 cc_mode = CCUNSmode;
22562
22563 /* Invert condition to avoid compound test if necessary. */
22564 if (rcode == GEU || rcode == LEU)
22565 {
22566 invert_move = true;
22567 rcode = reverse_condition (rcode);
22568 }
22569 break;
22570
22571 default:
22572 break;
22573 }
22574
22575 /* Get the vector mask for the given relational operations. */
22576 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22577
22578 if (!mask)
22579 return 0;
22580
22581 if (invert_move)
22582 std::swap (op_true, op_false);
22583
22584 /* The comparison mask is known to be -1/0; optimize selections whose arms are the constant vectors -1 and 0. */
22585 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22586 && (GET_CODE (op_true) == CONST_VECTOR
22587 || GET_CODE (op_false) == CONST_VECTOR))
22588 {
22589 rtx constant_0 = CONST0_RTX (dest_mode);
22590 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22591
22592 if (op_true == constant_m1 && op_false == constant_0)
22593 {
22594 emit_move_insn (dest, mask);
22595 return 1;
22596 }
22597
22598 else if (op_true == constant_0 && op_false == constant_m1)
22599 {
22600 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22601 return 1;
22602 }
22603
22604 /* If we can't use the vector comparison directly, perhaps we can use
22605 the mask for the true or false fields, instead of loading up a
22606 constant. */
22607 if (op_true == constant_m1)
22608 op_true = mask;
22609
22610 if (op_false == constant_0)
22611 op_false = mask;
22612 }
22613
22614 if (!REG_P (op_true) && !SUBREG_P (op_true))
22615 op_true = force_reg (dest_mode, op_true);
22616
22617 if (!REG_P (op_false) && !SUBREG_P (op_false))
22618 op_false = force_reg (dest_mode, op_false);
22619
22620 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22621 CONST0_RTX (dest_mode));
22622 emit_insn (gen_rtx_SET (dest,
22623 gen_rtx_IF_THEN_ELSE (dest_mode,
22624 cond2,
22625 op_true,
22626 op_false)));
22627 return 1;
22628 }
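
/* The emitted RTL amounts to a vector compare feeding a vector select;
   for V4SI operands the result is something like

       vcmpgtsw mask,cc_op0,cc_op1
       xxsel    dest,op_false,op_true,mask

   where each lane of DEST takes the OP_TRUE element wherever the
   corresponding mask lane is all-ones.  */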
22629
22630 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22631 for SF/DF scalars. Move TRUE_COND to DEST if OP applied to the operands of
22632 the last comparison is nonzero/true, FALSE_COND if it is zero/false. Return
22633 0 if the hardware has no such operation. */
22634
22635 static int
22636 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22637 {
22638 enum rtx_code code = GET_CODE (op);
22639 rtx op0 = XEXP (op, 0);
22640 rtx op1 = XEXP (op, 1);
22641 machine_mode compare_mode = GET_MODE (op0);
22642 machine_mode result_mode = GET_MODE (dest);
22643 bool max_p = false;
22644
22645 if (result_mode != compare_mode)
22646 return 0;
22647
22648 if (code == GE || code == GT)
22649 max_p = true;
22650 else if (code == LE || code == LT)
22651 max_p = false;
22652 else
22653 return 0;
22654
22655 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22656 ;
22657
22658 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22659 max_p = !max_p;
22660
22661 else
22662 return 0;
22663
22664 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22665 return 1;
22666 }
22667
22668 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22669 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP applied
22670 to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22671 zero/false. Return 0 if the hardware has no such operation. */
22672
22673 static int
22674 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22675 {
22676 enum rtx_code code = GET_CODE (op);
22677 rtx op0 = XEXP (op, 0);
22678 rtx op1 = XEXP (op, 1);
22679 machine_mode result_mode = GET_MODE (dest);
22680 rtx compare_rtx;
22681 rtx cmove_rtx;
22682 rtx clobber_rtx;
22683
22684 if (!can_create_pseudo_p ())
22685 return 0;
22686
22687 switch (code)
22688 {
22689 case EQ:
22690 case GE:
22691 case GT:
22692 break;
22693
22694 case NE:
22695 case LT:
22696 case LE:
22697 code = swap_condition (code);
22698 std::swap (op0, op1);
22699 break;
22700
22701 default:
22702 return 0;
22703 }
22704
22705 /* Generate: [(parallel [(set (dest)
22706 (if_then_else (op (cmp1) (cmp2))
22707 (true)
22708 (false)))
22709 (clobber (scratch))])]. */
22710
22711 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22712 cmove_rtx = gen_rtx_SET (dest,
22713 gen_rtx_IF_THEN_ELSE (result_mode,
22714 compare_rtx,
22715 true_cond,
22716 false_cond));
22717
22718 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22719 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22720 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22721
22722 return 1;
22723 }
22724
22725 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22726 operands of the last comparison is nonzero/true, FALSE_COND if it
22727 is zero/false. Return 0 if the hardware has no such operation. */
22728
22729 int
22730 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22731 {
22732 enum rtx_code code = GET_CODE (op);
22733 rtx op0 = XEXP (op, 0);
22734 rtx op1 = XEXP (op, 1);
22735 machine_mode compare_mode = GET_MODE (op0);
22736 machine_mode result_mode = GET_MODE (dest);
22737 rtx temp;
22738 bool is_against_zero;
22739
22740 /* These modes should always match. */
22741 if (GET_MODE (op1) != compare_mode
22742 /* In the isel case however, we can use a compare immediate, so
22743 op1 may be a small constant. */
22744 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22745 return 0;
22746 if (GET_MODE (true_cond) != result_mode)
22747 return 0;
22748 if (GET_MODE (false_cond) != result_mode)
22749 return 0;
22750
22751 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22752 if (TARGET_P9_MINMAX
22753 && (compare_mode == SFmode || compare_mode == DFmode)
22754 && (result_mode == SFmode || result_mode == DFmode))
22755 {
22756 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22757 return 1;
22758
22759 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22760 return 1;
22761 }
22762
22763 /* Don't allow using floating point comparisons for integer results for
22764 now. */
22765 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22766 return 0;
22767
22768 /* First, work out if the hardware can do this at all, or
22769 if it's too slow.... */
22770 if (!FLOAT_MODE_P (compare_mode))
22771 {
22772 if (TARGET_ISEL)
22773 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22774 return 0;
22775 }
22776
22777 is_against_zero = op1 == CONST0_RTX (compare_mode);
22778
22779 /* A floating-point subtract might overflow, underflow, or produce
22780 an inexact result, thus changing the floating-point flags, so it
22781 can't be generated if we care about that. It's safe if one side
22782 of the construct is zero, since then no subtract will be
22783 generated. */
22784 if (SCALAR_FLOAT_MODE_P (compare_mode)
22785 && flag_trapping_math && ! is_against_zero)
22786 return 0;
22787
22788 /* Eliminate half of the comparisons by switching operands, this
22789 makes the remaining code simpler. */
22790 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22791 || code == LTGT || code == LT || code == UNLE)
22792 {
22793 code = reverse_condition_maybe_unordered (code);
22794 temp = true_cond;
22795 true_cond = false_cond;
22796 false_cond = temp;
22797 }
22798
22799 /* UNEQ and LTGT take four instructions for a comparison with zero;
22800 it'll probably be faster to use a branch here too. */
22801 if (code == UNEQ && HONOR_NANS (compare_mode))
22802 return 0;
22803
22804 /* We're going to try to implement comparisons by performing
22805 a subtract, then comparing against zero. Unfortunately,
22806 Inf - Inf is NaN which is not zero, and so if we don't
22807 know that the operand is finite and the comparison
22808 would treat EQ differently from UNORDERED, we can't do it. */
22809 if (HONOR_INFINITIES (compare_mode)
22810 && code != GT && code != UNGE
22811 && (!CONST_DOUBLE_P (op1)
22812 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22813 /* Constructs of the form (a OP b ? a : b) are safe. */
22814 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22815 || (! rtx_equal_p (op0, true_cond)
22816 && ! rtx_equal_p (op1, true_cond))))
22817 return 0;
22818
22819 /* At this point we know we can use fsel. */
22820
22821 /* Reduce the comparison to a comparison against zero. */
22822 if (! is_against_zero)
22823 {
22824 temp = gen_reg_rtx (compare_mode);
22825 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22826 op0 = temp;
22827 op1 = CONST0_RTX (compare_mode);
22828 }
22829
22830 /* If we don't care about NaNs we can reduce some of the comparisons
22831 down to faster ones. */
22832 if (! HONOR_NANS (compare_mode))
22833 switch (code)
22834 {
22835 case GT:
22836 code = LE;
22837 temp = true_cond;
22838 true_cond = false_cond;
22839 false_cond = temp;
22840 break;
22841 case UNGE:
22842 code = GE;
22843 break;
22844 case UNEQ:
22845 code = EQ;
22846 break;
22847 default:
22848 break;
22849 }
22850
22851 /* Now, reduce everything down to a GE. */
22852 switch (code)
22853 {
22854 case GE:
22855 break;
22856
22857 case LE:
22858 temp = gen_reg_rtx (compare_mode);
22859 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22860 op0 = temp;
22861 break;
22862
22863 case ORDERED:
22864 temp = gen_reg_rtx (compare_mode);
22865 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22866 op0 = temp;
22867 break;
22868
22869 case EQ:
22870 temp = gen_reg_rtx (compare_mode);
22871 emit_insn (gen_rtx_SET (temp,
22872 gen_rtx_NEG (compare_mode,
22873 gen_rtx_ABS (compare_mode, op0))));
22874 op0 = temp;
22875 break;
22876
22877 case UNGE:
22878 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22879 temp = gen_reg_rtx (result_mode);
22880 emit_insn (gen_rtx_SET (temp,
22881 gen_rtx_IF_THEN_ELSE (result_mode,
22882 gen_rtx_GE (VOIDmode,
22883 op0, op1),
22884 true_cond, false_cond)));
22885 false_cond = true_cond;
22886 true_cond = temp;
22887
22888 temp = gen_reg_rtx (compare_mode);
22889 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22890 op0 = temp;
22891 break;
22892
22893 case GT:
22894 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22895 temp = gen_reg_rtx (result_mode);
22896 emit_insn (gen_rtx_SET (temp,
22897 gen_rtx_IF_THEN_ELSE (result_mode,
22898 gen_rtx_GE (VOIDmode,
22899 op0, op1),
22900 true_cond, false_cond)));
22901 true_cond = false_cond;
22902 false_cond = temp;
22903
22904 temp = gen_reg_rtx (compare_mode);
22905 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22906 op0 = temp;
22907 break;
22908
22909 default:
22910 gcc_unreachable ();
22911 }
22912
22913 emit_insn (gen_rtx_SET (dest,
22914 gen_rtx_IF_THEN_ELSE (result_mode,
22915 gen_rtx_GE (VOIDmode,
22916 op0, op1),
22917 true_cond, false_cond)));
22918 return 1;
22919 }
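
/* For example, (a <= 0.0 ? x : y) is handled above by negating A and
   reducing to the GE test, so the final insn is the equivalent of
   "fsel dest,-a,x,y", i.e. dest = (-a >= 0.0) ? x : y.  */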
22920
22921 /* Same as above, but for ints (isel). */
22922
22923 int
22924 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22925 {
22926 rtx condition_rtx, cr;
22927 machine_mode mode = GET_MODE (dest);
22928 enum rtx_code cond_code;
22929 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22930 bool signedp;
22931
22932 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22933 return 0;
22934
22935 /* We still have to do the compare, because isel doesn't do a
22936 compare; it just looks at the CRx bits set by a previous compare
22937 instruction. */
22938 condition_rtx = rs6000_generate_compare (op, mode);
22939 cond_code = GET_CODE (condition_rtx);
22940 cr = XEXP (condition_rtx, 0);
22941 signedp = GET_MODE (cr) == CCmode;
22942
22943 isel_func = (mode == SImode
22944 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22945 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22946
22947 switch (cond_code)
22948 {
22949 case LT: case GT: case LTU: case GTU: case EQ:
22950 /* isel handles these directly. */
22951 break;
22952
22953 default:
22954 /* We need to swap the sense of the comparison. */
22955 {
22956 std::swap (false_cond, true_cond);
22957 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22958 }
22959 break;
22960 }
22961
22962 false_cond = force_reg (mode, false_cond);
22963 if (true_cond != const0_rtx)
22964 true_cond = force_reg (mode, true_cond);
22965
22966 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22967
22968 return 1;
22969 }
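
/* For example, (a < b ? x : y) in SImode becomes a compare followed by
   an isel reading the LT bit of the CR field, roughly

       cmpw 0,ra,rb
       isel rd,rx,ry,0

   with no branch needed.  */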
22970
22971 void
22972 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22973 {
22974 machine_mode mode = GET_MODE (op0);
22975 enum rtx_code c;
22976 rtx target;
22977
22978 /* VSX/altivec have direct min/max insns. */
22979 if ((code == SMAX || code == SMIN)
22980 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22981 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22982 {
22983 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22984 return;
22985 }
22986
22987 if (code == SMAX || code == SMIN)
22988 c = GE;
22989 else
22990 c = GEU;
22991
22992 if (code == SMAX || code == UMAX)
22993 target = emit_conditional_move (dest, c, op0, op1, mode,
22994 op0, op1, mode, 0);
22995 else
22996 target = emit_conditional_move (dest, c, op0, op1, mode,
22997 op1, op0, mode, 0);
22998 gcc_assert (target);
22999 if (target != dest)
23000 emit_move_insn (dest, target);
23001 }
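
/* Without a direct min/max insn this falls back to a conditional move:
   SMAX, for example, becomes dest = (op0 >= op1) ? op0 : op1, and SMIN
   simply swaps the two move arms.  */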
23002
23003 /* A subroutine of the atomic operation splitters. Jump to LABEL if
23004 COND is true. Mark the jump as unlikely to be taken. */
23005
23006 static void
23007 emit_unlikely_jump (rtx cond, rtx label)
23008 {
23009 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
23010 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
23011 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
23012 }
23013
23014 /* A subroutine of the atomic operation splitters. Emit a load-locked
23015 instruction in MODE. For QI/HImode, possibly use a pattern that includes
23016 the zero_extend operation. */
23017
23018 static void
23019 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
23020 {
23021 rtx (*fn) (rtx, rtx) = NULL;
23022
23023 switch (mode)
23024 {
23025 case E_QImode:
23026 fn = gen_load_lockedqi;
23027 break;
23028 case E_HImode:
23029 fn = gen_load_lockedhi;
23030 break;
23031 case E_SImode:
23032 if (GET_MODE (mem) == QImode)
23033 fn = gen_load_lockedqi_si;
23034 else if (GET_MODE (mem) == HImode)
23035 fn = gen_load_lockedhi_si;
23036 else
23037 fn = gen_load_lockedsi;
23038 break;
23039 case E_DImode:
23040 fn = gen_load_lockeddi;
23041 break;
23042 case E_TImode:
23043 fn = gen_load_lockedti;
23044 break;
23045 default:
23046 gcc_unreachable ();
23047 }
23048 emit_insn (fn (reg, mem));
23049 }
23050
23051 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23052 instruction in MODE. */
23053
23054 static void
23055 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23056 {
23057 rtx (*fn) (rtx, rtx, rtx) = NULL;
23058
23059 switch (mode)
23060 {
23061 case E_QImode:
23062 fn = gen_store_conditionalqi;
23063 break;
23064 case E_HImode:
23065 fn = gen_store_conditionalhi;
23066 break;
23067 case E_SImode:
23068 fn = gen_store_conditionalsi;
23069 break;
23070 case E_DImode:
23071 fn = gen_store_conditionaldi;
23072 break;
23073 case E_TImode:
23074 fn = gen_store_conditionalti;
23075 break;
23076 default:
23077 gcc_unreachable ();
23078 }
23079
23080 /* Emit sync before stwcx. to address PPC405 Erratum. */
23081 if (PPC405_ERRATUM77)
23082 emit_insn (gen_hwsync ());
23083
23084 emit_insn (fn (res, mem, val));
23085 }
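
/* A load-locked/store-conditional pair such as lwarx/stwcx. implements
   the reservation protocol: the stwcx. succeeds, setting the EQ bit of
   CR0, only if the reservation established by the matching lwarx is
   still held.  */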
23086
23087 /* Expand barriers before and after a load_locked/store_cond sequence. */
23088
23089 static rtx
23090 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23091 {
23092 rtx addr = XEXP (mem, 0);
23093
23094 if (!legitimate_indirect_address_p (addr, reload_completed)
23095 && !legitimate_indexed_address_p (addr, reload_completed))
23096 {
23097 addr = force_reg (Pmode, addr);
23098 mem = replace_equiv_address_nv (mem, addr);
23099 }
23100
23101 switch (model)
23102 {
23103 case MEMMODEL_RELAXED:
23104 case MEMMODEL_CONSUME:
23105 case MEMMODEL_ACQUIRE:
23106 break;
23107 case MEMMODEL_RELEASE:
23108 case MEMMODEL_ACQ_REL:
23109 emit_insn (gen_lwsync ());
23110 break;
23111 case MEMMODEL_SEQ_CST:
23112 emit_insn (gen_hwsync ());
23113 break;
23114 default:
23115 gcc_unreachable ();
23116 }
23117 return mem;
23118 }
23119
23120 static void
23121 rs6000_post_atomic_barrier (enum memmodel model)
23122 {
23123 switch (model)
23124 {
23125 case MEMMODEL_RELAXED:
23126 case MEMMODEL_CONSUME:
23127 case MEMMODEL_RELEASE:
23128 break;
23129 case MEMMODEL_ACQUIRE:
23130 case MEMMODEL_ACQ_REL:
23131 case MEMMODEL_SEQ_CST:
23132 emit_insn (gen_isync ());
23133 break;
23134 default:
23135 gcc_unreachable ();
23136 }
23137 }
23138
23139 /* A subroutine of the various atomic expanders. For sub-word operations,
23140 we must adjust things to operate on SImode. Given the original MEM,
23141 return a new aligned MEM. Also build and return the quantities by
23142 which to shift and mask. */
23143
23144 static rtx
23145 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23146 {
23147 rtx addr, align, shift, mask, mem;
23148 HOST_WIDE_INT shift_mask;
23149 machine_mode mode = GET_MODE (orig_mem);
23150
23151 /* For smaller modes, we have to implement this via SImode. */
23152 shift_mask = (mode == QImode ? 0x18 : 0x10);
23153
23154 addr = XEXP (orig_mem, 0);
23155 addr = force_reg (GET_MODE (addr), addr);
23156
23157 /* Aligned memory containing the subword. Generate a new MEM. We
23158 do not want any of the existing MEM_ATTR data, as we're now
23159 accessing memory outside the original object. */
23160 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23161 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23162 mem = gen_rtx_MEM (SImode, align);
23163 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23164 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23165 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23166
23167 /* Shift amount for subword relative to aligned word. */
23168 shift = gen_reg_rtx (SImode);
23169 addr = gen_lowpart (SImode, addr);
23170 rtx tmp = gen_reg_rtx (SImode);
23171 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23172 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23173 if (BYTES_BIG_ENDIAN)
23174 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23175 shift, 1, OPTAB_LIB_WIDEN);
23176 *pshift = shift;
23177
23178 /* Mask for insertion. */
23179 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23180 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23181 *pmask = mask;
23182
23183 return mem;
23184 }
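
/* Worked example: for an HImode access at address 0x1002 the aligned
   SImode word is at 0x1000, (0x1002 << 3) & 0x10 gives a shift of 16
   (bits 16..31 of the word on little-endian; the XOR maps this to 0 on
   big-endian), and the mask is 0xffff << shift.  */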
23185
23186 /* A subroutine of the various atomic expanders. For sub-word operands,
23187 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23188
23189 static rtx
23190 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23191 {
23192 rtx x;
23193
23194 x = gen_reg_rtx (SImode);
23195 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23196 gen_rtx_NOT (SImode, mask),
23197 oldval)));
23198
23199 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23200
23201 return x;
23202 }
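
/* That is, the combined word is (OLDVAL & ~MASK) | NEWVAL, relying on
   NEWVAL having already been shifted into position with no bits set
   outside MASK.  */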
23203
23204 /* A subroutine of the various atomic expanders. For sub-word operands,
23205 extract WIDE to NARROW via SHIFT. */
23206
23207 static void
23208 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23209 {
23210 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23211 wide, 1, OPTAB_LIB_WIDEN);
23212 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23213 }
23214
23215 /* Expand an atomic compare and swap operation. */
23216
23217 void
23218 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23219 {
23220 rtx boolval, retval, mem, oldval, newval, cond;
23221 rtx label1, label2, x, mask, shift;
23222 machine_mode mode, orig_mode;
23223 enum memmodel mod_s, mod_f;
23224 bool is_weak;
23225
23226 boolval = operands[0];
23227 retval = operands[1];
23228 mem = operands[2];
23229 oldval = operands[3];
23230 newval = operands[4];
23231 is_weak = (INTVAL (operands[5]) != 0);
23232 mod_s = memmodel_base (INTVAL (operands[6]));
23233 mod_f = memmodel_base (INTVAL (operands[7]));
23234 orig_mode = mode = GET_MODE (mem);
23235
23236 mask = shift = NULL_RTX;
23237 if (mode == QImode || mode == HImode)
23238 {
23239 /* Before power8, we didn't have access to lbarx/lharx, so generate
23240 lwarx and shift/mask operations. With power8, we need to do the
23241 comparison in SImode, but the store is still done in QI/HImode. */
23242 oldval = convert_modes (SImode, mode, oldval, 1);
23243
23244 if (!TARGET_SYNC_HI_QI)
23245 {
23246 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23247
23248 /* Shift and mask OLDVAL into position within the word. */
23249 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23250 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23251
23252 /* Shift and mask NEWVAL into position within the word. */
23253 newval = convert_modes (SImode, mode, newval, 1);
23254 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23255 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23256 }
23257
23258 /* Prepare to adjust the return value. */
23259 retval = gen_reg_rtx (SImode);
23260 mode = SImode;
23261 }
23262 else if (reg_overlap_mentioned_p (retval, oldval))
23263 oldval = copy_to_reg (oldval);
23264
23265 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23266 oldval = copy_to_mode_reg (mode, oldval);
23267
23268 if (reg_overlap_mentioned_p (retval, newval))
23269 newval = copy_to_reg (newval);
23270
23271 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23272
23273 label1 = NULL_RTX;
23274 if (!is_weak)
23275 {
23276 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23277 emit_label (XEXP (label1, 0));
23278 }
23279 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23280
23281 emit_load_locked (mode, retval, mem);
23282
23283 x = retval;
23284 if (mask)
23285 x = expand_simple_binop (SImode, AND, retval, mask,
23286 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23287
23288 cond = gen_reg_rtx (CCmode);
23289 /* If we have TImode, synthesize a comparison. */
23290 if (mode != TImode)
23291 x = gen_rtx_COMPARE (CCmode, x, oldval);
23292 else
23293 {
23294 rtx xor1_result = gen_reg_rtx (DImode);
23295 rtx xor2_result = gen_reg_rtx (DImode);
23296 rtx or_result = gen_reg_rtx (DImode);
23297 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23298 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23299 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23300 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23301
23302 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23303 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23304 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23305 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23306 }
23307
23308 emit_insn (gen_rtx_SET (cond, x));
23309
23310 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23311 emit_unlikely_jump (x, label2);
23312
23313 x = newval;
23314 if (mask)
23315 x = rs6000_mask_atomic_subword (retval, newval, mask);
23316
23317 emit_store_conditional (orig_mode, cond, mem, x);
23318
23319 if (!is_weak)
23320 {
23321 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23322 emit_unlikely_jump (x, label1);
23323 }
23324
23325 if (!is_mm_relaxed (mod_f))
23326 emit_label (XEXP (label2, 0));
23327
23328 rs6000_post_atomic_barrier (mod_s);
23329
23330 if (is_mm_relaxed (mod_f))
23331 emit_label (XEXP (label2, 0));
23332
23333 if (shift)
23334 rs6000_finish_atomic_subword (operands[1], retval, shift);
23335 else if (mode != GET_MODE (operands[1]))
23336 convert_move (operands[1], retval, 1);
23337
23338 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23339 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23340 emit_insn (gen_rtx_SET (boolval, x));
23341 }
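
/* For a word-sized strong compare-and-swap the emitted sequence is,
   schematically (the barriers depend on the memory models):

       hwsync                  # pre-barrier for seq_cst
   1:  lwarx   r,0,mem
       cmpw    0,r,oldval
       bne-    0,2f
       stwcx.  newval,0,mem
       bne-    0,1b
   2:  isync                   # post-barrier
*/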
23342
23343 /* Expand an atomic exchange operation. */
23344
23345 void
23346 rs6000_expand_atomic_exchange (rtx operands[])
23347 {
23348 rtx retval, mem, val, cond;
23349 machine_mode mode;
23350 enum memmodel model;
23351 rtx label, x, mask, shift;
23352
23353 retval = operands[0];
23354 mem = operands[1];
23355 val = operands[2];
23356 model = memmodel_base (INTVAL (operands[3]));
23357 mode = GET_MODE (mem);
23358
23359 mask = shift = NULL_RTX;
23360 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23361 {
23362 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23363
23364 /* Shift and mask VAL into position within the word. */
23365 val = convert_modes (SImode, mode, val, 1);
23366 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23367 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23368
23369 /* Prepare to adjust the return value. */
23370 retval = gen_reg_rtx (SImode);
23371 mode = SImode;
23372 }
23373
23374 mem = rs6000_pre_atomic_barrier (mem, model);
23375
23376 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23377 emit_label (XEXP (label, 0));
23378
23379 emit_load_locked (mode, retval, mem);
23380
23381 x = val;
23382 if (mask)
23383 x = rs6000_mask_atomic_subword (retval, val, mask);
23384
23385 cond = gen_reg_rtx (CCmode);
23386 emit_store_conditional (mode, cond, mem, x);
23387
23388 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23389 emit_unlikely_jump (x, label);
23390
23391 rs6000_post_atomic_barrier (model);
23392
23393 if (shift)
23394 rs6000_finish_atomic_subword (operands[0], retval, shift);
23395 }
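
/* Schematically, a word-sized exchange is just

   1:  lwarx   ret,0,mem
       stwcx.  val,0,mem
       bne-    0,1b

   with barriers before and/or after as required by MODEL.  */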
23396
23397 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23398 to perform. MEM is the memory on which to operate. VAL is the second
23399 operand of the binary operator. BEFORE and AFTER are optional locations to
23400 return the value of MEM either before or after the operation. MODEL_RTX
23401 is a CONST_INT containing the memory model to use. */
23402
23403 void
23404 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23405 rtx orig_before, rtx orig_after, rtx model_rtx)
23406 {
23407 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23408 machine_mode mode = GET_MODE (mem);
23409 machine_mode store_mode = mode;
23410 rtx label, x, cond, mask, shift;
23411 rtx before = orig_before, after = orig_after;
23412
23413 mask = shift = NULL_RTX;
23414 /* On power8, we want to use SImode for the operation. On previous systems,
23415 use the operation in a subword and shift/mask to get the proper byte or
23416 halfword. */
23417 if (mode == QImode || mode == HImode)
23418 {
23419 if (TARGET_SYNC_HI_QI)
23420 {
23421 val = convert_modes (SImode, mode, val, 1);
23422
23423 /* Prepare to adjust the return value. */
23424 before = gen_reg_rtx (SImode);
23425 if (after)
23426 after = gen_reg_rtx (SImode);
23427 mode = SImode;
23428 }
23429 else
23430 {
23431 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23432
23433 /* Shift and mask VAL into position within the word. */
23434 val = convert_modes (SImode, mode, val, 1);
23435 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23436 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23437
23438 switch (code)
23439 {
23440 case IOR:
23441 case XOR:
23442 /* We've already zero-extended VAL. That is sufficient to
23443 make certain that it does not affect other bits. */
23444 mask = NULL;
23445 break;
23446
23447 case AND:
23448 /* If we make certain that all of the other bits in VAL are
23449 set, that will be sufficient to not affect other bits. */
23450 x = gen_rtx_NOT (SImode, mask);
23451 x = gen_rtx_IOR (SImode, x, val);
23452 emit_insn (gen_rtx_SET (val, x));
23453 mask = NULL;
23454 break;
23455
23456 case NOT:
23457 case PLUS:
23458 case MINUS:
23459 /* These will all affect bits outside the field and need
23460 adjustment via MASK within the loop. */
23461 break;
23462
23463 default:
23464 gcc_unreachable ();
23465 }
23466
23467 /* Prepare to adjust the return value. */
23468 before = gen_reg_rtx (SImode);
23469 if (after)
23470 after = gen_reg_rtx (SImode);
23471 store_mode = mode = SImode;
23472 }
23473 }
23474
23475 mem = rs6000_pre_atomic_barrier (mem, model);
23476
23477 label = gen_label_rtx ();
23478 emit_label (label);
23479 label = gen_rtx_LABEL_REF (VOIDmode, label);
23480
23481 if (before == NULL_RTX)
23482 before = gen_reg_rtx (mode);
23483
23484 emit_load_locked (mode, before, mem);
23485
23486 if (code == NOT)
23487 {
23488 x = expand_simple_binop (mode, AND, before, val,
23489 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23490 after = expand_simple_unop (mode, NOT, x, after, 1);
23491 }
23492 else
23493 {
23494 after = expand_simple_binop (mode, code, before, val,
23495 after, 1, OPTAB_LIB_WIDEN);
23496 }
23497
23498 x = after;
23499 if (mask)
23500 {
23501 x = expand_simple_binop (SImode, AND, after, mask,
23502 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23503 x = rs6000_mask_atomic_subword (before, x, mask);
23504 }
23505 else if (store_mode != mode)
23506 x = convert_modes (store_mode, mode, x, 1);
23507
23508 cond = gen_reg_rtx (CCmode);
23509 emit_store_conditional (store_mode, cond, mem, x);
23510
23511 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23512 emit_unlikely_jump (x, label);
23513
23514 rs6000_post_atomic_barrier (model);
23515
23516 if (shift)
23517 {
23518 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23519 then do the calculations in a SImode register. */
23520 if (orig_before)
23521 rs6000_finish_atomic_subword (orig_before, before, shift);
23522 if (orig_after)
23523 rs6000_finish_atomic_subword (orig_after, after, shift);
23524 }
23525 else if (store_mode != mode)
23526 {
23527 /* QImode/HImode on machines with lbarx/lharx where we do the native
23528 operation and then do the calculations in a SImode register. */
23529 if (orig_before)
23530 convert_move (orig_before, before, 1);
23531 if (orig_after)
23532 convert_move (orig_after, after, 1);
23533 }
23534 else if (orig_after && after != orig_after)
23535 emit_move_insn (orig_after, after);
23536 }
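
/* For example, a word-sized atomic fetch-and-add expands to

   1:  lwarx   before,0,mem
       add     after,before,val
       stwcx.  after,0,mem
       bne-    0,1b

   while a subword AND pre-sets all bits of VAL outside the field above,
   so the full-word AND leaves the neighbouring bytes intact.  */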
23537
23538 /* Emit instructions to move SRC to DST. Called by splitters for
23539 multi-register moves. It will emit at most one instruction for
23540 each register that is accessed; that is, it won't emit li/lis pairs
23541 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23542 register. */
23543
23544 void
23545 rs6000_split_multireg_move (rtx dst, rtx src)
23546 {
23547 /* The register number of the first register being moved. */
23548 int reg;
23549 /* The mode that is to be moved. */
23550 machine_mode mode;
23551 /* The mode that the move is being done in, and its size. */
23552 machine_mode reg_mode;
23553 int reg_mode_size;
23554 /* The number of registers that will be moved. */
23555 int nregs;
23556
23557 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23558 mode = GET_MODE (dst);
23559 nregs = hard_regno_nregs (reg, mode);
23560 if (FP_REGNO_P (reg))
23561 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23562 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23563 else if (ALTIVEC_REGNO_P (reg))
23564 reg_mode = V16QImode;
23565 else
23566 reg_mode = word_mode;
23567 reg_mode_size = GET_MODE_SIZE (reg_mode);
23568
23569 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23570
23571 /* TDmode residing in FP registers is special, since the ISA requires that
23572 the lower-numbered word of a register pair is always the most significant
23573 word, even in little-endian mode. This does not match the usual subreg
23574 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23575 the appropriate constituent registers "by hand" in little-endian mode.
23576
23577 Note we do not need to check for destructive overlap here since TDmode
23578 can only reside in even/odd register pairs. */
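/* For example, a little-endian TDmode copy from the pair f10/f11 to
   f14/f15 moves f11 to f15 and f10 to f14 directly, preserving the
   ISA-mandated word order rather than applying subreg byte order.  */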
23579 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23580 {
23581 rtx p_src, p_dst;
23582 int i;
23583
23584 for (i = 0; i < nregs; i++)
23585 {
23586 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23587 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23588 else
23589 p_src = simplify_gen_subreg (reg_mode, src, mode,
23590 i * reg_mode_size);
23591
23592 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23593 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23594 else
23595 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23596 i * reg_mode_size);
23597
23598 emit_insn (gen_rtx_SET (p_dst, p_src));
23599 }
23600
23601 return;
23602 }
23603
23604 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23605 {
23606 /* Move register range backwards, if we might have destructive
23607 overlap. */
23608 int i;
23609 for (i = nregs - 1; i >= 0; i--)
23610 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23611 i * reg_mode_size),
23612 simplify_gen_subreg (reg_mode, src, mode,
23613 i * reg_mode_size)));
23614 }
23615 else
23616 {
23617 int i;
23618 int j = -1;
23619 bool used_update = false;
23620 rtx restore_basereg = NULL_RTX;
23621
23622 if (MEM_P (src) && INT_REGNO_P (reg))
23623 {
23624 rtx breg;
23625
23626 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23627 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23628 {
23629 rtx delta_rtx;
23630 breg = XEXP (XEXP (src, 0), 0);
23631 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23632 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23633 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23634 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23635 src = replace_equiv_address (src, breg);
23636 }
23637 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23638 {
23639 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23640 {
23641 rtx basereg = XEXP (XEXP (src, 0), 0);
23642 if (TARGET_UPDATE)
23643 {
23644 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23645 emit_insn (gen_rtx_SET (ndst,
23646 gen_rtx_MEM (reg_mode,
23647 XEXP (src, 0))));
23648 used_update = true;
23649 }
23650 else
23651 emit_insn (gen_rtx_SET (basereg,
23652 XEXP (XEXP (src, 0), 1)));
23653 src = replace_equiv_address (src, basereg);
23654 }
23655 else
23656 {
23657 rtx basereg = gen_rtx_REG (Pmode, reg);
23658 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23659 src = replace_equiv_address (src, basereg);
23660 }
23661 }
23662
23663 breg = XEXP (src, 0);
23664 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23665 breg = XEXP (breg, 0);
23666
23667 /* If the base register we are using to address memory is
23668 also a destination reg, then change that register last. */
23669 if (REG_P (breg)
23670 && REGNO (breg) >= REGNO (dst)
23671 && REGNO (breg) < REGNO (dst) + nregs)
23672 j = REGNO (breg) - REGNO (dst);
23673 }
23674 else if (MEM_P (dst) && INT_REGNO_P (reg))
23675 {
23676 rtx breg;
23677
23678 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23679 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23680 {
23681 rtx delta_rtx;
23682 breg = XEXP (XEXP (dst, 0), 0);
23683 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23684 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23685 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23686
23687 /* We have to update the breg before doing the store.
23688 Use store with update, if available. */
23689
23690 if (TARGET_UPDATE)
23691 {
23692 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23693 emit_insn (TARGET_32BIT
23694 ? (TARGET_POWERPC64
23695 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23696 : gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
23697 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23698 used_update = true;
23699 }
23700 else
23701 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23702 dst = replace_equiv_address (dst, breg);
23703 }
23704 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23705 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23706 {
23707 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23708 {
23709 rtx basereg = XEXP (XEXP (dst, 0), 0);
23710 if (TARGET_UPDATE)
23711 {
23712 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23713 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23714 XEXP (dst, 0)),
23715 nsrc));
23716 used_update = true;
23717 }
23718 else
23719 emit_insn (gen_rtx_SET (basereg,
23720 XEXP (XEXP (dst, 0), 1)));
23721 dst = replace_equiv_address (dst, basereg);
23722 }
23723 else
23724 {
23725 rtx basereg = XEXP (XEXP (dst, 0), 0);
23726 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23727 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23728 && REG_P (basereg)
23729 && REG_P (offsetreg)
23730 && REGNO (basereg) != REGNO (offsetreg));
23731 if (REGNO (basereg) == 0)
23732 {
23733 rtx tmp = offsetreg;
23734 offsetreg = basereg;
23735 basereg = tmp;
23736 }
23737 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23738 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23739 dst = replace_equiv_address (dst, basereg);
23740 }
23741 }
23742 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23743 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23744 }
23745
23746 for (i = 0; i < nregs; i++)
23747 {
23748 /* Calculate index to next subword. */
23749 ++j;
23750 if (j == nregs)
23751 j = 0;
23752
23753 /* If the compiler already emitted the move of the first word by
23754 a store with update, there is no need to do anything. */
23755 if (j == 0 && used_update)
23756 continue;
23757
23758 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23759 j * reg_mode_size),
23760 simplify_gen_subreg (reg_mode, src, mode,
23761 j * reg_mode_size)));
23762 }
23763 if (restore_basereg != NULL_RTX)
23764 emit_insn (restore_basereg);
23765 }
23766 }
23767
23768 \f
23769 /* This page contains routines that are used to determine what the
23770 function prologue and epilogue code will do and write them out. */
23771
23772 /* Determine whether the REG is really used. */
23773
23774 static bool
23775 save_reg_p (int reg)
23776 {
23777 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23778 {
23779 /* When calling eh_return, we must return true for all the cases
23780 where conditional_register_usage marks the PIC offset reg
23781 call used or fixed. */
23782 if (crtl->calls_eh_return
23783 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23784 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23785 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23786 return true;
23787
23788 /* We need to mark the PIC offset register live for the same
23789 conditions as it is set up in rs6000_emit_prologue, or
23790 otherwise it won't be saved before we clobber it. */
23791 if (TARGET_TOC && TARGET_MINIMAL_TOC
23792 && !constant_pool_empty_p ())
23793 return true;
23794
23795 if (DEFAULT_ABI == ABI_V4
23796 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23797 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
23798 return true;
23799
23800 if (DEFAULT_ABI == ABI_DARWIN
23801 && flag_pic && crtl->uses_pic_offset_table)
23802 return true;
23803 }
23804
23805 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23806 }
23807
23808 /* Return the first fixed-point register that is required to be
23809 saved. 32 if none. */
23810
23811 int
23812 first_reg_to_save (void)
23813 {
23814 int first_reg;
23815
23816 /* Find lowest numbered live register. */
23817 for (first_reg = 13; first_reg <= 31; first_reg++)
23818 if (save_reg_p (first_reg))
23819 break;
23820
23821 return first_reg;
23822 }
23823
23824 /* Similar, for FP regs. */
23825
23826 int
23827 first_fp_reg_to_save (void)
23828 {
23829 int first_reg;
23830
23831 /* Find lowest numbered live register. */
23832 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23833 if (save_reg_p (first_reg))
23834 break;
23835
23836 return first_reg;
23837 }
23838
23839 /* Similar, for AltiVec regs. */
23840
23841 static int
23842 first_altivec_reg_to_save (void)
23843 {
23844 int i;
23845
23846 /* Stack frame remains as is unless we are in AltiVec ABI. */
23847 if (! TARGET_ALTIVEC_ABI)
23848 return LAST_ALTIVEC_REGNO + 1;
23849
23850 /* On Darwin, the unwind routines are compiled without
23851 TARGET_ALTIVEC, and use save_world to save/restore the
23852 altivec registers when necessary. */
23853 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23854 && ! TARGET_ALTIVEC)
23855 return FIRST_ALTIVEC_REGNO + 20;
23856
23857 /* Find lowest numbered live register. */
23858 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23859 if (save_reg_p (i))
23860 break;
23861
23862 return i;
23863 }
23864
23865 /* Return a 32-bit mask of the AltiVec registers we need to set in
23866 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
23867 numbered from the MSB, so bit 0 is the MSB of the 32-bit word. */
23868
23869 static unsigned int
23870 compute_vrsave_mask (void)
23871 {
23872 unsigned int i, mask = 0;
23873
23874 /* On Darwin, the unwind routines are compiled without
23875 TARGET_ALTIVEC, and use save_world to save/restore the
23876 call-saved altivec registers when necessary. */
23877 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23878 && ! TARGET_ALTIVEC)
23879 mask |= 0xFFF;
23880
23881 /* First, find out if we use _any_ altivec registers. */
23882 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23883 if (df_regs_ever_live_p (i))
23884 mask |= ALTIVEC_REG_BIT (i);
23885
23886 if (mask == 0)
23887 return mask;
23888
23889 /* Next, remove the argument registers from the set. These must
23890 be in the VRSAVE mask set by the caller, so we don't need to add
23891 them in again. More importantly, the mask we compute here is
23892 used to generate CLOBBERs in the set_vrsave insn, and we do not
23893 wish the argument registers to die. */
23894 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23895 mask &= ~ALTIVEC_REG_BIT (i);
23896
23897 /* Similarly, remove the return value from the set. */
23898 {
23899 bool yes = false;
23900 diddle_return_value (is_altivec_return_reg, &yes);
23901 if (yes)
23902 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23903 }
23904
23905 return mask;
23906 }
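
/* For example, if only V20 and V31 are live and no vector arguments or
   return value are involved, the mask is ALTIVEC_REG_BIT (V20)
   | ALTIVEC_REG_BIT (V31), i.e. 0x800 | 0x1 == 0x801, V0 mapping to
   the MSB.  */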
23907
23908 /* For a very restricted set of circumstances, we can cut down the
23909 size of prologues/epilogues by calling our own save/restore-the-world
23910 routines. */
23911
23912 static void
23913 compute_save_world_info (rs6000_stack_t *info)
23914 {
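/* Tentatively set world_save_p so that WORLD_SAVE_P (info) is true
   while the real conditions below are evaluated.  */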
23915 info->world_save_p = 1;
23916 info->world_save_p
23917 = (WORLD_SAVE_P (info)
23918 && DEFAULT_ABI == ABI_DARWIN
23919 && !cfun->has_nonlocal_label
23920 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23921 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23922 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23923 && info->cr_save_p);
23924
23925 /* This will not work in conjunction with sibcalls. Make sure there
23926 are none. (This check is expensive, but seldom executed.) */
23927 if (WORLD_SAVE_P (info))
23928 {
23929 rtx_insn *insn;
23930 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23931 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23932 {
23933 info->world_save_p = 0;
23934 break;
23935 }
23936 }
23937
23938 if (WORLD_SAVE_P (info))
23939 {
23940 /* Even if we're not touching VRsave, make sure there's room on the
23941 stack for it, if it looks like we're calling SAVE_WORLD, which
23942 will attempt to save it. */
23943 info->vrsave_size = 4;
23944
23945 /* If we are going to save the world, we need to save the link register too. */
23946 info->lr_save_p = 1;
23947
23948 /* "Save" the VRsave register too if we're saving the world. */
23949 if (info->vrsave_mask == 0)
23950 info->vrsave_mask = compute_vrsave_mask ();
23951
23952 /* Because the Darwin register save/restore routines only handle
23953 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23954 check. */
23955 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23956 && (info->first_altivec_reg_save
23957 >= FIRST_SAVED_ALTIVEC_REGNO));
23958 }
23959
23960 return;
23961 }
23962
23963
23964 static void
23965 is_altivec_return_reg (rtx reg, void *xyes)
23966 {
23967 bool *yes = (bool *) xyes;
23968 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23969 *yes = true;
23970 }
23971
23972 \f
23973 /* Return whether REG is a global user reg or has been specified by
23974 -ffixed-REG. We should not restore these, and so cannot use
23975 lmw or out-of-line restore functions if there are any. We also
23976 can't save them (well, emit frame notes for them), because frame
23977 unwinding during exception handling will restore saved registers. */
23978
23979 static bool
23980 fixed_reg_p (int reg)
23981 {
23982 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23983 backend sets it, overriding anything the user might have given. */
23984 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23985 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23986 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23987 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23988 return false;
23989
23990 return fixed_regs[reg];
23991 }
23992
23993 /* Determine the strategy for saving/restoring registers. */
23994
23995 enum {
23996 SAVE_MULTIPLE = 0x1,
23997 SAVE_INLINE_GPRS = 0x2,
23998 SAVE_INLINE_FPRS = 0x4,
23999 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
24000 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
24001 SAVE_INLINE_VRS = 0x20,
24002 REST_MULTIPLE = 0x100,
24003 REST_INLINE_GPRS = 0x200,
24004 REST_INLINE_FPRS = 0x400,
24005 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
24006 REST_INLINE_VRS = 0x1000
24007 };
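
/* These bits are tested in combination below; e.g.

     (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS

   is false only for a plain one-at-a-time inline restore.  It is true
   both for an out-of-line restore (neither bit set) and for lmw (both
   bits set below), the two cases that cannot skip individual regs.  */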
24008
24009 static int
24010 rs6000_savres_strategy (rs6000_stack_t *info,
24011 bool using_static_chain_p)
24012 {
24013 int strategy = 0;
24014
24015 /* Select between in-line and out-of-line save and restore of regs.
24016 First, all the obvious cases where we don't use out-of-line. */
24017 if (crtl->calls_eh_return
24018 || cfun->machine->ra_need_lr)
24019 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
24020 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
24021 | SAVE_INLINE_VRS | REST_INLINE_VRS);
24022
24023 if (info->first_gp_reg_save == 32)
24024 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24025
24026 if (info->first_fp_reg_save == 64)
24027 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24028
24029 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
24030 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24031
24032 /* Define cutoff for using out-of-line functions to save registers. */
24033 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24034 {
24035 if (!optimize_size)
24036 {
24037 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24038 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24039 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24040 }
24041 else
24042 {
24043 /* Prefer out-of-line restore if it will exit, i.e. also tear down the frame. */
24044 if (info->first_fp_reg_save > 61)
24045 strategy |= SAVE_INLINE_FPRS;
24046 if (info->first_gp_reg_save > 29)
24047 {
24048 if (info->first_fp_reg_save == 64)
24049 strategy |= SAVE_INLINE_GPRS;
24050 else
24051 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24052 }
24053 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24054 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24055 }
24056 }
24057 else if (DEFAULT_ABI == ABI_DARWIN)
24058 {
24059 if (info->first_fp_reg_save > 60)
24060 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24061 if (info->first_gp_reg_save > 29)
24062 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24063 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24064 }
24065 else
24066 {
24067 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24068 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24069 || info->first_fp_reg_save > 61)
24070 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24071 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24072 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24073 }
24074
24075 /* Don't bother to try to save things out-of-line if r11 is occupied
24076 by the static chain. It would require too much fiddling and the
24077 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24078 pointer on Darwin, and AIX uses r1 or r12. */
24079 if (using_static_chain_p
24080 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24081 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24082 | SAVE_INLINE_GPRS
24083 | SAVE_INLINE_VRS);
24084
24085 /* Don't ever restore fixed regs. That means we can't use the
24086 out-of-line register restore functions if a fixed reg is in the
24087 range of regs restored. */
24088 if (!(strategy & REST_INLINE_FPRS))
24089 for (int i = info->first_fp_reg_save; i < 64; i++)
24090 if (fixed_regs[i])
24091 {
24092 strategy |= REST_INLINE_FPRS;
24093 break;
24094 }
24095
24096 /* We can only use the out-of-line routines to restore fprs if we've
24097 saved all the registers from first_fp_reg_save in the prologue.
24098 Otherwise, we risk loading garbage. Of course, if we have saved
24099 out-of-line then we know we haven't skipped any fprs. */
24100 if ((strategy & SAVE_INLINE_FPRS)
24101 && !(strategy & REST_INLINE_FPRS))
24102 for (int i = info->first_fp_reg_save; i < 64; i++)
24103 if (!save_reg_p (i))
24104 {
24105 strategy |= REST_INLINE_FPRS;
24106 break;
24107 }
24108
24109 /* Similarly, for altivec regs. */
24110 if (!(strategy & REST_INLINE_VRS))
24111 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24112 if (fixed_regs[i])
24113 {
24114 strategy |= REST_INLINE_VRS;
24115 break;
24116 }
24117
24118 if ((strategy & SAVE_INLINE_VRS)
24119 && !(strategy & REST_INLINE_VRS))
24120 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24121 if (!save_reg_p (i))
24122 {
24123 strategy |= REST_INLINE_VRS;
24124 break;
24125 }
24126
24127 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24128 saved is an out-of-line save or restore. Set up the value for
24129 the next test (excluding out-of-line gprs). */
24130 bool lr_save_p = (info->lr_save_p
24131 || !(strategy & SAVE_INLINE_FPRS)
24132 || !(strategy & SAVE_INLINE_VRS)
24133 || !(strategy & REST_INLINE_FPRS)
24134 || !(strategy & REST_INLINE_VRS));
24135
24136 if (TARGET_MULTIPLE
24137 && !TARGET_POWERPC64
24138 && info->first_gp_reg_save < 31
24139 && !(flag_shrink_wrap
24140 && flag_shrink_wrap_separate
24141 && optimize_function_for_speed_p (cfun)))
24142 {
24143 int count = 0;
24144 for (int i = info->first_gp_reg_save; i < 32; i++)
24145 if (save_reg_p (i))
24146 count++;
24147
24148 if (count <= 1)
24149 /* Don't use store multiple if only one reg needs to be
24150 saved. This can occur for example when the ABI_V4 pic reg
24151 (r30) needs to be saved to make calls, but r31 is not
24152 used. */
24153 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24154 else
24155 {
24156 /* Prefer store multiple for saves over out-of-line
24157 routines, since the store-multiple instruction will
24158 always be smaller. */
24159 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24160
24161 /* The situation is more complicated with load multiple.
24162 We'd prefer to use the out-of-line routines for restores,
24163 since the "exit" out-of-line routines can handle the
24164 restore of LR and the frame teardown. However it doesn't
24165 make sense to use the out-of-line routine if that is the
24166 only reason we'd need to save LR, and we can't use the
24167 "exit" out-of-line gpr restore if we have saved some
24168 fprs; in those cases it is advantageous to use load
24169 multiple when available. */
24170 if (info->first_fp_reg_save != 64 || !lr_save_p)
24171 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24172 }
24173 }
24174
24175 /* Using the "exit" out-of-line routine does not improve code size
24176 if using it would require lr to be saved and we are only saving
24177 one or two gprs. */
24178 else if (!lr_save_p && info->first_gp_reg_save > 29)
24179 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24180
24181 /* Don't ever restore fixed regs. */
24182 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24183 for (int i = info->first_gp_reg_save; i < 32; i++)
24184 if (fixed_reg_p (i))
24185 {
24186 strategy |= REST_INLINE_GPRS;
24187 strategy &= ~REST_MULTIPLE;
24188 break;
24189 }
24190
24191 /* We can only use load multiple or the out-of-line routines to
24192 restore gprs if we've saved all the registers from
24193 first_gp_reg_save. Otherwise, we risk loading garbage.
24194 Of course, if we have saved out-of-line or used stmw then we know
24195 we haven't skipped any gprs. */
24196 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24197 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24198 for (int i = info->first_gp_reg_save; i < 32; i++)
24199 if (!save_reg_p (i))
24200 {
24201 strategy |= REST_INLINE_GPRS;
24202 strategy &= ~REST_MULTIPLE;
24203 break;
24204 }
24205
24206 if (TARGET_ELF && TARGET_64BIT)
24207 {
24208 if (!(strategy & SAVE_INLINE_FPRS))
24209 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24210 else if (!(strategy & SAVE_INLINE_GPRS)
24211 && info->first_fp_reg_save == 64)
24212 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24213 }
24214 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24215 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24216
24217 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24218 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24219
24220 return strategy;
24221 }
24222
24223 /* Calculate the stack information for the current function. This is
24224 complicated by having two separate calling sequences, the AIX calling
24225 sequence and the V.4 calling sequence.
24226
24227 AIX (and Darwin/Mac OS X) stack frames look like:
24228 32-bit 64-bit
24229 SP----> +---------------------------------------+
24230 | back chain to caller | 0 0
24231 +---------------------------------------+
24232 | saved CR | 4 8 (8-11)
24233 +---------------------------------------+
24234 | saved LR | 8 16
24235 +---------------------------------------+
24236 | reserved for compilers | 12 24
24237 +---------------------------------------+
24238 | reserved for binders | 16 32
24239 +---------------------------------------+
24240 | saved TOC pointer | 20 40
24241 +---------------------------------------+
24242 | Parameter save area (+padding*) (P) | 24 48
24243 +---------------------------------------+
24244 | Alloca space (A) | 24+P etc.
24245 +---------------------------------------+
24246 | Local variable space (L) | 24+P+A
24247 +---------------------------------------+
24248 | Float/int conversion temporary (X) | 24+P+A+L
24249 +---------------------------------------+
24250 | Save area for AltiVec registers (W) | 24+P+A+L+X
24251 +---------------------------------------+
24252 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24253 +---------------------------------------+
24254 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24255 +---------------------------------------+
24256 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24257 +---------------------------------------+
24258 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24259 +---------------------------------------+
24260 old SP->| back chain to caller's caller |
24261 +---------------------------------------+
24262
24263 * If the alloca area is present, the parameter save area is
24264 padded so that the former starts 16-byte aligned.
24265
24266 The required alignment for AIX configurations is two words (i.e., 8
24267 or 16 bytes).
24268
24269 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24270
24271 SP----> +---------------------------------------+
24272 | Back chain to caller | 0
24273 +---------------------------------------+
24274 | Save area for CR | 8
24275 +---------------------------------------+
24276 | Saved LR | 16
24277 +---------------------------------------+
24278 | Saved TOC pointer | 24
24279 +---------------------------------------+
24280 | Parameter save area (+padding*) (P) | 32
24281 +---------------------------------------+
24282 | Alloca space (A) | 32+P
24283 +---------------------------------------+
24284 | Local variable space (L) | 32+P+A
24285 +---------------------------------------+
24286 | Save area for AltiVec registers (W) | 32+P+A+L
24287 +---------------------------------------+
24288 | AltiVec alignment padding (Y) | 32+P+A+L+W
24289 +---------------------------------------+
24290 | Save area for GP registers (G) | 32+P+A+L+W+Y
24291 +---------------------------------------+
24292 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24293 +---------------------------------------+
24294 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24295 +---------------------------------------+
24296
24297 * If the alloca area is present, the parameter save area is
24298 padded so that the former starts 16-byte aligned.
24299
24300 V.4 stack frames look like:
24301
24302 SP----> +---------------------------------------+
24303 | back chain to caller | 0
24304 +---------------------------------------+
24305 | caller's saved LR | 4
24306 +---------------------------------------+
24307 | Parameter save area (+padding*) (P) | 8
24308 +---------------------------------------+
24309 | Alloca space (A) | 8+P
24310 +---------------------------------------+
24311 | Varargs save area (V) | 8+P+A
24312 +---------------------------------------+
24313 | Local variable space (L) | 8+P+A+V
24314 +---------------------------------------+
24315 | Float/int conversion temporary (X) | 8+P+A+V+L
24316 +---------------------------------------+
24317 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24318 +---------------------------------------+
24319 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24320 +---------------------------------------+
24321 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24322 +---------------------------------------+
24323 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24324 +---------------------------------------+
24325 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24326 +---------------------------------------+
24327 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24328 +---------------------------------------+
24329 old SP->| back chain to caller's caller |
24330 +---------------------------------------+
24331
24332 * If the alloca area is present and the required alignment is
24333 16 bytes, the parameter save area is padded so that the
24334 alloca area starts 16-byte aligned.
24335
24336 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24337 given. (But note below and in sysv4.h that we require only 8 and
24338 may round up the size of our stack frame anyway. The historical
24339 reason is early versions of powerpc-linux which didn't properly
24340 align the stack at program startup. A happy side-effect is that
24341 -mno-eabi libraries can be used with -meabi programs.)
24342
24343 The EABI configuration defaults to the V.4 layout. However,
24344 the stack alignment requirements may differ. If -mno-eabi is not
24345 given, the required stack alignment is 8 bytes; if -mno-eabi is
24346 given, the required alignment is 16 bytes. (But see V.4 comment
24347 above.) */
24348
24349 #ifndef ABI_STACK_BOUNDARY
24350 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24351 #endif
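
/* A quick sketch of the alignment arithmetic used throughout
   rs6000_stack_info below; RS6000_ALIGN (N, A) rounds N up to a
   multiple of A, as its uses imply:

     RS6000_ALIGN (20, 8)  == 24   e.g. vars_size for 20 bytes of locals
     RS6000_ALIGN (24, 16) == 32   e.g. save_size under 16-byte save_align  */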
24352
24353 static rs6000_stack_t *
24354 rs6000_stack_info (void)
24355 {
24356 /* We should never be called for thunks, we are not set up for that. */
24357 gcc_assert (!cfun->is_thunk);
24358
24359 rs6000_stack_t *info = &stack_info;
24360 int reg_size = TARGET_32BIT ? 4 : 8;
24361 int ehrd_size;
24362 int ehcr_size;
24363 int save_align;
24364 int first_gp;
24365 HOST_WIDE_INT non_fixed_size;
24366 bool using_static_chain_p;
24367
24368 if (reload_completed && info->reload_completed)
24369 return info;
24370
24371 memset (info, 0, sizeof (*info));
24372 info->reload_completed = reload_completed;
24373
24374 /* Select which calling sequence. */
24375 info->abi = DEFAULT_ABI;
24376
24377 /* Calculate which registers need to be saved & save area size. */
24378 info->first_gp_reg_save = first_reg_to_save ();
24379 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24380 even if it currently looks like we won't. Reload may need it to
24381 get at a constant; if so, it will have already created a constant
24382 pool entry for it. */
24383 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24384 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24385 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24386 && crtl->uses_const_pool
24387 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24388 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24389 else
24390 first_gp = info->first_gp_reg_save;
24391
24392 info->gp_size = reg_size * (32 - first_gp);
24393
24394 info->first_fp_reg_save = first_fp_reg_to_save ();
24395 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24396
24397 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24398 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24399 - info->first_altivec_reg_save);
24400
24401 /* Does this function call anything? */
24402 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24403
24404 /* Determine if we need to save the condition code registers. */
24405 if (save_reg_p (CR2_REGNO)
24406 || save_reg_p (CR3_REGNO)
24407 || save_reg_p (CR4_REGNO))
24408 {
24409 info->cr_save_p = 1;
24410 if (DEFAULT_ABI == ABI_V4)
24411 info->cr_size = reg_size;
24412 }
24413
24414 /* If the current function calls __builtin_eh_return, then we need
24415 to allocate stack space for registers that will hold data for
24416 the exception handler. */
24417 if (crtl->calls_eh_return)
24418 {
24419 unsigned int i;
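/* Count how many registers the target uses to return EH data; the
   EH_RETURN_DATA_REGNO list is terminated by INVALID_REGNUM.  */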
24420 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24421 continue;
24422
24423 ehrd_size = i * UNITS_PER_WORD;
24424 }
24425 else
24426 ehrd_size = 0;
24427
24428 /* In the ELFv2 ABI, we also need to allocate space for separate
24429 CR field save areas if the function calls __builtin_eh_return. */
24430 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24431 {
24432 /* This hard-codes that we have three call-saved CR fields. */
24433 ehcr_size = 3 * reg_size;
24434 /* We do *not* use the regular CR save mechanism. */
24435 info->cr_save_p = 0;
24436 }
24437 else
24438 ehcr_size = 0;
24439
24440 /* Determine various sizes. */
24441 info->reg_size = reg_size;
24442 info->fixed_size = RS6000_SAVE_AREA;
24443 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24444 if (cfun->calls_alloca)
24445 info->parm_size =
24446 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24447 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24448 else
24449 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24450 TARGET_ALTIVEC ? 16 : 8);
24451 if (FRAME_GROWS_DOWNWARD)
24452 info->vars_size
24453 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24454 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24455 - (info->fixed_size + info->vars_size + info->parm_size);
24456
24457 if (TARGET_ALTIVEC_ABI)
24458 info->vrsave_mask = compute_vrsave_mask ();
24459
24460 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24461 info->vrsave_size = 4;
24462
24463 compute_save_world_info (info);
24464
24465 /* Calculate the offsets. */
24466 switch (DEFAULT_ABI)
24467 {
24468 case ABI_NONE:
24469 default:
24470 gcc_unreachable ();
24471
24472 case ABI_AIX:
24473 case ABI_ELFv2:
24474 case ABI_DARWIN:
24475 info->fp_save_offset = -info->fp_size;
24476 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24477
24478 if (TARGET_ALTIVEC_ABI)
24479 {
24480 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24481
24482 /* Align stack so vector save area is on a quadword boundary.
24483 The padding goes above the vectors. */
24484 if (info->altivec_size != 0)
24485 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24486
24487 info->altivec_save_offset = info->vrsave_save_offset
24488 - info->altivec_padding_size
24489 - info->altivec_size;
24490 gcc_assert (info->altivec_size == 0
24491 || info->altivec_save_offset % 16 == 0);
24492
24493 /* Adjust for AltiVec case. */
24494 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24495 }
24496 else
24497 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24498
24499 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24500 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24501 info->lr_save_offset = 2*reg_size;
24502 break;
24503
24504 case ABI_V4:
24505 info->fp_save_offset = -info->fp_size;
24506 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24507 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24508
24509 if (TARGET_ALTIVEC_ABI)
24510 {
24511 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24512
24513 /* Align stack so vector save area is on a quadword boundary. */
24514 if (info->altivec_size != 0)
24515 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24516
24517 info->altivec_save_offset = info->vrsave_save_offset
24518 - info->altivec_padding_size
24519 - info->altivec_size;
24520
24521 /* Adjust for AltiVec case. */
24522 info->ehrd_offset = info->altivec_save_offset;
24523 }
24524 else
24525 info->ehrd_offset = info->cr_save_offset;
24526
24527 info->ehrd_offset -= ehrd_size;
24528 info->lr_save_offset = reg_size;
24529 }
24530
24531 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24532 info->save_size = RS6000_ALIGN (info->fp_size
24533 + info->gp_size
24534 + info->altivec_size
24535 + info->altivec_padding_size
24536 + ehrd_size
24537 + ehcr_size
24538 + info->cr_size
24539 + info->vrsave_size,
24540 save_align);
24541
24542 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24543
24544 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24545 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24546
24547 /* Determine if we need to save the link register. */
24548 if (info->calls_p
24549 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24550 && crtl->profile
24551 && !TARGET_PROFILE_KERNEL)
24552 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24553 #ifdef TARGET_RELOCATABLE
24554 || (DEFAULT_ABI == ABI_V4
24555 && (TARGET_RELOCATABLE || flag_pic > 1)
24556 && !constant_pool_empty_p ())
24557 #endif
24558 || rs6000_ra_ever_killed ())
24559 info->lr_save_p = 1;
24560
24561 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24562 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24563 && call_used_regs[STATIC_CHAIN_REGNUM]);
24564 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24565
24566 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24567 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24568 || !(info->savres_strategy & SAVE_INLINE_VRS)
24569 || !(info->savres_strategy & REST_INLINE_GPRS)
24570 || !(info->savres_strategy & REST_INLINE_FPRS)
24571 || !(info->savres_strategy & REST_INLINE_VRS))
24572 info->lr_save_p = 1;
24573
24574 if (info->lr_save_p)
24575 df_set_regs_ever_live (LR_REGNO, true);
24576
24577 /* Determine if we need to allocate any stack frame:
24578
24579 For AIX we need to push the stack if a frame pointer is needed
24580 (because the stack might be dynamically adjusted), if we are
24581 debugging, if we make calls, or if the sum of fp_save, gp_save,
24582 and local variables are more than the space needed to save all
24583 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24584 + 18*8 = 288 (GPR13 reserved).
24585
24586 For V.4 we don't have the stack cushion that AIX uses, but assume
24587 that the debugger can handle stackless frames. */
24588
24589 if (info->calls_p)
24590 info->push_p = 1;
24591
24592 else if (DEFAULT_ABI == ABI_V4)
24593 info->push_p = non_fixed_size != 0;
24594
24595 else if (frame_pointer_needed)
24596 info->push_p = 1;
24597
24598 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24599 info->push_p = 1;
24600
24601 else
24602 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24603
24604 return info;
24605 }
24606
24607 static void
24608 debug_stack_info (rs6000_stack_t *info)
24609 {
24610 const char *abi_string;
24611
24612 if (! info)
24613 info = rs6000_stack_info ();
24614
24615 fprintf (stderr, "\nStack information for function %s:\n",
24616 ((current_function_decl && DECL_NAME (current_function_decl))
24617 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24618 : "<unknown>"));
24619
24620 switch (info->abi)
24621 {
24622 default: abi_string = "Unknown"; break;
24623 case ABI_NONE: abi_string = "NONE"; break;
24624 case ABI_AIX: abi_string = "AIX"; break;
24625 case ABI_ELFv2: abi_string = "ELFv2"; break;
24626 case ABI_DARWIN: abi_string = "Darwin"; break;
24627 case ABI_V4: abi_string = "V.4"; break;
24628 }
24629
24630 fprintf (stderr, "\tABI = %5s\n", abi_string);
24631
24632 if (TARGET_ALTIVEC_ABI)
24633 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24634
24635 if (info->first_gp_reg_save != 32)
24636 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24637
24638 if (info->first_fp_reg_save != 64)
24639 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24640
24641 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24642 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24643 info->first_altivec_reg_save);
24644
24645 if (info->lr_save_p)
24646 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24647
24648 if (info->cr_save_p)
24649 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24650
24651 if (info->vrsave_mask)
24652 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24653
24654 if (info->push_p)
24655 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24656
24657 if (info->calls_p)
24658 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24659
24660 if (info->gp_size)
24661 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24662
24663 if (info->fp_size)
24664 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24665
24666 if (info->altivec_size)
24667 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24668 info->altivec_save_offset);
24669
24670 if (info->vrsave_size)
24671 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24672 info->vrsave_save_offset);
24673
24674 if (info->lr_save_p)
24675 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24676
24677 if (info->cr_save_p)
24678 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24679
24680 if (info->varargs_save_offset)
24681 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24682
24683 if (info->total_size)
24684 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC "\n",
24685 info->total_size);
24686
24687 if (info->vars_size)
24688 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC "\n",
24689 info->vars_size);
24690
24691 if (info->parm_size)
24692 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24693
24694 if (info->fixed_size)
24695 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24696
24697 if (info->gp_size)
24698 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24699
24700 if (info->fp_size)
24701 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24702
24703 if (info->altivec_size)
24704 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24705
24706 if (info->vrsave_size)
24707 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24708
24709 if (info->altivec_padding_size)
24710 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24711 info->altivec_padding_size);
24712
24713 if (info->cr_size)
24714 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24715
24716 if (info->save_size)
24717 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24718
24719 if (info->reg_size != 4)
24720 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24721
24722 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24723
24724 if (info->abi == ABI_DARWIN)
24725 fprintf (stderr, "\tWORLD_SAVE_P = %5d\n", WORLD_SAVE_P (info));
24726
24727 fprintf (stderr, "\n");
24728 }
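
/* With -mdebug=stack, the dump above comes out along these lines for a
   small 32-bit V.4 function saving r30/r31 (all values illustrative):

   Stack information for function foo:
	ABI = V.4
	first_gp_reg_save = 30
	lr_save_p = 1
	push_p = 1
	calls_p = 1
	gp_save_offset = -8
	lr_save_offset = 4
	total_size = 16
	fixed_size = 8
	gp_size = 8
	save_size = 8
	save-strategy = 0726
 */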
24729
24730 rtx
24731 rs6000_return_addr (int count, rtx frame)
24732 {
24733 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24734 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24735 if (count != 0
24736 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24737 {
24738 cfun->machine->ra_needs_full_frame = 1;
24739
24740 if (count == 0)
24741 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24742 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24743 frame = stack_pointer_rtx;
24744 rtx prev_frame_addr = memory_address (Pmode, frame);
24745 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24746 rtx lr_save_off = plus_constant (Pmode,
24747 prev_frame, RETURN_ADDRESS_OFFSET);
24748 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24749 return gen_rtx_MEM (Pmode, lr_save_addr);
24750 }
24751
24752 cfun->machine->ra_need_lr = 1;
24753 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24754 }
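
/* Concretely: __builtin_return_address (0) in non-PIC code takes the
   second arm and just materializes the value LR had on entry.  With
   -fpic on V.4/Darwin (where the prologue clobbers LR), or for any
   COUNT > 0, the first arm instead follows the back chain word at
   0(FRAME) and loads the LR save word at RETURN_ADDRESS_OFFSET from
   the resulting frame address.  */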
24755
24756 /* Say whether a function is a candidate for sibcall handling or not. */
24757
24758 static bool
24759 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24760 {
24761 tree fntype;
24762
24763 /* The sibcall epilogue may clobber the static chain register.
24764 ??? We could work harder and avoid that, but it's probably
24765 not worth the hassle in practice. */
24766 if (CALL_EXPR_STATIC_CHAIN (exp))
24767 return false;
24768
24769 if (decl)
24770 fntype = TREE_TYPE (decl);
24771 else
24772 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24773
24774 /* We can't do it if the called function has more vector parameters
24775 than the current function; there's nowhere to put the VRsave code. */
24776 if (TARGET_ALTIVEC_ABI
24777 && TARGET_ALTIVEC_VRSAVE
24778 && !(decl && decl == current_function_decl))
24779 {
24780 function_args_iterator args_iter;
24781 tree type;
24782 int nvreg = 0;
24783
24784 /* Functions with vector parameters are required to have a
24785 prototype, so the argument type info must be available
24786 here. */
24787 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
24788 if (TREE_CODE (type) == VECTOR_TYPE
24789 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24790 nvreg++;
24791
24792 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
24793 if (TREE_CODE (type) == VECTOR_TYPE
24794 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24795 nvreg--;
24796
24797 if (nvreg > 0)
24798 return false;
24799 }
24800
24801 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24802 functions, because the callee may have a different TOC pointer from
24803 the caller and there's no way to ensure we restore the TOC when
24804 we return. With the secure-plt SYSV ABI we can't make non-local
24805 calls when -fpic/PIC because the plt call stubs use r30. */
24806 if (DEFAULT_ABI == ABI_DARWIN
24807 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24808 && decl
24809 && !DECL_EXTERNAL (decl)
24810 && !DECL_WEAK (decl)
24811 && (*targetm.binds_local_p) (decl))
24812 || (DEFAULT_ABI == ABI_V4
24813 && (!TARGET_SECURE_PLT
24814 || !flag_pic
24815 || (decl
24816 && (*targetm.binds_local_p) (decl)))))
24817 {
24818 tree attr_list = TYPE_ATTRIBUTES (fntype);
24819
24820 if (!lookup_attribute ("longcall", attr_list)
24821 || lookup_attribute ("shortcall", attr_list))
24822 return true;
24823 }
24824
24825 return false;
24826 }
24827
24828 static int
24829 rs6000_ra_ever_killed (void)
24830 {
24831 rtx_insn *top;
24832 rtx reg;
24833 rtx_insn *insn;
24834
24835 if (cfun->is_thunk)
24836 return 0;
24837
24838 if (cfun->machine->lr_save_state)
24839 return cfun->machine->lr_save_state - 1;
24840
24841 /* regs_ever_live has LR marked as used if any sibcalls are present,
24842 but this should not force saving and restoring in the
24843 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24844 clobbers LR, so that is inappropriate. */
24845
24846 /* Also, the prologue can generate a store into LR that
24847 doesn't really count, like this:
24848
24849 move LR->R0
24850 bcl to set PIC register
24851 move LR->R31
24852 move R0->LR
24853
24854 When we're called from the epilogue, we need to avoid counting
24855 this as a store. */
24856
24857 push_topmost_sequence ();
24858 top = get_insns ();
24859 pop_topmost_sequence ();
24860 reg = gen_rtx_REG (Pmode, LR_REGNO);
24861
24862 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24863 {
24864 if (INSN_P (insn))
24865 {
24866 if (CALL_P (insn))
24867 {
24868 if (!SIBLING_CALL_P (insn))
24869 return 1;
24870 }
24871 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24872 return 1;
24873 else if (set_of (reg, insn) != NULL_RTX
24874 && !prologue_epilogue_contains (insn))
24875 return 1;
24876 }
24877 }
24878 return 0;
24879 }
24880 \f
24881 /* Emit instructions needed to load the TOC register.
24882 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
24883 a constant pool; or for SVR4 -fpic. */
24884
24885 void
24886 rs6000_emit_load_toc_table (int fromprolog)
24887 {
24888 rtx dest;
24889 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24890
24891 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24892 {
24893 char buf[30];
24894 rtx lab, tmp1, tmp2, got;
24895
24896 lab = gen_label_rtx ();
24897 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24898 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24899 if (flag_pic == 2)
24900 {
24901 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24902 need_toc_init = 1;
24903 }
24904 else
24905 got = rs6000_got_sym ();
24906 tmp1 = tmp2 = dest;
24907 if (!fromprolog)
24908 {
24909 tmp1 = gen_reg_rtx (Pmode);
24910 tmp2 = gen_reg_rtx (Pmode);
24911 }
24912 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24913 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24914 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24915 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24916 }
24917 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24918 {
24919 emit_insn (gen_load_toc_v4_pic_si ());
24920 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24921 }
24922 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24923 {
24924 char buf[30];
24925 rtx temp0 = (fromprolog
24926 ? gen_rtx_REG (Pmode, 0)
24927 : gen_reg_rtx (Pmode));
24928
24929 if (fromprolog)
24930 {
24931 rtx symF, symL;
24932
24933 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24934 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24935
24936 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24937 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24938
24939 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24940 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24941 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24942 }
24943 else
24944 {
24945 rtx tocsym, lab;
24946
24947 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24948 need_toc_init = 1;
24949 lab = gen_label_rtx ();
24950 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24951 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24952 if (TARGET_LINK_STACK)
24953 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24954 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24955 }
24956 emit_insn (gen_addsi3 (dest, temp0, dest));
24957 }
24958 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24959 {
24960 /* This is for AIX code running in non-PIC ELF32. */
24961 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24962
24963 need_toc_init = 1;
24964 emit_insn (gen_elf_high (dest, realsym));
24965 emit_insn (gen_elf_low (dest, dest, realsym));
24966 }
24967 else
24968 {
24969 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24970
24971 if (TARGET_32BIT)
24972 emit_insn (gen_load_toc_aix_si (dest));
24973 else
24974 emit_insn (gen_load_toc_aix_di (dest));
24975 }
24976 }
24977
24978 /* Emit instructions to restore the link register after determining where
24979 its value has been stored. */
24980
24981 void
24982 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24983 {
24984 rs6000_stack_t *info = rs6000_stack_info ();
24985 rtx operands[2];
24986
24987 operands[0] = source;
24988 operands[1] = scratch;
24989
24990 if (info->lr_save_p)
24991 {
24992 rtx frame_rtx = stack_pointer_rtx;
24993 HOST_WIDE_INT sp_offset = 0;
24994 rtx tmp;
24995
24996 if (frame_pointer_needed
24997 || cfun->calls_alloca
24998 || info->total_size > 32767)
24999 {
25000 tmp = gen_frame_mem (Pmode, frame_rtx);
25001 emit_move_insn (operands[1], tmp);
25002 frame_rtx = operands[1];
25003 }
25004 else if (info->push_p)
25005 sp_offset = info->total_size;
25006
25007 tmp = plus_constant (Pmode, frame_rtx,
25008 info->lr_save_offset + sp_offset);
25009 tmp = gen_frame_mem (Pmode, tmp);
25010 emit_move_insn (tmp, operands[0]);
25011 }
25012 else
25013 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
25014
25015 /* Freeze lr_save_p. We've just emitted rtl that depends on the
25016 state of lr_save_p so any change from here on would be a bug. In
25017 particular, stop rs6000_ra_ever_killed from considering the SET
25018 of lr we may have added just above. */
25019 cfun->machine->lr_save_state = info->lr_save_p + 1;
25020 }
25021
25022 static GTY(()) alias_set_type set = -1;
25023
25024 alias_set_type
25025 get_TOC_alias_set (void)
25026 {
25027 if (set == -1)
25028 set = new_alias_set ();
25029 return set;
25030 }
25031
25032 /* This returns nonzero if the current function uses the TOC. This is
25033 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25034 is generated by the ABI_V4 load_toc_* patterns.
25035 Return 2 instead of 1 if the load_toc_* pattern is in the function
25036 partition that doesn't start the function. */
25037 #if TARGET_ELF
25038 static int
25039 uses_TOC (void)
25040 {
25041 rtx_insn *insn;
25042 int ret = 1;
25043
25044 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25045 {
25046 if (INSN_P (insn))
25047 {
25048 rtx pat = PATTERN (insn);
25049 int i;
25050
25051 if (GET_CODE (pat) == PARALLEL)
25052 for (i = 0; i < XVECLEN (pat, 0); i++)
25053 {
25054 rtx sub = XVECEXP (pat, 0, i);
25055 if (GET_CODE (sub) == USE)
25056 {
25057 sub = XEXP (sub, 0);
25058 if (GET_CODE (sub) == UNSPEC
25059 && XINT (sub, 1) == UNSPEC_TOC)
25060 return ret;
25061 }
25062 }
25063 }
25064 else if (crtl->has_bb_partition
25065 && NOTE_P (insn)
25066 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25067 ret = 2;
25068 }
25069 return 0;
25070 }
25071 #endif
25072
25073 rtx
25074 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25075 {
25076 rtx tocrel, tocreg, hi;
25077
25078 if (TARGET_DEBUG_ADDR)
25079 {
25080 if (SYMBOL_REF_P (symbol))
25081 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25082 XSTR (symbol, 0));
25083 else
25084 {
25085 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25086 GET_RTX_NAME (GET_CODE (symbol)));
25087 debug_rtx (symbol);
25088 }
25089 }
25090
25091 if (!can_create_pseudo_p ())
25092 df_set_regs_ever_live (TOC_REGISTER, true);
25093
25094 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25095 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25096 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25097 return tocrel;
25098
25099 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25100 if (largetoc_reg != NULL)
25101 {
25102 emit_move_insn (largetoc_reg, hi);
25103 hi = largetoc_reg;
25104 }
25105 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25106 }
25107
25108 /* Issue assembly directives that create a reference to the given DWARF
25109 FRAME_TABLE_LABEL from the current function section. */
25110 void
25111 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25112 {
25113 fprintf (asm_out_file, "\t.ref %s\n",
25114 (* targetm.strip_name_encoding) (frame_table_label));
25115 }
25116 \f
25117 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25118 and the change to the stack pointer. */
25119
25120 static void
25121 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25122 {
25123 rtvec p;
25124 int i;
25125 rtx regs[3];
25126
25127 i = 0;
25128 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25129 if (hard_frame_needed)
25130 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25131 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25132 || (hard_frame_needed
25133 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25134 regs[i++] = fp;
25135
25136 p = rtvec_alloc (i);
25137 while (--i >= 0)
25138 {
25139 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25140 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25141 }
25142
25143 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25144 }
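
/* The tie acts as a scheduling and alias barrier: the BLKmode frame
   MEMs set above conflict with every frame access, so neither frame
   loads/stores nor the stack pointer update can be reordered across
   this insn.  */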
25145
25146 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25147 and set the appropriate attributes for the generated insn. Return the
25148 first insn which adjusts the stack pointer or the last insn before
25149 the stack adjustment loop.
25150
25151 SIZE_INT is used to create the CFI note for the allocation.
25152
25153 SIZE_RTX is an rtx containing the size of the adjustment. Note that
25154 since stacks grow to lower addresses, its runtime value is -SIZE_INT.
25155
25156 ORIG_SP contains the backchain value that must be stored at *sp. */
25157
25158 static rtx_insn *
25159 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25160 {
25161 rtx_insn *insn;
25162
25163 rtx size_rtx = GEN_INT (-size_int);
25164 if (size_int > 32767)
25165 {
25166 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25167 /* Need a note here so that try_split doesn't get confused. */
25168 if (get_last_insn () == NULL_RTX)
25169 emit_note (NOTE_INSN_DELETED);
25170 insn = emit_move_insn (tmp_reg, size_rtx);
25171 try_split (PATTERN (insn), insn, 0);
25172 size_rtx = tmp_reg;
25173 }
25174
25175 if (TARGET_32BIT)
25176 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25177 stack_pointer_rtx,
25178 size_rtx,
25179 orig_sp));
25180 else
25181 insn = emit_insn (gen_movdi_update_stack (stack_pointer_rtx,
25182 stack_pointer_rtx,
25183 size_rtx,
25184 orig_sp));
25185 rtx par = PATTERN (insn);
25186 gcc_assert (GET_CODE (par) == PARALLEL);
25187 rtx set = XVECEXP (par, 0, 0);
25188 gcc_assert (GET_CODE (set) == SET);
25189 rtx mem = SET_DEST (set);
25190 gcc_assert (MEM_P (mem));
25191 MEM_NOTRAP_P (mem) = 1;
25192 set_mem_alias_set (mem, get_frame_alias_set ());
25193
25194 RTX_FRAME_RELATED_P (insn) = 1;
25195 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25196 gen_rtx_SET (stack_pointer_rtx,
25197 gen_rtx_PLUS (Pmode,
25198 stack_pointer_rtx,
25199 GEN_INT (-size_int))));
25200
25201 /* Emit a blockage to ensure the allocation/probing insns are
25202 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25203 note for similar reasons. */
25204 if (flag_stack_clash_protection)
25205 {
25206 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25207 emit_insn (gen_blockage ());
25208 }
25209
25210 return insn;
25211 }
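
/* The insn emitted above is in effect "stwu ORIG_SP,-SIZE(r1)" ("stdu"
   in 64-bit mode): the stack pointer drops by SIZE and the backchain
   is stored at the new 0(r1) by the same instruction, keeping the
   backchain valid at every point as the ABI requires.  */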
25212
25213 static HOST_WIDE_INT
25214 get_stack_clash_protection_probe_interval (void)
25215 {
25216 return (HOST_WIDE_INT_1U
25217 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25218 }
25219
25220 static HOST_WIDE_INT
25221 get_stack_clash_protection_guard_size (void)
25222 {
25223 return (HOST_WIDE_INT_1U
25224 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25225 }
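
/* Both --param values are log2 of a byte count; with the usual default
   of 12 for stack-clash-protection-probe-interval, for example, probes
   are emitted every HOST_WIDE_INT_1U << 12 == 4096 bytes.  */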
25226
25227 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25228 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25229
25230 COPY_REG, if non-null, should contain a copy of the original
25231 stack pointer at exit from this function.
25232
25233 This is subtly different from the Ada probing in that it tries hard to
25234 prevent attacks that jump the stack guard. Thus it is never allowed to
25235 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25236 space without a suitable probe. */
25237 static rtx_insn *
25238 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25239 rtx copy_reg)
25240 {
25241 rtx orig_sp = copy_reg;
25242
25243 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25244
25245 /* Round the size down to a multiple of PROBE_INTERVAL. */
25246 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25247
25248 /* If explicitly requested,
25249 or the rounded size is not the same as the original size,
25250 or the rounded size is greater than a page,
25251 then we will need a copy of the original stack pointer. */
25252 if (rounded_size != orig_size
25253 || rounded_size > probe_interval
25254 || copy_reg)
25255 {
25256 /* If the caller did not request a copy of the incoming stack
25257 pointer, then we use r0 to hold the copy. */
25258 if (!copy_reg)
25259 orig_sp = gen_rtx_REG (Pmode, 0);
25260 emit_move_insn (orig_sp, stack_pointer_rtx);
25261 }
25262
25263 /* There are three cases here.
25264
25265 One is a single probe which is the most common and most efficiently
25266 implemented as it does not have to have a copy of the original
25267 stack pointer if there are no residuals.
25268
25269 Second is unrolled allocation/probes which we use if there are just
25270 a few of them. It needs to save the original stack pointer into a
25271 temporary for use as a source register in the allocation/probe.
25272
25273 Last is a loop. This is the most uncommon case and least efficient. */
25274 rtx_insn *retval = NULL;
25275 if (rounded_size == probe_interval)
25276 {
25277 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25278
25279 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25280 }
25281 else if (rounded_size <= 8 * probe_interval)
25282 {
25283 /* The ABI requires using the store with update insns to allocate
25284 space and store the backchain into the stack.
25285
25286 So we save the current stack pointer into a temporary, then
25287 emit the store-with-update insns to store the saved stack pointer
25288 into the right location in each new page. */
25289 for (int i = 0; i < rounded_size; i += probe_interval)
25290 {
25291 rtx_insn *insn
25292 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25293
25294 /* Save the first stack adjustment in RETVAL. */
25295 if (i == 0)
25296 retval = insn;
25297 }
25298
25299 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25300 }
25301 else
25302 {
25303 /* Compute the ending address. */
25304 rtx end_addr
25305 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25306 rtx rs = GEN_INT (-rounded_size);
25307 rtx_insn *insn;
25308 if (add_operand (rs, Pmode))
25309 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25310 else
25311 {
25312 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25313 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25314 stack_pointer_rtx));
25315 /* Describe the effect of INSN to the CFI engine. */
25316 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25317 gen_rtx_SET (end_addr,
25318 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25319 rs)));
25320 }
25321 RTX_FRAME_RELATED_P (insn) = 1;
25322
25323 /* Emit the loop. */
25324 if (TARGET_64BIT)
25325 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25326 stack_pointer_rtx, orig_sp,
25327 end_addr));
25328 else
25329 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25330 stack_pointer_rtx, orig_sp,
25331 end_addr));
25332 RTX_FRAME_RELATED_P (retval) = 1;
25333 /* Describe the effect of INSN to the CFI engine. */
25334 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25335 gen_rtx_SET (stack_pointer_rtx, end_addr));
25336
25337 /* Emit a blockage to ensure the allocation/probing insns are
25338 not optimized, combined, removed, etc. Other cases handle this
25339 within their call to rs6000_emit_allocate_stack_1. */
25340 emit_insn (gen_blockage ());
25341
25342 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25343 }
25344
25345 if (orig_size != rounded_size)
25346 {
25347 /* Allocate (and implicitly probe) any residual space. */
25348 HOST_WIDE_INT residual = orig_size - rounded_size;
25349
25350 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25351
25352 /* If the residual was the only allocation, then we can return the
25353 allocating insn. */
25354 if (!retval)
25355 retval = insn;
25356 }
25357
25358 return retval;
25359 }
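
/* A worked example, assuming a 4 kB probe interval: a request for 5000
   bytes rounds down to a single 4096-byte store-with-update (the
   one-probe case) followed by a 904-byte residual allocation.  A
   request for 40000 bytes rounds down to 9 * 4096 == 36864 bytes,
   which exceeds 8 probe intervals, so it takes the loop path and then
   allocates the 3136-byte residual.  */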
25360
25361 /* Emit the correct code for allocating stack space, as insns.
25362 If COPY_REG, make sure a copy of the old frame is left there.
25363 The generated code may use hard register 0 as a temporary. */
25364
25365 static rtx_insn *
25366 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25367 {
25368 rtx_insn *insn;
25369 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25370 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25371 rtx todec = gen_int_mode (-size, Pmode);
25372
25373 if (INTVAL (todec) != -size)
25374 {
25375 warning (0, "stack frame too large");
25376 emit_insn (gen_trap ());
25377 return 0;
25378 }
25379
25380 if (crtl->limit_stack)
25381 {
25382 if (REG_P (stack_limit_rtx)
25383 && REGNO (stack_limit_rtx) > 1
25384 && REGNO (stack_limit_rtx) <= 31)
25385 {
25386 rtx_insn *insn
25387 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25388 gcc_assert (insn);
25389 emit_insn (insn);
25390 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25391 }
25392 else if (SYMBOL_REF_P (stack_limit_rtx)
25393 && TARGET_32BIT
25394 && DEFAULT_ABI == ABI_V4
25395 && !flag_pic)
25396 {
25397 rtx toload = gen_rtx_CONST (VOIDmode,
25398 gen_rtx_PLUS (Pmode,
25399 stack_limit_rtx,
25400 GEN_INT (size)));
25401
25402 emit_insn (gen_elf_high (tmp_reg, toload));
25403 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25404 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25405 const0_rtx));
25406 }
25407 else
25408 warning (0, "stack limit expression is not supported");
25409 }
25410
25411 if (flag_stack_clash_protection)
25412 {
25413 if (size < get_stack_clash_protection_guard_size ())
25414 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25415 else
25416 {
25417 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25418 copy_reg);
25419
25420 /* If we asked for a copy with an offset, then we still need to add
25421 in the offset. */
25422 if (copy_reg && copy_off)
25423 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25424 return insn;
25425 }
25426 }
25427
25428 if (copy_reg)
25429 {
25430 if (copy_off != 0)
25431 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25432 else
25433 emit_move_insn (copy_reg, stack_reg);
25434 }
25435
25436 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25437 it now and set the alias set/attributes. The above gen_*_update
25438 calls will generate a PARALLEL with the MEM set being the first
25439 operation. */
25440 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25441 return insn;
25442 }
25443
25444 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25445
25446 #if PROBE_INTERVAL > 32768
25447 #error Cannot use indexed addressing mode for stack probing
25448 #endif
25449
25450 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25451 inclusive. These are offsets from the current stack pointer. */
25452
25453 static void
25454 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25455 {
25456 /* See if we have a constant small number of probes to generate. If so,
25457 that's the easy case. */
25458 if (first + size <= 32768)
25459 {
25460 HOST_WIDE_INT i;
25461
25462 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25463 it exceeds SIZE. If only one probe is needed, this will not
25464 generate any code. Then probe at FIRST + SIZE. */
25465 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25466 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25467 -(first + i)));
25468
25469 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25470 -(first + size)));
25471 }
25472
25473 /* Otherwise, do the same as above, but in a loop. Note that we must be
25474 extra careful with variables wrapping around because we might be at
25475 the very top (or the very bottom) of the address space and we have
25476 to be able to handle this case properly; in particular, we use an
25477 equality test for the loop condition. */
25478 else
25479 {
25480 HOST_WIDE_INT rounded_size;
25481 rtx r12 = gen_rtx_REG (Pmode, 12);
25482 rtx r0 = gen_rtx_REG (Pmode, 0);
25483
25484 /* Sanity check for the addressing mode we're going to use. */
25485 gcc_assert (first <= 32768);
25486
25487 /* Step 1: round SIZE to the previous multiple of the interval. */
25488
25489 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25490
25491
25492 /* Step 2: compute initial and final value of the loop counter. */
25493
25494 /* TEST_ADDR = SP + FIRST. */
25495 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25496 -first)));
25497
25498 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25499 if (rounded_size > 32768)
25500 {
25501 emit_move_insn (r0, GEN_INT (-rounded_size));
25502 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25503 }
25504 else
25505 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25506 -rounded_size)));
25507
25508
25509 /* Step 3: the loop
25510
25511 do
25512 {
25513 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25514 probe at TEST_ADDR
25515 }
25516 while (TEST_ADDR != LAST_ADDR)
25517
25518 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25519 until it is equal to ROUNDED_SIZE. */
25520
25521 if (TARGET_64BIT)
25522 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25523 else
25524 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25525
25526
25527 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25528 that SIZE is equal to ROUNDED_SIZE. */
25529
25530 if (size != rounded_size)
25531 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25532 }
25533 }
25534
25535 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25536 addresses, not offsets. */
25537
25538 static const char *
25539 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25540 {
25541 static int labelno = 0;
25542 char loop_lab[32];
25543 rtx xops[2];
25544
25545 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25546
25547 /* Loop. */
25548 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25549
25550 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25551 xops[0] = reg1;
25552 xops[1] = GEN_INT (-PROBE_INTERVAL);
25553 output_asm_insn ("addi %0,%0,%1", xops);
25554
25555 /* Probe at TEST_ADDR. */
25556 xops[1] = gen_rtx_REG (Pmode, 0);
25557 output_asm_insn ("stw %1,0(%0)", xops);
25558
25559 /* Test if TEST_ADDR == LAST_ADDR. */
25560 xops[1] = reg2;
25561 if (TARGET_64BIT)
25562 output_asm_insn ("cmpd 0,%0,%1", xops);
25563 else
25564 output_asm_insn ("cmpw 0,%0,%1", xops);
25565
25566 /* Branch. */
25567 fputs ("\tbne 0,", asm_out_file);
25568 assemble_name_raw (asm_out_file, loop_lab);
25569 fputc ('\n', asm_out_file);
25570
25571 return "";
25572 }
25573
25574 /* This function is called when rs6000_frame_related is processing
25575 SETs within a PARALLEL, and returns whether the REGNO save ought to
25576 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25577 for out-of-line register save functions, store multiple, and the
25578 Darwin world_save. They may contain registers that don't really
25579 need saving. */
25580
25581 static bool
25582 interesting_frame_related_regno (unsigned int regno)
25583 {
25584 /* Saves apparently of r0 are actually saving LR. It doesn't make
25585 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25586 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25587 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25588 as frame related. */
25589 if (regno == 0)
25590 return true;
25591 /* If we see CR2 then we are here on a Darwin world save. Saves of
25592 CR2 signify the whole CR is being saved. This is a long-standing
25593 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25594 that CR needs to be saved. */
25595 if (regno == CR2_REGNO)
25596 return true;
25597 /* Omit frame info for any user-defined global regs. If frame info
25598 is supplied for them, frame unwinding will restore a user reg.
25599 Also omit frame info for any reg we don't need to save, as that
25600 bloats frame info and can cause problems with shrink wrapping.
25601 Since global regs won't be seen as needing to be saved, both of
25602 these conditions are covered by save_reg_p. */
25603 return save_reg_p (regno);
25604 }
25605
25606 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25607 addresses, not offsets.
25608
25609 REG2 contains the backchain that must be stored into *sp at each allocation.
25610
25611 This is subtly different from the Ada probing above in that it tries hard
25612 to prevent attacks that jump the stack guard. Thus, it is never allowed
25613 to allocate more than PROBE_INTERVAL bytes of stack space without a
25614 suitable probe. */
25615
25616 static const char *
25617 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25618 {
25619 static int labelno = 0;
25620 char loop_lab[32];
25621 rtx xops[3];
25622
25623 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25624
25625 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25626
25627 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25628
25629 /* This allocates and probes. */
25630 xops[0] = reg1;
25631 xops[1] = reg2;
25632 xops[2] = GEN_INT (-probe_interval);
25633 if (TARGET_64BIT)
25634 output_asm_insn ("stdu %1,%2(%0)", xops);
25635 else
25636 output_asm_insn ("stwu %1,%2(%0)", xops);
25637
25638 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25639 xops[0] = reg1;
25640 xops[1] = reg3;
25641 if (TARGET_64BIT)
25642 output_asm_insn ("cmpd 0,%0,%1", xops);
25643 else
25644 output_asm_insn ("cmpw 0,%0,%1", xops);
25645
25646 fputs ("\tbne 0,", asm_out_file);
25647 assemble_name_raw (asm_out_file, loop_lab);
25648 fputc ('\n', asm_out_file);
25649
25650 return "";
25651 }
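/* Assuming the default 4 KiB probe interval, and illustrative register
   choices REG1 == r1, REG2 == r0 (holding the backchain) and
   REG3 == r12 (the final stack address), a 64-bit target would emit
   roughly:

	.LPSRL1:
		stdu 0,-4096(1)
		cmpd 0,1,12
		bne 0,.LPSRL1  */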
25652
25653 /* Wrapper around the output_probe_stack_range routines. */
25654 const char *
25655 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25656 {
25657 if (flag_stack_clash_protection)
25658 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25659 else
25660 return output_probe_stack_range_1 (reg1, reg3);
25661 }
25662
25663 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25664 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25665 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25666 deduce these equivalences by itself so it wasn't necessary to hold
25667 its hand so much. Don't be tempted to always supply d2_f_d_e with
25668 the actual cfa register, ie. r31 when we are using a hard frame
25669 pointer. That fails when saving regs off r1, and sched moves the
25670 r31 setup past the reg saves. */
25671
25672 static rtx_insn *
25673 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25674 rtx reg2, rtx repl2)
25675 {
25676 rtx repl;
25677
25678 if (REGNO (reg) == STACK_POINTER_REGNUM)
25679 {
25680 gcc_checking_assert (val == 0);
25681 repl = NULL_RTX;
25682 }
25683 else
25684 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25685 GEN_INT (val));
25686
25687 rtx pat = PATTERN (insn);
25688 if (!repl && !reg2)
25689 {
25690 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25691 if (GET_CODE (pat) == PARALLEL)
25692 for (int i = 0; i < XVECLEN (pat, 0); i++)
25693 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25694 {
25695 rtx set = XVECEXP (pat, 0, i);
25696
25697 if (!REG_P (SET_SRC (set))
25698 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25699 RTX_FRAME_RELATED_P (set) = 1;
25700 }
25701 RTX_FRAME_RELATED_P (insn) = 1;
25702 return insn;
25703 }
25704
25705 /* We expect that 'pat' is either a SET or a PARALLEL containing
25706 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25707 are important so they all have to be marked RTX_FRAME_RELATED_P.
25708 Call simplify_replace_rtx on the SETs rather than the whole insn
25709 so as to leave the other stuff alone (for example USE of r12). */
25710
25711 set_used_flags (pat);
25712 if (GET_CODE (pat) == SET)
25713 {
25714 if (repl)
25715 pat = simplify_replace_rtx (pat, reg, repl);
25716 if (reg2)
25717 pat = simplify_replace_rtx (pat, reg2, repl2);
25718 }
25719 else if (GET_CODE (pat) == PARALLEL)
25720 {
25721 pat = shallow_copy_rtx (pat);
25722 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25723
25724 for (int i = 0; i < XVECLEN (pat, 0); i++)
25725 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25726 {
25727 rtx set = XVECEXP (pat, 0, i);
25728
25729 if (repl)
25730 set = simplify_replace_rtx (set, reg, repl);
25731 if (reg2)
25732 set = simplify_replace_rtx (set, reg2, repl2);
25733 XVECEXP (pat, 0, i) = set;
25734
25735 if (!REG_P (SET_SRC (set))
25736 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25737 RTX_FRAME_RELATED_P (set) = 1;
25738 }
25739 }
25740 else
25741 gcc_unreachable ();
25742
25743 RTX_FRAME_RELATED_P (insn) = 1;
25744 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25745
25746 return insn;
25747 }
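/* As an illustration (hypothetical rtl): if INSN stores r30 with
     (set (mem (plus (reg 11) (const_int 8))) (reg 30))
   and REG is r11 with VAL == -256, simplify_replace_rtx rewrites the
   SET so that the attached REG_FRAME_RELATED_EXPR note reads
     (set (mem (plus (reg 1) (const_int -248))) (reg 30))
   i.e. an sp-relative store that dwarf2cfi.c can translate directly
   into a CFA-relative save.  */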
25748
25749 /* Returns an insn that has a vrsave set operation with the
25750 appropriate CLOBBERs. */
25751
25752 static rtx
25753 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25754 {
25755 int nclobs, i;
25756 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25757 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25758
25759 clobs[0]
25760 = gen_rtx_SET (vrsave,
25761 gen_rtx_UNSPEC_VOLATILE (SImode,
25762 gen_rtvec (2, reg, vrsave),
25763 UNSPECV_SET_VRSAVE));
25764
25765 nclobs = 1;
25766
25767 /* We need to clobber the registers in the mask so the scheduler
25768 does not move sets to VRSAVE before sets of AltiVec registers.
25769
25770 However, if the function receives nonlocal gotos, reload will set
25771 all call saved registers live. We will end up with:
25772
25773 (set (reg 999) (mem))
25774 (parallel [ (set (reg vrsave) (unspec blah))
25775 (clobber (reg 999))])
25776
25777 The clobber will cause the store into reg 999 to be dead, and
25778 flow will attempt to delete an epilogue insn. In this case, we
25779 need an unspec use/set of the register. */
25780
25781 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25782 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25783 {
25784 if (!epiloguep || call_used_regs [i])
25785 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
25786 else
25787 {
25788 rtx reg = gen_rtx_REG (V4SImode, i);
25789
25790 clobs[nclobs++]
25791 = gen_rtx_SET (reg,
25792 gen_rtx_UNSPEC (V4SImode,
25793 gen_rtvec (1, reg), 27));
25794 }
25795 }
25796
25797 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25798
25799 for (i = 0; i < nclobs; ++i)
25800 XVECEXP (insn, 0, i) = clobs[i];
25801
25802 return insn;
25803 }
25804
25805 static rtx
25806 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25807 {
25808 rtx addr, mem;
25809
25810 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25811 mem = gen_frame_mem (GET_MODE (reg), addr);
25812 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25813 }
25814
25815 static rtx
25816 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25817 {
25818 return gen_frame_set (reg, frame_reg, offset, false);
25819 }
25820
25821 static rtx
25822 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25823 {
25824 return gen_frame_set (reg, frame_reg, offset, true);
25825 }
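/* For example (illustrative), gen_frame_store (gen_rtx_REG (DFmode, 63),
   stack_pointer_rtx, -8) builds
     (set (mem:DF (plus (reg 1) (const_int -8))) (reg:DF 63))
   and gen_frame_load builds the mirror-image restore.  */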
25826
25827 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25828 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25829
25830 static rtx_insn *
25831 emit_frame_save (rtx frame_reg, machine_mode mode,
25832 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25833 {
25834 rtx reg;
25835
25836 /* Some cases that need register indexed addressing. */
25837 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25838 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25839
25840 reg = gen_rtx_REG (mode, regno);
25841 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25842 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25843 NULL_RTX, NULL_RTX);
25844 }
25845
25846 /* Emit an offset memory reference suitable for a frame store, while
25847 converting to a valid addressing mode. */
25848
25849 static rtx
25850 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25851 {
25852 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25853 }
25854
25855 #ifndef TARGET_FIX_AND_CONTINUE
25856 #define TARGET_FIX_AND_CONTINUE 0
25857 #endif
25858
25859 /* The first saved register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest. */
25860 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25861 #define LAST_SAVRES_REGISTER 31
25862 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25863
25864 enum {
25865 SAVRES_LR = 0x1,
25866 SAVRES_SAVE = 0x2,
25867 SAVRES_REG = 0x0c,
25868 SAVRES_GPR = 0,
25869 SAVRES_FPR = 4,
25870 SAVRES_VR = 8
25871 };
25872
25873 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25874
25875 /* Temporary holding space for an out-of-line register save/restore
25876 routine name. */
25877 static char savres_routine_name[30];
25878
25879 /* Return the name for an out-of-line register save/restore routine.
25880 REGNO is the first register saved/restored; SEL selects the register class, save vs. restore, and the LR variant. */
25881
25882 static char *
25883 rs6000_savres_routine_name (int regno, int sel)
25884 {
25885 const char *prefix = "";
25886 const char *suffix = "";
25887
25888 /* Different targets are supposed to define
25889 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25890 routine name could be defined with:
25891
25892 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25893
25894 This is a nice idea in theory, but in reality, things are
25895 complicated in several ways:
25896
25897 - ELF targets have save/restore routines for GPRs.
25898
25899 - PPC64 ELF targets have routines for save/restore of GPRs that
25900 differ in what they do with the link register, so having a set
25901 prefix doesn't work. (We only use one of the save routines at
25902 the moment, though.)
25903
25904 - PPC32 ELF targets have "exit" versions of the restore routines
25905 that restore the link register and can save some extra space.
25906 These require an extra suffix. (There are also "tail" versions
25907 of the restore routines and "GOT" versions of the save routines,
25908 but we don't generate those at present. Same problems apply,
25909 though.)
25910
25911 We deal with all this by synthesizing our own prefix/suffix and
25912 using that for the simple sprintf call shown above. */
25913 if (DEFAULT_ABI == ABI_V4)
25914 {
25915 if (TARGET_64BIT)
25916 goto aix_names;
25917
25918 if ((sel & SAVRES_REG) == SAVRES_GPR)
25919 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25920 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25921 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25922 else if ((sel & SAVRES_REG) == SAVRES_VR)
25923 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25924 else
25925 abort ();
25926
25927 if ((sel & SAVRES_LR))
25928 suffix = "_x";
25929 }
25930 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25931 {
25932 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25933 /* No out-of-line save/restore routines for GPRs on AIX. */
25934 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25935 #endif
25936
25937 aix_names:
25938 if ((sel & SAVRES_REG) == SAVRES_GPR)
25939 prefix = ((sel & SAVRES_SAVE)
25940 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25941 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25942 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25943 {
25944 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25945 if ((sel & SAVRES_LR))
25946 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25947 else
25948 #endif
25949 {
25950 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25951 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25952 }
25953 }
25954 else if ((sel & SAVRES_REG) == SAVRES_VR)
25955 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25956 else
25957 abort ();
25958 }
25959
25960 if (DEFAULT_ABI == ABI_DARWIN)
25961 {
25962 /* The Darwin approach is (slightly) different, in order to be
25963 compatible with code generated by the system toolchain. There is a
25964 single symbol for the start of each save/restore sequence, and the
25965 code here computes an offset into that sequence based on the first
25966 register to be saved. */
25967 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25968 if ((sel & SAVRES_REG) == SAVRES_GPR)
25969 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25970 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25971 (regno - 13) * 4, prefix, regno);
25972 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25973 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25974 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25975 else if ((sel & SAVRES_REG) == SAVRES_VR)
25976 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25977 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25978 else
25979 abort ();
25980 }
25981 else
25982 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25983
25984 return savres_routine_name;
25985 }
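/* Examples of the names produced (derived from the format strings
   above; register choices illustrative):
     32-bit SVR4, save GPRs from r28:       "_savegpr_28"
     32-bit SVR4, exit restore with LR:     "_restgpr_28_x"
     64-bit ELF, save GPRs and LR from r28: "_savegpr0_28"
     Darwin, restore FPRs from f20:         "*restFP+24 ; restFP f20-f31"  */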
25986
25987 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25988 INFO and SEL select the routine, as for rs6000_savres_routine_name. */
25989
25990 static rtx
25991 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25992 {
25993 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25994 ? info->first_gp_reg_save
25995 : (sel & SAVRES_REG) == SAVRES_FPR
25996 ? info->first_fp_reg_save - 32
25997 : (sel & SAVRES_REG) == SAVRES_VR
25998 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25999 : -1);
26000 rtx sym;
26001 int select = sel;
26002
26003 /* Don't generate bogus routine names. */
26004 gcc_assert (FIRST_SAVRES_REGISTER <= regno
26005 && regno <= LAST_SAVRES_REGISTER
26006 && select >= 0 && select < 12);
26007
26008 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
26009
26010 if (sym == NULL)
26011 {
26012 char *name;
26013
26014 name = rs6000_savres_routine_name (regno, sel);
26015
26016 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
26017 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
26018 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
26019 }
26020
26021 return sym;
26022 }
26023
26024 /* Emit a sequence of insns, including a stack tie if needed, for
26025 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
26026 reset the stack pointer, but move the base of the frame into
26027 reg UPDT_REGNO for use by out-of-line register restore routines. */
26028
26029 static rtx
26030 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
26031 unsigned updt_regno)
26032 {
26033 /* If there is nothing to do, don't do anything. */
26034 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26035 return NULL_RTX;
26036
26037 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26038
26039 /* This blockage is needed so that sched doesn't decide to move
26040 the sp change before the register restores. */
26041 if (DEFAULT_ABI == ABI_V4)
26042 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26043 GEN_INT (frame_off)));
26044
26045 /* If we are restoring registers out-of-line, we will be using the
26046 "exit" variants of the restore routines, which will reset the
26047 stack for us. But we do need to point updt_reg into the
26048 right place for those routines. */
26049 if (frame_off != 0)
26050 return emit_insn (gen_add3_insn (updt_reg_rtx,
26051 frame_reg_rtx, GEN_INT (frame_off)));
26052 else
26053 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26056 }
26057
26058 /* Return the register number used as a pointer by out-of-line
26059 save/restore functions. */
26060
26061 static inline unsigned
26062 ptr_regno_for_savres (int sel)
26063 {
26064 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26065 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26066 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26067 }
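/* Restated as a table (a summary of the conditions above):
     AIX/ELFv2:  FPR routines and all LR variants use r1, others r12.
     Darwin:     FPR routines use r1, others r11.
     V.4:        r11 throughout.  */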
26068
26069 /* Construct a parallel rtx describing the effect of a call to an
26070 out-of-line register save/restore routine, and emit the insn
26071 or jump_insn as appropriate. */
26072
26073 static rtx_insn *
26074 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26075 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26076 machine_mode reg_mode, int sel)
26077 {
26078 int i;
26079 int offset, start_reg, end_reg, n_regs, use_reg;
26080 int reg_size = GET_MODE_SIZE (reg_mode);
26081 rtx sym;
26082 rtvec p;
26083 rtx par;
26084 rtx_insn *insn;
26085
26086 offset = 0;
26087 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26088 ? info->first_gp_reg_save
26089 : (sel & SAVRES_REG) == SAVRES_FPR
26090 ? info->first_fp_reg_save
26091 : (sel & SAVRES_REG) == SAVRES_VR
26092 ? info->first_altivec_reg_save
26093 : -1);
26094 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26095 ? 32
26096 : (sel & SAVRES_REG) == SAVRES_FPR
26097 ? 64
26098 : (sel & SAVRES_REG) == SAVRES_VR
26099 ? LAST_ALTIVEC_REGNO + 1
26100 : -1);
26101 n_regs = end_reg - start_reg;
26102 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26103 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26104 + n_regs);
26105
26106 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26107 RTVEC_ELT (p, offset++) = ret_rtx;
26108
26109 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26110
26111 sym = rs6000_savres_routine_sym (info, sel);
26112 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26113
26114 use_reg = ptr_regno_for_savres (sel);
26115 if ((sel & SAVRES_REG) == SAVRES_VR)
26116 {
26117 /* Vector regs are saved/restored using [reg+reg] addressing. */
26118 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26119 RTVEC_ELT (p, offset++)
26120 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26121 }
26122 else
26123 RTVEC_ELT (p, offset++)
26124 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26125
26126 for (i = 0; i < end_reg - start_reg; i++)
26127 RTVEC_ELT (p, i + offset)
26128 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26129 frame_reg_rtx, save_area_offset + reg_size * i,
26130 (sel & SAVRES_SAVE) != 0);
26131
26132 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26133 RTVEC_ELT (p, i + offset)
26134 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26135
26136 par = gen_rtx_PARALLEL (VOIDmode, p);
26137
26138 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26139 {
26140 insn = emit_jump_insn (par);
26141 JUMP_LABEL (insn) = ret_rtx;
26142 }
26143 else
26144 insn = emit_insn (par);
26145 return insn;
26146 }
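/* Shape of the PARALLEL built above for an illustrative 64-bit ELF
   GPR save without LR, with first_gp_reg_save == 29, r12 as the
   pointer register and OFF the save area offset:
     (parallel [(clobber (reg:DI LR_REGNO))
		(use (symbol_ref "_savegpr1_29"))
		(use (reg:DI 12))
		(set (mem (plus (reg 12) (const_int OFF)))    (reg 29))
		(set (mem (plus (reg 12) (const_int OFF+8)))  (reg 30))
		(set (mem (plus (reg 12) (const_int OFF+16))) (reg 31))])  */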
26147
26148 /* Emit prologue code to store CR fields that need to be saved into REG. This
26149 function should only be called when moving the non-volatile CRs to REG;
26150 it is not a general purpose routine to move the entire set of CRs to REG.
26151 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26152 volatile CRs. */
26153
26154 static void
26155 rs6000_emit_prologue_move_from_cr (rtx reg)
26156 {
26157 /* Only the ELFv2 ABI allows storing only selected fields. */
26158 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26159 {
26160 int i, cr_reg[8], count = 0;
26161
26162 /* Collect CR fields that must be saved. */
26163 for (i = 0; i < 8; i++)
26164 if (save_reg_p (CR0_REGNO + i))
26165 cr_reg[count++] = i;
26166
26167 /* If it's just a single one, use mfcrf. */
26168 if (count == 1)
26169 {
26170 rtvec p = rtvec_alloc (1);
26171 rtvec r = rtvec_alloc (2);
26172 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26173 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26174 RTVEC_ELT (p, 0)
26175 = gen_rtx_SET (reg,
26176 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26177
26178 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26179 return;
26180 }
26181
26182 /* ??? It might be better to handle count == 2 / 3 cases here
26183 as well, using logical operations to combine the values. */
26184 }
26185
26186 emit_insn (gen_prologue_movesi_from_cr (reg));
26187 }
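/* E.g. (illustrative) if CR2 is the only field needing to be saved,
   count == 1 and cr_reg[0] == 2, so the UNSPEC carries the field mask
   1 << (7 - 2) == 0x20 and the insn emits something like
   "mfcrf %0,0x20".  */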
26188
26189 /* Return whether the split-stack arg pointer (r12) is used. */
26190
26191 static bool
26192 split_stack_arg_pointer_used_p (void)
26193 {
26194 /* If the pseudo holding the arg pointer is no longer a pseudo,
26195 then the arg pointer is used. */
26196 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26197 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26198 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26199 return true;
26200
26201 /* Unfortunately we also need to do some code scanning, since
26202 r12 may have been substituted for the pseudo. */
26203 rtx_insn *insn;
26204 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26205 FOR_BB_INSNS (bb, insn)
26206 if (NONDEBUG_INSN_P (insn))
26207 {
26208 /* A call destroys r12. */
26209 if (CALL_P (insn))
26210 return false;
26211
26212 df_ref use;
26213 FOR_EACH_INSN_USE (use, insn)
26214 {
26215 rtx x = DF_REF_REG (use);
26216 if (REG_P (x) && REGNO (x) == 12)
26217 return true;
26218 }
26219 df_ref def;
26220 FOR_EACH_INSN_DEF (def, insn)
26221 {
26222 rtx x = DF_REF_REG (def);
26223 if (REG_P (x) && REGNO (x) == 12)
26224 return false;
26225 }
26226 }
26227 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26228 }
26229
26230 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26231
26232 static bool
26233 rs6000_global_entry_point_needed_p (void)
26234 {
26235 /* Only needed for the ELFv2 ABI. */
26236 if (DEFAULT_ABI != ABI_ELFv2)
26237 return false;
26238
26239 /* With -msingle-pic-base, we assume the whole program shares the same
26240 TOC, so no global entry point prologues are needed anywhere. */
26241 if (TARGET_SINGLE_PIC_BASE)
26242 return false;
26243
26244 /* Ensure we have a global entry point for thunks. ??? We could
26245 avoid that if the target routine doesn't need a global entry point,
26246 but we do not know whether this is the case at this point. */
26247 if (cfun->is_thunk)
26248 return true;
26249
26250 /* For regular functions, rs6000_emit_prologue sets this flag if the
26251 routine ever uses the TOC pointer. */
26252 return cfun->machine->r2_setup_needed;
26253 }
26254
26255 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26256 static sbitmap
26257 rs6000_get_separate_components (void)
26258 {
26259 rs6000_stack_t *info = rs6000_stack_info ();
26260
26261 if (WORLD_SAVE_P (info))
26262 return NULL;
26263
26264 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26265 && !(info->savres_strategy & REST_MULTIPLE));
26266
26267 /* Component 0 is the save/restore of LR (done via GPR0).
26268 Component 2 is the save of the TOC (GPR2).
26269 Components 13..31 are the save/restore of GPR13..GPR31.
26270 Components 46..63 are the save/restore of FPR14..FPR31. */
26271
26272 cfun->machine->n_components = 64;
26273
26274 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26275 bitmap_clear (components);
26276
26277 int reg_size = TARGET_32BIT ? 4 : 8;
26278 int fp_reg_size = 8;
26279
26280 /* The GPRs we need saved to the frame. */
26281 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26282 && (info->savres_strategy & REST_INLINE_GPRS))
26283 {
26284 int offset = info->gp_save_offset;
26285 if (info->push_p)
26286 offset += info->total_size;
26287
26288 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26289 {
26290 if (IN_RANGE (offset, -0x8000, 0x7fff)
26291 && save_reg_p (regno))
26292 bitmap_set_bit (components, regno);
26293
26294 offset += reg_size;
26295 }
26296 }
26297
26298 /* Don't mess with the hard frame pointer. */
26299 if (frame_pointer_needed)
26300 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26301
26302 /* Don't mess with the fixed TOC register. */
26303 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26304 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26305 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26306 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26307
26308 /* The FPRs we need saved to the frame. */
26309 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26310 && (info->savres_strategy & REST_INLINE_FPRS))
26311 {
26312 int offset = info->fp_save_offset;
26313 if (info->push_p)
26314 offset += info->total_size;
26315
26316 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26317 {
26318 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26319 bitmap_set_bit (components, regno);
26320
26321 offset += fp_reg_size;
26322 }
26323 }
26324
26325 /* Optimize LR save and restore if we can. This is component 0. Any
26326 out-of-line register save/restore routines need LR. */
26327 if (info->lr_save_p
26328 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26329 && (info->savres_strategy & SAVE_INLINE_GPRS)
26330 && (info->savres_strategy & REST_INLINE_GPRS)
26331 && (info->savres_strategy & SAVE_INLINE_FPRS)
26332 && (info->savres_strategy & REST_INLINE_FPRS)
26333 && (info->savres_strategy & SAVE_INLINE_VRS)
26334 && (info->savres_strategy & REST_INLINE_VRS))
26335 {
26336 int offset = info->lr_save_offset;
26337 if (info->push_p)
26338 offset += info->total_size;
26339 if (IN_RANGE (offset, -0x8000, 0x7fff))
26340 bitmap_set_bit (components, 0);
26341 }
26342
26343 /* Optimize saving the TOC. This is component 2. */
26344 if (cfun->machine->save_toc_in_prologue)
26345 bitmap_set_bit (components, 2);
26346
26347 return components;
26348 }
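/* For instance (illustrative), a function that saves r30, r31, f31
   and LR, all within a 16-bit offset of the frame base, would get
   the component set {0, 30, 31, 63}; saving the TOC in the prologue
   would add component 2.  */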
26349
26350 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26351 static sbitmap
26352 rs6000_components_for_bb (basic_block bb)
26353 {
26354 rs6000_stack_t *info = rs6000_stack_info ();
26355
26356 bitmap in = DF_LIVE_IN (bb);
26357 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26358 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26359
26360 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26361 bitmap_clear (components);
26362
26363 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26364
26365 /* GPRs. */
26366 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26367 if (bitmap_bit_p (in, regno)
26368 || bitmap_bit_p (gen, regno)
26369 || bitmap_bit_p (kill, regno))
26370 bitmap_set_bit (components, regno);
26371
26372 /* FPRs. */
26373 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26374 if (bitmap_bit_p (in, regno)
26375 || bitmap_bit_p (gen, regno)
26376 || bitmap_bit_p (kill, regno))
26377 bitmap_set_bit (components, regno);
26378
26379 /* The link register. */
26380 if (bitmap_bit_p (in, LR_REGNO)
26381 || bitmap_bit_p (gen, LR_REGNO)
26382 || bitmap_bit_p (kill, LR_REGNO))
26383 bitmap_set_bit (components, 0);
26384
26385 /* The TOC save. */
26386 if (bitmap_bit_p (in, TOC_REGNUM)
26387 || bitmap_bit_p (gen, TOC_REGNUM)
26388 || bitmap_bit_p (kill, TOC_REGNUM))
26389 bitmap_set_bit (components, 2);
26390
26391 return components;
26392 }
26393
26394 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26395 static void
26396 rs6000_disqualify_components (sbitmap components, edge e,
26397 sbitmap edge_components, bool /*is_prologue*/)
26398 {
26399 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26400 live where we want to place that code. */
26401 if (bitmap_bit_p (edge_components, 0)
26402 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26403 {
26404 if (dump_file)
26405 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26406 "on entry to bb %d\n", e->dest->index);
26407 bitmap_clear_bit (components, 0);
26408 }
26409 }
26410
26411 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26412 static void
26413 rs6000_emit_prologue_components (sbitmap components)
26414 {
26415 rs6000_stack_t *info = rs6000_stack_info ();
26416 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26417 ? HARD_FRAME_POINTER_REGNUM
26418 : STACK_POINTER_REGNUM);
26419
26420 machine_mode reg_mode = Pmode;
26421 int reg_size = TARGET_32BIT ? 4 : 8;
26422 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26423 int fp_reg_size = 8;
26424
26425 /* Prologue for LR. */
26426 if (bitmap_bit_p (components, 0))
26427 {
26428 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26429 rtx reg = gen_rtx_REG (reg_mode, 0);
26430 rtx_insn *insn = emit_move_insn (reg, lr);
26431 RTX_FRAME_RELATED_P (insn) = 1;
26432 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26433
26434 int offset = info->lr_save_offset;
26435 if (info->push_p)
26436 offset += info->total_size;
26437
26438 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26439 RTX_FRAME_RELATED_P (insn) = 1;
26440 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26441 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26442 }
26443
26444 /* Prologue for TOC. */
26445 if (bitmap_bit_p (components, 2))
26446 {
26447 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26448 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26449 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26450 }
26451
26452 /* Prologue for the GPRs. */
26453 int offset = info->gp_save_offset;
26454 if (info->push_p)
26455 offset += info->total_size;
26456
26457 for (int i = info->first_gp_reg_save; i < 32; i++)
26458 {
26459 if (bitmap_bit_p (components, i))
26460 {
26461 rtx reg = gen_rtx_REG (reg_mode, i);
26462 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26463 RTX_FRAME_RELATED_P (insn) = 1;
26464 rtx set = copy_rtx (single_set (insn));
26465 add_reg_note (insn, REG_CFA_OFFSET, set);
26466 }
26467
26468 offset += reg_size;
26469 }
26470
26471 /* Prologue for the FPRs. */
26472 offset = info->fp_save_offset;
26473 if (info->push_p)
26474 offset += info->total_size;
26475
26476 for (int i = info->first_fp_reg_save; i < 64; i++)
26477 {
26478 if (bitmap_bit_p (components, i))
26479 {
26480 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26481 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26482 RTX_FRAME_RELATED_P (insn) = 1;
26483 rtx set = copy_rtx (single_set (insn));
26484 add_reg_note (insn, REG_CFA_OFFSET, set);
26485 }
26486
26487 offset += fp_reg_size;
26488 }
26489 }
26490
26491 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26492 static void
26493 rs6000_emit_epilogue_components (sbitmap components)
26494 {
26495 rs6000_stack_t *info = rs6000_stack_info ();
26496 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26497 ? HARD_FRAME_POINTER_REGNUM
26498 : STACK_POINTER_REGNUM);
26499
26500 machine_mode reg_mode = Pmode;
26501 int reg_size = TARGET_32BIT ? 4 : 8;
26502
26503 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26504 int fp_reg_size = 8;
26505
26506 /* Epilogue for the FPRs. */
26507 int offset = info->fp_save_offset;
26508 if (info->push_p)
26509 offset += info->total_size;
26510
26511 for (int i = info->first_fp_reg_save; i < 64; i++)
26512 {
26513 if (bitmap_bit_p (components, i))
26514 {
26515 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26516 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26517 RTX_FRAME_RELATED_P (insn) = 1;
26518 add_reg_note (insn, REG_CFA_RESTORE, reg);
26519 }
26520
26521 offset += fp_reg_size;
26522 }
26523
26524 /* Epilogue for the GPRs. */
26525 offset = info->gp_save_offset;
26526 if (info->push_p)
26527 offset += info->total_size;
26528
26529 for (int i = info->first_gp_reg_save; i < 32; i++)
26530 {
26531 if (bitmap_bit_p (components, i))
26532 {
26533 rtx reg = gen_rtx_REG (reg_mode, i);
26534 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26535 RTX_FRAME_RELATED_P (insn) = 1;
26536 add_reg_note (insn, REG_CFA_RESTORE, reg);
26537 }
26538
26539 offset += reg_size;
26540 }
26541
26542 /* Epilogue for LR. */
26543 if (bitmap_bit_p (components, 0))
26544 {
26545 int offset = info->lr_save_offset;
26546 if (info->push_p)
26547 offset += info->total_size;
26548
26549 rtx reg = gen_rtx_REG (reg_mode, 0);
26550 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26551
26552 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26553 insn = emit_move_insn (lr, reg);
26554 RTX_FRAME_RELATED_P (insn) = 1;
26555 add_reg_note (insn, REG_CFA_RESTORE, lr);
26556 }
26557 }
26558
26559 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26560 static void
26561 rs6000_set_handled_components (sbitmap components)
26562 {
26563 rs6000_stack_t *info = rs6000_stack_info ();
26564
26565 for (int i = info->first_gp_reg_save; i < 32; i++)
26566 if (bitmap_bit_p (components, i))
26567 cfun->machine->gpr_is_wrapped_separately[i] = true;
26568
26569 for (int i = info->first_fp_reg_save; i < 64; i++)
26570 if (bitmap_bit_p (components, i))
26571 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26572
26573 if (bitmap_bit_p (components, 0))
26574 cfun->machine->lr_is_wrapped_separately = true;
26575
26576 if (bitmap_bit_p (components, 2))
26577 cfun->machine->toc_is_wrapped_separately = true;
26578 }
26579
26580 /* VRSAVE is a bit vector representing which AltiVec registers
26581 are used. The OS uses this to determine which vector
26582 registers to save on a context switch. We need to save
26583 VRSAVE on the stack frame, set the bits for whatever AltiVec
26584 registers we use in this function, and undo all of this in the
26585 epilogue. */
26586 static void
26587 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26588 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26589 {
26590 /* Get VRSAVE into a GPR. */
26591 rtx reg = gen_rtx_REG (SImode, save_regno);
26592 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26593 if (TARGET_MACHO)
26594 emit_insn (gen_get_vrsave_internal (reg));
26595 else
26596 emit_insn (gen_rtx_SET (reg, vrsave));
26597
26598 /* Save VRSAVE. */
26599 int offset = info->vrsave_save_offset + frame_off;
26600 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26601
26602 /* Include the registers in the mask. */
26603 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26604
26605 emit_insn (generate_set_vrsave (reg, info, 0));
26606 }
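/* Schematically (illustrative; the exact patterns differ on Darwin)
   the sequence above is:
	mfspr	rS,VRSAVE
	stw	rS,vrsave_save_offset(frame)
	oris/ori rS,rS,info->vrsave_mask
	mtspr	VRSAVE,rS  */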
26607
26608 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26609 called, it left the arg pointer to the old stack in r29. Otherwise, the
26610 arg pointer is the top of the current frame. */
26611 static void
26612 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26613 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26614 {
26615 cfun->machine->split_stack_argp_used = true;
26616
26617 if (sp_adjust)
26618 {
26619 rtx r12 = gen_rtx_REG (Pmode, 12);
26620 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26621 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26622 emit_insn_before (set_r12, sp_adjust);
26623 }
26624 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26625 {
26626 rtx r12 = gen_rtx_REG (Pmode, 12);
26627 if (frame_off == 0)
26628 emit_move_insn (r12, frame_reg_rtx);
26629 else
26630 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26631 }
26632
26633 if (info->push_p)
26634 {
26635 rtx r12 = gen_rtx_REG (Pmode, 12);
26636 rtx r29 = gen_rtx_REG (Pmode, 29);
26637 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26638 rtx not_more = gen_label_rtx ();
26639 rtx jump;
26640
26641 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26642 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26643 gen_rtx_LABEL_REF (VOIDmode, not_more),
26644 pc_rtx);
26645 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26646 JUMP_LABEL (jump) = not_more;
26647 LABEL_NUSES (not_more) += 1;
26648 emit_move_insn (r12, r29);
26649 emit_label (not_more);
26650 }
26651 }
26652
26653 /* Emit function prologue as insns. */
26654
26655 void
26656 rs6000_emit_prologue (void)
26657 {
26658 rs6000_stack_t *info = rs6000_stack_info ();
26659 machine_mode reg_mode = Pmode;
26660 int reg_size = TARGET_32BIT ? 4 : 8;
26661 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26662 int fp_reg_size = 8;
26663 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26664 rtx frame_reg_rtx = sp_reg_rtx;
26665 unsigned int cr_save_regno;
26666 rtx cr_save_rtx = NULL_RTX;
26667 rtx_insn *insn;
26668 int strategy;
26669 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26670 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26671 && call_used_regs[STATIC_CHAIN_REGNUM]);
26672 int using_split_stack = (flag_split_stack
26673 && (lookup_attribute ("no_split_stack",
26674 DECL_ATTRIBUTES (cfun->decl))
26675 == NULL));
26676
26677 /* Offset to top of frame for frame_reg and sp respectively. */
26678 HOST_WIDE_INT frame_off = 0;
26679 HOST_WIDE_INT sp_off = 0;
26680 /* sp_adjust is the stack adjusting instruction, tracked so that the
26681 insn setting up the split-stack arg pointer can be emitted just
26682 prior to it, when r12 is not used here for other purposes. */
26683 rtx_insn *sp_adjust = 0;
26684
26685 #if CHECKING_P
26686 /* Track and check usage of r0, r11, r12. */
26687 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26688 #define START_USE(R) do \
26689 { \
26690 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26691 reg_inuse |= 1 << (R); \
26692 } while (0)
26693 #define END_USE(R) do \
26694 { \
26695 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26696 reg_inuse &= ~(1 << (R)); \
26697 } while (0)
26698 #define NOT_INUSE(R) do \
26699 { \
26700 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26701 } while (0)
26702 #else
26703 #define START_USE(R) do {} while (0)
26704 #define END_USE(R) do {} while (0)
26705 #define NOT_INUSE(R) do {} while (0)
26706 #endif
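/* Thus, for example, START_USE (0) asserts that r0 is currently free
   and marks it in use until the matching END_USE (0), while
   NOT_INUSE (12) merely asserts that r12 is free; all three expand
   to nothing when !CHECKING_P.  */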
26707
26708 if (DEFAULT_ABI == ABI_ELFv2
26709 && !TARGET_SINGLE_PIC_BASE)
26710 {
26711 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26712
26713 /* With -mminimal-toc we may generate an extra use of r2 below. */
26714 if (TARGET_TOC && TARGET_MINIMAL_TOC
26715 && !constant_pool_empty_p ())
26716 cfun->machine->r2_setup_needed = true;
26717 }
26718
26719
26720 if (flag_stack_usage_info)
26721 current_function_static_stack_size = info->total_size;
26722
26723 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26724 {
26725 HOST_WIDE_INT size = info->total_size;
26726
26727 if (crtl->is_leaf && !cfun->calls_alloca)
26728 {
26729 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26730 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26731 size - get_stack_check_protect ());
26732 }
26733 else if (size > 0)
26734 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26735 }
26736
26737 if (TARGET_FIX_AND_CONTINUE)
26738 {
26739 /* GDB on Darwin arranges to forward a function from the old
26740 address by modifying the first 5 instructions of the function
26741 to branch to the overriding function. This is necessary to
26742 permit function pointers that point to the old function to
26743 actually forward to the new function. */
26744 emit_insn (gen_nop ());
26745 emit_insn (gen_nop ());
26746 emit_insn (gen_nop ());
26747 emit_insn (gen_nop ());
26748 emit_insn (gen_nop ());
26749 }
26750
26751 /* Handle world saves specially here. */
26752 if (WORLD_SAVE_P (info))
26753 {
26754 int i, j, sz;
26755 rtx treg;
26756 rtvec p;
26757 rtx reg0;
26758
26759 /* save_world expects lr in r0. */
26760 reg0 = gen_rtx_REG (Pmode, 0);
26761 if (info->lr_save_p)
26762 {
26763 insn = emit_move_insn (reg0,
26764 gen_rtx_REG (Pmode, LR_REGNO));
26765 RTX_FRAME_RELATED_P (insn) = 1;
26766 }
26767
26768 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26769 assumptions about the offsets of various bits of the stack
26770 frame. */
26771 gcc_assert (info->gp_save_offset == -220
26772 && info->fp_save_offset == -144
26773 && info->lr_save_offset == 8
26774 && info->cr_save_offset == 4
26775 && info->push_p
26776 && info->lr_save_p
26777 && (!crtl->calls_eh_return
26778 || info->ehrd_offset == -432)
26779 && info->vrsave_save_offset == -224
26780 && info->altivec_save_offset == -416);
26781
26782 treg = gen_rtx_REG (SImode, 11);
26783 emit_move_insn (treg, GEN_INT (-info->total_size));
26784
26785 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26786 in R11. It also clobbers R12, so beware! */
26787
26788 /* Preserve CR2 for save_world prologues. */
26789 sz = 5;
26790 sz += 32 - info->first_gp_reg_save;
26791 sz += 64 - info->first_fp_reg_save;
26792 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26793 p = rtvec_alloc (sz);
26794 j = 0;
26795 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
26796 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26797 gen_rtx_SYMBOL_REF (Pmode,
26798 "*save_world"));
26799 /* We do floats first so that the instruction pattern matches
26800 properly. */
26801 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26802 RTVEC_ELT (p, j++)
26803 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26804 info->first_fp_reg_save + i),
26805 frame_reg_rtx,
26806 info->fp_save_offset + frame_off + 8 * i);
26807 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26808 RTVEC_ELT (p, j++)
26809 = gen_frame_store (gen_rtx_REG (V4SImode,
26810 info->first_altivec_reg_save + i),
26811 frame_reg_rtx,
26812 info->altivec_save_offset + frame_off + 16 * i);
26813 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26814 RTVEC_ELT (p, j++)
26815 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26816 frame_reg_rtx,
26817 info->gp_save_offset + frame_off + reg_size * i);
26818
26819 /* CR register traditionally saved as CR2. */
26820 RTVEC_ELT (p, j++)
26821 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26822 frame_reg_rtx, info->cr_save_offset + frame_off);
26823 /* Explain the use of R0. */
26824 if (info->lr_save_p)
26825 RTVEC_ELT (p, j++)
26826 = gen_frame_store (reg0,
26827 frame_reg_rtx, info->lr_save_offset + frame_off);
26828 /* Explain what happens to the stack pointer. */
26829 {
26830 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26831 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26832 }
26833
26834 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26835 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26836 treg, GEN_INT (-info->total_size));
26837 sp_off = frame_off = info->total_size;
26838 }
26839
26840 strategy = info->savres_strategy;
26841
26842 /* For V.4, update stack before we do any saving and set back pointer. */
26843 if (! WORLD_SAVE_P (info)
26844 && info->push_p
26845 && (DEFAULT_ABI == ABI_V4
26846 || crtl->calls_eh_return))
26847 {
26848 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26849 || !(strategy & SAVE_INLINE_GPRS)
26850 || !(strategy & SAVE_INLINE_VRS));
26851 int ptr_regno = -1;
26852 rtx ptr_reg = NULL_RTX;
26853 int ptr_off = 0;
26854
26855 if (info->total_size < 32767)
26856 frame_off = info->total_size;
26857 else if (need_r11)
26858 ptr_regno = 11;
26859 else if (info->cr_save_p
26860 || info->lr_save_p
26861 || info->first_fp_reg_save < 64
26862 || info->first_gp_reg_save < 32
26863 || info->altivec_size != 0
26864 || info->vrsave_size != 0
26865 || crtl->calls_eh_return)
26866 ptr_regno = 12;
26867 else
26868 {
26869 /* The prologue won't be saving any regs so there is no need
26870 to set up a frame register to access any frame save area.
26871 We also won't be using frame_off anywhere below, but set
26872 the correct value anyway to protect against future
26873 changes to this function. */
26874 frame_off = info->total_size;
26875 }
26876 if (ptr_regno != -1)
26877 {
26878 /* Set up the frame offset to that needed by the first
26879 out-of-line save function. */
26880 START_USE (ptr_regno);
26881 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26882 frame_reg_rtx = ptr_reg;
26883 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26884 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26885 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26886 ptr_off = info->gp_save_offset + info->gp_size;
26887 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26888 ptr_off = info->altivec_save_offset + info->altivec_size;
26889 frame_off = -ptr_off;
26890 }
26891 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26892 ptr_reg, ptr_off);
26893 if (REGNO (frame_reg_rtx) == 12)
26894 sp_adjust = 0;
26895 sp_off = info->total_size;
26896 if (frame_reg_rtx != sp_reg_rtx)
26897 rs6000_emit_stack_tie (frame_reg_rtx, false);
26898 }
26899
26900 /* If we use the link register, get it into r0. */
26901 if (!WORLD_SAVE_P (info) && info->lr_save_p
26902 && !cfun->machine->lr_is_wrapped_separately)
26903 {
26904 rtx addr, reg, mem;
26905
26906 reg = gen_rtx_REG (Pmode, 0);
26907 START_USE (0);
26908 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26909 RTX_FRAME_RELATED_P (insn) = 1;
26910
26911 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26912 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26913 {
26914 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26915 GEN_INT (info->lr_save_offset + frame_off));
26916 mem = gen_rtx_MEM (Pmode, addr);
26917 /* This should not be of rs6000_sr_alias_set, because of
26918 __builtin_return_address. */
26919
26920 insn = emit_move_insn (mem, reg);
26921 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26922 NULL_RTX, NULL_RTX);
26923 END_USE (0);
26924 }
26925 }
26926
26927 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26928 r12 will be needed by out-of-line gpr save. */
26929 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26930 && !(strategy & (SAVE_INLINE_GPRS
26931 | SAVE_NOINLINE_GPRS_SAVES_LR))
26932 ? 11 : 12);
26933 if (!WORLD_SAVE_P (info)
26934 && info->cr_save_p
26935 && REGNO (frame_reg_rtx) != cr_save_regno
26936 && !(using_static_chain_p && cr_save_regno == 11)
26937 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26938 {
26939 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26940 START_USE (cr_save_regno);
26941 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26942 }
26943
26944 /* Do any required saving of FPRs. If only a few need saving, do
26945 it inline. Otherwise, call an out-of-line save function. */
26946 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26947 {
26948 int offset = info->fp_save_offset + frame_off;
26949 for (int i = info->first_fp_reg_save; i < 64; i++)
26950 {
26951 if (save_reg_p (i)
26952 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26953 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26954 sp_off - frame_off);
26955
26956 offset += fp_reg_size;
26957 }
26958 }
26959 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26960 {
26961 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26962 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26963 unsigned ptr_regno = ptr_regno_for_savres (sel);
26964 rtx ptr_reg = frame_reg_rtx;
26965
26966 if (REGNO (frame_reg_rtx) == ptr_regno)
26967 gcc_checking_assert (frame_off == 0);
26968 else
26969 {
26970 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26971 NOT_INUSE (ptr_regno);
26972 emit_insn (gen_add3_insn (ptr_reg,
26973 frame_reg_rtx, GEN_INT (frame_off)));
26974 }
26975 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26976 info->fp_save_offset,
26977 info->lr_save_offset,
26978 DFmode, sel);
26979 rs6000_frame_related (insn, ptr_reg, sp_off,
26980 NULL_RTX, NULL_RTX);
26981 if (lr)
26982 END_USE (0);
26983 }
26984
26985 /* Save GPRs. This is done as a PARALLEL if we are using
26986 the store-multiple instructions. */
26987 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26988 {
26989 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26990 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26991 unsigned ptr_regno = ptr_regno_for_savres (sel);
26992 rtx ptr_reg = frame_reg_rtx;
26993 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26994 int end_save = info->gp_save_offset + info->gp_size;
26995 int ptr_off;
26996
26997 if (ptr_regno == 12)
26998 sp_adjust = 0;
26999 if (!ptr_set_up)
27000 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27001
27002 /* Need to adjust r11 (r12) if we saved any FPRs. */
27003 if (end_save + frame_off != 0)
27004 {
27005 rtx offset = GEN_INT (end_save + frame_off);
27006
27007 if (ptr_set_up)
27008 frame_off = -end_save;
27009 else
27010 NOT_INUSE (ptr_regno);
27011 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27012 }
27013 else if (!ptr_set_up)
27014 {
27015 NOT_INUSE (ptr_regno);
27016 emit_move_insn (ptr_reg, frame_reg_rtx);
27017 }
27018 ptr_off = -end_save;
27019 insn = rs6000_emit_savres_rtx (info, ptr_reg,
27020 info->gp_save_offset + ptr_off,
27021 info->lr_save_offset + ptr_off,
27022 reg_mode, sel);
27023 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
27024 NULL_RTX, NULL_RTX);
27025 if (lr)
27026 END_USE (0);
27027 }
27028 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
27029 {
27030 rtvec p;
27031 int i;
27032 p = rtvec_alloc (32 - info->first_gp_reg_save);
27033 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27034 RTVEC_ELT (p, i)
27035 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27036 frame_reg_rtx,
27037 info->gp_save_offset + frame_off + reg_size * i);
27038 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27039 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27040 NULL_RTX, NULL_RTX);
27041 }
27042 else if (!WORLD_SAVE_P (info))
27043 {
27044 int offset = info->gp_save_offset + frame_off;
27045 for (int i = info->first_gp_reg_save; i < 32; i++)
27046 {
27047 if (save_reg_p (i)
27048 && !cfun->machine->gpr_is_wrapped_separately[i])
27049 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27050 sp_off - frame_off);
27051
27052 offset += reg_size;
27053 }
27054 }
27055
27056 if (crtl->calls_eh_return)
27057 {
27058 unsigned int i;
27059 rtvec p;
27060
27061 for (i = 0; ; ++i)
27062 {
27063 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27064 if (regno == INVALID_REGNUM)
27065 break;
27066 }
27067
27068 p = rtvec_alloc (i);
27069
27070 for (i = 0; ; ++i)
27071 {
27072 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27073 if (regno == INVALID_REGNUM)
27074 break;
27075
27076 rtx set
27077 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27078 sp_reg_rtx,
27079 info->ehrd_offset + sp_off + reg_size * (int) i);
27080 RTVEC_ELT (p, i) = set;
27081 RTX_FRAME_RELATED_P (set) = 1;
27082 }
27083
27084 insn = emit_insn (gen_blockage ());
27085 RTX_FRAME_RELATED_P (insn) = 1;
27086 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27087 }
27088
27089 /* In AIX ABI we need to make sure r2 is really saved. */
27090 if (TARGET_AIX && crtl->calls_eh_return)
27091 {
27092 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27093 rtx join_insn, note;
27094 rtx_insn *save_insn;
27095 long toc_restore_insn;
27096
27097 tmp_reg = gen_rtx_REG (Pmode, 11);
27098 tmp_reg_si = gen_rtx_REG (SImode, 11);
27099 if (using_static_chain_p)
27100 {
27101 START_USE (0);
27102 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27103 }
27104 else
27105 START_USE (11);
27106 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27107 /* Peek at instruction to which this function returns. If it's
27108 restoring r2, then we know we've already saved r2. We can't
27109 unconditionally save r2 because the value we have will already
27110 be updated if we arrived at this function via a plt call or
27111 toc adjusting stub. */
27112 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27113 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27114 + RS6000_TOC_SAVE_SLOT);
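/* E.g. (illustrative) for TARGET_64BIT with RS6000_TOC_SAVE_SLOT == 24,
   toc_restore_insn is 0xE8410018, the encoding of "ld 2,24(1)".  */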
27115 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27116 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27117 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27118 validate_condition_mode (EQ, CCUNSmode);
27119 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27120 emit_insn (gen_rtx_SET (compare_result,
27121 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27122 toc_save_done = gen_label_rtx ();
27123 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27124 gen_rtx_EQ (VOIDmode, compare_result,
27125 const0_rtx),
27126 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27127 pc_rtx);
27128 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27129 JUMP_LABEL (jump) = toc_save_done;
27130 LABEL_NUSES (toc_save_done) += 1;
27131
27132 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27133 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27134 sp_off - frame_off);
27135
27136 emit_label (toc_save_done);
27137
27138 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27139 have a CFG that has different saves along different paths.
27140 Move the note to a dummy blockage insn, which describes that
27141 R2 is unconditionally saved after the label. */
27142 /* ??? An alternate representation might be a special insn pattern
27143 containing both the branch and the store. That might give the
27144 code that minimizes the number of DW_CFA_advance opcodes more
27145 freedom in placing the annotations. */
27146 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27147 if (note)
27148 remove_note (save_insn, note);
27149 else
27150 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27151 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27152 RTX_FRAME_RELATED_P (save_insn) = 0;
27153
27154 join_insn = emit_insn (gen_blockage ());
27155 REG_NOTES (join_insn) = note;
27156 RTX_FRAME_RELATED_P (join_insn) = 1;
27157
27158 if (using_static_chain_p)
27159 {
27160 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27161 END_USE (0);
27162 }
27163 else
27164 END_USE (11);
27165 }
27166
27167 /* Save CR if we use any that must be preserved. */
27168 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27169 {
27170 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27171 GEN_INT (info->cr_save_offset + frame_off));
27172 rtx mem = gen_frame_mem (SImode, addr);
27173
27174 /* If we didn't copy cr before, do so now using r0. */
27175 if (cr_save_rtx == NULL_RTX)
27176 {
27177 START_USE (0);
27178 cr_save_rtx = gen_rtx_REG (SImode, 0);
27179 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27180 }
27181
27182 /* Saving CR requires a two-instruction sequence: one instruction
27183 to move the CR to a general-purpose register, and a second
27184 instruction that stores the GPR to memory.
27185
27186 We do not emit any DWARF CFI records for the first of these,
27187 because we cannot properly represent the fact that CR is saved in
27188 a register. One reason is that we cannot express that multiple
27189 CR fields are saved; another reason is that on 64-bit, the size
27190 of the CR register in DWARF (4 bytes) differs from the size of
27191 a general-purpose register.
27192
27193 This means if any intervening instruction were to clobber one of
27194 the call-saved CR fields, we'd have incorrect CFI. To prevent
27195 this from happening, we mark the store to memory as a use of
27196 those CR fields, which prevents any such instruction from being
27197 scheduled in between the two instructions. */
27198 rtx crsave_v[9];
27199 int n_crsave = 0;
27200 int i;
27201
27202 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27203 for (i = 0; i < 8; i++)
27204 if (save_reg_p (CR0_REGNO + i))
27205 crsave_v[n_crsave++]
27206 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27207
27208 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27209 gen_rtvec_v (n_crsave, crsave_v)));
27210 END_USE (REGNO (cr_save_rtx));
27211
27212 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27213 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27214 so we need to construct a frame expression manually. */
27215 RTX_FRAME_RELATED_P (insn) = 1;
27216
27217 /* Update address to be stack-pointer relative, like
27218 rs6000_frame_related would do. */
27219 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27220 GEN_INT (info->cr_save_offset + sp_off));
27221 mem = gen_frame_mem (SImode, addr);
27222
27223 if (DEFAULT_ABI == ABI_ELFv2)
27224 {
27225 /* In the ELFv2 ABI we generate separate CFI records for each
27226 CR field that was actually saved. They all point to the
27227 same 32-bit stack slot. */
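/* For example, if just cr2 and cr3 are saved (regs 70 and 71, given
   CR0_REGNO == 68), the note built here would be
     (parallel [(set (mem:SI ...) (reg:SI 70))
		(set (mem:SI ...) (reg:SI 71))])
   yielding one DWARF save record per CR field, all at this offset. */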
27228 rtx crframe[8];
27229 int n_crframe = 0;
27230
27231 for (i = 0; i < 8; i++)
27232 if (save_reg_p (CR0_REGNO + i))
27233 {
27234 crframe[n_crframe]
27235 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27236
27237 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27238 n_crframe++;
27239 }
27240
27241 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27242 gen_rtx_PARALLEL (VOIDmode,
27243 gen_rtvec_v (n_crframe, crframe)));
27244 }
27245 else
27246 {
27247 /* In other ABIs, by convention, we use a single CR regnum to
27248 represent the fact that all call-saved CR fields are saved.
27249 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27250 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27251 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27252 }
27253 }
27254
27255 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27256 *separate* slots if the routine calls __builtin_eh_return, so
27257 that they can be independently restored by the unwinder. */
27258 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27259 {
27260 int i, cr_off = info->ehcr_offset;
27261 rtx crsave;
27262
27263 /* ??? We might get better performance by using multiple mfocrf
27264 instructions. */
27265 crsave = gen_rtx_REG (SImode, 0);
27266 emit_insn (gen_prologue_movesi_from_cr (crsave));
27267
27268 for (i = 0; i < 8; i++)
27269 if (!call_used_regs[CR0_REGNO + i])
27270 {
27271 rtvec p = rtvec_alloc (2);
27272 RTVEC_ELT (p, 0)
27273 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27274 RTVEC_ELT (p, 1)
27275 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27276
27277 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27278
27279 RTX_FRAME_RELATED_P (insn) = 1;
27280 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27281 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27282 sp_reg_rtx, cr_off + sp_off));
27283
27284 cr_off += reg_size;
27285 }
27286 }
27287
27288 /* If we are emitting stack probes, but allocate no stack, then
27289 just note that in the dump file. */
27290 if (flag_stack_clash_protection
27291 && dump_file
27292 && !info->push_p)
27293 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27294
27295 /* Update stack and set back pointer unless this is V.4,
27296 for which it was done previously. */
27297 if (!WORLD_SAVE_P (info) && info->push_p
27298 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27299 {
27300 rtx ptr_reg = NULL;
27301 int ptr_off = 0;
27302
27303 /* If saving AltiVec regs, we need to be able to address all save
27304 locations using a 16-bit offset. */
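/* The 32767 bounds below reflect the reach of a signed 16-bit D-form
   displacement. */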
27305 if ((strategy & SAVE_INLINE_VRS) == 0
27306 || (info->altivec_size != 0
27307 && (info->altivec_save_offset + info->altivec_size - 16
27308 + info->total_size - frame_off) > 32767)
27309 || (info->vrsave_size != 0
27310 && (info->vrsave_save_offset
27311 + info->total_size - frame_off) > 32767))
27312 {
27313 int sel = SAVRES_SAVE | SAVRES_VR;
27314 unsigned ptr_regno = ptr_regno_for_savres (sel);
27315
27316 if (using_static_chain_p
27317 && ptr_regno == STATIC_CHAIN_REGNUM)
27318 ptr_regno = 12;
27319 if (REGNO (frame_reg_rtx) != ptr_regno)
27320 START_USE (ptr_regno);
27321 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27322 frame_reg_rtx = ptr_reg;
27323 ptr_off = info->altivec_save_offset + info->altivec_size;
27324 frame_off = -ptr_off;
27325 }
27326 else if (REGNO (frame_reg_rtx) == 1)
27327 frame_off = info->total_size;
27328 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27329 ptr_reg, ptr_off);
27330 if (REGNO (frame_reg_rtx) == 12)
27331 sp_adjust = 0;
27332 sp_off = info->total_size;
27333 if (frame_reg_rtx != sp_reg_rtx)
27334 rs6000_emit_stack_tie (frame_reg_rtx, false);
27335 }
27336
27337 /* Set frame pointer, if needed. */
27338 if (frame_pointer_needed)
27339 {
27340 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27341 sp_reg_rtx);
27342 RTX_FRAME_RELATED_P (insn) = 1;
27343 }
27344
27345 /* Save AltiVec registers if needed. Save here because the red zone does
27346 not always include AltiVec registers. */
27347 if (!WORLD_SAVE_P (info)
27348 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27349 {
27350 int end_save = info->altivec_save_offset + info->altivec_size;
27351 int ptr_off;
27352 /* Oddly, the vector save/restore functions point r0 at the end
27353 of the save area, then use r11 or r12 to load offsets for
27354 [reg+reg] addressing. */
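/* Roughly, as a sketch (N is an illustrative per-register offset):
	r0 = frame_reg + end_save	# r0 -> end of the save area
	li 11,-N			# negative offset in r11 (or r12)
	stvx vN,11,0			# [reg+reg] store of each vector  */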
27355 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27356 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27357 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27358
27359 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27360 NOT_INUSE (0);
27361 if (scratch_regno == 12)
27362 sp_adjust = 0;
27363 if (end_save + frame_off != 0)
27364 {
27365 rtx offset = GEN_INT (end_save + frame_off);
27366
27367 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27368 }
27369 else
27370 emit_move_insn (ptr_reg, frame_reg_rtx);
27371
27372 ptr_off = -end_save;
27373 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27374 info->altivec_save_offset + ptr_off,
27375 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27376 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27377 NULL_RTX, NULL_RTX);
27378 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27379 {
27380 /* The oddity mentioned above clobbered our frame reg. */
27381 emit_move_insn (frame_reg_rtx, ptr_reg);
27382 frame_off = ptr_off;
27383 }
27384 }
27385 else if (!WORLD_SAVE_P (info)
27386 && info->altivec_size != 0)
27387 {
27388 int i;
27389
27390 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27391 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27392 {
27393 rtx areg, savereg, mem;
27394 HOST_WIDE_INT offset;
27395
27396 offset = (info->altivec_save_offset + frame_off
27397 + 16 * (i - info->first_altivec_reg_save));
27398
27399 savereg = gen_rtx_REG (V4SImode, i);
27400
27401 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27402 {
27403 mem = gen_frame_mem (V4SImode,
27404 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27405 GEN_INT (offset)));
27406 insn = emit_insn (gen_rtx_SET (mem, savereg));
27407 areg = NULL_RTX;
27408 }
27409 else
27410 {
27411 NOT_INUSE (0);
27412 areg = gen_rtx_REG (Pmode, 0);
27413 emit_move_insn (areg, GEN_INT (offset));
27414
27415 /* AltiVec addressing mode is [reg+reg]. */
27416 mem = gen_frame_mem (V4SImode,
27417 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27418
27419 /* Rather than emitting a generic move, force use of the stvx
27420 instruction, which we always want on ISA 2.07 (power8) systems.
27421 In particular we don't want xxpermdi/stxvd2x for little
27422 endian. */
27423 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27424 }
27425
27426 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27427 areg, GEN_INT (offset));
27428 }
27429 }
27430
27431 /* VRSAVE is a bit vector representing which AltiVec registers
27432 are used. The OS uses this to determine which vector
27433 registers to save on a context switch. We need to save
27434 VRSAVE on the stack frame, add whatever AltiVec registers we
27435 used in this function, and do the corresponding magic in the
27436 epilogue. */
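/* A sketch of the resulting prologue update (MASK and OFFSET depend
   on which AltiVec registers are live, so both are illustrative):

	mfspr rN,256		# read VRSAVE (SPR 256)
	stw rN,OFFSET(1)	# save the caller's value in the frame
	oris rN,rN,MASK@h	# OR in the bits for the vector regs
	ori rN,rN,MASK@l	# we use in this function
	mtspr 256,rN		# install the updated mask  */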
27437
27438 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27439 {
27440 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27441 be using r12 as frame_reg_rtx and r11 as the static chain
27442 pointer for nested functions. */
27443 int save_regno = 12;
27444 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27445 && !using_static_chain_p)
27446 save_regno = 11;
27447 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27448 {
27449 save_regno = 11;
27450 if (using_static_chain_p)
27451 save_regno = 0;
27452 }
27453 NOT_INUSE (save_regno);
27454
27455 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27456 }
27457
27458 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27459 if (!TARGET_SINGLE_PIC_BASE
27460 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27461 && !constant_pool_empty_p ())
27462 || (DEFAULT_ABI == ABI_V4
27463 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27464 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27465 {
27466 /* If emit_load_toc_table will use the link register, we need to save
27467 it. We use R12 for this purpose because emit_load_toc_table
27468 can use register 0. This allows us to use a plain 'blr' to return
27469 from the procedure more often. */
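/* The resulting shape, sketched:
	mflr 12			# preserve LR in r12
	<TOC setup sequence>	# may clobber LR (and r0)
	mtlr 12			# put LR back  */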
27470 int save_LR_around_toc_setup = (TARGET_ELF
27471 && DEFAULT_ABI == ABI_V4
27472 && flag_pic
27473 && ! info->lr_save_p
27474 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27475 if (save_LR_around_toc_setup)
27476 {
27477 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27478 rtx tmp = gen_rtx_REG (Pmode, 12);
27479
27480 sp_adjust = 0;
27481 insn = emit_move_insn (tmp, lr);
27482 RTX_FRAME_RELATED_P (insn) = 1;
27483
27484 rs6000_emit_load_toc_table (TRUE);
27485
27486 insn = emit_move_insn (lr, tmp);
27487 add_reg_note (insn, REG_CFA_RESTORE, lr);
27488 RTX_FRAME_RELATED_P (insn) = 1;
27489 }
27490 else
27491 rs6000_emit_load_toc_table (TRUE);
27492 }
27493
27494 #if TARGET_MACHO
27495 if (!TARGET_SINGLE_PIC_BASE
27496 && DEFAULT_ABI == ABI_DARWIN
27497 && flag_pic && crtl->uses_pic_offset_table)
27498 {
27499 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27500 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27501
27502 /* Save and restore LR locally around this call (in R0). */
27503 if (!info->lr_save_p)
27504 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27505
27506 emit_insn (gen_load_macho_picbase (src));
27507
27508 emit_move_insn (gen_rtx_REG (Pmode,
27509 RS6000_PIC_OFFSET_TABLE_REGNUM),
27510 lr);
27511
27512 if (!info->lr_save_p)
27513 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27514 }
27515 #endif
27516
27517 /* If we need to, save the TOC register after doing the stack setup.
27518 Do not emit eh frame info for this save. The unwinder wants info,
27519 conceptually attached to instructions in this function, about
27520 register values in the caller of this function. This R2 may have
27521 already been changed from the value in the caller.
27522 We don't attempt to write accurate DWARF EH frame info for R2
27523 because code emitted by gcc for a (non-pointer) function call
27524 doesn't save and restore R2. Instead, R2 is managed out-of-line
27525 by a linker generated plt call stub when the function resides in
27526 a shared library. This behavior is costly to describe in DWARF,
27527 both in terms of the size of DWARF info and the time taken in the
27528 unwinder to interpret it. R2 changes, apart from the
27529 calls_eh_return case earlier in this function, are handled by
27530 linux-unwind.h frob_update_context. */
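/* On 64-bit this is a single store to the ABI-defined TOC save slot,
   e.g. "std 2,24(1)" under ELFv2 (other ABIs place the slot at a
   different offset; RS6000_TOC_SAVE_SLOT supplies it). */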
27531 if (rs6000_save_toc_in_prologue_p ()
27532 && !cfun->machine->toc_is_wrapped_separately)
27533 {
27534 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27535 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27536 }
27537
27538 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27539 if (using_split_stack && split_stack_arg_pointer_used_p ())
27540 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27541 }
27542
27543 /* Output .extern statements for the save/restore routines we use. */
27544
27545 static void
27546 rs6000_output_savres_externs (FILE *file)
27547 {
27548 rs6000_stack_t *info = rs6000_stack_info ();
27549
27550 if (TARGET_DEBUG_STACK)
27551 debug_stack_info (info);
27552
27553 /* Write .extern for any function we will call to save and restore
27554 fp values. */
27555 if (info->first_fp_reg_save < 64
27556 && !TARGET_MACHO
27557 && !TARGET_ELF)
27558 {
27559 char *name;
27560 int regno = info->first_fp_reg_save - 32;
27561
27562 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27563 {
27564 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27565 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27566 name = rs6000_savres_routine_name (regno, sel);
27567 fprintf (file, "\t.extern %s\n", name);
27568 }
27569 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27570 {
27571 bool lr = (info->savres_strategy
27572 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27573 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27574 name = rs6000_savres_routine_name (regno, sel);
27575 fprintf (file, "\t.extern %s\n", name);
27576 }
27577 }
27578 }
27579
27580 /* Write function prologue. */
27581
27582 static void
27583 rs6000_output_function_prologue (FILE *file)
27584 {
27585 if (!cfun->is_thunk)
27586 rs6000_output_savres_externs (file);
27587
27588 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27589 immediately after the global entry point label. */
27590 if (rs6000_global_entry_point_needed_p ())
27591 {
27592 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27593
27594 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27595
27596 if (TARGET_CMODEL != CMODEL_LARGE)
27597 {
27598 /* In the small and medium code models, we assume the TOC is less
27599 than 2 GB away from the text section, so it can be computed via the
27600 following two-instruction sequence. */
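/* I.e., with the LCF label emitted at the addis itself:
   0:	addis 2,12,.TOC.-.LCF0@ha
	addi 2,2,.TOC.-.LCF0@l
   where r12 holds the address of the global entry point. */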
27601 char buf[256];
27602
27603 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27604 fprintf (file, "0:\taddis 2,12,.TOC.-");
27605 assemble_name (file, buf);
27606 fprintf (file, "@ha\n");
27607 fprintf (file, "\taddi 2,2,.TOC.-");
27608 assemble_name (file, buf);
27609 fprintf (file, "@l\n");
27610 }
27611 else
27612 {
27613 /* In the large code model, we allow arbitrary offsets between the
27614 TOC and the text section, so we have to load the offset from
27615 memory. The data field is emitted directly before the global
27616 entry point in rs6000_elf_declare_function_name. */
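/* The emitted sequence is:
	ld 2,.LCL0-.LCF0(12)	# load the stored TOC offset word
	add 2,2,12		# TOC base = entry address + offset  */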
27617 char buf[256];
27618
27619 #ifdef HAVE_AS_ENTRY_MARKERS
27620 /* If supported by the linker, emit a marker relocation. If the
27621 total code size of the final executable or shared library
27622 happens to fit into 2 GB after all, the linker will replace
27623 this code sequence with the sequence for the small or medium
27624 code model. */
27625 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27626 #endif
27627 fprintf (file, "\tld 2,");
27628 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27629 assemble_name (file, buf);
27630 fprintf (file, "-");
27631 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27632 assemble_name (file, buf);
27633 fprintf (file, "(12)\n");
27634 fprintf (file, "\tadd 2,2,12\n");
27635 }
27636
27637 fputs ("\t.localentry\t", file);
27638 assemble_name (file, name);
27639 fputs (",.-", file);
27640 assemble_name (file, name);
27641 fputs ("\n", file);
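/* I.e. ".localentry fname,.-fname", recording the distance from the
   global entry point to the local entry point defined here. */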
27642 }
27643
27644 /* Output -mprofile-kernel code. This needs to be done here instead of
27645 in output_function_profile since it must go after the ELFv2 ABI
27646 local entry point. */
27647 if (TARGET_PROFILE_KERNEL && crtl->profile)
27648 {
27649 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27650 gcc_assert (!TARGET_32BIT);
27651
27652 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27653
27654 /* In the ELFv2 ABI we have no compiler stack word. It must be
27655 the responsibility of _mcount to preserve the static chain
27656 register if required. */
27657 if (DEFAULT_ABI != ABI_ELFv2
27658 && cfun->static_chain_decl != NULL)
27659 {
27660 asm_fprintf (file, "\tstd %s,24(%s)\n",
27661 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27662 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27663 asm_fprintf (file, "\tld %s,24(%s)\n",
27664 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27665 }
27666 else
27667 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27668 }
27669
27670 rs6000_pic_labelno++;
27671 }
27672
27673 /* -mprofile-kernel code calls mcount before the function prologue,
27674 so a profiled leaf function should stay a leaf function. */
27675 static bool
27676 rs6000_keep_leaf_when_profiled ()
27677 {
27678 return TARGET_PROFILE_KERNEL;
27679 }
27680
27681 /* Non-zero if vmx regs are restored before the frame pop, zero if
27682 we restore after the pop when possible. */
27683 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27684
27685 /* Restoring cr is a two-step process: loading a reg from the frame
27686 save, then moving the reg to cr. For ABI_V4 we must let the
27687 unwinder know that the stack location is no longer valid at or
27688 before the stack deallocation, but we can't emit a cfa_restore for
27689 cr at the stack deallocation like we do for other registers.
27690 The trouble is that it is possible for the move to cr to be
27691 scheduled after the stack deallocation. So say exactly where cr
27692 is located on each of the two insns. */
27693
27694 static rtx
27695 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27696 {
27697 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27698 rtx reg = gen_rtx_REG (SImode, regno);
27699 rtx_insn *insn = emit_move_insn (reg, mem);
27700
27701 if (!exit_func && DEFAULT_ABI == ABI_V4)
27702 {
27703 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27704 rtx set = gen_rtx_SET (reg, cr);
27705
27706 add_reg_note (insn, REG_CFA_REGISTER, set);
27707 RTX_FRAME_RELATED_P (insn) = 1;
27708 }
27709 return reg;
27710 }
27711
27712 /* Reload CR from REG. */
27713
27714 static void
27715 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
27716 {
27717 int count = 0;
27718 int i;
27719
27720 if (using_mfcr_multiple)
27721 {
27722 for (i = 0; i < 8; i++)
27723 if (save_reg_p (CR0_REGNO + i))
27724 count++;
27725 gcc_assert (count);
27726 }
27727
27728 if (using_mfcr_multiple && count > 1)
27729 {
27730 rtx_insn *insn;
27731 rtvec p;
27732 int ndx;
27733
27734 p = rtvec_alloc (count);
27735
27736 ndx = 0;
27737 for (i = 0; i < 8; i++)
27738 if (save_reg_p (CR0_REGNO + i))
27739 {
27740 rtvec r = rtvec_alloc (2);
27741 RTVEC_ELT (r, 0) = reg;
27742 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
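/* 1 << (7-i) is the FXM mask bit that selects CR field i in the
   eventual mtcrf/mtocrf instruction. */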
27743 RTVEC_ELT (p, ndx) =
27744 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27745 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27746 ndx++;
27747 }
27748 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27749 gcc_assert (ndx == count);
27750
27751 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27752 CR field separately. */
27753 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27754 {
27755 for (i = 0; i < 8; i++)
27756 if (save_reg_p (CR0_REGNO + i))
27757 add_reg_note (insn, REG_CFA_RESTORE,
27758 gen_rtx_REG (SImode, CR0_REGNO + i));
27759
27760 RTX_FRAME_RELATED_P (insn) = 1;
27761 }
27762 }
27763 else
27764 for (i = 0; i < 8; i++)
27765 if (save_reg_p (CR0_REGNO + i))
27766 {
27767 rtx insn = emit_insn (gen_movsi_to_cr_one
27768 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27769
27770 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27771 CR field separately, attached to the insn that in fact
27772 restores this particular CR field. */
27773 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27774 {
27775 add_reg_note (insn, REG_CFA_RESTORE,
27776 gen_rtx_REG (SImode, CR0_REGNO + i));
27777
27778 RTX_FRAME_RELATED_P (insn) = 1;
27779 }
27780 }
27781
27782 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27783 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27784 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27785 {
27786 rtx_insn *insn = get_last_insn ();
27787 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27788
27789 add_reg_note (insn, REG_CFA_RESTORE, cr);
27790 RTX_FRAME_RELATED_P (insn) = 1;
27791 }
27792 }
27793
27794 /* Like cr, the move to lr instruction can be scheduled after the
27795 stack deallocation, but unlike cr, its stack frame save is still
27796 valid. So we only need to emit the cfa_restore on the correct
27797 instruction. */
27798
27799 static void
27800 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27801 {
27802 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27803 rtx reg = gen_rtx_REG (Pmode, regno);
27804
27805 emit_move_insn (reg, mem);
27806 }
27807
27808 static void
27809 restore_saved_lr (int regno, bool exit_func)
27810 {
27811 rtx reg = gen_rtx_REG (Pmode, regno);
27812 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27813 rtx_insn *insn = emit_move_insn (lr, reg);
27814
27815 if (!exit_func && flag_shrink_wrap)
27816 {
27817 add_reg_note (insn, REG_CFA_RESTORE, lr);
27818 RTX_FRAME_RELATED_P (insn) = 1;
27819 }
27820 }
27821
27822 static rtx
27823 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27824 {
27825 if (DEFAULT_ABI == ABI_ELFv2)
27826 {
27827 int i;
27828 for (i = 0; i < 8; i++)
27829 if (save_reg_p (CR0_REGNO + i))
27830 {
27831 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27832 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27833 cfa_restores);
27834 }
27835 }
27836 else if (info->cr_save_p)
27837 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27838 gen_rtx_REG (SImode, CR2_REGNO),
27839 cfa_restores);
27840
27841 if (info->lr_save_p)
27842 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27843 gen_rtx_REG (Pmode, LR_REGNO),
27844 cfa_restores);
27845 return cfa_restores;
27846 }
27847
27848 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27849 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27850 below the stack pointer that are not clobbered by signals. */
27851
27852 static inline bool
27853 offset_below_red_zone_p (HOST_WIDE_INT offset)
27854 {
27855 return offset < (DEFAULT_ABI == ABI_V4
27856 ? 0
27857 : TARGET_32BIT ? -220 : -288);
27858 }
27859
27860 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27861
27862 static void
27863 emit_cfa_restores (rtx cfa_restores)
27864 {
27865 rtx_insn *insn = get_last_insn ();
27866 rtx *loc = &REG_NOTES (insn);
27867
27868 while (*loc)
27869 loc = &XEXP (*loc, 1);
27870 *loc = cfa_restores;
27871 RTX_FRAME_RELATED_P (insn) = 1;
27872 }
27873
27874 /* Emit function epilogue as insns. */
27875
27876 void
27877 rs6000_emit_epilogue (int sibcall)
27878 {
27879 rs6000_stack_t *info;
27880 int restoring_GPRs_inline;
27881 int restoring_FPRs_inline;
27882 int using_load_multiple;
27883 int using_mtcr_multiple;
27884 int use_backchain_to_restore_sp;
27885 int restore_lr;
27886 int strategy;
27887 HOST_WIDE_INT frame_off = 0;
27888 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27889 rtx frame_reg_rtx = sp_reg_rtx;
27890 rtx cfa_restores = NULL_RTX;
27891 rtx insn;
27892 rtx cr_save_reg = NULL_RTX;
27893 machine_mode reg_mode = Pmode;
27894 int reg_size = TARGET_32BIT ? 4 : 8;
27895 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27896 int fp_reg_size = 8;
27897 int i;
27898 bool exit_func;
27899 unsigned ptr_regno;
27900
27901 info = rs6000_stack_info ();
27902
27903 strategy = info->savres_strategy;
27904 using_load_multiple = strategy & REST_MULTIPLE;
27905 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
27906 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
27907 using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27908 || rs6000_tune == PROCESSOR_PPC603
27909 || rs6000_tune == PROCESSOR_PPC750
27910 || optimize_size);
27911 /* Restore via the backchain when we have a large frame, since this
27912 is more efficient than an addis, addi pair. The second condition
27913 here will not trigger at the moment; we don't actually need a
27914 frame pointer for alloca, but the generic parts of the compiler
27915 give us one anyway. */
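/* The backchain restore is then a single load of the caller's SP from
   the backchain word at offset 0, e.g. "ld 11,0(1)" on 64-bit, rather
   than reconstructing it with an addis/addi pair. */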
27916 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
27917 ? info->lr_save_offset
27918 : 0) > 32767
27919 || (cfun->calls_alloca
27920 && !frame_pointer_needed));
27921 restore_lr = (info->lr_save_p
27922 && (restoring_FPRs_inline
27923 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27924 && (restoring_GPRs_inline
27925 || info->first_fp_reg_save < 64)
27926 && !cfun->machine->lr_is_wrapped_separately);
27927
27928
27929 if (WORLD_SAVE_P (info))
27930 {
27931 int i, j;
27932 char rname[30];
27933 const char *alloc_rname;
27934 rtvec p;
27935
27936 /* eh_rest_world_r10 will return to the location saved in the LR
27937 stack slot (which is not likely to be our caller).
27938 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27939 rest_world is similar, except any R10 parameter is ignored.
27940 The exception-handling stuff that was here in 2.95 is no
27941 longer necessary. */
27942
27943 p = rtvec_alloc (9
27944 + 32 - info->first_gp_reg_save
27945 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27946 + 63 + 1 - info->first_fp_reg_save);
27947
27948 strcpy (rname, ((crtl->calls_eh_return) ?
27949 "*eh_rest_world_r10" : "*rest_world"));
27950 alloc_rname = ggc_strdup (rname);
27951
27952 j = 0;
27953 RTVEC_ELT (p, j++) = ret_rtx;
27954 RTVEC_ELT (p, j++)
27955 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
27956 /* The instruction pattern requires a clobber here;
27957 it is shared with the restVEC helper. */
27958 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
27959
27960 {
27961 /* CR register traditionally saved as CR2. */
27962 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27963 RTVEC_ELT (p, j++)
27964 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27965 if (flag_shrink_wrap)
27966 {
27967 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27968 gen_rtx_REG (Pmode, LR_REGNO),
27969 cfa_restores);
27970 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27971 }
27972 }
27973
27974 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27975 {
27976 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27977 RTVEC_ELT (p, j++)
27978 = gen_frame_load (reg,
27979 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27980 if (flag_shrink_wrap
27981 && save_reg_p (info->first_gp_reg_save + i))
27982 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27983 }
27984 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27985 {
27986 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27987 RTVEC_ELT (p, j++)
27988 = gen_frame_load (reg,
27989 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27990 if (flag_shrink_wrap
27991 && save_reg_p (info->first_altivec_reg_save + i))
27992 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27993 }
27994 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27995 {
27996 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27997 info->first_fp_reg_save + i);
27998 RTVEC_ELT (p, j++)
27999 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
28000 if (flag_shrink_wrap
28001 && save_reg_p (info->first_fp_reg_save + i))
28002 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28003 }
28004 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
28005 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
28006 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
28007 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
28008 RTVEC_ELT (p, j++)
28009 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
28010 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28011
28012 if (flag_shrink_wrap)
28013 {
28014 REG_NOTES (insn) = cfa_restores;
28015 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28016 RTX_FRAME_RELATED_P (insn) = 1;
28017 }
28018 return;
28019 }
28020
28021 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
28022 if (info->push_p)
28023 frame_off = info->total_size;
28024
28025 /* Restore AltiVec registers if we must do so before adjusting the
28026 stack. */
28027 if (info->altivec_size != 0
28028 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28029 || (DEFAULT_ABI != ABI_V4
28030 && offset_below_red_zone_p (info->altivec_save_offset))))
28031 {
28032 int i;
28033 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28034
28035 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28036 if (use_backchain_to_restore_sp)
28037 {
28038 int frame_regno = 11;
28039
28040 if ((strategy & REST_INLINE_VRS) == 0)
28041 {
28042 /* Of r11 and r12, select the one not clobbered by an
28043 out-of-line restore function for the frame register. */
28044 frame_regno = 11 + 12 - scratch_regno;
28045 }
28046 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28047 emit_move_insn (frame_reg_rtx,
28048 gen_rtx_MEM (Pmode, sp_reg_rtx));
28049 frame_off = 0;
28050 }
28051 else if (frame_pointer_needed)
28052 frame_reg_rtx = hard_frame_pointer_rtx;
28053
28054 if ((strategy & REST_INLINE_VRS) == 0)
28055 {
28056 int end_save = info->altivec_save_offset + info->altivec_size;
28057 int ptr_off;
28058 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28059 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28060
28061 if (end_save + frame_off != 0)
28062 {
28063 rtx offset = GEN_INT (end_save + frame_off);
28064
28065 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28066 }
28067 else
28068 emit_move_insn (ptr_reg, frame_reg_rtx);
28069
28070 ptr_off = -end_save;
28071 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28072 info->altivec_save_offset + ptr_off,
28073 0, V4SImode, SAVRES_VR);
28074 }
28075 else
28076 {
28077 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28078 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28079 {
28080 rtx addr, areg, mem, insn;
28081 rtx reg = gen_rtx_REG (V4SImode, i);
28082 HOST_WIDE_INT offset
28083 = (info->altivec_save_offset + frame_off
28084 + 16 * (i - info->first_altivec_reg_save));
28085
28086 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28087 {
28088 mem = gen_frame_mem (V4SImode,
28089 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28090 GEN_INT (offset)));
28091 insn = gen_rtx_SET (reg, mem);
28092 }
28093 else
28094 {
28095 areg = gen_rtx_REG (Pmode, 0);
28096 emit_move_insn (areg, GEN_INT (offset));
28097
28098 /* AltiVec addressing mode is [reg+reg]. */
28099 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28100 mem = gen_frame_mem (V4SImode, addr);
28101
28102 /* Rather than emitting a generic move, force use of the
28103 lvx instruction, which we always want. In particular we
28104 don't want lxvd2x/xxpermdi for little endian. */
28105 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28106 }
28107
28108 (void) emit_insn (insn);
28109 }
28110 }
28111
28112 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28113 if (((strategy & REST_INLINE_VRS) == 0
28114 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28115 && (flag_shrink_wrap
28116 || (offset_below_red_zone_p
28117 (info->altivec_save_offset
28118 + 16 * (i - info->first_altivec_reg_save))))
28119 && save_reg_p (i))
28120 {
28121 rtx reg = gen_rtx_REG (V4SImode, i);
28122 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28123 }
28124 }
28125
28126 /* Restore VRSAVE if we must do so before adjusting the stack. */
28127 if (info->vrsave_size != 0
28128 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28129 || (DEFAULT_ABI != ABI_V4
28130 && offset_below_red_zone_p (info->vrsave_save_offset))))
28131 {
28132 rtx reg;
28133
28134 if (frame_reg_rtx == sp_reg_rtx)
28135 {
28136 if (use_backchain_to_restore_sp)
28137 {
28138 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28139 emit_move_insn (frame_reg_rtx,
28140 gen_rtx_MEM (Pmode, sp_reg_rtx));
28141 frame_off = 0;
28142 }
28143 else if (frame_pointer_needed)
28144 frame_reg_rtx = hard_frame_pointer_rtx;
28145 }
28146
28147 reg = gen_rtx_REG (SImode, 12);
28148 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28149 info->vrsave_save_offset + frame_off));
28150
28151 emit_insn (generate_set_vrsave (reg, info, 1));
28152 }
28153
28154 insn = NULL_RTX;
28155 /* If we have a large stack frame, restore the old stack pointer
28156 using the backchain. */
28157 if (use_backchain_to_restore_sp)
28158 {
28159 if (frame_reg_rtx == sp_reg_rtx)
28160 {
28161 /* Under V.4, don't reset the stack pointer until after we're done
28162 loading the saved registers. */
28163 if (DEFAULT_ABI == ABI_V4)
28164 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28165
28166 insn = emit_move_insn (frame_reg_rtx,
28167 gen_rtx_MEM (Pmode, sp_reg_rtx));
28168 frame_off = 0;
28169 }
28170 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28171 && DEFAULT_ABI == ABI_V4)
28172 /* frame_reg_rtx has been set up by the altivec restore. */
28173 ;
28174 else
28175 {
28176 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28177 frame_reg_rtx = sp_reg_rtx;
28178 }
28179 }
28180 /* If we have a frame pointer, we can restore the old stack pointer
28181 from it. */
28182 else if (frame_pointer_needed)
28183 {
28184 frame_reg_rtx = sp_reg_rtx;
28185 if (DEFAULT_ABI == ABI_V4)
28186 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28187 /* Prevent reordering memory accesses against stack pointer restore. */
28188 else if (cfun->calls_alloca
28189 || offset_below_red_zone_p (-info->total_size))
28190 rs6000_emit_stack_tie (frame_reg_rtx, true);
28191
28192 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28193 GEN_INT (info->total_size)));
28194 frame_off = 0;
28195 }
28196 else if (info->push_p
28197 && DEFAULT_ABI != ABI_V4
28198 && !crtl->calls_eh_return)
28199 {
28200 /* Prevent reordering memory accesses against stack pointer restore. */
28201 if (cfun->calls_alloca
28202 || offset_below_red_zone_p (-info->total_size))
28203 rs6000_emit_stack_tie (frame_reg_rtx, false);
28204 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28205 GEN_INT (info->total_size)));
28206 frame_off = 0;
28207 }
28208 if (insn && frame_reg_rtx == sp_reg_rtx)
28209 {
28210 if (cfa_restores)
28211 {
28212 REG_NOTES (insn) = cfa_restores;
28213 cfa_restores = NULL_RTX;
28214 }
28215 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28216 RTX_FRAME_RELATED_P (insn) = 1;
28217 }
28218
28219 /* Restore AltiVec registers if we have not done so already. */
28220 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28221 && info->altivec_size != 0
28222 && (DEFAULT_ABI == ABI_V4
28223 || !offset_below_red_zone_p (info->altivec_save_offset)))
28224 {
28225 int i;
28226
28227 if ((strategy & REST_INLINE_VRS) == 0)
28228 {
28229 int end_save = info->altivec_save_offset + info->altivec_size;
28230 int ptr_off;
28231 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28232 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28233 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28234
28235 if (end_save + frame_off != 0)
28236 {
28237 rtx offset = GEN_INT (end_save + frame_off);
28238
28239 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28240 }
28241 else
28242 emit_move_insn (ptr_reg, frame_reg_rtx);
28243
28244 ptr_off = -end_save;
28245 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28246 info->altivec_save_offset + ptr_off,
28247 0, V4SImode, SAVRES_VR);
28248 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28249 {
28250 /* Frame reg was clobbered by out-of-line save. Restore it
28251 from ptr_reg, and if we are calling out-of-line gpr or
28252 fpr restore set up the correct pointer and offset. */
28253 unsigned newptr_regno = 1;
28254 if (!restoring_GPRs_inline)
28255 {
28256 bool lr = info->gp_save_offset + info->gp_size == 0;
28257 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28258 newptr_regno = ptr_regno_for_savres (sel);
28259 end_save = info->gp_save_offset + info->gp_size;
28260 }
28261 else if (!restoring_FPRs_inline)
28262 {
28263 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28264 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28265 newptr_regno = ptr_regno_for_savres (sel);
28266 end_save = info->fp_save_offset + info->fp_size;
28267 }
28268
28269 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28270 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28271
28272 if (end_save + ptr_off != 0)
28273 {
28274 rtx offset = GEN_INT (end_save + ptr_off);
28275
28276 frame_off = -end_save;
28277 if (TARGET_32BIT)
28278 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28279 ptr_reg, offset));
28280 else
28281 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28282 ptr_reg, offset));
28283 }
28284 else
28285 {
28286 frame_off = ptr_off;
28287 emit_move_insn (frame_reg_rtx, ptr_reg);
28288 }
28289 }
28290 }
28291 else
28292 {
28293 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28294 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28295 {
28296 rtx addr, areg, mem, insn;
28297 rtx reg = gen_rtx_REG (V4SImode, i);
28298 HOST_WIDE_INT offset
28299 = (info->altivec_save_offset + frame_off
28300 + 16 * (i - info->first_altivec_reg_save));
28301
28302 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28303 {
28304 mem = gen_frame_mem (V4SImode,
28305 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28306 GEN_INT (offset)));
28307 insn = gen_rtx_SET (reg, mem);
28308 }
28309 else
28310 {
28311 areg = gen_rtx_REG (Pmode, 0);
28312 emit_move_insn (areg, GEN_INT (offset));
28313
28314 /* AltiVec addressing mode is [reg+reg]. */
28315 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28316 mem = gen_frame_mem (V4SImode, addr);
28317
28318 /* Rather than emitting a generic move, force use of the
28319 lvx instruction, which we always want. In particular we
28320 don't want lxvd2x/xxpermdi for little endian. */
28321 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28322 }
28323
28324 (void) emit_insn (insn);
28325 }
28326 }
28327
28328 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28329 if (((strategy & REST_INLINE_VRS) == 0
28330 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28331 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28332 && save_reg_p (i))
28333 {
28334 rtx reg = gen_rtx_REG (V4SImode, i);
28335 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28336 }
28337 }
28338
28339 /* Restore VRSAVE if we have not done so already. */
28340 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28341 && info->vrsave_size != 0
28342 && (DEFAULT_ABI == ABI_V4
28343 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28344 {
28345 rtx reg;
28346
28347 reg = gen_rtx_REG (SImode, 12);
28348 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28349 info->vrsave_save_offset + frame_off));
28350
28351 emit_insn (generate_set_vrsave (reg, info, 1));
28352 }
28353
28354 /* If we exit by an out-of-line restore function on ABI_V4 then that
28355 function will deallocate the stack, so we don't need to worry
28356 about the unwinder restoring cr from an invalid stack frame
28357 location. */
28358 exit_func = (!restoring_FPRs_inline
28359 || (!restoring_GPRs_inline
28360 && info->first_fp_reg_save == 64));
28361
28362 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28363 *separate* slots if the routine calls __builtin_eh_return, so
28364 that they can be independently restored by the unwinder. */
28365 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28366 {
28367 int i, cr_off = info->ehcr_offset;
28368
28369 for (i = 0; i < 8; i++)
28370 if (!call_used_regs[CR0_REGNO + i])
28371 {
28372 rtx reg = gen_rtx_REG (SImode, 0);
28373 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28374 cr_off + frame_off));
28375
28376 insn = emit_insn (gen_movsi_to_cr_one
28377 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28378
28379 if (!exit_func && flag_shrink_wrap)
28380 {
28381 add_reg_note (insn, REG_CFA_RESTORE,
28382 gen_rtx_REG (SImode, CR0_REGNO + i));
28383
28384 RTX_FRAME_RELATED_P (insn) = 1;
28385 }
28386
28387 cr_off += reg_size;
28388 }
28389 }
28390
28391 /* Get the old lr if we saved it. If we are restoring registers
28392 out-of-line, then the out-of-line routines can do this for us. */
28393 if (restore_lr && restoring_GPRs_inline)
28394 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28395
28396 /* Get the old cr if we saved it. */
28397 if (info->cr_save_p)
28398 {
28399 unsigned cr_save_regno = 12;
28400
28401 if (!restoring_GPRs_inline)
28402 {
28403 /* Ensure we don't use the register used by the out-of-line
28404 gpr register restore below. */
28405 bool lr = info->gp_save_offset + info->gp_size == 0;
28406 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28407 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28408
28409 if (gpr_ptr_regno == 12)
28410 cr_save_regno = 11;
28411 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28412 }
28413 else if (REGNO (frame_reg_rtx) == 12)
28414 cr_save_regno = 11;
28415
28416 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28417 info->cr_save_offset + frame_off,
28418 exit_func);
28419 }
28420
28421 /* Set LR here to try to overlap restores below. */
28422 if (restore_lr && restoring_GPRs_inline)
28423 restore_saved_lr (0, exit_func);
28424
28425 /* Load exception handler data registers, if needed. */
28426 if (crtl->calls_eh_return)
28427 {
28428 unsigned int i, regno;
28429
28430 if (TARGET_AIX)
28431 {
28432 rtx reg = gen_rtx_REG (reg_mode, 2);
28433 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28434 frame_off + RS6000_TOC_SAVE_SLOT));
28435 }
28436
28437 for (i = 0; ; ++i)
28438 {
28439 rtx mem;
28440
28441 regno = EH_RETURN_DATA_REGNO (i);
28442 if (regno == INVALID_REGNUM)
28443 break;
28444
28445 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28446 info->ehrd_offset + frame_off
28447 + reg_size * (int) i);
28448
28449 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28450 }
28451 }
28452
28453 /* Restore GPRs. This is done as a PARALLEL if we are using
28454 the load-multiple instructions. */
28455 if (!restoring_GPRs_inline)
28456 {
28457 /* We are jumping to an out-of-line function. */
28458 rtx ptr_reg;
28459 int end_save = info->gp_save_offset + info->gp_size;
28460 bool can_use_exit = end_save == 0;
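/* Sketch of the intent: when end_save == 0 the GPR save area ends at
   the incoming stack pointer, so the "exit" variant of the out-of-line
   routine (note SAVRES_LR below) can restore LR, pop the frame, and
   return directly to our caller. */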
28461 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28462 int ptr_off;
28463
28464 /* Emit stack reset code if we need it. */
28465 ptr_regno = ptr_regno_for_savres (sel);
28466 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28467 if (can_use_exit)
28468 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28469 else if (end_save + frame_off != 0)
28470 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28471 GEN_INT (end_save + frame_off)));
28472 else if (REGNO (frame_reg_rtx) != ptr_regno)
28473 emit_move_insn (ptr_reg, frame_reg_rtx);
28474 if (REGNO (frame_reg_rtx) == ptr_regno)
28475 frame_off = -end_save;
28476
28477 if (can_use_exit && info->cr_save_p)
28478 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28479
28480 ptr_off = -end_save;
28481 rs6000_emit_savres_rtx (info, ptr_reg,
28482 info->gp_save_offset + ptr_off,
28483 info->lr_save_offset + ptr_off,
28484 reg_mode, sel);
28485 }
28486 else if (using_load_multiple)
28487 {
28488 rtvec p;
28489 p = rtvec_alloc (32 - info->first_gp_reg_save);
28490 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28491 RTVEC_ELT (p, i)
28492 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28493 frame_reg_rtx,
28494 info->gp_save_offset + frame_off + reg_size * i);
28495 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28496 }
28497 else
28498 {
28499 int offset = info->gp_save_offset + frame_off;
28500 for (i = info->first_gp_reg_save; i < 32; i++)
28501 {
28502 if (save_reg_p (i)
28503 && !cfun->machine->gpr_is_wrapped_separately[i])
28504 {
28505 rtx reg = gen_rtx_REG (reg_mode, i);
28506 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28507 }
28508
28509 offset += reg_size;
28510 }
28511 }
28512
28513 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28514 {
28515 /* If the frame pointer was used then we can't delay emitting
28516 a REG_CFA_DEF_CFA note. This must happen on the insn that
28517 restores the frame pointer, r31. We may have already emitted
28518 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28519 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28520 be harmless if emitted. */
28521 if (frame_pointer_needed)
28522 {
28523 insn = get_last_insn ();
28524 add_reg_note (insn, REG_CFA_DEF_CFA,
28525 plus_constant (Pmode, frame_reg_rtx, frame_off));
28526 RTX_FRAME_RELATED_P (insn) = 1;
28527 }
28528
28529 /* Set up cfa_restores. We always need these when
28530 shrink-wrapping. If not shrink-wrapping then we only need
28531 the cfa_restore when the stack location is no longer valid.
28532 The cfa_restores must be emitted on or before the insn that
28533 invalidates the stack, and of course must not be emitted
28534 before the insn that actually does the restore. The latter
28535 is why it is a bad idea to emit the cfa_restores as a group
28536 on the last instruction here that actually does a restore:
28537 That insn may be reordered with respect to others doing
28538 restores. */
28539 if (flag_shrink_wrap
28540 && !restoring_GPRs_inline
28541 && info->first_fp_reg_save == 64)
28542 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28543
28544 for (i = info->first_gp_reg_save; i < 32; i++)
28545 if (save_reg_p (i)
28546 && !cfun->machine->gpr_is_wrapped_separately[i])
28547 {
28548 rtx reg = gen_rtx_REG (reg_mode, i);
28549 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28550 }
28551 }
28552
28553 if (!restoring_GPRs_inline
28554 && info->first_fp_reg_save == 64)
28555 {
28556 /* We are jumping to an out-of-line function. */
28557 if (cfa_restores)
28558 emit_cfa_restores (cfa_restores);
28559 return;
28560 }
28561
28562 if (restore_lr && !restoring_GPRs_inline)
28563 {
28564 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28565 restore_saved_lr (0, exit_func);
28566 }
28567
28568 /* Restore fpr's if we need to do it without calling a function. */
28569 if (restoring_FPRs_inline)
28570 {
28571 int offset = info->fp_save_offset + frame_off;
28572 for (i = info->first_fp_reg_save; i < 64; i++)
28573 {
28574 if (save_reg_p (i)
28575 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28576 {
28577 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28578 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28579 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28580 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28581 cfa_restores);
28582 }
28583
28584 offset += fp_reg_size;
28585 }
28586 }
28587
28588 /* If we saved cr, restore it here. Just those that were used. */
28589 if (info->cr_save_p)
28590 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28591
28592 /* If this is V.4, unwind the stack pointer after all of the loads
28593 have been done, or set up r11 if we are restoring fp out of line. */
28594 ptr_regno = 1;
28595 if (!restoring_FPRs_inline)
28596 {
28597 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28598 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28599 ptr_regno = ptr_regno_for_savres (sel);
28600 }
28601
28602 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28603 if (REGNO (frame_reg_rtx) == ptr_regno)
28604 frame_off = 0;
28605
28606 if (insn && restoring_FPRs_inline)
28607 {
28608 if (cfa_restores)
28609 {
28610 REG_NOTES (insn) = cfa_restores;
28611 cfa_restores = NULL_RTX;
28612 }
28613 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28614 RTX_FRAME_RELATED_P (insn) = 1;
28615 }
28616
28617 if (crtl->calls_eh_return)
28618 {
28619 rtx sa = EH_RETURN_STACKADJ_RTX;
28620 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28621 }
28622
28623 if (!sibcall && restoring_FPRs_inline)
28624 {
28625 if (cfa_restores)
28626 {
28627 /* We can't hang the cfa_restores off a simple return,
28628 since the shrink-wrap code sometimes uses an existing
28629 return. This means there might be a path from
28630 pre-prologue code to this return, and dwarf2cfi code
28631 wants the eh_frame unwinder state to be the same on
28632 all paths to any point. So we need to emit the
28633 cfa_restores before the return. For -m64 we really
28634 don't need epilogue cfa_restores at all, except for
28635 this irritating dwarf2cfi-with-shrink-wrap
28636 requirement; the stack red-zone means eh_frame info
28637 from the prologue telling the unwinder to restore
28638 from the stack is perfectly good right to the end of
28639 the function. */
28640 emit_insn (gen_blockage ());
28641 emit_cfa_restores (cfa_restores);
28642 cfa_restores = NULL_RTX;
28643 }
28644
28645 emit_jump_insn (targetm.gen_simple_return ());
28646 }
28647
28648 if (!sibcall && !restoring_FPRs_inline)
28649 {
28650 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28651 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28652 int elt = 0;
28653 RTVEC_ELT (p, elt++) = ret_rtx;
28654 if (lr)
28655 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28656
28657 /* We have to restore more than two FP registers, so branch to the
28658 restore function. It will return to our caller. */
28659 int i;
28660 int reg;
28661 rtx sym;
28662
28663 if (flag_shrink_wrap)
28664 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28665
28666 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28667 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28668 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28669 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28670
28671 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28672 {
28673 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28674
28675 RTVEC_ELT (p, elt++)
28676 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28677 if (flag_shrink_wrap
28678 && save_reg_p (info->first_fp_reg_save + i))
28679 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28680 }
28681
28682 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28683 }
28684
28685 if (cfa_restores)
28686 {
28687 if (sibcall)
28688 /* Ensure the cfa_restores are hung off an insn that won't
28689 be reordered above other restores. */
28690 emit_insn (gen_blockage ());
28691
28692 emit_cfa_restores (cfa_restores);
28693 }
28694 }
28695
28696 /* Write function epilogue. */
28697
28698 static void
28699 rs6000_output_function_epilogue (FILE *file)
28700 {
28701 #if TARGET_MACHO
28702 macho_branch_islands ();
28703
28704 {
28705 rtx_insn *insn = get_last_insn ();
28706 rtx_insn *deleted_debug_label = NULL;
28707
28708 /* Mach-O doesn't support labels at the end of objects, so if
28709 it looks like we might want one, take special action.
28710
28711 First, collect any sequence of deleted debug labels. */
28712 while (insn
28713 && NOTE_P (insn)
28714 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28715 {
28716 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes, don't insert a nop;
28717 instead set their CODE_LABEL_NUMBER to -1, since otherwise
28718 there would be code generation differences
28719 between -g and -g0. */
28720 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28721 deleted_debug_label = insn;
28722 insn = PREV_INSN (insn);
28723 }
28724
28725 /* Second, if we have:
28726 label:
28727 barrier
28728 then this needs to be detected, so skip past the barrier. */
28729
28730 if (insn && BARRIER_P (insn))
28731 insn = PREV_INSN (insn);
28732
28733 /* Up to now we've only seen notes or barriers. */
28734 if (insn)
28735 {
28736 if (LABEL_P (insn)
28737 || (NOTE_P (insn)
28738 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28739 /* Trailing label: <barrier>. */
28740 fputs ("\tnop\n", file);
28741 else
28742 {
28743 /* Lastly, see if we have a completely empty function body. */
28744 while (insn && ! INSN_P (insn))
28745 insn = PREV_INSN (insn);
28746 /* If we don't find any insns, we've got an empty function body;
28747 i.e. completely empty, without a return or branch. This is
28748 taken as the case where a function body has been removed
28749 because it contains an inline __builtin_unreachable(). GCC
28750 states that reaching __builtin_unreachable() means UB so we're
28751 not obliged to do anything special; however, we want
28752 non-zero-sized function bodies. To meet this, and help the
28753 user out, let's trap the case. */
28754 if (insn == NULL)
28755 fputs ("\ttrap\n", file);
28756 }
28757 }
28758 else if (deleted_debug_label)
28759 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28760 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28761 CODE_LABEL_NUMBER (insn) = -1;
28762 }
28763 #endif
28764
28765 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28766 on its format.
28767
28768 We don't output a traceback table if -finhibit-size-directive was
28769 used. The documentation for -finhibit-size-directive reads
28770 ``don't output a @code{.size} assembler directive, or anything
28771 else that would cause trouble if the function is split in the
28772 middle, and the two halves are placed at locations far apart in
28773 memory.'' The traceback table has this property, since it
28774 includes the offset from the start of the function to the
28775 traceback table itself.
28776
28777 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28778 different traceback table. */
28779 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28780 && ! flag_inhibit_size_directive
28781 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28782 {
28783 const char *fname = NULL;
28784 const char *language_string = lang_hooks.name;
28785 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28786 int i;
28787 int optional_tbtab;
28788 rs6000_stack_t *info = rs6000_stack_info ();
28789
28790 if (rs6000_traceback == traceback_full)
28791 optional_tbtab = 1;
28792 else if (rs6000_traceback == traceback_part)
28793 optional_tbtab = 0;
28794 else
28795 optional_tbtab = !optimize_size && !TARGET_ELF;
28796
28797 if (optional_tbtab)
28798 {
28799 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28800 while (*fname == '.') /* V.4 encodes . in the name */
28801 fname++;
28802
28803 /* Need label immediately before tbtab, so we can compute
28804 its offset from the function start. */
28805 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28806 ASM_OUTPUT_LABEL (file, fname);
28807 }
28808
28809 /* The .tbtab pseudo-op can only be used for the first eight
28810 expressions, since it can't handle the possibly variable
28811 length fields that follow. However, if you omit the optional
28812 fields, the assembler outputs zeros for all optional fields
28813 anyway, giving each variable-length field its minimum length
28814 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28815 pseudo-op at all. */
28816
28817 /* An all-zero word flags the start of the tbtab, for debuggers
28818 that have to find it by searching forward from the entry
28819 point or from the current pc. */
28820 fputs ("\t.long 0\n", file);
28821
28822 /* Tbtab format type. Use format type 0. */
28823 fputs ("\t.byte 0,", file);
28824
28825 /* Language type. Unfortunately, there does not seem to be any
28826 official way to discover the language being compiled, so we
28827 use language_string.
28828 C is 0. Fortran is 1. Ada is 3. C++ is 9.
28829 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28830 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
28831 numbers either, so for now use 0. */
28832 if (lang_GNU_C ()
28833 || ! strcmp (language_string, "GNU GIMPLE")
28834 || ! strcmp (language_string, "GNU Go")
28835 || ! strcmp (language_string, "GNU D")
28836 || ! strcmp (language_string, "libgccjit"))
28837 i = 0;
28838 else if (! strcmp (language_string, "GNU F77")
28839 || lang_GNU_Fortran ())
28840 i = 1;
28841 else if (! strcmp (language_string, "GNU Ada"))
28842 i = 3;
28843 else if (lang_GNU_CXX ()
28844 || ! strcmp (language_string, "GNU Objective-C++"))
28845 i = 9;
28846 else if (! strcmp (language_string, "GNU Java"))
28847 i = 13;
28848 else if (! strcmp (language_string, "GNU Objective-C"))
28849 i = 14;
28850 else
28851 gcc_unreachable ();
28852 fprintf (file, "%d,", i);
28853
28854 /* 8 single bit fields: global linkage (not set for C extern linkage,
28855 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28856 from start of procedure stored in tbtab, internal function, function
28857 has controlled storage, function has no toc, function uses fp,
28858 function logs/aborts fp operations. */
28859 /* Assume that fp operations are used if any fp reg must be saved. */
28860 fprintf (file, "%d,",
28861 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28862
28863 /* 6 bitfields: function is interrupt handler, name present in
28864 proc table, function calls alloca, on condition directives
28865 (controls stack walks, 3 bits), saves condition reg, saves
28866 link reg. */
28867 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28868 set up as a frame pointer, even when there is no alloca call. */
28869 fprintf (file, "%d,",
28870 ((optional_tbtab << 6)
28871 | ((optional_tbtab & frame_pointer_needed) << 5)
28872 | (info->cr_save_p << 1)
28873 | (info->lr_save_p)));
28874
28875 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28876 (6 bits). */
28877 fprintf (file, "%d,",
28878 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28879
28880 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28881 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28882
28883 if (optional_tbtab)
28884 {
28885 /* Compute the parameter info from the function decl argument
28886 list. */
28887 tree decl;
28888 int next_parm_info_bit = 31;
28889
28890 for (decl = DECL_ARGUMENTS (current_function_decl);
28891 decl; decl = DECL_CHAIN (decl))
28892 {
28893 rtx parameter = DECL_INCOMING_RTL (decl);
28894 machine_mode mode = GET_MODE (parameter);
28895
28896 if (REG_P (parameter))
28897 {
28898 if (SCALAR_FLOAT_MODE_P (mode))
28899 {
28900 int bits;
28901
28902 float_parms++;
28903
28904 switch (mode)
28905 {
28906 case E_SFmode:
28907 case E_SDmode:
28908 bits = 0x2;
28909 break;
28910
28911 case E_DFmode:
28912 case E_DDmode:
28913 case E_TFmode:
28914 case E_TDmode:
28915 case E_IFmode:
28916 case E_KFmode:
28917 bits = 0x3;
28918 break;
28919
28920 default:
28921 gcc_unreachable ();
28922 }
28923
28924 /* If only one bit will fit, don't or in this entry. */
28925 if (next_parm_info_bit > 0)
28926 parm_info |= (bits << (next_parm_info_bit - 1));
28927 next_parm_info_bit -= 2;
28928 }
28929 else
28930 {
28931 fixed_parms += ((GET_MODE_SIZE (mode)
28932 + (UNITS_PER_WORD - 1))
28933 / UNITS_PER_WORD);
28934 next_parm_info_bit -= 1;
28935 }
28936 }
28937 }
28938 }
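/* As an illustration: for "int f (int a, double b)" with both
   arguments passed in registers, the loop above yields
   fixed_parms == 1 and float_parms == 1, and parm_info becomes
   0x60000000 -- bit 31 stays 0 for the fixed word of A, and bits
   30-29 become 0b11 for the double B.  */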
28939
28940 /* Number of fixed point parameters. */
28941 /* This is actually the number of words of fixed-point parameters;
28942 thus an 8-byte struct counts as 2, and the maximum value is 8. */
28943 fprintf (file, "%d,", fixed_parms);
28944
28945 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28946 all on stack. */
28947 /* This is actually the number of fp registers that hold parameters;
28948 and thus the maximum value is 13. */
28949 /* Set the parameters-on-stack bit if parameters are not in their
28950 original registers, regardless of whether they are on the stack?
28951 Xlc seems to set the bit when not optimizing. */
28952 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28953
28954 if (optional_tbtab)
28955 {
28956 /* Optional fields follow. Some are variable length. */
28957
28958 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28959 float, 11 double float. */
28960 /* There is an entry for each parameter in a register, in the order
28961 that they occur in the parameter list. Any intervening arguments
28962 on the stack are ignored. If the list overflows a long (max
28963 possible length 34 bits) then completely leave off all elements
28964 that don't fit. */
28965 /* Only emit this long if there was at least one parameter. */
28966 if (fixed_parms || float_parms)
28967 fprintf (file, "\t.long %d\n", parm_info);
28968
28969 /* Offset from start of code to tb table. */
28970 fputs ("\t.long ", file);
28971 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28972 RS6000_OUTPUT_BASENAME (file, fname);
28973 putc ('-', file);
28974 rs6000_output_function_entry (file, fname);
28975 putc ('\n', file);
28976
28977 /* Interrupt handler mask. */
28978 /* Omit this long, since we never set the interrupt handler bit
28979 above. */
28980
28981 /* Number of CTL (controlled storage) anchors. */
28982 /* Omit this long, since the has_ctl bit is never set above. */
28983
28984 /* Displacement into stack of each CTL anchor. */
28985 /* Omit this list of longs, because there are no CTL anchors. */
28986
28987 /* Length of function name. */
28988 if (*fname == '*')
28989 ++fname;
28990 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28991
28992 /* Function name. */
28993 assemble_string (fname, strlen (fname));
28994
28995 /* Register for alloca automatic storage; this is always reg 31.
28996 Only emit this if the alloca bit was set above. */
28997 if (frame_pointer_needed)
28998 fputs ("\t.byte 31\n", file);
28999
29000 fputs ("\t.align 2\n", file);
29001 }
29002 }
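/* As a concrete illustration, an optimized C function "int f (int a)"
   that pushes a frame and saves only LR, compiled on AIX with full
   traceback, emits roughly:

   LT..f:
   .long 0                  # tbtab start marker
   .byte 0,0,32,65,128,0,1,0
   .long 0                  # parm_info: one fixed word
   .long LT..f-.f           # offset from entry point to tbtab
   .short 1
   .byte "f"                # function name
   .align 2

   (Label spellings and exact byte values are illustrative.)  */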
29003
29004 /* Arrange to define .LCTOC1 label, if not already done. */
29005 if (need_toc_init)
29006 {
29007 need_toc_init = 0;
29008 if (!toc_initialized)
29009 {
29010 switch_to_section (toc_section);
29011 switch_to_section (current_function_section ());
29012 }
29013 }
29014 }
29015
29016 /* -fsplit-stack support. */
29017
29018 /* A SYMBOL_REF for __morestack. */
29019 static GTY(()) rtx morestack_ref;
29020
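/* Generate an insn setting RT = RA + C, using the add pattern that
   matches the target word size.  */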
29021 static rtx
29022 gen_add3_const (rtx rt, rtx ra, long c)
29023 {
29024 if (TARGET_64BIT)
29025 return gen_adddi3 (rt, ra, GEN_INT (c));
29026 else
29027 return gen_addsi3 (rt, ra, GEN_INT (c));
29028 }
29029
29030 /* Emit -fsplit-stack prologue, which goes before the regular function
29031 prologue (at local entry point in the case of ELFv2). */
29032
29033 void
29034 rs6000_expand_split_stack_prologue (void)
29035 {
29036 rs6000_stack_t *info = rs6000_stack_info ();
29037 unsigned HOST_WIDE_INT allocate;
29038 long alloc_hi, alloc_lo;
29039 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29040 rtx_insn *insn;
29041
29042 gcc_assert (flag_split_stack && reload_completed);
29043
29044 if (!info->push_p)
29045 return;
29046
29047 if (global_regs[29])
29048 {
29049 error ("%qs uses register r29", "%<-fsplit-stack%>");
29050 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29051 "conflicts with %qD", global_regs_decl[29]);
29052 }
29053
29054 allocate = info->total_size;
29055 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29056 {
29057 sorry ("stack frame larger than 2G is not supported for "
29058 "%<-fsplit-stack%>");
29059 return;
29060 }
29061 if (morestack_ref == NULL_RTX)
29062 {
29063 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29064 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29065 | SYMBOL_FLAG_FUNCTION);
29066 }
29067
29068 r0 = gen_rtx_REG (Pmode, 0);
29069 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29070 r12 = gen_rtx_REG (Pmode, 12);
29071 emit_insn (gen_load_split_stack_limit (r0));
29072 /* Always emit two insns here to calculate the requested stack,
29073 so that the linker can edit them when adjusting size for calling
29074 non-split-stack code. */
29075 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29076 alloc_lo = -allocate - alloc_hi;
29077 if (alloc_hi != 0)
29078 {
29079 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29080 if (alloc_lo != 0)
29081 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29082 else
29083 emit_insn (gen_nop ());
29084 }
29085 else
29086 {
29087 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29088 emit_insn (gen_nop ());
29089 }
29090
29091 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29092 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29093 ok_label = gen_label_rtx ();
29094 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29095 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29096 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29097 pc_rtx);
29098 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29099 JUMP_LABEL (insn) = ok_label;
29100 /* Mark the jump as very likely to be taken. */
29101 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29102
29103 lr = gen_rtx_REG (Pmode, LR_REGNO);
29104 insn = emit_move_insn (r0, lr);
29105 RTX_FRAME_RELATED_P (insn) = 1;
29106 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29107 RTX_FRAME_RELATED_P (insn) = 1;
29108
29109 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29110 const0_rtx, const0_rtx));
29111 call_fusage = NULL_RTX;
29112 use_reg (&call_fusage, r12);
29113 /* Say the call uses r0, even though it doesn't, to stop regrename
29114 from twiddling with the insns saving lr, trashing args for cfun.
29115 The insns restoring lr are similarly protected by making
29116 split_stack_return use r0. */
29117 use_reg (&call_fusage, r0);
29118 add_function_usage_to (insn, call_fusage);
29119 /* Indicate that this function can't jump to non-local gotos. */
29120 make_reg_eh_region_note_nothrow_nononlocal (insn);
29121 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29122 insn = emit_move_insn (lr, r0);
29123 add_reg_note (insn, REG_CFA_RESTORE, lr);
29124 RTX_FRAME_RELATED_P (insn) = 1;
29125 emit_insn (gen_split_stack_return ());
29126
29127 emit_label (ok_label);
29128 LABEL_NUSES (ok_label) = 1;
29129 }
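/* A rough sketch of the sequence emitted above, for a 64-bit function
   whose frame size fits in 16 bits (the limit load and the
   __morestack return sequence are illustrative):

   ld r0,<limit>(r13)       # load the split-stack limit
   addi r12,r1,-SIZE        # requested stack pointer
   nop                      # second insn kept for linker editing
   cmpld cr7,r12,r0
   bge cr7,0f               # enough stack: skip the call
   mflr r0
   std r0,16(r1)            # save LR in the regular slot
   bl __morestack
   ld r0,16(r1)
   mtlr r0
   blr                      # split-stack return
   0:  */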
29130
29131 /* Return the internal arg pointer used for function incoming
29132 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29133 to copy it to a pseudo in order for it to be preserved over calls
29134 and suchlike. We'd really like to use a pseudo here for the
29135 internal arg pointer but data-flow analysis is not prepared to
29136 accept pseudos as live at the beginning of a function. */
29137
29138 static rtx
29139 rs6000_internal_arg_pointer (void)
29140 {
29141 if (flag_split_stack
29142 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29143 == NULL))
29145 {
29146 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29147 {
29148 rtx pat;
29149
29150 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29151 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29152
29153 /* Put the pseudo initialization right after the note at the
29154 beginning of the function. */
29155 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29156 gen_rtx_REG (Pmode, 12));
29157 push_topmost_sequence ();
29158 emit_insn_after (pat, get_insns ());
29159 pop_topmost_sequence ();
29160 }
29161 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29162 FIRST_PARM_OFFSET (current_function_decl));
29163 return copy_to_reg (ret);
29164 }
29165 return virtual_incoming_args_rtx;
29166 }
29167
29168 /* We may have to tell the dataflow pass that the split stack prologue
29169 is initializing a register. */
29170
29171 static void
29172 rs6000_live_on_entry (bitmap regs)
29173 {
29174 if (flag_split_stack)
29175 bitmap_set_bit (regs, 12);
29176 }
29177
29178 /* Emit -fsplit-stack dynamic stack allocation space check. */
29179
29180 void
29181 rs6000_split_stack_space_check (rtx size, rtx label)
29182 {
29183 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29184 rtx limit = gen_reg_rtx (Pmode);
29185 rtx requested = gen_reg_rtx (Pmode);
29186 rtx cmp = gen_reg_rtx (CCUNSmode);
29187 rtx jump;
29188
29189 emit_insn (gen_load_split_stack_limit (limit));
29190 if (CONST_INT_P (size))
29191 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29192 else
29193 {
29194 size = force_reg (Pmode, size);
29195 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29196 }
29197 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29198 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29199 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29200 gen_rtx_LABEL_REF (VOIDmode, label),
29201 pc_rtx);
29202 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29203 JUMP_LABEL (jump) = label;
29204 }
29205 \f
29206 /* A C compound statement that outputs the assembler code for a thunk
29207 function, used to implement C++ virtual function calls with
29208 multiple inheritance. The thunk acts as a wrapper around a virtual
29209 function, adjusting the implicit object parameter before handing
29210 control off to the real function.
29211
29212 First, emit code to add the integer DELTA to the location that
29213 contains the incoming first argument. Assume that this argument
29214 contains a pointer, and is the one used to pass the `this' pointer
29215 in C++. This is the incoming argument *before* the function
29216 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29217 values of all other incoming arguments.
29218
29219 After the addition, emit code to jump to FUNCTION, which is a
29220 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29221 not touch the return address. Hence returning from FUNCTION will
29222 return to whoever called the current `thunk'.
29223
29224 The effect must be as if FUNCTION had been called directly with the
29225 adjusted first argument. This macro is responsible for emitting
29226 all of the code for a thunk function; output_function_prologue()
29227 and output_function_epilogue() are not invoked.
29228
29229 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29230 been extracted from it.) It might possibly be useful on some
29231 targets, but probably not.
29232
29233 If you do not define this macro, the target-independent code in the
29234 C++ frontend will generate a less efficient heavyweight thunk that
29235 calls FUNCTION instead of jumping to it. The generic approach does
29236 not support varargs. */
29237
29238 static void
29239 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29240 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29241 tree function)
29242 {
29243 rtx this_rtx, funexp;
29244 rtx_insn *insn;
29245
29246 reload_completed = 1;
29247 epilogue_completed = 1;
29248
29249 /* Mark the end of the (empty) prologue. */
29250 emit_note (NOTE_INSN_PROLOGUE_END);
29251
29252 /* Find the "this" pointer. If the function returns a structure,
29253 the structure return pointer is in r3. */
29254 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29255 this_rtx = gen_rtx_REG (Pmode, 4);
29256 else
29257 this_rtx = gen_rtx_REG (Pmode, 3);
29258
29259 /* Apply the constant offset, if required. */
29260 if (delta)
29261 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29262
29263 /* Apply the offset from the vtable, if required. */
29264 if (vcall_offset)
29265 {
29266 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29267 rtx tmp = gen_rtx_REG (Pmode, 12);
29268
29269 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29270 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29271 {
29272 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29273 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29274 }
29275 else
29276 {
29277 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29278
29279 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29280 }
29281 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29282 }
29283
29284 /* Generate a tail call to the target function. */
29285 if (!TREE_USED (function))
29286 {
29287 assemble_external (function);
29288 TREE_USED (function) = 1;
29289 }
29290 funexp = XEXP (DECL_RTL (function), 0);
29291 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29292
29293 #if TARGET_MACHO
29294 if (MACHOPIC_INDIRECT)
29295 funexp = machopic_indirect_call_target (funexp);
29296 #endif
29297
29298 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29299 generate sibcall RTL explicitly. */
29300 insn = emit_call_insn (
29301 gen_rtx_PARALLEL (VOIDmode,
29302 gen_rtvec (3,
29303 gen_rtx_CALL (VOIDmode,
29304 funexp, const0_rtx),
29305 gen_rtx_USE (VOIDmode, const0_rtx),
29306 simple_return_rtx)));
29307 SIBLING_CALL_P (insn) = 1;
29308 emit_barrier ();
29309
29310 /* Run just enough of rest_of_compilation to get the insns emitted.
29311 There's not really enough bulk here to make other passes such as
29312 instruction scheduling worth while. Note that use_thunk calls
29313 assemble_start_function and assemble_end_function. */
29314 insn = get_insns ();
29315 shorten_branches (insn);
29316 final_start_function (insn, file, 1);
29317 final (insn, file, 1);
29318 final_end_function ();
29319
29320 reload_completed = 0;
29321 epilogue_completed = 0;
29322 }
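/* For example, with DELTA == -8 and no vcall offset, the whole thunk
   reduces to something like (ELFv2; illustrative):

   addi r3,r3,-8
   b f  */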
29323 \f
29324 /* A quick summary of the various types of 'constant-pool tables'
29325 under PowerPC:
29326
29327 Target Flags Name One table per
29328 AIX (none) AIX TOC object file
29329 AIX -mfull-toc AIX TOC object file
29330 AIX -mminimal-toc AIX minimal TOC translation unit
29331 SVR4/EABI (none) SVR4 SDATA object file
29332 SVR4/EABI -fpic SVR4 pic object file
29333 SVR4/EABI -fPIC SVR4 PIC translation unit
29334 SVR4/EABI -mrelocatable EABI TOC function
29335 SVR4/EABI -maix AIX TOC object file
29336 SVR4/EABI -maix -mminimal-toc
29337 AIX minimal TOC translation unit
29338
29339 Name Reg. Set by entries contains:
29340 made by addrs? fp? sum?
29341
29342 AIX TOC 2 crt0 as Y option option
29343 AIX minimal TOC 30 prolog gcc Y Y option
29344 SVR4 SDATA 13 crt0 gcc N Y N
29345 SVR4 pic 30 prolog ld Y not yet N
29346 SVR4 PIC 30 prolog gcc Y option option
29347 EABI TOC 30 prolog gcc Y option option
29348
29349 */
29350
29351 /* Hash functions for the hash table. */
29352
29353 static unsigned
29354 rs6000_hash_constant (rtx k)
29355 {
29356 enum rtx_code code = GET_CODE (k);
29357 machine_mode mode = GET_MODE (k);
29358 unsigned result = (code << 3) ^ mode;
29359 const char *format;
29360 int flen, fidx;
29361
29362 format = GET_RTX_FORMAT (code);
29363 flen = strlen (format);
29364 fidx = 0;
29365
29366 switch (code)
29367 {
29368 case LABEL_REF:
29369 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29370
29371 case CONST_WIDE_INT:
29372 {
29373 int i;
29374 flen = CONST_WIDE_INT_NUNITS (k);
29375 for (i = 0; i < flen; i++)
29376 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29377 return result;
29378 }
29379
29380 case CONST_DOUBLE:
29381 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29382
29383 case CODE_LABEL:
29384 fidx = 3;
29385 break;
29386
29387 default:
29388 break;
29389 }
29390
29391 for (; fidx < flen; fidx++)
29392 switch (format[fidx])
29393 {
29394 case 's':
29395 {
29396 unsigned i, len;
29397 const char *str = XSTR (k, fidx);
29398 len = strlen (str);
29399 result = result * 613 + len;
29400 for (i = 0; i < len; i++)
29401 result = result * 613 + (unsigned) str[i];
29402 break;
29403 }
29404 case 'u':
29405 case 'e':
29406 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29407 break;
29408 case 'i':
29409 case 'n':
29410 result = result * 613 + (unsigned) XINT (k, fidx);
29411 break;
29412 case 'w':
29413 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29414 result = result * 613 + (unsigned) XWINT (k, fidx);
29415 else
29416 {
29417 size_t i;
29418 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29419 result = result * 613 + (unsigned) (XWINT (k, fidx)
29420 >> CHAR_BIT * i);
29421 }
29422 break;
29423 case '0':
29424 break;
29425 default:
29426 gcc_unreachable ();
29427 }
29428
29429 return result;
29430 }
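/* (613 and 1231 above are simply odd primes used to mix the fields;
   the exact constants are not significant.)  */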
29431
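/* Hash a toc_hash_struct by hashing its key constant and mixing in
   its mode.  */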
29432 hashval_t
29433 toc_hasher::hash (toc_hash_struct *thc)
29434 {
29435 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29436 }
29437
29438 /* Compare H1 and H2 for equivalence. */
29439
29440 bool
29441 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29442 {
29443 rtx r1 = h1->key;
29444 rtx r2 = h2->key;
29445
29446 if (h1->key_mode != h2->key_mode)
29447 return 0;
29448
29449 return rtx_equal_p (r1, r2);
29450 }
29451
29452 /* These are the names given by the C++ front-end to vtables, and
29453 vtable-like objects. Ideally, this logic should not be here;
29454 instead, there should be some programmatic way of inquiring as
29455 to whether or not an object is a vtable. */
29456
29457 #define VTABLE_NAME_P(NAME) \
29458 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29459 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29460 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29461 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29462 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
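/* For example, the Itanium-ABI names "_ZTV6Object" (the vtable for
   Object) and "_ZTI6Object" (its type_info) both match; an ordinary
   function symbol does not.  */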
29463
29464 #ifdef NO_DOLLAR_IN_LABEL
29465 /* Return a GGC-allocated character string translating dollar signs in
29466 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
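/* For example, "foo$bar$baz" becomes "foo_bar_baz"; a name whose only
   '$' is the leading character is returned unchanged.  */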
29467
29468 const char *
29469 rs6000_xcoff_strip_dollar (const char *name)
29470 {
29471 char *strip, *p;
29472 const char *q;
29473 size_t len;
29474
29475 q = (const char *) strchr (name, '$');
29476
29477 if (q == 0 || q == name)
29478 return name;
29479
29480 len = strlen (name);
29481 strip = XALLOCAVEC (char, len + 1);
29482 strcpy (strip, name);
29483 p = strip + (q - name);
29484 while (p)
29485 {
29486 *p = '_';
29487 p = strchr (p + 1, '$');
29488 }
29489
29490 return ggc_alloc_string (strip, len);
29491 }
29492 #endif
29493
29494 void
29495 rs6000_output_symbol_ref (FILE *file, rtx x)
29496 {
29497 const char *name = XSTR (x, 0);
29498
29499 /* Currently C++ TOC references to vtables can be emitted before it
29500 is decided whether the vtable is public or private. If this is
29501 the case, then the linker will eventually complain that there is
29502 a reference to an unknown section. Thus, for vtables only,
29503 we make the TOC entry reference the identifier and not the
29504 symbol. */
29505 if (VTABLE_NAME_P (name))
29506 {
29507 RS6000_OUTPUT_BASENAME (file, name);
29508 }
29509 else
29510 assemble_name (file, name);
29511 }
29512
29513 /* Output a TOC entry. We derive the entry name from what is being
29514 written. */
29515
29516 void
29517 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29518 {
29519 char buf[256];
29520 const char *name = buf;
29521 rtx base = x;
29522 HOST_WIDE_INT offset = 0;
29523
29524 gcc_assert (!TARGET_NO_TOC);
29525
29526 /* When the linker won't eliminate them, don't output duplicate
29527 TOC entries (this happens on AIX if there is any kind of TOC,
29528 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29529 CODE_LABELs. */
29530 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29531 {
29532 struct toc_hash_struct *h;
29533
29534 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29535 time because GGC is not initialized at that point. */
29536 if (toc_hash_table == NULL)
29537 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29538
29539 h = ggc_alloc<toc_hash_struct> ();
29540 h->key = x;
29541 h->key_mode = mode;
29542 h->labelno = labelno;
29543
29544 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29545 if (*found == NULL)
29546 *found = h;
29547 else /* This is indeed a duplicate.
29548 Set this label equal to that label. */
29549 {
29550 fputs ("\t.set ", file);
29551 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29552 fprintf (file, "%d,", labelno);
29553 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29554 fprintf (file, "%d\n", ((*found)->labelno));
29555
29556 #ifdef HAVE_AS_TLS
29557 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29558 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29559 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29560 {
29561 fputs ("\t.set ", file);
29562 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29563 fprintf (file, "%d,", labelno);
29564 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29565 fprintf (file, "%d\n", ((*found)->labelno));
29566 }
29567 #endif
29568 return;
29569 }
29570 }
29571
29572 /* If we're going to put a double constant in the TOC, make sure it's
29573 aligned properly when strict alignment is on. */
29574 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29575 && STRICT_ALIGNMENT
29576 && GET_MODE_BITSIZE (mode) >= 64
29577 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29578 ASM_OUTPUT_ALIGN (file, 3);
29580
29581 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29582
29583 /* Handle FP constants specially. Note that if we have a minimal
29584 TOC, things we put here aren't actually in the TOC, so we can allow
29585 FP constants. */
29586 if (CONST_DOUBLE_P (x)
29587 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29588 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29589 {
29590 long k[4];
29591
29592 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29593 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29594 else
29595 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29596
29597 if (TARGET_64BIT)
29598 {
29599 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29600 fputs (DOUBLE_INT_ASM_OP, file);
29601 else
29602 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29603 k[0] & 0xffffffff, k[1] & 0xffffffff,
29604 k[2] & 0xffffffff, k[3] & 0xffffffff);
29605 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29606 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29607 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29608 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29609 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29610 return;
29611 }
29612 else
29613 {
29614 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29615 fputs ("\t.long ", file);
29616 else
29617 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29618 k[0] & 0xffffffff, k[1] & 0xffffffff,
29619 k[2] & 0xffffffff, k[3] & 0xffffffff);
29620 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29621 k[0] & 0xffffffff, k[1] & 0xffffffff,
29622 k[2] & 0xffffffff, k[3] & 0xffffffff);
29623 return;
29624 }
29625 }
29626 else if (CONST_DOUBLE_P (x)
29627 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29628 {
29629 long k[2];
29630
29631 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29632 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29633 else
29634 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29635
29636 if (TARGET_64BIT)
29637 {
29638 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29639 fputs (DOUBLE_INT_ASM_OP, file);
29640 else
29641 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29642 k[0] & 0xffffffff, k[1] & 0xffffffff);
29643 fprintf (file, "0x%lx%08lx\n",
29644 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29645 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29646 return;
29647 }
29648 else
29649 {
29650 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29651 fputs ("\t.long ", file);
29652 else
29653 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29654 k[0] & 0xffffffff, k[1] & 0xffffffff);
29655 fprintf (file, "0x%lx,0x%lx\n",
29656 k[0] & 0xffffffff, k[1] & 0xffffffff);
29657 return;
29658 }
29659 }
29660 else if (CONST_DOUBLE_P (x)
29661 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29662 {
29663 long l;
29664
29665 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29666 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29667 else
29668 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29669
29670 if (TARGET_64BIT)
29671 {
29672 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29673 fputs (DOUBLE_INT_ASM_OP, file);
29674 else
29675 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29676 if (WORDS_BIG_ENDIAN)
29677 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29678 else
29679 fprintf (file, "0x%lx\n", l & 0xffffffff);
29680 return;
29681 }
29682 else
29683 {
29684 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29685 fputs ("\t.long ", file);
29686 else
29687 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29688 fprintf (file, "0x%lx\n", l & 0xffffffff);
29689 return;
29690 }
29691 }
29692 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
29693 {
29694 unsigned HOST_WIDE_INT low;
29695 HOST_WIDE_INT high;
29696
29697 low = INTVAL (x) & 0xffffffff;
29698 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29699
29700 /* TOC entries are always Pmode-sized, so when big-endian
29701 smaller integer constants in the TOC need to be padded.
29702 (This is still a win over putting the constants in
29703 a separate constant pool, because then we'd have
29704 to have both a TOC entry _and_ the actual constant.)
29705
29706 For a 32-bit target, CONST_INT values are loaded and shifted
29707 entirely within `low' and can be stored in one TOC entry. */
29708
29709 /* It would be easy to make this work, but it doesn't now. */
29710 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29711
29712 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29713 {
29714 low |= high << 32;
29715 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29716 high = (HOST_WIDE_INT) low >> 32;
29717 low &= 0xffffffff;
29718 }
29719
29720 if (TARGET_64BIT)
29721 {
29722 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29723 fputs (DOUBLE_INT_ASM_OP, file);
29724 else
29725 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29726 (long) high & 0xffffffff, (long) low & 0xffffffff);
29727 fprintf (file, "0x%lx%08lx\n",
29728 (long) high & 0xffffffff, (long) low & 0xffffffff);
29729 return;
29730 }
29731 else
29732 {
29733 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29734 {
29735 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29736 fputs ("\t.long ", file);
29737 else
29738 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29739 (long) high & 0xffffffff, (long) low & 0xffffffff);
29740 fprintf (file, "0x%lx,0x%lx\n",
29741 (long) high & 0xffffffff, (long) low & 0xffffffff);
29742 }
29743 else
29744 {
29745 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29746 fputs ("\t.long ", file);
29747 else
29748 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29749 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29750 }
29751 return;
29752 }
29753 }
29754
29755 if (GET_CODE (x) == CONST)
29756 {
29757 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29758 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
29759
29760 base = XEXP (XEXP (x, 0), 0);
29761 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29762 }
29763
29764 switch (GET_CODE (base))
29765 {
29766 case SYMBOL_REF:
29767 name = XSTR (base, 0);
29768 break;
29769
29770 case LABEL_REF:
29771 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29772 CODE_LABEL_NUMBER (XEXP (base, 0)));
29773 break;
29774
29775 case CODE_LABEL:
29776 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29777 break;
29778
29779 default:
29780 gcc_unreachable ();
29781 }
29782
29783 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29784 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29785 else
29786 {
29787 fputs ("\t.tc ", file);
29788 RS6000_OUTPUT_BASENAME (file, name);
29789
29790 if (offset < 0)
29791 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29792 else if (offset)
29793 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29794
29795 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29796 after other TOC symbols, reducing overflow of small TOC access
29797 to [TC] symbols. */
29798 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29799 ? "[TE]," : "[TC],", file);
29800 }
29801
29802 /* Currently C++ TOC references to vtables can be emitted before it
29803 is decided whether the vtable is public or private. If this is
29804 the case, then the linker will eventually complain that there is
29805 a TOC reference to an unknown section. Thus, for vtables only,
29806 we make the TOC entry reference the symbol and not the
29807 section. */
29808 if (VTABLE_NAME_P (name))
29809 {
29810 RS6000_OUTPUT_BASENAME (file, name);
29811 if (offset < 0)
29812 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29813 else if (offset > 0)
29814 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29815 }
29816 else
29817 output_addr_const (file, x);
29818
29819 #if HAVE_AS_TLS
29820 if (TARGET_XCOFF && SYMBOL_REF_P (base))
29821 {
29822 switch (SYMBOL_REF_TLS_MODEL (base))
29823 {
29824 case 0:
29825 break;
29826 case TLS_MODEL_LOCAL_EXEC:
29827 fputs ("@le", file);
29828 break;
29829 case TLS_MODEL_INITIAL_EXEC:
29830 fputs ("@ie", file);
29831 break;
29832 /* Use global-dynamic for local-dynamic. */
29833 case TLS_MODEL_GLOBAL_DYNAMIC:
29834 case TLS_MODEL_LOCAL_DYNAMIC:
29835 putc ('\n', file);
29836 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29837 fputs ("\t.tc .", file);
29838 RS6000_OUTPUT_BASENAME (file, name);
29839 fputs ("[TC],", file);
29840 output_addr_const (file, x);
29841 fputs ("@m", file);
29842 break;
29843 default:
29844 gcc_unreachable ();
29845 }
29846 }
29847 #endif
29848
29849 putc ('\n', file);
29850 }
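/* For example, on 64-bit AIX the DFmode constant 1.0 comes out as a
   TOC entry along the lines of (label spelling illustrative):

   LC..5:
   .tc FD_3ff00000_0[TC],0x3ff0000000000000  */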
29851 \f
29852 /* Output an assembler pseudo-op to write an ASCII string of N characters
29853 starting at P to FILE.
29854
29855 On the RS/6000, we have to do this using the .byte operation and
29856 write out special characters outside the quoted string.
29857 Also, the assembler is broken; very long strings are truncated,
29858 so we must artificially break them up early. */
29859
29860 void
29861 output_ascii (FILE *file, const char *p, int n)
29862 {
29863 char c;
29864 int i, count_string;
29865 const char *for_string = "\t.byte \"";
29866 const char *for_decimal = "\t.byte ";
29867 const char *to_close = NULL;
29868
29869 count_string = 0;
29870 for (i = 0; i < n; i++)
29871 {
29872 c = *p++;
29873 if (c >= ' ' && c < 0177)
29874 {
29875 if (for_string)
29876 fputs (for_string, file);
29877 putc (c, file);
29878
29879 /* Write two quotes to get one. */
29880 if (c == '"')
29881 {
29882 putc (c, file);
29883 ++count_string;
29884 }
29885
29886 for_string = NULL;
29887 for_decimal = "\"\n\t.byte ";
29888 to_close = "\"\n";
29889 ++count_string;
29890
29891 if (count_string >= 512)
29892 {
29893 fputs (to_close, file);
29894
29895 for_string = "\t.byte \"";
29896 for_decimal = "\t.byte ";
29897 to_close = NULL;
29898 count_string = 0;
29899 }
29900 }
29901 else
29902 {
29903 if (for_decimal)
29904 fputs (for_decimal, file);
29905 fprintf (file, "%d", c);
29906
29907 for_string = "\n\t.byte \"";
29908 for_decimal = ", ";
29909 to_close = "\n";
29910 count_string = 0;
29911 }
29912 }
29913
29914 /* Now close the string if we have written one. Then end the line. */
29915 if (to_close)
29916 fputs (to_close, file);
29917 }
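/* For example, output_ascii (file, "hi\n", 3) produces:

   .byte "hi"
   .byte 10  */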
29918 \f
29919 /* Generate a unique section name for FILENAME for a section type
29920 represented by SECTION_DESC. Output goes into BUF.
29921
29922 SECTION_DESC can be any string, as long as it is different for each
29923 possible section type.
29924
29925 We name the section in the same manner as xlc. The name begins with an
29926 underscore followed by the filename (after stripping any leading directory
29927 names) with the last period replaced by the string SECTION_DESC. If
29928 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29929 the name. */
29930
29931 void
29932 rs6000_gen_section_name (char **buf, const char *filename,
29933 const char *section_desc)
29934 {
29935 const char *q, *after_last_slash, *last_period = 0;
29936 char *p;
29937 int len;
29938
29939 after_last_slash = filename;
29940 for (q = filename; *q; q++)
29941 {
29942 if (*q == '/')
29943 after_last_slash = q + 1;
29944 else if (*q == '.')
29945 last_period = q;
29946 }
29947
29948 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29949 *buf = (char *) xmalloc (len);
29950
29951 p = *buf;
29952 *p++ = '_';
29953
29954 for (q = after_last_slash; *q; q++)
29955 {
29956 if (q == last_period)
29957 {
29958 strcpy (p, section_desc);
29959 p += strlen (section_desc);
29960 break;
29961 }
29962
29963 else if (ISALNUM (*q))
29964 *p++ = *q;
29965 }
29966
29967 if (last_period == 0)
29968 strcpy (p, section_desc);
29969 else
29970 *p = '\0';
29971 }
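/* For example, rs6000_gen_section_name (&buf, "dir/foo.c", "_bss")
   stores "_foo_bss" in BUF.  */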
29972 \f
29973 /* Emit profile function. */
29974
29975 void
29976 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29977 {
29978 /* Non-standard profiling for kernels, which just saves LR then calls
29979 _mcount without worrying about arg saves. The idea is to change
29980 the function prologue as little as possible as it isn't easy to
29981 account for arg save/restore code added just for _mcount. */
29982 if (TARGET_PROFILE_KERNEL)
29983 return;
29984
29985 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29986 {
29987 #ifndef NO_PROFILE_COUNTERS
29988 # define NO_PROFILE_COUNTERS 0
29989 #endif
29990 if (NO_PROFILE_COUNTERS)
29991 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29992 LCT_NORMAL, VOIDmode);
29993 else
29994 {
29995 char buf[30];
29996 const char *label_name;
29997 rtx fun;
29998
29999 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30000 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
30001 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
30002
30003 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
30004 LCT_NORMAL, VOIDmode, fun, Pmode);
30005 }
30006 }
30007 else if (DEFAULT_ABI == ABI_DARWIN)
30008 {
30009 const char *mcount_name = RS6000_MCOUNT;
30010 int caller_addr_regno = LR_REGNO;
30011
30012 /* Be conservative and always set this, at least for now. */
30013 crtl->uses_pic_offset_table = 1;
30014
30015 #if TARGET_MACHO
30016 /* For PIC code, set up a stub and collect the caller's address
30017 from r0, which is where the prologue puts it. */
30018 if (MACHOPIC_INDIRECT
30019 && crtl->uses_pic_offset_table)
30020 caller_addr_regno = 0;
30021 #endif
30022 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30023 LCT_NORMAL, VOIDmode,
30024 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30025 }
30026 }
30027
30028 /* Write function profiler code. */
30029
30030 void
30031 output_function_profiler (FILE *file, int labelno)
30032 {
30033 char buf[100];
30034
30035 switch (DEFAULT_ABI)
30036 {
30037 default:
30038 gcc_unreachable ();
30039
30040 case ABI_V4:
30041 if (!TARGET_32BIT)
30042 {
30043 warning (0, "no profiling of 64-bit code for this ABI");
30044 return;
30045 }
30046 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30047 fprintf (file, "\tmflr %s\n", reg_names[0]);
30048 if (NO_PROFILE_COUNTERS)
30049 {
30050 asm_fprintf (file, "\tstw %s,4(%s)\n",
30051 reg_names[0], reg_names[1]);
30052 }
30053 else if (TARGET_SECURE_PLT && flag_pic)
30054 {
30055 if (TARGET_LINK_STACK)
30056 {
30057 char name[32];
30058 get_ppc476_thunk_name (name);
30059 asm_fprintf (file, "\tbl %s\n", name);
30060 }
30061 else
30062 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30063 asm_fprintf (file, "\tstw %s,4(%s)\n",
30064 reg_names[0], reg_names[1]);
30065 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30066 asm_fprintf (file, "\taddis %s,%s,",
30067 reg_names[12], reg_names[12]);
30068 assemble_name (file, buf);
30069 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30070 assemble_name (file, buf);
30071 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30072 }
30073 else if (flag_pic == 1)
30074 {
30075 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30076 asm_fprintf (file, "\tstw %s,4(%s)\n",
30077 reg_names[0], reg_names[1]);
30078 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30079 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30080 assemble_name (file, buf);
30081 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30082 }
30083 else if (flag_pic > 1)
30084 {
30085 asm_fprintf (file, "\tstw %s,4(%s)\n",
30086 reg_names[0], reg_names[1]);
30087 /* Now, we need to get the address of the label. */
30088 if (TARGET_LINK_STACK)
30089 {
30090 char name[32];
30091 get_ppc476_thunk_name (name);
30092 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30093 assemble_name (file, buf);
30094 fputs ("-.\n1:", file);
30095 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30096 asm_fprintf (file, "\taddi %s,%s,4\n",
30097 reg_names[11], reg_names[11]);
30098 }
30099 else
30100 {
30101 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30102 assemble_name (file, buf);
30103 fputs ("-.\n1:", file);
30104 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30105 }
30106 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30107 reg_names[0], reg_names[11]);
30108 asm_fprintf (file, "\tadd %s,%s,%s\n",
30109 reg_names[0], reg_names[0], reg_names[11]);
30110 }
30111 else
30112 {
30113 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30114 assemble_name (file, buf);
30115 fputs ("@ha\n", file);
30116 asm_fprintf (file, "\tstw %s,4(%s)\n",
30117 reg_names[0], reg_names[1]);
30118 asm_fprintf (file, "\tla %s,", reg_names[0]);
30119 assemble_name (file, buf);
30120 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30121 }
30122
30123 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30124 fprintf (file, "\tbl %s%s\n",
30125 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30126 break;
30127
30128 case ABI_AIX:
30129 case ABI_ELFv2:
30130 case ABI_DARWIN:
30131 /* Don't do anything, done in output_profile_hook (). */
30132 break;
30133 }
30134 }
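/* For ABI_V4 without PIC and with profile counters enabled, the code
   emitted above amounts to (label name illustrative):

   mflr r0
   lis r12,.LP3@ha
   stw r0,4(r1)
   la r0,.LP3@l(r12)
   bl _mcount  */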
30135
30136 \f
30137
30138 /* The following variable holds the last issued insn. */
30139
30140 static rtx_insn *last_scheduled_insn;
30141
30142 /* The following variable helps to balance issuing of load and
30143 store instructions. */
30144
30145 static int load_store_pendulum;
30146
30147 /* The following variable helps pair divide insns during scheduling. */
30148 static int divide_cnt;
30149 /* The following variable helps pair and alternate vector and vector load
30150 insns during scheduling. */
30151 static int vec_pairing;
30152
30153
30154 /* Power4 load update and store update instructions are cracked into a
30155 load or store and an integer insn which are executed in the same cycle.
30156 Branches have their own dispatch slot which does not count against the
30157 GCC issue rate, but it changes the program flow so there are no other
30158 instructions to issue in this cycle. */
30159
30160 static int
30161 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30162 {
30163 last_scheduled_insn = insn;
30164 if (GET_CODE (PATTERN (insn)) == USE
30165 || GET_CODE (PATTERN (insn)) == CLOBBER)
30166 {
30167 cached_can_issue_more = more;
30168 return cached_can_issue_more;
30169 }
30170
30171 if (insn_terminates_group_p (insn, current_group))
30172 {
30173 cached_can_issue_more = 0;
30174 return cached_can_issue_more;
30175 }
30176
30177 /* If the insn has no reservation but we reach here anyway, leave the issue count unchanged. */
30178 if (recog_memoized (insn) < 0)
30179 return more;
30180
30181 if (rs6000_sched_groups)
30182 {
30183 if (is_microcoded_insn (insn))
30184 cached_can_issue_more = 0;
30185 else if (is_cracked_insn (insn))
30186 cached_can_issue_more = more > 2 ? more - 2 : 0;
30187 else
30188 cached_can_issue_more = more - 1;
30189
30190 return cached_can_issue_more;
30191 }
30192
30193 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30194 return 0;
30195
30196 cached_can_issue_more = more - 1;
30197 return cached_can_issue_more;
30198 }
30199
30200 static int
30201 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30202 {
30203 int r = rs6000_variable_issue_1 (insn, more);
30204 if (verbose)
30205 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30206 return r;
30207 }
30208
30209 /* Adjust the cost of a scheduling dependency. Return the new cost of
30210 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30211
30212 static int
30213 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30214 unsigned int)
30215 {
30216 enum attr_type attr_type;
30217
30218 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30219 return cost;
30220
30221 switch (dep_type)
30222 {
30223 case REG_DEP_TRUE:
30224 {
30225 /* Data dependency; DEP_INSN writes a register that INSN reads
30226 some cycles later. */
30227
30228 /* Separate a load from a narrower, dependent store. */
30229 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30230 && GET_CODE (PATTERN (insn)) == SET
30231 && GET_CODE (PATTERN (dep_insn)) == SET
30232 && MEM_P (XEXP (PATTERN (insn), 1))
30233 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30234 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30235 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30236 return cost + 14;
30237
30238 attr_type = get_attr_type (insn);
30239
30240 switch (attr_type)
30241 {
30242 case TYPE_JMPREG:
30243 /* Tell the first scheduling pass about the latency between
30244 a mtctr and bctr (and mtlr and br/blr). The first
30245 scheduling pass will not know about this latency since
30246 the mtctr instruction, which has the latency associated
30247 to it, will be generated by reload. */
30248 return 4;
30249 case TYPE_BRANCH:
30250 /* Leave some extra cycles between a compare and its
30251 dependent branch, to inhibit expensive mispredicts. */
30252 if ((rs6000_tune == PROCESSOR_PPC603
30253 || rs6000_tune == PROCESSOR_PPC604
30254 || rs6000_tune == PROCESSOR_PPC604e
30255 || rs6000_tune == PROCESSOR_PPC620
30256 || rs6000_tune == PROCESSOR_PPC630
30257 || rs6000_tune == PROCESSOR_PPC750
30258 || rs6000_tune == PROCESSOR_PPC7400
30259 || rs6000_tune == PROCESSOR_PPC7450
30260 || rs6000_tune == PROCESSOR_PPCE5500
30261 || rs6000_tune == PROCESSOR_PPCE6500
30262 || rs6000_tune == PROCESSOR_POWER4
30263 || rs6000_tune == PROCESSOR_POWER5
30264 || rs6000_tune == PROCESSOR_POWER7
30265 || rs6000_tune == PROCESSOR_POWER8
30266 || rs6000_tune == PROCESSOR_POWER9
30267 || rs6000_tune == PROCESSOR_CELL)
30268 && recog_memoized (dep_insn)
30269 && (INSN_CODE (dep_insn) >= 0))
30270
30271 switch (get_attr_type (dep_insn))
30272 {
30273 case TYPE_CMP:
30274 case TYPE_FPCOMPARE:
30275 case TYPE_CR_LOGICAL:
30276 return cost + 2;
30277 case TYPE_EXTS:
30278 case TYPE_MUL:
30279 if (get_attr_dot (dep_insn) == DOT_YES)
30280 return cost + 2;
30281 else
30282 break;
30283 case TYPE_SHIFT:
30284 if (get_attr_dot (dep_insn) == DOT_YES
30285 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30286 return cost + 2;
30287 else
30288 break;
30289 default:
30290 break;
30291 }
30292 break;
30293
30294 case TYPE_STORE:
30295 case TYPE_FPSTORE:
30296 if ((rs6000_tune == PROCESSOR_POWER6)
30297 && recog_memoized (dep_insn)
30298 && (INSN_CODE (dep_insn) >= 0))
30299 {
30300
30301 if (GET_CODE (PATTERN (insn)) != SET)
30302 /* If this happens, we have to extend this to schedule
30303 optimally. Return default for now. */
30304 return cost;
30305
30306 /* Adjust the cost for the case where the value written
30307 by a fixed point operation is used as the address
30308 gen value on a store. */
30309 switch (get_attr_type (dep_insn))
30310 {
30311 case TYPE_LOAD:
30312 case TYPE_CNTLZ:
30313 {
30314 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30315 return get_attr_sign_extend (dep_insn)
30316 == SIGN_EXTEND_YES ? 6 : 4;
30317 break;
30318 }
30319 case TYPE_SHIFT:
30320 {
30321 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30322 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30323 6 : 3;
30324 break;
30325 }
30326 case TYPE_INTEGER:
30327 case TYPE_ADD:
30328 case TYPE_LOGICAL:
30329 case TYPE_EXTS:
30330 case TYPE_INSERT:
30331 {
30332 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30333 return 3;
30334 break;
30335 }
30336 case TYPE_STORE:
30337 case TYPE_FPLOAD:
30338 case TYPE_FPSTORE:
30339 {
30340 if (get_attr_update (dep_insn) == UPDATE_YES
30341 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30342 return 3;
30343 break;
30344 }
30345 case TYPE_MUL:
30346 {
30347 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30348 return 17;
30349 break;
30350 }
30351 case TYPE_DIV:
30352 {
30353 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30354 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30355 break;
30356 }
30357 default:
30358 break;
30359 }
30360 }
30361 break;
30362
30363 case TYPE_LOAD:
30364 if ((rs6000_tune == PROCESSOR_POWER6)
30365 && recog_memoized (dep_insn)
30366 && (INSN_CODE (dep_insn) >= 0))
30367 {
30368
30369 /* Adjust the cost for the case where the value written
30370 by a fixed point instruction is used within the address
30371 gen portion of a subsequent load(u)(x). */
30372 switch (get_attr_type (dep_insn))
30373 {
30374 case TYPE_LOAD:
30375 case TYPE_CNTLZ:
30376 {
30377 if (set_to_load_agen (dep_insn, insn))
30378 return get_attr_sign_extend (dep_insn)
30379 == SIGN_EXTEND_YES ? 6 : 4;
30380 break;
30381 }
30382 case TYPE_SHIFT:
30383 {
30384 if (set_to_load_agen (dep_insn, insn))
30385 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30386 6 : 3;
30387 break;
30388 }
30389 case TYPE_INTEGER:
30390 case TYPE_ADD:
30391 case TYPE_LOGICAL:
30392 case TYPE_EXTS:
30393 case TYPE_INSERT:
30394 {
30395 if (set_to_load_agen (dep_insn, insn))
30396 return 3;
30397 break;
30398 }
30399 case TYPE_STORE:
30400 case TYPE_FPLOAD:
30401 case TYPE_FPSTORE:
30402 {
30403 if (get_attr_update (dep_insn) == UPDATE_YES
30404 && set_to_load_agen (dep_insn, insn))
30405 return 3;
30406 break;
30407 }
30408 case TYPE_MUL:
30409 {
30410 if (set_to_load_agen (dep_insn, insn))
30411 return 17;
30412 break;
30413 }
30414 case TYPE_DIV:
30415 {
30416 if (set_to_load_agen (dep_insn, insn))
30417 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30418 break;
30419 }
30420 default:
30421 break;
30422 }
30423 }
30424 break;
30425
30426 case TYPE_FPLOAD:
30427 if ((rs6000_tune == PROCESSOR_POWER6)
30428 && get_attr_update (insn) == UPDATE_NO
30429 && recog_memoized (dep_insn)
30430 && (INSN_CODE (dep_insn) >= 0)
30431 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30432 return 2;
30433
30434 default:
30435 break;
30436 }
30437
30438 /* Fall out to return default cost. */
30439 }
30440 break;
30441
30442 case REG_DEP_OUTPUT:
30443 /* Output dependency; DEP_INSN writes a register that INSN writes some
30444 cycles later. */
30445 if ((rs6000_tune == PROCESSOR_POWER6)
30446 && recog_memoized (dep_insn)
30447 && (INSN_CODE (dep_insn) >= 0))
30448 {
30449 attr_type = get_attr_type (insn);
30450
30451 switch (attr_type)
30452 {
30453 case TYPE_FP:
30454 case TYPE_FPSIMPLE:
30455 if (get_attr_type (dep_insn) == TYPE_FP
30456 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30457 return 1;
30458 break;
30459 case TYPE_FPLOAD:
30460 if (get_attr_update (insn) == UPDATE_NO
30461 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30462 return 2;
30463 break;
30464 default:
30465 break;
30466 }
30467 }
30468 /* Fall through, no cost for output dependency. */
30469 /* FALLTHRU */
30470
30471 case REG_DEP_ANTI:
30472 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30473 cycles later. */
30474 return 0;
30475
30476 default:
30477 gcc_unreachable ();
30478 }
30479
30480 return cost;
30481 }
30482
30483 /* Debug version of rs6000_adjust_cost. */
30484
30485 static int
30486 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30487 int cost, unsigned int dw)
30488 {
30489 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30490
30491 if (ret != cost)
30492 {
30493 const char *dep;
30494
30495 switch (dep_type)
30496 {
30497 default: dep = "unknown dependency"; break;
30498 case REG_DEP_TRUE: dep = "data dependency"; break;
30499 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30500 case REG_DEP_ANTI: dep = "anti dependency"; break;
30501 }
30502
30503 fprintf (stderr,
30504 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30505 "%s, insn:\n", ret, cost, dep);
30506
30507 debug_rtx (insn);
30508 }
30509
30510 return ret;
30511 }
30512
30513 /* Return true if INSN is microcoded.
30514 Return false otherwise. */
30515
30516 static bool
30517 is_microcoded_insn (rtx_insn *insn)
30518 {
30519 if (!insn || !NONDEBUG_INSN_P (insn)
30520 || GET_CODE (PATTERN (insn)) == USE
30521 || GET_CODE (PATTERN (insn)) == CLOBBER)
30522 return false;
30523
30524 if (rs6000_tune == PROCESSOR_CELL)
30525 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30526
30527 if (rs6000_sched_groups
30528 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30529 {
30530 enum attr_type type = get_attr_type (insn);
30531 if ((type == TYPE_LOAD
30532 && get_attr_update (insn) == UPDATE_YES
30533 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30534 || ((type == TYPE_LOAD || type == TYPE_STORE)
30535 && get_attr_update (insn) == UPDATE_YES
30536 && get_attr_indexed (insn) == INDEXED_YES)
30537 || type == TYPE_MFCR)
30538 return true;
30539 }
30540
30541 return false;
30542 }
30543
30544 /* The function returns true if INSN is cracked into 2 instructions
30545 by the processor (and therefore occupies 2 issue slots). */
30546
30547 static bool
30548 is_cracked_insn (rtx_insn *insn)
30549 {
30550 if (!insn || !NONDEBUG_INSN_P (insn)
30551 || GET_CODE (PATTERN (insn)) == USE
30552 || GET_CODE (PATTERN (insn)) == CLOBBER)
30553 return false;
30554
30555 if (rs6000_sched_groups
30556 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30557 {
30558 enum attr_type type = get_attr_type (insn);
30559 if ((type == TYPE_LOAD
30560 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30561 && get_attr_update (insn) == UPDATE_NO)
30562 || (type == TYPE_LOAD
30563 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30564 && get_attr_update (insn) == UPDATE_YES
30565 && get_attr_indexed (insn) == INDEXED_NO)
30566 || (type == TYPE_STORE
30567 && get_attr_update (insn) == UPDATE_YES
30568 && get_attr_indexed (insn) == INDEXED_NO)
30569 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30570 && get_attr_update (insn) == UPDATE_YES)
30571 || (type == TYPE_CR_LOGICAL
30572 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30573 || (type == TYPE_EXTS
30574 && get_attr_dot (insn) == DOT_YES)
30575 || (type == TYPE_SHIFT
30576 && get_attr_dot (insn) == DOT_YES
30577 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30578 || (type == TYPE_MUL
30579 && get_attr_dot (insn) == DOT_YES)
30580 || type == TYPE_DIV
30581 || (type == TYPE_INSERT
30582 && get_attr_size (insn) == SIZE_32))
30583 return true;
30584 }
30585
30586 return false;
30587 }
30588
30589 /* The function returns true if INSN can be issued only from
30590 the branch slot. */
30591
30592 static bool
30593 is_branch_slot_insn (rtx_insn *insn)
30594 {
30595 if (!insn || !NONDEBUG_INSN_P (insn)
30596 || GET_CODE (PATTERN (insn)) == USE
30597 || GET_CODE (PATTERN (insn)) == CLOBBER)
30598 return false;
30599
30600 if (rs6000_sched_groups)
30601 {
30602 enum attr_type type = get_attr_type (insn);
30603 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30604 return true;
30605 return false;
30606 }
30607
30608 return false;
30609 }
30610
30611 /* Return true if OUT_INSN sets a value that is
30612 used in the address generation computation of IN_INSN. */
30613 static bool
30614 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30615 {
30616 rtx out_set, in_set;
30617
30618 /* For performance reasons, only handle the simple case where
30619 both insns are a single_set. */
30620 out_set = single_set (out_insn);
30621 if (out_set)
30622 {
30623 in_set = single_set (in_insn);
30624 if (in_set)
30625 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30626 }
30627
30628 return false;
30629 }
30630
30631 /* Try to determine base/offset/size parts of the given MEM.
30632 Return true if successful, false if the values could not all
30633 be determined.
30634
30635 This function only looks for REG or REG+CONST address forms.
30636 REG+REG address form will return false. */
30637
30638 static bool
30639 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30640 HOST_WIDE_INT *size)
30641 {
30642 rtx addr_rtx;
30643 if (MEM_SIZE_KNOWN_P (mem))
30644 *size = MEM_SIZE (mem);
30645 else
30646 return false;
30647
30648 addr_rtx = (XEXP (mem, 0));
30649 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30650 addr_rtx = XEXP (addr_rtx, 1);
30651
30652 *offset = 0;
30653 while (GET_CODE (addr_rtx) == PLUS
30654 && CONST_INT_P (XEXP (addr_rtx, 1)))
30655 {
30656 *offset += INTVAL (XEXP (addr_rtx, 1));
30657 addr_rtx = XEXP (addr_rtx, 0);
30658 }
30659 if (!REG_P (addr_rtx))
30660 return false;
30661
30662 *base = addr_rtx;
30663 return true;
30664 }
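/* For example, given (mem:SI (plus:DI (reg:DI 9) (const_int 8))) with
   a known size, this returns base r9, offset 8 and size 4.  */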
30665
30666 /* Return true if the target storage location of MEM1 is adjacent
30667 to the target storage location of MEM2. */
30669
30670 static bool
30671 adjacent_mem_locations (rtx mem1, rtx mem2)
30672 {
30673 rtx reg1, reg2;
30674 HOST_WIDE_INT off1, size1, off2, size2;
30675
30676 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30677 && get_memref_parts (mem2, &reg2, &off2, &size2))
30678 return ((REGNO (reg1) == REGNO (reg2))
30679 && ((off1 + size1 == off2)
30680 || (off2 + size2 == off1)));
30681
30682 return false;
30683 }
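/* E.g. two 4-byte accesses at 8(r9) and 12(r9) are adjacent in either
   order; 8(r9) and 20(r9) are not.  */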
30684
30685 /* This function returns true if it can be determined that the two MEM
30686 locations overlap by at least 1 byte based on base reg/offset/size. */
30687
30688 static bool
30689 mem_locations_overlap (rtx mem1, rtx mem2)
30690 {
30691 rtx reg1, reg2;
30692 HOST_WIDE_INT off1, size1, off2, size2;
30693
30694 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30695 && get_memref_parts (mem2, &reg2, &off2, &size2))
30696 return ((REGNO (reg1) == REGNO (reg2))
30697 && (((off1 <= off2) && (off1 + size1 > off2))
30698 || ((off2 <= off1) && (off2 + size2 > off1))));
30699
30700 return false;
30701 }
30702
30703 /* Update the integer scheduling priority INSN_PRIORITY (INSN):
30704 increase the priority to execute INSN earlier, reduce it to
30705 execute INSN later. */
30708
30709 static int
30710 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30711 {
30712 rtx load_mem, str_mem;
30713 /* On machines (like the 750) which have asymmetric integer units,
30714 where one integer unit can do multiplies and divides and the other
30715 can't, reduce the priority of multiply/divide insns so they are
30716 scheduled after other integer operations. */
30717
30718 #if 0
30719 if (! INSN_P (insn))
30720 return priority;
30721
30722 if (GET_CODE (PATTERN (insn)) == USE)
30723 return priority;
30724
30725 switch (rs6000_tune) {
30726 case PROCESSOR_PPC750:
30727 switch (get_attr_type (insn))
30728 {
30729 default:
30730 break;
30731
30732 case TYPE_MUL:
30733 case TYPE_DIV:
30734 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30735 priority, priority);
30736 if (priority >= 0 && priority < 0x01000000)
30737 priority >>= 3;
30738 break;
30739 }
30740 }
30741 #endif
30742
30743 if (insn_must_be_first_in_group (insn)
30744 && reload_completed
30745 && current_sched_info->sched_max_insns_priority
30746 && rs6000_sched_restricted_insns_priority)
30747 {
30748
30749 /* Prioritize insns that can be dispatched only in the first
30750 dispatch slot. */
30751 if (rs6000_sched_restricted_insns_priority == 1)
30752 /* Attach highest priority to insn. This means that in
30753 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30754 precede 'priority' (critical path) considerations. */
30755 return current_sched_info->sched_max_insns_priority;
30756 else if (rs6000_sched_restricted_insns_priority == 2)
30757 /* Increase priority of insn by a minimal amount. This means that in
30758 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30759 considerations precede dispatch-slot restriction considerations. */
30760 return (priority + 1);
30761 }
30762
30763 if (rs6000_tune == PROCESSOR_POWER6
30764 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30765 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30766 /* Attach highest priority to insn if the scheduler has just issued two
30767 stores and this instruction is a load, or two loads and this instruction
30768 is a store. Power6 wants loads and stores scheduled alternately
30769 when possible */
30770 return current_sched_info->sched_max_insns_priority;
30771
30772 return priority;
30773 }
30774
30775 /* Return true if the instruction is nonpipelined on the Cell. */
30776 static bool
30777 is_nonpipeline_insn (rtx_insn *insn)
30778 {
30779 enum attr_type type;
30780 if (!insn || !NONDEBUG_INSN_P (insn)
30781 || GET_CODE (PATTERN (insn)) == USE
30782 || GET_CODE (PATTERN (insn)) == CLOBBER)
30783 return false;
30784
30785 type = get_attr_type (insn);
30786 if (type == TYPE_MUL
30787 || type == TYPE_DIV
30788 || type == TYPE_SDIV
30789 || type == TYPE_DDIV
30790 || type == TYPE_SSQRT
30791 || type == TYPE_DSQRT
30792 || type == TYPE_MFCR
30793 || type == TYPE_MFCRF
30794 || type == TYPE_MFJMPR)
30795 {
30796 return true;
30797 }
30798 return false;
30799 }
30800
30801
30802 /* Return how many instructions the machine can issue per cycle. */
30803
30804 static int
30805 rs6000_issue_rate (void)
30806 {
30807 /* Unless scheduling for register pressure, use issue rate of 1 for
30808 first scheduling pass to decrease degradation. */
30809 if (!reload_completed && !flag_sched_pressure)
30810 return 1;
30811
30812 switch (rs6000_tune) {
30813 case PROCESSOR_RS64A:
30814 case PROCESSOR_PPC601: /* ? */
30815 case PROCESSOR_PPC7450:
30816 return 3;
30817 case PROCESSOR_PPC440:
30818 case PROCESSOR_PPC603:
30819 case PROCESSOR_PPC750:
30820 case PROCESSOR_PPC7400:
30821 case PROCESSOR_PPC8540:
30822 case PROCESSOR_PPC8548:
30823 case PROCESSOR_CELL:
30824 case PROCESSOR_PPCE300C2:
30825 case PROCESSOR_PPCE300C3:
30826 case PROCESSOR_PPCE500MC:
30827 case PROCESSOR_PPCE500MC64:
30828 case PROCESSOR_PPCE5500:
30829 case PROCESSOR_PPCE6500:
30830 case PROCESSOR_TITAN:
30831 return 2;
30832 case PROCESSOR_PPC476:
30833 case PROCESSOR_PPC604:
30834 case PROCESSOR_PPC604e:
30835 case PROCESSOR_PPC620:
30836 case PROCESSOR_PPC630:
30837 return 4;
30838 case PROCESSOR_POWER4:
30839 case PROCESSOR_POWER5:
30840 case PROCESSOR_POWER6:
30841 case PROCESSOR_POWER7:
30842 return 5;
30843 case PROCESSOR_POWER8:
30844 return 7;
30845 case PROCESSOR_POWER9:
30846 return 6;
30847 default:
30848 return 1;
30849 }
30850 }
30851
30852 /* Return how many instructions to look ahead for better insn
30853 scheduling. */
30854
30855 static int
30856 rs6000_use_sched_lookahead (void)
30857 {
30858 switch (rs6000_tune)
30859 {
30860 case PROCESSOR_PPC8540:
30861 case PROCESSOR_PPC8548:
30862 return 4;
30863
30864 case PROCESSOR_CELL:
30865 return (reload_completed ? 8 : 0);
30866
30867 default:
30868 return 0;
30869 }
30870 }
30871
30872 /* We are choosing insn from the ready queue. Return zero if INSN can be
30873 chosen. */
30874 static int
30875 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30876 {
30877 if (ready_index == 0)
30878 return 0;
30879
30880 if (rs6000_tune != PROCESSOR_CELL)
30881 return 0;
30882
30883 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30884
30885 if (!reload_completed
30886 || is_nonpipeline_insn (insn)
30887 || is_microcoded_insn (insn))
30888 return 1;
30889
30890 return 0;
30891 }
30892
30893 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30894 and return true. */
30895
30896 static bool
30897 find_mem_ref (rtx pat, rtx *mem_ref)
30898 {
30899 const char * fmt;
30900 int i, j;
30901
30902 /* stack_tie does not produce any real memory traffic. */
30903 if (tie_operand (pat, VOIDmode))
30904 return false;
30905
30906 if (MEM_P (pat))
30907 {
30908 *mem_ref = pat;
30909 return true;
30910 }
30911
30912 /* Recursively process the pattern. */
30913 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30914
30915 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30916 {
30917 if (fmt[i] == 'e')
30918 {
30919 if (find_mem_ref (XEXP (pat, i), mem_ref))
30920 return true;
30921 }
30922 else if (fmt[i] == 'E')
30923 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30924 {
30925 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30926 return true;
30927 }
30928 }
30929
30930 return false;
30931 }
30932
30933 /* Determine if PAT is a PATTERN of a load insn. */
30934
30935 static bool
30936 is_load_insn1 (rtx pat, rtx *load_mem)
30937 {
30938 if (!pat)
30939 return false;
30940
30941 if (GET_CODE (pat) == SET)
30942 return find_mem_ref (SET_SRC (pat), load_mem);
30943
30944 if (GET_CODE (pat) == PARALLEL)
30945 {
30946 int i;
30947
30948 for (i = 0; i < XVECLEN (pat, 0); i++)
30949 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30950 return true;
30951 }
30952
30953 return false;
30954 }
30955
30956 /* Determine if INSN loads from memory. */
30957
30958 static bool
30959 is_load_insn (rtx insn, rtx *load_mem)
30960 {
30961 if (!insn || !INSN_P (insn))
30962 return false;
30963
30964 if (CALL_P (insn))
30965 return false;
30966
30967 return is_load_insn1 (PATTERN (insn), load_mem);
30968 }
30969
30970 /* Determine if PAT is a PATTERN of a store insn. */
30971
30972 static bool
30973 is_store_insn1 (rtx pat, rtx *str_mem)
30974 {
30975 if (!pat)
30976 return false;
30977
30978 if (GET_CODE (pat) == SET)
30979 return find_mem_ref (SET_DEST (pat), str_mem);
30980
30981 if (GET_CODE (pat) == PARALLEL)
30982 {
30983 int i;
30984
30985 for (i = 0; i < XVECLEN (pat, 0); i++)
30986 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30987 return true;
30988 }
30989
30990 return false;
30991 }
30992
30993 /* Determine if INSN stores to memory. */
30994
30995 static bool
30996 is_store_insn (rtx insn, rtx *str_mem)
30997 {
30998 if (!insn || !INSN_P (insn))
30999 return false;
31000
31001 return is_store_insn1 (PATTERN (insn), str_mem);
31002 }
31003
31004 /* Return whether TYPE is a Power9 pairable vector instruction type. */
31005
31006 static bool
31007 is_power9_pairable_vec_type (enum attr_type type)
31008 {
31009 switch (type)
31010 {
31011 case TYPE_VECSIMPLE:
31012 case TYPE_VECCOMPLEX:
31013 case TYPE_VECDIV:
31014 case TYPE_VECCMP:
31015 case TYPE_VECPERM:
31016 case TYPE_VECFLOAT:
31017 case TYPE_VECFDIV:
31018 case TYPE_VECDOUBLE:
31019 return true;
31020 default:
31021 break;
31022 }
31023 return false;
31024 }
31025
31026 /* Returns whether the dependence between INSN and NEXT is considered
31027 costly by the given target. */
31028
31029 static bool
31030 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31031 {
31032 rtx insn;
31033 rtx next;
31034 rtx load_mem, str_mem;
31035
31036 /* If the flag is not enabled - no dependence is considered costly;
31037 allow all dependent insns in the same group.
31038 This is the most aggressive option. */
31039 if (rs6000_sched_costly_dep == no_dep_costly)
31040 return false;
31041
31042 /* If the flag is set to 1 - a dependence is always considered costly;
31043 do not allow dependent instructions in the same group.
31044 This is the most conservative option. */
31045 if (rs6000_sched_costly_dep == all_deps_costly)
31046 return true;
31047
31048 insn = DEP_PRO (dep);
31049 next = DEP_CON (dep);
31050
31051 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31052 && is_load_insn (next, &load_mem)
31053 && is_store_insn (insn, &str_mem))
31054 /* Prevent load after store in the same group. */
31055 return true;
31056
31057 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31058 && is_load_insn (next, &load_mem)
31059 && is_store_insn (insn, &str_mem)
31060 && DEP_TYPE (dep) == REG_DEP_TRUE
31061 && mem_locations_overlap (str_mem, load_mem))
31062 /* Prevent load after store in the same group if it is a true
31063 dependence. */
31064 return true;
31065
31066 /* The flag is set to X; dependences with latency >= X are considered costly,
31067 and will not be scheduled in the same group. */
31068 if (rs6000_sched_costly_dep <= max_dep_latency
31069 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31070 return true;
31071
31072 return false;
31073 }
31074
31075 /* Return the next insn after INSN that is found before TAIL is reached,
31076 skipping any "non-active" insns - insns that will not actually occupy
31077 an issue slot. Return NULL_RTX if such an insn is not found. */
31078
31079 static rtx_insn *
31080 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31081 {
31082 if (insn == NULL_RTX || insn == tail)
31083 return NULL;
31084
31085 while (1)
31086 {
31087 insn = NEXT_INSN (insn);
31088 if (insn == NULL_RTX || insn == tail)
31089 return NULL;
31090
31091 if (CALL_P (insn)
31092 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31093 || (NONJUMP_INSN_P (insn)
31094 && GET_CODE (PATTERN (insn)) != USE
31095 && GET_CODE (PATTERN (insn)) != CLOBBER
31096 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31097 break;
31098 }
31099 return insn;
31100 }
31101
31102 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31103
31104 static int
31105 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31106 {
31107 int pos;
31108 int i;
31109 rtx_insn *tmp;
31110 enum attr_type type, type2;
31111
31112 type = get_attr_type (last_scheduled_insn);
31113
31114 /* Try to issue fixed point divides back-to-back in pairs so they will be
31115 routed to separate execution units and execute in parallel. */
31116 if (type == TYPE_DIV && divide_cnt == 0)
31117 {
31118 /* First divide has been scheduled. */
31119 divide_cnt = 1;
31120
31121 /* Scan the ready list looking for another divide, if found move it
31122 to the end of the list so it is chosen next. */
31123 pos = lastpos;
31124 while (pos >= 0)
31125 {
31126 if (recog_memoized (ready[pos]) >= 0
31127 && get_attr_type (ready[pos]) == TYPE_DIV)
31128 {
31129 tmp = ready[pos];
31130 for (i = pos; i < lastpos; i++)
31131 ready[i] = ready[i + 1];
31132 ready[lastpos] = tmp;
31133 break;
31134 }
31135 pos--;
31136 }
31137 }
31138 else
31139 {
31140 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31141 divide_cnt = 0;
31142
31143 /* The best dispatch throughput for vector and vector load insns can be
31144 achieved by interleaving a vector and vector load such that they'll
31145 dispatch to the same superslice. If this pairing cannot be achieved
31146 then it is best to pair vector insns together and vector load insns
31147 together.
31148
31149 To aid in this pairing, vec_pairing maintains the current state with
31150 the following values:
31151
31152 0 : Initial state, no vecload/vector pairing has been started.
31153
31154 1 : A vecload or vector insn has been issued and a candidate for
31155 pairing has been found and moved to the end of the ready
31156 list. */
31157 if (type == TYPE_VECLOAD)
31158 {
31159 /* Issued a vecload. */
31160 if (vec_pairing == 0)
31161 {
31162 int vecload_pos = -1;
31163 /* We issued a single vecload, look for a vector insn to pair it
31164 with. If one isn't found, try to pair another vecload. */
31165 pos = lastpos;
31166 while (pos >= 0)
31167 {
31168 if (recog_memoized (ready[pos]) >= 0)
31169 {
31170 type2 = get_attr_type (ready[pos]);
31171 if (is_power9_pairable_vec_type (type2))
31172 {
31173 /* Found a vector insn to pair with, move it to the
31174 end of the ready list so it is scheduled next. */
31175 tmp = ready[pos];
31176 for (i = pos; i < lastpos; i++)
31177 ready[i] = ready[i + 1];
31178 ready[lastpos] = tmp;
31179 vec_pairing = 1;
31180 return cached_can_issue_more;
31181 }
31182 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31183 /* Remember position of first vecload seen. */
31184 vecload_pos = pos;
31185 }
31186 pos--;
31187 }
31188 if (vecload_pos >= 0)
31189 {
31190 /* Didn't find a vector to pair with but did find a vecload,
31191 move it to the end of the ready list. */
31192 tmp = ready[vecload_pos];
31193 for (i = vecload_pos; i < lastpos; i++)
31194 ready[i] = ready[i + 1];
31195 ready[lastpos] = tmp;
31196 vec_pairing = 1;
31197 return cached_can_issue_more;
31198 }
31199 }
31200 }
31201 else if (is_power9_pairable_vec_type (type))
31202 {
31203 /* Issued a vector operation. */
31204 if (vec_pairing == 0)
31205 {
31206 int vec_pos = -1;
31207 /* We issued a single vector insn, look for a vecload to pair it
31208 with. If one isn't found, try to pair another vector. */
31209 pos = lastpos;
31210 while (pos >= 0)
31211 {
31212 if (recog_memoized (ready[pos]) >= 0)
31213 {
31214 type2 = get_attr_type (ready[pos]);
31215 if (type2 == TYPE_VECLOAD)
31216 {
31217 /* Found a vecload insn to pair with, move it to the
31218 end of the ready list so it is scheduled next. */
31219 tmp = ready[pos];
31220 for (i = pos; i < lastpos; i++)
31221 ready[i] = ready[i + 1];
31222 ready[lastpos] = tmp;
31223 vec_pairing = 1;
31224 return cached_can_issue_more;
31225 }
31226 else if (is_power9_pairable_vec_type (type2)
31227 && vec_pos == -1)
31228 /* Remember position of first vector insn seen. */
31229 vec_pos = pos;
31230 }
31231 pos--;
31232 }
31233 if (vec_pos >= 0)
31234 {
31235 /* Didn't find a vecload to pair with but did find a vector
31236 insn, move it to the end of the ready list. */
31237 tmp = ready[vec_pos];
31238 for (i = vec_pos; i < lastpos; i++)
31239 ready[i] = ready[i + 1];
31240 ready[lastpos] = tmp;
31241 vec_pairing = 1;
31242 return cached_can_issue_more;
31243 }
31244 }
31245 }
31246
31247 /* We've either finished a vec/vecload pair, couldn't find an insn to
31248 continue the current pair, or the last insn had nothing to do with
31249 pairing. In any case, reset the state. */
31250 vec_pairing = 0;
31251 }
31252
31253 return cached_can_issue_more;
31254 }
31255
31256 /* We are about to begin issuing insns for this clock cycle. */
31257
31258 static int
31259 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31260 rtx_insn **ready ATTRIBUTE_UNUSED,
31261 int *pn_ready ATTRIBUTE_UNUSED,
31262 int clock_var ATTRIBUTE_UNUSED)
31263 {
31264 int n_ready = *pn_ready;
31265
31266 if (sched_verbose)
31267 fprintf (dump, "// rs6000_sched_reorder :\n");
31268
31269 /* Reorder the ready list, if the second to last ready insn
31270 is a nonpipelined insn. */
31271 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31272 {
31273 if (is_nonpipeline_insn (ready[n_ready - 1])
31274 && (recog_memoized (ready[n_ready - 2]) > 0))
31275 /* Simply swap first two insns. */
31276 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31277 }
31278
31279 if (rs6000_tune == PROCESSOR_POWER6)
31280 load_store_pendulum = 0;
31281
31282 return rs6000_issue_rate ();
31283 }
31284
31285 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31286
31287 static int
31288 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31289 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31290 {
31291 if (sched_verbose)
31292 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31293
31294 /* For Power6, we need to handle some special cases to try and keep the
31295 store queue from overflowing and triggering expensive flushes.
31296
31297 This code monitors how load and store instructions are being issued
31298 and skews the ready list one way or the other to increase the likelihood
31299 that a desired instruction is issued at the proper time.
31300
31301 A couple of things are done. First, we maintain a "load_store_pendulum"
31302 to track the current state of load/store issue.
31303
31304 - If the pendulum is at zero, then no loads or stores have been
31305 issued in the current cycle so we do nothing.
31306
31307 - If the pendulum is 1, then a single load has been issued in this
31308 cycle and we attempt to locate another load in the ready list to
31309 issue with it.
31310
31311 - If the pendulum is -2, then two stores have already been
31312 issued in this cycle, so we increase the priority of the first load
31313 in the ready list to increase its likelihood of being chosen first
31314 in the next cycle.
31315
31316 - If the pendulum is -1, then a single store has been issued in this
31317 cycle and we attempt to locate another store in the ready list to
31318 issue with it, preferring a store to an adjacent memory location to
31319 facilitate store pairing in the store queue.
31320
31321 - If the pendulum is 2, then two loads have already been
31322 issued in this cycle, so we increase the priority of the first store
31323 in the ready list to increase its likelihood of being chosen first
31324 in the next cycle.
31325
31326 - If the pendulum < -2 or > 2, then do nothing.
31327
31328 Note: This code covers the most common scenarios. There exist
31329 non-load/store instructions which make use of the LSU and which
31330 would need to be accounted for to strictly model the behavior
31331 of the machine. Those instructions are currently unaccounted
31332 for to help minimize compile time overhead of this code.
31333 */
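/* Illustrative trace: after issuing two loads the pendulum reads +2,
so the first store found on the ready list gets a priority bump; once
a store issues, the pendulum swings back toward zero. */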
31334 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31335 {
31336 int pos;
31337 int i;
31338 rtx_insn *tmp;
31339 rtx load_mem, str_mem;
31340
31341 if (is_store_insn (last_scheduled_insn, &str_mem))
31342 /* Issuing a store, swing the load_store_pendulum to the left */
31343 load_store_pendulum--;
31344 else if (is_load_insn (last_scheduled_insn, &load_mem))
31345 /* Issuing a load, swing the load_store_pendulum to the right */
31346 load_store_pendulum++;
31347 else
31348 return cached_can_issue_more;
31349
31350 /* If the pendulum is balanced, or there is only one instruction on
31351 the ready list, then all is well, so return. */
31352 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31353 return cached_can_issue_more;
31354
31355 if (load_store_pendulum == 1)
31356 {
31357 /* A load has been issued in this cycle. Scan the ready list
31358 for another load to issue with it */
31359 pos = *pn_ready - 1;
31360
31361 while (pos >= 0)
31362 {
31363 if (is_load_insn (ready[pos], &load_mem))
31364 {
31365 /* Found a load. Move it to the head of the ready list,
31366 and adjust its priority so that it is more likely to
31367 stay there. */
31368 tmp = ready[pos];
31369 for (i = pos; i < *pn_ready - 1; i++)
31370 ready[i] = ready[i + 1];
31371 ready[*pn_ready - 1] = tmp;
31372
31373 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31374 INSN_PRIORITY (tmp)++;
31375 break;
31376 }
31377 pos--;
31378 }
31379 }
31380 else if (load_store_pendulum == -2)
31381 {
31382 /* Two stores have been issued in this cycle. Increase the
31383 priority of the first load in the ready list to favor it for
31384 issuing in the next cycle. */
31385 pos = *pn_ready - 1;
31386
31387 while (pos >= 0)
31388 {
31389 if (is_load_insn (ready[pos], &load_mem)
31390 && !sel_sched_p ()
31391 && INSN_PRIORITY_KNOWN (ready[pos]))
31392 {
31393 INSN_PRIORITY (ready[pos])++;
31394
31395 /* Adjust the pendulum to account for the fact that a load
31396 was found and increased in priority. This is to prevent
31397 increasing the priority of multiple loads */
31398 load_store_pendulum--;
31399
31400 break;
31401 }
31402 pos--;
31403 }
31404 }
31405 else if (load_store_pendulum == -1)
31406 {
31407 /* A store has been issued in this cycle. Scan the ready list for
31408 another store to issue with it, preferring a store to an adjacent
31409 memory location */
31410 int first_store_pos = -1;
31411
31412 pos = *pn_ready - 1;
31413
31414 while (pos >= 0)
31415 {
31416 if (is_store_insn (ready[pos], &str_mem))
31417 {
31418 rtx str_mem2;
31419 /* Maintain the index of the first store found on the
31420 list */
31421 if (first_store_pos == -1)
31422 first_store_pos = pos;
31423
31424 if (is_store_insn (last_scheduled_insn, &str_mem2)
31425 && adjacent_mem_locations (str_mem, str_mem2))
31426 {
31427 /* Found an adjacent store. Move it to the head of the
31428 ready list, and adjust its priority so that it is
31429 more likely to stay there. */
31430 tmp = ready[pos];
31431 for (i = pos; i < *pn_ready - 1; i++)
31432 ready[i] = ready[i + 1];
31433 ready[*pn_ready - 1] = tmp;
31434
31435 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31436 INSN_PRIORITY (tmp)++;
31437
31438 first_store_pos = -1;
31439
31440 break;
31441 }
31442 }
31443 pos--;
31444 }
31445
31446 if (first_store_pos >= 0)
31447 {
31448 /* An adjacent store wasn't found, but a non-adjacent store was,
31449 so move the non-adjacent store to the front of the ready
31450 list, and adjust its priority so that it is more likely to
31451 stay there. */
31452 tmp = ready[first_store_pos];
31453 for (i = first_store_pos; i < *pn_ready - 1; i++)
31454 ready[i] = ready[i + 1];
31455 ready[*pn_ready - 1] = tmp;
31456 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31457 INSN_PRIORITY (tmp)++;
31458 }
31459 }
31460 else if (load_store_pendulum == 2)
31461 {
31462 /* Two loads have been issued in this cycle. Increase the priority
31463 of the first store in the ready list to favor it for issuing in
31464 the next cycle. */
31465 pos = *pn_ready - 1;
31466
31467 while (pos >= 0)
31468 {
31469 if (is_store_insn (ready[pos], &str_mem)
31470 && !sel_sched_p ()
31471 && INSN_PRIORITY_KNOWN (ready[pos]))
31472 {
31473 INSN_PRIORITY (ready[pos])++;
31474
31475 /* Adjust the pendulum to account for the fact that a store
31476 was found and increased in priority. This is to prevent
31477 increasing the priority of multiple stores */
31478 load_store_pendulum++;
31479
31480 break;
31481 }
31482 pos--;
31483 }
31484 }
31485 }
31486
31487 /* Do Power9 dependent reordering if necessary. */
31488 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31489 && recog_memoized (last_scheduled_insn) >= 0)
31490 return power9_sched_reorder2 (ready, *pn_ready - 1);
31491
31492 return cached_can_issue_more;
31493 }
31494
31495 /* Return whether the presence of INSN causes a dispatch group termination
31496 of group WHICH_GROUP.
31497
31498 If WHICH_GROUP == current_group, this function will return true if INSN
31499 causes the termination of the current group (i.e, the dispatch group to
31500 which INSN belongs). This means that INSN will be the last insn in the
31501 group it belongs to.
31502
31503 If WHICH_GROUP == previous_group, this function will return true if INSN
31504 causes the termination of the previous group (i.e, the dispatch group that
31505 precedes the group to which INSN belongs). This means that INSN will be
31506 the first insn in the group it belongs to. */
31507
31508 static bool
31509 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31510 {
31511 bool first, last;
31512
31513 if (! insn)
31514 return false;
31515
31516 first = insn_must_be_first_in_group (insn);
31517 last = insn_must_be_last_in_group (insn);
31518
31519 if (first && last)
31520 return true;
31521
31522 if (which_group == current_group)
31523 return last;
31524 else if (which_group == previous_group)
31525 return first;
31526
31527 return false;
31528 }
31529
31530
31531 static bool
31532 insn_must_be_first_in_group (rtx_insn *insn)
31533 {
31534 enum attr_type type;
31535
31536 if (!insn
31537 || NOTE_P (insn)
31538 || DEBUG_INSN_P (insn)
31539 || GET_CODE (PATTERN (insn)) == USE
31540 || GET_CODE (PATTERN (insn)) == CLOBBER)
31541 return false;
31542
31543 switch (rs6000_tune)
31544 {
31545 case PROCESSOR_POWER5:
31546 if (is_cracked_insn (insn))
31547 return true;
31548 /* FALLTHRU */
31549 case PROCESSOR_POWER4:
31550 if (is_microcoded_insn (insn))
31551 return true;
31552
31553 if (!rs6000_sched_groups)
31554 return false;
31555
31556 type = get_attr_type (insn);
31557
31558 switch (type)
31559 {
31560 case TYPE_MFCR:
31561 case TYPE_MFCRF:
31562 case TYPE_MTCR:
31563 case TYPE_CR_LOGICAL:
31564 case TYPE_MTJMPR:
31565 case TYPE_MFJMPR:
31566 case TYPE_DIV:
31567 case TYPE_LOAD_L:
31568 case TYPE_STORE_C:
31569 case TYPE_ISYNC:
31570 case TYPE_SYNC:
31571 return true;
31572 default:
31573 break;
31574 }
31575 break;
31576 case PROCESSOR_POWER6:
31577 type = get_attr_type (insn);
31578
31579 switch (type)
31580 {
31581 case TYPE_EXTS:
31582 case TYPE_CNTLZ:
31583 case TYPE_TRAP:
31584 case TYPE_MUL:
31585 case TYPE_INSERT:
31586 case TYPE_FPCOMPARE:
31587 case TYPE_MFCR:
31588 case TYPE_MTCR:
31589 case TYPE_MFJMPR:
31590 case TYPE_MTJMPR:
31591 case TYPE_ISYNC:
31592 case TYPE_SYNC:
31593 case TYPE_LOAD_L:
31594 case TYPE_STORE_C:
31595 return true;
31596 case TYPE_SHIFT:
31597 if (get_attr_dot (insn) == DOT_NO
31598 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31599 return true;
31600 else
31601 break;
31602 case TYPE_DIV:
31603 if (get_attr_size (insn) == SIZE_32)
31604 return true;
31605 else
31606 break;
31607 case TYPE_LOAD:
31608 case TYPE_STORE:
31609 case TYPE_FPLOAD:
31610 case TYPE_FPSTORE:
31611 if (get_attr_update (insn) == UPDATE_YES)
31612 return true;
31613 else
31614 break;
31615 default:
31616 break;
31617 }
31618 break;
31619 case PROCESSOR_POWER7:
31620 type = get_attr_type (insn);
31621
31622 switch (type)
31623 {
31624 case TYPE_CR_LOGICAL:
31625 case TYPE_MFCR:
31626 case TYPE_MFCRF:
31627 case TYPE_MTCR:
31628 case TYPE_DIV:
31629 case TYPE_ISYNC:
31630 case TYPE_LOAD_L:
31631 case TYPE_STORE_C:
31632 case TYPE_MFJMPR:
31633 case TYPE_MTJMPR:
31634 return true;
31635 case TYPE_MUL:
31636 case TYPE_SHIFT:
31637 case TYPE_EXTS:
31638 if (get_attr_dot (insn) == DOT_YES)
31639 return true;
31640 else
31641 break;
31642 case TYPE_LOAD:
31643 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31644 || get_attr_update (insn) == UPDATE_YES)
31645 return true;
31646 else
31647 break;
31648 case TYPE_STORE:
31649 case TYPE_FPLOAD:
31650 case TYPE_FPSTORE:
31651 if (get_attr_update (insn) == UPDATE_YES)
31652 return true;
31653 else
31654 break;
31655 default:
31656 break;
31657 }
31658 break;
31659 case PROCESSOR_POWER8:
31660 type = get_attr_type (insn);
31661
31662 switch (type)
31663 {
31664 case TYPE_CR_LOGICAL:
31665 case TYPE_MFCR:
31666 case TYPE_MFCRF:
31667 case TYPE_MTCR:
31668 case TYPE_SYNC:
31669 case TYPE_ISYNC:
31670 case TYPE_LOAD_L:
31671 case TYPE_STORE_C:
31672 case TYPE_VECSTORE:
31673 case TYPE_MFJMPR:
31674 case TYPE_MTJMPR:
31675 return true;
31676 case TYPE_SHIFT:
31677 case TYPE_EXTS:
31678 case TYPE_MUL:
31679 if (get_attr_dot (insn) == DOT_YES)
31680 return true;
31681 else
31682 break;
31683 case TYPE_LOAD:
31684 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31685 || get_attr_update (insn) == UPDATE_YES)
31686 return true;
31687 else
31688 break;
31689 case TYPE_STORE:
31690 if (get_attr_update (insn) == UPDATE_YES
31691 && get_attr_indexed (insn) == INDEXED_YES)
31692 return true;
31693 else
31694 break;
31695 default:
31696 break;
31697 }
31698 break;
31699 default:
31700 break;
31701 }
31702
31703 return false;
31704 }
31705
31706 static bool
31707 insn_must_be_last_in_group (rtx_insn *insn)
31708 {
31709 enum attr_type type;
31710
31711 if (!insn
31712 || NOTE_P (insn)
31713 || DEBUG_INSN_P (insn)
31714 || GET_CODE (PATTERN (insn)) == USE
31715 || GET_CODE (PATTERN (insn)) == CLOBBER)
31716 return false;
31717
31718 switch (rs6000_tune) {
31719 case PROCESSOR_POWER4:
31720 case PROCESSOR_POWER5:
31721 if (is_microcoded_insn (insn))
31722 return true;
31723
31724 if (is_branch_slot_insn (insn))
31725 return true;
31726
31727 break;
31728 case PROCESSOR_POWER6:
31729 type = get_attr_type (insn);
31730
31731 switch (type)
31732 {
31733 case TYPE_EXTS:
31734 case TYPE_CNTLZ:
31735 case TYPE_TRAP:
31736 case TYPE_MUL:
31737 case TYPE_FPCOMPARE:
31738 case TYPE_MFCR:
31739 case TYPE_MTCR:
31740 case TYPE_MFJMPR:
31741 case TYPE_MTJMPR:
31742 case TYPE_ISYNC:
31743 case TYPE_SYNC:
31744 case TYPE_LOAD_L:
31745 case TYPE_STORE_C:
31746 return true;
31747 case TYPE_SHIFT:
31748 if (get_attr_dot (insn) == DOT_NO
31749 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31750 return true;
31751 else
31752 break;
31753 case TYPE_DIV:
31754 if (get_attr_size (insn) == SIZE_32)
31755 return true;
31756 else
31757 break;
31758 default:
31759 break;
31760 }
31761 break;
31762 case PROCESSOR_POWER7:
31763 type = get_attr_type (insn);
31764
31765 switch (type)
31766 {
31767 case TYPE_ISYNC:
31768 case TYPE_SYNC:
31769 case TYPE_LOAD_L:
31770 case TYPE_STORE_C:
31771 return true;
31772 case TYPE_LOAD:
31773 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31774 && get_attr_update (insn) == UPDATE_YES)
31775 return true;
31776 else
31777 break;
31778 case TYPE_STORE:
31779 if (get_attr_update (insn) == UPDATE_YES
31780 && get_attr_indexed (insn) == INDEXED_YES)
31781 return true;
31782 else
31783 break;
31784 default:
31785 break;
31786 }
31787 break;
31788 case PROCESSOR_POWER8:
31789 type = get_attr_type (insn);
31790
31791 switch (type)
31792 {
31793 case TYPE_MFCR:
31794 case TYPE_MTCR:
31795 case TYPE_ISYNC:
31796 case TYPE_SYNC:
31797 case TYPE_LOAD_L:
31798 case TYPE_STORE_C:
31799 return true;
31800 case TYPE_LOAD:
31801 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31802 && get_attr_update (insn) == UPDATE_YES)
31803 return true;
31804 else
31805 break;
31806 case TYPE_STORE:
31807 if (get_attr_update (insn) == UPDATE_YES
31808 && get_attr_indexed (insn) == INDEXED_YES)
31809 return true;
31810 else
31811 break;
31812 default:
31813 break;
31814 }
31815 break;
31816 default:
31817 break;
31818 }
31819
31820 return false;
31821 }
31822
31823 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31824 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31825
31826 static bool
31827 is_costly_group (rtx *group_insns, rtx next_insn)
31828 {
31829 int i;
31830 int issue_rate = rs6000_issue_rate ();
31831
31832 for (i = 0; i < issue_rate; i++)
31833 {
31834 sd_iterator_def sd_it;
31835 dep_t dep;
31836 rtx insn = group_insns[i];
31837
31838 if (!insn)
31839 continue;
31840
31841 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31842 {
31843 rtx next = DEP_CON (dep);
31844
31845 if (next == next_insn
31846 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31847 return true;
31848 }
31849 }
31850
31851 return false;
31852 }
31853
31854 /* Helper used by the function redefine_groups.
31855 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31856 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31857 to keep it "far" (in a separate group) from GROUP_INSNS, following
31858 one of the following schemes, depending on the value of the flag
31859 -minsert_sched_nops = X:
31860 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31861 in order to force NEXT_INSN into a separate group.
31862 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31863 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31864 insertion (has a group just ended, how many vacant issue slots remain in the
31865 last group, and how many dispatch groups were encountered so far). */
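/* For example (assuming -minsert-sched-nops is given a numeric value
X = 2 below sched_finish_regroup_exact, scheme 2 above): exactly two
nops are emitted before NEXT_INSN whenever its dependence on the
current group is costly, even if that does not fully force a new
group. */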
31866
31867 static int
31868 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31869 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31870 int *group_count)
31871 {
31872 rtx nop;
31873 bool force;
31874 int issue_rate = rs6000_issue_rate ();
31875 bool end = *group_end;
31876 int i;
31877
31878 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31879 return can_issue_more;
31880
31881 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31882 return can_issue_more;
31883
31884 force = is_costly_group (group_insns, next_insn);
31885 if (!force)
31886 return can_issue_more;
31887
31888 if (sched_verbose > 6)
31889 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
31890 *group_count, can_issue_more);
31891
31892 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31893 {
31894 if (*group_end)
31895 can_issue_more = 0;
31896
31897 /* Since only a branch can be issued in the last issue_slot, it is
31898 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31899 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31900 in this case the last nop will start a new group and the branch
31901 will be forced to the new group. */
31902 if (can_issue_more && !is_branch_slot_insn (next_insn))
31903 can_issue_more--;
31904
31905 /* Do we have a special group ending nop? */
31906 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31907 || rs6000_tune == PROCESSOR_POWER8)
31908 {
31909 nop = gen_group_ending_nop ();
31910 emit_insn_before (nop, next_insn);
31911 can_issue_more = 0;
31912 }
31913 else
31914 while (can_issue_more > 0)
31915 {
31916 nop = gen_nop ();
31917 emit_insn_before (nop, next_insn);
31918 can_issue_more--;
31919 }
31920
31921 *group_end = true;
31922 return 0;
31923 }
31924
31925 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31926 {
31927 int n_nops = rs6000_sched_insert_nops;
31928
31929 /* Nops can't be issued from the branch slot, so the effective
31930 issue_rate for nops is 'issue_rate - 1'. */
31931 if (can_issue_more == 0)
31932 can_issue_more = issue_rate;
31933 can_issue_more--;
31934 if (can_issue_more == 0)
31935 {
31936 can_issue_more = issue_rate - 1;
31937 (*group_count)++;
31938 end = true;
31939 for (i = 0; i < issue_rate; i++)
31940 {
31941 group_insns[i] = 0;
31942 }
31943 }
31944
31945 while (n_nops > 0)
31946 {
31947 nop = gen_nop ();
31948 emit_insn_before (nop, next_insn);
31949 if (can_issue_more == issue_rate - 1) /* new group begins */
31950 end = false;
31951 can_issue_more--;
31952 if (can_issue_more == 0)
31953 {
31954 can_issue_more = issue_rate - 1;
31955 (*group_count)++;
31956 end = true;
31957 for (i = 0; i < issue_rate; i++)
31958 {
31959 group_insns[i] = 0;
31960 }
31961 }
31962 n_nops--;
31963 }
31964
31965 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31966 can_issue_more++;
31967
31968 /* Is next_insn going to start a new group? */
31969 *group_end
31970 = (end
31971 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31972 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31973 || (can_issue_more < issue_rate &&
31974 insn_terminates_group_p (next_insn, previous_group)));
31975 if (*group_end && end)
31976 (*group_count)--;
31977
31978 if (sched_verbose > 6)
31979 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31980 *group_count, can_issue_more);
31981 return can_issue_more;
31982 }
31983
31984 return can_issue_more;
31985 }
31986
31987 /* This function tries to synch the dispatch groups that the compiler "sees"
31988 with the dispatch groups that the processor dispatcher is expected to
31989 form in practice. It tries to achieve this synchronization by forcing the
31990 estimated processor grouping on the compiler (as opposed to the function
31991 'pad_groups' which tries to force the scheduler's grouping on the processor).
31992
31993 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31994 examines the (estimated) dispatch groups that will be formed by the processor
31995 dispatcher. It marks these group boundaries to reflect the estimated
31996 processor grouping, overriding the grouping that the scheduler had marked.
31997 Depending on the value of the flag '-minsert-sched-nops' this function can
31998 force certain insns into separate groups or force a certain distance between
31999 them by inserting nops, for example, if there exists a "costly dependence"
32000 between the insns.
32001
32002 The function estimates the group boundaries that the processor will form as
32003 follows: It keeps track of how many vacant issue slots are available after
32004 each insn. A subsequent insn will start a new group if one of the following
32005 4 cases applies:
32006 - no more vacant issue slots remain in the current dispatch group.
32007 - only the last issue slot, which is the branch slot, is vacant, but the next
32008 insn is not a branch.
32009 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
32010 which means that a cracked insn (which occupies two issue slots) can't be
32011 issued in this group.
32012 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
32013 start a new group. */
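/* For instance, on a 5-slot dispatch group a cracked insn (which
needs two issue slots) arriving when only the branch slot and one
other slot remain vacant will start a new group (the third case
above; illustrative example). */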
32014
32015 static int
32016 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32017 rtx_insn *tail)
32018 {
32019 rtx_insn *insn, *next_insn;
32020 int issue_rate;
32021 int can_issue_more;
32022 int slot, i;
32023 bool group_end;
32024 int group_count = 0;
32025 rtx *group_insns;
32026
32027 /* Initialize. */
32028 issue_rate = rs6000_issue_rate ();
32029 group_insns = XALLOCAVEC (rtx, issue_rate);
32030 for (i = 0; i < issue_rate; i++)
32031 {
32032 group_insns[i] = 0;
32033 }
32034 can_issue_more = issue_rate;
32035 slot = 0;
32036 insn = get_next_active_insn (prev_head_insn, tail);
32037 group_end = false;
32038
32039 while (insn != NULL_RTX)
32040 {
32041 slot = (issue_rate - can_issue_more);
32042 group_insns[slot] = insn;
32043 can_issue_more =
32044 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32045 if (insn_terminates_group_p (insn, current_group))
32046 can_issue_more = 0;
32047
32048 next_insn = get_next_active_insn (insn, tail);
32049 if (next_insn == NULL_RTX)
32050 return group_count + 1;
32051
32052 /* Is next_insn going to start a new group? */
32053 group_end
32054 = (can_issue_more == 0
32055 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32056 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32057 || (can_issue_more < issue_rate &&
32058 insn_terminates_group_p (next_insn, previous_group)));
32059
32060 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32061 next_insn, &group_end, can_issue_more,
32062 &group_count);
32063
32064 if (group_end)
32065 {
32066 group_count++;
32067 can_issue_more = 0;
32068 for (i = 0; i < issue_rate; i++)
32069 {
32070 group_insns[i] = 0;
32071 }
32072 }
32073
32074 if (GET_MODE (next_insn) == TImode && can_issue_more)
32075 PUT_MODE (next_insn, VOIDmode);
32076 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32077 PUT_MODE (next_insn, TImode);
32078
32079 insn = next_insn;
32080 if (can_issue_more == 0)
32081 can_issue_more = issue_rate;
32082 } /* while */
32083
32084 return group_count;
32085 }
32086
32087 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32088 dispatch group boundaries that the scheduler had marked. Pad with nops
32089 any dispatch groups which have vacant issue slots, in order to force the
32090 scheduler's grouping on the processor dispatcher. The function
32091 returns the number of dispatch groups found. */
32092
32093 static int
32094 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32095 rtx_insn *tail)
32096 {
32097 rtx_insn *insn, *next_insn;
32098 rtx nop;
32099 int issue_rate;
32100 int can_issue_more;
32101 int group_end;
32102 int group_count = 0;
32103
32104 /* Initialize issue_rate. */
32105 issue_rate = rs6000_issue_rate ();
32106 can_issue_more = issue_rate;
32107
32108 insn = get_next_active_insn (prev_head_insn, tail);
32109 next_insn = get_next_active_insn (insn, tail);
32110
32111 while (insn != NULL_RTX)
32112 {
32113 can_issue_more =
32114 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32115
32116 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32117
32118 if (next_insn == NULL_RTX)
32119 break;
32120
32121 if (group_end)
32122 {
32123 /* If the scheduler had marked group termination at this location
32124 (between insn and next_insn), and neither insn nor next_insn will
32125 force group termination, pad the group with nops to force group
32126 termination. */
32127 if (can_issue_more
32128 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32129 && !insn_terminates_group_p (insn, current_group)
32130 && !insn_terminates_group_p (next_insn, previous_group))
32131 {
32132 if (!is_branch_slot_insn (next_insn))
32133 can_issue_more--;
32134
32135 while (can_issue_more)
32136 {
32137 nop = gen_nop ();
32138 emit_insn_before (nop, next_insn);
32139 can_issue_more--;
32140 }
32141 }
32142
32143 can_issue_more = issue_rate;
32144 group_count++;
32145 }
32146
32147 insn = next_insn;
32148 next_insn = get_next_active_insn (insn, tail);
32149 }
32150
32151 return group_count;
32152 }
32153
32154 /* We're beginning a new block. Initialize data structures as necessary. */
32155
32156 static void
32157 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32158 int sched_verbose ATTRIBUTE_UNUSED,
32159 int max_ready ATTRIBUTE_UNUSED)
32160 {
32161 last_scheduled_insn = NULL;
32162 load_store_pendulum = 0;
32163 divide_cnt = 0;
32164 vec_pairing = 0;
32165 }
32166
32167 /* The following function is called at the end of scheduling BB.
32168 After reload, it inserts nops to enforce the insn group bundling. */
32169
32170 static void
32171 rs6000_sched_finish (FILE *dump, int sched_verbose)
32172 {
32173 int n_groups;
32174
32175 if (sched_verbose)
32176 fprintf (dump, "=== Finishing schedule.\n");
32177
32178 if (reload_completed && rs6000_sched_groups)
32179 {
32180 /* Do not run sched_finish hook when selective scheduling enabled. */
32181 if (sel_sched_p ())
32182 return;
32183
32184 if (rs6000_sched_insert_nops == sched_finish_none)
32185 return;
32186
32187 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32188 n_groups = pad_groups (dump, sched_verbose,
32189 current_sched_info->prev_head,
32190 current_sched_info->next_tail);
32191 else
32192 n_groups = redefine_groups (dump, sched_verbose,
32193 current_sched_info->prev_head,
32194 current_sched_info->next_tail);
32195
32196 if (sched_verbose >= 6)
32197 {
32198 fprintf (dump, "ngroups = %d\n", n_groups);
32199 print_rtl (dump, current_sched_info->prev_head);
32200 fprintf (dump, "Done finish_sched\n");
32201 }
32202 }
32203 }
32204
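/* Scheduling state to save and restore when switching between
scheduling contexts (e.g. for the selective scheduler). */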
32205 struct rs6000_sched_context
32206 {
32207 short cached_can_issue_more; /* Issue slots left in this cycle. */
32208 rtx_insn *last_scheduled_insn; /* Most recently issued insn. */
32209 int load_store_pendulum; /* Power6 load/store balance. */
32210 int divide_cnt; /* Power9 back-to-back divide count. */
32211 int vec_pairing; /* Power9 vector/vecload pairing state. */
32212 };
32213
32214 typedef struct rs6000_sched_context rs6000_sched_context_def;
32215 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32216
32217 /* Allocate store for new scheduling context. */
32218 static void *
32219 rs6000_alloc_sched_context (void)
32220 {
32221 return xmalloc (sizeof (rs6000_sched_context_def));
32222 }
32223
32224 /* If CLEAN_P is true, initialize _SC with clean data;
32225 otherwise, initialize it from the global context. */
32226 static void
32227 rs6000_init_sched_context (void *_sc, bool clean_p)
32228 {
32229 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32230
32231 if (clean_p)
32232 {
32233 sc->cached_can_issue_more = 0;
32234 sc->last_scheduled_insn = NULL;
32235 sc->load_store_pendulum = 0;
32236 sc->divide_cnt = 0;
32237 sc->vec_pairing = 0;
32238 }
32239 else
32240 {
32241 sc->cached_can_issue_more = cached_can_issue_more;
32242 sc->last_scheduled_insn = last_scheduled_insn;
32243 sc->load_store_pendulum = load_store_pendulum;
32244 sc->divide_cnt = divide_cnt;
32245 sc->vec_pairing = vec_pairing;
32246 }
32247 }
32248
32249 /* Sets the global scheduling context to the one pointed to by _SC. */
32250 static void
32251 rs6000_set_sched_context (void *_sc)
32252 {
32253 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32254
32255 gcc_assert (sc != NULL);
32256
32257 cached_can_issue_more = sc->cached_can_issue_more;
32258 last_scheduled_insn = sc->last_scheduled_insn;
32259 load_store_pendulum = sc->load_store_pendulum;
32260 divide_cnt = sc->divide_cnt;
32261 vec_pairing = sc->vec_pairing;
32262 }
32263
32264 /* Free _SC. */
32265 static void
32266 rs6000_free_sched_context (void *_sc)
32267 {
32268 gcc_assert (_sc != NULL);
32269
32270 free (_sc);
32271 }
32272
32273 static bool
32274 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32275 {
32276 switch (get_attr_type (insn))
32277 {
32278 case TYPE_DIV:
32279 case TYPE_SDIV:
32280 case TYPE_DDIV:
32281 case TYPE_VECDIV:
32282 case TYPE_SSQRT:
32283 case TYPE_DSQRT:
32284 return false;
32285
32286 default:
32287 return true;
32288 }
32289 }
32290 \f
32291 /* Length in units of the trampoline for entering a nested function. */
32292
32293 int
32294 rs6000_trampoline_size (void)
32295 {
32296 int ret = 0;
32297
32298 switch (DEFAULT_ABI)
32299 {
32300 default:
32301 gcc_unreachable ();
32302
32303 case ABI_AIX:
32304 ret = (TARGET_32BIT) ? 12 : 24;
32305 break;
32306
32307 case ABI_ELFv2:
32308 gcc_assert (!TARGET_32BIT);
32309 ret = 32;
32310 break;
32311
32312 case ABI_DARWIN:
32313 case ABI_V4:
32314 ret = (TARGET_32BIT) ? 40 : 48;
32315 break;
32316 }
32317
32318 return ret;
32319 }
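/* E.g., for 64-bit AIX the 24 bytes above hold the 3-word function
descriptor built by rs6000_trampoline_init: entry address, TOC
pointer, and static chain, each 8 bytes (illustrative breakdown). */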
32320
32321 /* Emit RTL insns to initialize the variable parts of a trampoline.
32322 FNADDR is an RTX for the address of the function's pure code.
32323 CXT is an RTX for the static chain value for the function. */
32324
32325 static void
32326 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32327 {
32328 int regsize = (TARGET_32BIT) ? 4 : 8;
32329 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32330 rtx ctx_reg = force_reg (Pmode, cxt);
32331 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32332
32333 switch (DEFAULT_ABI)
32334 {
32335 default:
32336 gcc_unreachable ();
32337
32338 /* Under AIX, just build the 3-word function descriptor. */
32339 case ABI_AIX:
32340 {
32341 rtx fnmem, fn_reg, toc_reg;
32342
32343 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32344 error ("you cannot take the address of a nested function if you use "
32345 "the %qs option", "-mno-pointers-to-nested-functions");
32346
32347 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32348 fn_reg = gen_reg_rtx (Pmode);
32349 toc_reg = gen_reg_rtx (Pmode);
32350
32351 /* Macro to shorten the code expansions below. */
32352 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32353
32354 m_tramp = replace_equiv_address (m_tramp, addr);
32355
32356 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32357 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32358 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32359 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32360 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32361
32362 # undef MEM_PLUS
32363 }
32364 break;
32365
32366 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32367 case ABI_ELFv2:
32368 case ABI_DARWIN:
32369 case ABI_V4:
32370 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32371 LCT_NORMAL, VOIDmode,
32372 addr, Pmode,
32373 GEN_INT (rs6000_trampoline_size ()), SImode,
32374 fnaddr, Pmode,
32375 ctx_reg, Pmode);
32376 break;
32377 }
32378 }
32379
32380 \f
32381 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32382 identifier as an argument, so the front end shouldn't look it up. */
32383
32384 static bool
32385 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32386 {
32387 return is_attribute_p ("altivec", attr_id);
32388 }
32389
32390 /* Handle the "altivec" attribute. The attribute may have
32391 arguments as follows:
32392
32393 __attribute__((altivec(vector__)))
32394 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32395 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32396
32397 and may appear more than once (e.g., 'vector bool char') in a
32398 given declaration. */
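/* For example, 'vector unsigned short' reaches this handler as the
altivec(vector__) attribute applied to unsigned short (HImode) and
is rewritten to unsigned_V8HI_type_node below (illustrative
walk-through). */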
32399
32400 static tree
32401 rs6000_handle_altivec_attribute (tree *node,
32402 tree name ATTRIBUTE_UNUSED,
32403 tree args,
32404 int flags ATTRIBUTE_UNUSED,
32405 bool *no_add_attrs)
32406 {
32407 tree type = *node, result = NULL_TREE;
32408 machine_mode mode;
32409 int unsigned_p;
32410 char altivec_type
32411 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32412 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32413 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32414 : '?');
32415
32416 while (POINTER_TYPE_P (type)
32417 || TREE_CODE (type) == FUNCTION_TYPE
32418 || TREE_CODE (type) == METHOD_TYPE
32419 || TREE_CODE (type) == ARRAY_TYPE)
32420 type = TREE_TYPE (type);
32421
32422 mode = TYPE_MODE (type);
32423
32424 /* Check for invalid AltiVec type qualifiers. */
32425 if (type == long_double_type_node)
32426 error ("use of %<long double%> in AltiVec types is invalid");
32427 else if (type == boolean_type_node)
32428 error ("use of boolean types in AltiVec types is invalid");
32429 else if (TREE_CODE (type) == COMPLEX_TYPE)
32430 error ("use of %<complex%> in AltiVec types is invalid");
32431 else if (DECIMAL_FLOAT_MODE_P (mode))
32432 error ("use of decimal floating point types in AltiVec types is invalid");
32433 else if (!TARGET_VSX)
32434 {
32435 if (type == long_unsigned_type_node || type == long_integer_type_node)
32436 {
32437 if (TARGET_64BIT)
32438 error ("use of %<long%> in AltiVec types is invalid for "
32439 "64-bit code without %qs", "-mvsx");
32440 else if (rs6000_warn_altivec_long)
32441 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32442 "use %<int%>");
32443 }
32444 else if (type == long_long_unsigned_type_node
32445 || type == long_long_integer_type_node)
32446 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32447 "-mvsx");
32448 else if (type == double_type_node)
32449 error ("use of %<double%> in AltiVec types is invalid without %qs",
32450 "-mvsx");
32451 }
32452
32453 switch (altivec_type)
32454 {
32455 case 'v':
32456 unsigned_p = TYPE_UNSIGNED (type);
32457 switch (mode)
32458 {
32459 case E_TImode:
32460 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32461 break;
32462 case E_DImode:
32463 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32464 break;
32465 case E_SImode:
32466 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32467 break;
32468 case E_HImode:
32469 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32470 break;
32471 case E_QImode:
32472 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32473 break;
32474 case E_SFmode: result = V4SF_type_node; break;
32475 case E_DFmode: result = V2DF_type_node; break;
32476 /* If the user says 'vector int bool', we may be handed the 'bool'
32477 attribute _before_ the 'vector' attribute, and so select the
32478 proper type in the 'b' case below. */
32479 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32480 case E_V2DImode: case E_V2DFmode:
32481 result = type;
32482 default: break;
32483 }
32484 break;
32485 case 'b':
32486 switch (mode)
32487 {
32488 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32489 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32490 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32491 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32492 default: break;
32493 }
32494 break;
32495 case 'p':
32496 switch (mode)
32497 {
32498 case E_V8HImode: result = pixel_V8HI_type_node;
32499 default: break;
32500 }
32501 default: break;
32502 }
32503
32504 /* Propagate qualifiers attached to the element type
32505 onto the vector type. */
32506 if (result && result != type && TYPE_QUALS (type))
32507 result = build_qualified_type (result, TYPE_QUALS (type));
32508
32509 *no_add_attrs = true; /* No need to hang on to the attribute. */
32510
32511 if (result)
32512 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32513
32514 return NULL_TREE;
32515 }
32516
32517 /* AltiVec defines five built-in scalar types that serve as vector
32518 elements; we must teach the compiler how to mangle them. The 128-bit
32519 floating point mangling is target-specific as well. */
32520
32521 static const char *
32522 rs6000_mangle_type (const_tree type)
32523 {
32524 type = TYPE_MAIN_VARIANT (type);
32525
32526 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32527 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32528 return NULL;
32529
32530 if (type == bool_char_type_node) return "U6__boolc";
32531 if (type == bool_short_type_node) return "U6__bools";
32532 if (type == pixel_type_node) return "u7__pixel";
32533 if (type == bool_int_type_node) return "U6__booli";
32534 if (type == bool_long_long_type_node) return "U6__boolx";
32535
32536 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32537 return "g";
32538 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32539 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32540
32541 /* For all other types, use the default mangling. */
32542 return NULL;
32543 }
32544
32545 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32546 struct attribute_spec.handler. */
32547
32548 static tree
32549 rs6000_handle_longcall_attribute (tree *node, tree name,
32550 tree args ATTRIBUTE_UNUSED,
32551 int flags ATTRIBUTE_UNUSED,
32552 bool *no_add_attrs)
32553 {
32554 if (TREE_CODE (*node) != FUNCTION_TYPE
32555 && TREE_CODE (*node) != FIELD_DECL
32556 && TREE_CODE (*node) != TYPE_DECL)
32557 {
32558 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32559 name);
32560 *no_add_attrs = true;
32561 }
32562
32563 return NULL_TREE;
32564 }
32565
32566 /* Set longcall attributes on all functions declared when
32567 rs6000_default_long_calls is true. */
32568 static void
32569 rs6000_set_default_type_attributes (tree type)
32570 {
32571 if (rs6000_default_long_calls
32572 && (TREE_CODE (type) == FUNCTION_TYPE
32573 || TREE_CODE (type) == METHOD_TYPE))
32574 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32575 NULL_TREE,
32576 TYPE_ATTRIBUTES (type));
32577
32578 #if TARGET_MACHO
32579 darwin_set_default_type_attributes (type);
32580 #endif
32581 }
32582
32583 /* Return a reference suitable for calling a function with the
32584 longcall attribute. */
32585
32586 static rtx
32587 rs6000_longcall_ref (rtx call_ref, rtx arg)
32588 {
32589 /* System V adds '.' to the internal name, so skip any leading dots. */
32590 const char *call_name = XSTR (call_ref, 0);
32591 if (*call_name == '.')
32592 {
32593 while (*call_name == '.')
32594 call_name++;
32595
32596 tree node = get_identifier (call_name);
32597 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32598 }
32599
32600 if (TARGET_PLTSEQ)
32601 {
32602 rtx base = const0_rtx;
32603 int regno;
32604 if (DEFAULT_ABI == ABI_ELFv2)
32605 {
32606 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32607 regno = 12;
32608 }
32609 else
32610 {
32611 if (flag_pic)
32612 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32613 regno = 11;
32614 }
32615 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32616 may be used by a function global entry point. For SysV4, r11
32617 is used by __glink_PLTresolve lazy resolver entry. */
32618 rtx reg = gen_rtx_REG (Pmode, regno);
32619 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32620 UNSPEC_PLT16_HA);
32621 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32622 UNSPEC_PLT16_LO);
32623 emit_insn (gen_rtx_SET (reg, hi));
32624 emit_insn (gen_rtx_SET (reg, lo));
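/* In RTL terms, the two sets just emitted look roughly like:
     (set (reg REGNO) (unspec [BASE CALL_REF ARG] UNSPEC_PLT16_HA))
     (set (reg REGNO) (unspec [(reg REGNO) CALL_REF ARG] UNSPEC_PLT16_LO))
   i.e. a high-adjust/low-part pair that the insn patterns later render
   using PLT16 relocations against the called symbol.  */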
32625 return reg;
32626 }
32627
32628 return force_reg (Pmode, call_ref);
32629 }
32630 \f
32631 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32632 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32633 #endif
32634
32635 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32636 struct attribute_spec.handler. */
32637 static tree
32638 rs6000_handle_struct_attribute (tree *node, tree name,
32639 tree args ATTRIBUTE_UNUSED,
32640 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32641 {
32642 tree *type = NULL;
32643 if (DECL_P (*node))
32644 {
32645 if (TREE_CODE (*node) == TYPE_DECL)
32646 type = &TREE_TYPE (*node);
32647 }
32648 else
32649 type = node;
32650
32651 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32652 || TREE_CODE (*type) == UNION_TYPE)))
32653 {
32654 warning (OPT_Wattributes, "%qE attribute ignored", name);
32655 *no_add_attrs = true;
32656 }
32657
32658 else if ((is_attribute_p ("ms_struct", name)
32659 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32660 || ((is_attribute_p ("gcc_struct", name)
32661 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32662 {
32663 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32664 name);
32665 *no_add_attrs = true;
32666 }
32667
32668 return NULL_TREE;
32669 }
32670
32671 static bool
32672 rs6000_ms_bitfield_layout_p (const_tree record_type)
32673 {
32674 return (TARGET_USE_MS_BITFIELD_LAYOUT
32675 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32676 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
32677 }
32678 \f
32679 #ifdef USING_ELFOS_H
32680
32681 /* A get_unnamed_section callback, used for switching to toc_section. */
32682
32683 static void
32684 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32685 {
32686 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32687 && TARGET_MINIMAL_TOC)
32688 {
32689 if (!toc_initialized)
32690 {
32691 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32692 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32693 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32694 fprintf (asm_out_file, "\t.tc ");
32695 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32696 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32697 fprintf (asm_out_file, "\n");
32698
32699 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32700 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32701 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32702 fprintf (asm_out_file, " = .+32768\n");
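/* The 32768 bias places LCTOC1 in the middle of the section, so the
   whole 64 KiB minimal TOC stays reachable from it with signed 16-bit
   offsets.  */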
32703 toc_initialized = 1;
32704 }
32705 else
32706 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32707 }
32708 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32709 {
32710 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32711 if (!toc_initialized)
32712 {
32713 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32714 toc_initialized = 1;
32715 }
32716 }
32717 else
32718 {
32719 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32720 if (!toc_initialized)
32721 {
32722 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32723 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32724 fprintf (asm_out_file, " = .+32768\n");
32725 toc_initialized = 1;
32726 }
32727 }
32728 }
32729
32730 /* Implement TARGET_ASM_INIT_SECTIONS. */
32731
32732 static void
32733 rs6000_elf_asm_init_sections (void)
32734 {
32735 toc_section
32736 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32737
32738 sdata2_section
32739 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32740 SDATA2_SECTION_ASM_OP);
32741 }
32742
32743 /* Implement TARGET_SELECT_RTX_SECTION. */
32744
32745 static section *
32746 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32747 unsigned HOST_WIDE_INT align)
32748 {
32749 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32750 return toc_section;
32751 else
32752 return default_elf_select_rtx_section (mode, x, align);
32753 }
32754 \f
32755 /* For a SYMBOL_REF, set generic flags and then perform some
32756 target-specific processing.
32757
32758 When the AIX ABI is requested on a non-AIX system, replace the
32759 function name with the real name (with a leading .) rather than the
32760 function descriptor name. This saves a lot of overriding code to
32761 read the prefixes. */
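/* For example, a function "foo" is given the symbol name ".foo" here,
   matching the entry-point naming used by the AIX ABI.  */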
32762
32763 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32764 static void
32765 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32766 {
32767 default_encode_section_info (decl, rtl, first);
32768
32769 if (first
32770 && TREE_CODE (decl) == FUNCTION_DECL
32771 && !TARGET_AIX
32772 && DEFAULT_ABI == ABI_AIX)
32773 {
32774 rtx sym_ref = XEXP (rtl, 0);
32775 size_t len = strlen (XSTR (sym_ref, 0));
32776 char *str = XALLOCAVEC (char, len + 2);
32777 str[0] = '.';
32778 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32779 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32780 }
32781 }
32782
32783 static inline bool
32784 compare_section_name (const char *section, const char *templ)
32785 {
32786 int len;
32787
32788 len = strlen (templ);
32789 return (strncmp (section, templ, len) == 0
32790 && (section[len] == 0 || section[len] == '.'));
32791 }
32792
32793 bool
32794 rs6000_elf_in_small_data_p (const_tree decl)
32795 {
32796 if (rs6000_sdata == SDATA_NONE)
32797 return false;
32798
32799 /* We want to merge strings, so we never consider them small data. */
32800 if (TREE_CODE (decl) == STRING_CST)
32801 return false;
32802
32803 /* Functions are never in the small data area. */
32804 if (TREE_CODE (decl) == FUNCTION_DECL)
32805 return false;
32806
32807 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32808 {
32809 const char *section = DECL_SECTION_NAME (decl);
32810 if (compare_section_name (section, ".sdata")
32811 || compare_section_name (section, ".sdata2")
32812 || compare_section_name (section, ".gnu.linkonce.s")
32813 || compare_section_name (section, ".sbss")
32814 || compare_section_name (section, ".sbss2")
32815 || compare_section_name (section, ".gnu.linkonce.sb")
32816 || strcmp (section, ".PPC.EMB.sdata0") == 0
32817 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32818 return true;
32819 }
32820 else
32821 {
32822 /* If we are told not to put readonly data in sdata, then don't. */
32823 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32824 && !rs6000_readonly_in_sdata)
32825 return false;
32826
32827 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32828
32829 if (size > 0
32830 && size <= g_switch_value
32831 /* If it's not public, and we're not going to reference it there,
32832 there's no need to put it in the small data section. */
32833 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32834 return true;
32835 }
32836
32837 return false;
32838 }
32839
32840 #endif /* USING_ELFOS_H */
32841 \f
32842 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32843
32844 static bool
32845 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32846 {
32847 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32848 }
32849
32850 /* Do not place thread-local symbols refs in the object blocks. */
32851
32852 static bool
32853 rs6000_use_blocks_for_decl_p (const_tree decl)
32854 {
32855 return !DECL_THREAD_LOCAL_P (decl);
32856 }
32857 \f
32858 /* Return a REG that occurs in ADDR with coefficient 1.
32859 ADDR can be effectively incremented by incrementing REG.
32860
32861 r0 is special and we must not select it as an address
32862 register by this routine since our caller will try to
32863 increment the returned register via an "la" instruction. */
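/* For example, given (plus (reg r9) (const_int 8)) this returns r9
   (the register is chosen purely for illustration).  */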
32864
32865 rtx
32866 find_addr_reg (rtx addr)
32867 {
32868 while (GET_CODE (addr) == PLUS)
32869 {
32870 if (REG_P (XEXP (addr, 0))
32871 && REGNO (XEXP (addr, 0)) != 0)
32872 addr = XEXP (addr, 0);
32873 else if (REG_P (XEXP (addr, 1))
32874 && REGNO (XEXP (addr, 1)) != 0)
32875 addr = XEXP (addr, 1);
32876 else if (CONSTANT_P (XEXP (addr, 0)))
32877 addr = XEXP (addr, 1);
32878 else if (CONSTANT_P (XEXP (addr, 1)))
32879 addr = XEXP (addr, 0);
32880 else
32881 gcc_unreachable ();
32882 }
32883 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
32884 return addr;
32885 }
32886
32887 void
32888 rs6000_fatal_bad_address (rtx op)
32889 {
32890 fatal_insn ("bad address", op);
32891 }
32892
32893 #if TARGET_MACHO
32894
32895 typedef struct branch_island_d {
32896 tree function_name;
32897 tree label_name;
32898 int line_number;
32899 } branch_island;
32900
32901
32902 static vec<branch_island, va_gc> *branch_islands;
32903
32904 /* Remember to generate a branch island for far calls to the given
32905 function. */
32906
32907 static void
32908 add_compiler_branch_island (tree label_name, tree function_name,
32909 int line_number)
32910 {
32911 branch_island bi = {function_name, label_name, line_number};
32912 vec_safe_push (branch_islands, bi);
32913 }
32914
32915 /* Generate far-jump branch islands for everything recorded in
32916 branch_islands. Invoked immediately after the last instruction of
32917 the epilogue has been emitted; the branch islands must be appended
32918 to, and contiguous with, the function body. Mach-O stubs are
32919 generated in machopic_output_stub(). */
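/* For instance, with -fPIC and without TARGET_LINK_STACK, an island with
   the hypothetical label "L42$island" targeting "_foo" comes out roughly
   as:

	L42$island:
		mflr r0
		bcl 20,31,L42$island_pic
	L42$island_pic:
		mflr r11
		addis r11,r11,ha16(_foo - L42$island_pic)
		mtlr r0
		addi r12,r11,lo16(_foo - L42$island_pic)
		mtctr r12
		bctr  */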
32920
32921 static void
32922 macho_branch_islands (void)
32923 {
32924 char tmp_buf[512];
32925
32926 while (!vec_safe_is_empty (branch_islands))
32927 {
32928 branch_island *bi = &branch_islands->last ();
32929 const char *label = IDENTIFIER_POINTER (bi->label_name);
32930 const char *name = IDENTIFIER_POINTER (bi->function_name);
32931 char name_buf[512];
32932 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32933 if (name[0] == '*' || name[0] == '&')
32934 strcpy (name_buf, name+1);
32935 else
32936 {
32937 name_buf[0] = '_';
32938 strcpy (name_buf+1, name);
32939 }
32940 strcpy (tmp_buf, "\n");
32941 strcat (tmp_buf, label);
32942 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32943 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32944 dbxout_stabd (N_SLINE, bi->line_number);
32945 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32946 if (flag_pic)
32947 {
32948 if (TARGET_LINK_STACK)
32949 {
32950 char name[32];
32951 get_ppc476_thunk_name (name);
32952 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32953 strcat (tmp_buf, name);
32954 strcat (tmp_buf, "\n");
32955 strcat (tmp_buf, label);
32956 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32957 }
32958 else
32959 {
32960 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32961 strcat (tmp_buf, label);
32962 strcat (tmp_buf, "_pic\n");
32963 strcat (tmp_buf, label);
32964 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32965 }
32966
32967 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32968 strcat (tmp_buf, name_buf);
32969 strcat (tmp_buf, " - ");
32970 strcat (tmp_buf, label);
32971 strcat (tmp_buf, "_pic)\n");
32972
32973 strcat (tmp_buf, "\tmtlr r0\n");
32974
32975 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32976 strcat (tmp_buf, name_buf);
32977 strcat (tmp_buf, " - ");
32978 strcat (tmp_buf, label);
32979 strcat (tmp_buf, "_pic)\n");
32980
32981 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32982 }
32983 else
32984 {
32985 strcat (tmp_buf, ":\nlis r12,hi16(");
32986 strcat (tmp_buf, name_buf);
32987 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32988 strcat (tmp_buf, name_buf);
32989 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32990 }
32991 output_asm_insn (tmp_buf, 0);
32992 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32993 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32994 dbxout_stabd (N_SLINE, bi->line_number);
32995 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32996 branch_islands->pop ();
32997 }
32998 }
32999
33000 /* NO_PREVIOUS_DEF checks whether the function name is already recorded
33001 in the branch island list. */
33002
33003 static int
33004 no_previous_def (tree function_name)
33005 {
33006 branch_island *bi;
33007 unsigned ix;
33008
33009 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33010 if (function_name == bi->function_name)
33011 return 0;
33012 return 1;
33013 }
33014
33015 /* GET_PREV_LABEL gets the label name from the previous definition of
33016 the function. */
33017
33018 static tree
33019 get_prev_label (tree function_name)
33020 {
33021 branch_island *bi;
33022 unsigned ix;
33023
33024 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33025 if (function_name == bi->function_name)
33026 return bi->label_name;
33027 return NULL_TREE;
33028 }
33029
33030 /* Generate PIC and indirect symbol stubs. */
33031
33032 void
33033 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33034 {
33035 unsigned int length;
33036 char *symbol_name, *lazy_ptr_name;
33037 char *local_label_0;
33038 static int label = 0;
33039
33040 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33041 symb = (*targetm.strip_name_encoding) (symb);
33042
33043
33044 length = strlen (symb);
33045 symbol_name = XALLOCAVEC (char, length + 32);
33046 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33047
33048 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33049 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33050
33051 if (flag_pic == 2)
33052 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33053 else
33054 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33055
33056 if (flag_pic == 2)
33057 {
33058 fprintf (file, "\t.align 5\n");
33059
33060 fprintf (file, "%s:\n", stub);
33061 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33062
33063 label++;
33064 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
33065 sprintf (local_label_0, "\"L%011d$spb\"", label);
33066
33067 fprintf (file, "\tmflr r0\n");
33068 if (TARGET_LINK_STACK)
33069 {
33070 char name[32];
33071 get_ppc476_thunk_name (name);
33072 fprintf (file, "\tbl %s\n", name);
33073 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33074 }
33075 else
33076 {
33077 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33078 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33079 }
33080 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33081 lazy_ptr_name, local_label_0);
33082 fprintf (file, "\tmtlr r0\n");
33083 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33084 (TARGET_64BIT ? "ldu" : "lwzu"),
33085 lazy_ptr_name, local_label_0);
33086 fprintf (file, "\tmtctr r12\n");
33087 fprintf (file, "\tbctr\n");
33088 }
33089 else
33090 {
33091 fprintf (file, "\t.align 4\n");
33092
33093 fprintf (file, "%s:\n", stub);
33094 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33095
33096 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33097 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33098 (TARGET_64BIT ? "ldu" : "lwzu"),
33099 lazy_ptr_name);
33100 fprintf (file, "\tmtctr r12\n");
33101 fprintf (file, "\tbctr\n");
33102 }
33103
33104 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33105 fprintf (file, "%s:\n", lazy_ptr_name);
33106 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33107 fprintf (file, "%sdyld_stub_binding_helper\n",
33108 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33109 }
33110
33111 /* Legitimize PIC addresses. If the address is already
33112 position-independent, we return ORIG. Newly generated
33113 position-independent addresses go into a reg. This is REG if
33114 nonzero; otherwise we allocate register(s) as necessary. */
33115
33116 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
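/* I.e., SMALL_INT is true iff X fits a signed 16-bit immediate:
   -0x8000 <= INTVAL (X) < 0x8000.  */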
33117
33118 rtx
33119 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33120 rtx reg)
33121 {
33122 rtx base, offset;
33123
33124 if (reg == NULL && !reload_completed)
33125 reg = gen_reg_rtx (Pmode);
33126
33127 if (GET_CODE (orig) == CONST)
33128 {
33129 rtx reg_temp;
33130
33131 if (GET_CODE (XEXP (orig, 0)) == PLUS
33132 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33133 return orig;
33134
33135 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33136
33137 /* Use a different reg for the intermediate value, as
33138 it will be marked UNCHANGING. */
33139 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33140 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33141 Pmode, reg_temp);
33142 offset =
33143 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33144 Pmode, reg);
33145
33146 if (CONST_INT_P (offset))
33147 {
33148 if (SMALL_INT (offset))
33149 return plus_constant (Pmode, base, INTVAL (offset));
33150 else if (!reload_completed)
33151 offset = force_reg (Pmode, offset);
33152 else
33153 {
33154 rtx mem = force_const_mem (Pmode, orig);
33155 return machopic_legitimize_pic_address (mem, Pmode, reg);
33156 }
33157 }
33158 return gen_rtx_PLUS (Pmode, base, offset);
33159 }
33160
33161 /* Fall back on generic machopic code. */
33162 return machopic_legitimize_pic_address (orig, mode, reg);
33163 }
33164
33165 /* Output a .machine directive for the Darwin assembler, and call
33166 the generic start_file routine. */
33167
33168 static void
33169 rs6000_darwin_file_start (void)
33170 {
33171 static const struct
33172 {
33173 const char *arg;
33174 const char *name;
33175 HOST_WIDE_INT if_set;
33176 } mapping[] = {
33177 { "ppc64", "ppc64", MASK_64BIT },
33178 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33179 { "power4", "ppc970", 0 },
33180 { "G5", "ppc970", 0 },
33181 { "7450", "ppc7450", 0 },
33182 { "7400", "ppc7400", MASK_ALTIVEC },
33183 { "G4", "ppc7400", 0 },
33184 { "750", "ppc750", 0 },
33185 { "740", "ppc750", 0 },
33186 { "G3", "ppc750", 0 },
33187 { "604e", "ppc604e", 0 },
33188 { "604", "ppc604", 0 },
33189 { "603e", "ppc603", 0 },
33190 { "603", "ppc603", 0 },
33191 { "601", "ppc601", 0 },
33192 { NULL, "ppc", 0 } };
33193 const char *cpu_id = "";
33194 size_t i;
33195
33196 rs6000_file_start ();
33197 darwin_file_start ();
33198
33199 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33200
33201 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33202 cpu_id = rs6000_default_cpu;
33203
33204 if (global_options_set.x_rs6000_cpu_index)
33205 cpu_id = processor_target_table[rs6000_cpu_index].name;
33206
33207 /* Look through the mapping array. Pick the first name that either
33208 matches the argument, has a bit set in IF_SET that is also set
33209 in the target flags, or has a NULL name. */
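/* For example, -mcpu=G4 ends up with "ppc7400" (via the MASK_ALTIVEC
   entry), any 64-bit configuration stops at the leading "ppc64" entry
   through MASK_64BIT, and the final NULL entry makes "ppc" the
   fallback.  */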
33210
33211 i = 0;
33212 while (mapping[i].arg != NULL
33213 && strcmp (mapping[i].arg, cpu_id) != 0
33214 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33215 i++;
33216
33217 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33218 }
33219
33220 #endif /* TARGET_MACHO */
33221
33222 #if TARGET_ELF
33223 static int
33224 rs6000_elf_reloc_rw_mask (void)
33225 {
33226 if (flag_pic)
33227 return 3;
33228 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33229 return 2;
33230 else
33231 return 0;
33232 }
33233
33234 /* Record an element in the table of global constructors. SYMBOL is
33235 a SYMBOL_REF of the function to be called; PRIORITY is a number
33236 between 0 and MAX_INIT_PRIORITY.
33237
33238 This differs from default_named_section_asm_out_constructor in
33239 that we have special handling for -mrelocatable. */
33240
33241 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33242 static void
33243 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33244 {
33245 const char *section = ".ctors";
33246 char buf[18];
33247
33248 if (priority != DEFAULT_INIT_PRIORITY)
33249 {
33250 sprintf (buf, ".ctors.%.5u",
33251 /* Invert the numbering so the linker puts us in the proper
33252 order; constructors are run from right to left, and the
33253 linker sorts in increasing order. */
33254 MAX_INIT_PRIORITY - priority);
33255 section = buf;
33256 }
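/* For example, priority 101 lands in section ".ctors.65434"
   (65535 - 101), given the usual MAX_INIT_PRIORITY of 65535.  */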
33257
33258 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33259 assemble_align (POINTER_SIZE);
33260
33261 if (DEFAULT_ABI == ABI_V4
33262 && (TARGET_RELOCATABLE || flag_pic > 1))
33263 {
33264 fputs ("\t.long (", asm_out_file);
33265 output_addr_const (asm_out_file, symbol);
33266 fputs (")@fixup\n", asm_out_file);
33267 }
33268 else
33269 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33270 }
33271
33272 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33273 static void
33274 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33275 {
33276 const char *section = ".dtors";
33277 char buf[18];
33278
33279 if (priority != DEFAULT_INIT_PRIORITY)
33280 {
33281 sprintf (buf, ".dtors.%.5u",
33282 /* Invert the numbering so the linker puts us in the proper
33283 order; constructors are run from right to left, and the
33284 linker sorts in increasing order. */
33285 MAX_INIT_PRIORITY - priority);
33286 section = buf;
33287 }
33288
33289 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33290 assemble_align (POINTER_SIZE);
33291
33292 if (DEFAULT_ABI == ABI_V4
33293 && (TARGET_RELOCATABLE || flag_pic > 1))
33294 {
33295 fputs ("\t.long (", asm_out_file);
33296 output_addr_const (asm_out_file, symbol);
33297 fputs (")@fixup\n", asm_out_file);
33298 }
33299 else
33300 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33301 }
33302
33303 void
33304 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33305 {
33306 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33307 {
33308 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33309 ASM_OUTPUT_LABEL (file, name);
33310 fputs (DOUBLE_INT_ASM_OP, file);
33311 rs6000_output_function_entry (file, name);
33312 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33313 if (DOT_SYMBOLS)
33314 {
33315 fputs ("\t.size\t", file);
33316 assemble_name (file, name);
33317 fputs (",24\n\t.type\t.", file);
33318 assemble_name (file, name);
33319 fputs (",@function\n", file);
33320 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33321 {
33322 fputs ("\t.globl\t.", file);
33323 assemble_name (file, name);
33324 putc ('\n', file);
33325 }
33326 }
33327 else
33328 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33329 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33330 rs6000_output_function_entry (file, name);
33331 fputs (":\n", file);
33332 return;
33333 }
33334
33335 int uses_toc;
33336 if (DEFAULT_ABI == ABI_V4
33337 && (TARGET_RELOCATABLE || flag_pic > 1)
33338 && !TARGET_SECURE_PLT
33339 && (!constant_pool_empty_p () || crtl->profile)
33340 && (uses_toc = uses_TOC ()))
33341 {
33342 char buf[256];
33343
33344 if (uses_toc == 2)
33345 switch_to_other_text_partition ();
33346 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33347
33348 fprintf (file, "\t.long ");
33349 assemble_name (file, toc_label_name);
33350 need_toc_init = 1;
33351 putc ('-', file);
33352 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33353 assemble_name (file, buf);
33354 putc ('\n', file);
33355 if (uses_toc == 2)
33356 switch_to_other_text_partition ();
33357 }
33358
33359 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33360 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33361
33362 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33363 {
33364 char buf[256];
33365
33366 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33367
33368 fprintf (file, "\t.quad .TOC.-");
33369 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33370 assemble_name (file, buf);
33371 putc ('\n', file);
33372 }
33373
33374 if (DEFAULT_ABI == ABI_AIX)
33375 {
33376 const char *desc_name, *orig_name;
33377
33378 orig_name = (*targetm.strip_name_encoding) (name);
33379 desc_name = orig_name;
33380 while (*desc_name == '.')
33381 desc_name++;
33382
33383 if (TREE_PUBLIC (decl))
33384 fprintf (file, "\t.globl %s\n", desc_name);
33385
33386 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33387 fprintf (file, "%s:\n", desc_name);
33388 fprintf (file, "\t.long %s\n", orig_name);
33389 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33390 fputs ("\t.long 0\n", file);
33391 fprintf (file, "\t.previous\n");
33392 }
33393 ASM_OUTPUT_LABEL (file, name);
33394 }
33395
33396 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33397 static void
33398 rs6000_elf_file_end (void)
33399 {
33400 #ifdef HAVE_AS_GNU_ATTRIBUTE
33401 /* ??? The value emitted depends on options active at file end.
33402 Assume anyone using #pragma or attributes that might change
33403 options knows what they are doing. */
33404 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33405 && rs6000_passes_float)
33406 {
33407 int fp;
33408
33409 if (TARGET_HARD_FLOAT)
33410 fp = 1;
33411 else
33412 fp = 2;
33413 if (rs6000_passes_long_double)
33414 {
33415 if (!TARGET_LONG_DOUBLE_128)
33416 fp |= 2 * 4;
33417 else if (TARGET_IEEEQUAD)
33418 fp |= 3 * 4;
33419 else
33420 fp |= 1 * 4;
33421 }
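/* FP now encodes what is believed to be Tag_GNU_Power_ABI_FP: the low
   two bits describe scalar float support (1 = hard, 2 = soft) and the
   next two bits the long double format (1 = 128-bit IBM, 2 = 64-bit,
   3 = 128-bit IEEE), per the assignments above.  */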
33422 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33423 }
33424 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33425 {
33426 if (rs6000_passes_vector)
33427 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33428 (TARGET_ALTIVEC_ABI ? 2 : 1));
33429 if (rs6000_returns_struct)
33430 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33431 aix_struct_return ? 2 : 1);
33432 }
33433 #endif
33434 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33435 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33436 file_end_indicate_exec_stack ();
33437 #endif
33438
33439 if (flag_split_stack)
33440 file_end_indicate_split_stack ();
33441
33442 if (cpu_builtin_p)
33443 {
33444 /* We have expanded a CPU builtin, so we need to emit a reference to
33445 the special symbol that LIBC uses to declare that it supports the
33446 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 features in the TCB. */
33447 switch_to_section (data_section);
33448 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33449 fprintf (asm_out_file, "\t%s %s\n",
33450 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33451 }
33452 }
33453 #endif
33454
33455 #if TARGET_XCOFF
33456
33457 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33458 #define HAVE_XCOFF_DWARF_EXTRAS 0
33459 #endif
33460
33461 static enum unwind_info_type
33462 rs6000_xcoff_debug_unwind_info (void)
33463 {
33464 return UI_NONE;
33465 }
33466
33467 static void
33468 rs6000_xcoff_asm_output_anchor (rtx symbol)
33469 {
33470 char buffer[100];
33471
33472 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33473 SYMBOL_REF_BLOCK_OFFSET (symbol));
33474 fprintf (asm_out_file, "%s", SET_ASM_OP);
33475 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33476 fprintf (asm_out_file, ",");
33477 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33478 fprintf (asm_out_file, "\n");
33479 }
33480
33481 static void
33482 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33483 {
33484 fputs (GLOBAL_ASM_OP, stream);
33485 RS6000_OUTPUT_BASENAME (stream, name);
33486 putc ('\n', stream);
33487 }
33488
33489 /* A get_unnamed_section callback, used for read-only sections. DIRECTIVE
33490 points to the section name string variable. */
33491
33492 static void
33493 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33494 {
33495 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33496 *(const char *const *) directive,
33497 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33498 }
33499
33500 /* Likewise for read-write sections. */
33501
33502 static void
33503 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33504 {
33505 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33506 *(const char *const *) directive,
33507 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33508 }
33509
33510 static void
33511 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33512 {
33513 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33514 *(const char *const *) directive,
33515 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33516 }
33517
33518 /* A get_unnamed_section callback, used for switching to toc_section. */
33519
33520 static void
33521 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33522 {
33523 if (TARGET_MINIMAL_TOC)
33524 {
33525 /* toc_section is always selected at least once from
33526 rs6000_xcoff_file_start, so this is guaranteed to be
33527 defined exactly once in each file. */
33528 if (!toc_initialized)
33529 {
33530 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33531 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33532 toc_initialized = 1;
33533 }
33534 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33535 (TARGET_32BIT ? "" : ",3"));
33536 }
33537 else
33538 fputs ("\t.toc\n", asm_out_file);
33539 }
33540
33541 /* Implement TARGET_ASM_INIT_SECTIONS. */
33542
33543 static void
33544 rs6000_xcoff_asm_init_sections (void)
33545 {
33546 read_only_data_section
33547 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33548 &xcoff_read_only_section_name);
33549
33550 private_data_section
33551 = get_unnamed_section (SECTION_WRITE,
33552 rs6000_xcoff_output_readwrite_section_asm_op,
33553 &xcoff_private_data_section_name);
33554
33555 read_only_private_data_section
33556 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33557 &xcoff_private_rodata_section_name);
33558
33559 tls_data_section
33560 = get_unnamed_section (SECTION_TLS,
33561 rs6000_xcoff_output_tls_section_asm_op,
33562 &xcoff_tls_data_section_name);
33563
33564 tls_private_data_section
33565 = get_unnamed_section (SECTION_TLS,
33566 rs6000_xcoff_output_tls_section_asm_op,
33567 &xcoff_private_data_section_name);
33568
33569 toc_section
33570 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33571
33572 readonly_data_section = read_only_data_section;
33573 }
33574
33575 static int
33576 rs6000_xcoff_reloc_rw_mask (void)
33577 {
33578 return 3;
33579 }
33580
33581 static void
33582 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33583 tree decl ATTRIBUTE_UNUSED)
33584 {
33585 int smclass;
33586 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33587
33588 if (flags & SECTION_EXCLUDE)
33589 smclass = 4;
33590 else if (flags & SECTION_DEBUG)
33591 {
33592 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33593 return;
33594 }
33595 else if (flags & SECTION_CODE)
33596 smclass = 0;
33597 else if (flags & SECTION_TLS)
33598 smclass = 3;
33599 else if (flags & SECTION_WRITE)
33600 smclass = 2;
33601 else
33602 smclass = 1;
33603
33604 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33605 (flags & SECTION_CODE) ? "." : "",
33606 name, suffix[smclass], flags & SECTION_ENTSIZE);
33607 }
33608
33609 #define IN_NAMED_SECTION(DECL) \
33610 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33611 && DECL_SECTION_NAME (DECL) != NULL)
33612
33613 static section *
33614 rs6000_xcoff_select_section (tree decl, int reloc,
33615 unsigned HOST_WIDE_INT align)
33616 {
33617 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33618 named section. */
33619 if (align > BIGGEST_ALIGNMENT)
33620 {
33621 resolve_unique_section (decl, reloc, true);
33622 if (IN_NAMED_SECTION (decl))
33623 return get_named_section (decl, NULL, reloc);
33624 }
33625
33626 if (decl_readonly_section (decl, reloc))
33627 {
33628 if (TREE_PUBLIC (decl))
33629 return read_only_data_section;
33630 else
33631 return read_only_private_data_section;
33632 }
33633 else
33634 {
33635 #if HAVE_AS_TLS
33636 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33637 {
33638 if (TREE_PUBLIC (decl))
33639 return tls_data_section;
33640 else if (bss_initializer_p (decl))
33641 {
33642 /* Convert to COMMON to emit in BSS. */
33643 DECL_COMMON (decl) = 1;
33644 return tls_comm_section;
33645 }
33646 else
33647 return tls_private_data_section;
33648 }
33649 else
33650 #endif
33651 if (TREE_PUBLIC (decl))
33652 return data_section;
33653 else
33654 return private_data_section;
33655 }
33656 }
33657
33658 static void
33659 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33660 {
33661 const char *name;
33662
33663 /* Use select_section for private data and uninitialized data with
33664 alignment <= BIGGEST_ALIGNMENT. */
33665 if (!TREE_PUBLIC (decl)
33666 || DECL_COMMON (decl)
33667 || (DECL_INITIAL (decl) == NULL_TREE
33668 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33669 || DECL_INITIAL (decl) == error_mark_node
33670 || (flag_zero_initialized_in_bss
33671 && initializer_zerop (DECL_INITIAL (decl))))
33672 return;
33673
33674 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33675 name = (*targetm.strip_name_encoding) (name);
33676 set_decl_section_name (decl, name);
33677 }
33678
33679 /* Select section for constant in constant pool.
33680
33681 On RS/6000, all constants are in the private read-only data area.
33682 However, if this is being placed in the TOC it must be output as a
33683 toc entry. */
33684
33685 static section *
33686 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33687 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33688 {
33689 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33690 return toc_section;
33691 else
33692 return read_only_private_data_section;
33693 }
33694
33695 /* Remove any trailing [DS] or the like from the symbol name. */
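/* E.g. "foo[DS]" becomes "foo"; this assumes the mapping class suffix is
   always two characters plus the brackets.  */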
33696
33697 static const char *
33698 rs6000_xcoff_strip_name_encoding (const char *name)
33699 {
33700 size_t len;
33701 if (*name == '*')
33702 name++;
33703 len = strlen (name);
33704 if (name[len - 1] == ']')
33705 return ggc_alloc_string (name, len - 4);
33706 else
33707 return name;
33708 }
33709
33710 /* Section attributes. AIX is always PIC. */
33711
33712 static unsigned int
33713 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33714 {
33715 unsigned int align;
33716 unsigned int flags = default_section_type_flags (decl, name, reloc);
33717
33718 /* Align to at least UNIT size. */
33719 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33720 align = MIN_UNITS_PER_WORD;
33721 else
33722 /* Increase alignment of large objects if not already stricter. */
33723 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33724 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33725 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33726
33727 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33728 }
33729
33730 /* Output at beginning of assembler file.
33731
33732 Initialize the section names for the RS/6000 at this point.
33733
33734 Specify filename, including full path, to assembler.
33735
33736 We want to go into the TOC section so at least one .toc will be emitted.
33737 Also, in order to output proper .bs/.es pairs, we need at least one static
33738 [RW] section emitted.
33739
33740 Finally, declare mcount when profiling to make the assembler happy. */
33741
33742 static void
33743 rs6000_xcoff_file_start (void)
33744 {
33745 rs6000_gen_section_name (&xcoff_bss_section_name,
33746 main_input_filename, ".bss_");
33747 rs6000_gen_section_name (&xcoff_private_data_section_name,
33748 main_input_filename, ".rw_");
33749 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
33750 main_input_filename, ".rop_");
33751 rs6000_gen_section_name (&xcoff_read_only_section_name,
33752 main_input_filename, ".ro_");
33753 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33754 main_input_filename, ".tls_");
33755 rs6000_gen_section_name (&xcoff_tbss_section_name,
33756 main_input_filename, ".tbss_[UL]");
33757
33758 fputs ("\t.file\t", asm_out_file);
33759 output_quoted_string (asm_out_file, main_input_filename);
33760 fputc ('\n', asm_out_file);
33761 if (write_symbols != NO_DEBUG)
33762 switch_to_section (private_data_section);
33763 switch_to_section (toc_section);
33764 switch_to_section (text_section);
33765 if (profile_flag)
33766 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33767 rs6000_file_start ();
33768 }
33769
33770 /* Output at end of assembler file.
33771 On the RS/6000, referencing data should automatically pull in text. */
33772
33773 static void
33774 rs6000_xcoff_file_end (void)
33775 {
33776 switch_to_section (text_section);
33777 fputs ("_section_.text:\n", asm_out_file);
33778 switch_to_section (data_section);
33779 fputs (TARGET_32BIT
33780 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33781 asm_out_file);
33782 }
33783
33784 struct declare_alias_data
33785 {
33786 FILE *file;
33787 bool function_descriptor;
33788 };
33789
33790 /* Declare alias N. A helper function for call_for_symbol_and_aliases. */
33791
33792 static bool
33793 rs6000_declare_alias (struct symtab_node *n, void *d)
33794 {
33795 struct declare_alias_data *data = (struct declare_alias_data *)d;
33796 /* Main symbol is output specially, because varasm machinery does part of
33797 the job for us; we do not need to emit .globl/.lglobl and such. */
33798 if (!n->alias || n->weakref)
33799 return false;
33800
33801 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33802 return false;
33803
33804 /* Prevent assemble_alias from trying to use .set pseudo operation
33805 that does not behave as expected by the middle-end. */
33806 TREE_ASM_WRITTEN (n->decl) = true;
33807
33808 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33809 char *buffer = (char *) alloca (strlen (name) + 2);
33810 char *p;
33811 int dollar_inside = 0;
33812
33813 strcpy (buffer, name);
33814 p = strchr (buffer, '$');
33815 while (p) {
33816 *p = '_';
33817 dollar_inside++;
33818 p = strchr (p + 1, '$');
33819 }
33820 if (TREE_PUBLIC (n->decl))
33821 {
33822 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33823 {
33824 if (dollar_inside) {
33825 if (data->function_descriptor)
33826 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33827 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33828 }
33829 if (data->function_descriptor)
33830 {
33831 fputs ("\t.globl .", data->file);
33832 RS6000_OUTPUT_BASENAME (data->file, buffer);
33833 putc ('\n', data->file);
33834 }
33835 fputs ("\t.globl ", data->file);
33836 RS6000_OUTPUT_BASENAME (data->file, buffer);
33837 putc ('\n', data->file);
33838 }
33839 #ifdef ASM_WEAKEN_DECL
33840 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33841 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33842 #endif
33843 }
33844 else
33845 {
33846 if (dollar_inside)
33847 {
33848 if (data->function_descriptor)
33849 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33850 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33851 }
33852 if (data->function_descriptor)
33853 {
33854 fputs ("\t.lglobl .", data->file);
33855 RS6000_OUTPUT_BASENAME (data->file, buffer);
33856 putc ('\n', data->file);
33857 }
33858 fputs ("\t.lglobl ", data->file);
33859 RS6000_OUTPUT_BASENAME (data->file, buffer);
33860 putc ('\n', data->file);
33861 }
33862 if (data->function_descriptor)
33863 fputs (".", data->file);
33864 RS6000_OUTPUT_BASENAME (data->file, buffer);
33865 fputs (":\n", data->file);
33866 return false;
33867 }
33868
33869
33870 #ifdef HAVE_GAS_HIDDEN
33871 /* Helper function to calculate visibility of a DECL
33872 and return the value as a const string. */
33873
33874 static const char *
33875 rs6000_xcoff_visibility (tree decl)
33876 {
33877 static const char * const visibility_types[] = {
33878 "", ",protected", ",hidden", ",internal"
33879 };
33880
33881 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33882 return visibility_types[vis];
33883 }
33884 #endif
33885
33886
33887 /* This macro produces the initial definition of a function name.
33888 On the RS/6000, we need to place an extra '.' in the function name and
33889 output the function descriptor.
33890 Dollar signs are converted to underscores.
33891
33892 The csect for the function will have already been created when
33893 text_section was selected. We do have to go back to that csect, however.
33894
33895 The third and fourth parameters to the .function pseudo-op (16 and 044)
33896 are placeholders which no longer have any use.
33897
33898 Because AIX assembler's .set command has unexpected semantics, we output
33899 all aliases as alternative labels in front of the definition. */
33900
33901 void
33902 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33903 {
33904 char *buffer = (char *) alloca (strlen (name) + 1);
33905 char *p;
33906 int dollar_inside = 0;
33907 struct declare_alias_data data = {file, false};
33908
33909 strcpy (buffer, name);
33910 p = strchr (buffer, '$');
33911 while (p) {
33912 *p = '_';
33913 dollar_inside++;
33914 p = strchr (p + 1, '$');
33915 }
33916 if (TREE_PUBLIC (decl))
33917 {
33918 if (!RS6000_WEAK || !DECL_WEAK (decl))
33919 {
33920 if (dollar_inside) {
33921 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33922 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33923 }
33924 fputs ("\t.globl .", file);
33925 RS6000_OUTPUT_BASENAME (file, buffer);
33926 #ifdef HAVE_GAS_HIDDEN
33927 fputs (rs6000_xcoff_visibility (decl), file);
33928 #endif
33929 putc ('\n', file);
33930 }
33931 }
33932 else
33933 {
33934 if (dollar_inside) {
33935 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33936 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33937 }
33938 fputs ("\t.lglobl .", file);
33939 RS6000_OUTPUT_BASENAME (file, buffer);
33940 putc ('\n', file);
33941 }
33942 fputs ("\t.csect ", file);
33943 RS6000_OUTPUT_BASENAME (file, buffer);
33944 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33945 RS6000_OUTPUT_BASENAME (file, buffer);
33946 fputs (":\n", file);
33947 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33948 &data, true);
33949 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33950 RS6000_OUTPUT_BASENAME (file, buffer);
33951 fputs (", TOC[tc0], 0\n", file);
33952 in_section = NULL;
33953 switch_to_section (function_section (decl));
33954 putc ('.', file);
33955 RS6000_OUTPUT_BASENAME (file, buffer);
33956 fputs (":\n", file);
33957 data.function_descriptor = true;
33958 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33959 &data, true);
33960 if (!DECL_IGNORED_P (decl))
33961 {
33962 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33963 xcoffout_declare_function (file, decl, buffer);
33964 else if (write_symbols == DWARF2_DEBUG)
33965 {
33966 name = (*targetm.strip_name_encoding) (name);
33967 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33968 }
33969 }
33970 return;
33971 }
33972
33973
33974 /* Output assembly language to globalize a symbol from a DECL,
33975 possibly with visibility. */
33976
33977 void
33978 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33979 {
33980 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33981 fputs (GLOBAL_ASM_OP, stream);
33982 RS6000_OUTPUT_BASENAME (stream, name);
33983 #ifdef HAVE_GAS_HIDDEN
33984 fputs (rs6000_xcoff_visibility (decl), stream);
33985 #endif
33986 putc ('\n', stream);
33987 }
33988
33989 /* Output assembly language to define a symbol as COMMON from a DECL,
33990 possibly with visibility. */
33991
33992 void
33993 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33994 tree decl ATTRIBUTE_UNUSED,
33995 const char *name,
33996 unsigned HOST_WIDE_INT size,
33997 unsigned HOST_WIDE_INT align)
33998 {
33999 unsigned HOST_WIDE_INT align2 = 2;
34000
34001 if (align > 32)
34002 align2 = floor_log2 (align / BITS_PER_UNIT);
34003 else if (size > 4)
34004 align2 = 3;
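/* So, for example, an 8-byte object with default alignment is emitted
   as roughly ".comm name,8,3", i.e. aligned to 2**3 bytes.  */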
34005
34006 fputs (COMMON_ASM_OP, stream);
34007 RS6000_OUTPUT_BASENAME (stream, name);
34008
34009 fprintf (stream,
34010 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
34011 size, align2);
34012
34013 #ifdef HAVE_GAS_HIDDEN
34014 if (decl != NULL)
34015 fputs (rs6000_xcoff_visibility (decl), stream);
34016 #endif
34017 putc ('\n', stream);
34018 }
34019
34020 /* This macro produces the initial definition of an object (variable) name.
34021 Because AIX assembler's .set command has unexpected semantics, we output
34022 all aliases as alternative labels in front of the definition. */
34023
34024 void
34025 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34026 {
34027 struct declare_alias_data data = {file, false};
34028 RS6000_OUTPUT_BASENAME (file, name);
34029 fputs (":\n", file);
34030 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34031 &data, true);
34032 }
34033
34034 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34035
34036 void
34037 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34038 {
34039 fputs (integer_asm_op (size, FALSE), file);
34040 assemble_name (file, label);
34041 fputs ("-$", file);
34042 }
34043
34044 /* Output a symbol offset relative to the dbase for the current object.
34045 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34046 signed offsets.
34047
34048 __gcc_unwind_dbase is embedded in all executables/libraries through
34049 libgcc/config/rs6000/crtdbase.S. */
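/* With SIZE == 4 this emits something like
	.long	LABEL-__gcc_unwind_dbase
   (the exact directive depends on integer_asm_op for the target).  */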
34050
34051 void
34052 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34053 {
34054 fputs (integer_asm_op (size, FALSE), file);
34055 assemble_name (file, label);
34056 fputs("-__gcc_unwind_dbase", file);
34057 }
34058
34059 #ifdef HAVE_AS_TLS
34060 static void
34061 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34062 {
34063 rtx symbol;
34064 int flags;
34065 const char *symname;
34066
34067 default_encode_section_info (decl, rtl, first);
34068
34069 /* Careful not to prod global register variables. */
34070 if (!MEM_P (rtl))
34071 return;
34072 symbol = XEXP (rtl, 0);
34073 if (!SYMBOL_REF_P (symbol))
34074 return;
34075
34076 flags = SYMBOL_REF_FLAGS (symbol);
34077
34078 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34079 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34080
34081 SYMBOL_REF_FLAGS (symbol) = flags;
34082
34083 /* Append mapping class to extern decls. */
34084 symname = XSTR (symbol, 0);
34085 if (decl /* sync condition with assemble_external () */
34086 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34087 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34088 || TREE_CODE (decl) == FUNCTION_DECL)
34089 && symname[strlen (symname) - 1] != ']')
34090 {
34091 char *newname = (char *) alloca (strlen (symname) + 5);
34092 strcpy (newname, symname);
34093 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34094 ? "[DS]" : "[UA]"));
34095 XSTR (symbol, 0) = ggc_strdup (newname);
34096 }
34097 }
34098 #endif /* HAVE_AS_TLS */
34099 #endif /* TARGET_XCOFF */
34100
34101 void
34102 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34103 const char *name, const char *val)
34104 {
34105 fputs ("\t.weak\t", stream);
34106 RS6000_OUTPUT_BASENAME (stream, name);
34107 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34108 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34109 {
34110 if (TARGET_XCOFF)
34111 fputs ("[DS]", stream);
34112 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34113 if (TARGET_XCOFF)
34114 fputs (rs6000_xcoff_visibility (decl), stream);
34115 #endif
34116 fputs ("\n\t.weak\t.", stream);
34117 RS6000_OUTPUT_BASENAME (stream, name);
34118 }
34119 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34120 if (TARGET_XCOFF)
34121 fputs (rs6000_xcoff_visibility (decl), stream);
34122 #endif
34123 fputc ('\n', stream);
34124 if (val)
34125 {
34126 #ifdef ASM_OUTPUT_DEF
34127 ASM_OUTPUT_DEF (stream, name, val);
34128 #endif
34129 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34130 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34131 {
34132 fputs ("\t.set\t.", stream);
34133 RS6000_OUTPUT_BASENAME (stream, name);
34134 fputs (",.", stream);
34135 RS6000_OUTPUT_BASENAME (stream, val);
34136 fputc ('\n', stream);
34137 }
34138 }
34139 }
34140
34141
34142 /* Return true if INSN should not be copied. */
34143
34144 static bool
34145 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34146 {
34147 return recog_memoized (insn) >= 0
34148 && get_attr_cannot_copy (insn);
34149 }
34150
34151 /* Compute a (partial) cost for rtx X. Return true if the complete
34152 cost has been computed, and false if subexpressions should be
34153 scanned. In either case, *TOTAL contains the cost result. */
34154
34155 static bool
34156 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34157 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34158 {
34159 int code = GET_CODE (x);
34160
34161 switch (code)
34162 {
34163 /* On the RS/6000, if it is valid in the insn, it is free. */
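/* E.g. the 100 in (plus (reg) (const_int 100)) satisfies constraint
   "I" (a signed 16-bit value) and folds into a single addi, so it is
   costed as 0 here.  */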
34164 case CONST_INT:
34165 if (((outer_code == SET
34166 || outer_code == PLUS
34167 || outer_code == MINUS)
34168 && (satisfies_constraint_I (x)
34169 || satisfies_constraint_L (x)))
34170 || (outer_code == AND
34171 && (satisfies_constraint_K (x)
34172 || (mode == SImode
34173 ? satisfies_constraint_L (x)
34174 : satisfies_constraint_J (x))))
34175 || ((outer_code == IOR || outer_code == XOR)
34176 && (satisfies_constraint_K (x)
34177 || (mode == SImode
34178 ? satisfies_constraint_L (x)
34179 : satisfies_constraint_J (x))))
34180 || outer_code == ASHIFT
34181 || outer_code == ASHIFTRT
34182 || outer_code == LSHIFTRT
34183 || outer_code == ROTATE
34184 || outer_code == ROTATERT
34185 || outer_code == ZERO_EXTRACT
34186 || (outer_code == MULT
34187 && satisfies_constraint_I (x))
34188 || ((outer_code == DIV || outer_code == UDIV
34189 || outer_code == MOD || outer_code == UMOD)
34190 && exact_log2 (INTVAL (x)) >= 0)
34191 || (outer_code == COMPARE
34192 && (satisfies_constraint_I (x)
34193 || satisfies_constraint_K (x)))
34194 || ((outer_code == EQ || outer_code == NE)
34195 && (satisfies_constraint_I (x)
34196 || satisfies_constraint_K (x)
34197 || (mode == SImode
34198 ? satisfies_constraint_L (x)
34199 : satisfies_constraint_J (x))))
34200 || (outer_code == GTU
34201 && satisfies_constraint_I (x))
34202 || (outer_code == LTU
34203 && satisfies_constraint_P (x)))
34204 {
34205 *total = 0;
34206 return true;
34207 }
34208 else if ((outer_code == PLUS
34209 && reg_or_add_cint_operand (x, VOIDmode))
34210 || (outer_code == MINUS
34211 && reg_or_sub_cint_operand (x, VOIDmode))
34212 || ((outer_code == SET
34213 || outer_code == IOR
34214 || outer_code == XOR)
34215 && (INTVAL (x)
34216 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34217 {
34218 *total = COSTS_N_INSNS (1);
34219 return true;
34220 }
34221 /* FALLTHRU */
34222
34223 case CONST_DOUBLE:
34224 case CONST_WIDE_INT:
34225 case CONST:
34226 case HIGH:
34227 case SYMBOL_REF:
34228 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34229 return true;
34230
34231 case MEM:
34232 /* When optimizing for size, MEM should be slightly more expensive
34233 than generating address, e.g., (plus (reg) (const)).
34234 L1 cache latency is about two instructions. */
34235 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34236 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34237 *total += COSTS_N_INSNS (100);
34238 return true;
34239
34240 case LABEL_REF:
34241 *total = 0;
34242 return true;
34243
34244 case PLUS:
34245 case MINUS:
34246 if (FLOAT_MODE_P (mode))
34247 *total = rs6000_cost->fp;
34248 else
34249 *total = COSTS_N_INSNS (1);
34250 return false;
34251
34252 case MULT:
34253 if (CONST_INT_P (XEXP (x, 1))
34254 && satisfies_constraint_I (XEXP (x, 1)))
34255 {
34256 if (INTVAL (XEXP (x, 1)) >= -256
34257 && INTVAL (XEXP (x, 1)) <= 255)
34258 *total = rs6000_cost->mulsi_const9;
34259 else
34260 *total = rs6000_cost->mulsi_const;
34261 }
34262 else if (mode == SFmode)
34263 *total = rs6000_cost->fp;
34264 else if (FLOAT_MODE_P (mode))
34265 *total = rs6000_cost->dmul;
34266 else if (mode == DImode)
34267 *total = rs6000_cost->muldi;
34268 else
34269 *total = rs6000_cost->mulsi;
34270 return false;
34271
34272 case FMA:
34273 if (mode == SFmode)
34274 *total = rs6000_cost->fp;
34275 else
34276 *total = rs6000_cost->dmul;
34277 break;
34278
34279 case DIV:
34280 case MOD:
34281 if (FLOAT_MODE_P (mode))
34282 {
34283 *total = mode == DFmode ? rs6000_cost->ddiv
34284 : rs6000_cost->sdiv;
34285 return false;
34286 }
34287 /* FALLTHRU */
34288
34289 case UDIV:
34290 case UMOD:
34291 if (CONST_INT_P (XEXP (x, 1))
34292 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34293 {
34294 if (code == DIV || code == MOD)
34295 /* Shift, addze */
34296 *total = COSTS_N_INSNS (2);
34297 else
34298 /* Shift */
34299 *total = COSTS_N_INSNS (1);
34300 }
34301 else
34302 {
34303 if (GET_MODE (XEXP (x, 1)) == DImode)
34304 *total = rs6000_cost->divdi;
34305 else
34306 *total = rs6000_cost->divsi;
34307 }
34308 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34309 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34310 *total += COSTS_N_INSNS (2);
34311 return false;
34312
34313 case CTZ:
34314 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34315 return false;
34316
34317 case FFS:
34318 *total = COSTS_N_INSNS (4);
34319 return false;
34320
34321 case POPCOUNT:
34322 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34323 return false;
34324
34325 case PARITY:
34326 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34327 return false;
34328
34329 case NOT:
34330 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34331 *total = 0;
34332 else
34333 *total = COSTS_N_INSNS (1);
34334 return false;
34335
34336 case AND:
34337 if (CONST_INT_P (XEXP (x, 1)))
34338 {
34339 rtx left = XEXP (x, 0);
34340 rtx_code left_code = GET_CODE (left);
34341
34342 /* rotate-and-mask: 1 insn. */
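/* E.g. (and (ashift (reg) (const_int 16)) (const_int 0xff0000))
   maps onto a single rlwinm-style rotate-and-mask insn.  */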
34343 if ((left_code == ROTATE
34344 || left_code == ASHIFT
34345 || left_code == LSHIFTRT)
34346 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34347 {
34348 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34349 if (!CONST_INT_P (XEXP (left, 1)))
34350 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34351 *total += COSTS_N_INSNS (1);
34352 return true;
34353 }
34354
34355 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34356 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34357 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34358 || (val & 0xffff) == val
34359 || (val & 0xffff0000) == val
34360 || ((val & 0xffff) == 0 && mode == SImode))
34361 {
34362 *total = rtx_cost (left, mode, AND, 0, speed);
34363 *total += COSTS_N_INSNS (1);
34364 return true;
34365 }
34366
34367 /* 2 insns. */
34368 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34369 {
34370 *total = rtx_cost (left, mode, AND, 0, speed);
34371 *total += COSTS_N_INSNS (2);
34372 return true;
34373 }
34374 }
34375
34376 *total = COSTS_N_INSNS (1);
34377 return false;
34378
34379 case IOR:
34380 /* FIXME */
34381 *total = COSTS_N_INSNS (1);
34382 return true;
34383
34384 case CLZ:
34385 case XOR:
34386 case ZERO_EXTRACT:
34387 *total = COSTS_N_INSNS (1);
34388 return false;
34389
34390 case ASHIFT:
34391 /* The EXTSWSLI instruction is a combined instruction, so don't count
34392 the sign extension and the shift separately within the insn. */
34393 if (TARGET_EXTSWSLI && mode == DImode
34394 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34395 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34396 {
34397 *total = 0;
34398 return false;
34399 }
34400 /* fall through */
34401
34402 case ASHIFTRT:
34403 case LSHIFTRT:
34404 case ROTATE:
34405 case ROTATERT:
34406 /* Handle mul_highpart. */
34407 if (outer_code == TRUNCATE
34408 && GET_CODE (XEXP (x, 0)) == MULT)
34409 {
34410 if (mode == DImode)
34411 *total = rs6000_cost->muldi;
34412 else
34413 *total = rs6000_cost->mulsi;
34414 return true;
34415 }
34416 else if (outer_code == AND)
34417 *total = 0;
34418 else
34419 *total = COSTS_N_INSNS (1);
34420 return false;
34421
34422 case SIGN_EXTEND:
34423 case ZERO_EXTEND:
34424 if (MEM_P (XEXP (x, 0)))
34425 *total = 0;
34426 else
34427 *total = COSTS_N_INSNS (1);
34428 return false;
34429
34430 case COMPARE:
34431 case NEG:
34432 case ABS:
34433 if (!FLOAT_MODE_P (mode))
34434 {
34435 *total = COSTS_N_INSNS (1);
34436 return false;
34437 }
34438 /* FALLTHRU */
34439
34440 case FLOAT:
34441 case UNSIGNED_FLOAT:
34442 case FIX:
34443 case UNSIGNED_FIX:
34444 case FLOAT_TRUNCATE:
34445 *total = rs6000_cost->fp;
34446 return false;
34447
34448 case FLOAT_EXTEND:
34449 if (mode == DFmode)
34450 *total = rs6000_cost->sfdf_convert;
34451 else
34452 *total = rs6000_cost->fp;
34453 return false;
34454
34455 case UNSPEC:
34456 switch (XINT (x, 1))
34457 {
34458 case UNSPEC_FRSP:
34459 *total = rs6000_cost->fp;
34460 return true;
34461
34462 default:
34463 break;
34464 }
34465 break;
34466
34467 case CALL:
34468 case IF_THEN_ELSE:
34469 if (!speed)
34470 {
34471 *total = COSTS_N_INSNS (1);
34472 return true;
34473 }
34474 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34475 {
34476 *total = rs6000_cost->fp;
34477 return false;
34478 }
34479 break;
34480
34481 case NE:
34482 case EQ:
34483 case GTU:
34484 case LTU:
34485 /* Carry bit requires mode == Pmode.
34486 NEG or PLUS already counted so only add one. */
34487 if (mode == Pmode
34488 && (outer_code == NEG || outer_code == PLUS))
34489 {
34490 *total = COSTS_N_INSNS (1);
34491 return true;
34492 }
34493 /* FALLTHRU */
34494
34495 case GT:
34496 case LT:
34497 case UNORDERED:
34498 if (outer_code == SET)
34499 {
34500 if (XEXP (x, 1) == const0_rtx)
34501 {
34502 *total = COSTS_N_INSNS (2);
34503 return true;
34504 }
34505 else
34506 {
34507 *total = COSTS_N_INSNS (3);
34508 return false;
34509 }
34510 }
34511 /* CC COMPARE. */
34512 if (outer_code == COMPARE)
34513 {
34514 *total = 0;
34515 return true;
34516 }
34517 break;
34518
34519 default:
34520 break;
34521 }
34522
34523 return false;
34524 }
34525
34526 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34527
34528 static bool
34529 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34530 int opno, int *total, bool speed)
34531 {
34532 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34533
34534 fprintf (stderr,
34535 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34536 "opno = %d, total = %d, speed = %s, x:\n",
34537 ret ? "complete" : "scan inner",
34538 GET_MODE_NAME (mode),
34539 GET_RTX_NAME (outer_code),
34540 opno,
34541 *total,
34542 speed ? "true" : "false");
34543
34544 debug_rtx (x);
34545
34546 return ret;
34547 }
34548
34549 static int
34550 rs6000_insn_cost (rtx_insn *insn, bool speed)
34551 {
34552 if (recog_memoized (insn) < 0)
34553 return 0;
34554
34555 if (!speed)
34556 return get_attr_length (insn);
34557
34558 int cost = get_attr_cost (insn);
34559 if (cost > 0)
34560 return cost;
34561
34562 int n = get_attr_length (insn) / 4;
34563 enum attr_type type = get_attr_type (insn);
34564
34565 switch (type)
34566 {
34567 case TYPE_LOAD:
34568 case TYPE_FPLOAD:
34569 case TYPE_VECLOAD:
34570 cost = COSTS_N_INSNS (n + 1);
34571 break;
34572
34573 case TYPE_MUL:
34574 switch (get_attr_size (insn))
34575 {
34576 case SIZE_8:
34577 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34578 break;
34579 case SIZE_16:
34580 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34581 break;
34582 case SIZE_32:
34583 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34584 break;
34585 case SIZE_64:
34586 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34587 break;
34588 default:
34589 gcc_unreachable ();
34590 }
34591 break;
34592 case TYPE_DIV:
34593 switch (get_attr_size (insn))
34594 {
34595 case SIZE_32:
34596 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34597 break;
34598 case SIZE_64:
34599 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34600 break;
34601 default:
34602 gcc_unreachable ();
34603 }
34604 break;
34605
34606 case TYPE_FP:
34607 cost = n * rs6000_cost->fp;
34608 break;
34609 case TYPE_DMUL:
34610 cost = n * rs6000_cost->dmul;
34611 break;
34612 case TYPE_SDIV:
34613 cost = n * rs6000_cost->sdiv;
34614 break;
34615 case TYPE_DDIV:
34616 cost = n * rs6000_cost->ddiv;
34617 break;
34618
34619 case TYPE_SYNC:
34620 case TYPE_LOAD_L:
34621 case TYPE_MFCR:
34622 case TYPE_MFCRF:
34623 cost = COSTS_N_INSNS (n + 2);
34624 break;
34625
34626 default:
34627 cost = COSTS_N_INSNS (n);
34628 }
34629
34630 return cost;
34631 }
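
/* A worked example of the multiply costing above, assuming a cost table
where muldi is COSTS_N_INSNS (4): a DImode multiply pattern of length 8
bytes has n = 2, so its cost is COSTS_N_INSNS (2 - 1) + rs6000_cost->muldi
= COSTS_N_INSNS (5); the COSTS_N_INSNS (n - 1) term charges for the
setup instructions emitted alongside the multiply itself. */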
34632
34633 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34634
34635 static int
34636 rs6000_debug_address_cost (rtx x, machine_mode mode,
34637 addr_space_t as, bool speed)
34638 {
34639 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34640
34641 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34642 ret, speed ? "true" : "false");
34643 debug_rtx (x);
34644
34645 return ret;
34646 }
34647
34648
34649 /* A C expression returning the cost of moving data from a register of class
34650 CLASS1 to one of CLASS2. */
34651
34652 static int
34653 rs6000_register_move_cost (machine_mode mode,
34654 reg_class_t from, reg_class_t to)
34655 {
34656 int ret;
34657 reg_class_t rclass;
34658
34659 if (TARGET_DEBUG_COST)
34660 dbg_cost_ctrl++;
34661
34662 /* If we have VSX, we can easily move between FPR or Altivec registers,
34663 otherwise we can only easily move within classes.
34664 Do this first so we give best-case answers for union classes
34665 containing both gprs and vsx regs. */
34666 HARD_REG_SET to_vsx, from_vsx;
34667 COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
34668 AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
34669 COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
34670 AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
34671 if (!hard_reg_set_empty_p (to_vsx)
34672 && !hard_reg_set_empty_p (from_vsx)
34673 && (TARGET_VSX
34674 || hard_reg_set_intersect_p (to_vsx, from_vsx)))
34675 {
34676 int reg = FIRST_FPR_REGNO;
34677 if (TARGET_VSX
34678 || (TEST_HARD_REG_BIT (to_vsx, FIRST_ALTIVEC_REGNO)
34679 && TEST_HARD_REG_BIT (from_vsx, FIRST_ALTIVEC_REGNO)))
34680 reg = FIRST_ALTIVEC_REGNO;
34681 ret = 2 * hard_regno_nregs (reg, mode);
34682 }
34683
34684 /* Moves from/to GENERAL_REGS; the comma expressions below set RCLASS to the class on the other side of the GPR move. */
34685 else if ((rclass = from, reg_classes_intersect_p (to, GENERAL_REGS))
34686 || (rclass = to, reg_classes_intersect_p (from, GENERAL_REGS)))
34687 {
34688 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34689 {
34690 if (TARGET_DIRECT_MOVE)
34691 {
34692 if (rs6000_tune == PROCESSOR_POWER9)
34693 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34694 else
34695 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34696 /* SFmode requires a conversion when moving between gprs
34697 and vsx. */
34698 if (mode == SFmode)
34699 ret += 2;
34700 }
34701 else
34702 ret = (rs6000_memory_move_cost (mode, rclass, false)
34703 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34704 }
34705
34706 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34707 shift. */
34708 else if (rclass == CR_REGS)
34709 ret = 4;
34710
34711 /* For those processors that have slow LR/CTR moves, make them more
34712 expensive than memory in order to bias spills to memory.  */
34713 else if ((rs6000_tune == PROCESSOR_POWER6
34714 || rs6000_tune == PROCESSOR_POWER7
34715 || rs6000_tune == PROCESSOR_POWER8
34716 || rs6000_tune == PROCESSOR_POWER9)
34717 && reg_class_subset_p (rclass, SPECIAL_REGS))
34718 ret = 6 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34719
34720 else
34721 /* A move will cost one instruction per GPR moved. */
34722 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34723 }
34724
34725 /* Everything else has to go through GENERAL_REGS. */
34726 else
34727 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34728 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34729
34730 if (TARGET_DEBUG_COST)
34731 {
34732 if (dbg_cost_ctrl == 1)
34733 fprintf (stderr,
34734 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
34735 ret, GET_MODE_NAME (mode), reg_class_names[from],
34736 reg_class_names[to]);
34737 dbg_cost_ctrl--;
34738 }
34739
34740 return ret;
34741 }
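
/* For illustration (the numbers depend on the tuning target): a DFmode
move between GENERAL_REGS and FLOAT_REGS with TARGET_DIRECT_MOVE on a
64-bit power9 costs 2 * hard_regno_nregs (FIRST_GPR_REGNO, DFmode) = 2,
much cheaper than the memory round trip charged when direct moves are
not available. */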
34742
34743 /* A C expression returning the cost of moving data of MODE from a register
34744 to or from memory. */
34745
34746 static int
34747 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34748 bool in ATTRIBUTE_UNUSED)
34749 {
34750 int ret;
34751
34752 if (TARGET_DEBUG_COST)
34753 dbg_cost_ctrl++;
34754
34755 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34756 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34757 else if (reg_classes_intersect_p (rclass, FLOAT_REGS)
34758 || reg_classes_intersect_p (rclass, VSX_REGS))
34759 ret = 4 * hard_regno_nregs (FIRST_FPR_REGNO, mode);
34760 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34761 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34762 else
34763 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34764
34765 if (TARGET_DEBUG_COST)
34766 {
34767 if (dbg_cost_ctrl == 1)
34768 fprintf (stderr,
34769 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34770 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34771 dbg_cost_ctrl--;
34772 }
34773
34774 return ret;
34775 }
34776
34777 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
34778
34779 The register allocator chooses GEN_OR_VSX_REGS for the allocno
34780 class if GENERAL_REGS and VSX_REGS cost is lower than the memory
34781 cost. This happens a lot when TARGET_DIRECT_MOVE makes the register
34782 move cost between GENERAL_REGS and VSX_REGS low.
34783
34784 It might seem reasonable to use a union class. After all, if usage
34785 of vsr is low and gpr high, it might make sense to spill gpr to vsr
34786 rather than memory. However, in cases where register pressure of
34787 both is high, like the cactus_adm spec test, allowing
34788 GEN_OR_VSX_REGS as the allocno class results in bad decisions in
34789 the first scheduling pass. This is partly due to an allocno of
34790 GEN_OR_VSX_REGS wrongly contributing to the GENERAL_REGS pressure
34791 class, which gives too high a pressure for GENERAL_REGS and too low
34792 for VSX_REGS. So, force a choice of the subclass here.
34793
34794 The best class is also the union if GENERAL_REGS and VSX_REGS have
34795 the same cost. In that case we do use GEN_OR_VSX_REGS as the
34796 allocno class, since trying to narrow down the class by regno mode
34797 is prone to error. For example, SImode is allowed in VSX regs and
34798 in some cases (e.g. gcc.target/powerpc/p9-xxbr-3.c do_bswap32_vect)
34799 it would be wrong to choose an allocno of GENERAL_REGS based on
34800 SImode. */
34801
34802 static reg_class_t
34803 rs6000_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
34804 reg_class_t allocno_class,
34805 reg_class_t best_class)
34806 {
34807 switch (allocno_class)
34808 {
34809 case GEN_OR_VSX_REGS:
34810 /* best_class must be a subset of allocno_class. */
34811 gcc_checking_assert (best_class == GEN_OR_VSX_REGS
34812 || best_class == GEN_OR_FLOAT_REGS
34813 || best_class == VSX_REGS
34814 || best_class == ALTIVEC_REGS
34815 || best_class == FLOAT_REGS
34816 || best_class == GENERAL_REGS
34817 || best_class == BASE_REGS);
34818 /* Use best_class but choose wider classes when copying from the
34819 wider class to best_class is cheap. This mimics IRA choice
34820 of allocno class. */
34821 if (best_class == BASE_REGS)
34822 return GENERAL_REGS;
34823 if (TARGET_VSX
34824 && (best_class == FLOAT_REGS || best_class == ALTIVEC_REGS))
34825 return VSX_REGS;
34826 return best_class;
34827
34828 default:
34829 break;
34830 }
34831
34832 return allocno_class;
34833 }
34834
34835 /* Returns the decl of a target-specific builtin that implements the
34836 reciprocal of the function, or NULL_TREE if not available. */
34837
34838 static tree
34839 rs6000_builtin_reciprocal (tree fndecl)
34840 {
34841 switch (DECL_FUNCTION_CODE (fndecl))
34842 {
34843 case VSX_BUILTIN_XVSQRTDP:
34844 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34845 return NULL_TREE;
34846
34847 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34848
34849 case VSX_BUILTIN_XVSQRTSP:
34850 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34851 return NULL_TREE;
34852
34853 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34854
34855 default:
34856 return NULL_TREE;
34857 }
34858 }
34859
34860 /* Load up a constant. If the mode is a vector mode, splat the value across
34861 all of the vector elements. */
34862
34863 static rtx
34864 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34865 {
34866 rtx reg;
34867
34868 if (mode == SFmode || mode == DFmode)
34869 {
34870 rtx d = const_double_from_real_value (dconst, mode);
34871 reg = force_reg (mode, d);
34872 }
34873 else if (mode == V4SFmode)
34874 {
34875 rtx d = const_double_from_real_value (dconst, SFmode);
34876 rtvec v = gen_rtvec (4, d, d, d, d);
34877 reg = gen_reg_rtx (mode);
34878 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34879 }
34880 else if (mode == V2DFmode)
34881 {
34882 rtx d = const_double_from_real_value (dconst, DFmode);
34883 rtvec v = gen_rtvec (2, d, d);
34884 reg = gen_reg_rtx (mode);
34885 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34886 }
34887 else
34888 gcc_unreachable ();
34889
34890 return reg;
34891 }
34892
34893 /* Generate an FMA instruction. */
34894
34895 static void
34896 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34897 {
34898 machine_mode mode = GET_MODE (target);
34899 rtx dst;
34900
34901 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34902 gcc_assert (dst != NULL);
34903
34904 if (dst != target)
34905 emit_move_insn (target, dst);
34906 }
34907
34908 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34909
34910 static void
34911 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34912 {
34913 machine_mode mode = GET_MODE (dst);
34914 rtx r;
34915
34916 /* This is a tad more complicated, since the fnma_optab is for
34917 a different expression: fma(-m1, m2, a), which is the same
34918 thing except in the case of signed zeros.
34919
34920 Fortunately we know that if FMA is supported, then FNMSUB is
34921 also supported in the ISA.  Just expand it directly. */
34922
34923 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34924
34925 r = gen_rtx_NEG (mode, a);
34926 r = gen_rtx_FMA (mode, m1, m2, r);
34927 r = gen_rtx_NEG (mode, r);
34928 emit_insn (gen_rtx_SET (dst, r));
34929 }
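
/* The identity relied on above: -fma (m1, m2, -a) == a - m1 * m2
== fma (-m1, m2, a) whenever the result is nonzero. The two forms
differ only for signed zeros; e.g. with m1 = 0.0, m2 = 1.0 and a = 0.0,
fma (-m1, m2, a) is +0.0 while -fma (m1, m2, -a) is -0.0, which is why
FNMSUB is expanded directly instead of through fnma_optab. */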
34930
34931 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34932 add a reg_note saying that this was a division. Support both scalar and
34933 vector divide. Assumes no trapping math and finite arguments. */
34934
34935 void
34936 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34937 {
34938 machine_mode mode = GET_MODE (dst);
34939 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34940 int i;
34941
34942 /* Low precision estimates guarantee 5 bits of accuracy. High
34943 precision estimates guarantee 14 bits of accuracy. SFmode
34944 requires 23 bits of accuracy. DFmode requires 52 bits of
34945 accuracy. Each pass at least doubles the accuracy, leading
34946 to the following. */
34947 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34948 if (mode == DFmode || mode == V2DFmode)
34949 passes++;
34950
34951 enum insn_code code = optab_handler (smul_optab, mode);
34952 insn_gen_fn gen_mul = GEN_FCN (code);
34953
34954 gcc_assert (code != CODE_FOR_nothing);
34955
34956 one = rs6000_load_constant_and_splat (mode, dconst1);
34957
34958 /* x0 = 1./d estimate */
34959 x0 = gen_reg_rtx (mode);
34960 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34961 UNSPEC_FRES)));
34962
34963 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34964 if (passes > 1)
34965 {
34966 /* e0 = 1. - d * x0 */
34967 e0 = gen_reg_rtx (mode);
34968 rs6000_emit_nmsub (e0, d, x0, one);
34969
34970 /* x1 = x0 + e0 * x0 */
34971 x1 = gen_reg_rtx (mode);
34972 rs6000_emit_madd (x1, e0, x0, x0);
34973
34974 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34975 ++i, xprev = xnext, eprev = enext)
34976 {
34977 /* enext = eprev * eprev */
34978 enext = gen_reg_rtx (mode);
34979 emit_insn (gen_mul (enext, eprev, eprev));
34980
34981 /* xnext = xprev + enext * xprev */
34982 xnext = gen_reg_rtx (mode);
34983 rs6000_emit_madd (xnext, enext, xprev, xprev);
34984 }
34985 }
34986 else
34987 xprev = x0;
34988
34989 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34990
34991 /* u = n * xprev */
34992 u = gen_reg_rtx (mode);
34993 emit_insn (gen_mul (u, n, xprev));
34994
34995 /* v = n - (d * u) */
34996 v = gen_reg_rtx (mode);
34997 rs6000_emit_nmsub (v, d, u, n);
34998
34999 /* dst = (v * xprev) + u */
35000 rs6000_emit_madd (dst, v, xprev, u);
35001
35002 if (note_p)
35003 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
35004 }
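
/* The arithmetic behind the PASSES computation above: each
Newton-Raphson step roughly doubles the number of correct bits in the
reciprocal estimate. From a 5-bit estimate, three passes give
5 -> 10 -> 20 -> 40 bits, covering the 23-bit SFmode fraction, and the
extra DFmode pass gives 80 bits, covering the 52-bit DFmode fraction.
With TARGET_RECIP_PRECISION (14-bit estimates), one pass reaches 28
bits for SFmode and two passes reach 56 bits for DFmode. */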
35005
35006 /* Goldschmidt's Algorithm for single/double-precision floating point
35007 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
35008
35009 void
35010 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
35011 {
35012 machine_mode mode = GET_MODE (src);
35013 rtx e = gen_reg_rtx (mode);
35014 rtx g = gen_reg_rtx (mode);
35015 rtx h = gen_reg_rtx (mode);
35016
35017 /* Low precision estimates guarantee 5 bits of accuracy. High
35018 precision estimates guarantee 14 bits of accuracy. SFmode
35019 requires 23 bits of accuracy. DFmode requires 52 bits of
35020 accuracy. Each pass at least doubles the accuracy, leading
35021 to the following. */
35022 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35023 if (mode == DFmode || mode == V2DFmode)
35024 passes++;
35025
35026 int i;
35027 rtx mhalf;
35028 enum insn_code code = optab_handler (smul_optab, mode);
35029 insn_gen_fn gen_mul = GEN_FCN (code);
35030
35031 gcc_assert (code != CODE_FOR_nothing);
35032
35033 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35034
35035 /* e = rsqrt estimate */
35036 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35037 UNSPEC_RSQRT)));
35038
35039 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
35040 if (!recip)
35041 {
35042 rtx zero = force_reg (mode, CONST0_RTX (mode));
35043
35044 if (mode == SFmode)
35045 {
35046 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35047 e, zero, mode, 0);
35048 if (target != e)
35049 emit_move_insn (e, target);
35050 }
35051 else
35052 {
35053 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35054 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35055 }
35056 }
35057
35058 /* g = sqrt estimate. */
35059 emit_insn (gen_mul (g, e, src));
35060 /* h = 1/(2*sqrt) estimate. */
35061 emit_insn (gen_mul (h, e, mhalf));
35062
35063 if (recip)
35064 {
35065 if (passes == 1)
35066 {
35067 rtx t = gen_reg_rtx (mode);
35068 rs6000_emit_nmsub (t, g, h, mhalf);
35069 /* Apply correction directly to 1/rsqrt estimate. */
35070 rs6000_emit_madd (dst, e, t, e);
35071 }
35072 else
35073 {
35074 for (i = 0; i < passes; i++)
35075 {
35076 rtx t1 = gen_reg_rtx (mode);
35077 rtx g1 = gen_reg_rtx (mode);
35078 rtx h1 = gen_reg_rtx (mode);
35079
35080 rs6000_emit_nmsub (t1, g, h, mhalf);
35081 rs6000_emit_madd (g1, g, t1, g);
35082 rs6000_emit_madd (h1, h, t1, h);
35083
35084 g = g1;
35085 h = h1;
35086 }
35087 /* Multiply by 2 for 1/rsqrt. */
35088 emit_insn (gen_add3_insn (dst, h, h));
35089 }
35090 }
35091 else
35092 {
35093 rtx t = gen_reg_rtx (mode);
35094 rs6000_emit_nmsub (t, g, h, mhalf);
35095 rs6000_emit_madd (dst, g, t, g);
35096 }
35097
35099 }
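
/* Invariants maintained by the Goldschmidt iteration above:
g ~= sqrt(src) and h ~= 1/(2*sqrt(src)), so the residual
t = 1/2 - g*h tends to zero as the estimates converge. Each pass
refines g1 = g + g*t and h1 = h + h*t, roughly doubling the accuracy;
the final result is g + g*t for sqrt, or h + h (i.e. 2*h ~= 1/sqrt(src))
for rsqrt. */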
35100
35101 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35102 (Power7) targets. DST is the target, and SRC is the argument operand. */
35103
35104 void
35105 rs6000_emit_popcount (rtx dst, rtx src)
35106 {
35107 machine_mode mode = GET_MODE (dst);
35108 rtx tmp1, tmp2;
35109
35110 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35111 if (TARGET_POPCNTD)
35112 {
35113 if (mode == SImode)
35114 emit_insn (gen_popcntdsi2 (dst, src));
35115 else
35116 emit_insn (gen_popcntddi2 (dst, src));
35117 return;
35118 }
35119
35120 tmp1 = gen_reg_rtx (mode);
35121
35122 if (mode == SImode)
35123 {
35124 emit_insn (gen_popcntbsi2 (tmp1, src));
35125 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35126 NULL_RTX, 0);
35127 tmp2 = force_reg (SImode, tmp2);
35128 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35129 }
35130 else
35131 {
35132 emit_insn (gen_popcntbdi2 (tmp1, src));
35133 tmp2 = expand_mult (DImode, tmp1,
35134 GEN_INT ((HOST_WIDE_INT)
35135 0x01010101 << 32 | 0x01010101),
35136 NULL_RTX, 0);
35137 tmp2 = force_reg (DImode, tmp2);
35138 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35139 }
35140 }
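
/* A worked example of the multiply-and-shift fallback above: for SImode
SRC = 0x10204080, popcntb yields the per-byte counts 0x01010101;
multiplying by 0x01010101 accumulates all four byte counts into the most
significant byte (0x01010101 * 0x01010101 == 0x04030201), and the final
shift right by 24 extracts the total population count, 4. */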
35141
35142
35143 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35144 target, and SRC is the argument operand. */
35145
35146 void
35147 rs6000_emit_parity (rtx dst, rtx src)
35148 {
35149 machine_mode mode = GET_MODE (dst);
35150 rtx tmp;
35151
35152 tmp = gen_reg_rtx (mode);
35153
35154 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35155 if (TARGET_CMPB)
35156 {
35157 if (mode == SImode)
35158 {
35159 emit_insn (gen_popcntbsi2 (tmp, src));
35160 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35161 }
35162 else
35163 {
35164 emit_insn (gen_popcntbdi2 (tmp, src));
35165 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35166 }
35167 return;
35168 }
35169
35170 if (mode == SImode)
35171 {
35172 /* Is mult+shift >= shift+xor+shift+xor? */
35173 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35174 {
35175 rtx tmp1, tmp2, tmp3, tmp4;
35176
35177 tmp1 = gen_reg_rtx (SImode);
35178 emit_insn (gen_popcntbsi2 (tmp1, src));
35179
35180 tmp2 = gen_reg_rtx (SImode);
35181 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35182 tmp3 = gen_reg_rtx (SImode);
35183 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35184
35185 tmp4 = gen_reg_rtx (SImode);
35186 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35187 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35188 }
35189 else
35190 rs6000_emit_popcount (tmp, src);
35191 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35192 }
35193 else
35194 {
35195 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35196 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35197 {
35198 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35199
35200 tmp1 = gen_reg_rtx (DImode);
35201 emit_insn (gen_popcntbdi2 (tmp1, src));
35202
35203 tmp2 = gen_reg_rtx (DImode);
35204 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35205 tmp3 = gen_reg_rtx (DImode);
35206 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35207
35208 tmp4 = gen_reg_rtx (DImode);
35209 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35210 tmp5 = gen_reg_rtx (DImode);
35211 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35212
35213 tmp6 = gen_reg_rtx (DImode);
35214 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35215 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35216 }
35217 else
35218 rs6000_emit_popcount (tmp, src);
35219 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35220 }
35221 }
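
/* For illustration of the shift/xor fallback above: if popcntbsi2 yields
the per-byte counts 0x01020304 (which sum to 10, an even number of set
bits), xoring with the value shifted right by 16 and then by 8 folds the
counts into the low byte, and the final AND with 1 extracts the parity
bit, 0 here. */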
35222
35223 /* Expand an Altivec constant permutation for little endian mode.
35224 OP0 and OP1 are the input vectors and TARGET is the output vector.
35225 SEL specifies the constant permutation vector.
35226
35227 There are two issues: First, the two input operands must be
35228 swapped so that together they form a double-wide array in LE
35229 order. Second, the vperm instruction has surprising behavior
35230 in LE mode: it interprets the elements of the source vectors
35231 in BE mode ("left to right") and interprets the elements of
35232 the destination vector in LE mode ("right to left"). To
35233 correct for this, we must subtract each element of the permute
35234 control vector from 31.
35235
35236 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35237 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35238 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35239 serve as the permute control vector. Then, in BE mode,
35240
35241 vperm 9,10,11,12
35242
35243 places the desired result in vr9. However, in LE mode the
35244 vector contents will be
35245
35246 vr10 = 00000003 00000002 00000001 00000000
35247 vr11 = 00000007 00000006 00000005 00000004
35248
35249 The result of the vperm using the same permute control vector is
35250
35251 vr9 = 05000000 07000000 01000000 03000000
35252
35253 That is, the leftmost 4 bytes of vr10 are interpreted as the
35254 source for the rightmost 4 bytes of vr9, and so on.
35255
35256 If we change the permute control vector to
35257
35258 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35259
35260 and issue
35261
35262 vperm 9,11,10,12
35263
35264 we get the desired
35265
35266 vr9 = 00000006 00000004 00000002 00000000. */
35267
35268 static void
35269 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35270 const vec_perm_indices &sel)
35271 {
35272 unsigned int i;
35273 rtx perm[16];
35274 rtx constv, unspec;
35275
35276 /* Unpack and adjust the constant selector. */
35277 for (i = 0; i < 16; ++i)
35278 {
35279 unsigned int elt = 31 - (sel[i] & 31);
35280 perm[i] = GEN_INT (elt);
35281 }
35282
35283 /* Expand to a permute, swapping the inputs and using the
35284 adjusted selector. */
35285 if (!REG_P (op0))
35286 op0 = force_reg (V16QImode, op0);
35287 if (!REG_P (op1))
35288 op1 = force_reg (V16QImode, op1);
35289
35290 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35291 constv = force_reg (V16QImode, constv);
35292 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35293 UNSPEC_VPERM);
35294 if (!REG_P (target))
35295 {
35296 rtx tmp = gen_reg_rtx (V16QImode);
35297 emit_move_insn (tmp, unspec);
35298 unspec = tmp;
35299 }
35300
35301 emit_move_insn (target, unspec);
35302 }
35303
35304 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35305 permute control vector. But here it's not a constant, so we must
35306 generate a vector NAND or NOR to do the adjustment. */
35307
35308 void
35309 altivec_expand_vec_perm_le (rtx operands[4])
35310 {
35311 rtx notx, iorx, unspec;
35312 rtx target = operands[0];
35313 rtx op0 = operands[1];
35314 rtx op1 = operands[2];
35315 rtx sel = operands[3];
35316 rtx tmp = target;
35317 rtx norreg = gen_reg_rtx (V16QImode);
35318 machine_mode mode = GET_MODE (target);
35319
35320 /* Get everything in regs so the pattern matches. */
35321 if (!REG_P (op0))
35322 op0 = force_reg (mode, op0);
35323 if (!REG_P (op1))
35324 op1 = force_reg (mode, op1);
35325 if (!REG_P (sel))
35326 sel = force_reg (V16QImode, sel);
35327 if (!REG_P (target))
35328 tmp = gen_reg_rtx (mode);
35329
35330 if (TARGET_P9_VECTOR)
35331 {
35332 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35333 UNSPEC_VPERMR);
35334 }
35335 else
35336 {
35337 /* Invert the selector with a VNAND if available, else a VNOR.
35338 The VNAND is preferred for future fusion opportunities. */
35339 notx = gen_rtx_NOT (V16QImode, sel);
35340 iorx = (TARGET_P8_VECTOR
35341 ? gen_rtx_IOR (V16QImode, notx, notx)
35342 : gen_rtx_AND (V16QImode, notx, notx));
35343 emit_insn (gen_rtx_SET (norreg, iorx));
35344
35345 /* Permute with operands reversed and adjusted selector. */
35346 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35347 UNSPEC_VPERM);
35348 }
35349
35350 /* Copy into target, possibly by way of a register. */
35351 if (!REG_P (target))
35352 {
35353 emit_move_insn (tmp, unspec);
35354 unspec = tmp;
35355 }
35356
35357 emit_move_insn (target, unspec);
35358 }
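
/* Why the NOT-based adjustment above matches the constant 31 - x
adjustment in altivec_expand_vec_perm_const_le: vperm only examines the
low five bits of each selector byte, and for any byte x we have
(~x) & 31 == 31 - (x & 31), so inverting the selector performs the
subtraction for free. */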
35359
35360 /* Expand an Altivec constant permutation. Return true if we match
35361 an efficient implementation; false to fall back to VPERM.
35362
35363 OP0 and OP1 are the input vectors and TARGET is the output vector.
35364 SEL specifies the constant permutation vector. */
35365
35366 static bool
35367 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35368 const vec_perm_indices &sel)
35369 {
35370 struct altivec_perm_insn {
35371 HOST_WIDE_INT mask;
35372 enum insn_code impl;
35373 unsigned char perm[16];
35374 };
35375 static const struct altivec_perm_insn patterns[] = {
35376 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35377 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35378 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35379 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35380 { OPTION_MASK_ALTIVEC,
35381 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35382 : CODE_FOR_altivec_vmrglb_direct),
35383 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35384 { OPTION_MASK_ALTIVEC,
35385 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35386 : CODE_FOR_altivec_vmrglh_direct),
35387 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35388 { OPTION_MASK_ALTIVEC,
35389 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35390 : CODE_FOR_altivec_vmrglw_direct),
35391 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35392 { OPTION_MASK_ALTIVEC,
35393 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35394 : CODE_FOR_altivec_vmrghb_direct),
35395 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35396 { OPTION_MASK_ALTIVEC,
35397 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35398 : CODE_FOR_altivec_vmrghh_direct),
35399 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35400 { OPTION_MASK_ALTIVEC,
35401 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35402 : CODE_FOR_altivec_vmrghw_direct),
35403 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35404 { OPTION_MASK_P8_VECTOR,
35405 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35406 : CODE_FOR_p8_vmrgow_v4sf_direct),
35407 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35408 { OPTION_MASK_P8_VECTOR,
35409 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35410 : CODE_FOR_p8_vmrgew_v4sf_direct),
35411 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35412 };
35413
35414 unsigned int i, j, elt, which;
35415 unsigned char perm[16];
35416 rtx x;
35417 bool one_vec;
35418
35419 /* Unpack the constant selector. */
35420 for (i = which = 0; i < 16; ++i)
35421 {
35422 elt = sel[i] & 31;
35423 which |= (elt < 16 ? 1 : 2);
35424 perm[i] = elt;
35425 }
35426
35427 /* Simplify the constant selector based on operands. */
35428 switch (which)
35429 {
35430 default:
35431 gcc_unreachable ();
35432
35433 case 3:
35434 one_vec = false;
35435 if (!rtx_equal_p (op0, op1))
35436 break;
35437 /* FALLTHRU */
35438
35439 case 2:
35440 for (i = 0; i < 16; ++i)
35441 perm[i] &= 15;
35442 op0 = op1;
35443 one_vec = true;
35444 break;
35445
35446 case 1:
35447 op1 = op0;
35448 one_vec = true;
35449 break;
35450 }
35451
35452 /* Look for splat patterns. */
35453 if (one_vec)
35454 {
35455 elt = perm[0];
35456
35457 for (i = 0; i < 16; ++i)
35458 if (perm[i] != elt)
35459 break;
35460 if (i == 16)
35461 {
35462 if (!BYTES_BIG_ENDIAN)
35463 elt = 15 - elt;
35464 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35465 return true;
35466 }
35467
35468 if (elt % 2 == 0)
35469 {
35470 for (i = 0; i < 16; i += 2)
35471 if (perm[i] != elt || perm[i + 1] != elt + 1)
35472 break;
35473 if (i == 16)
35474 {
35475 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35476 x = gen_reg_rtx (V8HImode);
35477 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35478 GEN_INT (field)));
35479 emit_move_insn (target, gen_lowpart (V16QImode, x));
35480 return true;
35481 }
35482 }
35483
35484 if (elt % 4 == 0)
35485 {
35486 for (i = 0; i < 16; i += 4)
35487 if (perm[i] != elt
35488 || perm[i + 1] != elt + 1
35489 || perm[i + 2] != elt + 2
35490 || perm[i + 3] != elt + 3)
35491 break;
35492 if (i == 16)
35493 {
35494 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35495 x = gen_reg_rtx (V4SImode);
35496 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35497 GEN_INT (field)));
35498 emit_move_insn (target, gen_lowpart (V16QImode, x));
35499 return true;
35500 }
35501 }
35502 }
35503
35504 /* Look for merge and pack patterns. */
35505 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35506 {
35507 bool swapped;
35508
35509 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35510 continue;
35511
35512 elt = patterns[j].perm[0];
35513 if (perm[0] == elt)
35514 swapped = false;
35515 else if (perm[0] == elt + 16)
35516 swapped = true;
35517 else
35518 continue;
35519 for (i = 1; i < 16; ++i)
35520 {
35521 elt = patterns[j].perm[i];
35522 if (swapped)
35523 elt = (elt >= 16 ? elt - 16 : elt + 16);
35524 else if (one_vec && elt >= 16)
35525 elt -= 16;
35526 if (perm[i] != elt)
35527 break;
35528 }
35529 if (i == 16)
35530 {
35531 enum insn_code icode = patterns[j].impl;
35532 machine_mode omode = insn_data[icode].operand[0].mode;
35533 machine_mode imode = insn_data[icode].operand[1].mode;
35534
35535 /* For little-endian, don't use vpkuwum and vpkuhum if the
35536 underlying vector type is not V4SI and V8HI, respectively.
35537 For example, using vpkuwum with a V8HI picks up the even
35538 halfwords (BE numbering) when the even halfwords (LE
35539 numbering) are what we need. */
35540 if (!BYTES_BIG_ENDIAN
35541 && icode == CODE_FOR_altivec_vpkuwum_direct
35542 && ((REG_P (op0)
35543 && GET_MODE (op0) != V4SImode)
35544 || (SUBREG_P (op0)
35545 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35546 continue;
35547 if (!BYTES_BIG_ENDIAN
35548 && icode == CODE_FOR_altivec_vpkuhum_direct
35549 && ((REG_P (op0)
35550 && GET_MODE (op0) != V8HImode)
35551 || (SUBREG_P (op0)
35552 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35553 continue;
35554
35555 /* For little-endian, the two input operands must be swapped
35556 (or swapped back) to ensure proper right-to-left numbering
35557 from 0 to 2N-1. */
35558 if (swapped ^ !BYTES_BIG_ENDIAN)
35559 std::swap (op0, op1);
35560 if (imode != V16QImode)
35561 {
35562 op0 = gen_lowpart (imode, op0);
35563 op1 = gen_lowpart (imode, op1);
35564 }
35565 if (omode == V16QImode)
35566 x = target;
35567 else
35568 x = gen_reg_rtx (omode);
35569 emit_insn (GEN_FCN (icode) (x, op0, op1));
35570 if (omode != V16QImode)
35571 emit_move_insn (target, gen_lowpart (V16QImode, x));
35572 return true;
35573 }
35574 }
35575
35576 if (!BYTES_BIG_ENDIAN)
35577 {
35578 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35579 return true;
35580 }
35581
35582 return false;
35583 }
35584
35585 /* Expand a VSX Permute Doubleword constant permutation.
35586 Return true if we match an efficient implementation. */
35587
35588 static bool
35589 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35590 unsigned char perm0, unsigned char perm1)
35591 {
35592 rtx x;
35593
35594 /* If both selectors come from the same operand, fold to single op. */
35595 if ((perm0 & 2) == (perm1 & 2))
35596 {
35597 if (perm0 & 2)
35598 op0 = op1;
35599 else
35600 op1 = op0;
35601 }
35602 /* If both operands are equal, fold to simpler permutation. */
35603 if (rtx_equal_p (op0, op1))
35604 {
35605 perm0 = perm0 & 1;
35606 perm1 = (perm1 & 1) + 2;
35607 }
35608 /* If the first selector comes from the second operand, swap. */
35609 else if (perm0 & 2)
35610 {
35611 if (perm1 & 2)
35612 return false;
35613 perm0 -= 2;
35614 perm1 += 2;
35615 std::swap (op0, op1);
35616 }
35617 /* If the second selector does not come from the second operand, fail. */
35618 else if ((perm1 & 2) == 0)
35619 return false;
35620
35621 /* Success! */
35622 if (target != NULL)
35623 {
35624 machine_mode vmode, dmode;
35625 rtvec v;
35626
35627 vmode = GET_MODE (target);
35628 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35629 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35630 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35631 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35632 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35633 emit_insn (gen_rtx_SET (target, x));
35634 }
35635 return true;
35636 }
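
/* For illustration: with perm0 == 0 and perm1 == 3 the code above
concatenates OP0 and OP1 into a four-element vector and selects elements
0 and 3, i.e. the first doubleword of OP0 and the second doubleword of
OP1, which maps onto a single xxpermdi. */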
35637
35638 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35639
35640 static bool
35641 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35642 rtx op1, const vec_perm_indices &sel)
35643 {
35644 bool testing_p = !target;
35645
35646 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35647 if (TARGET_ALTIVEC && testing_p)
35648 return true;
35649
35650 /* Check for ps_merge* or xxpermdi insns. */
35651 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35652 {
35653 if (testing_p)
35654 {
35655 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35656 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35657 }
35658 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35659 return true;
35660 }
35661
35662 if (TARGET_ALTIVEC)
35663 {
35664 /* Force the target-independent code to lower to V16QImode. */
35665 if (vmode != V16QImode)
35666 return false;
35667 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35668 return true;
35669 }
35670
35671 return false;
35672 }
35673
35674 /* A subroutine for rs6000_expand_extract_even and rs6000_expand_interleave.
35675 OP0 and OP1 are the input vectors and TARGET is the output vector.
35676 PERM specifies the constant permutation vector. */
35677
35678 static void
35679 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35680 machine_mode vmode, const vec_perm_builder &perm)
35681 {
35682 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35683 if (x != target)
35684 emit_move_insn (target, x);
35685 }
35686
35687 /* Expand an extract even operation. */
35688
35689 void
35690 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35691 {
35692 machine_mode vmode = GET_MODE (target);
35693 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35694 vec_perm_builder perm (nelt, nelt, 1);
35695
35696 for (i = 0; i < nelt; i++)
35697 perm.quick_push (i * 2);
35698
35699 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35700 }
35701
35702 /* Expand a vector interleave operation. */
35703
35704 void
35705 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35706 {
35707 machine_mode vmode = GET_MODE (target);
35708 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35709 vec_perm_builder perm (nelt, nelt, 1);
35710
35711 high = (highp ? 0 : nelt / 2);
35712 for (i = 0; i < nelt / 2; i++)
35713 {
35714 perm.quick_push (i + high);
35715 perm.quick_push (i + nelt + high);
35716 }
35717
35718 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35719 }
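
/* Example selectors built by the two expanders above for a four-element
vector: rs6000_expand_extract_even pushes { 0, 2, 4, 6 }, taking the
even elements of the OP0/OP1 concatenation, while rs6000_expand_interleave
pushes { 0, 4, 1, 5 } when HIGHP and { 2, 6, 3, 7 } otherwise. */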
35720
35721 /* Scale a V2DF vector SRC by two raised to the power SCALE, placing the result in TGT. */
35722 void
35723 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35724 {
35725 HOST_WIDE_INT hwi_scale (scale);
35726 REAL_VALUE_TYPE r_pow;
35727 rtvec v = rtvec_alloc (2);
35728 rtx elt;
35729 rtx scale_vec = gen_reg_rtx (V2DFmode);
35730 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35731 elt = const_double_from_real_value (r_pow, DFmode);
35732 RTVEC_ELT (v, 0) = elt;
35733 RTVEC_ELT (v, 1) = elt;
35734 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35735 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35736 }
35737
35738 /* Return an RTX representing where to find the function value of a
35739 function returning MODE. */
35740 static rtx
35741 rs6000_complex_function_value (machine_mode mode)
35742 {
35743 unsigned int regno;
35744 rtx r1, r2;
35745 machine_mode inner = GET_MODE_INNER (mode);
35746 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35747
35748 if (TARGET_FLOAT128_TYPE
35749 && (mode == KCmode
35750 || (mode == TCmode && TARGET_IEEEQUAD)))
35751 regno = ALTIVEC_ARG_RETURN;
35752
35753 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35754 regno = FP_ARG_RETURN;
35755
35756 else
35757 {
35758 regno = GP_ARG_RETURN;
35759
35760 /* 32-bit is OK since it'll go in r3/r4. */
35761 if (TARGET_32BIT && inner_bytes >= 4)
35762 return gen_rtx_REG (mode, regno);
35763 }
35764
35765 if (inner_bytes >= 8)
35766 return gen_rtx_REG (mode, regno);
35767
35768 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35769 const0_rtx);
35770 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35771 GEN_INT (inner_bytes));
35772 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35773 }
35774
35775 /* Return an rtx describing a return value of MODE as a PARALLEL
35776 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35777 stride REG_STRIDE. */
35778
35779 static rtx
35780 rs6000_parallel_return (machine_mode mode,
35781 int n_elts, machine_mode elt_mode,
35782 unsigned int regno, unsigned int reg_stride)
35783 {
35784 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35785
35786 int i;
35787 for (i = 0; i < n_elts; i++)
35788 {
35789 rtx r = gen_rtx_REG (elt_mode, regno);
35790 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35791 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35792 regno += reg_stride;
35793 }
35794
35795 return par;
35796 }
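
/* For example, rs6000_parallel_return (DImode, 2, SImode, GP_ARG_RETURN, 1)
builds (parallel [(expr_list (reg:SI 3) (const_int 0))
(expr_list (reg:SI 4) (const_int 4))]), describing a
64-bit value returned split across r3/r4 for -m32 -mpowerpc64. */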
35797
35798 /* Target hook for TARGET_FUNCTION_VALUE.
35799
35800 An integer value is in r3 and a floating-point value is in fp1,
35801 unless -msoft-float. */
35802
35803 static rtx
35804 rs6000_function_value (const_tree valtype,
35805 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35806 bool outgoing ATTRIBUTE_UNUSED)
35807 {
35808 machine_mode mode;
35809 unsigned int regno;
35810 machine_mode elt_mode;
35811 int n_elts;
35812
35813 /* Special handling for structs in darwin64. */
35814 if (TARGET_MACHO
35815 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35816 {
35817 CUMULATIVE_ARGS valcum;
35818 rtx valret;
35819
35820 valcum.words = 0;
35821 valcum.fregno = FP_ARG_MIN_REG;
35822 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35823 /* Do a trial code generation as if this were going to be passed as
35824 an argument; if any part goes in memory, we return NULL. */
35825 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35826 if (valret)
35827 return valret;
35828 /* Otherwise fall through to standard ABI rules. */
35829 }
35830
35831 mode = TYPE_MODE (valtype);
35832
35833 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35834 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35835 {
35836 int first_reg, n_regs;
35837
35838 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35839 {
35840 /* _Decimal128 must use even/odd register pairs. */
35841 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35842 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35843 }
35844 else
35845 {
35846 first_reg = ALTIVEC_ARG_RETURN;
35847 n_regs = 1;
35848 }
35849
35850 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35851 }
35852
35853 /* Some return value types need to be split in the 32-bit ABI with -mpowerpc64. */
35854 if (TARGET_32BIT && TARGET_POWERPC64)
35855 switch (mode)
35856 {
35857 default:
35858 break;
35859 case E_DImode:
35860 case E_SCmode:
35861 case E_DCmode:
35862 case E_TCmode:
35863 int count = GET_MODE_SIZE (mode) / 4;
35864 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35865 }
35866
35867 if ((INTEGRAL_TYPE_P (valtype)
35868 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35869 || POINTER_TYPE_P (valtype))
35870 mode = TARGET_32BIT ? SImode : DImode;
35871
35872 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35873 /* _Decimal128 must use an even/odd register pair. */
35874 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35875 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35876 && !FLOAT128_VECTOR_P (mode))
35877 regno = FP_ARG_RETURN;
35878 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35879 && targetm.calls.split_complex_arg)
35880 return rs6000_complex_function_value (mode);
35881 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35882 return register is used in both cases, and we won't see V2DImode/V2DFmode
35883 for pure altivec, combine the two cases. */
35884 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35885 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35886 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35887 regno = ALTIVEC_ARG_RETURN;
35888 else
35889 regno = GP_ARG_RETURN;
35890
35891 return gen_rtx_REG (mode, regno);
35892 }
35893
35894 /* Define how to find the value returned by a library function
35895 assuming the value has mode MODE. */
35896 rtx
35897 rs6000_libcall_value (machine_mode mode)
35898 {
35899 unsigned int regno;
35900
35901 /* A long long return value needs to be split in the 32-bit ABI with -mpowerpc64. */
35902 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35903 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35904
35905 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35906 /* _Decimal128 must use an even/odd register pair. */
35907 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35908 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35909 regno = FP_ARG_RETURN;
35910 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35911 return register is used in both cases, and we won't see V2DImode/V2DFmode
35912 for pure altivec, combine the two cases. */
35913 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35914 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35915 regno = ALTIVEC_ARG_RETURN;
35916 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35917 return rs6000_complex_function_value (mode);
35918 else
35919 regno = GP_ARG_RETURN;
35920
35921 return gen_rtx_REG (mode, regno);
35922 }
35923
35924 /* Compute register pressure classes. We implement the target hook to avoid
35925 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
35926 lead to incorrect estimates of the number of available registers and
35927 therefore increased register pressure/spilling. */
35928 static int
35929 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35930 {
35931 int n;
35932
35933 n = 0;
35934 pressure_classes[n++] = GENERAL_REGS;
35935 if (TARGET_VSX)
35936 pressure_classes[n++] = VSX_REGS;
35937 else
35938 {
35939 if (TARGET_ALTIVEC)
35940 pressure_classes[n++] = ALTIVEC_REGS;
35941 if (TARGET_HARD_FLOAT)
35942 pressure_classes[n++] = FLOAT_REGS;
35943 }
35944 pressure_classes[n++] = CR_REGS;
35945 pressure_classes[n++] = SPECIAL_REGS;
35946
35947 return n;
35948 }
35949
35950 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35951 Frame pointer elimination is automatically handled.
35952
35953 For the RS/6000, if frame pointer elimination is being done, we would like
35954 to convert ap into fp, not sp.
35955
35956 We need r30 if -mminimal-toc was specified, and there are constant pool
35957 references. */
35958
35959 static bool
35960 rs6000_can_eliminate (const int from, const int to)
35961 {
35962 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35963 ? ! frame_pointer_needed
35964 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35965 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35966 || constant_pool_empty_p ()
35967 : true);
35968 }
35969
35970 /* Define the offset between two registers, FROM to be eliminated and its
35971 replacement TO, at the start of a routine. */
35972 HOST_WIDE_INT
35973 rs6000_initial_elimination_offset (int from, int to)
35974 {
35975 rs6000_stack_t *info = rs6000_stack_info ();
35976 HOST_WIDE_INT offset;
35977
35978 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35979 offset = info->push_p ? 0 : -info->total_size;
35980 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35981 {
35982 offset = info->push_p ? 0 : -info->total_size;
35983 if (FRAME_GROWS_DOWNWARD)
35984 offset += info->fixed_size + info->vars_size + info->parm_size;
35985 }
35986 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35987 offset = FRAME_GROWS_DOWNWARD
35988 ? info->fixed_size + info->vars_size + info->parm_size
35989 : 0;
35990 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35991 offset = info->total_size;
35992 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35993 offset = info->push_p ? info->total_size : 0;
35994 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35995 offset = 0;
35996 else
35997 gcc_unreachable ();
35998
35999 return offset;
36000 }
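
/* A worked example, assuming a frame where info->push_p is false and
info->total_size is 144: eliminating HARD_FRAME_POINTER_REGNUM to
STACK_POINTER_REGNUM yields -144, while eliminating ARG_POINTER_REGNUM
to STACK_POINTER_REGNUM yields 0, since no stack adjustment has been
made. */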
36001
36002 /* Fill in the sizes of the registers used by the unwinder. */
36003
36004 static void
36005 rs6000_init_dwarf_reg_sizes_extra (tree address)
36006 {
36007 if (TARGET_MACHO && ! TARGET_ALTIVEC)
36008 {
36009 int i;
36010 machine_mode mode = TYPE_MODE (char_type_node);
36011 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
36012 rtx mem = gen_rtx_MEM (BLKmode, addr);
36013 rtx value = gen_int_mode (16, mode);
36014
36015 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
36016 The unwinder still needs to know the size of Altivec registers. */
36017
36018 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
36019 {
36020 int column = DWARF_REG_TO_UNWIND_COLUMN
36021 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36022 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36023
36024 emit_move_insn (adjust_address (mem, mode, offset), value);
36025 }
36026 }
36027 }
36028
36029 /* Map internal gcc register numbers to debug format register numbers.
36030 FORMAT specifies the type of debug register number to use:
36031 0 -- debug information, except for frame-related sections
36032 1 -- DWARF .debug_frame section
36033 2 -- DWARF .eh_frame section */
36034
36035 unsigned int
36036 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36037 {
36038 /* On some platforms, we use the standard DWARF register
36039 numbering for .debug_info and .debug_frame. */
36040 if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
36041 {
36042 #ifdef RS6000_USE_DWARF_NUMBERING
36043 if (regno <= 31)
36044 return regno;
36045 if (FP_REGNO_P (regno))
36046 return regno - FIRST_FPR_REGNO + 32;
36047 if (ALTIVEC_REGNO_P (regno))
36048 return regno - FIRST_ALTIVEC_REGNO + 1124;
36049 if (regno == LR_REGNO)
36050 return 108;
36051 if (regno == CTR_REGNO)
36052 return 109;
36053 if (regno == CA_REGNO)
36054 return 101; /* XER */
36055 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36056 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36057 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36058 to the DWARF reg for CR. */
36059 if (format == 1 && regno == CR2_REGNO)
36060 return 64;
36061 if (CR_REGNO_P (regno))
36062 return regno - CR0_REGNO + 86;
36063 if (regno == VRSAVE_REGNO)
36064 return 356;
36065 if (regno == VSCR_REGNO)
36066 return 67;
36067
36068 /* These do not make much sense. */
36069 if (regno == FRAME_POINTER_REGNUM)
36070 return 111;
36071 if (regno == ARG_POINTER_REGNUM)
36072 return 67;
36073 if (regno == 64)
36074 return 100;
36075
36076 gcc_unreachable ();
36077 #endif
36078 }
36079
36080 /* We use the GCC 7 (and before) internal number for non-DWARF debug
36081 information, and also for .eh_frame. */
36082 /* Translate the regnos to their numbers in GCC 7 (and before). */
36083 if (regno <= 31)
36084 return regno;
36085 if (FP_REGNO_P (regno))
36086 return regno - FIRST_FPR_REGNO + 32;
36087 if (ALTIVEC_REGNO_P (regno))
36088 return regno - FIRST_ALTIVEC_REGNO + 77;
36089 if (regno == LR_REGNO)
36090 return 65;
36091 if (regno == CTR_REGNO)
36092 return 66;
36093 if (regno == CA_REGNO)
36094 return 76; /* XER */
36095 if (CR_REGNO_P (regno))
36096 return regno - CR0_REGNO + 68;
36097 if (regno == VRSAVE_REGNO)
36098 return 109;
36099 if (regno == VSCR_REGNO)
36100 return 110;
36101
36102 if (regno == FRAME_POINTER_REGNUM)
36103 return 111;
36104 if (regno == ARG_POINTER_REGNUM)
36105 return 67;
36106 if (regno == 64)
36107 return 64;
36108
36109 gcc_unreachable ();
36110 }
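
/* Examples of the mapping above: GPRs and FPRs get the same numbers in
both encodings, but LR maps to 108 in the DWARF numbering (format 0
with DWARF debug info, or format 1) versus 65 in the legacy GCC 7
numbering used for .eh_frame, and for .debug_frame CR2 maps to 64, the
column for the whole of CR, because the prologue saves all of CR at
once. */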
36111
36112 /* Target hook for eh_return_filter_mode. */
36113 static scalar_int_mode
36114 rs6000_eh_return_filter_mode (void)
36115 {
36116 return TARGET_32BIT ? SImode : word_mode;
36117 }
36118
36119 /* Target hook for translate_mode_attribute. */
36120 static machine_mode
36121 rs6000_translate_mode_attribute (machine_mode mode)
36122 {
36123 if ((FLOAT128_IEEE_P (mode)
36124 && ieee128_float_type_node == long_double_type_node)
36125 || (FLOAT128_IBM_P (mode)
36126 && ibm128_float_type_node == long_double_type_node))
36127 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36128 return mode;
36129 }
36130
36131 /* Target hook for scalar_mode_supported_p. */
36132 static bool
36133 rs6000_scalar_mode_supported_p (scalar_mode mode)
36134 {
36135 /* -m32 does not support TImode. This is the default, from
36136 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36137 same ABI as for -m32. But default_scalar_mode_supported_p allows
36138 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36139 for -mpowerpc64. */
36140 if (TARGET_32BIT && mode == TImode)
36141 return false;
36142
36143 if (DECIMAL_FLOAT_MODE_P (mode))
36144 return default_decimal_float_supported_p ();
36145 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36146 return true;
36147 else
36148 return default_scalar_mode_supported_p (mode);
36149 }
36150
36151 /* Target hook for vector_mode_supported_p. */
36152 static bool
36153 rs6000_vector_mode_supported_p (machine_mode mode)
36154 {
36155 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36156 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36157 double-double. */
36158 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36159 return true;
36160
36161 else
36162 return false;
36163 }
36164
36165 /* Target hook for floatn_mode. */
36166 static opt_scalar_float_mode
36167 rs6000_floatn_mode (int n, bool extended)
36168 {
36169 if (extended)
36170 {
36171 switch (n)
36172 {
36173 case 32:
36174 return DFmode;
36175
36176 case 64:
36177 if (TARGET_FLOAT128_TYPE)
36178 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36179 else
36180 return opt_scalar_float_mode ();
36181
36182 case 128:
36183 return opt_scalar_float_mode ();
36184
36185 default:
36186 /* Those are the only valid _FloatNx types. */
36187 gcc_unreachable ();
36188 }
36189 }
36190 else
36191 {
36192 switch (n)
36193 {
36194 case 32:
36195 return SFmode;
36196
36197 case 64:
36198 return DFmode;
36199
36200 case 128:
36201 if (TARGET_FLOAT128_TYPE)
36202 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36203 else
36204 return opt_scalar_float_mode ();
36205
36206 default:
36207 return opt_scalar_float_mode ();
36208 }
36209 }
36211 }
36212
36213 /* Target hook for c_mode_for_suffix. */
36214 static machine_mode
36215 rs6000_c_mode_for_suffix (char suffix)
36216 {
36217 if (TARGET_FLOAT128_TYPE)
36218 {
36219 if (suffix == 'q' || suffix == 'Q')
36220 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36221
36222 /* At the moment, we are not defining a suffix for IBM extended double.
36223 If/when the default for -mabi=ieeelongdouble is changed, and we want
36224 to support __ibm128 constants in legacy library code, we may need to
36225 re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
36226 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36227 __float80 constants. */
36228 }
36229
36230 return VOIDmode;
36231 }
36232
36233 /* Target hook for invalid_arg_for_unprototyped_fn. */
36234 static const char *
36235 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36236 {
36237 return (!rs6000_darwin64_abi
36238 && typelist == 0
36239 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36240 && (funcdecl == NULL_TREE
36241 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36242 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36243 ? N_("AltiVec argument passed to unprototyped function")
36244 : NULL;
36245 }
36246
36247 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36248 setup by using __stack_chk_fail_local hidden function instead of
36249 calling __stack_chk_fail directly. Otherwise it is better to call
36250 __stack_chk_fail directly. */
36251
36252 static tree ATTRIBUTE_UNUSED
36253 rs6000_stack_protect_fail (void)
36254 {
36255 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36256 ? default_hidden_stack_protect_fail ()
36257 : default_external_stack_protect_fail ();
36258 }
36259
36260 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36261
36262 #if TARGET_ELF
36263 static unsigned HOST_WIDE_INT
36264 rs6000_asan_shadow_offset (void)
36265 {
36266 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36267 }
36268 #endif
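
/* Under the default ASAN shadow mapping (shadow = (addr >> 3) + offset),
the 64-bit offset above places e.g. the shadow byte for address 0x1000
at (0x1000 >> 3) + ((unsigned HOST_WIDE_INT) 1 << 41) = 0x20000000200. */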
36269 \f
36270 /* Mask options that we want to support inside of attribute((target)) and
36271 #pragma GCC target operations. Note, we do not include things like
36272 64/32-bit, endianness, hard/soft floating point, etc. that would have
36273 different calling sequences. */
36274
36275 struct rs6000_opt_mask {
36276 const char *name; /* option name */
36277 HOST_WIDE_INT mask; /* mask to set */
36278 bool invert; /* invert sense of mask */
36279 bool valid_target; /* option is a target option */
36280 };
36281
36282 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36283 {
36284 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36285 { "cmpb", OPTION_MASK_CMPB, false, true },
36286 { "crypto", OPTION_MASK_CRYPTO, false, true },
36287 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36288 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36289 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36290 false, true },
36291 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36292 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36293 { "fprnd", OPTION_MASK_FPRND, false, true },
36294 { "hard-dfp", OPTION_MASK_DFP, false, true },
36295 { "htm", OPTION_MASK_HTM, false, true },
36296 { "isel", OPTION_MASK_ISEL, false, true },
36297 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36298 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36299 { "modulo", OPTION_MASK_MODULO, false, true },
36300 { "mulhw", OPTION_MASK_MULHW, false, true },
36301 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36302 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36303 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36304 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36305 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36306 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36307 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36308 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36309 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36310 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36311 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36312 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36313 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36314 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36315 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36316 { "string", 0, false, true },
36317 { "update", OPTION_MASK_NO_UPDATE, true , true },
36318 { "vsx", OPTION_MASK_VSX, false, true },
36319 #ifdef OPTION_MASK_64BIT
36320 #if TARGET_AIX_OS
36321 { "aix64", OPTION_MASK_64BIT, false, false },
36322 { "aix32", OPTION_MASK_64BIT, true, false },
36323 #else
36324 { "64", OPTION_MASK_64BIT, false, false },
36325 { "32", OPTION_MASK_64BIT, true, false },
36326 #endif
36327 #endif
36328 #ifdef OPTION_MASK_EABI
36329 { "eabi", OPTION_MASK_EABI, false, false },
36330 #endif
36331 #ifdef OPTION_MASK_LITTLE_ENDIAN
36332 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36333 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36334 #endif
36335 #ifdef OPTION_MASK_RELOCATABLE
36336 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36337 #endif
36338 #ifdef OPTION_MASK_STRICT_ALIGN
36339 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36340 #endif
36341 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36342 { "string", 0, false, false },
36343 };
36344
36345 /* Builtin mask mapping for printing the flags. */
36346 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36347 {
36348 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36349 { "vsx", RS6000_BTM_VSX, false, false },
36350 { "fre", RS6000_BTM_FRE, false, false },
36351 { "fres", RS6000_BTM_FRES, false, false },
36352 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36353 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36354 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36355 { "cell", RS6000_BTM_CELL, false, false },
36356 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36357 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36358 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36359 { "crypto", RS6000_BTM_CRYPTO, false, false },
36360 { "htm", RS6000_BTM_HTM, false, false },
36361 { "hard-dfp", RS6000_BTM_DFP, false, false },
36362 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36363 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36364 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36365 { "float128", RS6000_BTM_FLOAT128, false, false },
36366 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36367 };
36368
36369 /* Option variables that we want to support inside attribute((target)) and
36370 #pragma GCC target operations. */
36371
36372 struct rs6000_opt_var {
36373 const char *name; /* option name */
36374 size_t global_offset; /* offset of the option in global_options. */
36375 size_t target_offset; /* offset of the option in target options. */
36376 };
36377
36378 static struct rs6000_opt_var const rs6000_opt_vars[] =
36379 {
36380 { "friz",
36381 offsetof (struct gcc_options, x_TARGET_FRIZ),
36382 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36383 { "avoid-indexed-addresses",
36384 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36385 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36386 { "longcall",
36387 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36388 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36389 { "optimize-swaps",
36390 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36391 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36392 { "allow-movmisalign",
36393 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36394 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36395 { "sched-groups",
36396 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36397 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36398 { "always-hint",
36399 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36400 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36401 { "align-branch-targets",
36402 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36403 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36404 { "tls-markers",
36405 offsetof (struct gcc_options, x_tls_markers),
36406 offsetof (struct cl_target_option, x_tls_markers), },
36407 { "sched-prolog",
36408 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36409 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36410 { "sched-epilog",
36411 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36412 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36413 { "speculate-indirect-jumps",
36414 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36415 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36416 };
36417
36418 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36419 parsing. Return true if there were no errors. */
36420
36421 static bool
36422 rs6000_inner_target_options (tree args, bool attr_p)
36423 {
36424 bool ret = true;
36425
36426 if (args == NULL_TREE)
36427 ;
36428
36429 else if (TREE_CODE (args) == STRING_CST)
36430 {
36431 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36432 char *q;
36433
36434 while ((q = strtok (p, ",")) != NULL)
36435 {
36436 bool error_p = false;
36437 bool not_valid_p = false;
36438 const char *cpu_opt = NULL;
36439
36440 p = NULL;
36441 if (strncmp (q, "cpu=", 4) == 0)
36442 {
36443 int cpu_index = rs6000_cpu_name_lookup (q+4);
36444 if (cpu_index >= 0)
36445 rs6000_cpu_index = cpu_index;
36446 else
36447 {
36448 error_p = true;
36449 cpu_opt = q+4;
36450 }
36451 }
36452 else if (strncmp (q, "tune=", 5) == 0)
36453 {
36454 int tune_index = rs6000_cpu_name_lookup (q+5);
36455 if (tune_index >= 0)
36456 rs6000_tune_index = tune_index;
36457 else
36458 {
36459 error_p = true;
36460 cpu_opt = q+5;
36461 }
36462 }
36463 else
36464 {
36465 size_t i;
36466 bool invert = false;
36467 char *r = q;
36468
36469 error_p = true;
36470 if (strncmp (r, "no-", 3) == 0)
36471 {
36472 invert = true;
36473 r += 3;
36474 }
36475
36476 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36477 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36478 {
36479 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36480
36481 if (!rs6000_opt_masks[i].valid_target)
36482 not_valid_p = true;
36483 else
36484 {
36485 error_p = false;
36486 rs6000_isa_flags_explicit |= mask;
36487
36488 /* VSX needs altivec, so -mvsx automagically sets
36489 altivec and disables -mavoid-indexed-addresses. */
36490 if (!invert)
36491 {
36492 if (mask == OPTION_MASK_VSX)
36493 {
36494 mask |= OPTION_MASK_ALTIVEC;
36495 TARGET_AVOID_XFORM = 0;
36496 }
36497 }
36498
36499 if (rs6000_opt_masks[i].invert)
36500 invert = !invert;
36501
36502 if (invert)
36503 rs6000_isa_flags &= ~mask;
36504 else
36505 rs6000_isa_flags |= mask;
36506 }
36507 break;
36508 }
36509
36510 if (error_p && !not_valid_p)
36511 {
36512 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36513 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36514 {
36515 size_t j = rs6000_opt_vars[i].global_offset;
36516 *((int *) ((char *)&global_options + j)) = !invert;
36517 error_p = false;
36518 not_valid_p = false;
36519 break;
36520 }
36521 }
36522 }
36523
36524 if (error_p)
36525 {
36526 const char *eprefix, *esuffix;
36527
36528 ret = false;
36529 if (attr_p)
36530 {
36531 eprefix = "__attribute__((__target__(";
36532 esuffix = ")))";
36533 }
36534 else
36535 {
36536 eprefix = "#pragma GCC target ";
36537 esuffix = "";
36538 }
36539
36540 if (cpu_opt)
36541 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36542 q, esuffix);
36543 else if (not_valid_p)
36544 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36545 else
36546 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36547 }
36548 }
36549 }
36550
36551 else if (TREE_CODE (args) == TREE_LIST)
36552 {
36553 do
36554 {
36555 tree value = TREE_VALUE (args);
36556 if (value)
36557 {
36558 bool ret2 = rs6000_inner_target_options (value, attr_p);
36559 if (!ret2)
36560 ret = false;
36561 }
36562 args = TREE_CHAIN (args);
36563 }
36564 while (args != NULL_TREE);
36565 }
36566
36567 else
36568 {
36569 error ("attribute %<target%> argument not a string");
36570 return false;
36571 }
36572
36573 return ret;
36574 }
36575
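/* As a usage sketch (hypothetical user code), each comma-separated token
   below takes one trip through the strtok loop above:

     #pragma GCC target ("cpu=power9,no-vsx")
     __attribute__((__target__("htm,no-float128"))) void f (void);

   "cpu=" and "tune=" select a processor table entry, a "no-" prefix
   inverts the mask, and names not found in rs6000_opt_masks fall through
   to the rs6000_opt_vars scan.  */
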
36576 /* Print out the target options as a list for -mdebug=target. */
36577
36578 static void
36579 rs6000_debug_target_options (tree args, const char *prefix)
36580 {
36581 if (args == NULL_TREE)
36582 fprintf (stderr, "%s<NULL>", prefix);
36583
36584 else if (TREE_CODE (args) == STRING_CST)
36585 {
36586 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36587 char *q;
36588
36589 while ((q = strtok (p, ",")) != NULL)
36590 {
36591 p = NULL;
36592 fprintf (stderr, "%s\"%s\"", prefix, q);
36593 prefix = ", ";
36594 }
36595 }
36596
36597 else if (TREE_CODE (args) == TREE_LIST)
36598 {
36599 do
36600 {
36601 tree value = TREE_VALUE (args);
36602 if (value)
36603 {
36604 rs6000_debug_target_options (value, prefix);
36605 prefix = ", ";
36606 }
36607 args = TREE_CHAIN (args);
36608 }
36609 while (args != NULL_TREE);
36610 }
36611
36612 else
36613 gcc_unreachable ();
36616 }
36617
36618 \f
36619 /* Hook to validate attribute((target("..."))). */
36620
36621 static bool
36622 rs6000_valid_attribute_p (tree fndecl,
36623 tree ARG_UNUSED (name),
36624 tree args,
36625 int flags)
36626 {
36627 struct cl_target_option cur_target;
36628 bool ret;
36629 tree old_optimize;
36630 tree new_target, new_optimize;
36631 tree func_optimize;
36632
36633 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36634
36635 if (TARGET_DEBUG_TARGET)
36636 {
36637 tree tname = DECL_NAME (fndecl);
36638 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36639 if (tname)
36640 fprintf (stderr, "function: %.*s\n",
36641 (int) IDENTIFIER_LENGTH (tname),
36642 IDENTIFIER_POINTER (tname));
36643 else
36644 fprintf (stderr, "function: unknown\n");
36645
36646 fprintf (stderr, "args:");
36647 rs6000_debug_target_options (args, " ");
36648 fprintf (stderr, "\n");
36649
36650 if (flags)
36651 fprintf (stderr, "flags: 0x%x\n", flags);
36652
36653 fprintf (stderr, "--------------------\n");
36654 }
36655
36656 /* attribute((target("default"))) does nothing, beyond
36657 affecting multi-versioning. */
36658 if (TREE_VALUE (args)
36659 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36660 && TREE_CHAIN (args) == NULL_TREE
36661 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36662 return true;
36663
36664 old_optimize = build_optimization_node (&global_options);
36665 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36666
36667 /* If the function changed the optimization levels as well as setting target
36668 options, start with the optimizations specified. */
36669 if (func_optimize && func_optimize != old_optimize)
36670 cl_optimization_restore (&global_options,
36671 TREE_OPTIMIZATION (func_optimize));
36672
36673 /* The target attributes may also change some optimization flags, so update
36674 the optimization options if necessary. */
36675 cl_target_option_save (&cur_target, &global_options);
36676 rs6000_cpu_index = rs6000_tune_index = -1;
36677 ret = rs6000_inner_target_options (args, true);
36678
36679 /* Set up any additional state. */
36680 if (ret)
36681 {
36682 ret = rs6000_option_override_internal (false);
36683 new_target = build_target_option_node (&global_options);
36684 }
36685 else
36686 new_target = NULL;
36687
36688 new_optimize = build_optimization_node (&global_options);
36689
36690 if (!new_target)
36691 ret = false;
36692
36693 else if (fndecl)
36694 {
36695 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36696
36697 if (old_optimize != new_optimize)
36698 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36699 }
36700
36701 cl_target_option_restore (&global_options, &cur_target);
36702
36703 if (old_optimize != new_optimize)
36704 cl_optimization_restore (&global_options,
36705 TREE_OPTIMIZATION (old_optimize));
36706
36707 return ret;
36708 }
36709
36710 \f
36711 /* Hook to validate the current #pragma GCC target and set the state, and
36712 update the macros based on what was changed. If ARGS is NULL, then
36713 POP_TARGET is used to reset the options. */
36714
36715 bool
36716 rs6000_pragma_target_parse (tree args, tree pop_target)
36717 {
36718 tree prev_tree = build_target_option_node (&global_options);
36719 tree cur_tree;
36720 struct cl_target_option *prev_opt, *cur_opt;
36721 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36722 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36723
36724 if (TARGET_DEBUG_TARGET)
36725 {
36726 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36727 fprintf (stderr, "args:");
36728 rs6000_debug_target_options (args, " ");
36729 fprintf (stderr, "\n");
36730
36731 if (pop_target)
36732 {
36733 fprintf (stderr, "pop_target:\n");
36734 debug_tree (pop_target);
36735 }
36736 else
36737 fprintf (stderr, "pop_target: <NULL>\n");
36738
36739 fprintf (stderr, "--------------------\n");
36740 }
36741
36742 if (! args)
36743 {
36744 cur_tree = ((pop_target)
36745 ? pop_target
36746 : target_option_default_node);
36747 cl_target_option_restore (&global_options,
36748 TREE_TARGET_OPTION (cur_tree));
36749 }
36750 else
36751 {
36752 rs6000_cpu_index = rs6000_tune_index = -1;
36753 if (!rs6000_inner_target_options (args, false)
36754 || !rs6000_option_override_internal (false)
36755 || (cur_tree = build_target_option_node (&global_options))
36756 == NULL_TREE)
36757 {
36758 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36759 fprintf (stderr, "invalid pragma\n");
36760
36761 return false;
36762 }
36763 }
36764
36765 target_option_current_node = cur_tree;
36766 rs6000_activate_target_options (target_option_current_node);
36767
36768 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36769 change the macros that are defined. */
36770 if (rs6000_target_modify_macros_ptr)
36771 {
36772 prev_opt = TREE_TARGET_OPTION (prev_tree);
36773 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36774 prev_flags = prev_opt->x_rs6000_isa_flags;
36775
36776 cur_opt = TREE_TARGET_OPTION (cur_tree);
36777 cur_flags = cur_opt->x_rs6000_isa_flags;
36778 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36779
36780 diff_bumask = (prev_bumask ^ cur_bumask);
36781 diff_flags = (prev_flags ^ cur_flags);
36782
36783 if ((diff_flags != 0) || (diff_bumask != 0))
36784 {
36785 /* Delete old macros. */
36786 rs6000_target_modify_macros_ptr (false,
36787 prev_flags & diff_flags,
36788 prev_bumask & diff_bumask);
36789
36790 /* Define new macros. */
36791 rs6000_target_modify_macros_ptr (true,
36792 cur_flags & diff_flags,
36793 cur_bumask & diff_bumask);
36794 }
36795 }
36796
36797 return true;
36798 }
36799
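/* A sketch of the user-visible effect, assuming a C family front end
   (which sets rs6000_target_modify_macros_ptr):

     #pragma GCC push_options
     #pragma GCC target ("vsx")     // __VSX__ becomes defined here
     ...
     #pragma GCC pop_options        // prior macros restored via the
                                    // diff_flags/diff_bumask logic above
   */
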
36800 \f
36801 /* Remember the last target of rs6000_set_current_function. */
36802 static GTY(()) tree rs6000_previous_fndecl;
36803
36804 /* Restore target's globals from NEW_TREE and invalidate the
36805 rs6000_previous_fndecl cache. */
36806
36807 void
36808 rs6000_activate_target_options (tree new_tree)
36809 {
36810 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36811 if (TREE_TARGET_GLOBALS (new_tree))
36812 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36813 else if (new_tree == target_option_default_node)
36814 restore_target_globals (&default_target_globals);
36815 else
36816 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36817 rs6000_previous_fndecl = NULL_TREE;
36818 }
36819
36820 /* Establish appropriate back-end context for processing the function
36821 FNDECL. The argument might be NULL to indicate processing at top
36822 level, outside of any function scope. */
36823 static void
36824 rs6000_set_current_function (tree fndecl)
36825 {
36826 if (TARGET_DEBUG_TARGET)
36827 {
36828 fprintf (stderr, "\n==================== rs6000_set_current_function");
36829
36830 if (fndecl)
36831 fprintf (stderr, ", fndecl %s (%p)",
36832 (DECL_NAME (fndecl)
36833 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36834 : "<unknown>"), (void *)fndecl);
36835
36836 if (rs6000_previous_fndecl)
36837 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36838
36839 fprintf (stderr, "\n");
36840 }
36841
36842 /* Only change the context if the function changes. This hook is called
36843 several times in the course of compiling a function, and we don't want to
36844 slow things down too much or call target_reinit when it isn't safe. */
36845 if (fndecl == rs6000_previous_fndecl)
36846 return;
36847
36848 tree old_tree;
36849 if (rs6000_previous_fndecl == NULL_TREE)
36850 old_tree = target_option_current_node;
36851 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36852 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36853 else
36854 old_tree = target_option_default_node;
36855
36856 tree new_tree;
36857 if (fndecl == NULL_TREE)
36858 {
36859 if (old_tree != target_option_current_node)
36860 new_tree = target_option_current_node;
36861 else
36862 new_tree = NULL_TREE;
36863 }
36864 else
36865 {
36866 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36867 if (new_tree == NULL_TREE)
36868 new_tree = target_option_default_node;
36869 }
36870
36871 if (TARGET_DEBUG_TARGET)
36872 {
36873 if (new_tree)
36874 {
36875 fprintf (stderr, "\nnew fndecl target specific options:\n");
36876 debug_tree (new_tree);
36877 }
36878
36879 if (old_tree)
36880 {
36881 fprintf (stderr, "\nold fndecl target specific options:\n");
36882 debug_tree (old_tree);
36883 }
36884
36885 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36886 fprintf (stderr, "--------------------\n");
36887 }
36888
36889 if (new_tree && old_tree != new_tree)
36890 rs6000_activate_target_options (new_tree);
36891
36892 if (fndecl)
36893 rs6000_previous_fndecl = fndecl;
36894 }
36895
36896 \f
36897 /* Save the current options */
36898
36899 static void
36900 rs6000_function_specific_save (struct cl_target_option *ptr,
36901 struct gcc_options *opts)
36902 {
36903 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36904 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36905 }
36906
36907 /* Restore the current options */
36908
36909 static void
36910 rs6000_function_specific_restore (struct gcc_options *opts,
36911 struct cl_target_option *ptr)
36912
36913 {
36914 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36915 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36916 (void) rs6000_option_override_internal (false);
36917 }
36918
36919 /* Print the current options */
36920
36921 static void
36922 rs6000_function_specific_print (FILE *file, int indent,
36923 struct cl_target_option *ptr)
36924 {
36925 rs6000_print_isa_options (file, indent, "Isa options set",
36926 ptr->x_rs6000_isa_flags);
36927
36928 rs6000_print_isa_options (file, indent, "Isa options explicit",
36929 ptr->x_rs6000_isa_flags_explicit);
36930 }
36931
36932 /* Helper function to print the current isa or misc options on a line. */
36933
36934 static void
36935 rs6000_print_options_internal (FILE *file,
36936 int indent,
36937 const char *string,
36938 HOST_WIDE_INT flags,
36939 const char *prefix,
36940 const struct rs6000_opt_mask *opts,
36941 size_t num_elements)
36942 {
36943 size_t i;
36944 size_t start_column = 0;
36945 size_t cur_column;
36946 size_t max_column = 120;
36947 size_t prefix_len = strlen (prefix);
36948 size_t comma_len = 0;
36949 const char *comma = "";
36950
36951 if (indent)
36952 start_column += fprintf (file, "%*s", indent, "");
36953
36954 if (!flags)
36955 {
36956 fprintf (file, DEBUG_FMT_S, string, "<none>");
36957 return;
36958 }
36959
36960 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36961
36962 /* Print the various mask options. */
36963 cur_column = start_column;
36964 for (i = 0; i < num_elements; i++)
36965 {
36966 bool invert = opts[i].invert;
36967 const char *name = opts[i].name;
36968 const char *no_str = "";
36969 HOST_WIDE_INT mask = opts[i].mask;
36970 size_t len = comma_len + prefix_len + strlen (name);
36971
36972 if (!invert)
36973 {
36974 if ((flags & mask) == 0)
36975 {
36976 no_str = "no-";
36977 len += sizeof ("no-") - 1;
36978 }
36979
36980 flags &= ~mask;
36981 }
36982
36983 else
36984 {
36985 if ((flags & mask) != 0)
36986 {
36987 no_str = "no-";
36988 len += sizeof ("no-") - 1;
36989 }
36990
36991 flags |= mask;
36992 }
36993
36994 cur_column += len;
36995 if (cur_column > max_column)
36996 {
36997 fprintf (file, ", \\\n%*s", (int)start_column, "");
36998 cur_column = start_column + len;
36999 comma = "";
37000 }
37001
37002 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
37003 comma = ", ";
37004 comma_len = sizeof (", ") - 1;
37005 }
37006
37007 fputs ("\n", file);
37008 }
37009
37010 /* Helper function to print the current isa options on a line. */
37011
37012 static void
37013 rs6000_print_isa_options (FILE *file, int indent, const char *string,
37014 HOST_WIDE_INT flags)
37015 {
37016 rs6000_print_options_internal (file, indent, string, flags, "-m",
37017 &rs6000_opt_masks[0],
37018 ARRAY_SIZE (rs6000_opt_masks));
37019 }
37020
37021 static void
37022 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37023 HOST_WIDE_INT flags)
37024 {
37025 rs6000_print_options_internal (file, indent, string, flags, "",
37026 &rs6000_builtin_mask_names[0],
37027 ARRAY_SIZE (rs6000_builtin_mask_names));
37028 }
37029
37030 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37031 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37032 -mupper-regs-df, etc.).
37033
37034 If the user used -mno-power8-vector, we need to turn off all of the implicit
37035 ISA 2.07 and 3.0 options that relate to the vector unit.
37036
37037 If the user used -mno-power9-vector, we need to turn off all of the implicit
37038 ISA 3.0 options that relate to the vector unit.
37039
37040 This function does not handle explicit options such as the user specifying
37041 -mdirect-move. These are handled in rs6000_option_override_internal, and
37042 the appropriate error is given if needed.
37043
37044 We return a mask of all of the implicit options that should not be enabled
37045 by default. */
37046
37047 static HOST_WIDE_INT
37048 rs6000_disable_incompatible_switches (void)
37049 {
37050 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37051 size_t i, j;
37052
37053 static const struct {
37054 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37055 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37056 const char *const name; /* name of the switch. */
37057 } flags[] = {
37058 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37059 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37060 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37061 };
37062
37063 for (i = 0; i < ARRAY_SIZE (flags); i++)
37064 {
37065 HOST_WIDE_INT no_flag = flags[i].no_flag;
37066
37067 if ((rs6000_isa_flags & no_flag) == 0
37068 && (rs6000_isa_flags_explicit & no_flag) != 0)
37069 {
37070 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37071 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37072 & rs6000_isa_flags
37073 & dep_flags);
37074
37075 if (set_flags)
37076 {
37077 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37078 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37079 {
37080 set_flags &= ~rs6000_opt_masks[j].mask;
37081 error ("%<-mno-%s%> turns off %<-m%s%>",
37082 flags[i].name,
37083 rs6000_opt_masks[j].name);
37084 }
37085
37086 gcc_assert (!set_flags);
37087 }
37088
37089 rs6000_isa_flags &= ~dep_flags;
37090 ignore_masks |= no_flag | dep_flags;
37091 }
37092 }
37093
37094 return ignore_masks;
37095 }
37096
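/* For example, the (hypothetical) command line "-mno-vsx -mpower9-vector"
   is caught by the loop above: power9-vector depends on vsx, so the error
   "-mno-vsx turns off -mpower9-vector" is emitted and the dependent flag
   is cleared from rs6000_isa_flags.  */
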
37097 \f
37098 /* Helper function for printing the function name when debugging. */
37099
37100 static const char *
37101 get_decl_name (tree fn)
37102 {
37103 tree name;
37104
37105 if (!fn)
37106 return "<null>";
37107
37108 name = DECL_NAME (fn);
37109 if (!name)
37110 return "<no-name>";
37111
37112 return IDENTIFIER_POINTER (name);
37113 }
37114
37115 /* Return the clone id of the target we are compiling code for in a target
37116 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37117 the priority list for the target clones (ordered from lowest to
37118 highest). */
37119
37120 static int
37121 rs6000_clone_priority (tree fndecl)
37122 {
37123 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37124 HOST_WIDE_INT isa_masks;
37125 int ret = CLONE_DEFAULT;
37126 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37127 const char *attrs_str = NULL;
37128
37129 attrs = TREE_VALUE (TREE_VALUE (attrs));
37130 attrs_str = TREE_STRING_POINTER (attrs);
37131
37132 /* Return priority zero for default function. Return the ISA needed for the
37133 function if it is not the default. */
37134 if (strcmp (attrs_str, "default") != 0)
37135 {
37136 if (fn_opts == NULL_TREE)
37137 fn_opts = target_option_default_node;
37138
37139 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37140 isa_masks = rs6000_isa_flags;
37141 else
37142 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37143
37144 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37145 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37146 break;
37147 }
37148
37149 if (TARGET_DEBUG_TARGET)
37150 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37151 get_decl_name (fndecl), ret);
37152
37153 return ret;
37154 }
37155
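/* As an illustration (hypothetical user code), the clones produced by

     __attribute__((target_clones("cpu=power9,default")))
     long mod (long a, long b) { return a % b; }

   get distinct priorities here: the default version yields CLONE_DEFAULT,
   while the power9 version yields the highest rs6000_clone_map entry
   whose isa_mask it satisfies.  */
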
37156 /* This compares the priority of target features in function DECL1 and DECL2.
37157 It returns positive value if DECL1 is higher priority, negative value if
37158 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37159 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37160
37161 static int
37162 rs6000_compare_version_priority (tree decl1, tree decl2)
37163 {
37164 int priority1 = rs6000_clone_priority (decl1);
37165 int priority2 = rs6000_clone_priority (decl2);
37166 int ret = priority1 - priority2;
37167
37168 if (TARGET_DEBUG_TARGET)
37169 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37170 get_decl_name (decl1), get_decl_name (decl2), ret);
37171
37172 return ret;
37173 }
37174
37175 /* Make a dispatcher declaration for the multi-versioned function DECL.
37176 Calls to DECL function will be replaced with calls to the dispatcher
37177 by the front-end. Returns the decl of the dispatcher function. */
37178
37179 static tree
37180 rs6000_get_function_versions_dispatcher (void *decl)
37181 {
37182 tree fn = (tree) decl;
37183 struct cgraph_node *node = NULL;
37184 struct cgraph_node *default_node = NULL;
37185 struct cgraph_function_version_info *node_v = NULL;
37186 struct cgraph_function_version_info *first_v = NULL;
37187
37188 tree dispatch_decl = NULL;
37189
37190 struct cgraph_function_version_info *default_version_info = NULL;
37191 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37192
37193 if (TARGET_DEBUG_TARGET)
37194 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37195 get_decl_name (fn));
37196
37197 node = cgraph_node::get (fn);
37198 gcc_assert (node != NULL);
37199
37200 node_v = node->function_version ();
37201 gcc_assert (node_v != NULL);
37202
37203 if (node_v->dispatcher_resolver != NULL)
37204 return node_v->dispatcher_resolver;
37205
37206 /* Find the default version and make it the first node. */
37207 first_v = node_v;
37208 /* Go to the beginning of the chain. */
37209 while (first_v->prev != NULL)
37210 first_v = first_v->prev;
37211
37212 default_version_info = first_v;
37213 while (default_version_info != NULL)
37214 {
37215 const tree decl2 = default_version_info->this_node->decl;
37216 if (is_function_default_version (decl2))
37217 break;
37218 default_version_info = default_version_info->next;
37219 }
37220
37221 /* If there is no default node, just return NULL. */
37222 if (default_version_info == NULL)
37223 return NULL;
37224
37225 /* Make default info the first node. */
37226 if (first_v != default_version_info)
37227 {
37228 default_version_info->prev->next = default_version_info->next;
37229 if (default_version_info->next)
37230 default_version_info->next->prev = default_version_info->prev;
37231 first_v->prev = default_version_info;
37232 default_version_info->next = first_v;
37233 default_version_info->prev = NULL;
37234 }
37235
37236 default_node = default_version_info->this_node;
37237
37238 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37239 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37240 "target_clones attribute needs GLIBC (2.23 and newer) that "
37241 "exports hardware capability bits");
37242 #else
37243
37244 if (targetm.has_ifunc_p ())
37245 {
37246 struct cgraph_function_version_info *it_v = NULL;
37247 struct cgraph_node *dispatcher_node = NULL;
37248 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37249
37250 /* Right now, the dispatching is done via ifunc. */
37251 dispatch_decl = make_dispatcher_decl (default_node->decl);
37252
37253 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37254 gcc_assert (dispatcher_node != NULL);
37255 dispatcher_node->dispatcher_function = 1;
37256 dispatcher_version_info
37257 = dispatcher_node->insert_new_function_version ();
37258 dispatcher_version_info->next = default_version_info;
37259 dispatcher_node->definition = 1;
37260
37261 /* Set the dispatcher for all the versions. */
37262 it_v = default_version_info;
37263 while (it_v != NULL)
37264 {
37265 it_v->dispatcher_resolver = dispatch_decl;
37266 it_v = it_v->next;
37267 }
37268 }
37269 else
37270 {
37271 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37272 "multiversioning needs ifunc which is not supported "
37273 "on this target");
37274 }
37275 #endif
37276
37277 return dispatch_decl;
37278 }
37279
37280 /* Make the resolver function decl to dispatch the versions of a multi-
37281 versioned function, DEFAULT_DECL. Create an empty basic block in the
37282 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37283 function. */
37284
37285 static tree
37286 make_resolver_func (const tree default_decl,
37287 const tree dispatch_decl,
37288 basic_block *empty_bb)
37289 {
37290 /* Make the resolver function static. The resolver function returns
37291 void *. */
37292 tree decl_name = clone_function_name (default_decl, "resolver");
37293 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37294 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37295 tree decl = build_fn_decl (resolver_name, type);
37296 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37297
37298 DECL_NAME (decl) = decl_name;
37299 TREE_USED (decl) = 1;
37300 DECL_ARTIFICIAL (decl) = 1;
37301 DECL_IGNORED_P (decl) = 0;
37302 TREE_PUBLIC (decl) = 0;
37303 DECL_UNINLINABLE (decl) = 1;
37304
37305 /* Resolver is not external, body is generated. */
37306 DECL_EXTERNAL (decl) = 0;
37307 DECL_EXTERNAL (dispatch_decl) = 0;
37308
37309 DECL_CONTEXT (decl) = NULL_TREE;
37310 DECL_INITIAL (decl) = make_node (BLOCK);
37311 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37312
37313 /* Build result decl and add to function_decl. */
37314 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37315 DECL_CONTEXT (t) = decl;
37316 DECL_ARTIFICIAL (t) = 1;
37317 DECL_IGNORED_P (t) = 1;
37318 DECL_RESULT (decl) = t;
37319
37320 gimplify_function_tree (decl);
37321 push_cfun (DECL_STRUCT_FUNCTION (decl));
37322 *empty_bb = init_lowered_empty_function (decl, false,
37323 profile_count::uninitialized ());
37324
37325 cgraph_node::add_new_function (decl, true);
37326 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37327
37328 pop_cfun ();
37329
37330 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37331 DECL_ATTRIBUTES (dispatch_decl)
37332 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37333
37334 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37335
37336 return decl;
37337 }
37338
37339 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37340 return a pointer to VERSION_DECL if we are running on a machine that
37341 supports the index CLONE_ISA hardware architecture bits. This function will
37342 be called during version dispatch to decide which function version to
37343 execute. It returns the basic block at the end, to which more conditions
37344 can be added. */
37345
37346 static basic_block
37347 add_condition_to_bb (tree function_decl, tree version_decl,
37348 int clone_isa, basic_block new_bb)
37349 {
37350 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37351
37352 gcc_assert (new_bb != NULL);
37353 gimple_seq gseq = bb_seq (new_bb);
37354
37356 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37357 build_fold_addr_expr (version_decl));
37358 tree result_var = create_tmp_var (ptr_type_node);
37359 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37360 gimple *return_stmt = gimple_build_return (result_var);
37361
37362 if (clone_isa == CLONE_DEFAULT)
37363 {
37364 gimple_seq_add_stmt (&gseq, convert_stmt);
37365 gimple_seq_add_stmt (&gseq, return_stmt);
37366 set_bb_seq (new_bb, gseq);
37367 gimple_set_bb (convert_stmt, new_bb);
37368 gimple_set_bb (return_stmt, new_bb);
37369 pop_cfun ();
37370 return new_bb;
37371 }
37372
37373 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37374 tree cond_var = create_tmp_var (bool_int_type_node);
37375 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37376 const char *arg_str = rs6000_clone_map[clone_isa].name;
37377 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37378 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37379 gimple_call_set_lhs (call_cond_stmt, cond_var);
37380
37381 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37382 gimple_set_bb (call_cond_stmt, new_bb);
37383 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37384
37385 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37386 NULL_TREE, NULL_TREE);
37387 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37388 gimple_set_bb (if_else_stmt, new_bb);
37389 gimple_seq_add_stmt (&gseq, if_else_stmt);
37390
37391 gimple_seq_add_stmt (&gseq, convert_stmt);
37392 gimple_seq_add_stmt (&gseq, return_stmt);
37393 set_bb_seq (new_bb, gseq);
37394
37395 basic_block bb1 = new_bb;
37396 edge e12 = split_block (bb1, if_else_stmt);
37397 basic_block bb2 = e12->dest;
37398 e12->flags &= ~EDGE_FALLTHRU;
37399 e12->flags |= EDGE_TRUE_VALUE;
37400
37401 edge e23 = split_block (bb2, return_stmt);
37402 gimple_set_bb (convert_stmt, bb2);
37403 gimple_set_bb (return_stmt, bb2);
37404
37405 basic_block bb3 = e23->dest;
37406 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37407
37408 remove_edge (e23);
37409 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37410
37411 pop_cfun ();
37412 return bb3;
37413 }
37414
37415 /* This function generates the dispatch function for multi-versioned functions.
37416 DISPATCH_DECL is the function which will contain the dispatch logic.
37417 FNDECLS are the function choices for dispatch, and is a tree chain.
37418 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37419 code is generated. */
37420
37421 static int
37422 dispatch_function_versions (tree dispatch_decl,
37423 void *fndecls_p,
37424 basic_block *empty_bb)
37425 {
37426 int ix;
37427 tree ele;
37428 vec<tree> *fndecls;
37429 tree clones[CLONE_MAX];
37430
37431 if (TARGET_DEBUG_TARGET)
37432 fputs ("dispatch_function_versions, top\n", stderr);
37433
37434 gcc_assert (dispatch_decl != NULL
37435 && fndecls_p != NULL
37436 && empty_bb != NULL);
37437
37438 /* fndecls_p is actually a vector. */
37439 fndecls = static_cast<vec<tree> *> (fndecls_p);
37440
37441 /* At least one more version other than the default. */
37442 gcc_assert (fndecls->length () >= 2);
37443
37444 /* The first version in the vector is the default decl. */
37445 memset ((void *) clones, '\0', sizeof (clones));
37446 clones[CLONE_DEFAULT] = (*fndecls)[0];
37447
37448 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37449 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37450 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37451 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37452 to insert the code here to do the call. */
37453
37454 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37455 {
37456 int priority = rs6000_clone_priority (ele);
37457 if (!clones[priority])
37458 clones[priority] = ele;
37459 }
37460
37461 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37462 if (clones[ix])
37463 {
37464 if (TARGET_DEBUG_TARGET)
37465 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37466 ix, get_decl_name (clones[ix]));
37467
37468 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37469 *empty_bb);
37470 }
37471
37472 return 0;
37473 }
37474
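/* The resolver built above behaves roughly like this pseudo-C sketch
   (identifiers are illustrative; the real predicate is the
   __builtin_cpu_supports call emitted by add_condition_to_bb):

     static void *mod_resolver (void)
     {
       if (__builtin_cpu_supports ("arch_3_00"))   // highest priority
         return mod_power9;
       return mod_default;                         // CLONE_DEFAULT
     }
   */
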
37475 /* Generate the dispatching code body to dispatch multi-versioned function
37476 DECL. The target hook is called to process the "target" attributes and
37477 provide the code to dispatch the right function at run-time. NODE points
37478 to the dispatcher decl whose body will be created. */
37479
37480 static tree
37481 rs6000_generate_version_dispatcher_body (void *node_p)
37482 {
37483 tree resolver;
37484 basic_block empty_bb;
37485 struct cgraph_node *node = (cgraph_node *) node_p;
37486 struct cgraph_function_version_info *ninfo = node->function_version ();
37487
37488 if (ninfo->dispatcher_resolver)
37489 return ninfo->dispatcher_resolver;
37490
37491 /* node is going to be an alias, so remove the finalized bit. */
37492 node->definition = false;
37493
37494 /* The first version in the chain corresponds to the default version. */
37495 ninfo->dispatcher_resolver = resolver
37496 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37497
37498 if (TARGET_DEBUG_TARGET)
37499 fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
37500 get_decl_name (resolver));
37501
37502 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37503 auto_vec<tree, 2> fn_ver_vec;
37504
37505 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37506 vinfo;
37507 vinfo = vinfo->next)
37508 {
37509 struct cgraph_node *version = vinfo->this_node;
37510 /* Check for virtual functions here again, as by this time it should
37511 have been determined if this function needs a vtable index or
37512 not. This happens for methods in derived classes that override
37513 virtual methods in base classes but are not explicitly marked as
37514 virtual. */
37515 if (DECL_VINDEX (version->decl))
37516 sorry ("Virtual function multiversioning not supported");
37517
37518 fn_ver_vec.safe_push (version->decl);
37519 }
37520
37521 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37522 cgraph_edge::rebuild_edges ();
37523 pop_cfun ();
37524 return resolver;
37525 }
37526
37527 \f
37528 /* Hook to determine if one function can safely inline another. */
37529
37530 static bool
37531 rs6000_can_inline_p (tree caller, tree callee)
37532 {
37533 bool ret = false;
37534 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37535 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37536
37537 /* If callee has no option attributes, then it is ok to inline. */
37538 if (!callee_tree)
37539 ret = true;
37540
37541 /* If caller has no option attributes, but callee does then it is not ok to
37542 inline. */
37543 else if (!caller_tree)
37544 ret = false;
37545
37546 else
37547 {
37548 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37549 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37550
37551 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37552 can inline an altivec function but a non-vsx function can't inline a
37553 vsx function. */
37554 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37555 == callee_opts->x_rs6000_isa_flags)
37556 ret = true;
37557 }
37558
37559 if (TARGET_DEBUG_TARGET)
37560 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37561 get_decl_name (caller), get_decl_name (callee),
37562 (ret ? "can" : "cannot"));
37563
37564 return ret;
37565 }
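
/* For instance (hypothetical user code), given

     __attribute__((target("altivec"))) static int callee (void);
     __attribute__((target("vsx"))) int caller (void);

   the callee's ISA flags are a subset of the caller's (VSX implies
   AltiVec), so caller may inline callee; with the attributes swapped the
   subset test above fails and inlining is refused.  */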
37566 \f
37567 /* Allocate a stack temp and fix up the address so it meets the particular
37568 memory requirements (either offsettable or REG+REG addressing).  */
37569
37570 rtx
37571 rs6000_allocate_stack_temp (machine_mode mode,
37572 bool offsettable_p,
37573 bool reg_reg_p)
37574 {
37575 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37576 rtx addr = XEXP (stack, 0);
37577 int strict_p = reload_completed;
37578
37579 if (!legitimate_indirect_address_p (addr, strict_p))
37580 {
37581 if (offsettable_p
37582 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37583 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37584
37585 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37586 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37587 }
37588
37589 return stack;
37590 }
37591
37592 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37593 convert to such a form to deal with memory reference instructions
37594 like STFIWX and LDBRX that only take reg+reg addressing. */
37595
37596 rtx
37597 rs6000_force_indexed_or_indirect_mem (rtx x)
37598 {
37599 machine_mode mode = GET_MODE (x);
37600
37601 gcc_assert (MEM_P (x));
37602 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37603 {
37604 rtx addr = XEXP (x, 0);
37605 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37606 {
37607 rtx reg = XEXP (addr, 0);
37608 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37609 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37610 gcc_assert (REG_P (reg));
37611 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37612 addr = reg;
37613 }
37614 else if (GET_CODE (addr) == PRE_MODIFY)
37615 {
37616 rtx reg = XEXP (addr, 0);
37617 rtx expr = XEXP (addr, 1);
37618 gcc_assert (REG_P (reg));
37619 gcc_assert (GET_CODE (expr) == PLUS);
37620 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37621 addr = reg;
37622 }
37623
37624 x = replace_equiv_address (x, force_reg (Pmode, addr));
37625 }
37626
37627 return x;
37628 }
37629
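/* As an RTL sketch, an auto-increment address such as

     (mem (pre_inc (reg r9)))

   is rewritten by the function above into an explicit update of r9
   followed by (mem (reg r9)), a form that reg+reg-only instructions such
   as STFIWX and LDBRX accept.  */
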
37630 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37631
37632 On the RS/6000, all integer constants are acceptable, most won't be valid
37633 for particular insns, though. Only easy FP constants are acceptable. */
37634
37635 static bool
37636 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37637 {
37638 if (TARGET_ELF && tls_referenced_p (x))
37639 return false;
37640
37641 if (CONST_DOUBLE_P (x))
37642 return easy_fp_constant (x, mode);
37643
37644 if (GET_CODE (x) == CONST_VECTOR)
37645 return easy_vector_constant (x, mode);
37646
37647 return true;
37648 }
37649
37650 \f
37651 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37652
37653 static bool
37654 chain_already_loaded (rtx_insn *last)
37655 {
37656 for (; last != NULL; last = PREV_INSN (last))
37657 {
37658 if (NONJUMP_INSN_P (last))
37659 {
37660 rtx patt = PATTERN (last);
37661
37662 if (GET_CODE (patt) == SET)
37663 {
37664 rtx lhs = XEXP (patt, 0);
37665
37666 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37667 return true;
37668 }
37669 }
37670 }
37671 return false;
37672 }
37673
37674 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37675
37676 void
37677 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37678 {
37679 rtx func = func_desc;
37680 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37681 rtx toc_load = NULL_RTX;
37682 rtx toc_restore = NULL_RTX;
37683 rtx func_addr;
37684 rtx abi_reg = NULL_RTX;
37685 rtx call[4];
37686 int n_call;
37687 rtx insn;
37688 bool is_pltseq_longcall;
37689
37690 if (global_tlsarg)
37691 tlsarg = global_tlsarg;
37692
37693 /* Handle longcall attributes. */
37694 is_pltseq_longcall = false;
37695 if ((INTVAL (cookie) & CALL_LONG) != 0
37696 && GET_CODE (func_desc) == SYMBOL_REF)
37697 {
37698 func = rs6000_longcall_ref (func_desc, tlsarg);
37699 if (TARGET_PLTSEQ)
37700 is_pltseq_longcall = true;
37701 }
37702
37703 /* Handle indirect calls. */
37704 if (!SYMBOL_REF_P (func)
37705 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37706 {
37707 /* Save the TOC into its reserved slot before the call,
37708 and prepare to restore it after the call. */
37709 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37710 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37711 gen_rtvec (1, stack_toc_offset),
37712 UNSPEC_TOCSLOT);
37713 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37714
37715 /* Can we optimize saving the TOC in the prologue or
37716 do we need to do it at every call? */
37717 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37718 cfun->machine->save_toc_in_prologue = true;
37719 else
37720 {
37721 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37722 rtx stack_toc_mem = gen_frame_mem (Pmode,
37723 gen_rtx_PLUS (Pmode, stack_ptr,
37724 stack_toc_offset));
37725 MEM_VOLATILE_P (stack_toc_mem) = 1;
37726 if (is_pltseq_longcall)
37727 {
37728 /* Use UNSPEC_PLTSEQ here to emit every instruction in an
37729 inline PLT call sequence with a reloc, enabling the
37730 linker to edit the sequence back to a direct call
37731 when that makes sense. */
37732 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37733 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37734 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37735 }
37736 else
37737 emit_move_insn (stack_toc_mem, toc_reg);
37738 }
37739
37740 if (DEFAULT_ABI == ABI_ELFv2)
37741 {
37742 /* A function pointer in the ELFv2 ABI is just a plain address, but
37743 the ABI requires it to be loaded into r12 before the call. */
37744 func_addr = gen_rtx_REG (Pmode, 12);
37745 if (!rtx_equal_p (func_addr, func))
37746 emit_move_insn (func_addr, func);
37747 abi_reg = func_addr;
37748 /* Indirect calls via CTR are strongly preferred over indirect
37749 calls via LR, so move the address there. Needed to mark
37750 this insn for linker plt sequence editing too. */
37751 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37752 if (is_pltseq_longcall)
37753 {
37754 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37755 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37756 emit_insn (gen_rtx_SET (func_addr, mark_func));
37757 v = gen_rtvec (2, func_addr, func_desc);
37758 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37759 }
37760 else
37761 emit_move_insn (func_addr, abi_reg);
37762 }
37763 else
37764 {
37765 /* A function pointer under AIX is a pointer to a data area whose
37766 first word contains the actual address of the function, whose
37767 second word contains a pointer to its TOC, and whose third word
37768 contains a value to place in the static chain register (r11).
37769 Note that if we load the static chain, our "trampoline" need
37770 not have any executable code. */
37771
37772 /* Load up address of the actual function. */
37773 func = force_reg (Pmode, func);
37774 func_addr = gen_reg_rtx (Pmode);
37775 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37776
37777 /* Indirect calls via CTR are strongly preferred over indirect
37778 calls via LR, so move the address there. */
37779 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37780 emit_move_insn (ctr_reg, func_addr);
37781 func_addr = ctr_reg;
37782
37783 /* Prepare to load the TOC of the called function. Note that the
37784 TOC load must happen immediately before the actual call so
37785 that unwinding the TOC registers works correctly. See the
37786 comment in frob_update_context. */
37787 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37788 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37789 gen_rtx_PLUS (Pmode, func,
37790 func_toc_offset));
37791 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37792
37793 /* If we have a static chain, load it up. But, if the call was
37794 originally direct, the 3rd word has not been written since no
37795 trampoline has been built, so we ought not to load it, lest we
37796 override a static chain value. */
37797 if (!(GET_CODE (func_desc) == SYMBOL_REF
37798 && SYMBOL_REF_FUNCTION_P (func_desc))
37799 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37800 && !chain_already_loaded (get_current_sequence ()->next->last))
37801 {
37802 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37803 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37804 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37805 gen_rtx_PLUS (Pmode, func,
37806 func_sc_offset));
37807 emit_move_insn (sc_reg, func_sc_mem);
37808 abi_reg = sc_reg;
37809 }
37810 }
37811 }
37812 else
37813 {
37814 /* Direct calls use the TOC: for local calls, the callee will
37815 assume the TOC register is set; for non-local calls, the
37816 PLT stub needs the TOC register. */
37817 abi_reg = toc_reg;
37818 func_addr = func;
37819 }
37820
37821 /* Create the call. */
37822 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37823 if (value != NULL_RTX)
37824 call[0] = gen_rtx_SET (value, call[0]);
37825 n_call = 1;
37826
37827 if (toc_load)
37828 call[n_call++] = toc_load;
37829 if (toc_restore)
37830 call[n_call++] = toc_restore;
37831
37832 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37833
37834 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37835 insn = emit_call_insn (insn);
37836
37837 /* Mention all registers defined by the ABI to hold information
37838 as uses in CALL_INSN_FUNCTION_USAGE. */
37839 if (abi_reg)
37840 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37841 }
37842
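/* For reference, an AIX (and ELFv1) function descriptor is laid out
   roughly as this C sketch (not a type the compiler itself uses):

     struct func_desc
     {
       void *code_addr;      // word 0: address of the function's code
       void *toc;            // word 1: the function's TOC pointer
       void *static_chain;   // word 2: static chain value, if any
     };

   hence the indirect path above loads the call target from offset 0, the
   TOC from offset GET_MODE_SIZE (Pmode), and the static chain from offset
   2 * GET_MODE_SIZE (Pmode).  */
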
37843 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37844
37845 void
37846 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37847 {
37848 rtx call[2];
37849 rtx insn;
37850
37851 gcc_assert (INTVAL (cookie) == 0);
37852
37853 if (global_tlsarg)
37854 tlsarg = global_tlsarg;
37855
37856 /* Create the call. */
37857 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37858 if (value != NULL_RTX)
37859 call[0] = gen_rtx_SET (value, call[0]);
37860
37861 call[1] = simple_return_rtx;
37862
37863 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37864 insn = emit_call_insn (insn);
37865
37866 /* Note use of the TOC register. */
37867 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37868 }
37869
37870 /* Expand code to perform a call under the SYSV4 ABI. */
37871
37872 void
37873 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37874 {
37875 rtx func = func_desc;
37876 rtx func_addr;
37877 rtx call[4];
37878 rtx insn;
37879 rtx abi_reg = NULL_RTX;
37880 int n;
37881
37882 if (global_tlsarg)
37883 tlsarg = global_tlsarg;
37884
37885 /* Handle longcall attributes. */
37886 if ((INTVAL (cookie) & CALL_LONG) != 0
37887 && GET_CODE (func_desc) == SYMBOL_REF)
37888 {
37889 func = rs6000_longcall_ref (func_desc, tlsarg);
37890 /* If the longcall was implemented as an inline PLT call using
37891 PLT unspecs then func will be REG:r11. If not, func will be
37892 a pseudo reg. The inline PLT call sequence supports lazy
37893 linking (and longcalls to functions in dlopen'd libraries).
37894 The other style of longcall doesn't. The lazy linking entry
37895 to the dynamic symbol resolver requires r11 be the function
37896 address (as it is for linker generated PLT stubs). Ensure
37897 r11 stays valid to the bctrl by marking r11 used by the call. */
37898 if (TARGET_PLTSEQ)
37899 abi_reg = func;
37900 }
37901
37902 /* Handle indirect calls. */
37903 if (GET_CODE (func) != SYMBOL_REF)
37904 {
37905 func = force_reg (Pmode, func);
37906
37907 /* Indirect calls via CTR are strongly preferred over indirect
37908 calls via LR, so move the address there. That can't be left
37909 to reload because we want to mark every instruction in an
37910 inline PLT call sequence with a reloc, enabling the linker to
37911 edit the sequence back to a direct call when that makes sense. */
37912 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37913 if (abi_reg)
37914 {
37915 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37916 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37917 emit_insn (gen_rtx_SET (func_addr, mark_func));
37918 v = gen_rtvec (2, func_addr, func_desc);
37919 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37920 }
37921 else
37922 emit_move_insn (func_addr, func);
37923 }
37924 else
37925 func_addr = func;
37926
37927 /* Create the call. */
37928 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37929 if (value != NULL_RTX)
37930 call[0] = gen_rtx_SET (value, call[0]);
37931
37932 call[1] = gen_rtx_USE (VOIDmode, cookie);
37933 n = 2;
37934 if (TARGET_SECURE_PLT
37935 && flag_pic
37936 && GET_CODE (func_addr) == SYMBOL_REF
37937 && !SYMBOL_REF_LOCAL_P (func_addr))
37938 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
37939
37940 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37941
37942 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
37943 insn = emit_call_insn (insn);
37944 if (abi_reg)
37945 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37946 }
37947
37948 /* Expand code to perform a sibling call under the SysV4 ABI. */
37949
37950 void
37951 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37952 {
37953 rtx func = func_desc;
37954 rtx func_addr;
37955 rtx call[3];
37956 rtx insn;
37957 rtx abi_reg = NULL_RTX;
37958
37959 if (global_tlsarg)
37960 tlsarg = global_tlsarg;
37961
37962 /* Handle longcall attributes. */
37963 if ((INTVAL (cookie) & CALL_LONG) != 0
37964 && GET_CODE (func_desc) == SYMBOL_REF)
37965 {
37966 func = rs6000_longcall_ref (func_desc, tlsarg);
37967 /* If the longcall was implemented as an inline PLT call using
37968 PLT unspecs then func will be REG:r11. If not, func will be
37969 a pseudo reg. The inline PLT call sequence supports lazy
37970 linking (and longcalls to functions in dlopen'd libraries).
37971 The other style of longcall doesn't. The lazy linking entry
37972 to the dynamic symbol resolver requires r11 be the function
37973 address (as it is for linker generated PLT stubs). Ensure
37974 r11 stays valid to the bctr by marking r11 used by the call. */
37975 if (TARGET_PLTSEQ)
37976 abi_reg = func;
37977 }
37978
37979 /* Handle indirect calls. */
37980 if (GET_CODE (func) != SYMBOL_REF)
37981 {
37982 func = force_reg (Pmode, func);
37983
37984 /* Indirect sibcalls must go via CTR. That can't be left to
37985 reload because we want to mark every instruction in an inline
37986 PLT call sequence with a reloc, enabling the linker to edit
37987 the sequence back to a direct call when that makes sense. */
37988 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37989 if (abi_reg)
37990 {
37991 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37992 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37993 emit_insn (gen_rtx_SET (func_addr, mark_func));
37994 v = gen_rtvec (2, func_addr, func_desc);
37995 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37996 }
37997 else
37998 emit_move_insn (func_addr, func);
37999 }
38000 else
38001 func_addr = func;
38002
38003 /* Create the call. */
38004 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38005 if (value != NULL_RTX)
38006 call[0] = gen_rtx_SET (value, call[0]);
38007
38008 call[1] = gen_rtx_USE (VOIDmode, cookie);
38009 call[2] = simple_return_rtx;
38010
38011 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38012 insn = emit_call_insn (insn);
38013 if (abi_reg)
38014 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
38015 }
38016
38017 #if TARGET_MACHO
38018
38019 /* Expand code to perform a call under the Darwin ABI.
38020 Modulo handling of mlongcall, this is much the same as sysv.
38021 If/when the longcall optimisation is removed, we could drop this
38022 code and use the sysv case (taking care to avoid the tls stuff).
38023
38024 We can use this for sibcalls too, if needed. */
38025
38026 void
38027 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38028 rtx cookie, bool sibcall)
38029 {
38030 rtx func = func_desc;
38031 rtx func_addr;
38032 rtx call[3];
38033 rtx insn;
38034 int cookie_val = INTVAL (cookie);
38035 bool make_island = false;
38036
38037 /* Handle longcall attributes; there are two cases for Darwin:
38038 1) Newer linkers are capable of synthesising any branch islands needed.
38039 2) We need a helper branch island synthesised by the compiler.
38040 The second case has mostly been retired and we don't use it for m64.
38041 In fact, it is only an optimisation; we could just make an indirect
38042 call as sysv does, but we keep it for backwards compatibility for now.
38043 If we're going to use this, then we need to keep the CALL_LONG bit set,
38044 so that we can pick up the special insn form later. */
38045 if ((cookie_val & CALL_LONG) != 0
38046 && GET_CODE (func_desc) == SYMBOL_REF)
38047 {
38048 if (darwin_emit_branch_islands && TARGET_32BIT)
38049 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38050 else
38051 {
38052 /* The linker is capable of doing this, but the user explicitly
38053 asked for -mlongcall, so we'll do the 'normal' version. */
38054 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38055 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38056 }
38057 }
38058
38059 /* Handle indirect calls. */
38060 if (GET_CODE (func) != SYMBOL_REF)
38061 {
38062 func = force_reg (Pmode, func);
38063
38064 /* Indirect calls via CTR are strongly preferred over indirect
38065 calls via LR, and are required for indirect sibcalls, so move
38066 the address there. */
38067 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38068 emit_move_insn (func_addr, func);
38069 }
38070 else
38071 func_addr = func;
38072
38073 /* Create the call. */
38074 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38075 if (value != NULL_RTX)
38076 call[0] = gen_rtx_SET (value, call[0]);
38077
38078 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38079
38080 if (sibcall)
38081 call[2] = simple_return_rtx;
38082 else
38083 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38084
38085 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38086 insn = emit_call_insn (insn);
38087 /* Now that we have the debug info in the insn, we can set up the
38088 branch island if we're using one. */
38089 if (make_island)
38090 {
38091 tree funname = get_identifier (XSTR (func_desc, 0));
38092
38093 if (no_previous_def (funname))
38094 {
38095 rtx label_rtx = gen_label_rtx ();
38096 char *label_buf, temp_buf[256];
38097 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38098 CODE_LABEL_NUMBER (label_rtx));
38099 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38100 tree labelname = get_identifier (label_buf);
38101 add_compiler_branch_island (labelname, funname,
38102 insn_line ((const rtx_insn*)insn));
38103 }
38104 }
38105 }
38106 #endif
38107
38108 void
38109 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38110 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38111 {
38112 #if TARGET_MACHO
38113 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38114 #else
38115 gcc_unreachable ();
38116 #endif
38117 }
38118
38119
38120 void
38121 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38122 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38123 {
38124 #if TARGET_MACHO
38125 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38126 #else
38127 gcc_unreachable ();
38128 #endif
38129 }
38130
38131
38132 /* Return whether we need to always update the saved TOC pointer when we update
38133 the stack pointer. */
38134
38135 static bool
38136 rs6000_save_toc_in_prologue_p (void)
38137 {
38138 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38139 }
38140
38141 #ifdef HAVE_GAS_HIDDEN
38142 # define USE_HIDDEN_LINKONCE 1
38143 #else
38144 # define USE_HIDDEN_LINKONCE 0
38145 #endif
38146
38147 /* Fills in the label name that should be used for a 476 link stack thunk. */
38148
38149 void
38150 get_ppc476_thunk_name (char name[32])
38151 {
38152 gcc_assert (TARGET_LINK_STACK);
38153
38154 if (USE_HIDDEN_LINKONCE)
38155 sprintf (name, "__ppc476.get_thunk");
38156 else
38157 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38158 }
38159
38160 /* This function emits the simple thunk routine that is used to preserve
38161 the link stack on the 476 cpu. */
38162
38163 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38164 static void
38165 rs6000_code_end (void)
38166 {
38167 char name[32];
38168 tree decl;
38169
38170 if (!TARGET_LINK_STACK)
38171 return;
38172
38173 get_ppc476_thunk_name (name);
38174
38175 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38176 build_function_type_list (void_type_node, NULL_TREE));
38177 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38178 NULL_TREE, void_type_node);
38179 TREE_PUBLIC (decl) = 1;
38180 TREE_STATIC (decl) = 1;
38181
38182 #if RS6000_WEAK
38183 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38184 {
38185 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38186 targetm.asm_out.unique_section (decl, 0);
38187 switch_to_section (get_named_section (decl, NULL, 0));
38188 DECL_WEAK (decl) = 1;
38189 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38190 targetm.asm_out.globalize_label (asm_out_file, name);
38191 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38192 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38193 }
38194 else
38195 #endif
38196 {
38197 switch_to_section (text_section);
38198 ASM_OUTPUT_LABEL (asm_out_file, name);
38199 }
38200
38201 DECL_INITIAL (decl) = make_node (BLOCK);
38202 current_function_decl = decl;
38203 allocate_struct_function (decl, false);
38204 init_function_start (decl);
38205 first_function_block_is_cold = false;
38206 /* Make sure unwind info is emitted for the thunk if needed. */
38207 final_start_function (emit_barrier (), asm_out_file, 1);
38208
38209 fputs ("\tblr\n", asm_out_file);
38210
38211 final_end_function ();
38212 init_insn_lengths ();
38213 free_after_compilation (cfun);
38214 set_cfun (NULL);
38215 current_function_decl = NULL;
38216 }
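
/* An illustrative sketch of how the thunk is used.  The emitted routine
   is just

     __ppc476.get_thunk:
             blr

   and a caller obtains a code address through a genuinely matched
   call/return pair, e.g. (register number hypothetical)

             bl __ppc476.get_thunk
             mflr 30

   so the 476 link stack stays balanced, unlike with an unpaired
   branch-and-link.  */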
38217
38218 /* Add r30 to hard reg set if the prologue sets it up and it is not
38219 pic_offset_table_rtx. */
38220
38221 static void
38222 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38223 {
38224 if (!TARGET_SINGLE_PIC_BASE
38225 && TARGET_TOC
38226 && TARGET_MINIMAL_TOC
38227 && !constant_pool_empty_p ())
38228 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38229 if (cfun->machine->split_stack_argp_used)
38230 add_to_hard_reg_set (&set->set, Pmode, 12);
38231
38232 /* Make sure the hard reg set doesn't include r2, which was possibly added
38233 via PIC_OFFSET_TABLE_REGNUM. */
38234 if (TARGET_TOC)
38235 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38236 }
38237
38238 \f
38239 /* Helper function for rs6000_split_logical to emit a logical instruction after
38240 splitting the operation into single GPR registers.
38241
38242 DEST is the destination register.
38243 OP1 and OP2 are the input source registers.
38244 CODE is the base operation (AND, IOR, XOR, NOT).
38245 MODE is the machine mode.
38246 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38247 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38248 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38249
38250 static void
38251 rs6000_split_logical_inner (rtx dest,
38252 rtx op1,
38253 rtx op2,
38254 enum rtx_code code,
38255 machine_mode mode,
38256 bool complement_final_p,
38257 bool complement_op1_p,
38258 bool complement_op2_p)
38259 {
38260 rtx bool_rtx;
38261
38262 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38263 if (op2 && CONST_INT_P (op2)
38264 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38265 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38266 {
38267 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38268 HOST_WIDE_INT value = INTVAL (op2) & mask;
38269
38270 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38271 if (code == AND)
38272 {
38273 if (value == 0)
38274 {
38275 emit_insn (gen_rtx_SET (dest, const0_rtx));
38276 return;
38277 }
38278
38279 else if (value == mask)
38280 {
38281 if (!rtx_equal_p (dest, op1))
38282 emit_insn (gen_rtx_SET (dest, op1));
38283 return;
38284 }
38285 }
38286
38287 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38288 into separate ORI/ORIS or XORI/XORIS instructions. */
38289 else if (code == IOR || code == XOR)
38290 {
38291 if (value == 0)
38292 {
38293 if (!rtx_equal_p (dest, op1))
38294 emit_insn (gen_rtx_SET (dest, op1));
38295 return;
38296 }
38297 }
38298 }
38299
38300 if (code == AND && mode == SImode
38301 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38302 {
38303 emit_insn (gen_andsi3 (dest, op1, op2));
38304 return;
38305 }
38306
38307 if (complement_op1_p)
38308 op1 = gen_rtx_NOT (mode, op1);
38309
38310 if (complement_op2_p)
38311 op2 = gen_rtx_NOT (mode, op2);
38312
38313 /* For canonical RTL, if only one arm is inverted it is the first. */
38314 if (!complement_op1_p && complement_op2_p)
38315 std::swap (op1, op2);
38316
38317 bool_rtx = ((code == NOT)
38318 ? gen_rtx_NOT (mode, op1)
38319 : gen_rtx_fmt_ee (code, mode, op1, op2));
38320
38321 if (complement_final_p)
38322 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38323
38324 emit_insn (gen_rtx_SET (dest, bool_rtx));
38325 }
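
/* Illustrative examples of the constant folds above, assuming
   hypothetical registers r3/r4 and shown as the instructions a later
   pass would emit:

     r3 = r4 AND 0    ->  li 3,0    (set to zero)
     r3 = r4 AND -1   ->  mr 3,4    (simple move)
     r3 = r4 IOR 0    ->  mr 3,4    (simple move)  */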
38326
38327 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38328 operations are split immediately during RTL generation to allow for more
38329 optimizations of the AND/IOR/XOR.
38330
38331 OPERANDS is an array containing the destination and two input operands.
38332 CODE is the base operation (AND, IOR, XOR, NOT).
38334 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38335 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38336 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38339
38340 static void
38341 rs6000_split_logical_di (rtx operands[3],
38342 enum rtx_code code,
38343 bool complement_final_p,
38344 bool complement_op1_p,
38345 bool complement_op2_p)
38346 {
38347 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C (0xffffffff);
38348 const HOST_WIDE_INT upper_32bits = ~lower_32bits;
38349 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C (0x80000000);
38350 enum hi_lo { hi = 0, lo = 1 };
38351 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38352 size_t i;
38353
38354 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38355 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38356 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38357 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38358
38359 if (code == NOT)
38360 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38361 else
38362 {
38363 if (!CONST_INT_P (operands[2]))
38364 {
38365 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38366 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38367 }
38368 else
38369 {
38370 HOST_WIDE_INT value = INTVAL (operands[2]);
38371 HOST_WIDE_INT value_hi_lo[2];
38372
38373 gcc_assert (!complement_final_p);
38374 gcc_assert (!complement_op1_p);
38375 gcc_assert (!complement_op2_p);
38376
38377 value_hi_lo[hi] = value >> 32;
38378 value_hi_lo[lo] = value & lower_32bits;
38379
38380 for (i = 0; i < 2; i++)
38381 {
38382 HOST_WIDE_INT sub_value = value_hi_lo[i];
38383
38384 if (sub_value & sign_bit)
38385 sub_value |= upper_32bits;
38386
38387 op2_hi_lo[i] = GEN_INT (sub_value);
38388
38389 /* If this is an AND instruction, check to see if we need to load
38390 the value in a register. */
38391 if (code == AND && sub_value != -1 && sub_value != 0
38392 && !and_operand (op2_hi_lo[i], SImode))
38393 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38394 }
38395 }
38396 }
38397
38398 for (i = 0; i < 2; i++)
38399 {
38400 /* Split large IOR/XOR operations. */
38401 if ((code == IOR || code == XOR)
38402 && CONST_INT_P (op2_hi_lo[i])
38403 && !complement_final_p
38404 && !complement_op1_p
38405 && !complement_op2_p
38406 && !logical_const_operand (op2_hi_lo[i], SImode))
38407 {
38408 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38409 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C (0xffff0000);
38410 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C (0x0000ffff);
38411 rtx tmp = gen_reg_rtx (SImode);
38412
38413 /* Make sure the constant is sign extended. */
38414 if ((hi_16bits & sign_bit) != 0)
38415 hi_16bits |= upper_32bits;
38416
38417 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38418 code, SImode, false, false, false);
38419
38420 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38421 code, SImode, false, false, false);
38422 }
38423 else
38424 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38425 code, SImode, complement_final_p,
38426 complement_op1_p, complement_op2_p);
38427 }
38428
38429 return;
38430 }
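
/* An illustrative example of the large IOR/XOR split above on a 32-bit
   target: if the low half of the constant is 0x12345678 (not a single
   logical_const_operand), that half is handled as two instructions,
   e.g. (register number hypothetical)

     oris 4,4,0x1234
     ori  4,4,0x5678  */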
38431
38432 /* Split the insns that make up boolean operations operating on multiple GPR
38433 registers. The boolean MD patterns ensure that the inputs either are
38434 exactly the same as the output registers, or there is no overlap.
38435
38436 OPERANDS is an array containing the destination and two input operands.
38437 CODE is the base operation (AND, IOR, XOR, NOT).
38438 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38439 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38440 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38441
38442 void
38443 rs6000_split_logical (rtx operands[3],
38444 enum rtx_code code,
38445 bool complement_final_p,
38446 bool complement_op1_p,
38447 bool complement_op2_p)
38448 {
38449 machine_mode mode = GET_MODE (operands[0]);
38450 machine_mode sub_mode;
38451 rtx op0, op1, op2;
38452 int sub_size, regno0, regno1, nregs, i;
38453
38454 /* If this is DImode, use the specialized version that can run before
38455 register allocation. */
38456 if (mode == DImode && !TARGET_POWERPC64)
38457 {
38458 rs6000_split_logical_di (operands, code, complement_final_p,
38459 complement_op1_p, complement_op2_p);
38460 return;
38461 }
38462
38463 op0 = operands[0];
38464 op1 = operands[1];
38465 op2 = (code == NOT) ? NULL_RTX : operands[2];
38466 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38467 sub_size = GET_MODE_SIZE (sub_mode);
38468 regno0 = REGNO (op0);
38469 regno1 = REGNO (op1);
38470
38471 gcc_assert (reload_completed);
38472 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38473 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38474
38475 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38476 gcc_assert (nregs > 1);
38477
38478 if (op2 && REG_P (op2))
38479 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38480
38481 for (i = 0; i < nregs; i++)
38482 {
38483 int offset = i * sub_size;
38484 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38485 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38486 rtx sub_op2 = ((code == NOT)
38487 ? NULL_RTX
38488 : simplify_subreg (sub_mode, op2, mode, offset));
38489
38490 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38491 complement_final_p, complement_op1_p,
38492 complement_op2_p);
38493 }
38494
38495 return;
38496 }
38497
38498 \f
38499 /* Return true if the peephole2 pass can combine an addis instruction
38500 with a D-form load so that the pair can be fused together on a
38501 power8. */
38502
38503 bool
38504 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38505 rtx addis_value, /* addis value. */
38506 rtx target, /* target register that is loaded. */
38507 rtx mem) /* bottom part of the memory addr. */
38508 {
38509 rtx addr;
38510 rtx base_reg;
38511
38512 /* Validate arguments. */
38513 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38514 return false;
38515
38516 if (!base_reg_operand (target, GET_MODE (target)))
38517 return false;
38518
38519 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38520 return false;
38521
38522 /* Allow sign/zero extension. */
38523 if (GET_CODE (mem) == ZERO_EXTEND
38524 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38525 mem = XEXP (mem, 0);
38526
38527 if (!MEM_P (mem))
38528 return false;
38529
38530 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38531 return false;
38532
38533 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38534 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38535 return false;
38536
38537 /* Validate that the register used to load the high value is either the
38538 register being loaded, or we can safely replace its use.
38539
38540 This function is only called from the peephole2 pass and we assume that
38541 there are 2 instructions in the peephole (addis and load), so we want to
38542 check that the target register is not used in the memory address and that
38543 the register holding the addis result is dead after the peephole. */
38544 if (REGNO (addis_reg) != REGNO (target))
38545 {
38546 if (reg_mentioned_p (target, mem))
38547 return false;
38548
38549 if (!peep2_reg_dead_p (2, addis_reg))
38550 return false;
38551
38552 /* If the target register being loaded is the stack pointer, we must
38553 avoid loading any other value into it, even temporarily. */
38554 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38555 return false;
38556 }
38557
38558 base_reg = XEXP (addr, 0);
38559 return REGNO (addis_reg) == REGNO (base_reg);
38560 }
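
/* An illustrative sketch of the two-insn peephole window this predicate
   validates, assuming a TOC-relative load and hypothetical registers:

     addis 9,2,sym@toc@ha       <- addis_reg is r9
     lwz   10,sym@toc@l(9)      <- target is r10, mem uses r9

   When r9 and r10 differ, r9 must be dead after the load and r10 must
   not appear in the address, so every use of r9 can be replaced by
   r10.  */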
38561
38562 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38563 sequence. We adjust the addis register to use the target register. If the
38564 load sign extends, we instead emit a zero-extending load followed by an
38565 explicit sign extension, since the fusion only covers zero-extending
38566 loads.
38567
38568 The operands are:
38569 operands[0] register set with addis (to be replaced with target)
38570 operands[1] value set via addis
38571 operands[2] target register being loaded
38572 operands[3] D-form memory reference using operands[0]. */
38573
38574 void
38575 expand_fusion_gpr_load (rtx *operands)
38576 {
38577 rtx addis_value = operands[1];
38578 rtx target = operands[2];
38579 rtx orig_mem = operands[3];
38580 rtx new_addr, new_mem, orig_addr, offset;
38581 enum rtx_code plus_or_lo_sum;
38582 machine_mode target_mode = GET_MODE (target);
38583 machine_mode extend_mode = target_mode;
38584 machine_mode ptr_mode = Pmode;
38585 enum rtx_code extend = UNKNOWN;
38586
38587 if (GET_CODE (orig_mem) == ZERO_EXTEND
38588 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38589 {
38590 extend = GET_CODE (orig_mem);
38591 orig_mem = XEXP (orig_mem, 0);
38592 target_mode = GET_MODE (orig_mem);
38593 }
38594
38595 gcc_assert (MEM_P (orig_mem));
38596
38597 orig_addr = XEXP (orig_mem, 0);
38598 plus_or_lo_sum = GET_CODE (orig_addr);
38599 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38600
38601 offset = XEXP (orig_addr, 1);
38602 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38603 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38604
38605 if (extend != UNKNOWN)
38606 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38607
38608 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38609 UNSPEC_FUSION_GPR);
38610 emit_insn (gen_rtx_SET (target, new_mem));
38611
38612 if (extend == SIGN_EXTEND)
38613 {
38614 int sub_off = ((BYTES_BIG_ENDIAN)
38615 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38616 : 0);
38617 rtx sign_reg
38618 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38619
38620 emit_insn (gen_rtx_SET (target,
38621 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38622 }
38623
38624 return;
38625 }
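
/* An illustrative example of the sign-extend handling above, assuming a
   sign-extending HImode load from a TOC-relative symbol on ELF: the
   fused pair only zero extends, so the expansion is equivalent to
   (register number hypothetical)

     addis 9,2,sym@toc@ha
     lhz   9,sym@toc@l(9)
     extsh 9,9

   with the trailing extsh supplying the explicit sign extension.  */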
38626
38627 /* Emit the addis instruction that will be part of a fused instruction
38628 sequence. */
38629
38630 void
38631 emit_fusion_addis (rtx target, rtx addis_value)
38632 {
38633 rtx fuse_ops[10];
38634 const char *addis_str = NULL;
38635
38636 /* Emit the addis instruction. */
38637 fuse_ops[0] = target;
38638 if (satisfies_constraint_L (addis_value))
38639 {
38640 fuse_ops[1] = addis_value;
38641 addis_str = "lis %0,%v1";
38642 }
38643
38644 else if (GET_CODE (addis_value) == PLUS)
38645 {
38646 rtx op0 = XEXP (addis_value, 0);
38647 rtx op1 = XEXP (addis_value, 1);
38648
38649 if (REG_P (op0) && CONST_INT_P (op1)
38650 && satisfies_constraint_L (op1))
38651 {
38652 fuse_ops[1] = op0;
38653 fuse_ops[2] = op1;
38654 addis_str = "addis %0,%1,%v2";
38655 }
38656 }
38657
38658 else if (GET_CODE (addis_value) == HIGH)
38659 {
38660 rtx value = XEXP (addis_value, 0);
38661 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38662 {
38663 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38664 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38665 if (TARGET_ELF)
38666 addis_str = "addis %0,%2,%1@toc@ha";
38667
38668 else if (TARGET_XCOFF)
38669 addis_str = "addis %0,%1@u(%2)";
38670
38671 else
38672 gcc_unreachable ();
38673 }
38674
38675 else if (GET_CODE (value) == PLUS)
38676 {
38677 rtx op0 = XEXP (value, 0);
38678 rtx op1 = XEXP (value, 1);
38679
38680 if (GET_CODE (op0) == UNSPEC
38681 && XINT (op0, 1) == UNSPEC_TOCREL
38682 && CONST_INT_P (op1))
38683 {
38684 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38685 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38686 fuse_ops[3] = op1;
38687 if (TARGET_ELF)
38688 addis_str = "addis %0,%2,%1+%3@toc@ha";
38689
38690 else if (TARGET_XCOFF)
38691 addis_str = "addis %0,%1+%3@u(%2)";
38692
38693 else
38694 gcc_unreachable ();
38695 }
38696 }
38697
38698 else if (satisfies_constraint_L (value))
38699 {
38700 fuse_ops[1] = value;
38701 addis_str = "lis %0,%v1";
38702 }
38703
38704 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38705 {
38706 fuse_ops[1] = value;
38707 addis_str = "lis %0,%1@ha";
38708 }
38709 }
38710
38711 if (!addis_str)
38712 fatal_insn ("Could not generate addis value for fusion", addis_value);
38713
38714 output_asm_insn (addis_str, fuse_ops);
38715 }
38716
38717 /* Emit a D-form load or store instruction that is the second instruction
38718 of a fusion sequence. */
38719
38720 static void
38721 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38722 {
38723 rtx fuse_ops[10];
38724 char insn_template[80];
38725
38726 fuse_ops[0] = load_reg;
38727 fuse_ops[1] = addis_reg;
38728
38729 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38730 {
38731 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38732 fuse_ops[2] = offset;
38733 output_asm_insn (insn_template, fuse_ops);
38734 }
38735
38736 else if (GET_CODE (offset) == UNSPEC
38737 && XINT (offset, 1) == UNSPEC_TOCREL)
38738 {
38739 if (TARGET_ELF)
38740 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38741
38742 else if (TARGET_XCOFF)
38743 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38744
38745 else
38746 gcc_unreachable ();
38747
38748 fuse_ops[2] = XVECEXP (offset, 0, 0);
38749 output_asm_insn (insn_template, fuse_ops);
38750 }
38751
38752 else if (GET_CODE (offset) == PLUS
38753 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38754 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38755 && CONST_INT_P (XEXP (offset, 1)))
38756 {
38757 rtx tocrel_unspec = XEXP (offset, 0);
38758 if (TARGET_ELF)
38759 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38760
38761 else if (TARGET_XCOFF)
38762 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38763
38764 else
38765 gcc_unreachable ();
38766
38767 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38768 fuse_ops[3] = XEXP (offset, 1);
38769 output_asm_insn (insn_template, fuse_ops);
38770 }
38771
38772 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38773 {
38774 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38775
38776 fuse_ops[2] = offset;
38777 output_asm_insn (insn_template, fuse_ops);
38778 }
38779
38780 else
38781 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38782
38783 return;
38784 }
38785
38786 /* Given an address, convert it into the addis and load offset parts. Addresses
38787 created during the peephole2 process look like:
38788 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38789 (unspec [(...)] UNSPEC_TOCREL)) */
38790
38791 static void
38792 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38793 {
38794 rtx hi, lo;
38795
38796 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38797 {
38798 hi = XEXP (addr, 0);
38799 lo = XEXP (addr, 1);
38800 }
38801 else
38802 gcc_unreachable ();
38803
38804 *p_hi = hi;
38805 *p_lo = lo;
38806 }
38807
38808 /* Return a string to fuse an addis instruction with a GPR load into the
38809 same register that the addis instruction set up. The address used is
38810 the logical address that was formed during peephole2:
38811 (lo_sum (high) (low-part))
38812
38813 The code is complicated, so we call output_asm_insn directly, and just
38814 return "". */
38815
38816 const char *
38817 emit_fusion_gpr_load (rtx target, rtx mem)
38818 {
38819 rtx addis_value;
38820 rtx addr;
38821 rtx load_offset;
38822 const char *load_str = NULL;
38823 machine_mode mode;
38824
38825 if (GET_CODE (mem) == ZERO_EXTEND)
38826 mem = XEXP (mem, 0);
38827
38828 gcc_assert (REG_P (target) && MEM_P (mem));
38829
38830 addr = XEXP (mem, 0);
38831 fusion_split_address (addr, &addis_value, &load_offset);
38832
38833 /* Now emit the load instruction to the same register. */
38834 mode = GET_MODE (mem);
38835 switch (mode)
38836 {
38837 case E_QImode:
38838 load_str = "lbz";
38839 break;
38840
38841 case E_HImode:
38842 load_str = "lhz";
38843 break;
38844
38845 case E_SImode:
38846 case E_SFmode:
38847 load_str = "lwz";
38848 break;
38849
38850 case E_DImode:
38851 case E_DFmode:
38852 gcc_assert (TARGET_POWERPC64);
38853 load_str = "ld";
38854 break;
38855
38856 default:
38857 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38858 }
38859
38860 /* Emit the addis instruction. */
38861 emit_fusion_addis (target, addis_value);
38862
38863 /* Emit the D-form load instruction. */
38864 emit_fusion_load (target, target, load_offset, load_str);
38865
38866 return "";
38867 }
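
/* An illustrative example of the final output for an SImode
   TOC-relative load, assuming a hypothetical register r9; substituting
   into the addis and load templates above gives

     addis 9,2,sym@toc@ha       (ELF)
     lwz   9,sym@toc@l(9)

     addis 9,sym@u(2)           (XCOFF)
     lwz   9,sym@l(9)  */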
38868 \f
38869
38870 #ifdef RS6000_GLIBC_ATOMIC_FENV
38871 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38872 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38873 #endif
38874
38875 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38876
38877 static void
38878 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38879 {
38880 if (!TARGET_HARD_FLOAT)
38881 {
38882 #ifdef RS6000_GLIBC_ATOMIC_FENV
38883 if (atomic_hold_decl == NULL_TREE)
38884 {
38885 atomic_hold_decl
38886 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38887 get_identifier ("__atomic_feholdexcept"),
38888 build_function_type_list (void_type_node,
38889 double_ptr_type_node,
38890 NULL_TREE));
38891 TREE_PUBLIC (atomic_hold_decl) = 1;
38892 DECL_EXTERNAL (atomic_hold_decl) = 1;
38893 }
38894
38895 if (atomic_clear_decl == NULL_TREE)
38896 {
38897 atomic_clear_decl
38898 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38899 get_identifier ("__atomic_feclearexcept"),
38900 build_function_type_list (void_type_node,
38901 NULL_TREE));
38902 TREE_PUBLIC (atomic_clear_decl) = 1;
38903 DECL_EXTERNAL (atomic_clear_decl) = 1;
38904 }
38905
38906 tree const_double = build_qualified_type (double_type_node,
38907 TYPE_QUAL_CONST);
38908 tree const_double_ptr = build_pointer_type (const_double);
38909 if (atomic_update_decl == NULL_TREE)
38910 {
38911 atomic_update_decl
38912 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38913 get_identifier ("__atomic_feupdateenv"),
38914 build_function_type_list (void_type_node,
38915 const_double_ptr,
38916 NULL_TREE));
38917 TREE_PUBLIC (atomic_update_decl) = 1;
38918 DECL_EXTERNAL (atomic_update_decl) = 1;
38919 }
38920
38921 tree fenv_var = create_tmp_var_raw (double_type_node);
38922 TREE_ADDRESSABLE (fenv_var) = 1;
38923 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38924
38925 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38926 *clear = build_call_expr (atomic_clear_decl, 0);
38927 *update = build_call_expr (atomic_update_decl, 1,
38928 fold_convert (const_double_ptr, fenv_addr));
38929 #endif
38930 return;
38931 }
38932
38933 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38934 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38935 tree call_mffs = build_call_expr (mffs, 0);
38936
38937 /* Generates the equivalent of feholdexcept (&fenv_var)
38938
38939 *fenv_var = __builtin_mffs ();
38940 double fenv_hold;
38941 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38942 __builtin_mtfsf (0xff, fenv_hold); */
38943
38944 /* Mask to clear everything except for the rounding modes and non-IEEE
38945 arithmetic flag. */
38946 const unsigned HOST_WIDE_INT hold_exception_mask =
38947 HOST_WIDE_INT_C (0xffffffff00000007);
38948
38949 tree fenv_var = create_tmp_var_raw (double_type_node);
38950
38951 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38952
38953 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38954 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38955 build_int_cst (uint64_type_node,
38956 hold_exception_mask));
38957
38958 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38959 fenv_llu_and);
38960
38961 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38962 build_int_cst (unsigned_type_node, 0xff),
38963 fenv_hold_mtfsf);
38964
38965 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38966
38967 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38968
38969 double fenv_clear = __builtin_mffs ();
38970 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38971 __builtin_mtfsf (0xff, fenv_clear); */
38972
38973 /* Mask to clear the entire lower word of the FPSCR, including the
38974 exception, enable, and rounding mode bits. */
38975 const unsigned HOST_WIDE_INT clear_exception_mask =
38976 HOST_WIDE_INT_C (0xffffffff00000000);
38977
38978 tree fenv_clear = create_tmp_var_raw (double_type_node);
38979
38980 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38981
38982 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38983 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38984 fenv_clean_llu,
38985 build_int_cst (uint64_type_node,
38986 clear_exception_mask));
38987
38988 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38989 fenv_clear_llu_and);
38990
38991 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38992 build_int_cst (unsigned_type_node, 0xff),
38993 fenv_clear_mtfsf);
38994
38995 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38996
38997 /* Generates the equivalent of feupdateenv (&fenv_var)
38998
38999 double old_fenv = __builtin_mffs ();
39000 double fenv_update;
39001 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
39002 (*(uint64_t*)fenv_var & 0x1ff80fff);
39003 __builtin_mtfsf (0xff, fenv_update); */
39004
39005 const unsigned HOST_WIDE_INT update_exception_mask =
39006 HOST_WIDE_INT_C (0xffffffff1fffff00);
39007 const unsigned HOST_WIDE_INT new_exception_mask =
39008 HOST_WIDE_INT_C (0x1ff80fff);
39009
39010 tree old_fenv = create_tmp_var_raw (double_type_node);
39011 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
39012
39013 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
39014 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
39015 build_int_cst (uint64_type_node,
39016 update_exception_mask));
39017
39018 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
39019 build_int_cst (uint64_type_node,
39020 new_exception_mask));
39021
39022 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39023 old_llu_and, new_llu_and);
39024
39025 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39026 new_llu_mask);
39027
39028 tree update_mtfsf = build_call_expr (mtfsf, 2,
39029 build_int_cst (unsigned_type_node, 0xff),
39030 fenv_update_mtfsf);
39031
39032 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39033 }
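
/* An illustrative sketch of how the three trees built above are used
   for a C11 atomic floating-point compound assignment.  The exact
   placement is an assumption based on the documented feholdexcept /
   feclearexcept / feupdateenv semantics (simplified pseudo-C):

     hold;
     do
       {
         clear;
         newval = oldval OP src;
       }
     while (!atomic_compare_exchange (&obj, &oldval, newval));
     update;  */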
39034
39035 void
39036 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39037 {
39038 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39039
39040 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39041 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39042
39043 /* The destination of the vmrgew instruction layout is:
39044 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39045 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39046 vmrgew instruction will be correct. */
39047 if (BYTES_BIG_ENDIAN)
39048 {
39049 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39050 GEN_INT (0)));
39051 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39052 GEN_INT (3)));
39053 }
39054 else
39055 {
39056 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39057 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39058 }
39059
39060 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39061 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39062
39063 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39064 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39065
39066 if (BYTES_BIG_ENDIAN)
39067 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39068 else
39069 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39070 }
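
/* An illustrative summary of the net effect above for both
   endiannesses, assuming the usual float2 element order:

     dst = { (float) src1[0], (float) src1[1],
             (float) src2[0], (float) src2[1] }  */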
39071
39072 void
39073 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39074 {
39075 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39076
39077 rtx_tmp0 = gen_reg_rtx (V2DImode);
39078 rtx_tmp1 = gen_reg_rtx (V2DImode);
39079
39080 /* The destination of the vmrgew instruction layout is:
39081 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39082 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39083 vmrgew instruction will be correct. */
39084 if (BYTES_BIG_ENDIAN)
39085 {
39086 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39087 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39088 }
39089 else
39090 {
39091 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39092 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39093 }
39094
39095 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39096 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39097
39098 if (signed_convert)
39099 {
39100 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39101 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39102 }
39103 else
39104 {
39105 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39106 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39107 }
39108
39109 if (BYTES_BIG_ENDIAN)
39110 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39111 else
39112 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39113 }
39114
39115 void
39116 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39117 rtx src2)
39118 {
39119 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39120
39121 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39122 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39123
39124 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39125 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39126
39127 rtx_tmp2 = gen_reg_rtx (V4SImode);
39128 rtx_tmp3 = gen_reg_rtx (V4SImode);
39129
39130 if (signed_convert)
39131 {
39132 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39133 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39134 }
39135 else
39136 {
39137 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39138 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39139 }
39140
39141 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39142 }
39143
39144 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39145
39146 static bool
39147 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39148 optimization_type opt_type)
39149 {
39150 switch (op)
39151 {
39152 case rsqrt_optab:
39153 return (opt_type == OPTIMIZE_FOR_SPEED
39154 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39155
39156 default:
39157 return true;
39158 }
39159 }
39160
39161 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39162
39163 static HOST_WIDE_INT
39164 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39165 {
39166 if (TREE_CODE (exp) == STRING_CST
39167 && (STRICT_ALIGNMENT || !optimize_size))
39168 return MAX (align, BITS_PER_WORD);
39169 return align;
39170 }
39171
39172 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39173
39174 static HOST_WIDE_INT
39175 rs6000_starting_frame_offset (void)
39176 {
39177 if (FRAME_GROWS_DOWNWARD)
39178 return 0;
39179 return RS6000_STARTING_FRAME_OFFSET;
39180 }
39181 \f
39182
39183 /* Create an alias for a mangled name where we have changed the mangling (in
39184 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39185 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39186
39187 #if TARGET_ELF && RS6000_WEAK
39188 static void
39189 rs6000_globalize_decl_name (FILE * stream, tree decl)
39190 {
39191 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39192
39193 targetm.asm_out.globalize_label (stream, name);
39194
39195 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39196 {
39197 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39198 const char *old_name;
39199
39200 ieee128_mangling_gcc_8_1 = true;
39201 lang_hooks.set_decl_assembler_name (decl);
39202 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39203 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39204 ieee128_mangling_gcc_8_1 = false;
39205
39206 if (strcmp (name, old_name) != 0)
39207 {
39208 fprintf (stream, "\t.weak %s\n", old_name);
39209 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39210 }
39211 }
39212 }
39213 #endif
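
/* An illustrative example of the alias emitted above, assuming a C++
   function "void f (__ieee128)" whose GCC 8.1 mangling differed:

        .weak   _Z1fU10__float128
        .set    _Z1fU10__float128,_Z1fu9__ieee128

   so objects compiled against the old mangling still resolve.  */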
39214
39215 \f
39216 /* On 64-bit Linux and Freebsd systems, possibly switch the long double library
39217 function names from <foo>l to <foo>f128 if the default long double type is
39218 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39219 include file switches the names on systems that support long double as IEEE
39220 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39221 In the future, glibc will export names like __ieee128_sinf128 and we can
39222 switch to using those instead of using sinf128, which pollutes the user's
39223 namespace.
39224
39225 This will switch the names for Fortran math functions as well (which doesn't
39226 use math.h). However, Fortran needs other changes to the compiler and
39227 library before you can switch the real*16 type at compile time.
39228
39229 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39230 only do this if the default is that long double is IBM extended double, and
39231 the user asked for IEEE 128-bit. */
39232
39233 static tree
39234 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39235 {
39236 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39237 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39238 {
39239 size_t len = IDENTIFIER_LENGTH (id);
39240 const char *name = IDENTIFIER_POINTER (id);
39241
39242 if (name[len - 1] == 'l')
39243 {
39244 bool uses_ieee128_p = false;
39245 tree type = TREE_TYPE (decl);
39246 machine_mode ret_mode = TYPE_MODE (type);
39247
39248 /* See if the function returns an IEEE 128-bit floating point type or
39249 complex type. */
39250 if (ret_mode == TFmode || ret_mode == TCmode)
39251 uses_ieee128_p = true;
39252 else
39253 {
39254 function_args_iterator args_iter;
39255 tree arg;
39256
39257 /* See if the function passes an IEEE 128-bit floating point type
39258 or complex type. */
39259 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39260 {
39261 machine_mode arg_mode = TYPE_MODE (arg);
39262 if (arg_mode == TFmode || arg_mode == TCmode)
39263 {
39264 uses_ieee128_p = true;
39265 break;
39266 }
39267 }
39268 }
39269
39270 /* If we passed or returned an IEEE 128-bit floating point type,
39271 change the name. */
39272 if (uses_ieee128_p)
39273 {
39274 char *name2 = (char *) alloca (len + 4);
39275 memcpy (name2, name, len - 1);
39276 strcpy (name2 + len - 1, "f128");
39277 id = get_identifier (name2);
39278 }
39279 }
39280 }
39281
39282 return id;
39283 }
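
/* An illustrative example of the renaming above: with long double
   defaulting to IBM extended double but -mabi=ieeelongdouble in effect,

     long double r = __builtin_sinl (x);

   resolves to a builtin whose name ends in 'l' and whose result is
   TFmode, so its assembler name is rewritten from "sinl" to
   "sinf128".  */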
39284
39285 \f
39286 struct gcc_target targetm = TARGET_INITIALIZER;
39287
39288 #include "gt-rs6000.h"