/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2019 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "attribs.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "tree-vector-builder.h"
#include "context.h"
#include "tree-pass.h"
#include "except.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"
#include "tree-ssa-propagate.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif

static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);

/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;         /* stack info won't change from here on */
  int first_gp_reg_save;        /* first callee saved GP register used */
  int first_fp_reg_save;        /* first callee saved FP register used */
  int first_altivec_reg_save;   /* first callee saved AltiVec register used */
  int lr_save_p;                /* true if the link reg needs to be saved */
  int cr_save_p;                /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;     /* mask of vec registers to save */
  int push_p;                   /* true if we need to allocate stack space */
  int calls_p;                  /* true if the function makes any calls */
  int world_save_p;             /* true if we're saving *everything*:
                                   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;          /* which ABI to use */
  int gp_save_offset;           /* offset to save GP regs from initial SP */
  int fp_save_offset;           /* offset to save FP regs from initial SP */
  int altivec_save_offset;      /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;           /* offset to save LR from initial SP */
  int cr_save_offset;           /* offset to save CR from initial SP */
  int vrsave_save_offset;       /* offset to save VRSAVE from initial SP */
  int varargs_save_offset;      /* offset to save the varargs registers */
  int ehrd_offset;              /* offset to EH return data */
  int ehcr_offset;              /* offset to EH CR field data */
  int reg_size;                 /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;      /* variable save area size */
  int parm_size;                /* outgoing parameter size */
  int save_size;                /* save area size */
  int fixed_size;               /* fixed size of stack frame */
  int gp_size;                  /* size of saved GP registers */
  int fp_size;                  /* size of saved FP registers */
  int altivec_size;             /* size of saved AltiVec registers */
  int cr_size;                  /* size to hold CR if not in fixed area */
  int vrsave_size;              /* size to hold VRSAVE */
  int altivec_padding_size;     /* size of altivec alignment padding */
  HOST_WIDE_INT total_size;     /* total bytes allocated for stack */
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;

#if TARGET_ELF
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;
#endif

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV     = 0x001,     /* Use divide estimate */
  RECIP_DF_DIV     = 0x002,
  RECIP_V4SF_DIV   = 0x004,
  RECIP_V2DF_DIV   = 0x008,

  RECIP_SF_RSQRT   = 0x010,     /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT   = 0x020,
  RECIP_V4SF_RSQRT = 0x040,
  RECIP_V2DF_RSQRT = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE            = 0,
  RECIP_ALL             = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
                           | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
                           | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION  = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION   = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;           /* option name */
  unsigned int mask;            /* mask bits to set */
} recip_options[] = {
  { "all",    RECIP_ALL },
  { "none",   RECIP_NONE },
  { "div",    (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
               | RECIP_V2DF_DIV) },
  { "divf",   (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",   (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",  (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
               | RECIP_V2DF_RSQRT) },
  { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
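
/* Hedged, illustrative sketch only (kept out of the build): how a single
   -mrecip=<xxx> token would map to mask bits through recip_options above.
   The lookup helper below is hypothetical and is not the option-parsing
   code this file actually uses.  */
#if 0
static unsigned int
example_recip_mask_for_token (const char *token)
{
  for (size_t i = 0; i < ARRAY_SIZE (recip_options); i++)
    if (strcmp (token, recip_options[i].string) == 0)
      /* e.g. "divf" yields RECIP_SF_DIV | RECIP_V4SF_DIV.  */
      return recip_options[i].mask;
  return RECIP_NONE;
}
#endif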

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",      PPC_PLATFORM_POWER9 },
  { "power8",      PPC_PLATFORM_POWER8 },
  { "power7",      PPC_PLATFORM_POWER7 },
  { "power6x",     PPC_PLATFORM_POWER6X },
  { "power6",      PPC_PLATFORM_POWER6 },
  { "power5+",     PPC_PLATFORM_POWER5_PLUS },
  { "power5",      PPC_PLATFORM_POWER5 },
  { "ppc970",      PPC_PLATFORM_PPC970 },
  { "power4",      PPC_PLATFORM_POWER4 },
  { "ppca2",       PPC_PLATFORM_PPCA2 },
  { "ppc476",      PPC_PLATFORM_PPC476 },
  { "ppc464",      PPC_PLATFORM_PPC464 },
  { "ppc440",      PPC_PLATFORM_PPC440 },
  { "ppc405",      PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
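
/* Hedged usage example (user code, not part of this file): the table above
   backs __builtin_cpu_is, which compares the AT_PLATFORM value recorded in
   the TCB against the named platform.  */
#if 0
int
example_running_on_power9 (void)
{
  return __builtin_cpu_is ("power9");   /* Matches PPC_PLATFORM_POWER9.  */
}
#endif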

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",          PPC_FEATURE_HAS_4xxMAC,      0 },
  { "altivec",         PPC_FEATURE_HAS_ALTIVEC,     0 },
  { "arch_2_05",       PPC_FEATURE_ARCH_2_05,       0 },
  { "arch_2_06",       PPC_FEATURE_ARCH_2_06,       0 },
  { "archpmu",         PPC_FEATURE_PERFMON_COMPAT,  0 },
  { "booke",           PPC_FEATURE_BOOKE,           0 },
  { "cellbe",          PPC_FEATURE_CELL_BE,         0 },
  { "dfp",             PPC_FEATURE_HAS_DFP,         0 },
  { "efpdouble",       PPC_FEATURE_HAS_EFP_DOUBLE,  0 },
  { "efpsingle",       PPC_FEATURE_HAS_EFP_SINGLE,  0 },
  { "fpu",             PPC_FEATURE_HAS_FPU,         0 },
  { "ic_snoop",        PPC_FEATURE_ICACHE_SNOOP,    0 },
  { "mmu",             PPC_FEATURE_HAS_MMU,         0 },
  { "notb",            PPC_FEATURE_NO_TB,           0 },
  { "pa6t",            PPC_FEATURE_PA6T,            0 },
  { "power4",          PPC_FEATURE_POWER4,          0 },
  { "power5",          PPC_FEATURE_POWER5,          0 },
  { "power5+",         PPC_FEATURE_POWER5_PLUS,     0 },
  { "power6x",         PPC_FEATURE_POWER6_EXT,      0 },
  { "ppc32",           PPC_FEATURE_32,              0 },
  { "ppc601",          PPC_FEATURE_601_INSTR,       0 },
  { "ppc64",           PPC_FEATURE_64,              0 },
  { "ppcle",           PPC_FEATURE_PPC_LE,          0 },
  { "smt",             PPC_FEATURE_SMT,             0 },
  { "spe",             PPC_FEATURE_HAS_SPE,         0 },
  { "true_le",         PPC_FEATURE_TRUE_LE,         0 },
  { "ucache",          PPC_FEATURE_UNIFIED_CACHE,   0 },
  { "vsx",             PPC_FEATURE_HAS_VSX,         0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",       PPC_FEATURE2_ARCH_2_07,      1 },
  { "dscr",            PPC_FEATURE2_HAS_DSCR,       1 },
  { "ebb",             PPC_FEATURE2_HAS_EBB,        1 },
  { "htm",             PPC_FEATURE2_HAS_HTM,        1 },
  { "htm-nosc",        PPC_FEATURE2_HTM_NOSC,       1 },
  { "htm-no-suspend",  PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
  { "isel",            PPC_FEATURE2_HAS_ISEL,       1 },
  { "tar",             PPC_FEATURE2_HAS_TAR,        1 },
  { "vcrypto",         PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
  { "arch_3_00",       PPC_FEATURE2_ARCH_3_00,      1 },
  { "ieee128",         PPC_FEATURE2_HAS_IEEE128,    1 },
  { "darn",            PPC_FEATURE2_DARN,           1 },
  { "scv",             PPC_FEATURE2_SCV,            1 }
};
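
/* Hedged usage example (user code, not part of this file): these entries
   back __builtin_cpu_supports, which tests the AT_HWCAP (id 0) or AT_HWCAP2
   (id 1) bits published in the TCB.  The called functions are hypothetical
   placeholders.  */
#if 0
void
example_select_path (void)
{
  if (__builtin_cpu_supports ("arch_2_07"))  /* PPC_FEATURE2_ARCH_2_07.  */
    power8_path ();     /* Hypothetical user function.  */
  else
    generic_path ();    /* Hypothetical user function.  */
}
#endif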

/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT = 0,            /* default clone.  */
  CLONE_ISA_2_05,               /* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,               /* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,               /* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,               /* ISA 3.00 (power9).  */
  CLONE_MAX
};

/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;       /* rs6000_isa mask */
  const char *name;             /* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,                      "" },           /* Default options.  */
  { OPTION_MASK_CMPB,       "arch_2_05" },  /* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,    "arch_2_06" },  /* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,  "arch_2_07" },  /* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,  "arch_3_00" },  /* ISA 3.00 (power9).  */
};
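
/* Hedged usage example (user code, not part of this file): the clone map
   above supports the target_clones attribute, and the runtime resolver
   picks a clone via the __builtin_cpu_supports names listed in the table.  */
#if 0
__attribute__ ((target_clones ("cpu=power9", "cpu=power8", "default")))
int
example_cloned (int x)
{
  return x * 2;   /* The best matching clone is chosen at run time.  */
}
#endif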


/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,               /* General purpose registers.  */
  RELOAD_REG_FPR,               /* Traditional floating point regs.  */
  RELOAD_REG_VMX,               /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,               /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS  RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS   RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;     /* Register class name.  */
  int reg;              /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",  FIRST_GPR_REGNO },      /* RELOAD_REG_GPR.  */
  { "Fpr",  FIRST_FPR_REGNO },      /* RELOAD_REG_FPR.  */
  { "VMX",  FIRST_ALTIVEC_REGNO },  /* RELOAD_REG_VMX.  */
  { "Any",  -1 },                   /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID        0x01    /* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE     0x02    /* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED      0x04    /* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET       0x08    /* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC   0x10    /* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY   0x20    /* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16      0x40    /* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET  0x80    /* quad offset is limited.  */
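
/* Worked example (illustrative only): an addr_mask of
   (RELOAD_REG_VALID | RELOAD_REG_OFFSET | RELOAD_REG_QUAD_OFFSET),
   i.e. 0x89, describes a mode that is valid in the register class and
   supports reg+offset addressing, but only with offsets that satisfy the
   quad (16-byte aligned) restriction.  */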

/* Register type masks based on the type of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;       /* INSN to reload for loading.  */
  enum insn_code reload_store;      /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;    /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;    /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;    /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;             /* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
          != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
          != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
          != 0);
}

/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
        {
          out_set = single_set (out_insn);
          if (!out_set)
            {
              out_pat = PATTERN (out_insn);
              if (GET_CODE (out_pat) == PARALLEL)
                {
                  for (i = 0; i < XVECLEN (out_pat, 0); i++)
                    {
                      out_exp = XVECEXP (out_pat, 0, i);
                      if ((GET_CODE (out_exp) == CLOBBER)
                          || (GET_CODE (out_exp) == USE))
                        continue;
                      else if (GET_CODE (out_exp) != SET)
                        return false;
                    }
                }
            }
        }
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
        return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
        {
          in_exp = XVECEXP (in_pat, 0, i);
          if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
            continue;
          else if (GET_CODE (in_exp) != SET)
            return false;

          if (MEM_P (SET_DEST (in_exp)))
            {
              out_set = single_set (out_insn);
              if (!out_set)
                {
                  out_pat = PATTERN (out_insn);
                  if (GET_CODE (out_pat) != PARALLEL)
                    return false;
                  for (j = 0; j < XVECLEN (out_pat, 0); j++)
                    {
                      out_exp = XVECEXP (out_pat, 0, j);
                      if ((GET_CODE (out_exp) == CLOBBER)
                          || (GET_CODE (out_exp) == USE))
                        continue;
                      else if (GET_CODE (out_exp) != SET)
                        return false;
                    }
                }
            }
        }
    }
  return store_data_bypass_p (out_insn, in_insn);
}


\f
/* Processor costs (relative to an add) */

const struct processor_costs *rs6000_cost;

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,                   /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,                  /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
  128,                  /* l1 cache */
  2048,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  8,                    /* l1 cache */
  64,                   /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                      /* cache line size */
  32,                       /* l1 cache */
  512,                      /* l2 cache */
  6,                        /* streams */
  0,                        /* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  8,                    /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (14),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (8),    /* divsi */
  COSTS_N_INSNS (12),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (18),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  8,                    /* prefetch streams */
  COSTS_N_INSNS (3),    /* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,                   /* cache line size */
  16,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
  0,                    /* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
                                      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
                                     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
                                   machine_mode, machine_mode,
                                   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
                                       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
static tree get_prev_label (tree);
#endif
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
                                                     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
                                                           machine_mode,
                                                           rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
                                                           enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
                                                  reg_class_t,
                                                  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
                                                machine_mode,
                                                reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
                                                     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
                                      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
                                          HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
                                          enum rs6000_reg_type,
                                          machine_mode,
                                          secondary_reload_info *,
                                          bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];     /* return value + 3 arguments.  */
  unsigned char uns_p[4];   /* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  /* GPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* FPRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* VRs */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "0", "1", "2", "3", "4", "5", "6", "7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  /* GPRs */
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  /* FPRs */
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  /* VRs */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* lr ctr ca ap */
  "lr", "ctr", "ca", "ap",
  /* cr0..cr7 */
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* vrsave vscr sfp */
  "vrsave", "vscr", "sfp",
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",    1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",   0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall",  0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct",  0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,         0, 0, false, false, false, false, NULL, NULL }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
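
/* Worked example (illustrative): ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is
   0x80000000 and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) is 0x20000000,
   so %v0 occupies the most significant VRSAVE bit.  */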
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1674 rs6000_builtin_support_vector_misalignment
1675 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1676 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1677 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1678 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1679 rs6000_builtin_vectorization_cost
1680 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1681 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1682 rs6000_preferred_simd_mode
1683 #undef TARGET_VECTORIZE_INIT_COST
1684 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1685 #undef TARGET_VECTORIZE_ADD_STMT_COST
1686 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1687 #undef TARGET_VECTORIZE_FINISH_COST
1688 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1689 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1690 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1691
1692 #undef TARGET_INIT_BUILTINS
1693 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1694 #undef TARGET_BUILTIN_DECL
1695 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1696
1697 #undef TARGET_FOLD_BUILTIN
1698 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1699 #undef TARGET_GIMPLE_FOLD_BUILTIN
1700 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1701
1702 #undef TARGET_EXPAND_BUILTIN
1703 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1704
1705 #undef TARGET_MANGLE_TYPE
1706 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1707
1708 #undef TARGET_INIT_LIBFUNCS
1709 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1710
1711 #if TARGET_MACHO
1712 #undef TARGET_BINDS_LOCAL_P
1713 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1714 #endif
1715
1716 #undef TARGET_MS_BITFIELD_LAYOUT_P
1717 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1718
1719 #undef TARGET_ASM_OUTPUT_MI_THUNK
1720 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1721
1722 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1723 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1724
1725 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1726 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1727
1728 #undef TARGET_REGISTER_MOVE_COST
1729 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1730 #undef TARGET_MEMORY_MOVE_COST
1731 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1732 #undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
1733 #define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
1734 rs6000_ira_change_pseudo_allocno_class
1735 #undef TARGET_CANNOT_COPY_INSN_P
1736 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1737 #undef TARGET_RTX_COSTS
1738 #define TARGET_RTX_COSTS rs6000_rtx_costs
1739 #undef TARGET_ADDRESS_COST
1740 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1741 #undef TARGET_INSN_COST
1742 #define TARGET_INSN_COST rs6000_insn_cost
1743
1744 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1745 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1746
1747 #undef TARGET_PROMOTE_FUNCTION_MODE
1748 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1749
1750 #undef TARGET_RETURN_IN_MEMORY
1751 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1752
1753 #undef TARGET_RETURN_IN_MSB
1754 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1755
1756 #undef TARGET_SETUP_INCOMING_VARARGS
1757 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1758
1759 /* Always strict argument naming on rs6000. */
1760 #undef TARGET_STRICT_ARGUMENT_NAMING
1761 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1762 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1763 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_SPLIT_COMPLEX_ARG
1765 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1766 #undef TARGET_MUST_PASS_IN_STACK
1767 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1768 #undef TARGET_PASS_BY_REFERENCE
1769 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1770 #undef TARGET_ARG_PARTIAL_BYTES
1771 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1772 #undef TARGET_FUNCTION_ARG_ADVANCE
1773 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1774 #undef TARGET_FUNCTION_ARG
1775 #define TARGET_FUNCTION_ARG rs6000_function_arg
1776 #undef TARGET_FUNCTION_ARG_PADDING
1777 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1778 #undef TARGET_FUNCTION_ARG_BOUNDARY
1779 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1780
1781 #undef TARGET_BUILD_BUILTIN_VA_LIST
1782 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1783
1784 #undef TARGET_EXPAND_BUILTIN_VA_START
1785 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1786
1787 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1788 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1789
1790 #undef TARGET_EH_RETURN_FILTER_MODE
1791 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1792
1793 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1794 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1795
1796 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1797 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1798
1799 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1800 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1801
1802 #undef TARGET_FLOATN_MODE
1803 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1804
1805 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1806 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1807
1808 #undef TARGET_MD_ASM_ADJUST
1809 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1810
1811 #undef TARGET_OPTION_OVERRIDE
1812 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1813
1814 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1815 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1816 rs6000_builtin_vectorized_function
1817
1818 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1819 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1820 rs6000_builtin_md_vectorized_function
1821
1822 #undef TARGET_STACK_PROTECT_GUARD
1823 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1824
1825 #if !TARGET_MACHO
1826 #undef TARGET_STACK_PROTECT_FAIL
1827 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1828 #endif
1829
1830 #ifdef HAVE_AS_TLS
1831 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1832 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1833 #endif
1834
1835 /* Use a 32-bit anchor range. This leads to sequences like:
1836
1837 addis tmp,anchor,high
1838 add dest,tmp,low
1839
1840 where tmp itself acts as an anchor, and can be shared between
1841 accesses to the same 64k page. */
1842 #undef TARGET_MIN_ANCHOR_OFFSET
1843 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1844 #undef TARGET_MAX_ANCHOR_OFFSET
1845 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
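/* A sketch of the usual high/low split behind the addis/add sequence above
   (illustrative only, not code used elsewhere in this file):

     HOST_WIDE_INT high = (offset + 0x8000) & ~(HOST_WIDE_INT) 0xffff;
     HOST_WIDE_INT low = offset - high;

   LOW then lies in [-0x8000, 0x7fff], so "addis" adds HIGH via its shifted
   16-bit immediate and the residue fits a D-form displacement.  */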
1846 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1847 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1848 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1849 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1850
1851 #undef TARGET_BUILTIN_RECIPROCAL
1852 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1853
1854 #undef TARGET_SECONDARY_RELOAD
1855 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1856 #undef TARGET_SECONDARY_MEMORY_NEEDED
1857 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1859 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1860
1861 #undef TARGET_LEGITIMATE_ADDRESS_P
1862 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1863
1864 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1865 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1866
1867 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1868 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1869
1870 #undef TARGET_CAN_ELIMINATE
1871 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1872
1873 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1874 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1875
1876 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1877 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1878
1879 #undef TARGET_TRAMPOLINE_INIT
1880 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1881
1882 #undef TARGET_FUNCTION_VALUE
1883 #define TARGET_FUNCTION_VALUE rs6000_function_value
1884
1885 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1886 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1887
1888 #undef TARGET_OPTION_SAVE
1889 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1890
1891 #undef TARGET_OPTION_RESTORE
1892 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1893
1894 #undef TARGET_OPTION_PRINT
1895 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1896
1897 #undef TARGET_CAN_INLINE_P
1898 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1899
1900 #undef TARGET_SET_CURRENT_FUNCTION
1901 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1902
1903 #undef TARGET_LEGITIMATE_CONSTANT_P
1904 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1905
1906 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1907 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1908
1909 #undef TARGET_CAN_USE_DOLOOP_P
1910 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1911
1912 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1913 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1914
1915 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1916 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1917 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1918 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1919 #undef TARGET_UNWIND_WORD_MODE
1920 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1921
1922 #undef TARGET_OFFLOAD_OPTIONS
1923 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1924
1925 #undef TARGET_C_MODE_FOR_SUFFIX
1926 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1927
1928 #undef TARGET_INVALID_BINARY_OP
1929 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1930
1931 #undef TARGET_OPTAB_SUPPORTED_P
1932 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1933
1934 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1935 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1936
1937 #undef TARGET_COMPARE_VERSION_PRIORITY
1938 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1939
1940 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1941 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1942 rs6000_generate_version_dispatcher_body
1943
1944 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1945 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1946 rs6000_get_function_versions_dispatcher
1947
1948 #undef TARGET_OPTION_FUNCTION_VERSIONS
1949 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1950
1951 #undef TARGET_HARD_REGNO_NREGS
1952 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1953 #undef TARGET_HARD_REGNO_MODE_OK
1954 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1955
1956 #undef TARGET_MODES_TIEABLE_P
1957 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1958
1959 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1960 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1961 rs6000_hard_regno_call_part_clobbered
1962
1963 #undef TARGET_SLOW_UNALIGNED_ACCESS
1964 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1965
1966 #undef TARGET_CAN_CHANGE_MODE_CLASS
1967 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1968
1969 #undef TARGET_CONSTANT_ALIGNMENT
1970 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1971
1972 #undef TARGET_STARTING_FRAME_OFFSET
1973 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1974
1975 #if TARGET_ELF && RS6000_WEAK
1976 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1977 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1978 #endif
1979
1980 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1981 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1982
1983 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1984 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1985 \f
1986
1987 /* Processor table. */
1988 struct rs6000_ptt
1989 {
1990 const char *const name; /* Canonical processor name. */
1991 const enum processor_type processor; /* Processor type enum value. */
1992 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1993 };
1994
1995 static struct rs6000_ptt const processor_target_table[] =
1996 {
1997 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1998 #include "rs6000-cpus.def"
1999 #undef RS6000_CPU
2000 };
2001
2002 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2003 name is invalid. */
2004
2005 static int
2006 rs6000_cpu_name_lookup (const char *name)
2007 {
2008 size_t i;
2009
2010 if (name != NULL)
2011 {
2012 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2013 if (! strcmp (name, processor_target_table[i].name))
2014 return (int)i;
2015 }
2016
2017 return -1;
2018 }
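/* For example, assuming the usual entries generated from rs6000-cpus.def,
   rs6000_cpu_name_lookup ("power9") returns that entry's index, while an
   unknown name such as "power42" returns -1.  */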
2019
2020 \f
2021 /* Return number of consecutive hard regs needed starting at reg REGNO
2022 to hold something of mode MODE.
2023 This is ordinarily the length in words of a value of mode MODE
2024 but can be less for certain modes in special long registers.
2025
2026 POWER and PowerPC GPRs hold 32 bits worth;
2027 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2028
2029 static int
2030 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2031 {
2032 unsigned HOST_WIDE_INT reg_size;
2033
2034 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2035 128-bit floating point that can go in vector registers, which has VSX
2036 memory addressing. */
2037 if (FP_REGNO_P (regno))
2038 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2039 ? UNITS_PER_VSX_WORD
2040 : UNITS_PER_FP_WORD);
2041
2042 else if (ALTIVEC_REGNO_P (regno))
2043 reg_size = UNITS_PER_ALTIVEC_WORD;
2044
2045 else
2046 reg_size = UNITS_PER_WORD;
2047
2048 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2049 }
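/* Worked examples of the ceiling division above: DFmode (8 bytes) needs
   (8 + 4 - 1) / 4 = 2 GPRs on a 32-bit target but a single 8-byte FPR, and
   V4SImode (16 bytes) needs exactly one 16-byte Altivec register.  */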
2050
2051 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2052 MODE. */
2053 static int
2054 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2055 {
2056 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2057
2058 if (COMPLEX_MODE_P (mode))
2059 mode = GET_MODE_INNER (mode);
2060
2061 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2062 register combinations, and we use PTImode where we need to deal with quad
2063 word memory operations. Don't allow quad words in the argument or frame
2064 pointer registers, just registers 0..31. */
2065 if (mode == PTImode)
2066 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2067 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2068 && ((regno & 1) == 0));
2069
2070 /* The VSX registers that overlap the FPRs are wider than the FPRs are on
2071 non-VSX implementations. Don't allow an item to be split between an FP
2072 register and an Altivec register. Allow TImode in all VSX registers if the user
2073 asked for it. */
2074 if (TARGET_VSX && VSX_REGNO_P (regno)
2075 && (VECTOR_MEM_VSX_P (mode)
2076 || FLOAT128_VECTOR_P (mode)
2077 || reg_addr[mode].scalar_in_vmx_p
2078 || mode == TImode
2079 || (TARGET_VADDUQM && mode == V1TImode)))
2080 {
2081 if (FP_REGNO_P (regno))
2082 return FP_REGNO_P (last_regno);
2083
2084 if (ALTIVEC_REGNO_P (regno))
2085 {
2086 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2087 return 0;
2088
2089 return ALTIVEC_REGNO_P (last_regno);
2090 }
2091 }
2092
2093 /* The GPRs can hold any mode, but values bigger than one register
2094 cannot go past R31. */
2095 if (INT_REGNO_P (regno))
2096 return INT_REGNO_P (last_regno);
2097
2098 /* The float registers (except for VSX vector modes) can only hold floating
2099 modes and DImode (plus SImode on ISA 2.07 and QImode/HImode on ISA 3.0). */
2100 if (FP_REGNO_P (regno))
2101 {
2102 if (FLOAT128_VECTOR_P (mode))
2103 return false;
2104
2105 if (SCALAR_FLOAT_MODE_P (mode)
2106 && (mode != TDmode || (regno % 2) == 0)
2107 && FP_REGNO_P (last_regno))
2108 return 1;
2109
2110 if (GET_MODE_CLASS (mode) == MODE_INT)
2111 {
2112 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2113 return 1;
2114
2115 if (TARGET_P8_VECTOR && (mode == SImode))
2116 return 1;
2117
2118 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2119 return 1;
2120 }
2121
2122 return 0;
2123 }
2124
2125 /* The CR register can only hold CC modes. */
2126 if (CR_REGNO_P (regno))
2127 return GET_MODE_CLASS (mode) == MODE_CC;
2128
2129 if (CA_REGNO_P (regno))
2130 return mode == Pmode || mode == SImode;
2131
2132 /* AltiVec modes go only in AltiVec registers. */
2133 if (ALTIVEC_REGNO_P (regno))
2134 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2135 || mode == V1TImode);
2136
2137 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2138 registers, and the value must fit within the register set. */
2139
2140 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2141 }
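/* For instance, the checks above accept PTImode at an even GPR such as r10
   (the quad word occupies r10/r11) but reject it at r9, and likewise restrict
   TDmode to even-numbered FPRs.  */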
2142
2143 /* Implement TARGET_HARD_REGNO_NREGS. */
2144
2145 static unsigned int
2146 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2147 {
2148 return rs6000_hard_regno_nregs[mode][regno];
2149 }
2150
2151 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2152
2153 static bool
2154 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2155 {
2156 return rs6000_hard_regno_mode_ok_p[mode][regno];
2157 }
2158
2159 /* Implement TARGET_MODES_TIEABLE_P.
2160
2161 PTImode cannot tie with other modes because PTImode is restricted to even
2162 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2163 57744).
2164
2165 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2166 128-bit floating point on VSX systems ties with other vectors. */
2167
2168 static bool
2169 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2170 {
2171 if (mode1 == PTImode)
2172 return mode2 == PTImode;
2173 if (mode2 == PTImode)
2174 return false;
2175
2176 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2177 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2179 return false;
2180
2181 if (SCALAR_FLOAT_MODE_P (mode1))
2182 return SCALAR_FLOAT_MODE_P (mode2);
2183 if (SCALAR_FLOAT_MODE_P (mode2))
2184 return false;
2185
2186 if (GET_MODE_CLASS (mode1) == MODE_CC)
2187 return GET_MODE_CLASS (mode2) == MODE_CC;
2188 if (GET_MODE_CLASS (mode2) == MODE_CC)
2189 return false;
2190
2191 return true;
2192 }
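/* Examples of the ordering above: V2DFmode ties with V4SFmode (both are
   Altivec/VSX vector modes), SFmode ties with DFmode (both scalar float),
   but DFmode does not tie with V2DFmode, and PTImode ties only with
   itself.  */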
2193
2194 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2195
2196 static bool
2197 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2198 unsigned int regno, machine_mode mode)
2199 {
2200 if (TARGET_32BIT
2201 && TARGET_POWERPC64
2202 && GET_MODE_SIZE (mode) > 4
2203 && INT_REGNO_P (regno))
2204 return true;
2205
2206 if (TARGET_VSX
2207 && FP_REGNO_P (regno)
2208 && GET_MODE_SIZE (mode) > 8
2209 && !FLOAT128_2REG_P (mode))
2210 return true;
2211
2212 return false;
2213 }
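/* E.g. on 32-bit with -mpowerpc64, only the low 32 bits of a GPR are saved
   across calls, so a DImode value in r14 is part-clobbered; under VSX, a
   KFmode value in a traditional FPR likewise spans more than the 8 bytes the
   ABI preserves.  */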
2214
2215 /* Print interesting facts about registers. */
2216 static void
2217 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2218 {
2219 int r, m;
2220
2221 for (r = first_regno; r <= last_regno; ++r)
2222 {
2223 const char *comma = "";
2224 int len;
2225
2226 if (first_regno == last_regno)
2227 fprintf (stderr, "%s:\t", reg_name);
2228 else
2229 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2230
2231 len = 8;
2232 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2233 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2234 {
2235 if (len > 70)
2236 {
2237 fprintf (stderr, ",\n\t");
2238 len = 8;
2239 comma = "";
2240 }
2241
2242 if (rs6000_hard_regno_nregs[m][r] > 1)
2243 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2244 rs6000_hard_regno_nregs[m][r]);
2245 else
2246 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2247
2248 comma = ", ";
2249 }
2250
2251 if (call_used_regs[r])
2252 {
2253 if (len > 70)
2254 {
2255 fprintf (stderr, ",\n\t");
2256 len = 8;
2257 comma = "";
2258 }
2259
2260 len += fprintf (stderr, "%s%s", comma, "call-used");
2261 comma = ", ";
2262 }
2263
2264 if (fixed_regs[r])
2265 {
2266 if (len > 70)
2267 {
2268 fprintf (stderr, ",\n\t");
2269 len = 8;
2270 comma = "";
2271 }
2272
2273 len += fprintf (stderr, "%s%s", comma, "fixed");
2274 comma = ", ";
2275 }
2276
2277 if (len > 70)
2278 {
2279 fprintf (stderr, ",\n\t");
2280 comma = "";
2281 }
2282
2283 len += fprintf (stderr, "%sreg-class = %s", comma,
2284 reg_class_names[(int)rs6000_regno_regclass[r]]);
2285 comma = ", ";
2286
2287 if (len > 70)
2288 {
2289 fprintf (stderr, ",\n\t");
2290 comma = "";
2291 }
2292
2293 fprintf (stderr, "%sregno = %d\n", comma, r);
2294 }
2295 }
2296
2297 static const char *
2298 rs6000_debug_vector_unit (enum rs6000_vector v)
2299 {
2300 const char *ret;
2301
2302 switch (v)
2303 {
2304 case VECTOR_NONE: ret = "none"; break;
2305 case VECTOR_ALTIVEC: ret = "altivec"; break;
2306 case VECTOR_VSX: ret = "vsx"; break;
2307 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2308 default: ret = "unknown"; break;
2309 }
2310
2311 return ret;
2312 }
2313
2314 /* Inner function printing just the address mask for a particular reload
2315 register class. */
2316 DEBUG_FUNCTION char *
2317 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2318 {
2319 static char ret[8];
2320 char *p = ret;
2321
2322 if ((mask & RELOAD_REG_VALID) != 0)
2323 *p++ = 'v';
2324 else if (keep_spaces)
2325 *p++ = ' ';
2326
2327 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2328 *p++ = 'm';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_INDEXED) != 0)
2333 *p++ = 'i';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2338 *p++ = 'O';
2339 else if ((mask & RELOAD_REG_OFFSET) != 0)
2340 *p++ = 'o';
2341 else if (keep_spaces)
2342 *p++ = ' ';
2343
2344 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2345 *p++ = '+';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_AND_M16) != 0)
2355 *p++ = '&';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 *p = '\0';
2360
2361 return ret;
2362 }
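/* For example, a mask with RELOAD_REG_VALID, RELOAD_REG_INDEXED and
   RELOAD_REG_OFFSET set comes back as "vio", or as "v io   " when
   KEEP_SPACES asks for the fixed column layout.  */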
2363
2364 /* Print the address masks in a human readable fashion. */
2365 DEBUG_FUNCTION void
2366 rs6000_debug_print_mode (ssize_t m)
2367 {
2368 ssize_t rc;
2369 int spaces = 0;
2370
2371 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2372 for (rc = 0; rc < N_RELOAD_REG; rc++)
2373 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2374 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2375
2376 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2377 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2378 {
2379 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2380 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2381 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2382 spaces = 0;
2383 }
2384 else
2385 spaces += sizeof (" Reload=sl") - 1;
2386
2387 if (reg_addr[m].scalar_in_vmx_p)
2388 {
2389 fprintf (stderr, "%*s Upper=y", spaces, "");
2390 spaces = 0;
2391 }
2392 else
2393 spaces += sizeof (" Upper=y") - 1;
2394
2395 if (rs6000_vector_unit[m] != VECTOR_NONE
2396 || rs6000_vector_mem[m] != VECTOR_NONE)
2397 {
2398 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2399 spaces, "",
2400 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2401 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2402 }
2403
2404 fputs ("\n", stderr);
2405 }
2406
2407 #define DEBUG_FMT_ID "%-32s= "
2408 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2409 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2410 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
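/* These expand so that, e.g., fprintf (stderr, DEBUG_FMT_D, "tls_size", 16)
   prints "tls_size" left-justified in a 32-column field followed by "= 16",
   keeping the -mdebug=reg report aligned.  */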
2411
2412 /* Print various interesting information with -mdebug=reg. */
2413 static void
2414 rs6000_debug_reg_global (void)
2415 {
2416 static const char *const tf[2] = { "false", "true" };
2417 const char *nl = (const char *)0;
2418 int m;
2419 size_t m1, m2, v;
2420 char costly_num[20];
2421 char nop_num[20];
2422 char flags_buffer[40];
2423 const char *costly_str;
2424 const char *nop_str;
2425 const char *trace_str;
2426 const char *abi_str;
2427 const char *cmodel_str;
2428 struct cl_target_option cl_opts;
2429
2430 /* Modes we want tieable information on. */
2431 static const machine_mode print_tieable_modes[] = {
2432 QImode,
2433 HImode,
2434 SImode,
2435 DImode,
2436 TImode,
2437 PTImode,
2438 SFmode,
2439 DFmode,
2440 TFmode,
2441 IFmode,
2442 KFmode,
2443 SDmode,
2444 DDmode,
2445 TDmode,
2446 V16QImode,
2447 V8HImode,
2448 V4SImode,
2449 V2DImode,
2450 V1TImode,
2451 V32QImode,
2452 V16HImode,
2453 V8SImode,
2454 V4DImode,
2455 V2TImode,
2456 V4SFmode,
2457 V2DFmode,
2458 V8SFmode,
2459 V4DFmode,
2460 CCmode,
2461 CCUNSmode,
2462 CCEQmode,
2463 };
2464
2465 /* Virtual regs we are interested in. */
2466 static const struct {
2467 int regno; /* register number. */
2468 const char *name; /* register name. */
2469 } virtual_regs[] = {
2470 { STACK_POINTER_REGNUM, "stack pointer:" },
2471 { TOC_REGNUM, "toc: " },
2472 { STATIC_CHAIN_REGNUM, "static chain: " },
2473 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2474 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2475 { ARG_POINTER_REGNUM, "arg pointer: " },
2476 { FRAME_POINTER_REGNUM, "frame pointer:" },
2477 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2478 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2479 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2480 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2481 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2482 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2483 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2484 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2485 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2486 };
2487
2488 fputs ("\nHard register information:\n", stderr);
2489 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2490 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2491 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2492 LAST_ALTIVEC_REGNO,
2493 "vs");
2494 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2495 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2496 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2497 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2498 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2499 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2500
2501 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2502 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2503 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2504
2505 fprintf (stderr,
2506 "\n"
2507 "d reg_class = %s\n"
2508 "f reg_class = %s\n"
2509 "v reg_class = %s\n"
2510 "wa reg_class = %s\n"
2511 "wd reg_class = %s\n"
2512 "we reg_class = %s\n"
2513 "wf reg_class = %s\n"
2514 "wg reg_class = %s\n"
2515 "wi reg_class = %s\n"
2516 "wl reg_class = %s\n"
2517 "wm reg_class = %s\n"
2518 "wp reg_class = %s\n"
2519 "wq reg_class = %s\n"
2520 "wr reg_class = %s\n"
2521 "ws reg_class = %s\n"
2522 "wt reg_class = %s\n"
2523 "wv reg_class = %s\n"
2524 "ww reg_class = %s\n"
2525 "wx reg_class = %s\n"
2526 "wz reg_class = %s\n"
2527 "wA reg_class = %s\n"
2528 "\n",
2529 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2530 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2531 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2532 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2533 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2534 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2535 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2536 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2537 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2538 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2539 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2540 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]]);
2550
2551 nl = "\n";
2552 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2553 rs6000_debug_print_mode (m);
2554
2555 fputs ("\n", stderr);
2556
2557 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2558 {
2559 machine_mode mode1 = print_tieable_modes[m1];
2560 bool first_time = true;
2561
2562 nl = (const char *)0;
2563 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2564 {
2565 machine_mode mode2 = print_tieable_modes[m2];
2566 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2567 {
2568 if (first_time)
2569 {
2570 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2571 nl = "\n";
2572 first_time = false;
2573 }
2574
2575 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2576 }
2577 }
2578
2579 if (!first_time)
2580 fputs ("\n", stderr);
2581 }
2582
2583 if (nl)
2584 fputs (nl, stderr);
2585
2586 if (rs6000_recip_control)
2587 {
2588 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2589
2590 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2591 if (rs6000_recip_bits[m])
2592 {
2593 fprintf (stderr,
2594 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2595 GET_MODE_NAME (m),
2596 (RS6000_RECIP_AUTO_RE_P (m)
2597 ? "auto"
2598 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2599 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2600 ? "auto"
2601 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2602 }
2603
2604 fputs ("\n", stderr);
2605 }
2606
2607 if (rs6000_cpu_index >= 0)
2608 {
2609 const char *name = processor_target_table[rs6000_cpu_index].name;
2610 HOST_WIDE_INT flags
2611 = processor_target_table[rs6000_cpu_index].target_enable;
2612
2613 sprintf (flags_buffer, "-mcpu=%s flags", name);
2614 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2615 }
2616 else
2617 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2618
2619 if (rs6000_tune_index >= 0)
2620 {
2621 const char *name = processor_target_table[rs6000_tune_index].name;
2622 HOST_WIDE_INT flags
2623 = processor_target_table[rs6000_tune_index].target_enable;
2624
2625 sprintf (flags_buffer, "-mtune=%s flags", name);
2626 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2627 }
2628 else
2629 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2630
2631 cl_target_option_save (&cl_opts, &global_options);
2632 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2633 rs6000_isa_flags);
2634
2635 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2636 rs6000_isa_flags_explicit);
2637
2638 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2639 rs6000_builtin_mask);
2640
2641 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2642
2643 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2644 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2645
2646 switch (rs6000_sched_costly_dep)
2647 {
2648 case max_dep_latency:
2649 costly_str = "max_dep_latency";
2650 break;
2651
2652 case no_dep_costly:
2653 costly_str = "no_dep_costly";
2654 break;
2655
2656 case all_deps_costly:
2657 costly_str = "all_deps_costly";
2658 break;
2659
2660 case true_store_to_load_dep_costly:
2661 costly_str = "true_store_to_load_dep_costly";
2662 break;
2663
2664 case store_to_load_dep_costly:
2665 costly_str = "store_to_load_dep_costly";
2666 break;
2667
2668 default:
2669 costly_str = costly_num;
2670 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2671 break;
2672 }
2673
2674 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2675
2676 switch (rs6000_sched_insert_nops)
2677 {
2678 case sched_finish_regroup_exact:
2679 nop_str = "sched_finish_regroup_exact";
2680 break;
2681
2682 case sched_finish_pad_groups:
2683 nop_str = "sched_finish_pad_groups";
2684 break;
2685
2686 case sched_finish_none:
2687 nop_str = "sched_finish_none";
2688 break;
2689
2690 default:
2691 nop_str = nop_num;
2692 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2693 break;
2694 }
2695
2696 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2697
2698 switch (rs6000_sdata)
2699 {
2700 default:
2701 case SDATA_NONE:
2702 break;
2703
2704 case SDATA_DATA:
2705 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2706 break;
2707
2708 case SDATA_SYSV:
2709 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2710 break;
2711
2712 case SDATA_EABI:
2713 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2714 break;
2715
2716 }
2717
2718 switch (rs6000_traceback)
2719 {
2720 case traceback_default: trace_str = "default"; break;
2721 case traceback_none: trace_str = "none"; break;
2722 case traceback_part: trace_str = "part"; break;
2723 case traceback_full: trace_str = "full"; break;
2724 default: trace_str = "unknown"; break;
2725 }
2726
2727 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2728
2729 switch (rs6000_current_cmodel)
2730 {
2731 case CMODEL_SMALL: cmodel_str = "small"; break;
2732 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2733 case CMODEL_LARGE: cmodel_str = "large"; break;
2734 default: cmodel_str = "unknown"; break;
2735 }
2736
2737 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2738
2739 switch (rs6000_current_abi)
2740 {
2741 case ABI_NONE: abi_str = "none"; break;
2742 case ABI_AIX: abi_str = "aix"; break;
2743 case ABI_ELFv2: abi_str = "ELFv2"; break;
2744 case ABI_V4: abi_str = "V4"; break;
2745 case ABI_DARWIN: abi_str = "darwin"; break;
2746 default: abi_str = "unknown"; break;
2747 }
2748
2749 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2750
2751 if (rs6000_altivec_abi)
2752 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2753
2754 if (rs6000_darwin64_abi)
2755 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2756
2757 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2758 (TARGET_SOFT_FLOAT ? "true" : "false"));
2759
2760 if (TARGET_LINK_STACK)
2761 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2762
2763 if (TARGET_P8_FUSION)
2764 {
2765 char options[80];
2766
2767 strcpy (options, "power8");
2768 if (TARGET_P8_FUSION_SIGN)
2769 strcat (options, ", sign");
2770
2771 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2772 }
2773
2774 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2775 TARGET_SECURE_PLT ? "secure" : "bss");
2776 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2777 aix_struct_return ? "aix" : "sysv");
2778 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2779 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2780 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2781 tf[!!rs6000_align_branch_targets]);
2782 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2783 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2784 rs6000_long_double_type_size);
2785 if (rs6000_long_double_type_size > 64)
2786 {
2787 fprintf (stderr, DEBUG_FMT_S, "long double type",
2788 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2789 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2790 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2791 }
2792 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2793 (int)rs6000_sched_restricted_insns_priority);
2794 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2795 (int)END_BUILTINS);
2796 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2797 (int)RS6000_BUILTIN_COUNT);
2798
2799 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2800 (int)TARGET_FLOAT128_ENABLE_TYPE);
2801
2802 if (TARGET_VSX)
2803 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2804 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2805
2806 if (TARGET_DIRECT_MOVE_128)
2807 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2808 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2809 }
2810
2811 \f
2812 /* Update the addr mask bits in reg_addr to help the secondary reload and
2813 legitimate address support figure out the appropriate addressing to
2814 use. */
2815
2816 static void
2817 rs6000_setup_reg_addr_masks (void)
2818 {
2819 ssize_t rc, reg, m, nregs;
2820 addr_mask_type any_addr_mask, addr_mask;
2821
2822 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2823 {
2824 machine_mode m2 = (machine_mode) m;
2825 bool complex_p = false;
2826 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2827 size_t msize;
2828
2829 if (COMPLEX_MODE_P (m2))
2830 {
2831 complex_p = true;
2832 m2 = GET_MODE_INNER (m2);
2833 }
2834
2835 msize = GET_MODE_SIZE (m2);
2836
2837 /* SDmode is special in that we want to access it only via REG+REG
2838 addressing on power7 and above, since we want to use the LFIWZX and
2839 STFIWX instructions to load and store it. */
2840 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2841
2842 any_addr_mask = 0;
2843 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2844 {
2845 addr_mask = 0;
2846 reg = reload_reg_map[rc].reg;
2847
2848 /* Can mode values go in the GPR/FPR/Altivec registers? */
2849 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2850 {
2851 bool small_int_vsx_p = (small_int_p
2852 && (rc == RELOAD_REG_FPR
2853 || rc == RELOAD_REG_VMX));
2854
2855 nregs = rs6000_hard_regno_nregs[m][reg];
2856 addr_mask |= RELOAD_REG_VALID;
2857
2858 /* Indicate if the mode takes more than 1 physical register. If
2859 it takes a single register, indicate it can do REG+REG
2860 addressing. Small integers in VSX registers can only do
2861 REG+REG addressing. */
2862 if (small_int_vsx_p)
2863 addr_mask |= RELOAD_REG_INDEXED;
2864 else if (nregs > 1 || m == BLKmode || complex_p)
2865 addr_mask |= RELOAD_REG_MULTIPLE;
2866 else
2867 addr_mask |= RELOAD_REG_INDEXED;
2868
2869 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2870 addressing. If we allow scalars into Altivec registers,
2871 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2872
2873 For VSX systems, we don't allow update addressing for
2874 DFmode/SFmode if those registers can go in both the
2875 traditional floating point registers and Altivec registers.
2876 The load/store instructions for the Altivec registers do not
2877 have update forms. If we allowed update addressing, it seems
2878 to break IV-OPT code using floating point if the index type is
2879 int instead of long (PR target/81550 and target/84042). */
2880
2881 if (TARGET_UPDATE
2882 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2883 && msize <= 8
2884 && !VECTOR_MODE_P (m2)
2885 && !FLOAT128_VECTOR_P (m2)
2886 && !complex_p
2887 && (m != E_DFmode || !TARGET_VSX)
2888 && (m != E_SFmode || !TARGET_P8_VECTOR)
2889 && !small_int_vsx_p)
2890 {
2891 addr_mask |= RELOAD_REG_PRE_INCDEC;
2892
2893 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2894 we don't allow PRE_MODIFY for some multi-register
2895 operations. */
2896 switch (m)
2897 {
2898 default:
2899 addr_mask |= RELOAD_REG_PRE_MODIFY;
2900 break;
2901
2902 case E_DImode:
2903 if (TARGET_POWERPC64)
2904 addr_mask |= RELOAD_REG_PRE_MODIFY;
2905 break;
2906
2907 case E_DFmode:
2908 case E_DDmode:
2909 if (TARGET_HARD_FLOAT)
2910 addr_mask |= RELOAD_REG_PRE_MODIFY;
2911 break;
2912 }
2913 }
2914 }
2915
2916 /* GPR and FPR registers can do REG+OFFSET addressing, except
2917 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2918 for 64-bit scalars and 32-bit SFmode to Altivec registers. */
2919 if ((addr_mask != 0) && !indexed_only_p
2920 && msize <= 8
2921 && (rc == RELOAD_REG_GPR
2922 || ((msize == 8 || m2 == SFmode)
2923 && (rc == RELOAD_REG_FPR
2924 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2925 addr_mask |= RELOAD_REG_OFFSET;
2926
2927 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2928 instructions are enabled. The offset for 128-bit VSX registers is
2929 only 12 bits. While GPRs can handle the full offset range, VSX
2930 registers can only handle the restricted range. */
2931 else if ((addr_mask != 0) && !indexed_only_p
2932 && msize == 16 && TARGET_P9_VECTOR
2933 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2934 || (m2 == TImode && TARGET_VSX)))
2935 {
2936 addr_mask |= RELOAD_REG_OFFSET;
2937 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2938 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2939 }
2940
2941 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2942 addressing on 128-bit types. */
2943 if (rc == RELOAD_REG_VMX && msize == 16
2944 && (addr_mask & RELOAD_REG_VALID) != 0)
2945 addr_mask |= RELOAD_REG_AND_M16;
2946
2947 reg_addr[m].addr_mask[rc] = addr_mask;
2948 any_addr_mask |= addr_mask;
2949 }
2950
2951 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2952 }
2953 }
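/* As an illustration of the loop above (a sketch of typical results, not an
   exhaustive table): for SImode on a power9 target the GPR class usually ends
   up RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET
   | RELOAD_REG_PRE_INCDEC | RELOAD_REG_PRE_MODIFY, while the FPR class gets
   only VALID | INDEXED, since small integers in VSX registers are limited to
   REG+REG addressing.  */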
2954
2955 \f
2956 /* Initialize the various global tables that are based on register size. */
2957 static void
2958 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2959 {
2960 ssize_t r, m, c;
2961 int align64;
2962 int align32;
2963
2964 /* Precalculate REGNO_REG_CLASS. */
2965 rs6000_regno_regclass[0] = GENERAL_REGS;
2966 for (r = 1; r < 32; ++r)
2967 rs6000_regno_regclass[r] = BASE_REGS;
2968
2969 for (r = 32; r < 64; ++r)
2970 rs6000_regno_regclass[r] = FLOAT_REGS;
2971
2972 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
2973 rs6000_regno_regclass[r] = NO_REGS;
2974
2975 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2976 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2977
2978 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2979 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2980 rs6000_regno_regclass[r] = CR_REGS;
2981
2982 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2983 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2984 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2985 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2986 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2987 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2988 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2989
2990 /* Precalculate register class to simpler reload register class. We don't
2991 need all of the register classes that are combinations of different
2992 classes, just the simple ones that have constraint letters. */
2993 for (c = 0; c < N_REG_CLASSES; c++)
2994 reg_class_to_reg_type[c] = NO_REG_TYPE;
2995
2996 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2997 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2998 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2999 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3000 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3001 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3002 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3003 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3004 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3005 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3006
3007 if (TARGET_VSX)
3008 {
3009 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3010 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3011 }
3012 else
3013 {
3014 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3015 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3016 }
3017
3018 /* Precalculate the valid memory formats as well as the vector information;
3019 this must be set up before the rs6000_hard_regno_nregs_internal calls
3020 below. */
3021 gcc_assert ((int)VECTOR_NONE == 0);
3022 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3023 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3024
3025 gcc_assert ((int)CODE_FOR_nothing == 0);
3026 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3027
3028 gcc_assert ((int)NO_REGS == 0);
3029 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3030
3031 /* The VSX hardware allows native alignment for vectors, but TARGET_VSX_ALIGN_128
3032 controls whether the compiler may use that or must keep 128-bit alignment. */
3033 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3034 {
3035 align64 = 64;
3036 align32 = 32;
3037 }
3038 else
3039 {
3040 align64 = 128;
3041 align32 = 128;
3042 }
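/* For example, with -mvsx and TARGET_VSX_ALIGN_128 unset, V2DFmode below
   only requires 64-bit alignment, whereas an Altivec-only target keeps the
   full 128-bit requirement.  */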
3043
3044 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3045 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3046 if (TARGET_FLOAT128_TYPE)
3047 {
3048 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3049 rs6000_vector_align[KFmode] = 128;
3050
3051 if (FLOAT128_IEEE_P (TFmode))
3052 {
3053 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3054 rs6000_vector_align[TFmode] = 128;
3055 }
3056 }
3057
3058 /* V2DF mode, VSX only. */
3059 if (TARGET_VSX)
3060 {
3061 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3062 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3063 rs6000_vector_align[V2DFmode] = align64;
3064 }
3065
3066 /* V4SF mode, either VSX or Altivec. */
3067 if (TARGET_VSX)
3068 {
3069 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3070 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3071 rs6000_vector_align[V4SFmode] = align32;
3072 }
3073 else if (TARGET_ALTIVEC)
3074 {
3075 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3076 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3077 rs6000_vector_align[V4SFmode] = align32;
3078 }
3079
3080 /* V16QImode, V8HImode, V4SImode use the Altivec unit only, but may use VSX
3081 loads and stores. */
3082 if (TARGET_ALTIVEC)
3083 {
3084 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3085 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3086 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3087 rs6000_vector_align[V4SImode] = align32;
3088 rs6000_vector_align[V8HImode] = align32;
3089 rs6000_vector_align[V16QImode] = align32;
3090
3091 if (TARGET_VSX)
3092 {
3093 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3094 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3095 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3096 }
3097 else
3098 {
3099 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3100 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3101 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3102 }
3103 }
3104
3105 /* V2DImode: full mode support depends on ISA 2.07 vector mode. Allow it under
3106 VSX to do insert/splat/extract. Altivec lacks 64-bit integer support. */
3107 if (TARGET_VSX)
3108 {
3109 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3110 rs6000_vector_unit[V2DImode]
3111 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3112 rs6000_vector_align[V2DImode] = align64;
3113
3114 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3115 rs6000_vector_unit[V1TImode]
3116 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3117 rs6000_vector_align[V1TImode] = 128;
3118 }
3119
3120 /* DFmode, see if we want to use the VSX unit. Memory is handled
3121 differently, so don't set rs6000_vector_mem. */
3122 if (TARGET_VSX)
3123 {
3124 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3125 rs6000_vector_align[DFmode] = 64;
3126 }
3127
3128 /* SFmode, see if we want to use the VSX unit. */
3129 if (TARGET_P8_VECTOR)
3130 {
3131 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3132 rs6000_vector_align[SFmode] = 32;
3133 }
3134
3135 /* Allow TImode in VSX registers and set the VSX memory macros. */
3136 if (TARGET_VSX)
3137 {
3138 rs6000_vector_mem[TImode] = VECTOR_VSX;
3139 rs6000_vector_align[TImode] = align64;
3140 }
3141
3142 /* Register class constraints for the constraints that depend on compile
3143 switches. When the VSX code was added, different constraints were added
3144 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3145 of the VSX registers are used. The register classes for scalar floating
3146 point types are set, based on whether we allow that type into the upper
3147 (Altivec) registers. GCC has register classes to target the Altivec
3148 registers for load/store operations, to select using a VSX memory
3149 operation instead of the traditional floating point operation. The
3150 constraints are:
3151
3152 d - Register class to use with traditional DFmode instructions.
3153 f - Register class to use with traditional SFmode instructions.
3154 v - Altivec register.
3155 wa - Any VSX register.
3156 wc - Reserved to represent individual CR bits (used in LLVM).
3157 wd - Preferred register class for V2DFmode.
3158 wf - Preferred register class for V4SFmode.
3159 wg - Float register for power6x move insns.
3160 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3161 wl - Float register if we can do 32-bit signed int loads.
3162 wm - VSX register for ISA 2.07 direct move operations.
3163 wn - always NO_REGS.
3164 wr - GPR if 64-bit mode is permitted.
3165 ws - Register class to do ISA 2.06 DF operations.
3166 wt - VSX register for TImode in VSX registers.
3167 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3168 ww - Register class to do SF conversions in with VSX operations.
3169 wx - Float register if we can do 32-bit int stores.
3170 wz - Float register if we can do 32-bit unsigned int loads. */
3171
3172 if (TARGET_HARD_FLOAT)
3173 {
3174 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3175 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3176 }
3177
3178 if (TARGET_VSX)
3179 {
3180 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3181 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3182 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3183 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3184 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3185 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3186 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3187 }
3188
3189 /* Add conditional constraints based on various options, to allow us to
3190 collapse multiple insn patterns. */
3191 if (TARGET_ALTIVEC)
3192 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3193
3194 if (TARGET_MFPGPR) /* DFmode */
3195 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3196
3197 if (TARGET_LFIWAX)
3198 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3199
3200 if (TARGET_DIRECT_MOVE)
3201 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3202
3203 if (TARGET_POWERPC64)
3204 {
3205 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3206 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3207 }
3208
3209 if (TARGET_P8_VECTOR) /* SFmode */
3210 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3211 else if (TARGET_VSX)
3212 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3213
3214 if (TARGET_STFIWX)
3215 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3216
3217 if (TARGET_LFIWZX)
3218 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3219
3220 if (TARGET_FLOAT128_TYPE)
3221 {
3222 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3223 if (FLOAT128_IEEE_P (TFmode))
3224 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3225 }
3226
3227 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3228 if (TARGET_DIRECT_MOVE_128)
3229 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3230
3231 /* Set up the reload helper and direct move functions. */
3232 if (TARGET_VSX || TARGET_ALTIVEC)
3233 {
3234 if (TARGET_64BIT)
3235 {
3236 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3237 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3238 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3239 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3240 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3241 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3242 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3243 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3244 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3245 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3246 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3247 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3248 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3249 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3250 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3251 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3252 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3253 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3254 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3255 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3256
3257 if (FLOAT128_VECTOR_P (KFmode))
3258 {
3259 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3260 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3261 }
3262
3263 if (FLOAT128_VECTOR_P (TFmode))
3264 {
3265 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3266 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3267 }
3268
3269 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3270 available. */
3271 if (TARGET_NO_SDMODE_STACK)
3272 {
3273 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3274 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3275 }
3276
3277 if (TARGET_VSX)
3278 {
3279 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3280 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3281 }
3282
3283 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3284 {
3285 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3286 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3287 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3288 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3289 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3290 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3291 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3292 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3293 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3294
3295 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3296 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3297 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3298 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3299 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3300 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3301 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3302 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3303 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3304
3305 if (FLOAT128_VECTOR_P (KFmode))
3306 {
3307 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3308 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3309 }
3310
3311 if (FLOAT128_VECTOR_P (TFmode))
3312 {
3313 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3314 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3315 }
3316 }
3317 }
3318 else
3319 {
3320 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3321 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3322 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3323 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3324 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3325 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3326 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3327 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3328 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3329 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3330 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3331 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3332 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3333 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3334 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3335 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3336 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3337 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3338 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3339 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3340
3341 if (FLOAT128_VECTOR_P (KFmode))
3342 {
3343 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3344 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3345 }
3346
3347 if (FLOAT128_IEEE_P (TFmode))
3348 {
3349 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3350 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3351 }
3352
3353 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3354 available. */
3355 if (TARGET_NO_SDMODE_STACK)
3356 {
3357 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3358 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3359 }
3360
3361 if (TARGET_VSX)
3362 {
3363 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3364 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3365 }
3366
3367 if (TARGET_DIRECT_MOVE)
3368 {
3369 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3370 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3371 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3372 }
3373 }
3374
3375 reg_addr[DFmode].scalar_in_vmx_p = true;
3376 reg_addr[DImode].scalar_in_vmx_p = true;
3377
3378 if (TARGET_P8_VECTOR)
3379 {
3380 reg_addr[SFmode].scalar_in_vmx_p = true;
3381 reg_addr[SImode].scalar_in_vmx_p = true;
3382
3383 if (TARGET_P9_VECTOR)
3384 {
3385 reg_addr[HImode].scalar_in_vmx_p = true;
3386 reg_addr[QImode].scalar_in_vmx_p = true;
3387 }
3388 }
3389 }
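
/* Hedged sketch (guarded out): rs6000_secondary_reload elsewhere in this
   file consults the handlers registered above, selecting the insn code by
   mode and direction roughly as follows. MODE and STORE_P are
   placeholders; CODE_FOR_nothing means no handler was registered. */
#if 0
  enum insn_code icode = (store_p
			  ? reg_addr[mode].reload_store
			  : reg_addr[mode].reload_load);
#endif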
3390
3391 /* Precalculate HARD_REGNO_NREGS. */
3392 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3393 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3394 rs6000_hard_regno_nregs[m][r]
3395 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
3396
3397 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3398 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3399 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3400 rs6000_hard_regno_mode_ok_p[m][r]
3401 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3402
3403 /* Precalculate CLASS_MAX_NREGS sizes. */
3404 for (c = 0; c < LIM_REG_CLASSES; ++c)
3405 {
3406 int reg_size;
3407
3408 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3409 reg_size = UNITS_PER_VSX_WORD;
3410
3411 else if (c == ALTIVEC_REGS)
3412 reg_size = UNITS_PER_ALTIVEC_WORD;
3413
3414 else if (c == FLOAT_REGS)
3415 reg_size = UNITS_PER_FP_WORD;
3416
3417 else
3418 reg_size = UNITS_PER_WORD;
3419
3420 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3421 {
3422 machine_mode m2 = (machine_mode)m;
3423 int reg_size2 = reg_size;
3424
3425 /* TDmode and IBM 128-bit floating point always take 2 registers, even
3426 in VSX. */
3427 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3428 reg_size2 = UNITS_PER_FP_WORD;
3429
3430 rs6000_class_max_nregs[m][c]
3431 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3432 }
3433 }
3434
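/* Worked example of the rounding division above (illustrative values):
   V2DFmode is 16 bytes, so with UNITS_PER_VSX_WORD == 16 it needs
   (16 + 16 - 1) / 16 == 1 VSX register, while IBM 128-bit floating point
   (FLOAT128_2REG_P, also 16 bytes) uses reg_size2 == UNITS_PER_FP_WORD == 8
   and therefore occupies (16 + 8 - 1) / 8 == 2 registers. */
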
3435 /* Calculate the modes for which to automatically generate code using the
3436 reciprocal divide and square root estimate instructions. In the future,
3437 possibly generate the instructions automatically even if the user did not
3438 specify -mrecip. The double precision reciprocal sqrt estimate on older
3439 machines is not accurate enough. */
3440 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3441 if (TARGET_FRES)
3442 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3443 if (TARGET_FRE)
3444 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3445 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3446 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3447 if (VECTOR_UNIT_VSX_P (V2DFmode))
3448 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3449
3450 if (TARGET_FRSQRTES)
3451 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3452 if (TARGET_FRSQRTE)
3453 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3454 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3455 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3456 if (VECTOR_UNIT_VSX_P (V2DFmode))
3457 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3458
3459 if (rs6000_recip_control)
3460 {
3461 if (!flag_finite_math_only)
3462 warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
3463 "-ffast-math");
3464 if (flag_trapping_math)
3465 warning (0, "%qs requires %qs or %qs", "-mrecip",
3466 "-fno-trapping-math", "-ffast-math");
3467 if (!flag_reciprocal_math)
3468 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3469 "-ffast-math");
3470 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3471 {
3472 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3473 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3474 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3475
3476 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3477 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3478 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3479
3480 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3481 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3482 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3483
3484 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3485 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3486 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3487
3488 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3489 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3490 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3491
3492 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3493 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3494 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3495
3496 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3497 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3498 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3499
3500 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3501 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3502 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3503 }
3504 }
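
/* Hedged sketch (guarded out): the software divide/rsqrt expanders test
   the bits set above before emitting estimate code, along these lines. */
#if 0
  bool use_sf_recip_div
    = (rs6000_recip_bits[SFmode] & RS6000_RECIP_MASK_AUTO_RE) != 0;
#endif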
3505
3506 /* Update the addr mask bits in reg_addr to help secondary reload and the
3507 legitimate-address support figure out the appropriate addressing to
3508 use. */
3509 rs6000_setup_reg_addr_masks ();
3510
3511 if (global_init_p || TARGET_DEBUG_TARGET)
3512 {
3513 if (TARGET_DEBUG_REG)
3514 rs6000_debug_reg_global ();
3515
3516 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3517 fprintf (stderr,
3518 "SImode variable mult cost = %d\n"
3519 "SImode constant mult cost = %d\n"
3520 "SImode short constant mult cost = %d\n"
3521 "DImode multipliciation cost = %d\n"
3522 "SImode division cost = %d\n"
3523 "DImode division cost = %d\n"
3524 "Simple fp operation cost = %d\n"
3525 "DFmode multiplication cost = %d\n"
3526 "SFmode division cost = %d\n"
3527 "DFmode division cost = %d\n"
3528 "cache line size = %d\n"
3529 "l1 cache size = %d\n"
3530 "l2 cache size = %d\n"
3531 "simultaneous prefetches = %d\n"
3532 "\n",
3533 rs6000_cost->mulsi,
3534 rs6000_cost->mulsi_const,
3535 rs6000_cost->mulsi_const9,
3536 rs6000_cost->muldi,
3537 rs6000_cost->divsi,
3538 rs6000_cost->divdi,
3539 rs6000_cost->fp,
3540 rs6000_cost->dmul,
3541 rs6000_cost->sdiv,
3542 rs6000_cost->ddiv,
3543 rs6000_cost->cache_line_size,
3544 rs6000_cost->l1_cache_size,
3545 rs6000_cost->l2_cache_size,
3546 rs6000_cost->simultaneous_prefetches);
3547 }
3548 }
3549
3550 #if TARGET_MACHO
3551 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3552
3553 static void
3554 darwin_rs6000_override_options (void)
3555 {
3556 /* The Darwin ABI always includes AltiVec; it cannot be (validly) turned
3557 off. */
3558 rs6000_altivec_abi = 1;
3559 TARGET_ALTIVEC_VRSAVE = 1;
3560 rs6000_current_abi = ABI_DARWIN;
3561
3562 if (DEFAULT_ABI == ABI_DARWIN
3563 && TARGET_64BIT)
3564 darwin_one_byte_bool = 1;
3565
3566 if (TARGET_64BIT && ! TARGET_POWERPC64)
3567 {
3568 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3569 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3570 }
3571 if (flag_mkernel)
3572 {
3573 rs6000_default_long_calls = 1;
3574 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3575 }
3576
3577 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3578 Altivec. */
3579 if (!flag_mkernel && !flag_apple_kext
3580 && TARGET_64BIT
3581 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3582 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3583
3584 /* Unless the user (not the configurer) has explicitly overridden
3585 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3586 G4 unless targeting the kernel. */
3587 if (!flag_mkernel
3588 && !flag_apple_kext
3589 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3590 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3591 && ! global_options_set.x_rs6000_cpu_index)
3592 {
3593 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3594 }
3595 }
3596 #endif
3597
3598 /* If not otherwise specified by a target, make 'long double' equivalent to
3599 'double'. */
3600
3601 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3602 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3603 #endif
3604
3605 /* Return the builtin mask of the various options in use that could affect
3606 which built-in functions are available. In the past we used target_flags,
3607 but we've run out of bits, and some options are no longer in target_flags. */
3608
3609 HOST_WIDE_INT
3610 rs6000_builtin_mask_calculate (void)
3611 {
3612 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3613 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3614 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3615 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3616 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3617 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3618 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3619 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3620 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3621 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3622 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3623 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3624 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3625 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3626 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3627 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3628 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3629 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3630 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3631 | ((TARGET_LONG_DOUBLE_128
3632 && TARGET_HARD_FLOAT
3633 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3634 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3635 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3636 }
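
/* Hedged usage sketch (guarded out): the result is a plain bitmask, so
   checking whether a group of built-in functions is available reduces to
   a bit test. */
#if 0
  HOST_WIDE_INT mask = rs6000_builtin_mask_calculate ();
  bool have_altivec_builtins = (mask & RS6000_BTM_ALTIVEC) != 0;
  bool have_htm_builtins = (mask & RS6000_BTM_HTM) != 0;
#endif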
3637
3638 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3639 to clobber the XER[CA] bit because clobbering that bit without telling
3640 the compiler worked just fine with versions of GCC before GCC 5, and
3641 breaking a lot of older code in ways that are hard to track down is
3642 not such a great idea. */
3643
3644 static rtx_insn *
3645 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3646 vec<const char *> &/*constraints*/,
3647 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3648 {
3649 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3650 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3651 return NULL;
3652 }
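
/* Hedged example (guarded out) of the kind of pre-GCC-5 user code the
   implicit clobber above protects: addic writes XER[CA], yet the asm
   statement declares no clobber for it. */
#if 0
long
example_addic (long x)
{
  long res;
  __asm__ ("addic %0,%1,1" : "=r" (res) : "r" (x));
  return res;
}
#endif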
3653
3654 /* Override command line options.
3655
3656 Combine build-specific configuration information with options
3657 specified on the command line to set various state variables which
3658 influence code generation, optimization, and expansion of built-in
3659 functions. Assure that command-line configuration preferences are
3660 compatible with each other and with the build configuration; issue
3661 warnings while adjusting configuration or error messages while
3662 rejecting configuration.
3663
3664 Upon entry to this function:
3665
3666 This function is called once at the beginning of
3667 compilation, and then again at the start and end of compiling
3668 each section of code that has a different configuration, as
3669 indicated, for example, by adding the
3670
3671 __attribute__((__target__("cpu=power9")))
3672
3673 qualifier to a function definition or, for example, by bracketing
3674 code between
3675
3676 #pragma GCC target("altivec")
3677
3678 and
3679
3680 #pragma GCC reset_options
3681
3682 directives. Parameter global_init_p is true for the initial
3683 invocation, which initializes global variables, and false for all
3684 subsequent invocations.
3685
3686
3687 Various global state information is assumed to be valid. This
3688 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3689 default CPU specified at build configure time, TARGET_DEFAULT,
3690 representing the default set of option flags for the default
3691 target, and global_options_set.x_rs6000_isa_flags, representing
3692 which options were requested on the command line.
3693
3694 Upon return from this function:
3695
3696 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3697 was set by name on the command line. Additionally, if certain
3698 attributes are automatically enabled or disabled by this function
3699 in order to assure compatibility between options and
3700 configuration, the flags associated with those attributes are
3701 also set. By setting these "explicit bits", we avoid the risk
3702 that other code might accidentally overwrite these particular
3703 attributes with "default values".
3704
3705 The various bits of rs6000_isa_flags are set to indicate the
3706 target options that have been selected for the most current
3707 compilation efforts. This has the effect of also turning on the
3708 associated TARGET_XXX values since these are macros which are
3709 generally defined to test the corresponding bit of the
3710 rs6000_isa_flags variable.
3711
3712 The variable rs6000_builtin_mask is set to represent the target
3713 options for the most current compilation efforts, consistent with
3714 the current contents of rs6000_isa_flags. This variable controls
3715 expansion of built-in functions.
3716
3717 Various other global variables and fields of global structures
3718 (over 50 in all) are initialized to reflect the desired options
3719 for the most current compilation efforts. */
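
/* Hedged illustration (guarded out) of the triggers described above; each
   construct below causes this function to be re-invoked with global_init_p
   false. The function names are placeholders. */
#if 0
__attribute__((__target__("cpu=power9")))
int example_p9 (int x) { return x + 1; }

#pragma GCC target("altivec")
int example_altivec (int x) { return x + 2; }
#pragma GCC reset_options
#endif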
3720
3721 static bool
3722 rs6000_option_override_internal (bool global_init_p)
3723 {
3724 bool ret = true;
3725
3726 HOST_WIDE_INT set_masks;
3727 HOST_WIDE_INT ignore_masks;
3728 int cpu_index = -1;
3729 int tune_index;
3730 struct cl_target_option *main_target_opt
3731 = ((global_init_p || target_option_default_node == NULL)
3732 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3733
3734 /* Print defaults. */
3735 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3736 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3737
3738 /* Remember the explicit arguments. */
3739 if (global_init_p)
3740 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3741
3742 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3743 library functions, so warn about it. The flag may be useful for
3744 performance studies from time to time though, so don't disable it
3745 entirely. */
3746 if (global_options_set.x_rs6000_alignment_flags
3747 && rs6000_alignment_flags == MASK_ALIGN_POWER
3748 && DEFAULT_ABI == ABI_DARWIN
3749 && TARGET_64BIT)
3750 warning (0, "%qs is not supported for 64-bit Darwin;"
3751 " it is incompatible with the installed C and C++ libraries",
3752 "-malign-power");
3753
3754 /* Numerous experiments show that IRA-based loop pressure
3755 calculation works better for RTL loop invariant motion on targets
3756 with enough (>= 32) registers. It is an expensive optimization,
3757 so it is enabled only when optimizing for peak performance. */
3758 if (optimize >= 3 && global_init_p
3759 && !global_options_set.x_flag_ira_loop_pressure)
3760 flag_ira_loop_pressure = 1;
3761
3762 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3763 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3764 options were already specified. */
3765 if (flag_sanitize & SANITIZE_USER_ADDRESS
3766 && !global_options_set.x_flag_asynchronous_unwind_tables)
3767 flag_asynchronous_unwind_tables = 1;
3768
3769 /* Set the pointer size. */
3770 if (TARGET_64BIT)
3771 {
3772 rs6000_pmode = DImode;
3773 rs6000_pointer_size = 64;
3774 }
3775 else
3776 {
3777 rs6000_pmode = SImode;
3778 rs6000_pointer_size = 32;
3779 }
3780
3781 /* Some OSs don't support saving the high part of 64-bit registers on context
3782 switch. Other OSs don't support saving Altivec registers. On those OSs,
3783 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3784 if the user wants either, the user must explicitly specify them and we
3785 won't interfere with the user's specification. */
3786
3787 set_masks = POWERPC_MASKS;
3788 #ifdef OS_MISSING_POWERPC64
3789 if (OS_MISSING_POWERPC64)
3790 set_masks &= ~OPTION_MASK_POWERPC64;
3791 #endif
3792 #ifdef OS_MISSING_ALTIVEC
3793 if (OS_MISSING_ALTIVEC)
3794 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3795 | OTHER_VSX_VECTOR_MASKS);
3796 #endif
3797
3798 /* Don't override by the processor default if given explicitly. */
3799 set_masks &= ~rs6000_isa_flags_explicit;
3800
3801 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3802 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3803
3804 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3805 the cpu in a target attribute or pragma, but did not specify a tuning
3806 option, use the cpu for the tuning option rather than the option specified
3807 with -mtune on the command line. Process a '--with-cpu' configuration
3808 request as an implicit -mcpu. */
3809 if (rs6000_cpu_index >= 0)
3810 cpu_index = rs6000_cpu_index;
3811 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3812 cpu_index = main_target_opt->x_rs6000_cpu_index;
3813 else if (OPTION_TARGET_CPU_DEFAULT)
3814 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3815
3816 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3817 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3818 with those from the cpu, except for options that were explicitly set. If
3819 we don't have a cpu, do not override the target bits set in
3820 TARGET_DEFAULT. */
3821 if (cpu_index >= 0)
3822 {
3823 rs6000_cpu_index = cpu_index;
3824 rs6000_isa_flags &= ~set_masks;
3825 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3826 & set_masks);
3827 }
3828 else
3829 {
3830 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3831 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3832 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3833 to using rs6000_isa_flags, we need to do the initialization here.
3834
3835 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3836 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3837 HOST_WIDE_INT flags;
3838 if (TARGET_DEFAULT)
3839 flags = TARGET_DEFAULT;
3840 else
3841 {
3842 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3843 const char *default_cpu = (!TARGET_POWERPC64
3844 ? "powerpc"
3845 : (BYTES_BIG_ENDIAN
3846 ? "powerpc64"
3847 : "powerpc64le"));
3848 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3849 flags = processor_target_table[default_cpu_index].target_enable;
3850 }
3851 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3852 }
3853
3854 if (rs6000_tune_index >= 0)
3855 tune_index = rs6000_tune_index;
3856 else if (cpu_index >= 0)
3857 rs6000_tune_index = tune_index = cpu_index;
3858 else
3859 {
3860 size_t i;
3861 enum processor_type tune_proc
3862 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3863
3864 tune_index = -1;
3865 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3866 if (processor_target_table[i].processor == tune_proc)
3867 {
3868 tune_index = i;
3869 break;
3870 }
3871 }
3872
3873 if (cpu_index >= 0)
3874 rs6000_cpu = processor_target_table[cpu_index].processor;
3875 else
3876 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3877
3878 gcc_assert (tune_index >= 0);
3879 rs6000_tune = processor_target_table[tune_index].processor;
3880
3881 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3882 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3883 || rs6000_cpu == PROCESSOR_PPCE5500)
3884 {
3885 if (TARGET_ALTIVEC)
3886 error ("AltiVec not supported in this target");
3887 }
3888
3889 /* If we are optimizing big endian systems for space, use the load/store
3890 multiple instructions. */
3891 if (BYTES_BIG_ENDIAN && optimize_size)
3892 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3893
3894 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3895 because the hardware doesn't support the instructions used in little
3896 endian mode, and causes an alignment trap. The 750 does not cause an
3897 alignment trap (except when the target is unaligned). */
3898
3899 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3900 {
3901 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3902 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3903 warning (0, "%qs is not supported on little endian systems",
3904 "-mmultiple");
3905 }
3906
3907 /* If little-endian, default to -mstrict-align on older processors.
3908 Testing for htm matches power8 and later. */
3909 if (!BYTES_BIG_ENDIAN
3910 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3911 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3912
3913 if (!rs6000_fold_gimple)
3914 fprintf (stderr,
3915 "gimple folding of rs6000 builtins has been disabled.\n");
3916
3917 /* Add some warnings for VSX. */
3918 if (TARGET_VSX)
3919 {
3920 const char *msg = NULL;
3921 if (!TARGET_HARD_FLOAT)
3922 {
3923 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3924 msg = N_("%<-mvsx%> requires hardware floating point");
3925 else
3926 {
3927 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3928 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3929 }
3930 }
3931 else if (TARGET_AVOID_XFORM > 0)
3932 msg = N_("%<-mvsx%> needs indexed addressing");
3933 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3934 & OPTION_MASK_ALTIVEC))
3935 {
3936 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3937 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
3938 else
3939 msg = N_("%<-mno-altivec%> disables vsx");
3940 }
3941
3942 if (msg)
3943 {
3944 warning (0, msg);
3945 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3946 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3947 }
3948 }
3949
3950 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3951 the -mcpu setting to enable options that conflict. */
3952 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3953 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3954 | OPTION_MASK_ALTIVEC
3955 | OPTION_MASK_VSX)) != 0)
3956 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3957 | OPTION_MASK_DIRECT_MOVE)
3958 & ~rs6000_isa_flags_explicit);
3959
3960 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3961 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3962
3963 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
3964 off all of the options that depend on those flags. */
3965 ignore_masks = rs6000_disable_incompatible_switches ();
3966
3967 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3968 unless the user explicitly used the -mno-<option> to disable the code. */
3969 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
3970 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3971 else if (TARGET_P9_MINMAX)
3972 {
3973 if (cpu_index >= 0)
3974 {
3975 if (processor_target_table[cpu_index].processor == PROCESSOR_POWER9)
3976 {
3977 /* legacy behavior: allow -mcpu=power9 with certain
3978 capabilities explicitly disabled. */
3979 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
3980 }
3981 else
3982 error ("power9 target option is incompatible with %<%s=<xxx>%> "
3983 "for <xxx> less than power9", "-mcpu");
3984 }
3985 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
3986 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
3987 & rs6000_isa_flags_explicit))
3988 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
3989 were explicitly cleared. */
3990 error ("%qs incompatible with explicitly disabled options",
3991 "-mpower9-minmax");
3992 else
3993 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
3994 }
3995 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3996 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
3997 else if (TARGET_VSX)
3998 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
3999 else if (TARGET_POPCNTD)
4000 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4001 else if (TARGET_DFP)
4002 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4003 else if (TARGET_CMPB)
4004 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4005 else if (TARGET_FPRND)
4006 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4007 else if (TARGET_POPCNTB)
4008 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4009 else if (TARGET_ALTIVEC)
4010 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4011
4012 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4013 {
4014 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4015 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4016 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4017 }
4018
4019 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4020 {
4021 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4022 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4023 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4024 }
4025
4026 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4027 {
4028 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4029 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4030 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4031 }
4032
4033 if (TARGET_P8_VECTOR && !TARGET_VSX)
4034 {
4035 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4036 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4037 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4038 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4039 {
4040 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4041 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4042 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4043 }
4044 else
4045 {
4046 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4047 not explicit. */
4048 rs6000_isa_flags |= OPTION_MASK_VSX;
4049 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4050 }
4051 }
4052
4053 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4054 {
4055 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4056 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4057 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4058 }
4059
4060 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4061 silently turn off quad memory mode. */
4062 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4063 {
4064 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4065 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
4066
4067 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4068 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
4069
4070 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4071 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4072 }
4073
4074 /* Non-atomic quad memory load/store are disabled for little endian, since
4075 the words are reversed, but atomic operations can still be done by
4076 swapping the words. */
4077 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4078 {
4079 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4080 warning (0, N_("%<-mquad-memory%> is not available in little endian "
4081 "mode"));
4082
4083 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4084 }
4085
4086 /* Assume that if the user asked for normal quad memory instructions, they
4087 want the atomic versions as well, unless they explicitly told us not to
4088 use quad word atomic instructions. */
4089 if (TARGET_QUAD_MEMORY
4090 && !TARGET_QUAD_MEMORY_ATOMIC
4091 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4092 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4093
4094 /* If we can shrink-wrap the TOC register save separately, then use
4095 -msave-toc-indirect unless explicitly disabled. */
4096 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4097 && flag_shrink_wrap_separate
4098 && optimize_function_for_speed_p (cfun))
4099 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4100
4101 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4102 generating power8 instructions. Power9 does not optimize power8 fusion
4103 cases. */
4104 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4105 {
4106 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4107 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4108 else
4109 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4110 }
4111
4112 /* Setting additional fusion flags turns on base fusion. */
4113 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4114 {
4115 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4116 {
4117 if (TARGET_P8_FUSION_SIGN)
4118 error ("%qs requires %qs", "-mpower8-fusion-sign",
4119 "-mpower8-fusion");
4120
4121 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4122 }
4123 else
4124 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4125 }
4126
4127 /* Power8 does not fuse sign-extended loads with the addis. If we are
4128 optimizing at high levels for speed, convert a sign-extended load into a
4129 zero-extending load and an explicit sign extension. */
4130 if (TARGET_P8_FUSION
4131 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4132 && optimize_function_for_speed_p (cfun)
4133 && optimize >= 3)
4134 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4135
4136 /* ISA 3.0 vector instructions include ISA 2.07. */
4137 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4138 {
4139 /* We prefer to not mention undocumented options in
4140 error messages. However, if users have managed to select
4141 power9-vector without selecting power8-vector, they
4142 already know about undocumented flags. */
4143 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4144 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4145 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4146 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4147 {
4148 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4149 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4150 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4151 }
4152 else
4153 {
4154 /* OPTION_MASK_P9_VECTOR is explicit and
4155 OPTION_MASK_P8_VECTOR is not explicit. */
4156 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4157 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4158 }
4159 }
4160
4161 /* Set -mallow-movmisalign explicitly on if we have full ISA 2.07
4162 support. If we only have ISA 2.06 support, and the user did not specify
4163 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4164 but we don't enable the full vectorization support. */
4165 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4166 TARGET_ALLOW_MOVMISALIGN = 1;
4167
4168 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4169 {
4170 if (TARGET_ALLOW_MOVMISALIGN > 0
4171 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4172 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4173
4174 TARGET_ALLOW_MOVMISALIGN = 0;
4175 }
4176
4177 /* Determine when unaligned vector accesses are permitted, and when
4178 they are preferred over masked Altivec loads. Note that if
4179 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4180 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4181 not true. */
4182 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4183 {
4184 if (!TARGET_VSX)
4185 {
4186 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4187 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4188
4189 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4190 }
4191
4192 else if (!TARGET_ALLOW_MOVMISALIGN)
4193 {
4194 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4195 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4196 "-mallow-movmisalign");
4197
4198 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4199 }
4200 }
4201
4202 /* Use long double size to select the appropriate long double. We use
4203 TYPE_PRECISION to differentiate the 3 different long double types. We map
4204 128 into the precision used for TFmode. */
4205 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4206 ? 64
4207 : FLOAT_PRECISION_TFmode);
4208
4209 /* Set long double size before the IEEE 128-bit tests. */
4210 if (!global_options_set.x_rs6000_long_double_type_size)
4211 {
4212 if (main_target_opt != NULL
4213 && (main_target_opt->x_rs6000_long_double_type_size
4214 != default_long_double_size))
4215 error ("target attribute or pragma changes %<long double%> size");
4216 else
4217 rs6000_long_double_type_size = default_long_double_size;
4218 }
4219 else if (rs6000_long_double_type_size == 128)
4220 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4221 else if (global_options_set.x_rs6000_ieeequad)
4222 {
4223 if (global_options.x_rs6000_ieeequad)
4224 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4225 else
4226 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4227 }
4228
4229 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4230 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4231 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4232 those systems will not pick up this default. Warn if the user changes the
4233 default unless -Wno-psabi. */
4234 if (!global_options_set.x_rs6000_ieeequad)
4235 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4236
4237 else
4238 {
4239 if (global_options.x_rs6000_ieeequad
4240 && (!TARGET_POPCNTD || !TARGET_VSX))
4241 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4242
4243 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4244 {
4245 static bool warned_change_long_double;
4246 if (!warned_change_long_double)
4247 {
4248 warned_change_long_double = true;
4249 if (TARGET_IEEEQUAD)
4250 warning (OPT_Wpsabi, "using IEEE extended precision "
4251 "%<long double%>");
4252 else
4253 warning (OPT_Wpsabi, "using IBM extended precision "
4254 "%<long double%>");
4255 }
4256 }
4257 }
4258
4259 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4260 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4261 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4262 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4263 the keyword as well as the type. */
4264 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4265
4266 /* IEEE 128-bit floating point requires VSX support. */
4267 if (TARGET_FLOAT128_KEYWORD)
4268 {
4269 if (!TARGET_VSX)
4270 {
4271 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4272 error ("%qs requires VSX support", "%<-mfloat128%>");
4273
4274 TARGET_FLOAT128_TYPE = 0;
4275 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4276 | OPTION_MASK_FLOAT128_HW);
4277 }
4278 else if (!TARGET_FLOAT128_TYPE)
4279 {
4280 TARGET_FLOAT128_TYPE = 1;
4281 warning (0, "The %<-mfloat128%> option may not be fully supported");
4282 }
4283 }
4284
4285 /* Enable the __float128 keyword under Linux by default. */
4286 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4287 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4288 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4289
4290 /* If we are supporting the float128 type and have full ISA 3.0 support,
4291 enable -mfloat128-hardware by default. However, don't override an
4292 explicit -mfloat128-hardware setting. 64-bit mode is needed because
4293 sometimes the compiler wants to put things in an integer container,
4294 and if we don't have __int128 support, it is impossible. */
4295 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4296 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4297 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4298 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4299
4300 if (TARGET_FLOAT128_HW
4301 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4302 {
4303 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4304 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4305
4306 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4307 }
4308
4309 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4310 {
4311 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4312 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4313
4314 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4315 }
4316
4317 /* Print the options after updating the defaults. */
4318 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4319 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4320
4321 /* E500mc does "better" if we inline more aggressively. Respect the
4322 user's opinion, though. */
4323 if (rs6000_block_move_inline_limit == 0
4324 && (rs6000_tune == PROCESSOR_PPCE500MC
4325 || rs6000_tune == PROCESSOR_PPCE500MC64
4326 || rs6000_tune == PROCESSOR_PPCE5500
4327 || rs6000_tune == PROCESSOR_PPCE6500))
4328 rs6000_block_move_inline_limit = 128;
4329
4330 /* store_one_arg depends on expand_block_move to handle at least the
4331 size of reg_parm_stack_space. */
4332 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4333 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4334
4335 if (global_init_p)
4336 {
4337 /* If the appropriate debug option is enabled, replace the target hooks
4338 with debug versions that call the real version and then prints
4339 debugging information. */
4340 if (TARGET_DEBUG_COST)
4341 {
4342 targetm.rtx_costs = rs6000_debug_rtx_costs;
4343 targetm.address_cost = rs6000_debug_address_cost;
4344 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4345 }
4346
4347 if (TARGET_DEBUG_ADDR)
4348 {
4349 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4350 targetm.legitimize_address = rs6000_debug_legitimize_address;
4351 rs6000_secondary_reload_class_ptr
4352 = rs6000_debug_secondary_reload_class;
4353 targetm.secondary_memory_needed
4354 = rs6000_debug_secondary_memory_needed;
4355 targetm.can_change_mode_class
4356 = rs6000_debug_can_change_mode_class;
4357 rs6000_preferred_reload_class_ptr
4358 = rs6000_debug_preferred_reload_class;
4359 rs6000_mode_dependent_address_ptr
4360 = rs6000_debug_mode_dependent_address;
4361 }
4362
4363 if (rs6000_veclibabi_name)
4364 {
4365 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4366 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4367 else
4368 {
4369 error ("unknown vectorization library ABI type (%qs) for "
4370 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4371 ret = false;
4372 }
4373 }
4374 }
4375
4376 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4377 target attribute or pragma which automatically enables both options,
4378 unless the altivec ABI was set. This is set by default for 64-bit, but
4379 not for 32-bit. */
4380 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4381 {
4382 TARGET_FLOAT128_TYPE = 0;
4383 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4384 | OPTION_MASK_FLOAT128_KEYWORD)
4385 & ~rs6000_isa_flags_explicit);
4386 }
4387
4388 /* Enable Altivec ABI for AIX -maltivec. */
4389 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4390 {
4391 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4392 error ("target attribute or pragma changes AltiVec ABI");
4393 else
4394 rs6000_altivec_abi = 1;
4395 }
4396
4397 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4398 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4399 be explicitly overridden in either case. */
4400 if (TARGET_ELF)
4401 {
4402 if (!global_options_set.x_rs6000_altivec_abi
4403 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4404 {
4405 if (main_target_opt != NULL &&
4406 !main_target_opt->x_rs6000_altivec_abi)
4407 error ("target attribute or pragma changes AltiVec ABI");
4408 else
4409 rs6000_altivec_abi = 1;
4410 }
4411 }
4412
4413 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4414 So far, the only darwin64 targets are also Mach-O. */
4415 if (TARGET_MACHO
4416 && DEFAULT_ABI == ABI_DARWIN
4417 && TARGET_64BIT)
4418 {
4419 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4420 error ("target attribute or pragma changes darwin64 ABI");
4421 else
4422 {
4423 rs6000_darwin64_abi = 1;
4424 /* Default to natural alignment, for better performance. */
4425 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4426 }
4427 }
4428
4429 /* Place FP constants in the constant pool instead of TOC
4430 if section anchors enabled. */
4431 if (flag_section_anchors
4432 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4433 TARGET_NO_FP_IN_TOC = 1;
4434
4435 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4436 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4437
4438 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4439 SUBTARGET_OVERRIDE_OPTIONS;
4440 #endif
4441 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4442 SUBSUBTARGET_OVERRIDE_OPTIONS;
4443 #endif
4444 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4445 SUB3TARGET_OVERRIDE_OPTIONS;
4446 #endif
4447
4448 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4449 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4450
4451 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4452 && rs6000_tune != PROCESSOR_POWER5
4453 && rs6000_tune != PROCESSOR_POWER6
4454 && rs6000_tune != PROCESSOR_POWER7
4455 && rs6000_tune != PROCESSOR_POWER8
4456 && rs6000_tune != PROCESSOR_POWER9
4457 && rs6000_tune != PROCESSOR_PPCA2
4458 && rs6000_tune != PROCESSOR_CELL
4459 && rs6000_tune != PROCESSOR_PPC476);
4460 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4461 || rs6000_tune == PROCESSOR_POWER5
4462 || rs6000_tune == PROCESSOR_POWER7
4463 || rs6000_tune == PROCESSOR_POWER8);
4464 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4465 || rs6000_tune == PROCESSOR_POWER5
4466 || rs6000_tune == PROCESSOR_POWER6
4467 || rs6000_tune == PROCESSOR_POWER7
4468 || rs6000_tune == PROCESSOR_POWER8
4469 || rs6000_tune == PROCESSOR_POWER9
4470 || rs6000_tune == PROCESSOR_PPCE500MC
4471 || rs6000_tune == PROCESSOR_PPCE500MC64
4472 || rs6000_tune == PROCESSOR_PPCE5500
4473 || rs6000_tune == PROCESSOR_PPCE6500);
4474
4475 /* Allow debug switches to override the above settings. These are set to -1
4476 in rs6000.opt to indicate the user hasn't directly set the switch. */
4477 if (TARGET_ALWAYS_HINT >= 0)
4478 rs6000_always_hint = TARGET_ALWAYS_HINT;
4479
4480 if (TARGET_SCHED_GROUPS >= 0)
4481 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4482
4483 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4484 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4485
4486 rs6000_sched_restricted_insns_priority
4487 = (rs6000_sched_groups ? 1 : 0);
4488
4489 /* Handle -msched-costly-dep option. */
4490 rs6000_sched_costly_dep
4491 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4492
4493 if (rs6000_sched_costly_dep_str)
4494 {
4495 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4496 rs6000_sched_costly_dep = no_dep_costly;
4497 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4498 rs6000_sched_costly_dep = all_deps_costly;
4499 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4500 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4501 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4502 rs6000_sched_costly_dep = store_to_load_dep_costly;
4503 else
4504 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4505 atoi (rs6000_sched_costly_dep_str));
4506 }
4507
4508 /* Handle -minsert-sched-nops option. */
4509 rs6000_sched_insert_nops
4510 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4511
4512 if (rs6000_sched_insert_nops_str)
4513 {
4514 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4515 rs6000_sched_insert_nops = sched_finish_none;
4516 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4517 rs6000_sched_insert_nops = sched_finish_pad_groups;
4518 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4519 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4520 else
4521 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4522 atoi (rs6000_sched_insert_nops_str));
4523 }
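
/* Usage sketch for the two scheduling options handled above. The keyword
   spellings come from the strcmp chains; the numeric forms are assumptions
   based on the atoi fallbacks:

     -msched-costly-dep=true_store_to_load
     -msched-costly-dep=20 (treat dependences costing >= 20 as costly)
     -minsert-sched-nops=regroup_exact
     -minsert-sched-nops=3 (pad with up to 3 nops) */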
4524
4525 /* Handle stack protector */
4526 if (!global_options_set.x_rs6000_stack_protector_guard)
4527 #ifdef TARGET_THREAD_SSP_OFFSET
4528 rs6000_stack_protector_guard = SSP_TLS;
4529 #else
4530 rs6000_stack_protector_guard = SSP_GLOBAL;
4531 #endif
4532
4533 #ifdef TARGET_THREAD_SSP_OFFSET
4534 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4535 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4536 #endif
4537
4538 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4539 {
4540 char *endp;
4541 const char *str = rs6000_stack_protector_guard_offset_str;
4542
4543 errno = 0;
4544 long offset = strtol (str, &endp, 0);
4545 if (!*str || *endp || errno)
4546 error ("%qs is not a valid number in %qs", str,
4547 "-mstack-protector-guard-offset=");
4548
4549 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4550 || (TARGET_64BIT && (offset & 3)))
4551 error ("%qs is not a valid offset in %qs", str,
4552 "-mstack-protector-guard-offset=");
4553
4554 rs6000_stack_protector_guard_offset = offset;
4555 }
4556
4557 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4558 {
4559 const char *str = rs6000_stack_protector_guard_reg_str;
4560 int reg = decode_reg_name (str);
4561
4562 if (!IN_RANGE (reg, 1, 31))
4563 error ("%qs is not a valid base register in %qs", str,
4564 "-mstack-protector-guard-reg=");
4565
4566 rs6000_stack_protector_guard_reg = reg;
4567 }
4568
4569 if (rs6000_stack_protector_guard == SSP_TLS
4570 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4571 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
4572
4573 if (global_init_p)
4574 {
4575 #ifdef TARGET_REGNAMES
4576 /* If the user desires alternate register names, copy in the
4577 alternate names now. */
4578 if (TARGET_REGNAMES)
4579 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4580 #endif
4581
4582 /* Set aix_struct_return last, after the ABI is determined.
4583 If -maix-struct-return or -msvr4-struct-return was explicitly
4584 used, don't override with the ABI default. */
4585 if (!global_options_set.x_aix_struct_return)
4586 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4587
4588 #if 0
4589 /* IBM XL compiler defaults to unsigned bitfields. */
4590 if (TARGET_XL_COMPAT)
4591 flag_signed_bitfields = 0;
4592 #endif
4593
4594 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4595 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4596
4597 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4598
4599 /* We can only guarantee the availability of DI pseudo-ops when
4600 assembling for 64-bit targets. */
4601 if (!TARGET_64BIT)
4602 {
4603 targetm.asm_out.aligned_op.di = NULL;
4604 targetm.asm_out.unaligned_op.di = NULL;
4605 }
4606
4607
4608 /* Set branch target alignment, if not optimizing for size. */
4609 if (!optimize_size)
4610 {
4611 /* Cell wants 8-byte alignment for dual issue. Titan wants 8-byte
4612 alignment to avoid misprediction by the branch predictor. */
4613 if (rs6000_tune == PROCESSOR_TITAN
4614 || rs6000_tune == PROCESSOR_CELL)
4615 {
4616 if (flag_align_functions && !str_align_functions)
4617 str_align_functions = "8";
4618 if (flag_align_jumps && !str_align_jumps)
4619 str_align_jumps = "8";
4620 if (flag_align_loops && !str_align_loops)
4621 str_align_loops = "8";
4622 }
4623 if (rs6000_align_branch_targets)
4624 {
4625 if (flag_align_functions && !str_align_functions)
4626 str_align_functions = "16";
4627 if (flag_align_jumps && !str_align_jumps)
4628 str_align_jumps = "16";
4629 if (flag_align_loops && !str_align_loops)
4630 {
4631 can_override_loop_align = 1;
4632 str_align_loops = "16";
4633 }
4634 }
4635
4636 if (flag_align_jumps && !str_align_jumps)
4637 str_align_jumps = "16";
4638 if (flag_align_loops && !str_align_loops)
4639 str_align_loops = "16";
4640 }
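
/* Effect sketch: when tuning for a processor in the
   rs6000_align_branch_targets set (e.g. power7) and no explicit -falign-*
   options were given, the settings above are equivalent to
   -falign-functions=16 -falign-jumps=16 -falign-loops=16. */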
4641
4642 /* Arrange to save and restore machine status around nested functions. */
4643 init_machine_status = rs6000_init_machine_status;
4644
4645 /* We should always be splitting complex arguments, but we can't break
4646 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4647 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4648 targetm.calls.split_complex_arg = NULL;
4649
4650 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4651 if (DEFAULT_ABI == ABI_AIX)
4652 targetm.calls.custom_function_descriptors = 0;
4653 }
4654
4655 /* Initialize rs6000_cost with the appropriate target costs. */
4656 if (optimize_size)
4657 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4658 else
4659 switch (rs6000_tune)
4660 {
4661 case PROCESSOR_RS64A:
4662 rs6000_cost = &rs64a_cost;
4663 break;
4664
4665 case PROCESSOR_MPCCORE:
4666 rs6000_cost = &mpccore_cost;
4667 break;
4668
4669 case PROCESSOR_PPC403:
4670 rs6000_cost = &ppc403_cost;
4671 break;
4672
4673 case PROCESSOR_PPC405:
4674 rs6000_cost = &ppc405_cost;
4675 break;
4676
4677 case PROCESSOR_PPC440:
4678 rs6000_cost = &ppc440_cost;
4679 break;
4680
4681 case PROCESSOR_PPC476:
4682 rs6000_cost = &ppc476_cost;
4683 break;
4684
4685 case PROCESSOR_PPC601:
4686 rs6000_cost = &ppc601_cost;
4687 break;
4688
4689 case PROCESSOR_PPC603:
4690 rs6000_cost = &ppc603_cost;
4691 break;
4692
4693 case PROCESSOR_PPC604:
4694 rs6000_cost = &ppc604_cost;
4695 break;
4696
4697 case PROCESSOR_PPC604e:
4698 rs6000_cost = &ppc604e_cost;
4699 break;
4700
4701 case PROCESSOR_PPC620:
4702 rs6000_cost = &ppc620_cost;
4703 break;
4704
4705 case PROCESSOR_PPC630:
4706 rs6000_cost = &ppc630_cost;
4707 break;
4708
4709 case PROCESSOR_CELL:
4710 rs6000_cost = &ppccell_cost;
4711 break;
4712
4713 case PROCESSOR_PPC750:
4714 case PROCESSOR_PPC7400:
4715 rs6000_cost = &ppc750_cost;
4716 break;
4717
4718 case PROCESSOR_PPC7450:
4719 rs6000_cost = &ppc7450_cost;
4720 break;
4721
4722 case PROCESSOR_PPC8540:
4723 case PROCESSOR_PPC8548:
4724 rs6000_cost = &ppc8540_cost;
4725 break;
4726
4727 case PROCESSOR_PPCE300C2:
4728 case PROCESSOR_PPCE300C3:
4729 rs6000_cost = &ppce300c2c3_cost;
4730 break;
4731
4732 case PROCESSOR_PPCE500MC:
4733 rs6000_cost = &ppce500mc_cost;
4734 break;
4735
4736 case PROCESSOR_PPCE500MC64:
4737 rs6000_cost = &ppce500mc64_cost;
4738 break;
4739
4740 case PROCESSOR_PPCE5500:
4741 rs6000_cost = &ppce5500_cost;
4742 break;
4743
4744 case PROCESSOR_PPCE6500:
4745 rs6000_cost = &ppce6500_cost;
4746 break;
4747
4748 case PROCESSOR_TITAN:
4749 rs6000_cost = &titan_cost;
4750 break;
4751
4752 case PROCESSOR_POWER4:
4753 case PROCESSOR_POWER5:
4754 rs6000_cost = &power4_cost;
4755 break;
4756
4757 case PROCESSOR_POWER6:
4758 rs6000_cost = &power6_cost;
4759 break;
4760
4761 case PROCESSOR_POWER7:
4762 rs6000_cost = &power7_cost;
4763 break;
4764
4765 case PROCESSOR_POWER8:
4766 rs6000_cost = &power8_cost;
4767 break;
4768
4769 case PROCESSOR_POWER9:
4770 rs6000_cost = &power9_cost;
4771 break;
4772
4773 case PROCESSOR_PPCA2:
4774 rs6000_cost = &ppca2_cost;
4775 break;
4776
4777 default:
4778 gcc_unreachable ();
4779 }
4780
4781 if (global_init_p)
4782 {
4783 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4784 rs6000_cost->simultaneous_prefetches,
4785 global_options.x_param_values,
4786 global_options_set.x_param_values);
4787 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4788 global_options.x_param_values,
4789 global_options_set.x_param_values);
4790 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4791 rs6000_cost->cache_line_size,
4792 global_options.x_param_values,
4793 global_options_set.x_param_values);
4794 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4795 global_options.x_param_values,
4796 global_options_set.x_param_values);
4797
4798 /* Increase loop peeling limits based on performance analysis. */
4799 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4800 global_options.x_param_values,
4801 global_options_set.x_param_values);
4802 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4803 global_options.x_param_values,
4804 global_options_set.x_param_values);
4805
4806 /* Use the 'model' -fsched-pressure algorithm by default. */
4807 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4808 SCHED_PRESSURE_MODEL,
4809 global_options.x_param_values,
4810 global_options_set.x_param_values);
4811
4812 /* If using typedef char *va_list, signal that
4813 __builtin_va_start (&ap, 0) can be optimized to
4814 ap = __builtin_next_arg (0). */
4815 if (DEFAULT_ABI != ABI_V4)
4816 targetm.expand_builtin_va_start = NULL;
4817 }
4818
4819 /* If not explicitly specified via option, decide whether to generate indexed
4820 load/store instructions. A value of -1 indicates that the
4821 initial value of this variable has not been overwritten. During
4822 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4823 if (TARGET_AVOID_XFORM == -1)
4824 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4825 DERAT mispredict penalty.  However, the LVE and STVE altivec instructions
4826 need indexed accesses, and the type used is the scalar type of the element
4827 being loaded or stored. */
4828 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4829 && !TARGET_ALTIVEC);
4830
4831 /* Set the -mrecip options. */
4832 if (rs6000_recip_name)
4833 {
4834 char *p = ASTRDUP (rs6000_recip_name);
4835 char *q;
4836 unsigned int mask, i;
4837 bool invert;
4838
4839 while ((q = strtok (p, ",")) != NULL)
4840 {
4841 p = NULL;
4842 if (*q == '!')
4843 {
4844 invert = true;
4845 q++;
4846 }
4847 else
4848 invert = false;
4849
4850 if (!strcmp (q, "default"))
4851 mask = ((TARGET_RECIP_PRECISION)
4852 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4853 else
4854 {
4855 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4856 if (!strcmp (q, recip_options[i].string))
4857 {
4858 mask = recip_options[i].mask;
4859 break;
4860 }
4861
4862 if (i == ARRAY_SIZE (recip_options))
4863 {
4864 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4865 invert = false;
4866 mask = 0;
4867 ret = false;
4868 }
4869 }
4870
4871 if (invert)
4872 rs6000_recip_control &= ~mask;
4873 else
4874 rs6000_recip_control |= mask;
4875 }
4876 }
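
/* For example, given -mrecip=rsqrt,!div (the option names come from the
   recip_options[] table defined earlier in this file), the loop above first
   ORs the mask bits for "rsqrt" into rs6000_recip_control and then clears
   the bits for "div"; passing NULL to strtok on subsequent calls continues
   scanning the same comma-separated string.  */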
4877
4878 /* Set the builtin mask based on the options in use that can affect which
4879 builtins are enabled.  In the past we used target_flags, but we've run out
4880 of bits, and some options are no longer in target_flags. */
4881 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4882 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4883 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4884 rs6000_builtin_mask);
4885
4886 /* Initialize all of the registers. */
4887 rs6000_init_hard_regno_mode_ok (global_init_p);
4888
4889 /* Save the initial options in case the user uses function-specific options.  */
4890 if (global_init_p)
4891 target_option_default_node = target_option_current_node
4892 = build_target_option_node (&global_options);
4893
4894 /* If not explicitly specified via option, decide whether to generate the
4895 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4896 if (TARGET_LINK_STACK == -1)
4897 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4898
4899 /* Deprecate use of -mno-speculate-indirect-jumps. */
4900 if (!rs6000_speculate_indirect_jumps)
4901 warning (0, "%qs is deprecated and not recommended in any circumstances",
4902 "-mno-speculate-indirect-jumps");
4903
4904 return ret;
4905 }
4906
4907 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4908 define the target cpu type. */
4909
4910 static void
4911 rs6000_option_override (void)
4912 {
4913 (void) rs6000_option_override_internal (true);
4914 }
4915
4916 \f
4917 /* Implement targetm.vectorize.builtin_mask_for_load. */
4918 static tree
4919 rs6000_builtin_mask_for_load (void)
4920 {
4921 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4922 if ((TARGET_ALTIVEC && !TARGET_VSX)
4923 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4924 return altivec_builtin_mask_for_load;
4925 else
4926 return 0;
4927 }
4928
4929 /* Implement LOOP_ALIGN. */
4930 align_flags
4931 rs6000_loop_align (rtx label)
4932 {
4933 basic_block bb;
4934 int ninsns;
4935
4936 /* Don't override loop alignment if -falign-loops was specified. */
4937 if (!can_override_loop_align)
4938 return align_loops;
4939
4940 bb = BLOCK_FOR_INSN (label);
4941 ninsns = num_loop_insns (bb->loop_father);
4942
4943 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4944 if (ninsns > 4 && ninsns <= 8
4945 && (rs6000_tune == PROCESSOR_POWER4
4946 || rs6000_tune == PROCESSOR_POWER5
4947 || rs6000_tune == PROCESSOR_POWER6
4948 || rs6000_tune == PROCESSOR_POWER7
4949 || rs6000_tune == PROCESSOR_POWER8))
4950 return align_flags (5);
4951 else
4952 return align_loops;
4953 }
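
/* For example, with -mtune=power8 (and -falign-loops not given explicitly),
   a 6-insn loop gets align_flags (5), i.e. 2**5 = 32-byte alignment, so the
   whole loop body fits in a single 32-byte icache sector; loops of 4 or
   fewer insns, or more than 8, keep the -falign-loops default.  */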
4954
4955 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4956 after some number of iterations.  This routine does not determine
4957 how many iterations are required to reach the desired alignment. */
4958
4959 static bool
4960 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4961 {
4962 if (is_packed)
4963 return false;
4964
4965 if (TARGET_32BIT)
4966 {
4967 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4968 return true;
4969
4970 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4971 return true;
4972
4973 return false;
4974 }
4975 else
4976 {
4977 if (TARGET_MACHO)
4978 return false;
4979
4980 /* Assume that all other types are naturally aligned.  CHECKME! */
4981 return true;
4982 }
4983 }
4984
4985 /* Return true if the vector misalignment factor is supported by the
4986 target. */
4987 static bool
4988 rs6000_builtin_support_vector_misalignment (machine_mode mode,
4989 const_tree type,
4990 int misalignment,
4991 bool is_packed)
4992 {
4993 if (TARGET_VSX)
4994 {
4995 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4996 return true;
4997
4998 /* Return false if the movmisalign pattern is not supported for this mode. */
4999 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5000 return false;
5001
5002 if (misalignment == -1)
5003 {
5004 /* The misalignment factor is unknown at compile time, but we know the
5005 access is word aligned. */
5006 if (rs6000_vector_alignment_reachable (type, is_packed))
5007 {
5008 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5009
5010 if (element_size == 64 || element_size == 32)
5011 return true;
5012 }
5013
5014 return false;
5015 }
5016
5017 /* VSX supports word-aligned vector accesses. */
5018 if (misalignment % 4 == 0)
5019 return true;
5020 }
5021 return false;
5022 }
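
/* For example, without TARGET_EFFICIENT_UNALIGNED_VSX, a V4SF access at
   byte offset 8 from a 16-byte boundary (misalignment 8) is supported
   because 8 % 4 == 0, while an access with misalignment 3 is rejected
   and the vectorizer typically has to peel or version the loop instead.  */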
5023
5024 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5025 static int
5026 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5027 tree vectype, int misalign)
5028 {
5029 unsigned elements;
5030 tree elem_type;
5031
5032 switch (type_of_cost)
5033 {
5034 case scalar_stmt:
5035 case scalar_load:
5036 case scalar_store:
5037 case vector_stmt:
5038 case vector_load:
5039 case vector_store:
5040 case vec_to_scalar:
5041 case scalar_to_vec:
5042 case cond_branch_not_taken:
5043 return 1;
5044
5045 case vec_perm:
5046 if (TARGET_VSX)
5047 return 3;
5048 else
5049 return 1;
5050
5051 case vec_promote_demote:
5052 if (TARGET_VSX)
5053 return 4;
5054 else
5055 return 1;
5056
5057 case cond_branch_taken:
5058 return 3;
5059
5060 case unaligned_load:
5061 case vector_gather_load:
5062 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5063 return 1;
5064
5065 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5066 {
5067 elements = TYPE_VECTOR_SUBPARTS (vectype);
5068 if (elements == 2)
5069 /* Double word aligned. */
5070 return 2;
5071
5072 if (elements == 4)
5073 {
5074 switch (misalign)
5075 {
5076 case 8:
5077 /* Double word aligned. */
5078 return 2;
5079
5080 case -1:
5081 /* Unknown misalignment. */
5082 case 4:
5083 case 12:
5084 /* Word aligned. */
5085 return 22;
5086
5087 default:
5088 gcc_unreachable ();
5089 }
5090 }
5091 }
5092
5093 if (TARGET_ALTIVEC)
5094 /* Misaligned loads are not supported. */
5095 gcc_unreachable ();
5096
5097 return 2;
5098
5099 case unaligned_store:
5100 case vector_scatter_store:
5101 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5102 return 1;
5103
5104 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5105 {
5106 elements = TYPE_VECTOR_SUBPARTS (vectype);
5107 if (elements == 2)
5108 /* Double word aligned. */
5109 return 2;
5110
5111 if (elements == 4)
5112 {
5113 switch (misalign)
5114 {
5115 case 8:
5116 /* Double word aligned. */
5117 return 2;
5118
5119 case -1:
5120 /* Unknown misalignment. */
5121 case 4:
5122 case 12:
5123 /* Word aligned. */
5124 return 23;
5125
5126 default:
5127 gcc_unreachable ();
5128 }
5129 }
5130 }
5131
5132 if (TARGET_ALTIVEC)
5133 /* Misaligned stores are not supported. */
5134 gcc_unreachable ();
5135
5136 return 2;
5137
5138 case vec_construct:
5139 /* This is a rough approximation assuming non-constant elements
5140 constructed into a vector via element insertion. FIXME:
5141 vec_construct is not granular enough for uniformly good
5142 decisions. If the initialization is a splat, this is
5143 cheaper than we estimate. Improve this someday. */
5144 elem_type = TREE_TYPE (vectype);
5145 /* 32-bit vectors loaded into registers are stored as double
5146 precision, so we need 2 permutes, 2 converts, and 1 merge
5147 to construct a vector of short floats from them. */
5148 if (SCALAR_FLOAT_TYPE_P (elem_type)
5149 && TYPE_PRECISION (elem_type) == 32)
5150 return 5;
5151 /* On POWER9, integer vector types are built up in GPRs and then
5152 use a direct move (2 cycles). For POWER8 this is even worse,
5153 as we need two direct moves and a merge, and the direct moves
5154 are five cycles. */
5155 else if (INTEGRAL_TYPE_P (elem_type))
5156 {
5157 if (TARGET_P9_VECTOR)
5158 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5159 else
5160 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5161 }
5162 else
5163 /* V2DFmode doesn't need a direct move. */
5164 return 2;
5165
5166 default:
5167 gcc_unreachable ();
5168 }
5169 }
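
/* Worked examples of the costs above: a vec_construct of V4SI from four
   variable elements costs 4 - 1 + 2 = 5 with TARGET_P9_VECTOR but
   4 - 1 + 5 = 8 before that, reflecting the cheaper POWER9 direct moves;
   an unaligned_load of a 4-element vector with misalign 8 costs 2
   (double-word aligned), while misalign 4, 12 or unknown (-1) costs 22,
   assuming VSX with -mallow-movmisalign but no efficient unaligned
   support.  */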
5170
5171 /* Implement targetm.vectorize.preferred_simd_mode. */
5172
5173 static machine_mode
5174 rs6000_preferred_simd_mode (scalar_mode mode)
5175 {
5176 if (TARGET_VSX)
5177 switch (mode)
5178 {
5179 case E_DFmode:
5180 return V2DFmode;
5181 default:;
5182 }
5183 if (TARGET_ALTIVEC || TARGET_VSX)
5184 switch (mode)
5185 {
5186 case E_SFmode:
5187 return V4SFmode;
5188 case E_TImode:
5189 return V1TImode;
5190 case E_DImode:
5191 return V2DImode;
5192 case E_SImode:
5193 return V4SImode;
5194 case E_HImode:
5195 return V8HImode;
5196 case E_QImode:
5197 return V16QImode;
5198 default:;
5199 }
5200 return word_mode;
5201 }
5202
5203 typedef struct _rs6000_cost_data
5204 {
5205 struct loop *loop_info;
5206 unsigned cost[3];
5207 } rs6000_cost_data;
5208
5209 /* Test for likely overcommitment of vector hardware resources. If a
5210 loop iteration is relatively large, and too large a percentage of
5211 instructions in the loop are vectorized, the cost model may not
5212 adequately reflect delays from unavailable vector resources.
5213 Penalize the loop body cost for this case. */
5214
5215 static void
5216 rs6000_density_test (rs6000_cost_data *data)
5217 {
5218 const int DENSITY_PCT_THRESHOLD = 85;
5219 const int DENSITY_SIZE_THRESHOLD = 70;
5220 const int DENSITY_PENALTY = 10;
5221 struct loop *loop = data->loop_info;
5222 basic_block *bbs = get_loop_body (loop);
5223 int nbbs = loop->num_nodes;
5224 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5225 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5226 int i, density_pct;
5227
5228 for (i = 0; i < nbbs; i++)
5229 {
5230 basic_block bb = bbs[i];
5231 gimple_stmt_iterator gsi;
5232
5233 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5234 {
5235 gimple *stmt = gsi_stmt (gsi);
5236 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5237
5238 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5239 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5240 not_vec_cost++;
5241 }
5242 }
5243
5244 free (bbs);
5245 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5246
5247 if (density_pct > DENSITY_PCT_THRESHOLD
5248 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5249 {
5250 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5251 if (dump_enabled_p ())
5252 dump_printf_loc (MSG_NOTE, vect_location,
5253 "density %d%%, cost %d exceeds threshold, penalizing "
5254 "loop body cost by %d%%", density_pct,
5255 vec_cost + not_vec_cost, DENSITY_PENALTY);
5256 }
5257 }
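
/* For example, if the vectorized body cost is 90 and ten statements in
   the loop are not vectorized, the density is 90 * 100 / (90 + 10) = 90%,
   which exceeds DENSITY_PCT_THRESHOLD, and the total of 100 exceeds
   DENSITY_SIZE_THRESHOLD, so the body cost is increased by 10% to 99.  */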
5258
5259 /* Implement targetm.vectorize.init_cost. */
5260
5261 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5262 instruction is needed by the vectorization. */
5263 static bool rs6000_vect_nonmem;
5264
5265 static void *
5266 rs6000_init_cost (struct loop *loop_info)
5267 {
5268 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5269 data->loop_info = loop_info;
5270 data->cost[vect_prologue] = 0;
5271 data->cost[vect_body] = 0;
5272 data->cost[vect_epilogue] = 0;
5273 rs6000_vect_nonmem = false;
5274 return data;
5275 }
5276
5277 /* Implement targetm.vectorize.add_stmt_cost. */
5278
5279 static unsigned
5280 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5281 struct _stmt_vec_info *stmt_info, int misalign,
5282 enum vect_cost_model_location where)
5283 {
5284 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5285 unsigned retval = 0;
5286
5287 if (flag_vect_cost_model)
5288 {
5289 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5290 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5291 misalign);
5292 /* Statements in an inner loop relative to the loop being
5293 vectorized are weighted more heavily. The value here is
5294 arbitrary and could potentially be improved with analysis. */
5295 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5296 count *= 50; /* FIXME. */
5297
5298 retval = (unsigned) (count * stmt_cost);
5299 cost_data->cost[where] += retval;
5300
5301 /* Check whether we're doing something other than just a copy loop.
5302 Not all such loops may be profitably vectorized; see
5303 rs6000_finish_cost. */
5304 if ((kind == vec_to_scalar || kind == vec_perm
5305 || kind == vec_promote_demote || kind == vec_construct
5306 || kind == scalar_to_vec)
5307 || (where == vect_body && kind == vector_stmt))
5308 rs6000_vect_nonmem = true;
5309 }
5310
5311 return retval;
5312 }
5313
5314 /* Implement targetm.vectorize.finish_cost. */
5315
5316 static void
5317 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5318 unsigned *body_cost, unsigned *epilogue_cost)
5319 {
5320 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5321
5322 if (cost_data->loop_info)
5323 rs6000_density_test (cost_data);
5324
5325 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5326 that require versioning for any reason. The vectorization is at
5327 best a wash inside the loop, and the versioning checks make
5328 profitability highly unlikely and potentially quite harmful. */
5329 if (cost_data->loop_info)
5330 {
5331 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5332 if (!rs6000_vect_nonmem
5333 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5334 && LOOP_REQUIRES_VERSIONING (vec_info))
5335 cost_data->cost[vect_body] += 10000;
5336 }
5337
5338 *prologue_cost = cost_data->cost[vect_prologue];
5339 *body_cost = cost_data->cost[vect_body];
5340 *epilogue_cost = cost_data->cost[vect_epilogue];
5341 }
5342
5343 /* Implement targetm.vectorize.destroy_cost_data. */
5344
5345 static void
5346 rs6000_destroy_cost_data (void *data)
5347 {
5348 free (data);
5349 }
5350
5351 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5352 library with vectorized intrinsics. */
5353
5354 static tree
5355 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5356 tree type_in)
5357 {
5358 char name[32];
5359 const char *suffix = NULL;
5360 tree fntype, new_fndecl, bdecl = NULL_TREE;
5361 int n_args = 1;
5362 const char *bname;
5363 machine_mode el_mode, in_mode;
5364 int n, in_n;
5365
5366 /* Libmass is suitable only for unsafe math, as it does not correctly
5367 support parts of IEEE (such as denormals) with the required precision.
5368 Only support it if we have VSX, so we can use the SIMD d2 or f4 functions.
5369 XXX: Add variable length support. */
5370 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5371 return NULL_TREE;
5372
5373 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5374 n = TYPE_VECTOR_SUBPARTS (type_out);
5375 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5376 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5377 if (el_mode != in_mode
5378 || n != in_n)
5379 return NULL_TREE;
5380
5381 switch (fn)
5382 {
5383 CASE_CFN_ATAN2:
5384 CASE_CFN_HYPOT:
5385 CASE_CFN_POW:
5386 n_args = 2;
5387 gcc_fallthrough ();
5388
5389 CASE_CFN_ACOS:
5390 CASE_CFN_ACOSH:
5391 CASE_CFN_ASIN:
5392 CASE_CFN_ASINH:
5393 CASE_CFN_ATAN:
5394 CASE_CFN_ATANH:
5395 CASE_CFN_CBRT:
5396 CASE_CFN_COS:
5397 CASE_CFN_COSH:
5398 CASE_CFN_ERF:
5399 CASE_CFN_ERFC:
5400 CASE_CFN_EXP2:
5401 CASE_CFN_EXP:
5402 CASE_CFN_EXPM1:
5403 CASE_CFN_LGAMMA:
5404 CASE_CFN_LOG10:
5405 CASE_CFN_LOG1P:
5406 CASE_CFN_LOG2:
5407 CASE_CFN_LOG:
5408 CASE_CFN_SIN:
5409 CASE_CFN_SINH:
5410 CASE_CFN_SQRT:
5411 CASE_CFN_TAN:
5412 CASE_CFN_TANH:
5413 if (el_mode == DFmode && n == 2)
5414 {
5415 bdecl = mathfn_built_in (double_type_node, fn);
5416 suffix = "d2"; /* pow -> powd2 */
5417 }
5418 else if (el_mode == SFmode && n == 4)
5419 {
5420 bdecl = mathfn_built_in (float_type_node, fn);
5421 suffix = "4"; /* powf -> powf4 */
5422 }
5423 else
5424 return NULL_TREE;
5425 if (!bdecl)
5426 return NULL_TREE;
5427 break;
5428
5429 default:
5430 return NULL_TREE;
5431 }
5432
5433 gcc_assert (suffix != NULL);
5434 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5435 if (!bname)
5436 return NULL_TREE;
5437
5438 strcpy (name, bname + sizeof ("__builtin_") - 1);
5439 strcat (name, suffix);
5440
5441 if (n_args == 1)
5442 fntype = build_function_type_list (type_out, type_in, NULL);
5443 else if (n_args == 2)
5444 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5445 else
5446 gcc_unreachable ();
5447
5448 /* Build a function declaration for the vectorized function. */
5449 new_fndecl = build_decl (BUILTINS_LOCATION,
5450 FUNCTION_DECL, get_identifier (name), fntype);
5451 TREE_PUBLIC (new_fndecl) = 1;
5452 DECL_EXTERNAL (new_fndecl) = 1;
5453 DECL_IS_NOVOPS (new_fndecl) = 1;
5454 TREE_READONLY (new_fndecl) = 1;
5455
5456 return new_fndecl;
5457 }
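
/* For example, vectorizing pow for V2DFmode takes this path: BDECL is
   __builtin_pow, the "__builtin_" prefix is stripped and the "d2" suffix
   appended to yield "powd2", and the new decl gets the type
   V2DF (V2DF, V2DF); the V4SFmode variant of powf becomes "powf4".  */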
5458
5459 /* Returns a function decl for a vectorized version of the builtin function
5460 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5461 if it is not available. */
5462
5463 static tree
5464 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5465 tree type_in)
5466 {
5467 machine_mode in_mode, out_mode;
5468 int in_n, out_n;
5469
5470 if (TARGET_DEBUG_BUILTIN)
5471 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5472 combined_fn_name (combined_fn (fn)),
5473 GET_MODE_NAME (TYPE_MODE (type_out)),
5474 GET_MODE_NAME (TYPE_MODE (type_in)));
5475
5476 if (TREE_CODE (type_out) != VECTOR_TYPE
5477 || TREE_CODE (type_in) != VECTOR_TYPE)
5478 return NULL_TREE;
5479
5480 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5481 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5482 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5483 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5484
5485 switch (fn)
5486 {
5487 CASE_CFN_COPYSIGN:
5488 if (VECTOR_UNIT_VSX_P (V2DFmode)
5489 && out_mode == DFmode && out_n == 2
5490 && in_mode == DFmode && in_n == 2)
5491 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5492 if (VECTOR_UNIT_VSX_P (V4SFmode)
5493 && out_mode == SFmode && out_n == 4
5494 && in_mode == SFmode && in_n == 4)
5495 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5496 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5497 && out_mode == SFmode && out_n == 4
5498 && in_mode == SFmode && in_n == 4)
5499 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5500 break;
5501 CASE_CFN_CEIL:
5502 if (VECTOR_UNIT_VSX_P (V2DFmode)
5503 && out_mode == DFmode && out_n == 2
5504 && in_mode == DFmode && in_n == 2)
5505 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5506 if (VECTOR_UNIT_VSX_P (V4SFmode)
5507 && out_mode == SFmode && out_n == 4
5508 && in_mode == SFmode && in_n == 4)
5509 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5510 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5511 && out_mode == SFmode && out_n == 4
5512 && in_mode == SFmode && in_n == 4)
5513 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5514 break;
5515 CASE_CFN_FLOOR:
5516 if (VECTOR_UNIT_VSX_P (V2DFmode)
5517 && out_mode == DFmode && out_n == 2
5518 && in_mode == DFmode && in_n == 2)
5519 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5520 if (VECTOR_UNIT_VSX_P (V4SFmode)
5521 && out_mode == SFmode && out_n == 4
5522 && in_mode == SFmode && in_n == 4)
5523 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5524 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5525 && out_mode == SFmode && out_n == 4
5526 && in_mode == SFmode && in_n == 4)
5527 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5528 break;
5529 CASE_CFN_FMA:
5530 if (VECTOR_UNIT_VSX_P (V2DFmode)
5531 && out_mode == DFmode && out_n == 2
5532 && in_mode == DFmode && in_n == 2)
5533 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5534 if (VECTOR_UNIT_VSX_P (V4SFmode)
5535 && out_mode == SFmode && out_n == 4
5536 && in_mode == SFmode && in_n == 4)
5537 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5538 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5539 && out_mode == SFmode && out_n == 4
5540 && in_mode == SFmode && in_n == 4)
5541 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5542 break;
5543 CASE_CFN_TRUNC:
5544 if (VECTOR_UNIT_VSX_P (V2DFmode)
5545 && out_mode == DFmode && out_n == 2
5546 && in_mode == DFmode && in_n == 2)
5547 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5548 if (VECTOR_UNIT_VSX_P (V4SFmode)
5549 && out_mode == SFmode && out_n == 4
5550 && in_mode == SFmode && in_n == 4)
5551 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5552 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5553 && out_mode == SFmode && out_n == 4
5554 && in_mode == SFmode && in_n == 4)
5555 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5556 break;
5557 CASE_CFN_NEARBYINT:
5558 if (VECTOR_UNIT_VSX_P (V2DFmode)
5559 && flag_unsafe_math_optimizations
5560 && out_mode == DFmode && out_n == 2
5561 && in_mode == DFmode && in_n == 2)
5562 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5563 if (VECTOR_UNIT_VSX_P (V4SFmode)
5564 && flag_unsafe_math_optimizations
5565 && out_mode == SFmode && out_n == 4
5566 && in_mode == SFmode && in_n == 4)
5567 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5568 break;
5569 CASE_CFN_RINT:
5570 if (VECTOR_UNIT_VSX_P (V2DFmode)
5571 && !flag_trapping_math
5572 && out_mode == DFmode && out_n == 2
5573 && in_mode == DFmode && in_n == 2)
5574 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5575 if (VECTOR_UNIT_VSX_P (V4SFmode)
5576 && !flag_trapping_math
5577 && out_mode == SFmode && out_n == 4
5578 && in_mode == SFmode && in_n == 4)
5579 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5580 break;
5581 default:
5582 break;
5583 }
5584
5585 /* Generate calls to libmass if appropriate. */
5586 if (rs6000_veclib_handler)
5587 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5588
5589 return NULL_TREE;
5590 }
5591
5592 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5593
5594 static tree
5595 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5596 tree type_in)
5597 {
5598 machine_mode in_mode, out_mode;
5599 int in_n, out_n;
5600
5601 if (TARGET_DEBUG_BUILTIN)
5602 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5603 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5604 GET_MODE_NAME (TYPE_MODE (type_out)),
5605 GET_MODE_NAME (TYPE_MODE (type_in)));
5606
5607 if (TREE_CODE (type_out) != VECTOR_TYPE
5608 || TREE_CODE (type_in) != VECTOR_TYPE)
5609 return NULL_TREE;
5610
5611 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5612 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5613 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5614 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5615
5616 enum rs6000_builtins fn
5617 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5618 switch (fn)
5619 {
5620 case RS6000_BUILTIN_RSQRTF:
5621 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5622 && out_mode == SFmode && out_n == 4
5623 && in_mode == SFmode && in_n == 4)
5624 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5625 break;
5626 case RS6000_BUILTIN_RSQRT:
5627 if (VECTOR_UNIT_VSX_P (V2DFmode)
5628 && out_mode == DFmode && out_n == 2
5629 && in_mode == DFmode && in_n == 2)
5630 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5631 break;
5632 case RS6000_BUILTIN_RECIPF:
5633 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5634 && out_mode == SFmode && out_n == 4
5635 && in_mode == SFmode && in_n == 4)
5636 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5637 break;
5638 case RS6000_BUILTIN_RECIP:
5639 if (VECTOR_UNIT_VSX_P (V2DFmode)
5640 && out_mode == DFmode && out_n == 2
5641 && in_mode == DFmode && in_n == 2)
5642 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5643 break;
5644 default:
5645 break;
5646 }
5647 return NULL_TREE;
5648 }
5649 \f
5650 /* Default CPU string for rs6000*_file_start functions. */
5651 static const char *rs6000_default_cpu;
5652
5653 /* Do anything needed at the start of the asm file. */
5654
5655 static void
5656 rs6000_file_start (void)
5657 {
5658 char buffer[80];
5659 const char *start = buffer;
5660 FILE *file = asm_out_file;
5661
5662 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5663
5664 default_file_start ();
5665
5666 if (flag_verbose_asm)
5667 {
5668 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5669
5670 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5671 {
5672 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5673 start = "";
5674 }
5675
5676 if (global_options_set.x_rs6000_cpu_index)
5677 {
5678 fprintf (file, "%s -mcpu=%s", start,
5679 processor_target_table[rs6000_cpu_index].name);
5680 start = "";
5681 }
5682
5683 if (global_options_set.x_rs6000_tune_index)
5684 {
5685 fprintf (file, "%s -mtune=%s", start,
5686 processor_target_table[rs6000_tune_index].name);
5687 start = "";
5688 }
5689
5690 if (PPC405_ERRATUM77)
5691 {
5692 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5693 start = "";
5694 }
5695
5696 #ifdef USING_ELFOS_H
5697 switch (rs6000_sdata)
5698 {
5699 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5700 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5701 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5702 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5703 }
5704
5705 if (rs6000_sdata && g_switch_value)
5706 {
5707 fprintf (file, "%s -G %d", start,
5708 g_switch_value);
5709 start = "";
5710 }
5711 #endif
5712
5713 if (*start == '\0')
5714 putc ('\n', file);
5715 }
5716
5717 #ifdef USING_ELFOS_H
5718 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5719 && !global_options_set.x_rs6000_cpu_index)
5720 {
5721 fputs ("\t.machine ", asm_out_file);
5722 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5723 fputs ("power9\n", asm_out_file);
5724 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5725 fputs ("power8\n", asm_out_file);
5726 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5727 fputs ("power7\n", asm_out_file);
5728 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5729 fputs ("power6\n", asm_out_file);
5730 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5731 fputs ("power5\n", asm_out_file);
5732 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5733 fputs ("power4\n", asm_out_file);
5734 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5735 fputs ("ppc64\n", asm_out_file);
5736 else
5737 fputs ("ppc\n", asm_out_file);
5738 }
5739 #endif
5740
5741 if (DEFAULT_ABI == ABI_ELFv2)
5742 fprintf (file, "\t.abiversion 2\n");
5743 }
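
/* For example, a compiler configured without --with-cpu and invoked without
   -mcpu, but whose ISA flags include OPTION_MASK_DIRECT_MOVE, starts the
   file with ".machine power8" (plus ".abiversion 2" on ELFv2); with an
   explicit -mcpu=power9 and -fverbose-asm, the preamble instead carries the
   comment "# rs6000/powerpc options: -mcpu=power9" (on ELF targets, where
   ASM_COMMENT_START is "#") and no .machine directive is emitted.  */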
5744
5745 \f
5746 /* Return nonzero if this function is known to have a null epilogue. */
5747
5748 int
5749 direct_return (void)
5750 {
5751 if (reload_completed)
5752 {
5753 rs6000_stack_t *info = rs6000_stack_info ();
5754
5755 if (info->first_gp_reg_save == 32
5756 && info->first_fp_reg_save == 64
5757 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5758 && ! info->lr_save_p
5759 && ! info->cr_save_p
5760 && info->vrsave_size == 0
5761 && ! info->push_p)
5762 return 1;
5763 }
5764
5765 return 0;
5766 }
5767
5768 /* Helper for num_insns_constant. Calculate number of instructions to
5769 load VALUE to a single gpr using combinations of addi, addis, ori,
5770 oris and sldi instructions. */
5771
5772 static int
5773 num_insns_constant_gpr (HOST_WIDE_INT value)
5774 {
5775 /* signed constant loadable with addi */
5776 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5777 return 1;
5778
5779 /* constant loadable with addis */
5780 else if ((value & 0xffff) == 0
5781 && (value >> 31 == -1 || value >> 31 == 0))
5782 return 1;
5783
5784 else if (TARGET_POWERPC64)
5785 {
5786 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5787 HOST_WIDE_INT high = value >> 31;
5788
5789 if (high == 0 || high == -1)
5790 return 2;
5791
5792 high >>= 1;
5793
5794 if (low == 0)
5795 return num_insns_constant_gpr (high) + 1;
5796 else if (high == 0)
5797 return num_insns_constant_gpr (low) + 1;
5798 else
5799 return (num_insns_constant_gpr (high)
5800 + num_insns_constant_gpr (low) + 1);
5801 }
5802
5803 else
5804 return 2;
5805 }
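
/* Worked examples for the function above: 0x7fff loads with a single
   addi (li); 0x12340000 with a single addis (lis); 0x12345678 needs
   lis + ori, i.e. 2 insns; and with TARGET_POWERPC64 the constant
   0x123456789abcdef0 splits into high part 0x12345678 (2 insns) and
   low part 0x9abcdef0 (2 insns) joined by a shift, 5 insns in all:
   lis, ori, sldi 32, oris, ori.  */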
5806
5807 /* Helper for num_insns_constant. Allow constants formed by the
5808 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5809 and handle modes that require multiple gprs. */
5810
5811 static int
5812 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5813 {
5814 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5815 int total = 0;
5816 while (nregs-- > 0)
5817 {
5818 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5819 int insns = num_insns_constant_gpr (low);
5820 if (insns > 2
5821 /* We won't get more than 2 from num_insns_constant_gpr
5822 except when TARGET_POWERPC64 and mode is DImode or
5823 wider, so the register mode must be DImode. */
5824 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5825 insns = 2;
5826 total += insns;
5827 value >>= BITS_PER_WORD;
5828 }
5829 return total;
5830 }
5831
5832 /* Return the number of instructions it takes to form a constant in as
5833 many gprs as are needed for MODE. */
5834
5835 int
5836 num_insns_constant (rtx op, machine_mode mode)
5837 {
5838 HOST_WIDE_INT val;
5839
5840 switch (GET_CODE (op))
5841 {
5842 case CONST_INT:
5843 val = INTVAL (op);
5844 break;
5845
5846 case CONST_WIDE_INT:
5847 {
5848 int insns = 0;
5849 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5850 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5851 DImode);
5852 return insns;
5853 }
5854
5855 case CONST_DOUBLE:
5856 {
5857 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5858
5859 if (mode == SFmode || mode == SDmode)
5860 {
5861 long l;
5862
5863 if (mode == SDmode)
5864 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5865 else
5866 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5867 /* See the first define_split in rs6000.md handling a
5868 const_double_operand. */
5869 val = l;
5870 mode = SImode;
5871 }
5872 else if (mode == DFmode || mode == DDmode)
5873 {
5874 long l[2];
5875
5876 if (mode == DDmode)
5877 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5878 else
5879 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5880
5881 /* See the second (32-bit) and third (64-bit) define_split
5882 in rs6000.md handling a const_double_operand. */
5883 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5884 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5885 mode = DImode;
5886 }
5887 else if (mode == TFmode || mode == TDmode
5888 || mode == KFmode || mode == IFmode)
5889 {
5890 long l[4];
5891 int insns;
5892
5893 if (mode == TDmode)
5894 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5895 else
5896 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5897
5898 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5899 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5900 insns = num_insns_constant_multi (val, DImode);
5901 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5902 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5903 insns += num_insns_constant_multi (val, DImode);
5904 return insns;
5905 }
5906 else
5907 gcc_unreachable ();
5908 }
5909 break;
5910
5911 default:
5912 gcc_unreachable ();
5913 }
5914
5915 return num_insns_constant_multi (val, mode);
5916 }
5917
5918 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5919 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5920 corresponding element of the vector, but for V4SFmode, the
5921 corresponding "float" is interpreted as an SImode integer. */
5922
5923 HOST_WIDE_INT
5924 const_vector_elt_as_int (rtx op, unsigned int elt)
5925 {
5926 rtx tmp;
5927
5928 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5929 gcc_assert (GET_MODE (op) != V2DImode
5930 && GET_MODE (op) != V2DFmode);
5931
5932 tmp = CONST_VECTOR_ELT (op, elt);
5933 if (GET_MODE (op) == V4SFmode)
5934 tmp = gen_lowpart (SImode, tmp);
5935 return INTVAL (tmp);
5936 }
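
/* For example, element 0 of the V4SFmode constant { 1.0f, 1.0f, 1.0f, 1.0f }
   is returned as 0x3f800000, the IEEE single-precision bit pattern of 1.0,
   which is the representation the vspltis* recognizers below work on.  */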
5937
5938 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5939 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5940 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5941 all items are set to the same value and contain COPIES replicas of the
5942 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5943 operand and the others are set to the value of the operand's msb. */
5944
5945 static bool
5946 vspltis_constant (rtx op, unsigned step, unsigned copies)
5947 {
5948 machine_mode mode = GET_MODE (op);
5949 machine_mode inner = GET_MODE_INNER (mode);
5950
5951 unsigned i;
5952 unsigned nunits;
5953 unsigned bitsize;
5954 unsigned mask;
5955
5956 HOST_WIDE_INT val;
5957 HOST_WIDE_INT splat_val;
5958 HOST_WIDE_INT msb_val;
5959
5960 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5961 return false;
5962
5963 nunits = GET_MODE_NUNITS (mode);
5964 bitsize = GET_MODE_BITSIZE (inner);
5965 mask = GET_MODE_MASK (inner);
5966
5967 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5968 splat_val = val;
5969 msb_val = val >= 0 ? 0 : -1;
5970
5971 /* Construct the value to be splatted, if possible.  If not, return false. */
5972 for (i = 2; i <= copies; i *= 2)
5973 {
5974 HOST_WIDE_INT small_val;
5975 bitsize /= 2;
5976 small_val = splat_val >> bitsize;
5977 mask >>= bitsize;
5978 if (splat_val != ((HOST_WIDE_INT)
5979 ((unsigned HOST_WIDE_INT) small_val << bitsize)
5980 | (small_val & mask)))
5981 return false;
5982 splat_val = small_val;
5983 }
5984
5985 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5986 if (EASY_VECTOR_15 (splat_val))
5987 ;
5988
5989 /* Also check if we can splat, and then add the result to itself.  Do so if
5990 the value is positive, or if the splat instruction is using OP's mode;
5991 for splat_val < 0, the splat and the add should use the same mode. */
5992 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5993 && (splat_val >= 0 || (step == 1 && copies == 1)))
5994 ;
5995
5996 /* Also check if we are loading up the most significant bit, which can be done
5997 by loading up -1 and shifting the value left by -1. */
5998 else if (EASY_VECTOR_MSB (splat_val, inner))
5999 ;
6000
6001 else
6002 return false;
6003
6004 /* Check if VAL is present in every STEP-th element, and the
6005 other elements are filled with its most significant bit. */
6006 for (i = 1; i < nunits; ++i)
6007 {
6008 HOST_WIDE_INT desired_val;
6009 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6010 if ((i & (step - 1)) == 0)
6011 desired_val = val;
6012 else
6013 desired_val = msb_val;
6014
6015 if (desired_val != const_vector_elt_as_int (op, elt))
6016 return false;
6017 }
6018
6019 return true;
6020 }
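
/* Examples: the V4SImode constant { 5, 5, 5, 5 } matches with STEP = 1,
   COPIES = 1, i.e. vspltisw 5; the V4SImode constant { 0x00050005, ... }
   matches with COPIES = 2, since vspltish 5 leaves two replicas of 5 in
   each word; and the V16QImode constant { 0, 0, 0, 5, 0, 0, 0, 5, ... }
   (big-endian element order) matches with STEP = 4, the remaining bytes
   being the msb fill (0) of the value 5.  */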
6021
6022 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6023 instruction, filling in the bottom elements with 0 or -1.
6024
6025 Return 0 if the constant cannot be generated with VSLDOI.  Return positive
6026 for the number of zero bytes to shift in, or negative for the number of 0xff
6027 bytes to shift in.
6028
6029 OP is a CONST_VECTOR. */
6030
6031 int
6032 vspltis_shifted (rtx op)
6033 {
6034 machine_mode mode = GET_MODE (op);
6035 machine_mode inner = GET_MODE_INNER (mode);
6036
6037 unsigned i, j;
6038 unsigned nunits;
6039 unsigned mask;
6040
6041 HOST_WIDE_INT val;
6042
6043 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6044 return 0;
6045
6046 /* We need to create pseudo registers to do the shift, so don't recognize
6047 shift vector constants after reload. */
6048 if (!can_create_pseudo_p ())
6049 return 0;
6050
6051 nunits = GET_MODE_NUNITS (mode);
6052 mask = GET_MODE_MASK (inner);
6053
6054 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6055
6056 /* Check if the value can really be the operand of a vspltis[bhw]. */
6057 if (EASY_VECTOR_15 (val))
6058 ;
6059
6060 /* Also check if we are loading up the most significant bit which can be done
6061 by loading up -1 and shifting the value left by -1. */
6062 else if (EASY_VECTOR_MSB (val, inner))
6063 ;
6064
6065 else
6066 return 0;
6067
6068 /* Check if VAL is present in every STEP-th element until we find elements
6069 that are 0 or all 1 bits. */
6070 for (i = 1; i < nunits; ++i)
6071 {
6072 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6073 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6074
6075 /* If the value isn't the splat value, check for the remaining elements
6076 being 0/-1. */
6077 if (val != elt_val)
6078 {
6079 if (elt_val == 0)
6080 {
6081 for (j = i+1; j < nunits; ++j)
6082 {
6083 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6084 if (const_vector_elt_as_int (op, elt2) != 0)
6085 return 0;
6086 }
6087
6088 return (nunits - i) * GET_MODE_SIZE (inner);
6089 }
6090
6091 else if ((elt_val & mask) == mask)
6092 {
6093 for (j = i+1; j < nunits; ++j)
6094 {
6095 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6096 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6097 return 0;
6098 }
6099
6100 return -((nunits - i) * GET_MODE_SIZE (inner));
6101 }
6102
6103 else
6104 return 0;
6105 }
6106 }
6107
6108 /* If all elements are equal, we don't need to do VSLDOI. */
6109 return 0;
6110 }
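
/* Examples: the V4SImode constant { 5, 0, 0, 0 } (big-endian element order)
   returns 12, i.e. vspltisw 5 followed by a VSLDOI shifting in 12 zero
   bytes, while { 5, -1, -1, -1 } returns -12, shifting in 0xff bytes
   instead.  */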
6111
6112
6113 /* Return true if OP is of the given MODE and can be synthesized
6114 with a vspltisb, vspltish or vspltisw. */
6115
6116 bool
6117 easy_altivec_constant (rtx op, machine_mode mode)
6118 {
6119 unsigned step, copies;
6120
6121 if (mode == VOIDmode)
6122 mode = GET_MODE (op);
6123 else if (mode != GET_MODE (op))
6124 return false;
6125
6126 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6127 constants. */
6128 if (mode == V2DFmode)
6129 return zero_constant (op, mode);
6130
6131 else if (mode == V2DImode)
6132 {
6133 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6134 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6135 return false;
6136
6137 if (zero_constant (op, mode))
6138 return true;
6139
6140 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6141 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6142 return true;
6143
6144 return false;
6145 }
6146
6147 /* V1TImode is a special container for TImode. Ignore for now. */
6148 else if (mode == V1TImode)
6149 return false;
6150
6151 /* Start with a vspltisw. */
6152 step = GET_MODE_NUNITS (mode) / 4;
6153 copies = 1;
6154
6155 if (vspltis_constant (op, step, copies))
6156 return true;
6157
6158 /* Then try with a vspltish. */
6159 if (step == 1)
6160 copies <<= 1;
6161 else
6162 step >>= 1;
6163
6164 if (vspltis_constant (op, step, copies))
6165 return true;
6166
6167 /* And finally a vspltisb. */
6168 if (step == 1)
6169 copies <<= 1;
6170 else
6171 step >>= 1;
6172
6173 if (vspltis_constant (op, step, copies))
6174 return true;
6175
6176 if (vspltis_shifted (op) != 0)
6177 return true;
6178
6179 return false;
6180 }
6181
6182 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6183 result is OP. Abort if it is not possible. */
6184
6185 rtx
6186 gen_easy_altivec_constant (rtx op)
6187 {
6188 machine_mode mode = GET_MODE (op);
6189 int nunits = GET_MODE_NUNITS (mode);
6190 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6191 unsigned step = nunits / 4;
6192 unsigned copies = 1;
6193
6194 /* Start with a vspltisw. */
6195 if (vspltis_constant (op, step, copies))
6196 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6197
6198 /* Then try with a vspltish. */
6199 if (step == 1)
6200 copies <<= 1;
6201 else
6202 step >>= 1;
6203
6204 if (vspltis_constant (op, step, copies))
6205 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6206
6207 /* And finally a vspltisb. */
6208 if (step == 1)
6209 copies <<= 1;
6210 else
6211 step >>= 1;
6212
6213 if (vspltis_constant (op, step, copies))
6214 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6215
6216 gcc_unreachable ();
6217 }
6218
6219 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6220 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6221
6222 Store the number of instructions needed (1 or 2) at the address pointed to
6223 by NUM_INSNS_PTR.
6224
6225 Store the constant being split at the address pointed to by CONSTANT_PTR. */
6226
6227 bool
6228 xxspltib_constant_p (rtx op,
6229 machine_mode mode,
6230 int *num_insns_ptr,
6231 int *constant_ptr)
6232 {
6233 size_t nunits = GET_MODE_NUNITS (mode);
6234 size_t i;
6235 HOST_WIDE_INT value;
6236 rtx element;
6237
6238 /* Set the returned values to out-of-range sentinel values. */
6239 *num_insns_ptr = -1;
6240 *constant_ptr = 256;
6241
6242 if (!TARGET_P9_VECTOR)
6243 return false;
6244
6245 if (mode == VOIDmode)
6246 mode = GET_MODE (op);
6247
6248 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6249 return false;
6250
6251 /* Handle (vec_duplicate <constant>). */
6252 if (GET_CODE (op) == VEC_DUPLICATE)
6253 {
6254 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6255 && mode != V2DImode)
6256 return false;
6257
6258 element = XEXP (op, 0);
6259 if (!CONST_INT_P (element))
6260 return false;
6261
6262 value = INTVAL (element);
6263 if (!IN_RANGE (value, -128, 127))
6264 return false;
6265 }
6266
6267 /* Handle (const_vector [...]). */
6268 else if (GET_CODE (op) == CONST_VECTOR)
6269 {
6270 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6271 && mode != V2DImode)
6272 return false;
6273
6274 element = CONST_VECTOR_ELT (op, 0);
6275 if (!CONST_INT_P (element))
6276 return false;
6277
6278 value = INTVAL (element);
6279 if (!IN_RANGE (value, -128, 127))
6280 return false;
6281
6282 for (i = 1; i < nunits; i++)
6283 {
6284 element = CONST_VECTOR_ELT (op, i);
6285 if (!CONST_INT_P (element))
6286 return false;
6287
6288 if (value != INTVAL (element))
6289 return false;
6290 }
6291 }
6292
6293 /* Handle integer constants being loaded into the upper part of the VSX
6294 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6295 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6296 else if (CONST_INT_P (op))
6297 {
6298 if (!SCALAR_INT_MODE_P (mode))
6299 return false;
6300
6301 value = INTVAL (op);
6302 if (!IN_RANGE (value, -128, 127))
6303 return false;
6304
6305 if (!IN_RANGE (value, -1, 0))
6306 {
6307 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6308 return false;
6309
6310 if (EASY_VECTOR_15 (value))
6311 return false;
6312 }
6313 }
6314
6315 else
6316 return false;
6317
6318 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6319 sign extend. Special case 0/-1 to allow getting any VSX register instead
6320 of an Altivec register. */
6321 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6322 && EASY_VECTOR_15 (value))
6323 return false;
6324
6325 /* Return # of instructions and the constant byte for XXSPLTIB. */
6326 if (mode == V16QImode)
6327 *num_insns_ptr = 1;
6328
6329 else if (IN_RANGE (value, -1, 0))
6330 *num_insns_ptr = 1;
6331
6332 else
6333 *num_insns_ptr = 2;
6334
6335 *constant_ptr = (int) value;
6336 return true;
6337 }
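
/* Examples: the V16QImode constant { 0x42, ..., 0x42 } needs 1 insn,
   xxspltib; a V4SImode splat of 100 needs 2 insns, xxspltib followed by a
   vextsb2w sign extension; and a V4SImode splat of 5 returns false, since
   a single vspltisw 5 is preferred.  */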
6338
6339 const char *
6340 output_vec_const_move (rtx *operands)
6341 {
6342 int shift;
6343 machine_mode mode;
6344 rtx dest, vec;
6345
6346 dest = operands[0];
6347 vec = operands[1];
6348 mode = GET_MODE (dest);
6349
6350 if (TARGET_VSX)
6351 {
6352 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6353 int xxspltib_value = 256;
6354 int num_insns = -1;
6355
6356 if (zero_constant (vec, mode))
6357 {
6358 if (TARGET_P9_VECTOR)
6359 return "xxspltib %x0,0";
6360
6361 else if (dest_vmx_p)
6362 return "vspltisw %0,0";
6363
6364 else
6365 return "xxlxor %x0,%x0,%x0";
6366 }
6367
6368 if (all_ones_constant (vec, mode))
6369 {
6370 if (TARGET_P9_VECTOR)
6371 return "xxspltib %x0,255";
6372
6373 else if (dest_vmx_p)
6374 return "vspltisw %0,-1";
6375
6376 else if (TARGET_P8_VECTOR)
6377 return "xxlorc %x0,%x0,%x0";
6378
6379 else
6380 gcc_unreachable ();
6381 }
6382
6383 if (TARGET_P9_VECTOR
6384 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6385 {
6386 if (num_insns == 1)
6387 {
6388 operands[2] = GEN_INT (xxspltib_value & 0xff);
6389 return "xxspltib %x0,%2";
6390 }
6391
6392 return "#";
6393 }
6394 }
6395
6396 if (TARGET_ALTIVEC)
6397 {
6398 rtx splat_vec;
6399
6400 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6401 if (zero_constant (vec, mode))
6402 return "vspltisw %0,0";
6403
6404 if (all_ones_constant (vec, mode))
6405 return "vspltisw %0,-1";
6406
6407 /* Do we need to construct a value using VSLDOI? */
6408 shift = vspltis_shifted (vec);
6409 if (shift != 0)
6410 return "#";
6411
6412 splat_vec = gen_easy_altivec_constant (vec);
6413 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6414 operands[1] = XEXP (splat_vec, 0);
6415 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6416 return "#";
6417
6418 switch (GET_MODE (splat_vec))
6419 {
6420 case E_V4SImode:
6421 return "vspltisw %0,%1";
6422
6423 case E_V8HImode:
6424 return "vspltish %0,%1";
6425
6426 case E_V16QImode:
6427 return "vspltisb %0,%1";
6428
6429 default:
6430 gcc_unreachable ();
6431 }
6432 }
6433
6434 gcc_unreachable ();
6435 }
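
/* The templates above map the easy constants onto single instructions:
   a zero vector becomes "xxspltib %x0,0" on ISA 3.0, "vspltisw %0,0" in
   an Altivec register, and "xxlxor %x0,%x0,%x0" otherwise; an all-ones
   vector becomes "xxspltib %x0,255", "vspltisw %0,-1" or "xxlorc".
   Returning "#" tells the output machinery that the constant must be
   split into a multi-insn sequence later.  */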
6436
6437 /* Initialize vector TARGET to VALS. */
6438
6439 void
6440 rs6000_expand_vector_init (rtx target, rtx vals)
6441 {
6442 machine_mode mode = GET_MODE (target);
6443 machine_mode inner_mode = GET_MODE_INNER (mode);
6444 int n_elts = GET_MODE_NUNITS (mode);
6445 int n_var = 0, one_var = -1;
6446 bool all_same = true, all_const_zero = true;
6447 rtx x, mem;
6448 int i;
6449
6450 for (i = 0; i < n_elts; ++i)
6451 {
6452 x = XVECEXP (vals, 0, i);
6453 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6454 ++n_var, one_var = i;
6455 else if (x != CONST0_RTX (inner_mode))
6456 all_const_zero = false;
6457
6458 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6459 all_same = false;
6460 }
6461
6462 if (n_var == 0)
6463 {
6464 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6465 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6466 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6467 {
6468 /* Zero register. */
6469 emit_move_insn (target, CONST0_RTX (mode));
6470 return;
6471 }
6472 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6473 {
6474 /* Splat immediate. */
6475 emit_insn (gen_rtx_SET (target, const_vec));
6476 return;
6477 }
6478 else
6479 {
6480 /* Load from constant pool. */
6481 emit_move_insn (target, const_vec);
6482 return;
6483 }
6484 }
6485
6486 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6487 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6488 {
6489 rtx op[2];
6490 size_t i;
6491 size_t num_elements = all_same ? 1 : 2;
6492 for (i = 0; i < num_elements; i++)
6493 {
6494 op[i] = XVECEXP (vals, 0, i);
6495 /* Just in case there is a SUBREG with a smaller mode, do a
6496 conversion. */
6497 if (GET_MODE (op[i]) != inner_mode)
6498 {
6499 rtx tmp = gen_reg_rtx (inner_mode);
6500 convert_move (tmp, op[i], 0);
6501 op[i] = tmp;
6502 }
6503 /* Allow load with splat double word. */
6504 else if (MEM_P (op[i]))
6505 {
6506 if (!all_same)
6507 op[i] = force_reg (inner_mode, op[i]);
6508 }
6509 else if (!REG_P (op[i]))
6510 op[i] = force_reg (inner_mode, op[i]);
6511 }
6512
6513 if (all_same)
6514 {
6515 if (mode == V2DFmode)
6516 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6517 else
6518 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6519 }
6520 else
6521 {
6522 if (mode == V2DFmode)
6523 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6524 else
6525 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6526 }
6527 return;
6528 }
6529
6530 /* Special case initializing vector int if we are on 64-bit systems with
6531 direct move or we have the ISA 3.0 instructions. */
6532 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6533 && TARGET_DIRECT_MOVE_64BIT)
6534 {
6535 if (all_same)
6536 {
6537 rtx element0 = XVECEXP (vals, 0, 0);
6538 if (MEM_P (element0))
6539 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6540 else
6541 element0 = force_reg (SImode, element0);
6542
6543 if (TARGET_P9_VECTOR)
6544 emit_insn (gen_vsx_splat_v4si (target, element0));
6545 else
6546 {
6547 rtx tmp = gen_reg_rtx (DImode);
6548 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6549 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6550 }
6551 return;
6552 }
6553 else
6554 {
6555 rtx elements[4];
6556 size_t i;
6557
6558 for (i = 0; i < 4; i++)
6559 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6560
6561 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6562 elements[2], elements[3]));
6563 return;
6564 }
6565 }
6566
6567 /* With single precision floating point on VSX, we know that internally
6568 single precision is actually represented as a double, so either make 2
6569 V2DF vectors and convert those vectors to single precision, or do one
6570 conversion and splat the result to the other elements. */
6571 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6572 {
6573 if (all_same)
6574 {
6575 rtx element0 = XVECEXP (vals, 0, 0);
6576
6577 if (TARGET_P9_VECTOR)
6578 {
6579 if (MEM_P (element0))
6580 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6581
6582 emit_insn (gen_vsx_splat_v4sf (target, element0));
6583 }
6584
6585 else
6586 {
6587 rtx freg = gen_reg_rtx (V4SFmode);
6588 rtx sreg = force_reg (SFmode, element0);
6589 rtx cvt = (TARGET_XSCVDPSPN
6590 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6591 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6592
6593 emit_insn (cvt);
6594 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6595 const0_rtx));
6596 }
6597 }
6598 else
6599 {
6600 rtx dbl_even = gen_reg_rtx (V2DFmode);
6601 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6602 rtx flt_even = gen_reg_rtx (V4SFmode);
6603 rtx flt_odd = gen_reg_rtx (V4SFmode);
6604 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6605 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6606 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6607 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6608
6609 /* Use VMRGEW if we can instead of doing a permute. */
6610 if (TARGET_P8_VECTOR)
6611 {
6612 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6613 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6614 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6615 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6616 if (BYTES_BIG_ENDIAN)
6617 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6618 else
6619 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6620 }
6621 else
6622 {
6623 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6624 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6625 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6626 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6627 rs6000_expand_extract_even (target, flt_even, flt_odd);
6628 }
6629 }
6630 return;
6631 }
6632
6633 /* Special case initializing vector short/char that are splats if we are on
6634 64-bit systems with direct move. */
6635 if (all_same && TARGET_DIRECT_MOVE_64BIT
6636 && (mode == V16QImode || mode == V8HImode))
6637 {
6638 rtx op0 = XVECEXP (vals, 0, 0);
6639 rtx di_tmp = gen_reg_rtx (DImode);
6640
6641 if (!REG_P (op0))
6642 op0 = force_reg (GET_MODE_INNER (mode), op0);
6643
6644 if (mode == V16QImode)
6645 {
6646 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6647 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6648 return;
6649 }
6650
6651 if (mode == V8HImode)
6652 {
6653 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6654 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6655 return;
6656 }
6657 }
6658
6659 /* Store value to stack temp. Load vector element. Splat. However, splat
6660 of 64-bit items is not supported on Altivec. */
6661 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6662 {
6663 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6664 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6665 XVECEXP (vals, 0, 0));
6666 x = gen_rtx_UNSPEC (VOIDmode,
6667 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6668 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6669 gen_rtvec (2,
6670 gen_rtx_SET (target, mem),
6671 x)));
6672 x = gen_rtx_VEC_SELECT (inner_mode, target,
6673 gen_rtx_PARALLEL (VOIDmode,
6674 gen_rtvec (1, const0_rtx)));
6675 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6676 return;
6677 }
6678
6679 /* One field is non-constant. Load constant then overwrite
6680 varying field. */
6681 if (n_var == 1)
6682 {
6683 rtx copy = copy_rtx (vals);
6684
6685 /* Load constant part of vector, substitute neighboring value for
6686 varying element. */
6687 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6688 rs6000_expand_vector_init (target, copy);
6689
6690 /* Insert variable. */
6691 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6692 return;
6693 }
6694
6695 /* Construct the vector in memory one field at a time
6696 and load the whole vector. */
6697 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6698 for (i = 0; i < n_elts; i++)
6699 emit_move_insn (adjust_address_nv (mem, inner_mode,
6700 i * GET_MODE_SIZE (inner_mode)),
6701 XVECEXP (vals, 0, i));
6702 emit_move_insn (target, mem);
6703 }
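
/* For example, initializing a V2DFmode vector from two distinct doubles
   emits one vsx_concat_v2df (xxpermdi), a splat of a value living in
   memory likely becomes an lxvdsx load-and-splat, and V4SFmode with four
   distinct values goes through the two-V2DF convert-and-merge sequence
   above.  */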
6704
6705 /* Set field ELT of TARGET to VAL. */
6706
6707 void
6708 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6709 {
6710 machine_mode mode = GET_MODE (target);
6711 machine_mode inner_mode = GET_MODE_INNER (mode);
6712 rtx reg = gen_reg_rtx (mode);
6713 rtx mask, mem, x;
6714 int width = GET_MODE_SIZE (inner_mode);
6715 int i;
6716
6717 val = force_reg (GET_MODE (val), val);
6718
6719 if (VECTOR_MEM_VSX_P (mode))
6720 {
6721 rtx insn = NULL_RTX;
6722 rtx elt_rtx = GEN_INT (elt);
6723
6724 if (mode == V2DFmode)
6725 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6726
6727 else if (mode == V2DImode)
6728 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6729
6730 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6731 {
6732 if (mode == V4SImode)
6733 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6734 else if (mode == V8HImode)
6735 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6736 else if (mode == V16QImode)
6737 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6738 else if (mode == V4SFmode)
6739 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6740 }
6741
6742 if (insn)
6743 {
6744 emit_insn (insn);
6745 return;
6746 }
6747 }
6748
6749 /* Simplify setting single element vectors like V1TImode. */
6750 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6751 {
6752 emit_move_insn (target, gen_lowpart (mode, val));
6753 return;
6754 }
6755
6756 /* Load single variable value. */
6757 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6758 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6759 x = gen_rtx_UNSPEC (VOIDmode,
6760 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6761 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6762 gen_rtvec (2,
6763 gen_rtx_SET (reg, mem),
6764 x)));
6765
6766 /* Linear sequence. */
6767 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6768 for (i = 0; i < 16; ++i)
6769 XVECEXP (mask, 0, i) = GEN_INT (i);
6770
6771 /* Set permute mask to insert element into target. */
6772 for (i = 0; i < width; ++i)
6773 XVECEXP (mask, 0, elt*width + i)
6774 = GEN_INT (i + 0x10);
6775 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6776
6777 if (BYTES_BIG_ENDIAN)
6778 x = gen_rtx_UNSPEC (mode,
6779 gen_rtvec (3, target, reg,
6780 force_reg (V16QImode, x)),
6781 UNSPEC_VPERM);
6782 else
6783 {
6784 if (TARGET_P9_VECTOR)
6785 x = gen_rtx_UNSPEC (mode,
6786 gen_rtvec (3, reg, target,
6787 force_reg (V16QImode, x)),
6788 UNSPEC_VPERMR);
6789 else
6790 {
6791 /* Invert selector. We prefer to generate VNAND on P8 so
6792 that future fusion opportunities can kick in, but must
6793 generate VNOR elsewhere. */
6794 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6795 rtx iorx = (TARGET_P8_VECTOR
6796 ? gen_rtx_IOR (V16QImode, notx, notx)
6797 : gen_rtx_AND (V16QImode, notx, notx));
6798 rtx tmp = gen_reg_rtx (V16QImode);
6799 emit_insn (gen_rtx_SET (tmp, iorx));
6800
6801 /* Permute with operands reversed and adjusted selector. */
6802 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6803 UNSPEC_VPERM);
6804 }
6805 }
6806
6807 emit_insn (gen_rtx_SET (target, x));
6808 }
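/* Worked example for the permute-mask construction above (illustrative
   only, not part of the build): for V4SImode and ELT == 1 the selector
   starts as the identity 0..15 and bytes 4..7 become 0x10..0x13, so a
   big-endian VPERM copies element 1 from REG (which holds the splatted
   new value) and every other byte from TARGET:  */
#if 0
static const unsigned char v4si_elt1_selector[16] =
  { 0, 1, 2, 3, 0x10, 0x11, 0x12, 0x13, 8, 9, 10, 11, 12, 13, 14, 15 };
#endif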
6809
6810 /* Extract field ELT from VEC into TARGET. */
6811
6812 void
6813 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6814 {
6815 machine_mode mode = GET_MODE (vec);
6816 machine_mode inner_mode = GET_MODE_INNER (mode);
6817 rtx mem;
6818
6819 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6820 {
6821 switch (mode)
6822 {
6823 default:
6824 break;
6825 case E_V1TImode:
6826 emit_move_insn (target, gen_lowpart (TImode, vec));
6827 return;
6828 case E_V2DFmode:
6829 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6830 return;
6831 case E_V2DImode:
6832 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6833 return;
6834 case E_V4SFmode:
6835 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6836 return;
6837 case E_V16QImode:
6838 if (TARGET_DIRECT_MOVE_64BIT)
6839 {
6840 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6841 return;
6842 }
6843 else
6844 break;
6845 case E_V8HImode:
6846 if (TARGET_DIRECT_MOVE_64BIT)
6847 {
6848 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6849 return;
6850 }
6851 else
6852 break;
6853 case E_V4SImode:
6854 if (TARGET_DIRECT_MOVE_64BIT)
6855 {
6856 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6857 return;
6858 }
6859 break;
6860 }
6861 }
6862 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6863 && TARGET_DIRECT_MOVE_64BIT)
6864 {
6865 if (GET_MODE (elt) != DImode)
6866 {
6867 rtx tmp = gen_reg_rtx (DImode);
6868 convert_move (tmp, elt, 0);
6869 elt = tmp;
6870 }
6871 else if (!REG_P (elt))
6872 elt = force_reg (DImode, elt);
6873
6874 switch (mode)
6875 {
6876 case E_V1TImode:
6877 emit_move_insn (target, gen_lowpart (TImode, vec));
6878 return;
6879
6880 case E_V2DFmode:
6881 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6882 return;
6883
6884 case E_V2DImode:
6885 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6886 return;
6887
6888 case E_V4SFmode:
6889 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6890 return;
6891
6892 case E_V4SImode:
6893 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6894 return;
6895
6896 case E_V8HImode:
6897 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6898 return;
6899
6900 case E_V16QImode:
6901 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6902 return;
6903
6904 default:
6905 gcc_unreachable ();
6906 }
6907 }
6908
6909 /* Allocate mode-sized buffer. */
6910 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6911
6912 emit_move_insn (mem, vec);
6913 if (CONST_INT_P (elt))
6914 {
6915 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6916
6917 /* Add offset to field within buffer matching vector element. */
6918 mem = adjust_address_nv (mem, inner_mode,
6919 modulo_elt * GET_MODE_SIZE (inner_mode));
6920 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6921 }
6922 else
6923 {
6924 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6925 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
6926 rtx new_addr = gen_reg_rtx (Pmode);
6927
6928 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
6929 if (ele_size > 1)
6930 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
6931 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
6932 new_addr = change_address (mem, inner_mode, new_addr);
6933 emit_move_insn (target, new_addr);
6934 }
6935 }
6936
6937 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6938 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6939 temporary (BASE_TMP) to fix up the address. Return the new memory address
6940 that is valid for reads or writes to a given register (SCALAR_REG). */
6941
6942 rtx
6943 rs6000_adjust_vec_address (rtx scalar_reg,
6944 rtx mem,
6945 rtx element,
6946 rtx base_tmp,
6947 machine_mode scalar_mode)
6948 {
6949 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6950 rtx addr = XEXP (mem, 0);
6951 rtx element_offset;
6952 rtx new_addr;
6953 bool valid_addr_p;
6954
6955 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6956 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6957
6958 /* Calculate what we need to add to the address to get the element
6959 address. */
6960 if (CONST_INT_P (element))
6961 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6962 else
6963 {
6964 int byte_shift = exact_log2 (scalar_size);
6965 gcc_assert (byte_shift >= 0);
6966
6967 if (byte_shift == 0)
6968 element_offset = element;
6969
6970 else
6971 {
6972 if (TARGET_POWERPC64)
6973 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
6974 else
6975 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
6976
6977 element_offset = base_tmp;
6978 }
6979 }
6980
6981 /* Create the new address pointing to the element within the vector. If we
6982 are adding 0, we don't have to change the address. */
6983 if (element_offset == const0_rtx)
6984 new_addr = addr;
6985
6986 /* A simple indirect address can be converted into a reg + offset
6987 address. */
6988 else if (REG_P (addr) || SUBREG_P (addr))
6989 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
6990
6991 /* Optimize D-FORM addresses with constant offset with a constant element, to
6992 include the element offset in the address directly. */
6993 else if (GET_CODE (addr) == PLUS)
6994 {
6995 rtx op0 = XEXP (addr, 0);
6996 rtx op1 = XEXP (addr, 1);
6997 rtx insn;
6998
6999 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7000 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7001 {
7002 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7003 rtx offset_rtx = GEN_INT (offset);
7004
7005 if (IN_RANGE (offset, -32768, 32767)
7006 && (scalar_size < 8 || (offset & 0x3) == 0))
7007 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7008 else
7009 {
7010 emit_move_insn (base_tmp, offset_rtx);
7011 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7012 }
7013 }
7014 else
7015 {
7016 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7017 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7018
7019 /* Note, ADDI requires the register being added to be a base
7020 register. If the register was R0, load it up into the temporary
7021 and do the add. */
7022 if (op1_reg_p
7023 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7024 {
7025 insn = gen_add3_insn (base_tmp, op1, element_offset);
7026 gcc_assert (insn != NULL_RTX);
7027 emit_insn (insn);
7028 }
7029
7030 else if (ele_reg_p
7031 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7032 {
7033 insn = gen_add3_insn (base_tmp, element_offset, op1);
7034 gcc_assert (insn != NULL_RTX);
7035 emit_insn (insn);
7036 }
7037
7038 else
7039 {
7040 emit_move_insn (base_tmp, op1);
7041 emit_insn (gen_add2_insn (base_tmp, element_offset));
7042 }
7043
7044 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7045 }
7046 }
7047
7048 else
7049 {
7050 emit_move_insn (base_tmp, addr);
7051 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7052 }
7053
7054 /* If we have a PLUS, we need to see whether the particular register class
7055 allows for D-FORM or X-FORM addressing. */
7056 if (GET_CODE (new_addr) == PLUS)
7057 {
7058 rtx op1 = XEXP (new_addr, 1);
7059 addr_mask_type addr_mask;
7060 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7061
7062 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7063 if (INT_REGNO_P (scalar_regno))
7064 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7065
7066 else if (FP_REGNO_P (scalar_regno))
7067 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7068
7069 else if (ALTIVEC_REGNO_P (scalar_regno))
7070 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7071
7072 else
7073 gcc_unreachable ();
7074
7075 if (REG_P (op1) || SUBREG_P (op1))
7076 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7077 else
7078 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7079 }
7080
7081 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7082 valid_addr_p = true;
7083
7084 else
7085 valid_addr_p = false;
7086
7087 if (!valid_addr_p)
7088 {
7089 emit_move_insn (base_tmp, new_addr);
7090 new_addr = base_tmp;
7091 }
7092
7093 return change_address (mem, scalar_mode, new_addr);
7094 }
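/* Worked example (illustrative only, not part of the build): reading
   SImode element 2 of a vector whose address is (plus r3 (const_int 16))
   gives element_offset == 8, which the PLUS case above folds into the
   displacement, so the function returns (mem:SI (plus r3 (const_int 24)))
   and no temporary is needed.  The same arithmetic in plain C:  */
#if 0
  long vec_disp = 16, elt2 = 2, scalar_size4 = 4;
  long new_disp = vec_disp + elt2 * scalar_size4;  /* == 24 */
#endif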
7095
7096 /* Split a variable vec_extract operation into the component instructions. */
7097
7098 void
7099 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7100 rtx tmp_altivec)
7101 {
7102 machine_mode mode = GET_MODE (src);
7103 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
7104 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7105 int byte_shift = exact_log2 (scalar_size);
7106
7107 gcc_assert (byte_shift >= 0);
7108
7109 /* If we are given a memory address, optimize to load just the element. We
7110 don't have to adjust the vector element number on little endian
7111 systems. */
7112 if (MEM_P (src))
7113 {
7114 int num_elements = GET_MODE_NUNITS (mode);
7115 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7116
7117 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7118 gcc_assert (REG_P (tmp_gpr));
7119 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7120 tmp_gpr, scalar_mode));
7121 return;
7122 }
7123
7124 else if (REG_P (src) || SUBREG_P (src))
7125 {
7126 int num_elements = GET_MODE_NUNITS (mode);
7127 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7128 int bit_shift = 7 - exact_log2 (num_elements);
7129 rtx element2;
7130 unsigned int dest_regno = reg_or_subregno (dest);
7131 unsigned int src_regno = reg_or_subregno (src);
7132 unsigned int element_regno = reg_or_subregno (element);
7133
7134 gcc_assert (REG_P (tmp_gpr));
7135
7136 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7137 a general purpose register. */
7138 if (TARGET_P9_VECTOR
7139 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7140 && INT_REGNO_P (dest_regno)
7141 && ALTIVEC_REGNO_P (src_regno)
7142 && INT_REGNO_P (element_regno))
7143 {
7144 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7145 rtx element_si = gen_rtx_REG (SImode, element_regno);
7146
7147 if (mode == V16QImode)
7148 emit_insn (BYTES_BIG_ENDIAN
7149 ? gen_vextublx (dest_si, element_si, src)
7150 : gen_vextubrx (dest_si, element_si, src));
7151
7152 else if (mode == V8HImode)
7153 {
7154 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7155 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7156 emit_insn (BYTES_BIG_ENDIAN
7157 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7158 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7159 }
7160
7161
7162 else
7163 {
7164 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7165 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7166 emit_insn (BYTES_BIG_ENDIAN
7167 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7168 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7169 }
7170
7171 return;
7172 }
7173
7174
7175 gcc_assert (REG_P (tmp_altivec));
7176
7177 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7178 an XOR, otherwise we need to subtract. The shift amount is chosen so
7179 that VSLO will shift the element into the upper position (adding 3
7180 converts a byte shift into a bit shift). */
7181 if (scalar_size == 8)
7182 {
7183 if (!BYTES_BIG_ENDIAN)
7184 {
7185 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7186 element2 = tmp_gpr;
7187 }
7188 else
7189 element2 = element;
7190
7191 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7192 bit. */
7193 emit_insn (gen_rtx_SET (tmp_gpr,
7194 gen_rtx_AND (DImode,
7195 gen_rtx_ASHIFT (DImode,
7196 element2,
7197 GEN_INT (6)),
7198 GEN_INT (64))));
7199 }
7200 else
7201 {
7202 if (!BYTES_BIG_ENDIAN)
7203 {
7204 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7205
7206 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7207 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7208 element2 = tmp_gpr;
7209 }
7210 else
7211 element2 = element;
7212
7213 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7214 }
7215
7216 /* Get the value into the lower byte of the Altivec register where VSLO
7217 expects it. */
7218 if (TARGET_P9_VECTOR)
7219 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7220 else if (can_create_pseudo_p ())
7221 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7222 else
7223 {
7224 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7225 emit_move_insn (tmp_di, tmp_gpr);
7226 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7227 }
7228
7229 /* Do the VSLO to get the value into the final location. */
7230 switch (mode)
7231 {
7232 case E_V2DFmode:
7233 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7234 return;
7235
7236 case E_V2DImode:
7237 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7238 return;
7239
7240 case E_V4SFmode:
7241 {
7242 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7243 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7244 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7245 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7246 tmp_altivec));
7247
7248 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7249 return;
7250 }
7251
7252 case E_V4SImode:
7253 case E_V8HImode:
7254 case E_V16QImode:
7255 {
7256 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7257 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7258 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7259 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7260 tmp_altivec));
7261 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7262 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7263 GEN_INT (64 - bits_in_element)));
7264 return;
7265 }
7266
7267 default:
7268 gcc_unreachable ();
7269 }
7270
7271 return;
7272 }
7273 else
7274 gcc_unreachable ();
7275 }
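/* Worked example (illustrative only, not part of the build): a variable
   extract of element N from a V4SImode register on little-endian, without
   the ISA 3.0 extract instructions, uses bit_shift == 7 - log2 (4) == 5
   and bits_in_element == 32.  The value of N is then splatted into a VSX
   register, VSLO shifts the source by it, and the high doubleword is
   moved to a GPR and shifted right by 64 - 32 == 32 bits.  The index
   arithmetic in plain C (N stands for the runtime index):  */
#if 0
  unsigned long n = N & 3;           /* gen_anddi3: mask element number */
  n = 3 - n;                         /* gen_subdi3: LE -> BE ordering */
  unsigned long vslo_bits = n << 5;  /* gen_ashldi3: N * 32 bits */
#endif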
7276
7277 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
7278 selects whether the alignment is ABI-mandated, optional, or
7279 both ABI-mandated and optional alignment. */
7280
7281 unsigned int
7282 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7283 {
7284 if (how != align_opt)
7285 {
7286 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7287 align = 128;
7288 }
7289
7290 if (how != align_abi)
7291 {
7292 if (TREE_CODE (type) == ARRAY_TYPE
7293 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7294 {
7295 if (align < BITS_PER_WORD)
7296 align = BITS_PER_WORD;
7297 }
7298 }
7299
7300 return align;
7301 }
7302
7303 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7304 instructions simply ignore the low bits; VSX memory instructions
7305 are aligned to 4 or 8 bytes. */
7306
7307 static bool
7308 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7309 {
7310 return (STRICT_ALIGNMENT
7311 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7312 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7313 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7314 && (int) align < VECTOR_ALIGN (mode)))));
7315 }
7316
7317 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7318
7319 bool
7320 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7321 {
7322 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7323 {
7324 if (computed != 128)
7325 {
7326 static bool warned;
7327 if (!warned && warn_psabi)
7328 {
7329 warned = true;
7330 inform (input_location,
7331 "the layout of aggregates containing vectors with"
7332 " %d-byte alignment has changed in GCC 5",
7333 computed / BITS_PER_UNIT);
7334 }
7335 }
7336 /* In current GCC there is no special case. */
7337 return false;
7338 }
7339
7340 return false;
7341 }
7342
7343 /* AIX increases natural record alignment to doubleword if the first
7344 field is an FP double, while the FP fields remain word-aligned. */
7345
7346 unsigned int
7347 rs6000_special_round_type_align (tree type, unsigned int computed,
7348 unsigned int specified)
7349 {
7350 unsigned int align = MAX (computed, specified);
7351 tree field = TYPE_FIELDS (type);
7352
7353 /* Skip all non-field decls. */
7354 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7355 field = DECL_CHAIN (field);
7356
7357 if (field != NULL && field != type)
7358 {
7359 type = TREE_TYPE (field);
7360 while (TREE_CODE (type) == ARRAY_TYPE)
7361 type = TREE_TYPE (type);
7362
7363 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7364 align = MAX (align, 64);
7365 }
7366
7367 return align;
7368 }
7369
7370 /* Darwin increases record alignment to the natural alignment of
7371 the first field. */
7372
7373 unsigned int
7374 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7375 unsigned int specified)
7376 {
7377 unsigned int align = MAX (computed, specified);
7378
7379 if (TYPE_PACKED (type))
7380 return align;
7381
7382 /* Find the first field, looking down into aggregates. */
7383 do {
7384 tree field = TYPE_FIELDS (type);
7385 /* Skip all non field decls */
7386 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7387 field = DECL_CHAIN (field);
7388 if (! field)
7389 break;
7390 /* A packed field does not contribute any extra alignment. */
7391 if (DECL_PACKED (field))
7392 return align;
7393 type = TREE_TYPE (field);
7394 while (TREE_CODE (type) == ARRAY_TYPE)
7395 type = TREE_TYPE (type);
7396 } while (AGGREGATE_TYPE_P (type));
7397
7398 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7399 align = MAX (align, TYPE_ALIGN (type));
7400
7401 return align;
7402 }
7403
7404 /* Return 1 for an operand in small memory on V.4/eabi. */
7405
7406 int
7407 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7408 machine_mode mode ATTRIBUTE_UNUSED)
7409 {
7410 #if TARGET_ELF
7411 rtx sym_ref;
7412
7413 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7414 return 0;
7415
7416 if (DEFAULT_ABI != ABI_V4)
7417 return 0;
7418
7419 if (SYMBOL_REF_P (op))
7420 sym_ref = op;
7421
7422 else if (GET_CODE (op) != CONST
7423 || GET_CODE (XEXP (op, 0)) != PLUS
7424 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7425 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7426 return 0;
7427
7428 else
7429 {
7430 rtx sum = XEXP (op, 0);
7431 HOST_WIDE_INT summand;
7432
7433 /* We have to be careful here, because it is the referenced address
7434 that must be 32k from _SDA_BASE_, not just the symbol. */
7435 summand = INTVAL (XEXP (sum, 1));
7436 if (summand < 0 || summand > g_switch_value)
7437 return 0;
7438
7439 sym_ref = XEXP (sum, 0);
7440 }
7441
7442 return SYMBOL_REF_SMALL_P (sym_ref);
7443 #else
7444 return 0;
7445 #endif
7446 }
7447
7448 /* Return true if either operand is a general purpose register. */
7449
7450 bool
7451 gpr_or_gpr_p (rtx op0, rtx op1)
7452 {
7453 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7454 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7455 }
7456
7457 /* Return true if this is a direct move operation between GPR registers and
7458 floating point/VSX registers. */
7459
7460 bool
7461 direct_move_p (rtx op0, rtx op1)
7462 {
7463 int regno0, regno1;
7464
7465 if (!REG_P (op0) || !REG_P (op1))
7466 return false;
7467
7468 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7469 return false;
7470
7471 regno0 = REGNO (op0);
7472 regno1 = REGNO (op1);
7473 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7474 return false;
7475
7476 if (INT_REGNO_P (regno0))
7477 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7478
7479 else if (INT_REGNO_P (regno1))
7480 {
7481 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7482 return true;
7483
7484 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7485 return true;
7486 }
7487
7488 return false;
7489 }
7490
7491 /* Return true if the OFFSET is valid for the quad address instructions that
7492 use d-form (register + offset) addressing. */
7493
7494 static inline bool
7495 quad_address_offset_p (HOST_WIDE_INT offset)
7496 {
7497 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7498 }
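/* For instance (illustrative only, not part of the build): 0, 16 and
   32752 are accepted quad offsets, while 8 fails the low-nibble test and
   32768 falls outside the signed 16-bit range:  */
#if 0
  gcc_checking_assert (quad_address_offset_p (32752));
  gcc_checking_assert (!quad_address_offset_p (8));
  gcc_checking_assert (!quad_address_offset_p (32768));
#endif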
7499
7500 /* Return true if ADDR is an acceptable address for a quad memory
7501 operation of mode MODE (either LQ/STQ for general purpose registers, or
7502 LXV/STXV for vector registers under ISA 3.0). STRICT is true if the
7503 base register must satisfy the strict checks that apply once reload
7504 has completed. */
7505
7506 bool
7507 quad_address_p (rtx addr, machine_mode mode, bool strict)
7508 {
7509 rtx op0, op1;
7510
7511 if (GET_MODE_SIZE (mode) != 16)
7512 return false;
7513
7514 if (legitimate_indirect_address_p (addr, strict))
7515 return true;
7516
7517 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7518 return false;
7519
7520 if (GET_CODE (addr) != PLUS)
7521 return false;
7522
7523 op0 = XEXP (addr, 0);
7524 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7525 return false;
7526
7527 op1 = XEXP (addr, 1);
7528 if (!CONST_INT_P (op1))
7529 return false;
7530
7531 return quad_address_offset_p (INTVAL (op1));
7532 }
7533
7534 /* Return true if this is a load or store quad operation. This function does
7535 not handle the atomic quad memory instructions. */
7536
7537 bool
7538 quad_load_store_p (rtx op0, rtx op1)
7539 {
7540 bool ret;
7541
7542 if (!TARGET_QUAD_MEMORY)
7543 ret = false;
7544
7545 else if (REG_P (op0) && MEM_P (op1))
7546 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7547 && quad_memory_operand (op1, GET_MODE (op1))
7548 && !reg_overlap_mentioned_p (op0, op1));
7549
7550 else if (MEM_P (op0) && REG_P (op1))
7551 ret = (quad_memory_operand (op0, GET_MODE (op0))
7552 && quad_int_reg_operand (op1, GET_MODE (op1)));
7553
7554 else
7555 ret = false;
7556
7557 if (TARGET_DEBUG_ADDR)
7558 {
7559 fprintf (stderr, "\n========== quad_load_store_p, return %s\n",
7560 ret ? "true" : "false");
7561 debug_rtx (gen_rtx_SET (op0, op1));
7562 }
7563
7564 return ret;
7565 }
7566
7567 /* Given an address, return a constant offset term if one exists. */
7568
7569 static rtx
7570 address_offset (rtx op)
7571 {
7572 if (GET_CODE (op) == PRE_INC
7573 || GET_CODE (op) == PRE_DEC)
7574 op = XEXP (op, 0);
7575 else if (GET_CODE (op) == PRE_MODIFY
7576 || GET_CODE (op) == LO_SUM)
7577 op = XEXP (op, 1);
7578
7579 if (GET_CODE (op) == CONST)
7580 op = XEXP (op, 0);
7581
7582 if (GET_CODE (op) == PLUS)
7583 op = XEXP (op, 1);
7584
7585 if (CONST_INT_P (op))
7586 return op;
7587
7588 return NULL_RTX;
7589 }
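/* Examples (illustrative only):
     (plus (reg) (const_int 16))                        -> (const_int 16)
     (lo_sum (reg) (const (plus (symbol_ref) (const_int 8))))
                                                        -> (const_int 8)
     (reg)                                              -> NULL_RTX  */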
7590
7591 /* Return true if the MEM operand is a memory operand suitable for use
7592 with a (full width, possibly multiple) gpr load/store. On
7593 powerpc64 this means the offset must be divisible by 4.
7594 Implements 'Y' constraint.
7595
7596 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7597 a constraint function we know the operand has satisfied a suitable
7598 memory predicate.
7599
7600 Offsetting a lo_sum should not be allowed, except where we know by
7601 alignment that a 32k boundary is not crossed. Note that by
7602 "offsetting" here we mean a further offset to access parts of the
7603 MEM. It's fine to have a lo_sum where the inner address is offset
7604 from a sym, since the same sym+offset will appear in the high part
7605 of the address calculation. */
7606
7607 bool
7608 mem_operand_gpr (rtx op, machine_mode mode)
7609 {
7610 unsigned HOST_WIDE_INT offset;
7611 int extra;
7612 rtx addr = XEXP (op, 0);
7613
7614 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7615 if (TARGET_UPDATE
7616 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7617 && mode_supports_pre_incdec_p (mode)
7618 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7619 return true;
7620
7621 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7622 if (!rs6000_offsettable_memref_p (op, mode, false))
7623 return false;
7624
7625 op = address_offset (addr);
7626 if (op == NULL_RTX)
7627 return true;
7628
7629 offset = INTVAL (op);
7630 if (TARGET_POWERPC64 && (offset & 3) != 0)
7631 return false;
7632
7633 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7634 if (extra < 0)
7635 extra = 0;
7636
7637 if (GET_CODE (addr) == LO_SUM)
7638 /* For lo_sum addresses, we must allow any offset except one that
7639 causes a wrap, so test only the low 16 bits. */
7640 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7641
7642 return offset + 0x8000 < 0x10000u - extra;
7643 }
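/* Worked example (illustrative only, not part of the build): for a
   TImode access on powerpc64 the value spans two doublewords, so
   extra == 16 - 8 == 8 and the final test accepts offsets in
   [-32768, 32767 - 8] whose low 2 bits are clear; the largest accepted
   displacement is therefore 32756.  The lo_sum adjustment above
   sign-extends the low 16 bits, e.g. in plain C:  */
#if 0
static long
sign_extend_low16 (unsigned long offset)
{
  long low = (long) ((offset & 0xffff) ^ 0x8000);
  return low - 0x8000;
}
/* sign_extend_low16 (0x12345) == 0x2345,
   sign_extend_low16 (0x18000) == -0x8000.  */
#endif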
7644
7645 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7646 enforce an offset divisible by 4 even for 32-bit. */
7647
7648 bool
7649 mem_operand_ds_form (rtx op, machine_mode mode)
7650 {
7651 unsigned HOST_WIDE_INT offset;
7652 int extra;
7653 rtx addr = XEXP (op, 0);
7654
7655 if (!offsettable_address_p (false, mode, addr))
7656 return false;
7657
7658 op = address_offset (addr);
7659 if (op == NULL_RTX)
7660 return true;
7661
7662 offset = INTVAL (op);
7663 if ((offset & 3) != 0)
7664 return false;
7665
7666 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7667 if (extra < 0)
7668 extra = 0;
7669
7670 if (GET_CODE (addr) == LO_SUM)
7671 /* For lo_sum addresses, we must allow any offset except one that
7672 causes a wrap, so test only the low 16 bits. */
7673 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7674
7675 return offset + 0x8000 < 0x10000u - extra;
7676 }
7677 \f
7678 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7679
7680 static bool
7681 reg_offset_addressing_ok_p (machine_mode mode)
7682 {
7683 switch (mode)
7684 {
7685 case E_V16QImode:
7686 case E_V8HImode:
7687 case E_V4SFmode:
7688 case E_V4SImode:
7689 case E_V2DFmode:
7690 case E_V2DImode:
7691 case E_V1TImode:
7692 case E_TImode:
7693 case E_TFmode:
7694 case E_KFmode:
7695 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7696 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7697 a vector mode, if we want to use the VSX registers to move it around,
7698 we need to restrict ourselves to reg+reg addressing. Similarly for
7699 IEEE 128-bit floating point that is passed in a single vector
7700 register. */
7701 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7702 return mode_supports_dq_form (mode);
7703 break;
7704
7705 case E_SDmode:
7706 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7707 addressing for the LFIWZX and STFIWX instructions. */
7708 if (TARGET_NO_SDMODE_STACK)
7709 return false;
7710 break;
7711
7712 default:
7713 break;
7714 }
7715
7716 return true;
7717 }
7718
7719 static bool
7720 virtual_stack_registers_memory_p (rtx op)
7721 {
7722 int regnum;
7723
7724 if (REG_P (op))
7725 regnum = REGNO (op);
7726
7727 else if (GET_CODE (op) == PLUS
7728 && REG_P (XEXP (op, 0))
7729 && CONST_INT_P (XEXP (op, 1)))
7730 regnum = REGNO (XEXP (op, 0));
7731
7732 else
7733 return false;
7734
7735 return (regnum >= FIRST_VIRTUAL_REGISTER
7736 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7737 }
7738
7739 /* Return true if a MODE-sized memory access to OP plus OFFSET
7740 is known not to straddle a 32k boundary. This function is used
7741 to determine whether -mcmodel=medium code can use TOC pointer
7742 relative addressing for OP. This means the alignment of the TOC
7743 pointer must also be taken into account, and unfortunately that is
7744 only 8 bytes. */
7745
7746 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7747 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7748 #endif
7749
7750 static bool
7751 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7752 machine_mode mode)
7753 {
7754 tree decl;
7755 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7756
7757 if (!SYMBOL_REF_P (op))
7758 return false;
7759
7760 /* ISA 3.0 vector d-form addressing is restricted, don't allow
7761 SYMBOL_REF. */
7762 if (mode_supports_dq_form (mode))
7763 return false;
7764
7765 dsize = GET_MODE_SIZE (mode);
7766 decl = SYMBOL_REF_DECL (op);
7767 if (!decl)
7768 {
7769 if (dsize == 0)
7770 return false;
7771
7772 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7773 replacing memory addresses with an anchor plus offset. We
7774 could find the decl by rummaging around in the block->objects
7775 VEC for the given offset but that seems like too much work. */
7776 dalign = BITS_PER_UNIT;
7777 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7778 && SYMBOL_REF_ANCHOR_P (op)
7779 && SYMBOL_REF_BLOCK (op) != NULL)
7780 {
7781 struct object_block *block = SYMBOL_REF_BLOCK (op);
7782
7783 dalign = block->alignment;
7784 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7785 }
7786 else if (CONSTANT_POOL_ADDRESS_P (op))
7787 {
7788 /* It would be nice to have get_pool_align().. */
7789 machine_mode cmode = get_pool_mode (op);
7790
7791 dalign = GET_MODE_ALIGNMENT (cmode);
7792 }
7793 }
7794 else if (DECL_P (decl))
7795 {
7796 dalign = DECL_ALIGN (decl);
7797
7798 if (dsize == 0)
7799 {
7800 /* Allow BLKmode when the entire object is known to not
7801 cross a 32k boundary. */
7802 if (!DECL_SIZE_UNIT (decl))
7803 return false;
7804
7805 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7806 return false;
7807
7808 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7809 if (dsize > 32768)
7810 return false;
7811
7812 dalign /= BITS_PER_UNIT;
7813 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7814 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7815 return dalign >= dsize;
7816 }
7817 }
7818 else
7819 gcc_unreachable ();
7820
7821 /* Find how many bits of the alignment we know for this access. */
7822 dalign /= BITS_PER_UNIT;
7823 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7824 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7825 mask = dalign - 1;
7826 lsb = offset & -offset;
7827 mask &= lsb - 1;
7828 dalign = mask + 1;
7829
7830 return dalign >= dsize;
7831 }
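/* Worked example for the mask arithmetic above (illustrative only, not
   part of the build): a decl aligned to 16 bytes accessed at OFFSET 40
   only guarantees 8-byte alignment, because the lowest set bit of the
   offset limits what we know:  */
#if 0
  unsigned long dalign = 16, offset = 40;
  unsigned long mask = dalign - 1;       /* 15 */
  unsigned long lsb = offset & -offset;  /* 8  */
  mask &= lsb - 1;                       /* 15 & 7 == 7 */
  dalign = mask + 1;                     /* 8  */
#endif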
7832
7833 static bool
7834 constant_pool_expr_p (rtx op)
7835 {
7836 rtx base, offset;
7837
7838 split_const (op, &base, &offset);
7839 return (SYMBOL_REF_P (base)
7840 && CONSTANT_POOL_ADDRESS_P (base)
7841 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7842 }
7843
7844 /* These are only used to pass through from print_operand/print_operand_address
7845 to rs6000_output_addr_const_extra over the intervening function
7846 output_addr_const which is not target code. */
7847 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7848
7849 /* Return true if OP is a toc pointer relative address (the output
7850 of create_TOC_reference). If STRICT, do not match non-split
7851 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7852 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7853 TOCREL_OFFSET_RET respectively. */
7854
7855 bool
7856 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7857 const_rtx *tocrel_offset_ret)
7858 {
7859 if (!TARGET_TOC)
7860 return false;
7861
7862 if (TARGET_CMODEL != CMODEL_SMALL)
7863 {
7864 /* When strict, ensure we have everything tidy. */
7865 if (strict
7866 && !(GET_CODE (op) == LO_SUM
7867 && REG_P (XEXP (op, 0))
7868 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7869 return false;
7870
7871 /* When not strict, allow non-split TOC addresses and also allow
7872 (lo_sum (high ..)) TOC addresses created during reload. */
7873 if (GET_CODE (op) == LO_SUM)
7874 op = XEXP (op, 1);
7875 }
7876
7877 const_rtx tocrel_base = op;
7878 const_rtx tocrel_offset = const0_rtx;
7879
7880 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7881 {
7882 tocrel_base = XEXP (op, 0);
7883 tocrel_offset = XEXP (op, 1);
7884 }
7885
7886 if (tocrel_base_ret)
7887 *tocrel_base_ret = tocrel_base;
7888 if (tocrel_offset_ret)
7889 *tocrel_offset_ret = tocrel_offset;
7890
7891 return (GET_CODE (tocrel_base) == UNSPEC
7892 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7893 && REG_P (XVECEXP (tocrel_base, 0, 1))
7894 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7895 }
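/* Example (illustrative only): after splitting, a -mcmodel=medium TOC
   reference has roughly the shape
       (lo_sum (reg) (unspec [(symbol_ref) (reg TOC)] UNSPEC_TOCREL))
   possibly wrapped in (plus ... (const_int N)); this function peels the
   LO_SUM and the PLUS and then checks that the UNSPEC's second operand
   is the TOC register.  */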
7896
7897 /* Return true if X is a constant pool address, and also for cmodel=medium
7898 if X is a toc-relative address known to be offsettable within MODE. */
7899
7900 bool
7901 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7902 bool strict)
7903 {
7904 const_rtx tocrel_base, tocrel_offset;
7905 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7906 && (TARGET_CMODEL != CMODEL_MEDIUM
7907 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7908 || mode == QImode
7909 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7910 INTVAL (tocrel_offset), mode)));
7911 }
7912
7913 static bool
7914 legitimate_small_data_p (machine_mode mode, rtx x)
7915 {
7916 return (DEFAULT_ABI == ABI_V4
7917 && !flag_pic && !TARGET_TOC
7918 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7919 && small_data_operand (x, mode));
7920 }
7921
7922 bool
7923 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7924 bool strict, bool worst_case)
7925 {
7926 unsigned HOST_WIDE_INT offset;
7927 unsigned int extra;
7928
7929 if (GET_CODE (x) != PLUS)
7930 return false;
7931 if (!REG_P (XEXP (x, 0)))
7932 return false;
7933 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7934 return false;
7935 if (mode_supports_dq_form (mode))
7936 return quad_address_p (x, mode, strict);
7937 if (!reg_offset_addressing_ok_p (mode))
7938 return virtual_stack_registers_memory_p (x);
7939 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7940 return true;
7941 if (!CONST_INT_P (XEXP (x, 1)))
7942 return false;
7943
7944 offset = INTVAL (XEXP (x, 1));
7945 extra = 0;
7946 switch (mode)
7947 {
7948 case E_DFmode:
7949 case E_DDmode:
7950 case E_DImode:
7951 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7952 addressing. */
7953 if (VECTOR_MEM_VSX_P (mode))
7954 return false;
7955
7956 if (!worst_case)
7957 break;
7958 if (!TARGET_POWERPC64)
7959 extra = 4;
7960 else if (offset & 3)
7961 return false;
7962 break;
7963
7964 case E_TFmode:
7965 case E_IFmode:
7966 case E_KFmode:
7967 case E_TDmode:
7968 case E_TImode:
7969 case E_PTImode:
7970 extra = 8;
7971 if (!worst_case)
7972 break;
7973 if (!TARGET_POWERPC64)
7974 extra = 12;
7975 else if (offset & 3)
7976 return false;
7977 break;
7978
7979 default:
7980 break;
7981 }
7982
7983 offset += 0x8000;
7984 return offset < 0x10000 - extra;
7985 }
7986
7987 bool
7988 legitimate_indexed_address_p (rtx x, int strict)
7989 {
7990 rtx op0, op1;
7991
7992 if (GET_CODE (x) != PLUS)
7993 return false;
7994
7995 op0 = XEXP (x, 0);
7996 op1 = XEXP (x, 1);
7997
7998 return (REG_P (op0) && REG_P (op1)
7999 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8000 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8001 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8002 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8003 }
8004
8005 bool
8006 avoiding_indexed_address_p (machine_mode mode)
8007 {
8008 /* Avoid indexed addressing for modes that have non-indexed
8009 load/store instruction forms. */
8010 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8011 }
8012
8013 bool
8014 legitimate_indirect_address_p (rtx x, int strict)
8015 {
8016 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8017 }
8018
8019 bool
8020 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8021 {
8022 if (!TARGET_MACHO || !flag_pic
8023 || mode != SImode || !MEM_P (x))
8024 return false;
8025 x = XEXP (x, 0);
8026
8027 if (GET_CODE (x) != LO_SUM)
8028 return false;
8029 if (!REG_P (XEXP (x, 0)))
8030 return false;
8031 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8032 return false;
8033 x = XEXP (x, 1);
8034
8035 return CONSTANT_P (x);
8036 }
8037
8038 static bool
8039 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8040 {
8041 if (GET_CODE (x) != LO_SUM)
8042 return false;
8043 if (!REG_P (XEXP (x, 0)))
8044 return false;
8045 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8046 return false;
8047 /* Quad-word addresses are restricted, and we can't use LO_SUM. */
8048 if (mode_supports_dq_form (mode))
8049 return false;
8050 x = XEXP (x, 1);
8051
8052 if (TARGET_ELF || TARGET_MACHO)
8053 {
8054 bool large_toc_ok;
8055
8056 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8057 return false;
8058 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, which is usually
8059 called via push_reload from the old reload pass.
8060 LEGITIMIZE_RELOAD_ADDRESS recognizes some LO_SUM addresses as
8061 valid although this function says the opposite. In most cases
8062 LRA's transformations generate correct code for address reloads;
8063 only a few LO_SUM cases defeat it. So we need to add code here
8064 saying that those addresses are still valid. */
8065 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8066 && small_toc_ref (x, VOIDmode));
8067 if (TARGET_TOC && ! large_toc_ok)
8068 return false;
8069 if (GET_MODE_NUNITS (mode) != 1)
8070 return false;
8071 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8072 && !(/* ??? Assume floating point reg based on mode? */
8073 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8074 return false;
8075
8076 return CONSTANT_P (x) || large_toc_ok;
8077 }
8078
8079 return false;
8080 }
8081
8082
8083 /* Try machine-dependent ways of modifying an illegitimate address
8084 to be legitimate. If we find one, return the new, valid address.
8085 This is used from only one place: `memory_address' in explow.c.
8086
8087 OLDX is the address as it was before break_out_memory_refs was
8088 called. In some cases it is useful to look at this to decide what
8089 needs to be done.
8090
8091 It is always safe for this function to do nothing. It exists to
8092 recognize opportunities to optimize the output.
8093
8094 On RS/6000, first check for the sum of a register with a constant
8095 integer that is out of range. If so, generate code to add the
8096 constant with the low-order 16 bits masked to the register and force
8097 this result into another register (this can be done with `cau').
8098 Then generate an address of REG+(CONST&0xffff), allowing for the
8099 possibility of bit 16 being a one.
8100
8101 Then check for the sum of a register and something not constant, try to
8102 load the other things into a register and return the sum. */
8103
8104 static rtx
8105 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8106 machine_mode mode)
8107 {
8108 unsigned int extra;
8109
8110 if (!reg_offset_addressing_ok_p (mode)
8111 || mode_supports_dq_form (mode))
8112 {
8113 if (virtual_stack_registers_memory_p (x))
8114 return x;
8115
8116 /* In theory we should not be seeing addresses of the form reg+0,
8117 but just in case it is generated, optimize it away. */
8118 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8119 return force_reg (Pmode, XEXP (x, 0));
8120
8121 /* For TImode with load/store quad, restrict addresses to just a single
8122 pointer, so it works with both GPRs and VSX registers. */
8123 /* Make sure both operands are registers. */
8124 else if (GET_CODE (x) == PLUS
8125 && (mode != TImode || !TARGET_VSX))
8126 return gen_rtx_PLUS (Pmode,
8127 force_reg (Pmode, XEXP (x, 0)),
8128 force_reg (Pmode, XEXP (x, 1)));
8129 else
8130 return force_reg (Pmode, x);
8131 }
8132 if (SYMBOL_REF_P (x))
8133 {
8134 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8135 if (model != 0)
8136 return rs6000_legitimize_tls_address (x, model);
8137 }
8138
8139 extra = 0;
8140 switch (mode)
8141 {
8142 case E_TFmode:
8143 case E_TDmode:
8144 case E_TImode:
8145 case E_PTImode:
8146 case E_IFmode:
8147 case E_KFmode:
8148 /* As in legitimate_offset_address_p we do not assume
8149 worst-case. The mode here is just a hint as to the registers
8150 used. A TImode is usually in gprs, but may actually be in
8151 fprs. Leave worst-case scenario for reload to handle via
8152 insn constraints. PTImode is only GPRs. */
8153 extra = 8;
8154 break;
8155 default:
8156 break;
8157 }
8158
8159 if (GET_CODE (x) == PLUS
8160 && REG_P (XEXP (x, 0))
8161 && CONST_INT_P (XEXP (x, 1))
8162 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8163 >= 0x10000 - extra))
8164 {
8165 HOST_WIDE_INT high_int, low_int;
8166 rtx sum;
8167 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8168 if (low_int >= 0x8000 - extra)
8169 low_int = 0;
8170 high_int = INTVAL (XEXP (x, 1)) - low_int;
8171 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8172 GEN_INT (high_int)), 0);
8173 return plus_constant (Pmode, sum, low_int);
8174 }
8175 else if (GET_CODE (x) == PLUS
8176 && REG_P (XEXP (x, 0))
8177 && !CONST_INT_P (XEXP (x, 1))
8178 && GET_MODE_NUNITS (mode) == 1
8179 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8180 || (/* ??? Assume floating point reg based on mode? */
8181 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8182 && !avoiding_indexed_address_p (mode))
8183 {
8184 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8185 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8186 }
8187 else if ((TARGET_ELF
8188 #if TARGET_MACHO
8189 || !MACHO_DYNAMIC_NO_PIC_P
8190 #endif
8191 )
8192 && TARGET_32BIT
8193 && TARGET_NO_TOC
8194 && !flag_pic
8195 && !CONST_INT_P (x)
8196 && !CONST_WIDE_INT_P (x)
8197 && !CONST_DOUBLE_P (x)
8198 && CONSTANT_P (x)
8199 && GET_MODE_NUNITS (mode) == 1
8200 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8201 || (/* ??? Assume floating point reg based on mode? */
8202 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8203 {
8204 rtx reg = gen_reg_rtx (Pmode);
8205 if (TARGET_ELF)
8206 emit_insn (gen_elf_high (reg, x));
8207 else
8208 emit_insn (gen_macho_high (reg, x));
8209 return gen_rtx_LO_SUM (Pmode, reg, x);
8210 }
8211 else if (TARGET_TOC
8212 && SYMBOL_REF_P (x)
8213 && constant_pool_expr_p (x)
8214 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8215 return create_TOC_reference (x, NULL_RTX);
8216 else
8217 return x;
8218 }
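/* Worked example for the high/low split above (illustrative only, not
   part of the build): for reg + 0x12345 the split yields
   high_int == 0x10000 and low_int == 0x2345, i.e. one addis-style add of
   the high part followed by a 16-bit displacement; for reg + 0x18000 the
   low 16 bits sign-extend to -0x8000, so high_int == 0x20000 and
   low_int == -0x8000.  In plain C:  */
#if 0
  long val = 0x12345, extra0 = 0;
  long low_int = ((val & 0xffff) ^ 0x8000) - 0x8000;  /* 0x2345 */
  if (low_int >= 0x8000 - extra0)
    low_int = 0;
  long high_int = val - low_int;                      /* 0x10000 */
#endif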
8219
8220 /* Debug version of rs6000_legitimize_address. */
8221 static rtx
8222 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8223 {
8224 rtx ret;
8225 rtx_insn *insns;
8226
8227 start_sequence ();
8228 ret = rs6000_legitimize_address (x, oldx, mode);
8229 insns = get_insns ();
8230 end_sequence ();
8231
8232 if (ret != x)
8233 {
8234 fprintf (stderr,
8235 "\nrs6000_legitimize_address: mode %s, old code %s, "
8236 "new code %s, modified\n",
8237 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8238 GET_RTX_NAME (GET_CODE (ret)));
8239
8240 fprintf (stderr, "Original address:\n");
8241 debug_rtx (x);
8242
8243 fprintf (stderr, "oldx:\n");
8244 debug_rtx (oldx);
8245
8246 fprintf (stderr, "New address:\n");
8247 debug_rtx (ret);
8248
8249 if (insns)
8250 {
8251 fprintf (stderr, "Insns added:\n");
8252 debug_rtx_list (insns, 20);
8253 }
8254 }
8255 else
8256 {
8257 fprintf (stderr,
8258 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8259 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8260
8261 debug_rtx (x);
8262 }
8263
8264 if (insns)
8265 emit_insn (insns);
8266
8267 return ret;
8268 }
8269
8270 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8271 We need to emit DTP-relative relocations. */
8272
8273 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8274 static void
8275 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8276 {
8277 switch (size)
8278 {
8279 case 4:
8280 fputs ("\t.long\t", file);
8281 break;
8282 case 8:
8283 fputs (DOUBLE_INT_ASM_OP, file);
8284 break;
8285 default:
8286 gcc_unreachable ();
8287 }
8288 output_addr_const (file, x);
8289 if (TARGET_ELF)
8290 fputs ("@dtprel+0x8000", file);
8291 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8292 {
8293 switch (SYMBOL_REF_TLS_MODEL (x))
8294 {
8295 case 0:
8296 break;
8297 case TLS_MODEL_LOCAL_EXEC:
8298 fputs ("@le", file);
8299 break;
8300 case TLS_MODEL_INITIAL_EXEC:
8301 fputs ("@ie", file);
8302 break;
8303 case TLS_MODEL_GLOBAL_DYNAMIC:
8304 case TLS_MODEL_LOCAL_DYNAMIC:
8305 fputs ("@m", file);
8306 break;
8307 default:
8308 gcc_unreachable ();
8309 }
8310 }
8311 }
8312
8313 /* Return true if X is a symbol that refers to real (rather than emulated)
8314 TLS. */
8315
8316 static bool
8317 rs6000_real_tls_symbol_ref_p (rtx x)
8318 {
8319 return (SYMBOL_REF_P (x)
8320 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8321 }
8322
8323 /* In the name of slightly smaller debug output, and to cater to
8324 general assembler lossage, recognize various UNSPEC sequences
8325 and turn them back into a direct symbol reference. */
8326
8327 static rtx
8328 rs6000_delegitimize_address (rtx orig_x)
8329 {
8330 rtx x, y, offset;
8331
8332 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8333 orig_x = XVECEXP (orig_x, 0, 0);
8334
8335 orig_x = delegitimize_mem_from_attrs (orig_x);
8336
8337 x = orig_x;
8338 if (MEM_P (x))
8339 x = XEXP (x, 0);
8340
8341 y = x;
8342 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8343 y = XEXP (y, 1);
8344
8345 offset = NULL_RTX;
8346 if (GET_CODE (y) == PLUS
8347 && GET_MODE (y) == Pmode
8348 && CONST_INT_P (XEXP (y, 1)))
8349 {
8350 offset = XEXP (y, 1);
8351 y = XEXP (y, 0);
8352 }
8353
8354 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8355 {
8356 y = XVECEXP (y, 0, 0);
8357
8358 #ifdef HAVE_AS_TLS
8359 /* Do not associate thread-local symbols with the original
8360 constant pool symbol. */
8361 if (TARGET_XCOFF
8362 && SYMBOL_REF_P (y)
8363 && CONSTANT_POOL_ADDRESS_P (y)
8364 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8365 return orig_x;
8366 #endif
8367
8368 if (offset != NULL_RTX)
8369 y = gen_rtx_PLUS (Pmode, y, offset);
8370 if (!MEM_P (orig_x))
8371 return y;
8372 else
8373 return replace_equiv_address_nv (orig_x, y);
8374 }
8375
8376 if (TARGET_MACHO
8377 && GET_CODE (orig_x) == LO_SUM
8378 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8379 {
8380 y = XEXP (XEXP (orig_x, 1), 0);
8381 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8382 return XVECEXP (y, 0, 0);
8383 }
8384
8385 return orig_x;
8386 }
8387
8388 /* Return true if X shouldn't be emitted into the debug info.
8389 The linker doesn't like .toc section references from
8390 .debug_* sections, so reject .toc section symbols. */
8391
8392 static bool
8393 rs6000_const_not_ok_for_debug_p (rtx x)
8394 {
8395 if (GET_CODE (x) == UNSPEC)
8396 return true;
8397 if (SYMBOL_REF_P (x)
8398 && CONSTANT_POOL_ADDRESS_P (x))
8399 {
8400 rtx c = get_pool_constant (x);
8401 machine_mode cmode = get_pool_mode (x);
8402 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8403 return true;
8404 }
8405
8406 return false;
8407 }
8408
8409 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8410
8411 static bool
8412 rs6000_legitimate_combined_insn (rtx_insn *insn)
8413 {
8414 int icode = INSN_CODE (insn);
8415
8416 /* Reject creating doloop insns. Combine should not be allowed
8417 to create these for a number of reasons:
8418 1) In a nested loop, if combine creates one of these in an
8419 outer loop and the register allocator happens to allocate ctr
8420 to the outer loop insn, then the inner loop can't use ctr.
8421 Inner loops ought to be more highly optimized.
8422 2) Combine often wants to create one of these from what was
8423 originally a three insn sequence, first combining the three
8424 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8425 allocated ctr, the splitter takes us back to the three insn
8426 sequence. It's better to stop combine at the two insn
8427 sequence.
8428 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8429 insns, the register allocator sometimes uses floating point
8430 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8431 jump insn and output reloads are not implemented for jumps,
8432 the ctrsi/ctrdi splitters need to handle all possible cases.
8433 That's a pain, and it gets to be seriously difficult when a
8434 splitter that runs after reload needs memory to transfer from
8435 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8436 for the difficult case. It's better to not create problems
8437 in the first place. */
8438 if (icode != CODE_FOR_nothing
8439 && (icode == CODE_FOR_bdz_si
8440 || icode == CODE_FOR_bdz_di
8441 || icode == CODE_FOR_bdnz_si
8442 || icode == CODE_FOR_bdnz_di
8443 || icode == CODE_FOR_bdztf_si
8444 || icode == CODE_FOR_bdztf_di
8445 || icode == CODE_FOR_bdnztf_si
8446 || icode == CODE_FOR_bdnztf_di))
8447 return false;
8448
8449 return true;
8450 }
8451
8452 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8453
8454 static GTY(()) rtx rs6000_tls_symbol;
8455 static rtx
8456 rs6000_tls_get_addr (void)
8457 {
8458 if (!rs6000_tls_symbol)
8459 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8460
8461 return rs6000_tls_symbol;
8462 }
8463
8464 /* Construct the SYMBOL_REF for TLS GOT references. */
8465
8466 static GTY(()) rtx rs6000_got_symbol;
8467 static rtx
8468 rs6000_got_sym (void)
8469 {
8470 if (!rs6000_got_symbol)
8471 {
8472 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8473 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8474 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8475 }
8476
8477 return rs6000_got_symbol;
8478 }
8479
8480 /* AIX Thread-Local Address support. */
8481
8482 static rtx
8483 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8484 {
8485 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8486 const char *name;
8487 char *tlsname;
8488
8489 name = XSTR (addr, 0);
8490 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
8491 or the symbol will be placed in the TLS private data section. */
8492 if (name[strlen (name) - 1] != ']'
8493 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8494 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8495 {
8496 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8497 strcpy (tlsname, name);
8498 strcat (tlsname,
8499 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8500 tlsaddr = copy_rtx (addr);
8501 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8502 }
8503 else
8504 tlsaddr = addr;
8505
8506 /* Place addr into TOC constant pool. */
8507 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8508
8509 /* Output the TOC entry and create the MEM referencing the value. */
8510 if (constant_pool_expr_p (XEXP (sym, 0))
8511 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8512 {
8513 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8514 mem = gen_const_mem (Pmode, tocref);
8515 set_mem_alias_set (mem, get_TOC_alias_set ());
8516 }
8517 else
8518 return sym;
8519
8520 /* Use global-dynamic for local-dynamic. */
8521 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8522 || model == TLS_MODEL_LOCAL_DYNAMIC)
8523 {
8524 /* Create new TOC reference for @m symbol. */
8525 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8526 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8527 strcpy (tlsname, "*LCM");
8528 strcat (tlsname, name + 3);
8529 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8530 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8531 tocref = create_TOC_reference (modaddr, NULL_RTX);
8532 rtx modmem = gen_const_mem (Pmode, tocref);
8533 set_mem_alias_set (modmem, get_TOC_alias_set ());
8534
8535 rtx modreg = gen_reg_rtx (Pmode);
8536 emit_insn (gen_rtx_SET (modreg, modmem));
8537
8538 tmpreg = gen_reg_rtx (Pmode);
8539 emit_insn (gen_rtx_SET (tmpreg, mem));
8540
8541 dest = gen_reg_rtx (Pmode);
8542 if (TARGET_32BIT)
8543 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8544 else
8545 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8546 return dest;
8547 }
8548 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8549 else if (TARGET_32BIT)
8550 {
8551 tlsreg = gen_reg_rtx (SImode);
8552 emit_insn (gen_tls_get_tpointer (tlsreg));
8553 }
8554 else
8555 tlsreg = gen_rtx_REG (DImode, 13);
8556
8557 /* Load the TOC value into temporary register. */
8558 tmpreg = gen_reg_rtx (Pmode);
8559 emit_insn (gen_rtx_SET (tmpreg, mem));
8560 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8561 gen_rtx_MINUS (Pmode, addr, tlsreg));
8562
8563 /* Add TOC symbol value to TLS pointer. */
8564 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8565
8566 return dest;
8567 }
8568
8569 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8570 __tls_get_addr call. */
8571
8572 void
8573 rs6000_output_tlsargs (rtx *operands)
8574 {
8575 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8576 rtx op[3];
8577
8578 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8579 op[0] = operands[0];
8580 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8581 op[1] = XVECEXP (operands[2], 0, 0);
8582 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8583 {
8584 /* The GOT register. */
8585 op[2] = XVECEXP (operands[2], 0, 1);
8586 if (TARGET_CMODEL != CMODEL_SMALL)
8587 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8588 "addi %0,%0,%1@got@tlsgd@l", op);
8589 else
8590 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8591 }
8592 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8593 {
8594 if (TARGET_CMODEL != CMODEL_SMALL)
8595 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8596 "addi %0,%0,%&@got@tlsld@l", op);
8597 else
8598 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8599 }
8600 else
8601 gcc_unreachable ();
8602 }
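/* For example (illustrative operand values only): with the small code
   model the UNSPEC_TLSGD arm above emits a single
       addi 3,30,x@got@tlsgd
   while -mcmodel=medium/large splits the GOT offset into
       addis 3,2,x@got@tlsgd@ha
       addi 3,3,x@got@tlsgd@l
   The register numbers here are for illustration only; the subsequent
   __tls_get_addr call carries the matching marker reloc.  */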
8603
8604 /* Passes the TLS arg value from the global-dynamic and local-dynamic
8605 emit_library_call_value calls in rs6000_legitimize_tls_address
8606 through to rs6000_call_aix and rs6000_call_sysv, which use it to
8607 emit the marker relocs placed on __tls_get_addr calls. */
8608 static rtx global_tlsarg;
8609
8610 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8611 this (thread-local) address. */
8612
8613 static rtx
8614 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8615 {
8616 rtx dest, insn;
8617
8618 if (TARGET_XCOFF)
8619 return rs6000_legitimize_tls_address_aix (addr, model);
8620
8621 dest = gen_reg_rtx (Pmode);
8622 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8623 {
8624 rtx tlsreg;
8625
8626 if (TARGET_64BIT)
8627 {
8628 tlsreg = gen_rtx_REG (Pmode, 13);
8629 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8630 }
8631 else
8632 {
8633 tlsreg = gen_rtx_REG (Pmode, 2);
8634 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8635 }
8636 emit_insn (insn);
8637 }
8638 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8639 {
8640 rtx tlsreg, tmp;
8641
8642 tmp = gen_reg_rtx (Pmode);
8643 if (TARGET_64BIT)
8644 {
8645 tlsreg = gen_rtx_REG (Pmode, 13);
8646 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8647 }
8648 else
8649 {
8650 tlsreg = gen_rtx_REG (Pmode, 2);
8651 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8652 }
8653 emit_insn (insn);
8654 if (TARGET_64BIT)
8655 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8656 else
8657 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8658 emit_insn (insn);
8659 }
8660 else
8661 {
8662 rtx got, tga, tmp1, tmp2;
8663
8664 /* We currently use relocations like @got@tlsgd for tls, which
8665 means the linker will handle allocation of tls entries, placing
8666 them in the .got section. So use a pointer to the .got section,
8667 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8668 or to secondary GOT sections used by 32-bit -fPIC. */
8669 if (TARGET_64BIT)
8670 got = gen_rtx_REG (Pmode, 2);
8671 else
8672 {
8673 if (flag_pic == 1)
8674 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8675 else
8676 {
8677 rtx gsym = rs6000_got_sym ();
8678 got = gen_reg_rtx (Pmode);
8679 if (flag_pic == 0)
8680 rs6000_emit_move (got, gsym, Pmode);
8681 else
8682 {
8683 rtx mem, lab;
8684
8685 tmp1 = gen_reg_rtx (Pmode);
8686 tmp2 = gen_reg_rtx (Pmode);
8687 mem = gen_const_mem (Pmode, tmp1);
8688 lab = gen_label_rtx ();
8689 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8690 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8691 if (TARGET_LINK_STACK)
8692 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8693 emit_move_insn (tmp2, mem);
8694 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8695 set_unique_reg_note (last, REG_EQUAL, gsym);
8696 }
8697 }
8698 }
8699
8700 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8701 {
8702 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8703 UNSPEC_TLSGD);
8704 tga = rs6000_tls_get_addr ();
8705 global_tlsarg = arg;
8706 if (TARGET_TLS_MARKERS)
8707 {
8708 rtx argreg = gen_rtx_REG (Pmode, 3);
8709 emit_insn (gen_rtx_SET (argreg, arg));
8710 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8711 argreg, Pmode);
8712 }
8713 else
8714 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8715 global_tlsarg = NULL_RTX;
8716
8717 /* Make a note so that the result of this call can be CSEd. */
8718 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8719 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8720 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8721 }
8722 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8723 {
8724 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8725 tga = rs6000_tls_get_addr ();
8726 tmp1 = gen_reg_rtx (Pmode);
8727 global_tlsarg = arg;
8728 if (TARGET_TLS_MARKERS)
8729 {
8730 rtx argreg = gen_rtx_REG (Pmode, 3);
8731 emit_insn (gen_rtx_SET (argreg, arg));
8732 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8733 argreg, Pmode);
8734 }
8735 else
8736 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8737 global_tlsarg = NULL_RTX;
8738
8739 /* Make a note so that the result of this call can be CSEd. */
8740 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8741 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8742 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8743
8744 if (rs6000_tls_size == 16)
8745 {
8746 if (TARGET_64BIT)
8747 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8748 else
8749 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8750 }
8751 else if (rs6000_tls_size == 32)
8752 {
8753 tmp2 = gen_reg_rtx (Pmode);
8754 if (TARGET_64BIT)
8755 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8756 else
8757 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8758 emit_insn (insn);
8759 if (TARGET_64BIT)
8760 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8761 else
8762 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8763 }
8764 else
8765 {
8766 tmp2 = gen_reg_rtx (Pmode);
8767 if (TARGET_64BIT)
8768 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8769 else
8770 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8771 emit_insn (insn);
8772 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8773 }
8774 emit_insn (insn);
8775 }
8776 else
8777 {
8778 /* IE, or 64-bit offset LE. */
8779 tmp2 = gen_reg_rtx (Pmode);
8780 if (TARGET_64BIT)
8781 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8782 else
8783 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8784 emit_insn (insn);
8785 if (TARGET_64BIT)
8786 insn = gen_tls_tls_64 (dest, tmp2, addr);
8787 else
8788 insn = gen_tls_tls_32 (dest, tmp2, addr);
8789 emit_insn (insn);
8790 }
8791 }
8792
8793 return dest;
8794 }
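
/* For illustration: on 64-bit with -mtls-size=16 the local-exec case
   above reduces to a single instruction, roughly

	addi DEST,13,x@tprel

   with r13 as the thread pointer, while -mtls-size=32 uses an
   addis/addi pair with @tprel@ha and @tprel@l relocations.  */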
8795
8796 /* Only create the global variable for the stack protect guard if we are using
8797 the global flavor of that guard. */
8798 static tree
8799 rs6000_init_stack_protect_guard (void)
8800 {
8801 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8802 return default_stack_protect_guard ();
8803
8804 return NULL_TREE;
8805 }
8806
8807 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8808
8809 static bool
8810 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8811 {
8812 if (GET_CODE (x) == HIGH
8813 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8814 return true;
8815
8816 /* A TLS symbol in the TOC cannot contain a sum. */
8817 if (GET_CODE (x) == CONST
8818 && GET_CODE (XEXP (x, 0)) == PLUS
8819 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8820 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8821 return true;
8822
8823 /* Do not place an ELF TLS symbol in the constant pool. */
8824 return TARGET_ELF && tls_referenced_p (x);
8825 }
8826
8827 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8828 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8829 can be addressed relative to the toc pointer. */
8830
8831 static bool
8832 use_toc_relative_ref (rtx sym, machine_mode mode)
8833 {
8834 return ((constant_pool_expr_p (sym)
8835 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8836 get_pool_mode (sym)))
8837 || (TARGET_CMODEL == CMODEL_MEDIUM
8838 && SYMBOL_REF_LOCAL_P (sym)
8839 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8840 }
8841
8842 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8843 that is a valid memory address for an instruction.
8844 The MODE argument is the machine mode for the MEM expression
8845 that wants to use this address.
8846
8847 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
8848 refers to a constant pool entry of an address (or the sum of it
8849 plus a constant), a short (16-bit signed) constant plus a register,
8850 the sum of two registers, or a register indirect, possibly with an
8851 auto-increment. For DFmode, DDmode and DImode with a constant plus
8852 register, we must ensure that both words are addressable, or on
8853 PowerPC64 that the offset is word aligned.
8854
8855 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8856 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8857 because adjacent memory cells are accessed by adding word-sized offsets
8858 during assembly output. */
8859 static bool
8860 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8861 {
8862 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8863 bool quad_offset_p = mode_supports_dq_form (mode);
8864
8865 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8866 if (VECTOR_MEM_ALTIVEC_P (mode)
8867 && GET_CODE (x) == AND
8868 && CONST_INT_P (XEXP (x, 1))
8869 && INTVAL (XEXP (x, 1)) == -16)
8870 x = XEXP (x, 0);
8871
8872 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8873 return 0;
8874 if (legitimate_indirect_address_p (x, reg_ok_strict))
8875 return 1;
8876 if (TARGET_UPDATE
8877 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8878 && mode_supports_pre_incdec_p (mode)
8879 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8880 return 1;
8881 /* Handle restricted vector d-form offsets in ISA 3.0. */
8882 if (quad_offset_p)
8883 {
8884 if (quad_address_p (x, mode, reg_ok_strict))
8885 return 1;
8886 }
8887 else if (virtual_stack_registers_memory_p (x))
8888 return 1;
8889
8890 else if (reg_offset_p)
8891 {
8892 if (legitimate_small_data_p (mode, x))
8893 return 1;
8894 if (legitimate_constant_pool_address_p (x, mode,
8895 reg_ok_strict || lra_in_progress))
8896 return 1;
8897 }
8898
8899 /* For TImode, if we have TImode in VSX registers, only allow register
8900 indirect addresses. This will allow the values to go in either GPRs
8901 or VSX registers without reloading. The vector types would tend to
8902 go into VSX registers, so we allow REG+REG, while TImode seems
8903 somewhat split, in that some uses are GPR based, and some VSX based. */
8904 /* FIXME: We could loosen this by changing the following to
8905 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
8906 but currently we cannot allow REG+REG addressing for TImode. See
8907 PR72827 for complete details on how this ends up hoodwinking DSE. */
8908 if (mode == TImode && TARGET_VSX)
8909 return 0;
8910 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
8911 if (! reg_ok_strict
8912 && reg_offset_p
8913 && GET_CODE (x) == PLUS
8914 && REG_P (XEXP (x, 0))
8915 && (XEXP (x, 0) == virtual_stack_vars_rtx
8916 || XEXP (x, 0) == arg_pointer_rtx)
8917 && CONST_INT_P (XEXP (x, 1)))
8918 return 1;
8919 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8920 return 1;
8921 if (!FLOAT128_2REG_P (mode)
8922 && (TARGET_HARD_FLOAT
8923 || TARGET_POWERPC64
8924 || (mode != DFmode && mode != DDmode))
8925 && (TARGET_POWERPC64 || mode != DImode)
8926 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8927 && mode != PTImode
8928 && !avoiding_indexed_address_p (mode)
8929 && legitimate_indexed_address_p (x, reg_ok_strict))
8930 return 1;
8931 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8932 && mode_supports_pre_modify_p (mode)
8933 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8934 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8935 reg_ok_strict, false)
8936 || (!avoiding_indexed_address_p (mode)
8937 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8938 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8939 return 1;
8940 if (reg_offset_p && !quad_offset_p
8941 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8942 return 1;
8943 return 0;
8944 }
8945
8946 /* Debug version of rs6000_legitimate_address_p. */
8947 static bool
8948 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8949 bool reg_ok_strict)
8950 {
8951 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8952 fprintf (stderr,
8953 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8954 "strict = %d, reload = %s, code = %s\n",
8955 ret ? "true" : "false",
8956 GET_MODE_NAME (mode),
8957 reg_ok_strict,
8958 (reload_completed ? "after" : "before"),
8959 GET_RTX_NAME (GET_CODE (x)));
8960 debug_rtx (x);
8961
8962 return ret;
8963 }
8964
8965 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8966
8967 static bool
8968 rs6000_mode_dependent_address_p (const_rtx addr,
8969 addr_space_t as ATTRIBUTE_UNUSED)
8970 {
8971 return rs6000_mode_dependent_address_ptr (addr);
8972 }
8973
8974 /* Return true if ADDR (a legitimate address expression)
8975 has an effect that depends on the machine mode it is used for.
8976
8977 On the RS/6000 this is true of all integral offsets (since AltiVec
8978 and VSX modes don't allow them) and of pre-increment and decrement addresses.
8979
8980 ??? Except that due to conceptual problems in offsettable_address_p
8981 we can't really report the problems of integral offsets. So leave
8982 this assuming that the adjustable offset must be valid for the
8983 sub-words of a TFmode operand, which is what we had before. */
8984
8985 static bool
8986 rs6000_mode_dependent_address (const_rtx addr)
8987 {
8988 switch (GET_CODE (addr))
8989 {
8990 case PLUS:
8991 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
8992 is considered a legitimate address before reload, so there
8993 are no offset restrictions in that case. Note that this
8994 condition is safe in strict mode because any address involving
8995 virtual_stack_vars_rtx or arg_pointer_rtx would already have
8996 been rejected as illegitimate. */
8997 if (XEXP (addr, 0) != virtual_stack_vars_rtx
8998 && XEXP (addr, 0) != arg_pointer_rtx
8999 && CONST_INT_P (XEXP (addr, 1)))
9000 {
9001 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9002 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9003 }
9004 break;
9005
9006 case LO_SUM:
9007 /* Anything in the constant pool is sufficiently aligned that
9008 all bytes have the same high part address. */
9009 return !legitimate_constant_pool_address_p (addr, QImode, false);
9010
9011 /* Auto-increment cases are now treated generically in recog.c. */
9012 case PRE_MODIFY:
9013 return TARGET_UPDATE;
9014
9015 /* AND is only allowed in Altivec loads. */
9016 case AND:
9017 return true;
9018
9019 default:
9020 break;
9021 }
9022
9023 return false;
9024 }
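
/* Worked example for the PLUS case above: with TARGET_POWERPC64,
   val = 32760 (0x7ff8) gives val + 0x8000 = 0xfff8, which is not less
   than 0x10000 - 8, so the address is mode-dependent: a 16-byte access
   would need its last doubleword at offset 32768, which no longer fits
   in a signed 16-bit displacement.  */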
9025
9026 /* Debug version of rs6000_mode_dependent_address. */
9027 static bool
9028 rs6000_debug_mode_dependent_address (const_rtx addr)
9029 {
9030 bool ret = rs6000_mode_dependent_address (addr);
9031
9032 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9033 ret ? "true" : "false");
9034 debug_rtx (addr);
9035
9036 return ret;
9037 }
9038
9039 /* Implement FIND_BASE_TERM. */
9040
9041 rtx
9042 rs6000_find_base_term (rtx op)
9043 {
9044 rtx base;
9045
9046 base = op;
9047 if (GET_CODE (base) == CONST)
9048 base = XEXP (base, 0);
9049 if (GET_CODE (base) == PLUS)
9050 base = XEXP (base, 0);
9051 if (GET_CODE (base) == UNSPEC)
9052 switch (XINT (base, 1))
9053 {
9054 case UNSPEC_TOCREL:
9055 case UNSPEC_MACHOPIC_OFFSET:
9056 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9057 for aliasing purposes. */
9058 return XVECEXP (base, 0, 0);
9059 }
9060
9061 return op;
9062 }
9063
9064 /* More elaborate version of recog's offsettable_memref_p predicate
9065 that works around the ??? note of rs6000_mode_dependent_address.
9066 In particular it accepts
9067
9068 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9069
9070 in 32-bit mode, that the recog predicate rejects. */
9071
9072 static bool
9073 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9074 {
9075 bool worst_case;
9076
9077 if (!MEM_P (op))
9078 return false;
9079
9080 /* First mimic offsettable_memref_p. */
9081 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9082 return true;
9083
9084 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9085 the latter predicate knows nothing about the mode of the memory
9086 reference and, therefore, assumes that it is the largest supported
9087 mode (TFmode). As a consequence, legitimate offsettable memory
9088 references are rejected. rs6000_legitimate_offset_address_p contains
9089 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9090 at least with a little bit of help here given that we know the
9091 actual registers used. */
9092 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9093 || GET_MODE_SIZE (reg_mode) == 4);
9094 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9095 strict, worst_case);
9096 }
9097
9098 /* Determine the reassociation width to be used in reassociate_bb.
9099 This takes into account how many parallel operations we
9100 can actually do of a given type, and also the latency.
9101 P8:
9102 int add/sub 6/cycle
9103 mul 2/cycle
9104 vect add/sub/mul 2/cycle
9105 fp add/sub/mul 2/cycle
9106 dfp 1/cycle
9107 */
9108
9109 static int
9110 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9111 machine_mode mode)
9112 {
9113 switch (rs6000_tune)
9114 {
9115 case PROCESSOR_POWER8:
9116 case PROCESSOR_POWER9:
9117 if (DECIMAL_FLOAT_MODE_P (mode))
9118 return 1;
9119 if (VECTOR_MODE_P (mode))
9120 return 4;
9121 if (INTEGRAL_MODE_P (mode))
9122 return 1;
9123 if (FLOAT_MODE_P (mode))
9124 return 4;
9125 break;
9126 default:
9127 break;
9128 }
9129 return 1;
9130 }
9131
9132 /* Change register usage conditional on target flags. */
9133 static void
9134 rs6000_conditional_register_usage (void)
9135 {
9136 int i;
9137
9138 if (TARGET_DEBUG_TARGET)
9139 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9140
9141 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9142 if (TARGET_64BIT)
9143 fixed_regs[13] = call_used_regs[13]
9144 = call_really_used_regs[13] = 1;
9145
9146 /* Conditionally disable FPRs. */
9147 if (TARGET_SOFT_FLOAT)
9148 for (i = 32; i < 64; i++)
9149 fixed_regs[i] = call_used_regs[i]
9150 = call_really_used_regs[i] = 1;
9151
9152 /* The TOC register is not killed across calls in a way that is
9153 visible to the compiler. */
9154 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9155 call_really_used_regs[2] = 0;
9156
9157 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9158 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9159
9160 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9161 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9162 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9163 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9164
9165 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9166 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9167 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9168 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9169
9170 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9171 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9172 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9173
9174 if (!TARGET_ALTIVEC && !TARGET_VSX)
9175 {
9176 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9177 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9178 call_really_used_regs[VRSAVE_REGNO] = 1;
9179 }
9180
9181 if (TARGET_ALTIVEC || TARGET_VSX)
9182 global_regs[VSCR_REGNO] = 1;
9183
9184 if (TARGET_ALTIVEC_ABI)
9185 {
9186 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9187 call_used_regs[i] = call_really_used_regs[i] = 1;
9188
9189 /* AIX reserves VR20:31 in non-extended ABI mode. */
9190 if (TARGET_XCOFF)
9191 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9192 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9193 }
9194 }
9195
9196 \f
9197 /* Output insns to set DEST equal to the constant SOURCE as a series of
9198 lis, ori and shl instructions and return TRUE. */
9199
9200 bool
9201 rs6000_emit_set_const (rtx dest, rtx source)
9202 {
9203 machine_mode mode = GET_MODE (dest);
9204 rtx temp, set;
9205 rtx_insn *insn;
9206 HOST_WIDE_INT c;
9207
9208 gcc_checking_assert (CONST_INT_P (source));
9209 c = INTVAL (source);
9210 switch (mode)
9211 {
9212 case E_QImode:
9213 case E_HImode:
9214 emit_insn (gen_rtx_SET (dest, source));
9215 return true;
9216
9217 case E_SImode:
9218 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9219
9220 emit_insn (gen_rtx_SET (copy_rtx (temp),
9221 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9222 emit_insn (gen_rtx_SET (dest,
9223 gen_rtx_IOR (SImode, copy_rtx (temp),
9224 GEN_INT (c & 0xffff))));
9225 break;
9226
9227 case E_DImode:
9228 if (!TARGET_POWERPC64)
9229 {
9230 rtx hi, lo;
9231
9232 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9233 DImode);
9234 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9235 DImode);
9236 emit_move_insn (hi, GEN_INT (c >> 32));
9237 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9238 emit_move_insn (lo, GEN_INT (c));
9239 }
9240 else
9241 rs6000_emit_set_long_const (dest, c);
9242 break;
9243
9244 default:
9245 gcc_unreachable ();
9246 }
9247
9248 insn = get_last_insn ();
9249 set = single_set (insn);
9250 if (! CONSTANT_P (SET_SRC (set)))
9251 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9252
9253 return true;
9254 }
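
/* For illustration: the SImode case above builds c = 0x12345678 by
   first setting the high part and then IORing in the low half, which
   assembles to roughly

	lis DEST,0x1234
	ori DEST,DEST,0x5678

   for a GPR destination.  */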
9255
9256 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9257 Output insns to set DEST equal to the constant C as a series of
9258 lis, ori and shl instructions. */
9259
9260 static void
9261 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9262 {
9263 rtx temp;
9264 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9265
9266 ud1 = c & 0xffff;
9267 c = c >> 16;
9268 ud2 = c & 0xffff;
9269 c = c >> 16;
9270 ud3 = c & 0xffff;
9271 c = c >> 16;
9272 ud4 = c & 0xffff;
9273
9274 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9275 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9276 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9277
9278 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9279 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9280 {
9281 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9282
9283 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9284 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9285 if (ud1 != 0)
9286 emit_move_insn (dest,
9287 gen_rtx_IOR (DImode, copy_rtx (temp),
9288 GEN_INT (ud1)));
9289 }
9290 else if (ud3 == 0 && ud4 == 0)
9291 {
9292 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9293
9294 gcc_assert (ud2 & 0x8000);
9295 emit_move_insn (copy_rtx (temp),
9296 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9297 if (ud1 != 0)
9298 emit_move_insn (copy_rtx (temp),
9299 gen_rtx_IOR (DImode, copy_rtx (temp),
9300 GEN_INT (ud1)));
9301 emit_move_insn (dest,
9302 gen_rtx_ZERO_EXTEND (DImode,
9303 gen_lowpart (SImode,
9304 copy_rtx (temp))));
9305 }
9306 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9307 || (ud4 == 0 && ! (ud3 & 0x8000)))
9308 {
9309 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9310
9311 emit_move_insn (copy_rtx (temp),
9312 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9313 if (ud2 != 0)
9314 emit_move_insn (copy_rtx (temp),
9315 gen_rtx_IOR (DImode, copy_rtx (temp),
9316 GEN_INT (ud2)));
9317 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9318 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9319 GEN_INT (16)));
9320 if (ud1 != 0)
9321 emit_move_insn (dest,
9322 gen_rtx_IOR (DImode, copy_rtx (temp),
9323 GEN_INT (ud1)));
9324 }
9325 else
9326 {
9327 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9328
9329 emit_move_insn (copy_rtx (temp),
9330 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9331 if (ud3 != 0)
9332 emit_move_insn (copy_rtx (temp),
9333 gen_rtx_IOR (DImode, copy_rtx (temp),
9334 GEN_INT (ud3)));
9335
9336 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9337 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9338 GEN_INT (32)));
9339 if (ud2 != 0)
9340 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9341 gen_rtx_IOR (DImode, copy_rtx (temp),
9342 GEN_INT (ud2 << 16)));
9343 if (ud1 != 0)
9344 emit_move_insn (dest,
9345 gen_rtx_IOR (DImode, copy_rtx (temp),
9346 GEN_INT (ud1)));
9347 }
9348 }
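
/* For illustration: in the final case above, a constant such as
   c = 0x123456789abcdef0 splits into ud4 = 0x1234, ud3 = 0x5678,
   ud2 = 0x9abc and ud1 = 0xdef0, and is materialized roughly as

	lis DEST,0x1234
	ori DEST,DEST,0x5678
	sldi DEST,DEST,32
	oris DEST,DEST,0x9abc
	ori DEST,DEST,0xdef0

   five instructions in all.  */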
9349
9350 /* Helper for the following. Get rid of [r+r] memory refs
9351 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9352
9353 static void
9354 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9355 {
9356 if (MEM_P (operands[0])
9357 && !REG_P (XEXP (operands[0], 0))
9358 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9359 GET_MODE (operands[0]), false))
9360 operands[0]
9361 = replace_equiv_address (operands[0],
9362 copy_addr_to_reg (XEXP (operands[0], 0)));
9363
9364 if (MEM_P (operands[1])
9365 && !REG_P (XEXP (operands[1], 0))
9366 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9367 GET_MODE (operands[1]), false))
9368 operands[1]
9369 = replace_equiv_address (operands[1],
9370 copy_addr_to_reg (XEXP (operands[1], 0)));
9371 }
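
/* For illustration: an indexed address such as

	(mem:TI (plus:DI (reg:DI 3) (reg:DI 4)))

   is rewritten above into (mem:TI (reg:DI TMP)) after emitting
   TMP = r3 + r4 via copy_addr_to_reg, since TImode does not allow
   [r+r] addressing here.  */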
9372
9373 /* Generate a vector of constants to permute MODE for a little-endian
9374 storage operation by swapping the two halves of a vector. */
9375 static rtvec
9376 rs6000_const_vec (machine_mode mode)
9377 {
9378 int i, subparts;
9379 rtvec v;
9380
9381 switch (mode)
9382 {
9383 case E_V1TImode:
9384 subparts = 1;
9385 break;
9386 case E_V2DFmode:
9387 case E_V2DImode:
9388 subparts = 2;
9389 break;
9390 case E_V4SFmode:
9391 case E_V4SImode:
9392 subparts = 4;
9393 break;
9394 case E_V8HImode:
9395 subparts = 8;
9396 break;
9397 case E_V16QImode:
9398 subparts = 16;
9399 break;
9400 default:
9401 gcc_unreachable();
9402 }
9403
9404 v = rtvec_alloc (subparts);
9405
9406 for (i = 0; i < subparts / 2; ++i)
9407 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9408 for (i = subparts / 2; i < subparts; ++i)
9409 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9410
9411 return v;
9412 }
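
/* For example, V4SImode yields the selector { 2, 3, 0, 1 }, which
   exchanges the two 64-bit halves of the vector, and V16QImode yields
   { 8, ..., 15, 0, ..., 7 } with the same effect.  */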
9413
9414 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9415 store operation. */
9416 void
9417 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9418 {
9419 /* Scalar permutations are easier to express in integer modes than in
9420 floating-point modes, so cast them here. We use V1TImode instead
9421 of TImode to ensure that the values don't go through GPRs. */
9422 if (FLOAT128_VECTOR_P (mode))
9423 {
9424 dest = gen_lowpart (V1TImode, dest);
9425 source = gen_lowpart (V1TImode, source);
9426 mode = V1TImode;
9427 }
9428
9429 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9430 scalar. */
9431 if (mode == TImode || mode == V1TImode)
9432 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9433 GEN_INT (64))));
9434 else
9435 {
9436 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9437 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9438 }
9439 }
9440
9441 /* Emit a little-endian load from vector memory location SOURCE to VSX
9442 register DEST in mode MODE. The load is done with two permuting
9443 insns that represent an lxvd2x and an xxpermdi. */
9444 void
9445 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9446 {
9447 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9448 V1TImode). */
9449 if (mode == TImode || mode == V1TImode)
9450 {
9451 mode = V2DImode;
9452 dest = gen_lowpart (V2DImode, dest);
9453 source = adjust_address (source, V2DImode, 0);
9454 }
9455
9456 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9457 rs6000_emit_le_vsx_permute (tmp, source, mode);
9458 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9459 }
9460
9461 /* Emit a little-endian store to vector memory location DEST from VSX
9462 register SOURCE in mode MODE. The store is done with two permuting
9463 insns that represent an xxpermdi and an stxvd2x. */
9464 void
9465 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9466 {
9467 /* This should never be called during or after LRA, because it does
9468 not re-permute the source register. It is intended only for use
9469 during expand. */
9470 gcc_assert (!lra_in_progress && !reload_completed);
9471
9472 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9473 V1TImode). */
9474 if (mode == TImode || mode == V1TImode)
9475 {
9476 mode = V2DImode;
9477 dest = adjust_address (dest, V2DImode, 0);
9478 source = gen_lowpart (V2DImode, source);
9479 }
9480
9481 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9482 rs6000_emit_le_vsx_permute (tmp, source, mode);
9483 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9484 }
9485
9486 /* Emit a sequence representing a little-endian VSX load or store,
9487 moving data from SOURCE to DEST in mode MODE. This is done
9488 separately from rs6000_emit_move to ensure it is called only
9489 during expand. LE VSX loads and stores introduced later are
9490 handled with a split. The expand-time RTL generation allows
9491 us to optimize away redundant pairs of register-permutes. */
9492 void
9493 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9494 {
9495 gcc_assert (!BYTES_BIG_ENDIAN
9496 && VECTOR_MEM_VSX_P (mode)
9497 && !TARGET_P9_VECTOR
9498 && !gpr_or_gpr_p (dest, source)
9499 && (MEM_P (source) ^ MEM_P (dest)));
9500
9501 if (MEM_P (source))
9502 {
9503 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9504 rs6000_emit_le_vsx_load (dest, source, mode);
9505 }
9506 else
9507 {
9508 if (!REG_P (source))
9509 source = force_reg (mode, source);
9510 rs6000_emit_le_vsx_store (dest, source, mode);
9511 }
9512 }
9513
9514 /* Return whether an SFmode or SImode move can be done without converting one
9515 mode to another. This arises when we have:
9516
9517 (SUBREG:SF (REG:SI ...))
9518 (SUBREG:SI (REG:SF ...))
9519
9520 and one of the values is in a floating point/vector register, where SFmode
9521 scalars are stored in DFmode format. */
9522
9523 bool
9524 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9525 {
9526 if (TARGET_ALLOW_SF_SUBREG)
9527 return true;
9528
9529 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9530 return true;
9531
9532 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9533 return true;
9534
9535 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
9536 if (SUBREG_P (dest))
9537 {
9538 rtx dest_subreg = SUBREG_REG (dest);
9539 rtx src_subreg = SUBREG_REG (src);
9540 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9541 }
9542
9543 return false;
9544 }
9545
9546
9547 /* Helper function to change moves with:
9548
9549 (SUBREG:SF (REG:SI)) and
9550 (SUBREG:SI (REG:SF))
9551
9552 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9553 values are stored as DFmode values in the VSX registers. We need to convert
9554 the bits before we can use a direct move or operate on the bits in the
9555 vector register as an integer type.
9556
9557 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
9558
9559 static bool
9560 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9561 {
9562 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9563 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9564 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9565 {
9566 rtx inner_source = SUBREG_REG (source);
9567 machine_mode inner_mode = GET_MODE (inner_source);
9568
9569 if (mode == SImode && inner_mode == SFmode)
9570 {
9571 emit_insn (gen_movsi_from_sf (dest, inner_source));
9572 return true;
9573 }
9574
9575 if (mode == SFmode && inner_mode == SImode)
9576 {
9577 emit_insn (gen_movsf_from_si (dest, inner_source));
9578 return true;
9579 }
9580 }
9581
9582 return false;
9583 }
9584
9585 /* Emit a move from SOURCE to DEST in mode MODE. */
9586 void
9587 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9588 {
9589 rtx operands[2];
9590 operands[0] = dest;
9591 operands[1] = source;
9592
9593 if (TARGET_DEBUG_ADDR)
9594 {
9595 fprintf (stderr,
9596 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9597 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9598 GET_MODE_NAME (mode),
9599 lra_in_progress,
9600 reload_completed,
9601 can_create_pseudo_p ());
9602 debug_rtx (dest);
9603 fprintf (stderr, "source:\n");
9604 debug_rtx (source);
9605 }
9606
9607 /* Check that we get CONST_WIDE_INT only when we should. */
9608 if (CONST_WIDE_INT_P (operands[1])
9609 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9610 gcc_unreachable ();
9611
9612 #ifdef HAVE_AS_GNU_ATTRIBUTE
9613 /* If we use a long double type, set the flags in .gnu_attribute that say
9614 what the long double type is. This is to allow the linker's warning
9615 message for the wrong long double to be useful, even if the function does
9616 not do a call (for example, doing a 128-bit add on power9 if the long
9617 double type is IEEE 128-bit). Do not set this if __ibm128 or __float128
9618 is used when it is not the default long double type. */
9619 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9620 {
9621 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9622 rs6000_passes_float = rs6000_passes_long_double = true;
9623
9624 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9625 rs6000_passes_float = rs6000_passes_long_double = true;
9626 }
9627 #endif
9628
9629 /* See if we need to special case SImode/SFmode SUBREG moves. */
9630 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9631 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9632 return;
9633
9634 /* Check if GCC is setting up a block move that will end up using FP
9635 registers as temporaries. We must make sure this is acceptable. */
9636 if (MEM_P (operands[0])
9637 && MEM_P (operands[1])
9638 && mode == DImode
9639 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9640 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9641 && ! (rs6000_slow_unaligned_access (SImode,
9642 (MEM_ALIGN (operands[0]) > 32
9643 ? 32 : MEM_ALIGN (operands[0])))
9644 || rs6000_slow_unaligned_access (SImode,
9645 (MEM_ALIGN (operands[1]) > 32
9646 ? 32 : MEM_ALIGN (operands[1]))))
9647 && ! MEM_VOLATILE_P (operands [0])
9648 && ! MEM_VOLATILE_P (operands [1]))
9649 {
9650 emit_move_insn (adjust_address (operands[0], SImode, 0),
9651 adjust_address (operands[1], SImode, 0));
9652 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9653 adjust_address (copy_rtx (operands[1]), SImode, 4));
9654 return;
9655 }
9656
9657 if (can_create_pseudo_p () && MEM_P (operands[0])
9658 && !gpc_reg_operand (operands[1], mode))
9659 operands[1] = force_reg (mode, operands[1]);
9660
9661 /* Recognize the case where operand[1] is a reference to thread-local
9662 data and load its address to a register. */
9663 if (tls_referenced_p (operands[1]))
9664 {
9665 enum tls_model model;
9666 rtx tmp = operands[1];
9667 rtx addend = NULL;
9668
9669 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9670 {
9671 addend = XEXP (XEXP (tmp, 0), 1);
9672 tmp = XEXP (XEXP (tmp, 0), 0);
9673 }
9674
9675 gcc_assert (SYMBOL_REF_P (tmp));
9676 model = SYMBOL_REF_TLS_MODEL (tmp);
9677 gcc_assert (model != 0);
9678
9679 tmp = rs6000_legitimize_tls_address (tmp, model);
9680 if (addend)
9681 {
9682 tmp = gen_rtx_PLUS (mode, tmp, addend);
9683 tmp = force_operand (tmp, operands[0]);
9684 }
9685 operands[1] = tmp;
9686 }
9687
9688 /* 128-bit constant floating-point values on Darwin should really be loaded
9689 as two parts. However, this premature splitting is a problem when DFmode
9690 values can go into Altivec registers. */
9691 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
9692 && !reg_addr[DFmode].scalar_in_vmx_p)
9693 {
9694 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9695 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9696 DFmode);
9697 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9698 GET_MODE_SIZE (DFmode)),
9699 simplify_gen_subreg (DFmode, operands[1], mode,
9700 GET_MODE_SIZE (DFmode)),
9701 DFmode);
9702 return;
9703 }
9704
9705 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9706 p1:SD) if p1 is not of floating point class and p0 is spilled as
9707 we can have no analogous movsd_store for this. */
9708 if (lra_in_progress && mode == DDmode
9709 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9710 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9711 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
9712 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9713 {
9714 enum reg_class cl;
9715 int regno = REGNO (SUBREG_REG (operands[1]));
9716
9717 if (!HARD_REGISTER_NUM_P (regno))
9718 {
9719 cl = reg_preferred_class (regno);
9720 regno = reg_renumber[regno];
9721 if (regno < 0)
9722 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9723 }
9724 if (regno >= 0 && ! FP_REGNO_P (regno))
9725 {
9726 mode = SDmode;
9727 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9728 operands[1] = SUBREG_REG (operands[1]);
9729 }
9730 }
9731 if (lra_in_progress
9732 && mode == SDmode
9733 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9734 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9735 && (REG_P (operands[1])
9736 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
9737 {
9738 int regno = reg_or_subregno (operands[1]);
9739 enum reg_class cl;
9740
9741 if (!HARD_REGISTER_NUM_P (regno))
9742 {
9743 cl = reg_preferred_class (regno);
9744 gcc_assert (cl != NO_REGS);
9745 regno = reg_renumber[regno];
9746 if (regno < 0)
9747 regno = ira_class_hard_regs[cl][0];
9748 }
9749 if (FP_REGNO_P (regno))
9750 {
9751 if (GET_MODE (operands[0]) != DDmode)
9752 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9753 emit_insn (gen_movsd_store (operands[0], operands[1]));
9754 }
9755 else if (INT_REGNO_P (regno))
9756 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9757 else
9758 gcc_unreachable();
9759 return;
9760 }
9761 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9762 p1:DD)) if p0 is not of floating point class and p1 is spilled as
9763 we can have no analogous movsd_load for this. */
9764 if (lra_in_progress && mode == DDmode
9765 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
9766 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9767 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9768 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9769 {
9770 enum reg_class cl;
9771 int regno = REGNO (SUBREG_REG (operands[0]));
9772
9773 if (!HARD_REGISTER_NUM_P (regno))
9774 {
9775 cl = reg_preferred_class (regno);
9776 regno = reg_renumber[regno];
9777 if (regno < 0)
9778 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9779 }
9780 if (regno >= 0 && ! FP_REGNO_P (regno))
9781 {
9782 mode = SDmode;
9783 operands[0] = SUBREG_REG (operands[0]);
9784 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9785 }
9786 }
9787 if (lra_in_progress
9788 && mode == SDmode
9789 && (REG_P (operands[0])
9790 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
9791 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9792 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9793 {
9794 int regno = reg_or_subregno (operands[0]);
9795 enum reg_class cl;
9796
9797 if (!HARD_REGISTER_NUM_P (regno))
9798 {
9799 cl = reg_preferred_class (regno);
9800 gcc_assert (cl != NO_REGS);
9801 regno = reg_renumber[regno];
9802 if (regno < 0)
9803 regno = ira_class_hard_regs[cl][0];
9804 }
9805 if (FP_REGNO_P (regno))
9806 {
9807 if (GET_MODE (operands[1]) != DDmode)
9808 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9809 emit_insn (gen_movsd_load (operands[0], operands[1]));
9810 }
9811 else if (INT_REGNO_P (regno))
9812 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9813 else
9814 gcc_unreachable();
9815 return;
9816 }
9817
9818 /* FIXME: In the long term, this switch statement should go away
9819 and be replaced by a sequence of tests based on things like
9820 mode == Pmode. */
9821 switch (mode)
9822 {
9823 case E_HImode:
9824 case E_QImode:
9825 if (CONSTANT_P (operands[1])
9826 && !CONST_INT_P (operands[1]))
9827 operands[1] = force_const_mem (mode, operands[1]);
9828 break;
9829
9830 case E_TFmode:
9831 case E_TDmode:
9832 case E_IFmode:
9833 case E_KFmode:
9834 if (FLOAT128_2REG_P (mode))
9835 rs6000_eliminate_indexed_memrefs (operands);
9836 /* fall through */
9837
9838 case E_DFmode:
9839 case E_DDmode:
9840 case E_SFmode:
9841 case E_SDmode:
9842 if (CONSTANT_P (operands[1])
9843 && ! easy_fp_constant (operands[1], mode))
9844 operands[1] = force_const_mem (mode, operands[1]);
9845 break;
9846
9847 case E_V16QImode:
9848 case E_V8HImode:
9849 case E_V4SFmode:
9850 case E_V4SImode:
9851 case E_V2DFmode:
9852 case E_V2DImode:
9853 case E_V1TImode:
9854 if (CONSTANT_P (operands[1])
9855 && !easy_vector_constant (operands[1], mode))
9856 operands[1] = force_const_mem (mode, operands[1]);
9857 break;
9858
9859 case E_SImode:
9860 case E_DImode:
9861 /* Use the default pattern for the address of ELF small data. */
9862 if (TARGET_ELF
9863 && mode == Pmode
9864 && DEFAULT_ABI == ABI_V4
9865 && (SYMBOL_REF_P (operands[1])
9866 || GET_CODE (operands[1]) == CONST)
9867 && small_data_operand (operands[1], mode))
9868 {
9869 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9870 return;
9871 }
9872
9873 if (DEFAULT_ABI == ABI_V4
9874 && mode == Pmode && mode == SImode
9875 && flag_pic == 1 && got_operand (operands[1], mode))
9876 {
9877 emit_insn (gen_movsi_got (operands[0], operands[1]));
9878 return;
9879 }
9880
9881 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9882 && TARGET_NO_TOC
9883 && ! flag_pic
9884 && mode == Pmode
9885 && CONSTANT_P (operands[1])
9886 && GET_CODE (operands[1]) != HIGH
9887 && !CONST_INT_P (operands[1]))
9888 {
9889 rtx target = (!can_create_pseudo_p ()
9890 ? operands[0]
9891 : gen_reg_rtx (mode));
9892
9893 /* If this is a function address on -mcall-aixdesc,
9894 convert it to the address of the descriptor. */
9895 if (DEFAULT_ABI == ABI_AIX
9896 && SYMBOL_REF_P (operands[1])
9897 && XSTR (operands[1], 0)[0] == '.')
9898 {
9899 const char *name = XSTR (operands[1], 0);
9900 rtx new_ref;
9901 while (*name == '.')
9902 name++;
9903 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9904 CONSTANT_POOL_ADDRESS_P (new_ref)
9905 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9906 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9907 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9908 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9909 operands[1] = new_ref;
9910 }
9911
9912 if (DEFAULT_ABI == ABI_DARWIN)
9913 {
9914 #if TARGET_MACHO
9915 if (MACHO_DYNAMIC_NO_PIC_P)
9916 {
9917 /* Take care of any required data indirection. */
9918 operands[1] = rs6000_machopic_legitimize_pic_address (
9919 operands[1], mode, operands[0]);
9920 if (operands[0] != operands[1])
9921 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9922 return;
9923 }
9924 #endif
9925 emit_insn (gen_macho_high (target, operands[1]));
9926 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9927 return;
9928 }
9929
9930 emit_insn (gen_elf_high (target, operands[1]));
9931 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9932 return;
9933 }
9934
9935 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9936 and we have put it in the TOC, we just need to make a TOC-relative
9937 reference to it. */
9938 if (TARGET_TOC
9939 && SYMBOL_REF_P (operands[1])
9940 && use_toc_relative_ref (operands[1], mode))
9941 operands[1] = create_TOC_reference (operands[1], operands[0]);
9942 else if (mode == Pmode
9943 && CONSTANT_P (operands[1])
9944 && GET_CODE (operands[1]) != HIGH
9945 && ((REG_P (operands[0])
9946 && FP_REGNO_P (REGNO (operands[0])))
9947 || !CONST_INT_P (operands[1])
9948 || (num_insns_constant (operands[1], mode)
9949 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9950 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
9951 && (TARGET_CMODEL == CMODEL_SMALL
9952 || can_create_pseudo_p ()
9953 || (REG_P (operands[0])
9954 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9955 {
9956
9957 #if TARGET_MACHO
9958 /* Darwin uses a special PIC legitimizer. */
9959 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9960 {
9961 operands[1] =
9962 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9963 operands[0]);
9964 if (operands[0] != operands[1])
9965 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9966 return;
9967 }
9968 #endif
9969
9970 /* If we are to limit the number of things we put in the TOC and
9971 this is a symbol plus a constant we can add in one insn,
9972 just put the symbol in the TOC and add the constant. */
9973 if (GET_CODE (operands[1]) == CONST
9974 && TARGET_NO_SUM_IN_TOC
9975 && GET_CODE (XEXP (operands[1], 0)) == PLUS
9976 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
9977 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
9978 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
9979 && ! side_effects_p (operands[0]))
9980 {
9981 rtx sym =
9982 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
9983 rtx other = XEXP (XEXP (operands[1], 0), 1);
9984
9985 sym = force_reg (mode, sym);
9986 emit_insn (gen_add3_insn (operands[0], sym, other));
9987 return;
9988 }
9989
9990 operands[1] = force_const_mem (mode, operands[1]);
9991
9992 if (TARGET_TOC
9993 && SYMBOL_REF_P (XEXP (operands[1], 0))
9994 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
9995 {
9996 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
9997 operands[0]);
9998 operands[1] = gen_const_mem (mode, tocref);
9999 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10000 }
10001 }
10002 break;
10003
10004 case E_TImode:
10005 if (!VECTOR_MEM_VSX_P (TImode))
10006 rs6000_eliminate_indexed_memrefs (operands);
10007 break;
10008
10009 case E_PTImode:
10010 rs6000_eliminate_indexed_memrefs (operands);
10011 break;
10012
10013 default:
10014 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10015 }
10016
10017 /* Above, we may have called force_const_mem which may have returned
10018 an invalid address. If we can, fix this up; otherwise, reload will
10019 have to deal with it. */
10020 if (MEM_P (operands[1]))
10021 operands[1] = validize_mem (operands[1]);
10022
10023 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10024 }
10025 \f
10026 /* Nonzero if we can use a floating-point register to pass this arg. */
10027 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10028 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10029 && (CUM)->fregno <= FP_ARG_MAX_REG \
10030 && TARGET_HARD_FLOAT)
10031
10032 /* Nonzero if we can use an AltiVec register to pass this arg. */
10033 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10034 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10035 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10036 && TARGET_ALTIVEC_ABI \
10037 && (NAMED))
10038
10039 /* Walk down the type tree of TYPE counting consecutive base elements.
10040 If *MODEP is VOIDmode, then set it to the first valid floating point
10041 or vector type. If a non-floating point or vector type is found, or
10042 if a floating point or vector type that doesn't match a non-VOIDmode
10043 *MODEP is found, then return -1, otherwise return the count in the
10044 sub-tree. */
10045
10046 static int
10047 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10048 {
10049 machine_mode mode;
10050 HOST_WIDE_INT size;
10051
10052 switch (TREE_CODE (type))
10053 {
10054 case REAL_TYPE:
10055 mode = TYPE_MODE (type);
10056 if (!SCALAR_FLOAT_MODE_P (mode))
10057 return -1;
10058
10059 if (*modep == VOIDmode)
10060 *modep = mode;
10061
10062 if (*modep == mode)
10063 return 1;
10064
10065 break;
10066
10067 case COMPLEX_TYPE:
10068 mode = TYPE_MODE (TREE_TYPE (type));
10069 if (!SCALAR_FLOAT_MODE_P (mode))
10070 return -1;
10071
10072 if (*modep == VOIDmode)
10073 *modep = mode;
10074
10075 if (*modep == mode)
10076 return 2;
10077
10078 break;
10079
10080 case VECTOR_TYPE:
10081 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10082 return -1;
10083
10084 /* Use V4SImode as representative of all 128-bit vector types. */
10085 size = int_size_in_bytes (type);
10086 switch (size)
10087 {
10088 case 16:
10089 mode = V4SImode;
10090 break;
10091 default:
10092 return -1;
10093 }
10094
10095 if (*modep == VOIDmode)
10096 *modep = mode;
10097
10098 /* Vector modes are considered to be opaque: two vectors are
10099 equivalent for the purposes of being homogeneous aggregates
10100 if they are the same size. */
10101 if (*modep == mode)
10102 return 1;
10103
10104 break;
10105
10106 case ARRAY_TYPE:
10107 {
10108 int count;
10109 tree index = TYPE_DOMAIN (type);
10110
10111 /* Can't handle incomplete types or sizes that are not
10112 fixed. */
10113 if (!COMPLETE_TYPE_P (type)
10114 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10115 return -1;
10116
10117 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10118 if (count == -1
10119 || !index
10120 || !TYPE_MAX_VALUE (index)
10121 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10122 || !TYPE_MIN_VALUE (index)
10123 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10124 || count < 0)
10125 return -1;
10126
10127 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10128 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10129
10130 /* There must be no padding. */
10131 if (wi::to_wide (TYPE_SIZE (type))
10132 != count * GET_MODE_BITSIZE (*modep))
10133 return -1;
10134
10135 return count;
10136 }
10137
10138 case RECORD_TYPE:
10139 {
10140 int count = 0;
10141 int sub_count;
10142 tree field;
10143
10144 /* Can't handle incomplete types or sizes that are not
10145 fixed. */
10146 if (!COMPLETE_TYPE_P (type)
10147 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10148 return -1;
10149
10150 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10151 {
10152 if (TREE_CODE (field) != FIELD_DECL)
10153 continue;
10154
10155 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10156 if (sub_count < 0)
10157 return -1;
10158 count += sub_count;
10159 }
10160
10161 /* There must be no padding. */
10162 if (wi::to_wide (TYPE_SIZE (type))
10163 != count * GET_MODE_BITSIZE (*modep))
10164 return -1;
10165
10166 return count;
10167 }
10168
10169 case UNION_TYPE:
10170 case QUAL_UNION_TYPE:
10171 {
10172 /* These aren't very interesting except in a degenerate case. */
10173 int count = 0;
10174 int sub_count;
10175 tree field;
10176
10177 /* Can't handle incomplete types or sizes that are not
10178 fixed. */
10179 if (!COMPLETE_TYPE_P (type)
10180 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10181 return -1;
10182
10183 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10184 {
10185 if (TREE_CODE (field) != FIELD_DECL)
10186 continue;
10187
10188 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10189 if (sub_count < 0)
10190 return -1;
10191 count = count > sub_count ? count : sub_count;
10192 }
10193
10194 /* There must be no padding. */
10195 if (wi::to_wide (TYPE_SIZE (type))
10196 != count * GET_MODE_BITSIZE (*modep))
10197 return -1;
10198
10199 return count;
10200 }
10201
10202 default:
10203 break;
10204 }
10205
10206 return -1;
10207 }
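
/* For illustration: a type such as

	struct { double x; double y[3]; }

   makes the walk above return 4 with *MODEP set to DFmode, while

	struct { float f; double d; }

   returns -1 because the element modes disagree.  */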
10208
10209 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10210 float or vector aggregate that shall be passed in FP/vector registers
10211 according to the ELFv2 ABI, return the homogeneous element mode in
10212 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10213
10214 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10215
10216 static bool
10217 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10218 machine_mode *elt_mode,
10219 int *n_elts)
10220 {
10221 /* Note that we do not accept complex types at the top level as
10222 homogeneous aggregates; these types are handled via the
10223 targetm.calls.split_complex_arg mechanism. Complex types
10224 can be elements of homogeneous aggregates, however. */
10225 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10226 && AGGREGATE_TYPE_P (type))
10227 {
10228 machine_mode field_mode = VOIDmode;
10229 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10230
10231 if (field_count > 0)
10232 {
10233 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10234 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10235
10236 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10237 up to AGGR_ARG_NUM_REG registers. */
10238 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10239 {
10240 if (elt_mode)
10241 *elt_mode = field_mode;
10242 if (n_elts)
10243 *n_elts = field_count;
10244 return true;
10245 }
10246 }
10247 }
10248
10249 if (elt_mode)
10250 *elt_mode = mode;
10251 if (n_elts)
10252 *n_elts = 1;
10253 return false;
10254 }
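
/* For illustration: under the ELFv2 ABI,

	struct { vector int a; vector int b; }

   is discovered as a homogeneous aggregate with *ELT_MODE = V4SImode
   and *N_ELTS = 2, so it can be passed in two AltiVec registers.  */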
10255
10256 /* Return a nonzero value to say to return the function value in
10257 memory, just as large structures are always returned. TYPE will be
10258 the data type of the value, and FNTYPE will be the type of the
10259 function doing the returning, or @code{NULL} for libcalls.
10260
10261 The AIX ABI for the RS/6000 specifies that all structures are
10262 returned in memory. The Darwin ABI does the same.
10263
10264 For the Darwin 64 Bit ABI, a function result can be returned in
10265 registers or in memory, depending on the size of the return data
10266 type. If it is returned in registers, the value occupies the same
10267 registers as it would if it were the first and only function
10268 argument. Otherwise, the function places its result in memory at
10269 the location pointed to by GPR3.
10270
10271 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10272 but a draft put them in memory, and GCC used to implement the draft
10273 instead of the final standard. Therefore, aix_struct_return
10274 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10275 compatibility can change DRAFT_V4_STRUCT_RET to override the
10276 default, and -m switches get the final word. See
10277 rs6000_option_override_internal for more details.
10278
10279 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10280 long double support is enabled. These values are returned in memory.
10281
10282 int_size_in_bytes returns -1 for variable size objects, which go in
10283 memory always. The cast to unsigned makes -1 > 8. */
10284
10285 static bool
10286 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10287 {
10288 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10289 if (TARGET_MACHO
10290 && rs6000_darwin64_abi
10291 && TREE_CODE (type) == RECORD_TYPE
10292 && int_size_in_bytes (type) > 0)
10293 {
10294 CUMULATIVE_ARGS valcum;
10295 rtx valret;
10296
10297 valcum.words = 0;
10298 valcum.fregno = FP_ARG_MIN_REG;
10299 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10300 /* Do a trial code generation as if this were going to be passed
10301 as an argument; if any part goes in memory, we return NULL. */
10302 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10303 if (valret)
10304 return false;
10305 /* Otherwise fall through to more conventional ABI rules. */
10306 }
10307
10308 /* The ELFv2 ABI returns homogeneous FP and vector aggregates in registers. */
10309 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10310 NULL, NULL))
10311 return false;
10312
10313 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
10314 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10315 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10316 return false;
10317
10318 if (AGGREGATE_TYPE_P (type)
10319 && (aix_struct_return
10320 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10321 return true;
10322
10323 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10324 modes only exist for GCC vector types if -maltivec. */
10325 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10326 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10327 return false;
10328
10329 /* Return synthetic vectors in memory. */
10330 if (TREE_CODE (type) == VECTOR_TYPE
10331 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10332 {
10333 static bool warned_for_return_big_vectors = false;
10334 if (!warned_for_return_big_vectors)
10335 {
10336 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10337 "non-standard ABI extension with no compatibility "
10338 "guarantee");
10339 warned_for_return_big_vectors = true;
10340 }
10341 return true;
10342 }
10343
10344 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10345 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10346 return true;
10347
10348 return false;
10349 }
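
/* For illustration: under the ELFv2 ABI,

	struct { double a; double b; }

   is a homogeneous DFmode aggregate and so is returned in
   floating-point registers, while with aix_struct_return set the same
   struct is returned in memory.  */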
10350
10351 /* Specify whether values returned in registers should be at the most
10352 significant end of a register. We want aggregates returned by
10353 value to match the way aggregates are passed to functions. */
10354
10355 static bool
10356 rs6000_return_in_msb (const_tree valtype)
10357 {
10358 return (DEFAULT_ABI == ABI_ELFv2
10359 && BYTES_BIG_ENDIAN
10360 && AGGREGATE_TYPE_P (valtype)
10361 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10362 == PAD_UPWARD));
10363 }
10364
10365 #ifdef HAVE_AS_GNU_ATTRIBUTE
10366 /* Return TRUE if a call to function FNDECL may be one that
10367 potentially affects the function calling ABI of the object file. */
10368
10369 static bool
10370 call_ABI_of_interest (tree fndecl)
10371 {
10372 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10373 {
10374 struct cgraph_node *c_node;
10375
10376 /* Libcalls are always interesting. */
10377 if (fndecl == NULL_TREE)
10378 return true;
10379
10380 /* Any call to an external function is interesting. */
10381 if (DECL_EXTERNAL (fndecl))
10382 return true;
10383
10384 /* Interesting functions that we are emitting in this object file. */
10385 c_node = cgraph_node::get (fndecl);
10386 c_node = c_node->ultimate_alias_target ();
10387 return !c_node->only_called_directly_p ();
10388 }
10389 return false;
10390 }
10391 #endif
10392
10393 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10394 for a call to a function whose data type is FNTYPE.
10395 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10396
10397 For incoming args we set the number of arguments in the prototype large
10398 so we never return a PARALLEL. */
10399
10400 void
10401 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10402 rtx libname ATTRIBUTE_UNUSED, int incoming,
10403 int libcall, int n_named_args,
10404 tree fndecl,
10405 machine_mode return_mode ATTRIBUTE_UNUSED)
10406 {
10407 static CUMULATIVE_ARGS zero_cumulative;
10408
10409 *cum = zero_cumulative;
10410 cum->words = 0;
10411 cum->fregno = FP_ARG_MIN_REG;
10412 cum->vregno = ALTIVEC_ARG_MIN_REG;
10413 cum->prototype = (fntype && prototype_p (fntype));
10414 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10415 ? CALL_LIBCALL : CALL_NORMAL);
10416 cum->sysv_gregno = GP_ARG_MIN_REG;
10417 cum->stdarg = stdarg_p (fntype);
10418 cum->libcall = libcall;
10419
10420 cum->nargs_prototype = 0;
10421 if (incoming || cum->prototype)
10422 cum->nargs_prototype = n_named_args;
10423
10424 /* Check for a longcall attribute. */
10425 if ((!fntype && rs6000_default_long_calls)
10426 || (fntype
10427 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10428 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10429 cum->call_cookie |= CALL_LONG;
10430 else if (DEFAULT_ABI != ABI_DARWIN)
10431 {
10432 bool is_local = (fndecl
10433 && !DECL_EXTERNAL (fndecl)
10434 && !DECL_WEAK (fndecl)
10435 && (*targetm.binds_local_p) (fndecl));
10436 if (is_local)
10437 ;
10438 else if (flag_plt)
10439 {
10440 if (fntype
10441 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10442 cum->call_cookie |= CALL_LONG;
10443 }
10444 else
10445 {
10446 if (!(fntype
10447 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10448 cum->call_cookie |= CALL_LONG;
10449 }
10450 }
10451
10452 if (TARGET_DEBUG_ARG)
10453 {
10454 fprintf (stderr, "\ninit_cumulative_args:");
10455 if (fntype)
10456 {
10457 tree ret_type = TREE_TYPE (fntype);
10458 fprintf (stderr, " ret code = %s,",
10459 get_tree_code_name (TREE_CODE (ret_type)));
10460 }
10461
10462 if (cum->call_cookie & CALL_LONG)
10463 fprintf (stderr, " longcall,");
10464
10465 fprintf (stderr, " proto = %d, nargs = %d\n",
10466 cum->prototype, cum->nargs_prototype);
10467 }
10468
10469 #ifdef HAVE_AS_GNU_ATTRIBUTE
10470 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10471 {
10472 cum->escapes = call_ABI_of_interest (fndecl);
10473 if (cum->escapes)
10474 {
10475 tree return_type;
10476
10477 if (fntype)
10478 {
10479 return_type = TREE_TYPE (fntype);
10480 return_mode = TYPE_MODE (return_type);
10481 }
10482 else
10483 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10484
10485 if (return_type != NULL)
10486 {
10487 if (TREE_CODE (return_type) == RECORD_TYPE
10488 && TYPE_TRANSPARENT_AGGR (return_type))
10489 {
10490 return_type = TREE_TYPE (first_field (return_type));
10491 return_mode = TYPE_MODE (return_type);
10492 }
10493 if (AGGREGATE_TYPE_P (return_type)
10494 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10495 <= 8))
10496 rs6000_returns_struct = true;
10497 }
10498 if (SCALAR_FLOAT_MODE_P (return_mode))
10499 {
10500 rs6000_passes_float = true;
10501 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10502 && (FLOAT128_IBM_P (return_mode)
10503 || FLOAT128_IEEE_P (return_mode)
10504 || (return_type != NULL
10505 && (TYPE_MAIN_VARIANT (return_type)
10506 == long_double_type_node))))
10507 rs6000_passes_long_double = true;
10508
10509 /* Note if we pass or return an IEEE 128-bit type.  We changed
10510 the mangling for these types, and we may need to make an alias
10511 with the old mangling. */
10512 if (FLOAT128_IEEE_P (return_mode))
10513 rs6000_passes_ieee128 = true;
10514 }
10515 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10516 rs6000_passes_vector = true;
10517 }
10518 }
10519 #endif
10520
10521 if (fntype
10522 && !TARGET_ALTIVEC
10523 && TARGET_ALTIVEC_ABI
10524 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10525 {
10526 error ("cannot return value in vector register because"
10527 " altivec instructions are disabled, use %qs"
10528 " to enable them", "-maltivec");
10529 }
10530 }
10531 \f
10532 /* The mode the ABI uses for a word. This is not the same as word_mode
10533 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10534
10535 static scalar_int_mode
10536 rs6000_abi_word_mode (void)
10537 {
10538 return TARGET_32BIT ? SImode : DImode;
10539 }
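/* Example: with -m32 -mpowerpc64, word_mode is DImode because the
   64-bit GPRs are available, yet the ABI still measures arguments and
   stack slots in 32-bit words, so this hook returns SImode.  */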
10540
10541 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10542 static char *
10543 rs6000_offload_options (void)
10544 {
10545 if (TARGET_64BIT)
10546 return xstrdup ("-foffload-abi=lp64");
10547 else
10548 return xstrdup ("-foffload-abi=ilp32");
10549 }
10550
10551 /* On rs6000, function arguments are promoted, as are function return
10552 values. */
10553
10554 static machine_mode
10555 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10556 machine_mode mode,
10557 int *punsignedp ATTRIBUTE_UNUSED,
10558 const_tree, int)
10559 {
10560 PROMOTE_MODE (mode, *punsignedp, type);
10561
10562 return mode;
10563 }
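/* For example, PROMOTE_MODE widens a 'short' argument to SImode under
   the 32-bit ABIs and to DImode under the 64-bit ABIs, so caller and
   callee agree on how small integer values occupy a register.  */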
10564
10565 /* Return true if TYPE must be passed on the stack and not in registers. */
10566
10567 static bool
10568 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10569 {
10570 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10571 return must_pass_in_stack_var_size (mode, type);
10572 else
10573 return must_pass_in_stack_var_size_or_pad (mode, type);
10574 }
10575
10576 static inline bool
10577 is_complex_IBM_long_double (machine_mode mode)
10578 {
10579 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10580 }
10581
10582 /* Whether ABI_V4 passes MODE args to a function in floating point
10583 registers. */
10584
10585 static bool
10586 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10587 {
10588 if (!TARGET_HARD_FLOAT)
10589 return false;
10590 if (mode == DFmode)
10591 return true;
10592 if (mode == SFmode && named)
10593 return true;
10594 /* ABI_V4 passes complex IBM long double in 8 gprs.
10595 Stupid, but we can't change the ABI now. */
10596 if (is_complex_IBM_long_double (mode))
10597 return false;
10598 if (FLOAT128_2REG_P (mode))
10599 return true;
10600 if (DECIMAL_FLOAT_MODE_P (mode))
10601 return true;
10602 return false;
10603 }
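/* Summary of the V.4 rules above, assuming hard float: DFmode, IBM
   extended double (FLOAT128_2REG_P) and the decimal float modes go in
   FPRs; SFmode goes in an FPR only when it is a named argument; and
   complex IBM long double always goes in GPRs.  */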
10604
10605 /* Implement TARGET_FUNCTION_ARG_PADDING.
10606
10607 For the AIX ABI structs are always stored left shifted in their
10608 argument slot. */
10609
10610 static pad_direction
10611 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10612 {
10613 #ifndef AGGREGATE_PADDING_FIXED
10614 #define AGGREGATE_PADDING_FIXED 0
10615 #endif
10616 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10617 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10618 #endif
10619
10620 if (!AGGREGATE_PADDING_FIXED)
10621 {
10622 /* GCC used to pass structures of the same size as integer types as
10623 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
10624 That is, structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10625 passed padded downward, except that -mstrict-align further
10626 muddied the water in that multi-component structures of 2 and 4
10627 bytes in size were passed padded upward.
10628
10629 The following arranges for best compatibility with previous
10630 versions of gcc, but removes the -mstrict-align dependency. */
10631 if (BYTES_BIG_ENDIAN)
10632 {
10633 HOST_WIDE_INT size = 0;
10634
10635 if (mode == BLKmode)
10636 {
10637 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10638 size = int_size_in_bytes (type);
10639 }
10640 else
10641 size = GET_MODE_SIZE (mode);
10642
10643 if (size == 1 || size == 2 || size == 4)
10644 return PAD_DOWNWARD;
10645 }
10646 return PAD_UPWARD;
10647 }
10648
10649 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10650 {
10651 if (type != 0 && AGGREGATE_TYPE_P (type))
10652 return PAD_UPWARD;
10653 }
10654
10655 /* Fall back to the default. */
10656 return default_function_arg_padding (mode, type);
10657 }
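/* Illustration of the big-endian rules above: a 2-byte struct is
   padded downward (it sits in the least significant end of its slot,
   as if it were a short), while a 3-byte struct is padded upward and
   occupies the low-addressed bytes of its slot.  */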
10658
10659 /* If defined, a C expression that gives the alignment boundary, in bits,
10660 of an argument with the specified mode and type. If it is not defined,
10661 PARM_BOUNDARY is used for all arguments.
10662
10663 V.4 wants long longs and doubles to be double word aligned. Just
10664 testing the mode size is a boneheaded way to do this as it means
10665 that other types such as complex int are also double word aligned.
10666 However, we're stuck with this because changing the ABI might break
10667 existing library interfaces.
10668
10669 Quadword align Altivec/VSX vectors.
10670 Quadword align large synthetic vector types. */
10671
10672 static unsigned int
10673 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10674 {
10675 machine_mode elt_mode;
10676 int n_elts;
10677
10678 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10679
10680 if (DEFAULT_ABI == ABI_V4
10681 && (GET_MODE_SIZE (mode) == 8
10682 || (TARGET_HARD_FLOAT
10683 && !is_complex_IBM_long_double (mode)
10684 && FLOAT128_2REG_P (mode))))
10685 return 64;
10686 else if (FLOAT128_VECTOR_P (mode))
10687 return 128;
10688 else if (type && TREE_CODE (type) == VECTOR_TYPE
10689 && int_size_in_bytes (type) >= 8
10690 && int_size_in_bytes (type) < 16)
10691 return 64;
10692 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10693 || (type && TREE_CODE (type) == VECTOR_TYPE
10694 && int_size_in_bytes (type) >= 16))
10695 return 128;
10696
10697 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10698 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10699 -mcompat-align-parm is used. */
10700 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10701 || DEFAULT_ABI == ABI_ELFv2)
10702 && type && TYPE_ALIGN (type) > 64)
10703 {
10704 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10705 or homogeneous float/vector aggregates here. We already handled
10706 vector aggregates above, but still need to check for float here. */
10707 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10708 && !SCALAR_FLOAT_MODE_P (elt_mode));
10709
10710 /* We used to check for BLKmode instead of the above aggregate type
10711 check. Warn when this results in any difference to the ABI. */
10712 if (aggregate_p != (mode == BLKmode))
10713 {
10714 static bool warned;
10715 if (!warned && warn_psabi)
10716 {
10717 warned = true;
10718 inform (input_location,
10719 "the ABI of passing aggregates with %d-byte alignment"
10720 " has changed in GCC 5",
10721 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10722 }
10723 }
10724
10725 if (aggregate_p)
10726 return 128;
10727 }
10728
10729 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10730 implement the "aggregate type" check as a BLKmode check here; this
10731 means certain aggregate types are in fact not aligned. */
10732 if (TARGET_MACHO && rs6000_darwin64_abi
10733 && mode == BLKmode
10734 && type && TYPE_ALIGN (type) > 64)
10735 return 128;
10736
10737 return PARM_BOUNDARY;
10738 }
10739
10740 /* The offset in words to the start of the parameter save area. */
10741
10742 static unsigned int
10743 rs6000_parm_offset (void)
10744 {
10745 return (DEFAULT_ABI == ABI_V4 ? 2
10746 : DEFAULT_ABI == ABI_ELFv2 ? 4
10747 : 6);
10748 }
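/* These offsets correspond to the fixed stack-frame header that
   precedes the parameter save area: 2 words (back chain and saved LR)
   under V.4, 4 doublewords under ELFv2, and 6 doublewords under
   AIX/ELFv1 (back chain, CR save, LR save, two reserved words, and
   TOC save).  */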
10749
10750 /* For a function parm of MODE and TYPE, return the starting word in
10751 the parameter area. NWORDS of the parameter area are already used. */
10752
10753 static unsigned int
10754 rs6000_parm_start (machine_mode mode, const_tree type,
10755 unsigned int nwords)
10756 {
10757 unsigned int align;
10758
10759 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10760 return nwords + (-(rs6000_parm_offset () + nwords) & align);
10761 }
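/* Worked example (illustrative, assuming 64-bit ELFv2 where
   PARM_BOUNDARY is 64): a 16-byte-aligned vector with NWORDS == 1
   already used.  rs6000_function_arg_boundary returns 128, so
   ALIGN == 1, and the result is 1 + (-(4 + 1) & 1) == 2: word 2 is the
   first slot whose offset from the stack pointer (4 + 2 == 6
   doublewords, i.e. 48 bytes) is 16-byte aligned.  */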
10762
10763 /* Compute the size (in words) of a function argument. */
10764
10765 static unsigned long
10766 rs6000_arg_size (machine_mode mode, const_tree type)
10767 {
10768 unsigned long size;
10769
10770 if (mode != BLKmode)
10771 size = GET_MODE_SIZE (mode);
10772 else
10773 size = int_size_in_bytes (type);
10774
10775 if (TARGET_32BIT)
10776 return (size + 3) >> 2;
10777 else
10778 return (size + 7) >> 3;
10779 }
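/* E.g. a 10-byte argument occupies (10 + 3) >> 2 == 3 words under a
   32-bit ABI and (10 + 7) >> 3 == 2 words under a 64-bit ABI.  */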
10780 \f
10781 /* Use this to flush pending int fields. */
10782
10783 static void
10784 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10785 HOST_WIDE_INT bitpos, int final)
10786 {
10787 unsigned int startbit, endbit;
10788 int intregs, intoffset;
10789
10790 /* Handle the situations where a float is taking up the first half
10791 of the GPR, and the other half is empty (typically due to
10792 alignment restrictions).  We can detect this by an 8-byte-aligned
10793 int field, or by seeing that this is the final flush for this
10794 argument. Count the word and continue on. */
10795 if (cum->floats_in_gpr == 1
10796 && (cum->intoffset % 64 == 0
10797 || (cum->intoffset == -1 && final)))
10798 {
10799 cum->words++;
10800 cum->floats_in_gpr = 0;
10801 }
10802
10803 if (cum->intoffset == -1)
10804 return;
10805
10806 intoffset = cum->intoffset;
10807 cum->intoffset = -1;
10808 cum->floats_in_gpr = 0;
10809
10810 if (intoffset % BITS_PER_WORD != 0)
10811 {
10812 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
10813 if (!int_mode_for_size (bits, 0).exists ())
10814 {
10815 /* We couldn't find an appropriate mode, which happens,
10816 e.g., in packed structs when there are 3 bytes to load.
10817 Move intoffset back to the beginning of the word in this
10818 case. */
10819 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10820 }
10821 }
10822
10823 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10824 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10825 intregs = (endbit - startbit) / BITS_PER_WORD;
10826 cum->words += intregs;
10827 /* words should be unsigned. */
10828 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
10829 {
10830 int pad = (endbit/BITS_PER_WORD) - cum->words;
10831 cum->words += pad;
10832 }
10833 }
10834
10835 /* The darwin64 ABI calls for us to recurse down through structs,
10836 looking for elements passed in registers. Unfortunately, we have
10837 to track int register count here also because of misalignments
10838 in powerpc alignment mode. */
10839
10840 static void
10841 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10842 const_tree type,
10843 HOST_WIDE_INT startbitpos)
10844 {
10845 tree f;
10846
10847 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10848 if (TREE_CODE (f) == FIELD_DECL)
10849 {
10850 HOST_WIDE_INT bitpos = startbitpos;
10851 tree ftype = TREE_TYPE (f);
10852 machine_mode mode;
10853 if (ftype == error_mark_node)
10854 continue;
10855 mode = TYPE_MODE (ftype);
10856
10857 if (DECL_SIZE (f) != 0
10858 && tree_fits_uhwi_p (bit_position (f)))
10859 bitpos += int_bit_position (f);
10860
10861 /* ??? FIXME: else assume zero offset. */
10862
10863 if (TREE_CODE (ftype) == RECORD_TYPE)
10864 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10865 else if (USE_FP_FOR_ARG_P (cum, mode))
10866 {
10867 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10868 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10869 cum->fregno += n_fpregs;
10870 /* Single-precision floats present a special problem for
10871 us, because they are smaller than an 8-byte GPR, and so
10872 the structure-packing rules combined with the standard
10873 varargs behavior mean that we want to pack float/float
10874 and float/int combinations into a single register's
10875 space. This is complicated by the arg advance flushing,
10876 which works on arbitrarily large groups of int-type
10877 fields. */
10878 if (mode == SFmode)
10879 {
10880 if (cum->floats_in_gpr == 1)
10881 {
10882 /* Two floats in a word; count the word and reset
10883 the float count. */
10884 cum->words++;
10885 cum->floats_in_gpr = 0;
10886 }
10887 else if (bitpos % 64 == 0)
10888 {
10889 /* A float at the beginning of an 8-byte word;
10890 count it and put off adjusting cum->words until
10891 we see if an arg advance flush is going to do it
10892 for us. */
10893 cum->floats_in_gpr++;
10894 }
10895 else
10896 {
10897 /* The float is at the end of a word, preceded
10898 by integer fields, so the arg advance flush
10899 just above has already set cum->words and
10900 everything is taken care of. */
10901 }
10902 }
10903 else
10904 cum->words += n_fpregs;
10905 }
10906 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10907 {
10908 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10909 cum->vregno++;
10910 cum->words += 2;
10911 }
10912 else if (cum->intoffset == -1)
10913 cum->intoffset = bitpos;
10914 }
10915 }
10916
10917 /* Check for an item that needs to be considered specially under the darwin 64
10918 bit ABI. These are record types where the mode is BLK or the structure is
10919 8 bytes in size. */
10920 static int
10921 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10922 {
10923 return rs6000_darwin64_abi
10924 && ((mode == BLKmode
10925 && TREE_CODE (type) == RECORD_TYPE
10926 && int_size_in_bytes (type) > 0)
10927 || (type && TREE_CODE (type) == RECORD_TYPE
10928 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10929 }
10930
10931 /* Update the data in CUM to advance over an argument
10932 of mode MODE and data type TYPE.
10933 (TYPE is null for libcalls where that information may not be available.)
10934
10935 Note that for args passed by reference, function_arg will be called
10936 with MODE and TYPE set to that of the pointer to the arg, not the arg
10937 itself. */
10938
10939 static void
10940 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10941 const_tree type, bool named, int depth)
10942 {
10943 machine_mode elt_mode;
10944 int n_elts;
10945
10946 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10947
10948 /* Only tick off an argument if we're not recursing. */
10949 if (depth == 0)
10950 cum->nargs_prototype--;
10951
10952 #ifdef HAVE_AS_GNU_ATTRIBUTE
10953 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
10954 && cum->escapes)
10955 {
10956 if (SCALAR_FLOAT_MODE_P (mode))
10957 {
10958 rs6000_passes_float = true;
10959 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10960 && (FLOAT128_IBM_P (mode)
10961 || FLOAT128_IEEE_P (mode)
10962 || (type != NULL
10963 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
10964 rs6000_passes_long_double = true;
10965
10966 /* Note if we pass or return an IEEE 128-bit type.  We changed the
10967 mangling for these types, and we may need to make an alias with
10968 the old mangling. */
10969 if (FLOAT128_IEEE_P (mode))
10970 rs6000_passes_ieee128 = true;
10971 }
10972 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
10973 rs6000_passes_vector = true;
10974 }
10975 #endif
10976
10977 if (TARGET_ALTIVEC_ABI
10978 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10979 || (type && TREE_CODE (type) == VECTOR_TYPE
10980 && int_size_in_bytes (type) == 16)))
10981 {
10982 bool stack = false;
10983
10984 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10985 {
10986 cum->vregno += n_elts;
10987
10988 if (!TARGET_ALTIVEC)
10989 error ("cannot pass argument in vector register because"
10990 " altivec instructions are disabled, use %qs"
10991 " to enable them", "-maltivec");
10992
10993 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
10994 even if it is going to be passed in a vector register.
10995 Darwin does the same for variable-argument functions. */
10996 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10997 && TARGET_64BIT)
10998 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
10999 stack = true;
11000 }
11001 else
11002 stack = true;
11003
11004 if (stack)
11005 {
11006 int align;
11007
11008 /* Vector parameters must be 16-byte aligned. In 32-bit
11009 mode this means we need to take into account the offset
11010 to the parameter save area. In 64-bit mode, they just
11011 have to start on an even word, since the parameter save
11012 area is 16-byte aligned. */
11013 if (TARGET_32BIT)
11014 align = -(rs6000_parm_offset () + cum->words) & 3;
11015 else
11016 align = cum->words & 1;
11017 cum->words += align + rs6000_arg_size (mode, type);
11018
11019 if (TARGET_DEBUG_ARG)
11020 {
11021 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11022 cum->words, align);
11023 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11024 cum->nargs_prototype, cum->prototype,
11025 GET_MODE_NAME (mode));
11026 }
11027 }
11028 }
11029 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11030 {
11031 int size = int_size_in_bytes (type);
11032 /* Variable sized types have size == -1 and are
11033 treated as if consisting entirely of ints.
11034 Pad to 16 byte boundary if needed. */
11035 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11036 && (cum->words % 2) != 0)
11037 cum->words++;
11038 /* For varargs, we can just go up by the size of the struct. */
11039 if (!named)
11040 cum->words += (size + 7) / 8;
11041 else
11042 {
11043 /* It is tempting to say int register count just goes up by
11044 sizeof(type)/8, but this is wrong in a case such as
11045 { int; double; int; } [powerpc alignment]. We have to
11046 grovel through the fields for these too. */
11047 cum->intoffset = 0;
11048 cum->floats_in_gpr = 0;
11049 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11050 rs6000_darwin64_record_arg_advance_flush (cum,
11051 size * BITS_PER_UNIT, 1);
11052 }
11053 if (TARGET_DEBUG_ARG)
11054 {
11055 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
11056 cum->words, TYPE_ALIGN (type), size);
11057 fprintf (stderr,
11058 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11059 cum->nargs_prototype, cum->prototype,
11060 GET_MODE_NAME (mode));
11061 }
11062 }
11063 else if (DEFAULT_ABI == ABI_V4)
11064 {
11065 if (abi_v4_pass_in_fpr (mode, named))
11066 {
11067 /* _Decimal128 must use an even/odd register pair. This assumes
11068 that the register number is odd when fregno is odd. */
11069 if (mode == TDmode && (cum->fregno % 2) == 1)
11070 cum->fregno++;
11071
11072 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11073 <= FP_ARG_V4_MAX_REG)
11074 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11075 else
11076 {
11077 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11078 if (mode == DFmode || FLOAT128_IBM_P (mode)
11079 || mode == DDmode || mode == TDmode)
11080 cum->words += cum->words & 1;
11081 cum->words += rs6000_arg_size (mode, type);
11082 }
11083 }
11084 else
11085 {
11086 int n_words = rs6000_arg_size (mode, type);
11087 int gregno = cum->sysv_gregno;
11088
11089 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11090 As does any other 2 word item such as complex int due to a
11091 historical mistake. */
11092 if (n_words == 2)
11093 gregno += (1 - gregno) & 1;
11094
11095 /* Multi-reg args are not split between registers and stack. */
11096 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11097 {
11098 /* Long long is aligned on the stack. So are other 2 word
11099 items such as complex int due to a historical mistake. */
11100 if (n_words == 2)
11101 cum->words += cum->words & 1;
11102 cum->words += n_words;
11103 }
11104
11105 /* Note: we continue to accumulate gregno even after we have started
11106 spilling to the stack; this is how expand_builtin_saveregs
11107 learns that spilling has started.  */
11108 cum->sysv_gregno = gregno + n_words;
11109 }
11110
11111 if (TARGET_DEBUG_ARG)
11112 {
11113 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11114 cum->words, cum->fregno);
11115 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11116 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11117 fprintf (stderr, "mode = %4s, named = %d\n",
11118 GET_MODE_NAME (mode), named);
11119 }
11120 }
11121 else
11122 {
11123 int n_words = rs6000_arg_size (mode, type);
11124 int start_words = cum->words;
11125 int align_words = rs6000_parm_start (mode, type, start_words);
11126
11127 cum->words = align_words + n_words;
11128
11129 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11130 {
11131 /* _Decimal128 must be passed in an even/odd float register pair.
11132 This assumes that the register number is odd when fregno is
11133 odd. */
11134 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11135 cum->fregno++;
11136 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11137 }
11138
11139 if (TARGET_DEBUG_ARG)
11140 {
11141 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11142 cum->words, cum->fregno);
11143 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11144 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11145 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11146 named, align_words - start_words, depth);
11147 }
11148 }
11149 }
11150
11151 static void
11152 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11153 const_tree type, bool named)
11154 {
11155 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11156 0);
11157 }
11158
11159 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11160 structure between cum->intoffset and bitpos to integer registers. */
11161
11162 static void
11163 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11164 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11165 {
11166 machine_mode mode;
11167 unsigned int regno;
11168 unsigned int startbit, endbit;
11169 int this_regno, intregs, intoffset;
11170 rtx reg;
11171
11172 if (cum->intoffset == -1)
11173 return;
11174
11175 intoffset = cum->intoffset;
11176 cum->intoffset = -1;
11177
11178 /* If this is the trailing part of a word, try to load only that
11179 much into the register.  Otherwise load the whole register.  Note
11180 that in the latter case we may pick up unwanted bits.  It's not a
11181 problem at the moment but we may wish to revisit it.  */
11182
11183 if (intoffset % BITS_PER_WORD != 0)
11184 {
11185 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11186 if (!int_mode_for_size (bits, 0).exists (&mode))
11187 {
11188 /* We couldn't find an appropriate mode, which happens,
11189 e.g., in packed structs when there are 3 bytes to load.
11190 Move intoffset back to the beginning of the word in this
11191 case. */
11192 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11193 mode = word_mode;
11194 }
11195 }
11196 else
11197 mode = word_mode;
11198
11199 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11200 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11201 intregs = (endbit - startbit) / BITS_PER_WORD;
11202 this_regno = cum->words + intoffset / BITS_PER_WORD;
11203
11204 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11205 cum->use_stack = 1;
11206
11207 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11208 if (intregs <= 0)
11209 return;
11210
11211 intoffset /= BITS_PER_UNIT;
11212 do
11213 {
11214 regno = GP_ARG_MIN_REG + this_regno;
11215 reg = gen_rtx_REG (mode, regno);
11216 rvec[(*k)++] =
11217 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11218
11219 this_regno += 1;
11220 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11221 mode = word_mode;
11222 intregs -= 1;
11223 }
11224 while (intregs > 0);
11225 }
11226
11227 /* Recursive workhorse for rs6000_darwin64_record_arg, below.  */
11228
11229 static void
11230 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11231 HOST_WIDE_INT startbitpos, rtx rvec[],
11232 int *k)
11233 {
11234 tree f;
11235
11236 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11237 if (TREE_CODE (f) == FIELD_DECL)
11238 {
11239 HOST_WIDE_INT bitpos = startbitpos;
11240 tree ftype = TREE_TYPE (f);
11241 machine_mode mode;
11242 if (ftype == error_mark_node)
11243 continue;
11244 mode = TYPE_MODE (ftype);
11245
11246 if (DECL_SIZE (f) != 0
11247 && tree_fits_uhwi_p (bit_position (f)))
11248 bitpos += int_bit_position (f);
11249
11250 /* ??? FIXME: else assume zero offset. */
11251
11252 if (TREE_CODE (ftype) == RECORD_TYPE)
11253 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11254 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11255 {
11256 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11257 #if 0
11258 switch (mode)
11259 {
11260 case E_SCmode: mode = SFmode; break;
11261 case E_DCmode: mode = DFmode; break;
11262 case E_TCmode: mode = TFmode; break;
11263 default: break;
11264 }
11265 #endif
11266 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11267 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11268 {
11269 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11270 && (mode == TFmode || mode == TDmode));
11271 /* Long double or _Decimal128 split over regs and memory. */
11272 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11273 cum->use_stack = 1;
11274 }
11275 rvec[(*k)++]
11276 = gen_rtx_EXPR_LIST (VOIDmode,
11277 gen_rtx_REG (mode, cum->fregno++),
11278 GEN_INT (bitpos / BITS_PER_UNIT));
11279 if (FLOAT128_2REG_P (mode))
11280 cum->fregno++;
11281 }
11282 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11283 {
11284 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11285 rvec[(*k)++]
11286 = gen_rtx_EXPR_LIST (VOIDmode,
11287 gen_rtx_REG (mode, cum->vregno++),
11288 GEN_INT (bitpos / BITS_PER_UNIT));
11289 }
11290 else if (cum->intoffset == -1)
11291 cum->intoffset = bitpos;
11292 }
11293 }
11294
11295 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11296 the register(s) to be used for each field and subfield of a struct
11297 being passed by value, along with the offset of where the
11298 register's value may be found in the block.  FP fields go in FP
11299 registers, vector fields go in vector registers, and everything
11300 else goes in int registers, packed as in memory.
11301
11302 This code is also used for function return values. RETVAL indicates
11303 whether this is the case.
11304
11305 Much of this is taken from the SPARC V9 port, which has a similar
11306 calling convention. */
11307
11308 static rtx
11309 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11310 bool named, bool retval)
11311 {
11312 rtx rvec[FIRST_PSEUDO_REGISTER];
11313 int k = 1, kbase = 1;
11314 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11315 /* This is a copy; modifications are not visible to our caller. */
11316 CUMULATIVE_ARGS copy_cum = *orig_cum;
11317 CUMULATIVE_ARGS *cum = &copy_cum;
11318
11319 /* Pad to 16 byte boundary if needed. */
11320 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11321 && (cum->words % 2) != 0)
11322 cum->words++;
11323
11324 cum->intoffset = 0;
11325 cum->use_stack = 0;
11326 cum->named = named;
11327
11328 /* Put entries into rvec[] for individual FP and vector fields, and
11329 for the chunks of memory that go in int regs. Note we start at
11330 element 1; 0 is reserved for an indication of using memory, and
11331 may or may not be filled in below. */
11332 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11333 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11334
11335 /* If any part of the struct went on the stack put all of it there.
11336 This hack is because the generic code for
11337 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11338 parts of the struct are not at the beginning. */
11339 if (cum->use_stack)
11340 {
11341 if (retval)
11342 return NULL_RTX; /* doesn't go in registers at all */
11343 kbase = 0;
11344 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11345 }
11346 if (k > 1 || cum->use_stack)
11347 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11348 else
11349 return NULL_RTX;
11350 }
11351
11352 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11353
11354 static rtx
11355 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11356 int align_words)
11357 {
11358 int n_units;
11359 int i, k;
11360 rtx rvec[GP_ARG_NUM_REG + 1];
11361
11362 if (align_words >= GP_ARG_NUM_REG)
11363 return NULL_RTX;
11364
11365 n_units = rs6000_arg_size (mode, type);
11366
11367 /* Optimize the simple case where the arg fits in one gpr, except in
11368 the case of BLKmode due to assign_parms assuming that registers are
11369 BITS_PER_WORD wide. */
11370 if (n_units == 0
11371 || (n_units == 1 && mode != BLKmode))
11372 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11373
11374 k = 0;
11375 if (align_words + n_units > GP_ARG_NUM_REG)
11376 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11377 using a magic NULL_RTX component.
11378 This is not strictly correct. Only some of the arg belongs in
11379 memory, not all of it. However, the normal scheme using
11380 function_arg_partial_nregs can result in unusual subregs, eg.
11381 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11382 store the whole arg to memory is often more efficient than code
11383 to store pieces, and we know that space is available in the right
11384 place for the whole arg. */
11385 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11386
11387 i = 0;
11388 do
11389 {
11390 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11391 rtx off = GEN_INT (i++ * 4);
11392 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11393 }
11394 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11395
11396 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11397 }
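/* Illustrative example: an 8-byte argument starting at
   align_words == 7 no longer fits entirely in r3..r10, so the PARALLEL
   contains the NULL_RTX memory marker plus (reg:SI r10) at offset 0;
   the second half of the value lives only in memory.  */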
11398
11399 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11400 but must also be copied into the parameter save area starting at
11401 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11402 to the GPRs and/or memory. Return the number of elements used. */
11403
11404 static int
11405 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11406 int align_words, rtx *rvec)
11407 {
11408 int k = 0;
11409
11410 if (align_words < GP_ARG_NUM_REG)
11411 {
11412 int n_words = rs6000_arg_size (mode, type);
11413
11414 if (align_words + n_words > GP_ARG_NUM_REG
11415 || mode == BLKmode
11416 || (TARGET_32BIT && TARGET_POWERPC64))
11417 {
11418 /* If this is partially on the stack, then we only
11419 include the portion actually in registers here. */
11420 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11421 int i = 0;
11422
11423 if (align_words + n_words > GP_ARG_NUM_REG)
11424 {
11425 /* Not all of the arg fits in gprs. Say that it goes in memory
11426 too, using a magic NULL_RTX component. Also see comment in
11427 rs6000_mixed_function_arg for why the normal
11428 function_arg_partial_nregs scheme doesn't work in this case. */
11429 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11430 }
11431
11432 do
11433 {
11434 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11435 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11436 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11437 }
11438 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11439 }
11440 else
11441 {
11442 /* The whole arg fits in gprs. */
11443 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11444 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11445 }
11446 }
11447 else
11448 {
11449 /* It's entirely in memory. */
11450 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11451 }
11452
11453 return k;
11454 }
11455
11456 /* RVEC is a vector of K components of an argument of mode MODE.
11457 Construct the final function_arg return value from it. */
11458
11459 static rtx
11460 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11461 {
11462 gcc_assert (k >= 1);
11463
11464 /* Avoid returning a PARALLEL in the trivial cases. */
11465 if (k == 1)
11466 {
11467 if (XEXP (rvec[0], 0) == NULL_RTX)
11468 return NULL_RTX;
11469
11470 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11471 return XEXP (rvec[0], 0);
11472 }
11473
11474 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11475 }
11476
11477 /* Determine where to put an argument to a function.
11478 Value is zero to push the argument on the stack,
11479 or a hard register in which to store the argument.
11480
11481 MODE is the argument's machine mode.
11482 TYPE is the data type of the argument (as a tree).
11483 This is null for libcalls where that information may
11484 not be available.
11485 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11486 the preceding args and about the function being called. It is
11487 not modified in this routine.
11488 NAMED is nonzero if this argument is a named parameter
11489 (otherwise it is an extra parameter matching an ellipsis).
11490
11491    On RS/6000 the first eight words of non-FP arguments are normally in
11492    registers and the rest are pushed.  Under AIX, the first 13 FP args
11493    are in registers.  Under V.4, the first 8 FP args are in registers.
11494
11495 If this is floating-point and no prototype is specified, we use
11496 both an FP and integer register (or possibly FP reg and stack). Library
11497 functions (when CALL_LIBCALL is set) always have the proper types for args,
11498 so we can pass the FP value just in one register. emit_library_function
11499 doesn't support PARALLEL anyway.
11500
11501 Note that for args passed by reference, function_arg will be called
11502 with MODE and TYPE set to that of the pointer to the arg, not the arg
11503 itself. */
11504
11505 static rtx
11506 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11507 const_tree type, bool named)
11508 {
11509 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11510 enum rs6000_abi abi = DEFAULT_ABI;
11511 machine_mode elt_mode;
11512 int n_elts;
11513
11514 /* Return a marker to indicate whether we need to set or clear the CR1
11515 bit that V.4 uses to say fp args were passed in registers.
11516 Assume that we don't need the marker for software floating point,
11517 or compiler generated library calls. */
11518 if (mode == VOIDmode)
11519 {
11520 if (abi == ABI_V4
11521 && (cum->call_cookie & CALL_LIBCALL) == 0
11522 && (cum->stdarg
11523 || (cum->nargs_prototype < 0
11524 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11525 && TARGET_HARD_FLOAT)
11526 return GEN_INT (cum->call_cookie
11527 | ((cum->fregno == FP_ARG_MIN_REG)
11528 ? CALL_V4_SET_FP_ARGS
11529 : CALL_V4_CLEAR_FP_ARGS));
11530
11531 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11532 }
11533
11534 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11535
11536 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11537 {
11538 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11539 if (rslt != NULL_RTX)
11540 return rslt;
11541 /* Else fall through to usual handling. */
11542 }
11543
11544 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11545 {
11546 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11547 rtx r, off;
11548 int i, k = 0;
11549
11550 /* Do we also need to pass this argument in the parameter save area?
11551 Library support functions for IEEE 128-bit are assumed to not need the
11552 value passed both in GPRs and in vector registers. */
11553 if (TARGET_64BIT && !cum->prototype
11554 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11555 {
11556 int align_words = ROUND_UP (cum->words, 2);
11557 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11558 }
11559
11560 /* Describe where this argument goes in the vector registers. */
11561 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11562 {
11563 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11564 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11565 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11566 }
11567
11568 return rs6000_finish_function_arg (mode, rvec, k);
11569 }
11570 else if (TARGET_ALTIVEC_ABI
11571 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11572 || (type && TREE_CODE (type) == VECTOR_TYPE
11573 && int_size_in_bytes (type) == 16)))
11574 {
11575 if (named || abi == ABI_V4)
11576 return NULL_RTX;
11577 else
11578 {
11579 /* Vector parameters to varargs functions under AIX or Darwin
11580 get passed in memory and possibly also in GPRs. */
11581 int align, align_words, n_words;
11582 machine_mode part_mode;
11583
11584 /* Vector parameters must be 16-byte aligned. In 32-bit
11585 mode this means we need to take into account the offset
11586 to the parameter save area. In 64-bit mode, they just
11587 have to start on an even word, since the parameter save
11588 area is 16-byte aligned. */
11589 if (TARGET_32BIT)
11590 align = -(rs6000_parm_offset () + cum->words) & 3;
11591 else
11592 align = cum->words & 1;
11593 align_words = cum->words + align;
11594
11595 /* Out of registers? Memory, then. */
11596 if (align_words >= GP_ARG_NUM_REG)
11597 return NULL_RTX;
11598
11599 if (TARGET_32BIT && TARGET_POWERPC64)
11600 return rs6000_mixed_function_arg (mode, type, align_words);
11601
11602 /* The vector value goes in GPRs. Only the part of the
11603 value in GPRs is reported here. */
11604 part_mode = mode;
11605 n_words = rs6000_arg_size (mode, type);
11606 if (align_words + n_words > GP_ARG_NUM_REG)
11607 /* Fortunately, there are only two possibilities, the value
11608 is either wholly in GPRs or half in GPRs and half not. */
11609 part_mode = DImode;
11610
11611 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11612 }
11613 }
11614
11615 else if (abi == ABI_V4)
11616 {
11617 if (abi_v4_pass_in_fpr (mode, named))
11618 {
11619 /* _Decimal128 must use an even/odd register pair. This assumes
11620 that the register number is odd when fregno is odd. */
11621 if (mode == TDmode && (cum->fregno % 2) == 1)
11622 cum->fregno++;
11623
11624 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11625 <= FP_ARG_V4_MAX_REG)
11626 return gen_rtx_REG (mode, cum->fregno);
11627 else
11628 return NULL_RTX;
11629 }
11630 else
11631 {
11632 int n_words = rs6000_arg_size (mode, type);
11633 int gregno = cum->sysv_gregno;
11634
11635 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11636 As does any other 2 word item such as complex int due to a
11637 historical mistake. */
11638 if (n_words == 2)
11639 gregno += (1 - gregno) & 1;
11640
11641 /* Multi-reg args are not split between registers and stack. */
11642 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11643 return NULL_RTX;
11644
11645 if (TARGET_32BIT && TARGET_POWERPC64)
11646 return rs6000_mixed_function_arg (mode, type,
11647 gregno - GP_ARG_MIN_REG);
11648 return gen_rtx_REG (mode, gregno);
11649 }
11650 }
11651 else
11652 {
11653 int align_words = rs6000_parm_start (mode, type, cum->words);
11654
11655 /* _Decimal128 must be passed in an even/odd float register pair.
11656 This assumes that the register number is odd when fregno is odd. */
11657 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11658 cum->fregno++;
11659
11660 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11661 && !(TARGET_AIX && !TARGET_ELF
11662 && type != NULL && AGGREGATE_TYPE_P (type)))
11663 {
11664 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11665 rtx r, off;
11666 int i, k = 0;
11667 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11668 int fpr_words;
11669
11670 /* Do we also need to pass this argument in the parameter
11671 save area? */
11672 if (type && (cum->nargs_prototype <= 0
11673 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11674 && TARGET_XL_COMPAT
11675 && align_words >= GP_ARG_NUM_REG)))
11676 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11677
11678 /* Describe where this argument goes in the fprs. */
11679 for (i = 0; i < n_elts
11680 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11681 {
11682 /* Check if the argument is split over registers and memory.
11683 This can only ever happen for long double or _Decimal128;
11684 complex types are handled via split_complex_arg. */
11685 machine_mode fmode = elt_mode;
11686 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11687 {
11688 gcc_assert (FLOAT128_2REG_P (fmode));
11689 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11690 }
11691
11692 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11693 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11694 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11695 }
11696
11697 /* If there were not enough FPRs to hold the argument, the rest
11698 usually goes into memory. However, if the current position
11699 is still within the register parameter area, a portion may
11700 actually have to go into GPRs.
11701
11702 Note that it may happen that the portion of the argument
11703 passed in the first "half" of the first GPR was already
11704 passed in the last FPR as well.
11705
11706 For unnamed arguments, we already set up GPRs to cover the
11707 whole argument in rs6000_psave_function_arg, so there is
11708 nothing further to do at this point. */
11709 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11710 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11711 && cum->nargs_prototype > 0)
11712 {
11713 static bool warned;
11714
11715 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11716 int n_words = rs6000_arg_size (mode, type);
11717
11718 align_words += fpr_words;
11719 n_words -= fpr_words;
11720
11721 do
11722 {
11723 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11724 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11725 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11726 }
11727 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11728
11729 if (!warned && warn_psabi)
11730 {
11731 warned = true;
11732 inform (input_location,
11733 "the ABI of passing homogeneous %<float%> aggregates"
11734 " has changed in GCC 5");
11735 }
11736 }
11737
11738 return rs6000_finish_function_arg (mode, rvec, k);
11739 }
11740 else if (align_words < GP_ARG_NUM_REG)
11741 {
11742 if (TARGET_32BIT && TARGET_POWERPC64)
11743 return rs6000_mixed_function_arg (mode, type, align_words);
11744
11745 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11746 }
11747 else
11748 return NULL_RTX;
11749 }
11750 }
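/* Example (illustrative) of the FPR path above: under ELFv2, a first
   named argument of type struct { double a, b; } is a homogeneous
   aggregate of two DFmode elements, so the result is a PARALLEL of
   (reg:DF f1) at offset 0 and (reg:DF f2) at offset 8; no GPR or
   memory component is needed while FPRs remain and the call is
   prototyped.  */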
11751 \f
11752 /* For an arg passed partly in registers and partly in memory, this is
11753 the number of bytes passed in registers. For args passed entirely in
11754 registers or entirely in memory, zero. When an arg is described by a
11755 PARALLEL, perhaps using more than one register type, this function
11756 returns the number of bytes used by the first element of the PARALLEL. */
11757
11758 static int
11759 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11760 tree type, bool named)
11761 {
11762 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11763 bool passed_in_gprs = true;
11764 int ret = 0;
11765 int align_words;
11766 machine_mode elt_mode;
11767 int n_elts;
11768
11769 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11770
11771 if (DEFAULT_ABI == ABI_V4)
11772 return 0;
11773
11774 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11775 {
11776 /* If we are passing this arg in the fixed parameter save area (gprs or
11777 memory) as well as VRs, we do not use the partial bytes mechanism;
11778 instead, rs6000_function_arg will return a PARALLEL including a memory
11779 element as necessary. Library support functions for IEEE 128-bit are
11780 assumed to not need the value passed both in GPRs and in vector
11781 registers. */
11782 if (TARGET_64BIT && !cum->prototype
11783 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11784 return 0;
11785
11786 /* Otherwise, we pass in VRs only. Check for partial copies. */
11787 passed_in_gprs = false;
11788 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11789 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11790 }
11791
11792 /* In this complicated case we just disable the partial_nregs code. */
11793 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11794 return 0;
11795
11796 align_words = rs6000_parm_start (mode, type, cum->words);
11797
11798 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11799 && !(TARGET_AIX && !TARGET_ELF
11800 && type != NULL && AGGREGATE_TYPE_P (type)))
11801 {
11802 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11803
11804 /* If we are passing this arg in the fixed parameter save area
11805 (gprs or memory) as well as FPRs, we do not use the partial
11806 bytes mechanism; instead, rs6000_function_arg will return a
11807 PARALLEL including a memory element as necessary. */
11808 if (type
11809 && (cum->nargs_prototype <= 0
11810 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11811 && TARGET_XL_COMPAT
11812 && align_words >= GP_ARG_NUM_REG)))
11813 return 0;
11814
11815 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11816 passed_in_gprs = false;
11817 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11818 {
11819 /* Compute number of bytes / words passed in FPRs. If there
11820 is still space available in the register parameter area
11821 *after* that amount, a part of the argument will be passed
11822 in GPRs. In that case, the total amount passed in any
11823 registers is equal to the amount that would have been passed
11824 in GPRs if everything were passed there, so we fall back to
11825 the GPR code below to compute the appropriate value. */
11826 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11827 * MIN (8, GET_MODE_SIZE (elt_mode)));
11828 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11829
11830 if (align_words + fpr_words < GP_ARG_NUM_REG)
11831 passed_in_gprs = true;
11832 else
11833 ret = fpr;
11834 }
11835 }
11836
11837 if (passed_in_gprs
11838 && align_words < GP_ARG_NUM_REG
11839 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11840 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11841
11842 if (ret != 0 && TARGET_DEBUG_ARG)
11843 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11844
11845 return ret;
11846 }
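/* Worked example (illustrative): under a 64-bit ABI, a 40-byte
   aggregate whose slot starts at align_words == 6 has only r9 and r10
   left, so ret == (8 - 6) * 8 == 16 bytes in registers; the remaining
   24 bytes go on the stack.  */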
11847 \f
11848 /* A C expression that indicates when an argument must be passed by
11849 reference. If nonzero for an argument, a copy of that argument is
11850 made in memory and a pointer to the argument is passed instead of
11851 the argument itself. The pointer is passed in whatever way is
11852 appropriate for passing a pointer to that type.
11853
11854 Under V.4, aggregates and long double are passed by reference.
11855
11856 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11857 reference unless the AltiVec vector extension ABI is in force.
11858
11859 As an extension to all ABIs, variable sized types are passed by
11860 reference. */
11861
11862 static bool
11863 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11864 machine_mode mode, const_tree type,
11865 bool named ATTRIBUTE_UNUSED)
11866 {
11867 if (!type)
11868 return 0;
11869
11870 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11871 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11872 {
11873 if (TARGET_DEBUG_ARG)
11874 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11875 return 1;
11876 }
11877
11878 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11879 {
11880 if (TARGET_DEBUG_ARG)
11881 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11882 return 1;
11883 }
11884
11885 if (int_size_in_bytes (type) < 0)
11886 {
11887 if (TARGET_DEBUG_ARG)
11888 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11889 return 1;
11890 }
11891
11892 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11893 modes only exist for GCC vector types if -maltivec. */
11894 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11895 {
11896 if (TARGET_DEBUG_ARG)
11897 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11898 return 1;
11899 }
11900
11901 /* Pass synthetic vectors in memory. */
11902 if (TREE_CODE (type) == VECTOR_TYPE
11903 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11904 {
11905 static bool warned_for_pass_big_vectors = false;
11906 if (TARGET_DEBUG_ARG)
11907 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11908 if (!warned_for_pass_big_vectors)
11909 {
11910 warning (OPT_Wpsabi, "GCC vector passed by reference: "
11911 "non-standard ABI extension with no compatibility "
11912 "guarantee");
11913 warned_for_pass_big_vectors = true;
11914 }
11915 return 1;
11916 }
11917
11918 return 0;
11919 }
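/* For example, every aggregate is passed by reference under V.4, and a
   32-byte GCC synthetic vector (say, one declared with
   __attribute__ ((vector_size (32)))) is passed by reference under
   every ABI, since it exceeds both the 16- and 8-byte limits above.  */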
11920
11921 /* Process a parameter of type TYPE after ARGS_SO_FAR parameters have
11922 already been processed.  Return true if the parameter must be passed
11923 (fully or partially) on the stack. */
11924
11925 static bool
11926 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11927 {
11928 machine_mode mode;
11929 int unsignedp;
11930 rtx entry_parm;
11931
11932 /* Catch errors. */
11933 if (type == NULL || type == error_mark_node)
11934 return true;
11935
11936 /* Handle types with no storage requirement. */
11937 if (TYPE_MODE (type) == VOIDmode)
11938 return false;
11939
11940 /* Handle complex types. */
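/* Both parts of a complex value have the same component type, so the
   subexpression below appears twice; each call advances ARGS_SO_FAR
   over one part.  */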
11941 if (TREE_CODE (type) == COMPLEX_TYPE)
11942 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
11943 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
11944
11945 /* Handle transparent aggregates. */
11946 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
11947 && TYPE_TRANSPARENT_AGGR (type))
11948 type = TREE_TYPE (first_field (type));
11949
11950 /* See if this arg was passed by invisible reference. */
11951 if (pass_by_reference (get_cumulative_args (args_so_far),
11952 TYPE_MODE (type), type, true))
11953 type = build_pointer_type (type);
11954
11955 /* Find mode as it is passed by the ABI. */
11956 unsignedp = TYPE_UNSIGNED (type);
11957 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
11958
11959 /* If we must pass in stack, we need a stack. */
11960 if (rs6000_must_pass_in_stack (mode, type))
11961 return true;
11962
11963 /* If there is no incoming register, we need a stack. */
11964 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
11965 if (entry_parm == NULL)
11966 return true;
11967
11968 /* Likewise if we need to pass both in registers and on the stack. */
11969 if (GET_CODE (entry_parm) == PARALLEL
11970 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
11971 return true;
11972
11973 /* Also true if we're partially in registers and partially not. */
11974 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
11975 return true;
11976
11977 /* Update info on where next arg arrives in registers. */
11978 rs6000_function_arg_advance (args_so_far, mode, type, true);
11979 return false;
11980 }
11981
11982 /* Return true if FUN has no prototype, has a variable argument
11983 list, or passes any parameter in memory. */
11984
11985 static bool
11986 rs6000_function_parms_need_stack (tree fun, bool incoming)
11987 {
11988 tree fntype, result;
11989 CUMULATIVE_ARGS args_so_far_v;
11990 cumulative_args_t args_so_far;
11991
11992 if (!fun)
11993 /* Must be a libcall, all of which only use reg parms. */
11994 return false;
11995
11996 fntype = fun;
11997 if (!TYPE_P (fun))
11998 fntype = TREE_TYPE (fun);
11999
12000 /* Varargs functions need the parameter save area. */
12001 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12002 return true;
12003
12004 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12005 args_so_far = pack_cumulative_args (&args_so_far_v);
12006
12007 /* When incoming, we will have been passed the function decl.
12008 It is necessary to use the decl to handle K&R style functions,
12009 where TYPE_ARG_TYPES may not be available. */
12010 if (incoming)
12011 {
12012 gcc_assert (DECL_P (fun));
12013 result = DECL_RESULT (fun);
12014 }
12015 else
12016 result = TREE_TYPE (fntype);
12017
12018 if (result && aggregate_value_p (result, fntype))
12019 {
12020 if (!TYPE_P (result))
12021 result = TREE_TYPE (result);
12022 result = build_pointer_type (result);
12023 rs6000_parm_needs_stack (args_so_far, result);
12024 }
12025
12026 if (incoming)
12027 {
12028 tree parm;
12029
12030 for (parm = DECL_ARGUMENTS (fun);
12031 parm && parm != void_list_node;
12032 parm = TREE_CHAIN (parm))
12033 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12034 return true;
12035 }
12036 else
12037 {
12038 function_args_iterator args_iter;
12039 tree arg_type;
12040
12041 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12042 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12043 return true;
12044 }
12045
12046 return false;
12047 }
12048
12049 /* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
12050 usually a constant depending on the ABI.  However, in the ELFv2 ABI
12051 the register parameter area is optional when calling a function that
12052 has a prototype in scope, has no variable argument list, and passes
12053 all parameters in registers.  */
12054
12055 int
12056 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12057 {
12058 int reg_parm_stack_space;
12059
12060 switch (DEFAULT_ABI)
12061 {
12062 default:
12063 reg_parm_stack_space = 0;
12064 break;
12065
12066 case ABI_AIX:
12067 case ABI_DARWIN:
12068 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12069 break;
12070
12071 case ABI_ELFv2:
12072 /* ??? Recomputing this every time is a bit expensive. Is there
12073 a place to cache this information? */
12074 if (rs6000_function_parms_need_stack (fun, incoming))
12075 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12076 else
12077 reg_parm_stack_space = 0;
12078 break;
12079 }
12080
12081 return reg_parm_stack_space;
12082 }
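/* Illustrative consequence: a prototyped ELFv2 call such as
   'int f (int, int)' with all arguments in registers reserves no
   parameter save area at all, whereas the same call under the AIX
   64-bit ABI always reserves 64 bytes.  */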
12083
12084 static void
12085 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12086 {
12087 int i;
12088 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12089
12090 if (nregs == 0)
12091 return;
12092
12093 for (i = 0; i < nregs; i++)
12094 {
12095 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12096 if (reload_completed)
12097 {
12098 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12099 tem = NULL_RTX;
12100 else
12101 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12102 i * GET_MODE_SIZE (reg_mode));
12103 }
12104 else
12105 tem = replace_equiv_address (tem, XEXP (tem, 0));
12106
12107 gcc_assert (tem);
12108
12109 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12110 }
12111 }
12112 \f
12113 /* Perform any actions needed for a function that is receiving a
12114 variable number of arguments.
12115
12116 CUM is as above.
12117
12118 MODE and TYPE are the mode and type of the current parameter.
12119
12120 PRETEND_SIZE is a variable that should be set to the amount of stack
12121 that must be pushed by the prolog to pretend that our caller pushed
12122 it.
12123
12124 Normally, this macro will push all remaining incoming registers on the
12125 stack and set PRETEND_SIZE to the length of the registers pushed. */
12126
12127 static void
12128 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12129 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12130 int no_rtl)
12131 {
12132 CUMULATIVE_ARGS next_cum;
12133 int reg_size = TARGET_32BIT ? 4 : 8;
12134 rtx save_area = NULL_RTX, mem;
12135 int first_reg_offset;
12136 alias_set_type set;
12137
12138 /* Skip the last named argument. */
12139 next_cum = *get_cumulative_args (cum);
12140 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12141
12142 if (DEFAULT_ABI == ABI_V4)
12143 {
12144 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12145
12146 if (! no_rtl)
12147 {
12148 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12149 HOST_WIDE_INT offset = 0;
12150
12151 /* Try to optimize the size of the varargs save area.
12152 The ABI requires that ap.reg_save_area is doubleword
12153 aligned, but we don't need to allocate space for all
12154 	       the bytes, only for those to which we will actually save
12155 	       anything.  */
12156 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12157 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12158 if (TARGET_HARD_FLOAT
12159 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12160 && cfun->va_list_fpr_size)
12161 {
12162 if (gpr_reg_num)
12163 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12164 * UNITS_PER_FP_WORD;
12165 if (cfun->va_list_fpr_size
12166 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12167 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12168 else
12169 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12170 * UNITS_PER_FP_WORD;
12171 }
12172 if (gpr_reg_num)
12173 {
12174 offset = -((first_reg_offset * reg_size) & ~7);
12175 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12176 {
12177 gpr_reg_num = cfun->va_list_gpr_size;
12178 if (reg_size == 4 && (first_reg_offset & 1))
12179 gpr_reg_num++;
12180 }
12181 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12182 }
12183 else if (fpr_size)
12184 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12185 * UNITS_PER_FP_WORD
12186 - (int) (GP_ARG_NUM_REG * reg_size);
12187
12188 if (gpr_size + fpr_size)
12189 {
12190 rtx reg_save_area
12191 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12192 gcc_assert (MEM_P (reg_save_area));
12193 reg_save_area = XEXP (reg_save_area, 0);
12194 if (GET_CODE (reg_save_area) == PLUS)
12195 {
12196 gcc_assert (XEXP (reg_save_area, 0)
12197 == virtual_stack_vars_rtx);
12198 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12199 offset += INTVAL (XEXP (reg_save_area, 1));
12200 }
12201 else
12202 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12203 }
12204
12205 cfun->machine->varargs_save_offset = offset;
12206 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12207 }
12208 }
12209 else
12210 {
12211 first_reg_offset = next_cum.words;
12212 save_area = crtl->args.internal_arg_pointer;
12213
12214 if (targetm.calls.must_pass_in_stack (mode, type))
12215 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12216 }
12217
12218 set = get_varargs_alias_set ();
12219 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12220 && cfun->va_list_gpr_size)
12221 {
12222 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12223
12224 if (va_list_gpr_counter_field)
12225 /* V4 va_list_gpr_size counts number of registers needed. */
12226 n_gpr = cfun->va_list_gpr_size;
12227 else
12228 /* char * va_list instead counts number of bytes needed. */
12229 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12230
12231 if (nregs > n_gpr)
12232 nregs = n_gpr;
12233
12234 mem = gen_rtx_MEM (BLKmode,
12235 plus_constant (Pmode, save_area,
12236 first_reg_offset * reg_size));
12237 MEM_NOTRAP_P (mem) = 1;
12238 set_mem_alias_set (mem, set);
12239 set_mem_align (mem, BITS_PER_WORD);
12240
12241 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12242 nregs);
12243 }
12244
12245 /* Save FP registers if needed. */
12246 if (DEFAULT_ABI == ABI_V4
12247 && TARGET_HARD_FLOAT
12248 && ! no_rtl
12249 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12250 && cfun->va_list_fpr_size)
12251 {
12252 int fregno = next_cum.fregno, nregs;
12253 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12254 rtx lab = gen_label_rtx ();
12255 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12256 * UNITS_PER_FP_WORD);
12257
12258 emit_jump_insn
12259 (gen_rtx_SET (pc_rtx,
12260 gen_rtx_IF_THEN_ELSE (VOIDmode,
12261 gen_rtx_NE (VOIDmode, cr1,
12262 const0_rtx),
12263 gen_rtx_LABEL_REF (VOIDmode, lab),
12264 pc_rtx)));
12265
12266 for (nregs = 0;
12267 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12268 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12269 {
12270 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12271 plus_constant (Pmode, save_area, off));
12272 MEM_NOTRAP_P (mem) = 1;
12273 set_mem_alias_set (mem, set);
12274 set_mem_align (mem, GET_MODE_ALIGNMENT (
12275 TARGET_HARD_FLOAT ? DFmode : SFmode));
12276 emit_move_insn (mem, gen_rtx_REG (
12277 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12278 }
12279
12280 emit_label (lab);
12281 }
12282 }
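
/* Illustration (a sketch): a V4 variadic function such as the one below
   is what triggers the register saves above; va_arg later reads the
   register-passed actuals back out of the save area.  */
#if 0
int
sum (int n, ...)
{
  __builtin_va_list ap;
  int s = 0;
  __builtin_va_start (ap, n);
  while (n-- > 0)
    s += __builtin_va_arg (ap, int);
  __builtin_va_end (ap);
  return s;
}
#endif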
12283
12284 /* Create the va_list data type. */
12285
12286 static tree
12287 rs6000_build_builtin_va_list (void)
12288 {
12289 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12290
12291 /* For AIX, prefer 'char *' because that's what the system
12292 header files like. */
12293 if (DEFAULT_ABI != ABI_V4)
12294 return build_pointer_type (char_type_node);
12295
12296 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12297 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12298 get_identifier ("__va_list_tag"), record);
12299
12300 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12301 unsigned_char_type_node);
12302 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12303 unsigned_char_type_node);
12304 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12305 every user file. */
12306 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12307 get_identifier ("reserved"), short_unsigned_type_node);
12308 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12309 get_identifier ("overflow_arg_area"),
12310 ptr_type_node);
12311 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12312 get_identifier ("reg_save_area"),
12313 ptr_type_node);
12314
12315 va_list_gpr_counter_field = f_gpr;
12316 va_list_fpr_counter_field = f_fpr;
12317
12318 DECL_FIELD_CONTEXT (f_gpr) = record;
12319 DECL_FIELD_CONTEXT (f_fpr) = record;
12320 DECL_FIELD_CONTEXT (f_res) = record;
12321 DECL_FIELD_CONTEXT (f_ovf) = record;
12322 DECL_FIELD_CONTEXT (f_sav) = record;
12323
12324 TYPE_STUB_DECL (record) = type_decl;
12325 TYPE_NAME (record) = type_decl;
12326 TYPE_FIELDS (record) = f_gpr;
12327 DECL_CHAIN (f_gpr) = f_fpr;
12328 DECL_CHAIN (f_fpr) = f_res;
12329 DECL_CHAIN (f_res) = f_ovf;
12330 DECL_CHAIN (f_ovf) = f_sav;
12331
12332 layout_type (record);
12333
12334 /* The correct type is an array type of one element. */
12335 return build_array_type (record, build_index_type (size_zero_node));
12336 }
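
/* For reference, the record built above corresponds to this C
   declaration (a sketch; the typedef name is illustrative):  */
#if 0
typedef struct __va_list_tag
{
  unsigned char gpr;		/* index of next saved GP arg register */
  unsigned char fpr;		/* index of next saved FP arg register */
  unsigned short reserved;	/* padding, named to appease -Wpadded */
  void *overflow_arg_area;	/* arguments passed on the stack */
  void *reg_save_area;		/* saved GP/FP argument registers */
} __va_list[1];			/* an array type of one element */
#endif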
12337
12338 /* Implement va_start. */
12339
12340 static void
12341 rs6000_va_start (tree valist, rtx nextarg)
12342 {
12343 HOST_WIDE_INT words, n_gpr, n_fpr;
12344 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12345 tree gpr, fpr, ovf, sav, t;
12346
12347 /* Only SVR4 needs something special. */
12348 if (DEFAULT_ABI != ABI_V4)
12349 {
12350 std_expand_builtin_va_start (valist, nextarg);
12351 return;
12352 }
12353
12354 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12355 f_fpr = DECL_CHAIN (f_gpr);
12356 f_res = DECL_CHAIN (f_fpr);
12357 f_ovf = DECL_CHAIN (f_res);
12358 f_sav = DECL_CHAIN (f_ovf);
12359
12360 valist = build_simple_mem_ref (valist);
12361 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12362 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12363 f_fpr, NULL_TREE);
12364 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12365 f_ovf, NULL_TREE);
12366 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12367 f_sav, NULL_TREE);
12368
12369 /* Count number of gp and fp argument registers used. */
12370 words = crtl->args.info.words;
12371 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12372 GP_ARG_NUM_REG);
12373 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12374 FP_ARG_NUM_REG);
12375
12376 if (TARGET_DEBUG_ARG)
12377 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12378 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12379 words, n_gpr, n_fpr);
12380
12381 if (cfun->va_list_gpr_size)
12382 {
12383 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12384 build_int_cst (NULL_TREE, n_gpr));
12385 TREE_SIDE_EFFECTS (t) = 1;
12386 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12387 }
12388
12389 if (cfun->va_list_fpr_size)
12390 {
12391 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12392 build_int_cst (NULL_TREE, n_fpr));
12393 TREE_SIDE_EFFECTS (t) = 1;
12394 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12395
12396 #ifdef HAVE_AS_GNU_ATTRIBUTE
12397 if (call_ABI_of_interest (cfun->decl))
12398 rs6000_passes_float = true;
12399 #endif
12400 }
12401
12402 /* Find the overflow area. */
12403 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12404 if (words != 0)
12405 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12406 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12407 TREE_SIDE_EFFECTS (t) = 1;
12408 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12409
12410 /* If there were no va_arg invocations, don't set up the register
12411 save area. */
12412 if (!cfun->va_list_gpr_size
12413 && !cfun->va_list_fpr_size
12414 && n_gpr < GP_ARG_NUM_REG
12415 && n_fpr < FP_ARG_V4_MAX_REG)
12416 return;
12417
12418 /* Find the register save area. */
12419 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12420 if (cfun->machine->varargs_save_offset)
12421 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12422 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12423 TREE_SIDE_EFFECTS (t) = 1;
12424 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12425 }
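
/* Roughly, the trees built above amount to this C sketch, where
   incoming_arg_ptr and frame_base stand in for the rtx expressions
   used in the code (illustration only):  */
#if 0
ap->gpr = n_gpr;			/* GP arg registers consumed */
ap->fpr = n_fpr;			/* FP arg registers consumed */
ap->overflow_arg_area = incoming_arg_ptr + words * MIN_UNITS_PER_WORD;
ap->reg_save_area = frame_base + varargs_save_offset;
#endif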
12426
12427 /* Implement va_arg. */
12428
12429 static tree
12430 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12431 gimple_seq *post_p)
12432 {
12433 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12434 tree gpr, fpr, ovf, sav, reg, t, u;
12435 int size, rsize, n_reg, sav_ofs, sav_scale;
12436 tree lab_false, lab_over, addr;
12437 int align;
12438 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12439 int regalign = 0;
12440 gimple *stmt;
12441
12442 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12443 {
12444 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12445 return build_va_arg_indirect_ref (t);
12446 }
12447
12448 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12449 earlier version of gcc, with the property that it always applied alignment
12450 adjustments to the va-args (even for zero-sized types). The cheapest way
12451 to deal with this is to replicate the effect of the part of
12452 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12453 of relevance.
12454 We don't need to check for pass-by-reference because of the test above.
12455    We can return a simplified answer, since we know there's no offset to add.  */
12456
12457 if (((TARGET_MACHO
12458 && rs6000_darwin64_abi)
12459 || DEFAULT_ABI == ABI_ELFv2
12460 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12461 && integer_zerop (TYPE_SIZE (type)))
12462 {
12463 unsigned HOST_WIDE_INT align, boundary;
12464 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12465 align = PARM_BOUNDARY / BITS_PER_UNIT;
12466 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12467 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12468 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12469 boundary /= BITS_PER_UNIT;
12470 if (boundary > align)
12471 {
12472 	      tree t;
12473 /* This updates arg ptr by the amount that would be necessary
12474 to align the zero-sized (but not zero-alignment) item. */
12475 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12476 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12477 gimplify_and_add (t, pre_p);
12478
12479 t = fold_convert (sizetype, valist_tmp);
12480 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12481 fold_convert (TREE_TYPE (valist),
12482 fold_build2 (BIT_AND_EXPR, sizetype, t,
12483 size_int (-boundary))));
12484 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12485 gimplify_and_add (t, pre_p);
12486 }
12487 /* Since it is zero-sized there's no increment for the item itself. */
12488 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12489 return build_va_arg_indirect_ref (valist_tmp);
12490 }
12491
12492 if (DEFAULT_ABI != ABI_V4)
12493 {
12494 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12495 {
12496 tree elem_type = TREE_TYPE (type);
12497 machine_mode elem_mode = TYPE_MODE (elem_type);
12498 int elem_size = GET_MODE_SIZE (elem_mode);
12499
12500 if (elem_size < UNITS_PER_WORD)
12501 {
12502 tree real_part, imag_part;
12503 gimple_seq post = NULL;
12504
12505 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12506 &post);
12507 /* Copy the value into a temporary, lest the formal temporary
12508 be reused out from under us. */
12509 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12510 gimple_seq_add_seq (pre_p, post);
12511
12512 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12513 post_p);
12514
12515 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12516 }
12517 }
12518
12519 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12520 }
12521
12522 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12523 f_fpr = DECL_CHAIN (f_gpr);
12524 f_res = DECL_CHAIN (f_fpr);
12525 f_ovf = DECL_CHAIN (f_res);
12526 f_sav = DECL_CHAIN (f_ovf);
12527
12528 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12529 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12530 f_fpr, NULL_TREE);
12531 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12532 f_ovf, NULL_TREE);
12533 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12534 f_sav, NULL_TREE);
12535
12536 size = int_size_in_bytes (type);
12537 rsize = (size + 3) / 4;
12538 int pad = 4 * rsize - size;
12539 align = 1;
12540
12541 machine_mode mode = TYPE_MODE (type);
12542 if (abi_v4_pass_in_fpr (mode, false))
12543 {
12544 /* FP args go in FP registers, if present. */
12545 reg = fpr;
12546 n_reg = (size + 7) / 8;
12547 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12548 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12549 if (mode != SFmode && mode != SDmode)
12550 align = 8;
12551 }
12552 else
12553 {
12554 /* Otherwise into GP registers. */
12555 reg = gpr;
12556 n_reg = rsize;
12557 sav_ofs = 0;
12558 sav_scale = 4;
12559 if (n_reg == 2)
12560 align = 8;
12561 }
12562
12563 /* Pull the value out of the saved registers.... */
12564
12565 lab_over = NULL;
12566 addr = create_tmp_var (ptr_type_node, "addr");
12567
12568 /* AltiVec vectors never go in registers when -mabi=altivec. */
12569 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12570 align = 16;
12571 else
12572 {
12573 lab_false = create_artificial_label (input_location);
12574 lab_over = create_artificial_label (input_location);
12575
12576       /* Long long is aligned in the registers, as is any other 2-gpr
12577 	 item such as complex int, due to a historical mistake.  */
12578 u = reg;
12579 if (n_reg == 2 && reg == gpr)
12580 {
12581 regalign = 1;
12582 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12583 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12584 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12585 unshare_expr (reg), u);
12586 }
12587 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12588 reg number is 0 for f1, so we want to make it odd. */
12589 else if (reg == fpr && mode == TDmode)
12590 {
12591 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12592 build_int_cst (TREE_TYPE (reg), 1));
12593 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12594 }
12595
12596 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12597 t = build2 (GE_EXPR, boolean_type_node, u, t);
12598 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12599 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12600 gimplify_and_add (t, pre_p);
12601
12602 t = sav;
12603 if (sav_ofs)
12604 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12605
12606 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12607 build_int_cst (TREE_TYPE (reg), n_reg));
12608 u = fold_convert (sizetype, u);
12609 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12610 t = fold_build_pointer_plus (t, u);
12611
12612 /* _Decimal32 varargs are located in the second word of the 64-bit
12613 FP register for 32-bit binaries. */
12614 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12615 t = fold_build_pointer_plus_hwi (t, size);
12616
12617 /* Args are passed right-aligned. */
12618 if (BYTES_BIG_ENDIAN)
12619 t = fold_build_pointer_plus_hwi (t, pad);
12620
12621 gimplify_assign (addr, t, pre_p);
12622
12623 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12624
12625 stmt = gimple_build_label (lab_false);
12626 gimple_seq_add_stmt (pre_p, stmt);
12627
12628 if ((n_reg == 2 && !regalign) || n_reg > 2)
12629 {
12630 	  /* Ensure that we don't find any more args in regs.
12631 	     Alignment has been taken care of for the special cases.  */
12632 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12633 }
12634 }
12635
12636 /* ... otherwise out of the overflow area. */
12637
12638 /* Care for on-stack alignment if needed. */
12639 t = ovf;
12640 if (align != 1)
12641 {
12642 t = fold_build_pointer_plus_hwi (t, align - 1);
12643 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12644 build_int_cst (TREE_TYPE (t), -align));
12645 }
12646
12647 /* Args are passed right-aligned. */
12648 if (BYTES_BIG_ENDIAN)
12649 t = fold_build_pointer_plus_hwi (t, pad);
12650
12651 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12652
12653 gimplify_assign (unshare_expr (addr), t, pre_p);
12654
12655 t = fold_build_pointer_plus_hwi (t, size);
12656 gimplify_assign (unshare_expr (ovf), t, pre_p);
12657
12658 if (lab_over)
12659 {
12660 stmt = gimple_build_label (lab_over);
12661 gimple_seq_add_stmt (pre_p, stmt);
12662 }
12663
12664 if (STRICT_ALIGNMENT
12665 && (TYPE_ALIGN (type)
12666 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12667 {
12668 /* The value (of type complex double, for example) may not be
12669 aligned in memory in the saved registers, so copy via a
12670 temporary. (This is the same code as used for SPARC.) */
12671 tree tmp = create_tmp_var (type, "va_arg_tmp");
12672 tree dest_addr = build_fold_addr_expr (tmp);
12673
12674 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12675 3, dest_addr, addr, size_int (rsize * 4));
12676 TREE_ADDRESSABLE (tmp) = 1;
12677
12678 gimplify_and_add (copy, pre_p);
12679 addr = dest_addr;
12680 }
12681
12682 addr = fold_convert (ptrtype, addr);
12683 return build_va_arg_indirect_ref (addr);
12684 }
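
/* The register-vs-overflow decision above, rendered as a C sketch for a
   V4 argument occupying n_reg GP registers (ALIGN_PTR stands in for the
   BIT_AND_EXPR rounding; illustration only):  */
#if 0
if (ap->gpr + n_reg <= 8)
  {
    addr = ap->reg_save_area + sav_ofs + ap->gpr * sav_scale;
    ap->gpr += n_reg;			/* consume the registers */
  }
else
  {
    ap->gpr = 8;			/* no further args in registers */
    addr = ALIGN_PTR (ap->overflow_arg_area, align);
    ap->overflow_arg_area = addr + size;
  }
#endif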
12685
12686 /* Builtins. */
12687
12688 static void
12689 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12690 {
12691 tree t;
12692 unsigned classify = rs6000_builtin_info[(int)code].attr;
12693 const char *attr_string = "";
12694
12695 gcc_assert (name != NULL);
12696 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12697
12698 if (rs6000_builtin_decls[(int)code])
12699 fatal_error (input_location,
12700 "internal error: builtin function %qs already processed",
12701 name);
12702
12703 rs6000_builtin_decls[(int)code] = t =
12704 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12705
12706 /* Set any special attributes. */
12707 if ((classify & RS6000_BTC_CONST) != 0)
12708 {
12709 /* const function, function only depends on the inputs. */
12710 TREE_READONLY (t) = 1;
12711 TREE_NOTHROW (t) = 1;
12712 attr_string = ", const";
12713 }
12714 else if ((classify & RS6000_BTC_PURE) != 0)
12715 {
12716 /* pure function, function can read global memory, but does not set any
12717 external state. */
12718 DECL_PURE_P (t) = 1;
12719 TREE_NOTHROW (t) = 1;
12720 attr_string = ", pure";
12721 }
12722 else if ((classify & RS6000_BTC_FP) != 0)
12723 {
12724 /* Function is a math function. If rounding mode is on, then treat the
12725 function as not reading global memory, but it can have arbitrary side
12726 effects. If it is off, then assume the function is a const function.
12727 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12728 builtin-attribute.def that is used for the math functions. */
12729 TREE_NOTHROW (t) = 1;
12730 if (flag_rounding_math)
12731 {
12732 DECL_PURE_P (t) = 1;
12733 DECL_IS_NOVOPS (t) = 1;
12734 attr_string = ", fp, pure";
12735 }
12736 else
12737 {
12738 TREE_READONLY (t) = 1;
12739 attr_string = ", fp, const";
12740 }
12741 }
12742 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12743 gcc_unreachable ();
12744
12745 if (TARGET_DEBUG_BUILTIN)
12746 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12747 (int)code, name, attr_string);
12748 }
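
/* Typical use of def_builtin (a sketch; the function-type node name is
   illustrative, while the builtin name and enumerator are real):  */
#if 0
def_builtin ("__builtin_altivec_vspltisb", v16qi_ftype_int,
	     ALTIVEC_BUILTIN_VSPLTISB);
#endif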
12749
12750 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12751
12752 #undef RS6000_BUILTIN_0
12753 #undef RS6000_BUILTIN_1
12754 #undef RS6000_BUILTIN_2
12755 #undef RS6000_BUILTIN_3
12756 #undef RS6000_BUILTIN_A
12757 #undef RS6000_BUILTIN_D
12758 #undef RS6000_BUILTIN_H
12759 #undef RS6000_BUILTIN_P
12760 #undef RS6000_BUILTIN_X
12761
12762 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12763 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12764 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12765 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12766 { MASK, ICODE, NAME, ENUM },
12767
12768 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12769 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12770 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12771 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12772 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12773
12774 static const struct builtin_description bdesc_3arg[] =
12775 {
12776 #include "rs6000-builtin.def"
12777 };
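
/* The table above uses the classic X-macro idiom: rs6000-builtin.def is
   re-included once per table, each time with exactly one RS6000_BUILTIN_*
   macro defined to emit an initializer, so every bdesc_* array collects
   only the builtins of its own kind.  The net effect is (entry shown is
   illustrative):  */
#if 0
static const struct builtin_description bdesc_3arg[] =
{
  { MASK, CODE_FOR_pattern, "__builtin_name", RS6000_BUILTIN_ENUM },
  /* ...one row per RS6000_BUILTIN_3 line in rs6000-builtin.def.  */
};
#endif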
12778
12779 /* DST operations: void foo (void *, const int, const char). */
12780
12781 #undef RS6000_BUILTIN_0
12782 #undef RS6000_BUILTIN_1
12783 #undef RS6000_BUILTIN_2
12784 #undef RS6000_BUILTIN_3
12785 #undef RS6000_BUILTIN_A
12786 #undef RS6000_BUILTIN_D
12787 #undef RS6000_BUILTIN_H
12788 #undef RS6000_BUILTIN_P
12789 #undef RS6000_BUILTIN_X
12790
12791 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12792 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12793 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12794 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12795 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12796 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12797 { MASK, ICODE, NAME, ENUM },
12798
12799 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12800 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12801 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12802
12803 static const struct builtin_description bdesc_dst[] =
12804 {
12805 #include "rs6000-builtin.def"
12806 };
12807
12808 /* Simple binary operations: VECc = foo (VECa, VECb). */
12809
12810 #undef RS6000_BUILTIN_0
12811 #undef RS6000_BUILTIN_1
12812 #undef RS6000_BUILTIN_2
12813 #undef RS6000_BUILTIN_3
12814 #undef RS6000_BUILTIN_A
12815 #undef RS6000_BUILTIN_D
12816 #undef RS6000_BUILTIN_H
12817 #undef RS6000_BUILTIN_P
12818 #undef RS6000_BUILTIN_X
12819
12820 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12821 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12822 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12823 { MASK, ICODE, NAME, ENUM },
12824
12825 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12826 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12827 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12828 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12829 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12830 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12831
12832 static const struct builtin_description bdesc_2arg[] =
12833 {
12834 #include "rs6000-builtin.def"
12835 };
12836
12837 #undef RS6000_BUILTIN_0
12838 #undef RS6000_BUILTIN_1
12839 #undef RS6000_BUILTIN_2
12840 #undef RS6000_BUILTIN_3
12841 #undef RS6000_BUILTIN_A
12842 #undef RS6000_BUILTIN_D
12843 #undef RS6000_BUILTIN_H
12844 #undef RS6000_BUILTIN_P
12845 #undef RS6000_BUILTIN_X
12846
12847 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12848 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12849 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12850 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12851 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12852 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12853 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12854 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12855 { MASK, ICODE, NAME, ENUM },
12856
12857 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12858
12859 /* AltiVec predicates. */
12860
12861 static const struct builtin_description bdesc_altivec_preds[] =
12862 {
12863 #include "rs6000-builtin.def"
12864 };
12865
12866 /* ABS* operations. */
12867
12868 #undef RS6000_BUILTIN_0
12869 #undef RS6000_BUILTIN_1
12870 #undef RS6000_BUILTIN_2
12871 #undef RS6000_BUILTIN_3
12872 #undef RS6000_BUILTIN_A
12873 #undef RS6000_BUILTIN_D
12874 #undef RS6000_BUILTIN_H
12875 #undef RS6000_BUILTIN_P
12876 #undef RS6000_BUILTIN_X
12877
12878 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12879 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12880 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12881 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12882 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
12883 { MASK, ICODE, NAME, ENUM },
12884
12885 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12886 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12887 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12888 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12889
12890 static const struct builtin_description bdesc_abs[] =
12891 {
12892 #include "rs6000-builtin.def"
12893 };
12894
12895 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
12896 foo (VECa). */
12897
12898 #undef RS6000_BUILTIN_0
12899 #undef RS6000_BUILTIN_1
12900 #undef RS6000_BUILTIN_2
12901 #undef RS6000_BUILTIN_3
12902 #undef RS6000_BUILTIN_A
12903 #undef RS6000_BUILTIN_D
12904 #undef RS6000_BUILTIN_H
12905 #undef RS6000_BUILTIN_P
12906 #undef RS6000_BUILTIN_X
12907
12908 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12909 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
12910 { MASK, ICODE, NAME, ENUM },
12911
12912 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12913 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12914 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12915 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12916 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12917 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12918 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12919
12920 static const struct builtin_description bdesc_1arg[] =
12921 {
12922 #include "rs6000-builtin.def"
12923 };
12924
12925 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
12926
12927 #undef RS6000_BUILTIN_0
12928 #undef RS6000_BUILTIN_1
12929 #undef RS6000_BUILTIN_2
12930 #undef RS6000_BUILTIN_3
12931 #undef RS6000_BUILTIN_A
12932 #undef RS6000_BUILTIN_D
12933 #undef RS6000_BUILTIN_H
12934 #undef RS6000_BUILTIN_P
12935 #undef RS6000_BUILTIN_X
12936
12937 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
12938 { MASK, ICODE, NAME, ENUM },
12939
12940 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12941 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12942 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12943 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12944 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12945 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12946 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12947 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12948
12949 static const struct builtin_description bdesc_0arg[] =
12950 {
12951 #include "rs6000-builtin.def"
12952 };
12953
12954 /* HTM builtins. */
12955 #undef RS6000_BUILTIN_0
12956 #undef RS6000_BUILTIN_1
12957 #undef RS6000_BUILTIN_2
12958 #undef RS6000_BUILTIN_3
12959 #undef RS6000_BUILTIN_A
12960 #undef RS6000_BUILTIN_D
12961 #undef RS6000_BUILTIN_H
12962 #undef RS6000_BUILTIN_P
12963 #undef RS6000_BUILTIN_X
12964
12965 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12966 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12967 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12968 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12969 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12970 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12971 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
12972 { MASK, ICODE, NAME, ENUM },
12973
12974 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12975 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12976
12977 static const struct builtin_description bdesc_htm[] =
12978 {
12979 #include "rs6000-builtin.def"
12980 };
12981
12982 #undef RS6000_BUILTIN_0
12983 #undef RS6000_BUILTIN_1
12984 #undef RS6000_BUILTIN_2
12985 #undef RS6000_BUILTIN_3
12986 #undef RS6000_BUILTIN_A
12987 #undef RS6000_BUILTIN_D
12988 #undef RS6000_BUILTIN_H
12989 #undef RS6000_BUILTIN_P
12990
12991 /* Return true if a builtin function is overloaded. */
12992 bool
12993 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
12994 {
12995 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
12996 }
12997
12998 const char *
12999 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13000 {
13001 return rs6000_builtin_info[(int)fncode].name;
13002 }
13003
13004 /* Expand an expression EXP that calls a builtin without arguments. */
13005 static rtx
13006 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13007 {
13008 rtx pat;
13009 machine_mode tmode = insn_data[icode].operand[0].mode;
13010
13011 if (icode == CODE_FOR_nothing)
13012 /* Builtin not supported on this processor. */
13013 return 0;
13014
13015 if (icode == CODE_FOR_rs6000_mffsl
13016 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13017 {
13018 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
13019 return const0_rtx;
13020 }
13021
13022 if (target == 0
13023 || GET_MODE (target) != tmode
13024 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13025 target = gen_reg_rtx (tmode);
13026
13027 pat = GEN_FCN (icode) (target);
13028 if (! pat)
13029 return 0;
13030 emit_insn (pat);
13031
13032 return target;
13033 }
13034
13035
13036 static rtx
13037 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13038 {
13039 rtx pat;
13040 tree arg0 = CALL_EXPR_ARG (exp, 0);
13041 tree arg1 = CALL_EXPR_ARG (exp, 1);
13042 rtx op0 = expand_normal (arg0);
13043 rtx op1 = expand_normal (arg1);
13044 machine_mode mode0 = insn_data[icode].operand[0].mode;
13045 machine_mode mode1 = insn_data[icode].operand[1].mode;
13046
13047 if (icode == CODE_FOR_nothing)
13048 /* Builtin not supported on this processor. */
13049 return 0;
13050
13051 /* If we got invalid arguments bail out before generating bad rtl. */
13052 if (arg0 == error_mark_node || arg1 == error_mark_node)
13053 return const0_rtx;
13054
13055 if (!CONST_INT_P (op0)
13056 || INTVAL (op0) > 255
13057 || INTVAL (op0) < 0)
13058 {
13059 error ("argument 1 must be an 8-bit field value");
13060 return const0_rtx;
13061 }
13062
13063 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13064 op0 = copy_to_mode_reg (mode0, op0);
13065
13066 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13067 op1 = copy_to_mode_reg (mode1, op1);
13068
13069 pat = GEN_FCN (icode) (op0, op1);
13070 if (!pat)
13071 return const0_rtx;
13072 emit_insn (pat);
13073
13074 return NULL_RTX;
13075 }
13076
13077 static rtx
13078 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13079 {
13080 rtx pat;
13081 tree arg0 = CALL_EXPR_ARG (exp, 0);
13082 rtx op0 = expand_normal (arg0);
13083
13084 if (icode == CODE_FOR_nothing)
13085 /* Builtin not supported on this processor. */
13086 return 0;
13087
13088 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13089 {
13090 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13091 "%<-msoft-float%>");
13092 return const0_rtx;
13093 }
13094
13095 /* If we got invalid arguments bail out before generating bad rtl. */
13096 if (arg0 == error_mark_node)
13097 return const0_rtx;
13098
13099 /* Only allow bit numbers 0 to 31. */
13100 if (!u5bit_cint_operand (op0, VOIDmode))
13101 {
13102       error ("argument must be a constant between 0 and 31");
13103 return const0_rtx;
13104 }
13105
13106 pat = GEN_FCN (icode) (op0);
13107 if (!pat)
13108 return const0_rtx;
13109 emit_insn (pat);
13110
13111 return NULL_RTX;
13112 }
13113
13114 static rtx
13115 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13116 {
13117 rtx pat;
13118 tree arg0 = CALL_EXPR_ARG (exp, 0);
13119 rtx op0 = expand_normal (arg0);
13120 machine_mode mode0 = insn_data[icode].operand[0].mode;
13121
13122 if (icode == CODE_FOR_nothing)
13123 /* Builtin not supported on this processor. */
13124 return 0;
13125
13126 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13127 {
13128 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13129 return const0_rtx;
13130 }
13131
13132 /* If we got invalid arguments bail out before generating bad rtl. */
13133 if (arg0 == error_mark_node)
13134 return const0_rtx;
13135
13136   /* If the argument is a constant, check the range.  The argument can
13137      only be a 2-bit value.  Unfortunately, we can't check the range of
13138      the value at compile time if the argument is a variable.  The least
13139      significant two bits of the argument, regardless of type, are used
13140      to set the rounding mode.  All other bits are ignored.  */
13141 if (CONST_INT_P (op0) && !const_0_to_3_operand(op0, VOIDmode))
13142 {
13143       error ("argument must be a value between 0 and 3");
13144 return const0_rtx;
13145 }
13146
13147 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13148 op0 = copy_to_mode_reg (mode0, op0);
13149
13150 pat = GEN_FCN (icode) (op0);
13151 if (!pat)
13152 return const0_rtx;
13153 emit_insn (pat);
13154
13155 return NULL_RTX;
13156 }
13157 static rtx
13158 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13159 {
13160 rtx pat;
13161 tree arg0 = CALL_EXPR_ARG (exp, 0);
13162 rtx op0 = expand_normal (arg0);
13163 machine_mode mode0 = insn_data[icode].operand[0].mode;
13164
13165 if (TARGET_32BIT)
13166 /* Builtin not supported in 32-bit mode. */
13167 fatal_error (input_location,
13168 "%<__builtin_set_fpscr_drn%> is not supported "
13169 "in 32-bit mode");
13170
13171 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13172 {
13173 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13174 return const0_rtx;
13175 }
13176
13177 if (icode == CODE_FOR_nothing)
13178 /* Builtin not supported on this processor. */
13179 return 0;
13180
13181 /* If we got invalid arguments bail out before generating bad rtl. */
13182 if (arg0 == error_mark_node)
13183 return const0_rtx;
13184
13185   /* If the argument is a constant, check the range.  The argument can
13186      only be a 3-bit value.  Unfortunately, we can't check the range of
13187      the value at compile time if the argument is a variable.  The least
13188      significant three bits of the argument, regardless of type, are used
13189      to set the decimal rounding mode.  All other bits are ignored.  */
13190 if (CONST_INT_P (op0) && !const_0_to_7_operand(op0, VOIDmode))
13191 {
13192       error ("argument must be a value between 0 and 7");
13193 return const0_rtx;
13194 }
13195
13196 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13197 op0 = copy_to_mode_reg (mode0, op0);
13198
13199 pat = GEN_FCN (icode) (op0);
13200 if (! pat)
13201 return const0_rtx;
13202 emit_insn (pat);
13203
13204 return NULL_RTX;
13205 }
13206
13207 static rtx
13208 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13209 {
13210 rtx pat;
13211 tree arg0 = CALL_EXPR_ARG (exp, 0);
13212 rtx op0 = expand_normal (arg0);
13213 machine_mode tmode = insn_data[icode].operand[0].mode;
13214 machine_mode mode0 = insn_data[icode].operand[1].mode;
13215
13216 if (icode == CODE_FOR_nothing)
13217 /* Builtin not supported on this processor. */
13218 return 0;
13219
13220 /* If we got invalid arguments bail out before generating bad rtl. */
13221 if (arg0 == error_mark_node)
13222 return const0_rtx;
13223
13224 if (icode == CODE_FOR_altivec_vspltisb
13225 || icode == CODE_FOR_altivec_vspltish
13226 || icode == CODE_FOR_altivec_vspltisw)
13227 {
13228 /* Only allow 5-bit *signed* literals. */
13229 if (!CONST_INT_P (op0)
13230 || INTVAL (op0) > 15
13231 || INTVAL (op0) < -16)
13232 {
13233 error ("argument 1 must be a 5-bit signed literal");
13234 return CONST0_RTX (tmode);
13235 }
13236 }
13237
13238 if (target == 0
13239 || GET_MODE (target) != tmode
13240 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13241 target = gen_reg_rtx (tmode);
13242
13243 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13244 op0 = copy_to_mode_reg (mode0, op0);
13245
13246 pat = GEN_FCN (icode) (target, op0);
13247 if (! pat)
13248 return 0;
13249 emit_insn (pat);
13250
13251 return target;
13252 }
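
/* What the 5-bit signed literal check above means to the user (a
   sketch):  */
#if 0
vector signed char a = __builtin_altivec_vspltisb (15);   /* accepted */
vector signed char b = __builtin_altivec_vspltisb (-16);  /* accepted */
vector signed char c = __builtin_altivec_vspltisb (16);   /* rejected */
#endif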
13253
13254 static rtx
13255 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13256 {
13257 rtx pat, scratch1, scratch2;
13258 tree arg0 = CALL_EXPR_ARG (exp, 0);
13259 rtx op0 = expand_normal (arg0);
13260 machine_mode tmode = insn_data[icode].operand[0].mode;
13261 machine_mode mode0 = insn_data[icode].operand[1].mode;
13262
13263 /* If we have invalid arguments, bail out before generating bad rtl. */
13264 if (arg0 == error_mark_node)
13265 return const0_rtx;
13266
13267 if (target == 0
13268 || GET_MODE (target) != tmode
13269 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13270 target = gen_reg_rtx (tmode);
13271
13272 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13273 op0 = copy_to_mode_reg (mode0, op0);
13274
13275 scratch1 = gen_reg_rtx (mode0);
13276 scratch2 = gen_reg_rtx (mode0);
13277
13278 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13279 if (! pat)
13280 return 0;
13281 emit_insn (pat);
13282
13283 return target;
13284 }
13285
13286 static rtx
13287 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13288 {
13289 rtx pat;
13290 tree arg0 = CALL_EXPR_ARG (exp, 0);
13291 tree arg1 = CALL_EXPR_ARG (exp, 1);
13292 rtx op0 = expand_normal (arg0);
13293 rtx op1 = expand_normal (arg1);
13294 machine_mode tmode = insn_data[icode].operand[0].mode;
13295 machine_mode mode0 = insn_data[icode].operand[1].mode;
13296 machine_mode mode1 = insn_data[icode].operand[2].mode;
13297
13298 if (icode == CODE_FOR_nothing)
13299 /* Builtin not supported on this processor. */
13300 return 0;
13301
13302 /* If we got invalid arguments bail out before generating bad rtl. */
13303 if (arg0 == error_mark_node || arg1 == error_mark_node)
13304 return const0_rtx;
13305
13306 if (icode == CODE_FOR_unpackv1ti
13307 || icode == CODE_FOR_unpackkf
13308 || icode == CODE_FOR_unpacktf
13309 || icode == CODE_FOR_unpackif
13310 || icode == CODE_FOR_unpacktd)
13311 {
13312 /* Only allow 1-bit unsigned literals. */
13313 STRIP_NOPS (arg1);
13314 if (TREE_CODE (arg1) != INTEGER_CST
13315 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13316 {
13317 error ("argument 2 must be a 1-bit unsigned literal");
13318 return CONST0_RTX (tmode);
13319 }
13320 }
13321 else if (icode == CODE_FOR_altivec_vspltw)
13322 {
13323 /* Only allow 2-bit unsigned literals. */
13324 STRIP_NOPS (arg1);
13325 if (TREE_CODE (arg1) != INTEGER_CST
13326 || TREE_INT_CST_LOW (arg1) & ~3)
13327 {
13328 error ("argument 2 must be a 2-bit unsigned literal");
13329 return CONST0_RTX (tmode);
13330 }
13331 }
13332 else if (icode == CODE_FOR_altivec_vsplth)
13333 {
13334 /* Only allow 3-bit unsigned literals. */
13335 STRIP_NOPS (arg1);
13336 if (TREE_CODE (arg1) != INTEGER_CST
13337 || TREE_INT_CST_LOW (arg1) & ~7)
13338 {
13339 error ("argument 2 must be a 3-bit unsigned literal");
13340 return CONST0_RTX (tmode);
13341 }
13342 }
13343 else if (icode == CODE_FOR_altivec_vspltb)
13344 {
13345 /* Only allow 4-bit unsigned literals. */
13346 STRIP_NOPS (arg1);
13347 if (TREE_CODE (arg1) != INTEGER_CST
13348 || TREE_INT_CST_LOW (arg1) & ~15)
13349 {
13350 error ("argument 2 must be a 4-bit unsigned literal");
13351 return CONST0_RTX (tmode);
13352 }
13353 }
13354 else if (icode == CODE_FOR_altivec_vcfux
13355 || icode == CODE_FOR_altivec_vcfsx
13356 || icode == CODE_FOR_altivec_vctsxs
13357 || icode == CODE_FOR_altivec_vctuxs)
13358 {
13359 /* Only allow 5-bit unsigned literals. */
13360 STRIP_NOPS (arg1);
13361 if (TREE_CODE (arg1) != INTEGER_CST
13362 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13363 {
13364 error ("argument 2 must be a 5-bit unsigned literal");
13365 return CONST0_RTX (tmode);
13366 }
13367 }
13368 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13369 || icode == CODE_FOR_dfptstsfi_lt_dd
13370 || icode == CODE_FOR_dfptstsfi_gt_dd
13371 || icode == CODE_FOR_dfptstsfi_unordered_dd
13372 || icode == CODE_FOR_dfptstsfi_eq_td
13373 || icode == CODE_FOR_dfptstsfi_lt_td
13374 || icode == CODE_FOR_dfptstsfi_gt_td
13375 || icode == CODE_FOR_dfptstsfi_unordered_td)
13376 {
13377 /* Only allow 6-bit unsigned literals. */
13378 STRIP_NOPS (arg0);
13379 if (TREE_CODE (arg0) != INTEGER_CST
13380 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13381 {
13382 error ("argument 1 must be a 6-bit unsigned literal");
13383 return CONST0_RTX (tmode);
13384 }
13385 }
13386 else if (icode == CODE_FOR_xststdcqp_kf
13387 || icode == CODE_FOR_xststdcqp_tf
13388 || icode == CODE_FOR_xststdcdp
13389 || icode == CODE_FOR_xststdcsp
13390 || icode == CODE_FOR_xvtstdcdp
13391 || icode == CODE_FOR_xvtstdcsp)
13392 {
13393 /* Only allow 7-bit unsigned literals. */
13394 STRIP_NOPS (arg1);
13395 if (TREE_CODE (arg1) != INTEGER_CST
13396 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13397 {
13398 error ("argument 2 must be a 7-bit unsigned literal");
13399 return CONST0_RTX (tmode);
13400 }
13401 }
13402
13403 if (target == 0
13404 || GET_MODE (target) != tmode
13405 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13406 target = gen_reg_rtx (tmode);
13407
13408 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13409 op0 = copy_to_mode_reg (mode0, op0);
13410 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13411 op1 = copy_to_mode_reg (mode1, op1);
13412
13413 pat = GEN_FCN (icode) (target, op0, op1);
13414 if (! pat)
13415 return 0;
13416 emit_insn (pat);
13417
13418 return target;
13419 }
13420
13421 static rtx
13422 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13423 {
13424 rtx pat, scratch;
13425 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13426 tree arg0 = CALL_EXPR_ARG (exp, 1);
13427 tree arg1 = CALL_EXPR_ARG (exp, 2);
13428 rtx op0 = expand_normal (arg0);
13429 rtx op1 = expand_normal (arg1);
13430 machine_mode tmode = SImode;
13431 machine_mode mode0 = insn_data[icode].operand[1].mode;
13432 machine_mode mode1 = insn_data[icode].operand[2].mode;
13433 int cr6_form_int;
13434
13435 if (TREE_CODE (cr6_form) != INTEGER_CST)
13436 {
13437 error ("argument 1 of %qs must be a constant",
13438 "__builtin_altivec_predicate");
13439 return const0_rtx;
13440 }
13441 else
13442 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13443
13444 gcc_assert (mode0 == mode1);
13445
13446 /* If we have invalid arguments, bail out before generating bad rtl. */
13447 if (arg0 == error_mark_node || arg1 == error_mark_node)
13448 return const0_rtx;
13449
13450 if (target == 0
13451 || GET_MODE (target) != tmode
13452 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13453 target = gen_reg_rtx (tmode);
13454
13455 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13456 op0 = copy_to_mode_reg (mode0, op0);
13457 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13458 op1 = copy_to_mode_reg (mode1, op1);
13459
13460 /* Note that for many of the relevant operations (e.g. cmpne or
13461 cmpeq) with float or double operands, it makes more sense for the
13462      mode of the allocated scratch register to be a vector of
13463      integers.  But the choice to copy the mode of operand 0 was made
13464 long ago and there are no plans to change it. */
13465 scratch = gen_reg_rtx (mode0);
13466
13467 pat = GEN_FCN (icode) (scratch, op0, op1);
13468 if (! pat)
13469 return 0;
13470 emit_insn (pat);
13471
13472 /* The vec_any* and vec_all* predicates use the same opcodes for two
13473 different operations, but the bits in CR6 will be different
13474 depending on what information we want. So we have to play tricks
13475 with CR6 to get the right bits out.
13476
13477 If you think this is disgusting, look at the specs for the
13478 AltiVec predicates. */
13479
13480 switch (cr6_form_int)
13481 {
13482 case 0:
13483 emit_insn (gen_cr6_test_for_zero (target));
13484 break;
13485 case 1:
13486 emit_insn (gen_cr6_test_for_zero_reverse (target));
13487 break;
13488 case 2:
13489 emit_insn (gen_cr6_test_for_lt (target));
13490 break;
13491 case 3:
13492 emit_insn (gen_cr6_test_for_lt_reverse (target));
13493 break;
13494 default:
13495 error ("argument 1 of %qs is out of range",
13496 "__builtin_altivec_predicate");
13497 break;
13498 }
13499
13500 return target;
13501 }
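
/* User-level view of the CR6 tricks above (a sketch; altivec.h maps the
   vec_all_* and vec_any_* intrinsics to the *_p builtins with a __CR6_*
   constant as the first argument):  */
#if 0
int all_eq = __builtin_altivec_vcmpequw_p (__CR6_LT, va, vb);
int any_eq = __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, va, vb);
#endif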
13502
13503 rtx
13504 swap_endian_selector_for_mode (machine_mode mode)
13505 {
13506 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13507 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13508 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13509 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13510
13511 unsigned int *swaparray, i;
13512 rtx perm[16];
13513
13514 switch (mode)
13515 {
13516 case E_V1TImode:
13517 swaparray = swap1;
13518 break;
13519 case E_V2DFmode:
13520 case E_V2DImode:
13521 swaparray = swap2;
13522 break;
13523 case E_V4SFmode:
13524 case E_V4SImode:
13525 swaparray = swap4;
13526 break;
13527 case E_V8HImode:
13528 swaparray = swap8;
13529 break;
13530 default:
13531 gcc_unreachable ();
13532 }
13533
13534 for (i = 0; i < 16; ++i)
13535 perm[i] = GEN_INT (swaparray[i]);
13536
13537 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13538 gen_rtvec_v (16, perm)));
13539 }
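
/* Example (a sketch): for V4SImode the selector is the byte permutation
   {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}, which byte-swaps each
   4-byte element in place; feeding it to a vperm converts a vector
   between big- and little-endian element layouts.  */
#if 0
rtx sel = swap_endian_selector_for_mode (V4SImode);
/* sel now holds the V16QI permutation constant above in a register.  */
#endif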
13540
13541 static rtx
13542 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13543 {
13544 rtx pat, addr;
13545 tree arg0 = CALL_EXPR_ARG (exp, 0);
13546 tree arg1 = CALL_EXPR_ARG (exp, 1);
13547 machine_mode tmode = insn_data[icode].operand[0].mode;
13548 machine_mode mode0 = Pmode;
13549 machine_mode mode1 = Pmode;
13550 rtx op0 = expand_normal (arg0);
13551 rtx op1 = expand_normal (arg1);
13552
13553 if (icode == CODE_FOR_nothing)
13554 /* Builtin not supported on this processor. */
13555 return 0;
13556
13557 /* If we got invalid arguments bail out before generating bad rtl. */
13558 if (arg0 == error_mark_node || arg1 == error_mark_node)
13559 return const0_rtx;
13560
13561 if (target == 0
13562 || GET_MODE (target) != tmode
13563 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13564 target = gen_reg_rtx (tmode);
13565
13566 op1 = copy_to_mode_reg (mode1, op1);
13567
13568 /* For LVX, express the RTL accurately by ANDing the address with -16.
13569 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13570 so the raw address is fine. */
13571 if (icode == CODE_FOR_altivec_lvx_v1ti
13572 || icode == CODE_FOR_altivec_lvx_v2df
13573 || icode == CODE_FOR_altivec_lvx_v2di
13574 || icode == CODE_FOR_altivec_lvx_v4sf
13575 || icode == CODE_FOR_altivec_lvx_v4si
13576 || icode == CODE_FOR_altivec_lvx_v8hi
13577 || icode == CODE_FOR_altivec_lvx_v16qi)
13578 {
13579 rtx rawaddr;
13580 if (op0 == const0_rtx)
13581 rawaddr = op1;
13582 else
13583 {
13584 op0 = copy_to_mode_reg (mode0, op0);
13585 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13586 }
13587 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13588 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13589
13590 emit_insn (gen_rtx_SET (target, addr));
13591 }
13592 else
13593 {
13594 if (op0 == const0_rtx)
13595 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13596 else
13597 {
13598 op0 = copy_to_mode_reg (mode0, op0);
13599 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13600 gen_rtx_PLUS (Pmode, op1, op0));
13601 }
13602
13603 pat = GEN_FCN (icode) (target, addr);
13604 if (! pat)
13605 return 0;
13606 emit_insn (pat);
13607 }
13608
13609 return target;
13610 }
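
/* The AND with -16 above models lvx precisely: the hardware ignores the
   low four bits of the effective address, so the load always comes from
   a 16-byte-aligned location.  In C terms (a sketch):  */
#if 0
addr = (base + offset) & -16;	/* low 4 bits discarded */
value = *(vector unsigned char *) addr;
#endif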
13611
13612 static rtx
13613 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13614 {
13615 rtx pat;
13616 tree arg0 = CALL_EXPR_ARG (exp, 0);
13617 tree arg1 = CALL_EXPR_ARG (exp, 1);
13618 tree arg2 = CALL_EXPR_ARG (exp, 2);
13619 rtx op0 = expand_normal (arg0);
13620 rtx op1 = expand_normal (arg1);
13621 rtx op2 = expand_normal (arg2);
13622 machine_mode mode0 = insn_data[icode].operand[0].mode;
13623 machine_mode mode1 = insn_data[icode].operand[1].mode;
13624 machine_mode mode2 = insn_data[icode].operand[2].mode;
13625
13626 if (icode == CODE_FOR_nothing)
13627 /* Builtin not supported on this processor. */
13628 return NULL_RTX;
13629
13630 /* If we got invalid arguments bail out before generating bad rtl. */
13631 if (arg0 == error_mark_node
13632 || arg1 == error_mark_node
13633 || arg2 == error_mark_node)
13634 return NULL_RTX;
13635
13636 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13637 op0 = copy_to_mode_reg (mode0, op0);
13638 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13639 op1 = copy_to_mode_reg (mode1, op1);
13640 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13641 op2 = copy_to_mode_reg (mode2, op2);
13642
13643 pat = GEN_FCN (icode) (op0, op1, op2);
13644 if (pat)
13645 emit_insn (pat);
13646
13647 return NULL_RTX;
13648 }
13649
13650 static rtx
13651 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13652 {
13653 tree arg0 = CALL_EXPR_ARG (exp, 0);
13654 tree arg1 = CALL_EXPR_ARG (exp, 1);
13655 tree arg2 = CALL_EXPR_ARG (exp, 2);
13656 rtx op0 = expand_normal (arg0);
13657 rtx op1 = expand_normal (arg1);
13658 rtx op2 = expand_normal (arg2);
13659 rtx pat, addr, rawaddr;
13660 machine_mode tmode = insn_data[icode].operand[0].mode;
13661 machine_mode smode = insn_data[icode].operand[1].mode;
13662 machine_mode mode1 = Pmode;
13663 machine_mode mode2 = Pmode;
13664
13665 /* Invalid arguments. Bail before doing anything stoopid! */
13666 if (arg0 == error_mark_node
13667 || arg1 == error_mark_node
13668 || arg2 == error_mark_node)
13669 return const0_rtx;
13670
13671 op2 = copy_to_mode_reg (mode2, op2);
13672
13673 /* For STVX, express the RTL accurately by ANDing the address with -16.
13674 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13675 so the raw address is fine. */
13676 if (icode == CODE_FOR_altivec_stvx_v2df
13677 || icode == CODE_FOR_altivec_stvx_v2di
13678 || icode == CODE_FOR_altivec_stvx_v4sf
13679 || icode == CODE_FOR_altivec_stvx_v4si
13680 || icode == CODE_FOR_altivec_stvx_v8hi
13681 || icode == CODE_FOR_altivec_stvx_v16qi)
13682 {
13683 if (op1 == const0_rtx)
13684 rawaddr = op2;
13685 else
13686 {
13687 op1 = copy_to_mode_reg (mode1, op1);
13688 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13689 }
13690
13691 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13692 addr = gen_rtx_MEM (tmode, addr);
13693
13694 op0 = copy_to_mode_reg (tmode, op0);
13695
13696 emit_insn (gen_rtx_SET (addr, op0));
13697 }
13698 else
13699 {
13700 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13701 op0 = copy_to_mode_reg (smode, op0);
13702
13703 if (op1 == const0_rtx)
13704 addr = gen_rtx_MEM (tmode, op2);
13705 else
13706 {
13707 op1 = copy_to_mode_reg (mode1, op1);
13708 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13709 }
13710
13711 pat = GEN_FCN (icode) (addr, op0);
13712 if (pat)
13713 emit_insn (pat);
13714 }
13715
13716 return NULL_RTX;
13717 }
13718
13719 /* Return the appropriate SPR number associated with the given builtin. */
13720 static inline HOST_WIDE_INT
13721 htm_spr_num (enum rs6000_builtins code)
13722 {
13723 if (code == HTM_BUILTIN_GET_TFHAR
13724 || code == HTM_BUILTIN_SET_TFHAR)
13725 return TFHAR_SPR;
13726 else if (code == HTM_BUILTIN_GET_TFIAR
13727 || code == HTM_BUILTIN_SET_TFIAR)
13728 return TFIAR_SPR;
13729 else if (code == HTM_BUILTIN_GET_TEXASR
13730 || code == HTM_BUILTIN_SET_TEXASR)
13731 return TEXASR_SPR;
13732 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13733 || code == HTM_BUILTIN_SET_TEXASRU);
13734 return TEXASRU_SPR;
13735 }
13736
13737 /* Return the correct ICODE value depending on whether we are
13738 setting or reading the HTM SPRs. */
13739 static inline enum insn_code
13740 rs6000_htm_spr_icode (bool nonvoid)
13741 {
13742 if (nonvoid)
13743 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13744 else
13745 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13746 }
13747
13748 /* Expand the HTM builtin in EXP and store the result in TARGET.
13749 Store true in *EXPANDEDP if we found a builtin to expand. */
13750 static rtx
13751 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13752 {
13753 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13754 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13755 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13756 const struct builtin_description *d;
13757 size_t i;
13758
13759 *expandedp = true;
13760
13761 if (!TARGET_POWERPC64
13762 && (fcode == HTM_BUILTIN_TABORTDC
13763 || fcode == HTM_BUILTIN_TABORTDCI))
13764 {
13765 size_t uns_fcode = (size_t)fcode;
13766 const char *name = rs6000_builtin_info[uns_fcode].name;
13767 error ("builtin %qs is only valid in 64-bit mode", name);
13768 return const0_rtx;
13769 }
13770
13771 /* Expand the HTM builtins. */
13772 d = bdesc_htm;
13773 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13774 if (d->code == fcode)
13775 {
13776 rtx op[MAX_HTM_OPERANDS], pat;
13777 int nopnds = 0;
13778 tree arg;
13779 call_expr_arg_iterator iter;
13780 unsigned attr = rs6000_builtin_info[fcode].attr;
13781 enum insn_code icode = d->icode;
13782 const struct insn_operand_data *insn_op;
13783 bool uses_spr = (attr & RS6000_BTC_SPR);
13784 rtx cr = NULL_RTX;
13785
13786 if (uses_spr)
13787 icode = rs6000_htm_spr_icode (nonvoid);
13788 insn_op = &insn_data[icode].operand[0];
13789
13790 if (nonvoid)
13791 {
13792 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
13793 if (!target
13794 || GET_MODE (target) != tmode
13795 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13796 target = gen_reg_rtx (tmode);
13797 if (uses_spr)
13798 op[nopnds++] = target;
13799 }
13800
13801 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13802 {
13803 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13804 return const0_rtx;
13805
13806 insn_op = &insn_data[icode].operand[nopnds];
13807
13808 op[nopnds] = expand_normal (arg);
13809
13810 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13811 {
13812 if (!strcmp (insn_op->constraint, "n"))
13813 {
13814 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13815 if (!CONST_INT_P (op[nopnds]))
13816 error ("argument %d must be an unsigned literal", arg_num);
13817 else
13818 error ("argument %d is an unsigned literal that is "
13819 "out of range", arg_num);
13820 return const0_rtx;
13821 }
13822 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13823 }
13824
13825 nopnds++;
13826 }
13827
13828 /* Handle the builtins for extended mnemonics. These accept
13829 no arguments, but map to builtins that take arguments. */
13830 switch (fcode)
13831 {
13832 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13833 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13834 op[nopnds++] = GEN_INT (1);
13835 if (flag_checking)
13836 attr |= RS6000_BTC_UNARY;
13837 break;
13838 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13839 op[nopnds++] = GEN_INT (0);
13840 if (flag_checking)
13841 attr |= RS6000_BTC_UNARY;
13842 break;
13843 default:
13844 break;
13845 }
13846
13847 /* If this builtin accesses SPRs, then pass in the appropriate
13848 SPR number and SPR regno as the last two operands. */
13849 if (uses_spr)
13850 {
13851 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13852 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13853 }
13854 /* If this builtin accesses a CR, then pass in a scratch
13855 CR as the last operand. */
13856 else if (attr & RS6000_BTC_CR)
13857 {
13858 cr = gen_reg_rtx (CCmode);
13859 op[nopnds++] = cr;
}
13860
13861 if (flag_checking)
13862 {
13863 int expected_nopnds = 0;
13864 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
13865 expected_nopnds = 1;
13866 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
13867 expected_nopnds = 2;
13868 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
13869 expected_nopnds = 3;
13870 if (!(attr & RS6000_BTC_VOID))
13871 expected_nopnds += 1;
13872 if (uses_spr)
13873 expected_nopnds += 1;
13874
13875 gcc_assert (nopnds == expected_nopnds
13876 && nopnds <= MAX_HTM_OPERANDS);
13877 }
13878
13879 switch (nopnds)
13880 {
13881 case 1:
13882 pat = GEN_FCN (icode) (op[0]);
13883 break;
13884 case 2:
13885 pat = GEN_FCN (icode) (op[0], op[1]);
13886 break;
13887 case 3:
13888 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
13889 break;
13890 case 4:
13891 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
13892 break;
13893 default:
13894 gcc_unreachable ();
13895 }
13896 if (!pat)
13897 return NULL_RTX;
13898 emit_insn (pat);
13899
13900 if (attr & RS6000_BTC_CR)
13901 {
13902 if (fcode == HTM_BUILTIN_TBEGIN)
13903 {
13904 /* Emit code to set TARGET to true or false depending on
13905 whether the tbegin. instruction succeeded or failed
13906 to start a transaction.  We do this by placing the 1's
13907 complement of CR's EQ bit into TARGET.  */
13908 rtx scratch = gen_reg_rtx (SImode);
13909 emit_insn (gen_rtx_SET (scratch,
13910 gen_rtx_EQ (SImode, cr,
13911 const0_rtx)));
13912 emit_insn (gen_rtx_SET (target,
13913 gen_rtx_XOR (SImode, scratch,
13914 GEN_INT (1))));
13915 }
13916 else
13917 {
13918 /* Emit code to copy the 4-bit condition register field
13919 CR into the least significant end of register TARGET. */
13920 rtx scratch1 = gen_reg_rtx (SImode);
13921 rtx scratch2 = gen_reg_rtx (SImode);
13922 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
13923 emit_insn (gen_movcc (subreg, cr));
13924 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
13925 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
13926 }
13927 }
13928
13929 if (nonvoid)
13930 return target;
13931 return const0_rtx;
13932 }
13933
13934 *expandedp = false;
13935 return NULL_RTX;
13936 }
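
/* Illustrative sketch (hypothetical user code, assuming -mhtm):

     if (__builtin_tbegin (0))
       ... transactional code ...

   The CR handling above places the complement of the CR EQ bit in the
   result, so the condition is intended to be true exactly when the
   transaction starts successfully.  */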
13937
13938 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
13939
13940 static rtx
13941 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
13942 rtx target)
13943 {
13944 /* __builtin_cpu_init () is a nop, so expand to nothing. */
13945 if (fcode == RS6000_BUILTIN_CPU_INIT)
13946 return const0_rtx;
13947
13948 if (target == 0 || GET_MODE (target) != SImode)
13949 target = gen_reg_rtx (SImode);
13950
13951 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
13952 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
13953 /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
13954 to a STRING_CST. */
13955 if (TREE_CODE (arg) == ARRAY_REF
13956 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
13957 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
13958 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
13959 arg = TREE_OPERAND (arg, 0);
13960
13961 if (TREE_CODE (arg) != STRING_CST)
13962 {
13963 error ("builtin %qs only accepts a string argument",
13964 rs6000_builtin_info[(size_t) fcode].name);
13965 return const0_rtx;
13966 }
13967
13968 if (fcode == RS6000_BUILTIN_CPU_IS)
13969 {
13970 const char *cpu = TREE_STRING_POINTER (arg);
13971 rtx cpuid = NULL_RTX;
13972 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
13973 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
13974 {
13975 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
13976 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
13977 break;
13978 }
13979 if (cpuid == NULL_RTX)
13980 {
13981 /* Invalid CPU argument. */
13982 error ("cpu %qs is an invalid argument to builtin %qs",
13983 cpu, rs6000_builtin_info[(size_t) fcode].name);
13984 return const0_rtx;
13985 }
13986
13987 rtx platform = gen_reg_rtx (SImode);
13988 rtx tcbmem = gen_const_mem (SImode,
13989 gen_rtx_PLUS (Pmode,
13990 gen_rtx_REG (Pmode, TLS_REGNUM),
13991 GEN_INT (TCB_PLATFORM_OFFSET)));
13992 emit_move_insn (platform, tcbmem);
13993 emit_insn (gen_eqsi3 (target, platform, cpuid));
13994 }
13995 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
13996 {
13997 const char *hwcap = TREE_STRING_POINTER (arg);
13998 rtx mask = NULL_RTX;
13999 int hwcap_offset;
14000 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14001 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14002 {
14003 mask = GEN_INT (cpu_supports_info[i].mask);
14004 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14005 break;
14006 }
14007 if (mask == NULL_RTX)
14008 {
14009 /* Invalid HWCAP argument. */
14010 error ("%s %qs is an invalid argument to builtin %qs",
14011 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14012 return const0_rtx;
14013 }
14014
14015 rtx tcb_hwcap = gen_reg_rtx (SImode);
14016 rtx tcbmem = gen_const_mem (SImode,
14017 gen_rtx_PLUS (Pmode,
14018 gen_rtx_REG (Pmode, TLS_REGNUM),
14019 GEN_INT (hwcap_offset)));
14020 emit_move_insn (tcb_hwcap, tcbmem);
14021 rtx scratch1 = gen_reg_rtx (SImode);
14022 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14023 rtx scratch2 = gen_reg_rtx (SImode);
14024 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14025 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14026 }
14027 else
14028 gcc_unreachable ();
14029
14030 /* Record that we have expanded a CPU builtin, so that we can later
14031 emit a reference to the special symbol exported by LIBC to ensure we
14032 do not link against an old LIBC that doesn't support this feature. */
14033 cpu_builtin_p = true;
14034
14035 #else
14036 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14037 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14038
14039 /* For old LIBCs, always return FALSE. */
14040 emit_move_insn (target, GEN_INT (0));
14041 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14042
14043 return target;
14044 }
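
/* Illustrative lowering (sketch, assuming a TCB-provided HWCAP):

     if (__builtin_cpu_is ("power9"))  ...

   becomes an SImode load from the thread pointer (TLS_REGNUM) at
   TCB_PLATFORM_OFFSET, compared via eqsi3 against the cpuid of the
   named platform biased by _DL_FIRST_PLATFORM; "power9" is just an
   example argument.  */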
14045
14046 static rtx
14047 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14048 {
14049 rtx pat;
14050 tree arg0 = CALL_EXPR_ARG (exp, 0);
14051 tree arg1 = CALL_EXPR_ARG (exp, 1);
14052 tree arg2 = CALL_EXPR_ARG (exp, 2);
14053 rtx op0 = expand_normal (arg0);
14054 rtx op1 = expand_normal (arg1);
14055 rtx op2 = expand_normal (arg2);
14056 machine_mode tmode = insn_data[icode].operand[0].mode;
14057 machine_mode mode0 = insn_data[icode].operand[1].mode;
14058 machine_mode mode1 = insn_data[icode].operand[2].mode;
14059 machine_mode mode2 = insn_data[icode].operand[3].mode;
14060
14061 if (icode == CODE_FOR_nothing)
14062 /* Builtin not supported on this processor. */
14063 return 0;
14064
14065 /* If we got invalid arguments bail out before generating bad rtl. */
14066 if (arg0 == error_mark_node
14067 || arg1 == error_mark_node
14068 || arg2 == error_mark_node)
14069 return const0_rtx;
14070
14071 /* Check and prepare arguments depending on the instruction code.
14072
14073 Note that a switch statement instead of the sequence of tests
14074 would be incorrect as many of the CODE_FOR values could be
14075 CODE_FOR_nothing and that would yield multiple alternatives
14076 with identical values. We'd never reach here at runtime in
14077 this case. */
14078 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14079 || icode == CODE_FOR_altivec_vsldoi_v2df
14080 || icode == CODE_FOR_altivec_vsldoi_v4si
14081 || icode == CODE_FOR_altivec_vsldoi_v8hi
14082 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14083 {
14084 /* Only allow 4-bit unsigned literals. */
14085 STRIP_NOPS (arg2);
14086 if (TREE_CODE (arg2) != INTEGER_CST
14087 || TREE_INT_CST_LOW (arg2) & ~0xf)
14088 {
14089 error ("argument 3 must be a 4-bit unsigned literal");
14090 return CONST0_RTX (tmode);
14091 }
14092 }
14093 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14094 || icode == CODE_FOR_vsx_xxpermdi_v2di
14095 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14096 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14097 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14098 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14099 || icode == CODE_FOR_vsx_xxpermdi_v4si
14100 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14101 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14102 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14103 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14104 || icode == CODE_FOR_vsx_xxsldwi_v4si
14105 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14106 || icode == CODE_FOR_vsx_xxsldwi_v2di
14107 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14108 {
14109 /* Only allow 2-bit unsigned literals. */
14110 STRIP_NOPS (arg2);
14111 if (TREE_CODE (arg2) != INTEGER_CST
14112 || TREE_INT_CST_LOW (arg2) & ~0x3)
14113 {
14114 error ("argument 3 must be a 2-bit unsigned literal");
14115 return CONST0_RTX (tmode);
14116 }
14117 }
14118 else if (icode == CODE_FOR_vsx_set_v2df
14119 || icode == CODE_FOR_vsx_set_v2di
14120 || icode == CODE_FOR_bcdadd
14121 || icode == CODE_FOR_bcdadd_lt
14122 || icode == CODE_FOR_bcdadd_eq
14123 || icode == CODE_FOR_bcdadd_gt
14124 || icode == CODE_FOR_bcdsub
14125 || icode == CODE_FOR_bcdsub_lt
14126 || icode == CODE_FOR_bcdsub_eq
14127 || icode == CODE_FOR_bcdsub_gt)
14128 {
14129 /* Only allow 1-bit unsigned literals. */
14130 STRIP_NOPS (arg2);
14131 if (TREE_CODE (arg2) != INTEGER_CST
14132 || TREE_INT_CST_LOW (arg2) & ~0x1)
14133 {
14134 error ("argument 3 must be a 1-bit unsigned literal");
14135 return CONST0_RTX (tmode);
14136 }
14137 }
14138 else if (icode == CODE_FOR_dfp_ddedpd_dd
14139 || icode == CODE_FOR_dfp_ddedpd_td)
14140 {
14141 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
14142 STRIP_NOPS (arg0);
14143 if (TREE_CODE (arg0) != INTEGER_CST
14144 || TREE_INT_CST_LOW (arg0) & ~0x3)
14145 {
14146 error ("argument 1 must be 0 or 2");
14147 return CONST0_RTX (tmode);
14148 }
14149 }
14150 else if (icode == CODE_FOR_dfp_denbcd_dd
14151 || icode == CODE_FOR_dfp_denbcd_td)
14152 {
14153 /* Only allow 1-bit unsigned literals. */
14154 STRIP_NOPS (arg0);
14155 if (TREE_CODE (arg0) != INTEGER_CST
14156 || TREE_INT_CST_LOW (arg0) & ~0x1)
14157 {
14158 error ("argument 1 must be a 1-bit unsigned literal");
14159 return CONST0_RTX (tmode);
14160 }
14161 }
14162 else if (icode == CODE_FOR_dfp_dscli_dd
14163 || icode == CODE_FOR_dfp_dscli_td
14164 || icode == CODE_FOR_dfp_dscri_dd
14165 || icode == CODE_FOR_dfp_dscri_td)
14166 {
14167 /* Only allow 6-bit unsigned literals. */
14168 STRIP_NOPS (arg1);
14169 if (TREE_CODE (arg1) != INTEGER_CST
14170 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14171 {
14172 error ("argument 2 must be a 6-bit unsigned literal");
14173 return CONST0_RTX (tmode);
14174 }
14175 }
14176 else if (icode == CODE_FOR_crypto_vshasigmaw
14177 || icode == CODE_FOR_crypto_vshasigmad)
14178 {
14179 /* Check whether the 2nd and 3rd arguments are integer constants and in
14180 range and prepare arguments. */
14181 STRIP_NOPS (arg1);
14182 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14183 {
14184 error ("argument 2 must be 0 or 1");
14185 return CONST0_RTX (tmode);
14186 }
14187
14188 STRIP_NOPS (arg2);
14189 if (TREE_CODE (arg2) != INTEGER_CST
14190 || wi::geu_p (wi::to_wide (arg2), 16))
14191 {
14192 error ("argument 3 must be in the range [0, 15]");
14193 return CONST0_RTX (tmode);
14194 }
14195 }
14196
14197 if (target == 0
14198 || GET_MODE (target) != tmode
14199 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14200 target = gen_reg_rtx (tmode);
14201
14202 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14203 op0 = copy_to_mode_reg (mode0, op0);
14204 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14205 op1 = copy_to_mode_reg (mode1, op1);
14206 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14207 op2 = copy_to_mode_reg (mode2, op2);
14208
14209 pat = GEN_FCN (icode) (target, op0, op1, op2);
14210 if (! pat)
14211 return 0;
14212 emit_insn (pat);
14213
14214 return target;
14215 }
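
/* Example of the literal checks above (user-level sketch):

     vec_sld (a, b, 3)   -- accepted, 3 fits the 4-bit immediate
     vec_sld (a, b, x)   -- rejected with "argument 3 must be a 4-bit
                            unsigned literal" when x is not a constant

   since the vsldoi patterns need a compile-time shift count.  */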
14216
14217
14218 /* Expand the dst builtins. */
14219 static rtx
14220 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14221 bool *expandedp)
14222 {
14223 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14224 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14225 tree arg0, arg1, arg2;
14226 machine_mode mode0, mode1;
14227 rtx pat, op0, op1, op2;
14228 const struct builtin_description *d;
14229 size_t i;
14230
14231 *expandedp = false;
14232
14233 /* Handle DST variants. */
14234 d = bdesc_dst;
14235 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14236 if (d->code == fcode)
14237 {
14238 arg0 = CALL_EXPR_ARG (exp, 0);
14239 arg1 = CALL_EXPR_ARG (exp, 1);
14240 arg2 = CALL_EXPR_ARG (exp, 2);
14241 op0 = expand_normal (arg0);
14242 op1 = expand_normal (arg1);
14243 op2 = expand_normal (arg2);
14244 mode0 = insn_data[d->icode].operand[0].mode;
14245 mode1 = insn_data[d->icode].operand[1].mode;
14246
14247 /* Invalid arguments, bail out before generating bad rtl. */
14248 if (arg0 == error_mark_node
14249 || arg1 == error_mark_node
14250 || arg2 == error_mark_node)
14251 return const0_rtx;
14252
14253 *expandedp = true;
14254 STRIP_NOPS (arg2);
14255 if (TREE_CODE (arg2) != INTEGER_CST
14256 || TREE_INT_CST_LOW (arg2) & ~0x3)
14257 {
14258 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14259 return const0_rtx;
14260 }
14261
14262 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14263 op0 = copy_to_mode_reg (Pmode, op0);
14264 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14265 op1 = copy_to_mode_reg (mode1, op1);
14266
14267 pat = GEN_FCN (d->icode) (op0, op1, op2);
14268 if (pat != 0)
14269 emit_insn (pat);
14270
14271 return NULL_RTX;
14272 }
14273
14274 return NULL_RTX;
14275 }
14276
14277 /* Expand vec_init builtin. */
14278 static rtx
14279 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14280 {
14281 machine_mode tmode = TYPE_MODE (type);
14282 machine_mode inner_mode = GET_MODE_INNER (tmode);
14283 int i, n_elt = GET_MODE_NUNITS (tmode);
14284
14285 gcc_assert (VECTOR_MODE_P (tmode));
14286 gcc_assert (n_elt == call_expr_nargs (exp));
14287
14288 if (!target || !register_operand (target, tmode))
14289 target = gen_reg_rtx (tmode);
14290
14291 /* If we have a vector comprised of a single element, such as V1TImode, do
14292 the initialization directly. */
14293 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14294 {
14295 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14296 emit_move_insn (target, gen_lowpart (tmode, x));
14297 }
14298 else
14299 {
14300 rtvec v = rtvec_alloc (n_elt);
14301
14302 for (i = 0; i < n_elt; ++i)
14303 {
14304 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14305 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14306 }
14307
14308 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14309 }
14310
14311 return target;
14312 }
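
/* For example, a vec_init of V4SI from four scalars gathers the four
   expanded arguments into a PARALLEL for rs6000_expand_vector_init,
   whereas a single-element V1TI init is just a lowpart move.  */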
14313
14314 /* Return the integer constant in ARG. Constrain it to be in the range
14315 of the subparts of VEC_TYPE; issue an error if not. */
14316
14317 static int
14318 get_element_number (tree vec_type, tree arg)
14319 {
14320 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14321
14322 if (!tree_fits_uhwi_p (arg)
14323 || (elt = tree_to_uhwi (arg), elt > max))
14324 {
14325 error ("selector must be an integer constant in the range [0, %wi]", max);
14326 return 0;
14327 }
14328
14329 return elt;
14330 }
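
/* For example, a V4SI vector has TYPE_VECTOR_SUBPARTS == 4, so the
   selector passed to the vec_set built-ins must be a constant in the
   range [0, 3].  */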
14331
14332 /* Expand vec_set builtin. */
14333 static rtx
14334 altivec_expand_vec_set_builtin (tree exp)
14335 {
14336 machine_mode tmode, mode1;
14337 tree arg0, arg1, arg2;
14338 int elt;
14339 rtx op0, op1;
14340
14341 arg0 = CALL_EXPR_ARG (exp, 0);
14342 arg1 = CALL_EXPR_ARG (exp, 1);
14343 arg2 = CALL_EXPR_ARG (exp, 2);
14344
14345 tmode = TYPE_MODE (TREE_TYPE (arg0));
14346 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14347 gcc_assert (VECTOR_MODE_P (tmode));
14348
14349 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14350 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14351 elt = get_element_number (TREE_TYPE (arg0), arg2);
14352
14353 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14354 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14355
14356 op0 = force_reg (tmode, op0);
14357 op1 = force_reg (mode1, op1);
14358
14359 rs6000_expand_vector_set (op0, op1, elt);
14360
14361 return op0;
14362 }
14363
14364 /* Expand vec_ext builtin. */
14365 static rtx
14366 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14367 {
14368 machine_mode tmode, mode0;
14369 tree arg0, arg1;
14370 rtx op0;
14371 rtx op1;
14372
14373 arg0 = CALL_EXPR_ARG (exp, 0);
14374 arg1 = CALL_EXPR_ARG (exp, 1);
14375
14376 op0 = expand_normal (arg0);
14377 op1 = expand_normal (arg1);
14378
14379 if (TREE_CODE (arg1) == INTEGER_CST)
14380 {
14381 unsigned HOST_WIDE_INT elt;
14382 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14383 unsigned int truncated_selector;
14384 /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1)
14385 returns the low-order bits of the INTEGER_CST for modulo indexing.  */
14386 elt = TREE_INT_CST_LOW (arg1);
14387 truncated_selector = elt % size;
14388 op1 = GEN_INT (truncated_selector);
14389 }
14390
14391 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14392 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14393 gcc_assert (VECTOR_MODE_P (mode0));
14394
14395 op0 = force_reg (mode0, op0);
14396
14397 if (optimize || !target || !register_operand (target, tmode))
14398 target = gen_reg_rtx (tmode);
14399
14400 rs6000_expand_vector_extract (target, op0, op1);
14401
14402 return target;
14403 }
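
/* Example of the modulo indexing above: extracting element 11 from a
   V8HI vector uses selector 11 % 8 == 3, so over-wide constant
   selectors wrap around rather than being rejected.  */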
14404
14405 /* Expand the builtin in EXP and store the result in TARGET. Store
14406 true in *EXPANDEDP if we found a builtin to expand. */
14407 static rtx
14408 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14409 {
14410 const struct builtin_description *d;
14411 size_t i;
14412 enum insn_code icode;
14413 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14414 tree arg0, arg1, arg2;
14415 rtx op0, pat;
14416 machine_mode tmode, mode0;
14417 enum rs6000_builtins fcode
14418 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14419
14420 if (rs6000_overloaded_builtin_p (fcode))
14421 {
14422 *expandedp = true;
14423 error ("unresolved overload for Altivec builtin %qF", fndecl);
14424
14425 /* Given it is invalid, just generate a normal call. */
14426 return expand_call (exp, target, false);
14427 }
14428
14429 target = altivec_expand_dst_builtin (exp, target, expandedp);
14430 if (*expandedp)
14431 return target;
14432
14433 *expandedp = true;
14434
14435 switch (fcode)
14436 {
14437 case ALTIVEC_BUILTIN_STVX_V2DF:
14438 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14439 case ALTIVEC_BUILTIN_STVX_V2DI:
14440 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14441 case ALTIVEC_BUILTIN_STVX_V4SF:
14442 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14443 case ALTIVEC_BUILTIN_STVX:
14444 case ALTIVEC_BUILTIN_STVX_V4SI:
14445 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14446 case ALTIVEC_BUILTIN_STVX_V8HI:
14447 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14448 case ALTIVEC_BUILTIN_STVX_V16QI:
14449 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14450 case ALTIVEC_BUILTIN_STVEBX:
14451 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14452 case ALTIVEC_BUILTIN_STVEHX:
14453 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14454 case ALTIVEC_BUILTIN_STVEWX:
14455 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14456 case ALTIVEC_BUILTIN_STVXL_V2DF:
14457 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14458 case ALTIVEC_BUILTIN_STVXL_V2DI:
14459 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14460 case ALTIVEC_BUILTIN_STVXL_V4SF:
14461 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14462 case ALTIVEC_BUILTIN_STVXL:
14463 case ALTIVEC_BUILTIN_STVXL_V4SI:
14464 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14465 case ALTIVEC_BUILTIN_STVXL_V8HI:
14466 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14467 case ALTIVEC_BUILTIN_STVXL_V16QI:
14468 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14469
14470 case ALTIVEC_BUILTIN_STVLX:
14471 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14472 case ALTIVEC_BUILTIN_STVLXL:
14473 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14474 case ALTIVEC_BUILTIN_STVRX:
14475 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14476 case ALTIVEC_BUILTIN_STVRXL:
14477 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14478
14479 case P9V_BUILTIN_STXVL:
14480 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14481
14482 case P9V_BUILTIN_XST_LEN_R:
14483 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14484
14485 case VSX_BUILTIN_STXVD2X_V1TI:
14486 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14487 case VSX_BUILTIN_STXVD2X_V2DF:
14488 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14489 case VSX_BUILTIN_STXVD2X_V2DI:
14490 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14491 case VSX_BUILTIN_STXVW4X_V4SF:
14492 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14493 case VSX_BUILTIN_STXVW4X_V4SI:
14494 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14495 case VSX_BUILTIN_STXVW4X_V8HI:
14496 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14497 case VSX_BUILTIN_STXVW4X_V16QI:
14498 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14499
14500 /* For the following on big endian, it's ok to use any appropriate
14501 unaligned-supporting store, so use a generic expander. For
14502 little-endian, the exact element-reversing instruction must
14503 be used. */
14504 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14505 {
14506 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14507 : CODE_FOR_vsx_st_elemrev_v1ti);
14508 return altivec_expand_stv_builtin (code, exp);
14509 }
14510 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14511 {
14512 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14513 : CODE_FOR_vsx_st_elemrev_v2df);
14514 return altivec_expand_stv_builtin (code, exp);
14515 }
14516 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14517 {
14518 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14519 : CODE_FOR_vsx_st_elemrev_v2di);
14520 return altivec_expand_stv_builtin (code, exp);
14521 }
14522 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14523 {
14524 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14525 : CODE_FOR_vsx_st_elemrev_v4sf);
14526 return altivec_expand_stv_builtin (code, exp);
14527 }
14528 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14529 {
14530 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14531 : CODE_FOR_vsx_st_elemrev_v4si);
14532 return altivec_expand_stv_builtin (code, exp);
14533 }
14534 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14535 {
14536 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14537 : CODE_FOR_vsx_st_elemrev_v8hi);
14538 return altivec_expand_stv_builtin (code, exp);
14539 }
14540 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14541 {
14542 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14543 : CODE_FOR_vsx_st_elemrev_v16qi);
14544 return altivec_expand_stv_builtin (code, exp);
14545 }
14546
14547 case ALTIVEC_BUILTIN_MFVSCR:
14548 icode = CODE_FOR_altivec_mfvscr;
14549 tmode = insn_data[icode].operand[0].mode;
14550
14551 if (target == 0
14552 || GET_MODE (target) != tmode
14553 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14554 target = gen_reg_rtx (tmode);
14555
14556 pat = GEN_FCN (icode) (target);
14557 if (! pat)
14558 return 0;
14559 emit_insn (pat);
14560 return target;
14561
14562 case ALTIVEC_BUILTIN_MTVSCR:
14563 icode = CODE_FOR_altivec_mtvscr;
14564 arg0 = CALL_EXPR_ARG (exp, 0);
14565 op0 = expand_normal (arg0);
14566 mode0 = insn_data[icode].operand[0].mode;
14567
14568 /* If we got invalid arguments bail out before generating bad rtl. */
14569 if (arg0 == error_mark_node)
14570 return const0_rtx;
14571
14572 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14573 op0 = copy_to_mode_reg (mode0, op0);
14574
14575 pat = GEN_FCN (icode) (op0);
14576 if (pat)
14577 emit_insn (pat);
14578 return NULL_RTX;
14579
14580 case ALTIVEC_BUILTIN_DSSALL:
14581 emit_insn (gen_altivec_dssall ());
14582 return NULL_RTX;
14583
14584 case ALTIVEC_BUILTIN_DSS:
14585 icode = CODE_FOR_altivec_dss;
14586 arg0 = CALL_EXPR_ARG (exp, 0);
14587 STRIP_NOPS (arg0);
14588 op0 = expand_normal (arg0);
14589 mode0 = insn_data[icode].operand[0].mode;
14590
14591 /* If we got invalid arguments bail out before generating bad rtl. */
14592 if (arg0 == error_mark_node)
14593 return const0_rtx;
14594
14595 if (TREE_CODE (arg0) != INTEGER_CST
14596 || TREE_INT_CST_LOW (arg0) & ~0x3)
14597 {
14598 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14599 return const0_rtx;
14600 }
14601
14602 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14603 op0 = copy_to_mode_reg (mode0, op0);
14604
14605 emit_insn (gen_altivec_dss (op0));
14606 return NULL_RTX;
14607
14608 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14609 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14610 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14611 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14612 case VSX_BUILTIN_VEC_INIT_V2DF:
14613 case VSX_BUILTIN_VEC_INIT_V2DI:
14614 case VSX_BUILTIN_VEC_INIT_V1TI:
14615 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14616
14617 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14618 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14619 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14620 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14621 case VSX_BUILTIN_VEC_SET_V2DF:
14622 case VSX_BUILTIN_VEC_SET_V2DI:
14623 case VSX_BUILTIN_VEC_SET_V1TI:
14624 return altivec_expand_vec_set_builtin (exp);
14625
14626 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14627 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14628 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14629 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14630 case VSX_BUILTIN_VEC_EXT_V2DF:
14631 case VSX_BUILTIN_VEC_EXT_V2DI:
14632 case VSX_BUILTIN_VEC_EXT_V1TI:
14633 return altivec_expand_vec_ext_builtin (exp, target);
14634
14635 case P9V_BUILTIN_VEC_EXTRACT4B:
14636 arg1 = CALL_EXPR_ARG (exp, 1);
14637 STRIP_NOPS (arg1);
14638
14639 /* Generate a normal call if it is invalid. */
14640 if (arg1 == error_mark_node)
14641 return expand_call (exp, target, false);
14642
14643 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14644 {
14645 error ("second argument to %qs must be [0, 12]", "vec_vextract4b");
14646 return expand_call (exp, target, false);
14647 }
14648 break;
14649
14650 case P9V_BUILTIN_VEC_INSERT4B:
14651 arg2 = CALL_EXPR_ARG (exp, 2);
14652 STRIP_NOPS (arg2);
14653
14654 /* Generate a normal call if it is invalid. */
14655 if (arg2 == error_mark_node)
14656 return expand_call (exp, target, false);
14657
14658 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14659 {
14660 error ("third argument to %qs must be [0, 12]", "vec_vinsert4b");
14661 return expand_call (exp, target, false);
14662 }
14663 break;
14664
14665 default:
14666 break;
14668 }
14669
14670 /* Expand abs* operations. */
14671 d = bdesc_abs;
14672 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14673 if (d->code == fcode)
14674 return altivec_expand_abs_builtin (d->icode, exp, target);
14675
14676 /* Expand the AltiVec predicates. */
14677 d = bdesc_altivec_preds;
14678 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14679 if (d->code == fcode)
14680 return altivec_expand_predicate_builtin (d->icode, exp, target);
14681
14682 /* LV* are funky; they are initialized differently from the other builtins.  */
14683 switch (fcode)
14684 {
14685 case ALTIVEC_BUILTIN_LVSL:
14686 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14687 exp, target, false);
14688 case ALTIVEC_BUILTIN_LVSR:
14689 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14690 exp, target, false);
14691 case ALTIVEC_BUILTIN_LVEBX:
14692 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14693 exp, target, false);
14694 case ALTIVEC_BUILTIN_LVEHX:
14695 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14696 exp, target, false);
14697 case ALTIVEC_BUILTIN_LVEWX:
14698 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14699 exp, target, false);
14700 case ALTIVEC_BUILTIN_LVXL_V2DF:
14701 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14702 exp, target, false);
14703 case ALTIVEC_BUILTIN_LVXL_V2DI:
14704 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14705 exp, target, false);
14706 case ALTIVEC_BUILTIN_LVXL_V4SF:
14707 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14708 exp, target, false);
14709 case ALTIVEC_BUILTIN_LVXL:
14710 case ALTIVEC_BUILTIN_LVXL_V4SI:
14711 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14712 exp, target, false);
14713 case ALTIVEC_BUILTIN_LVXL_V8HI:
14714 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14715 exp, target, false);
14716 case ALTIVEC_BUILTIN_LVXL_V16QI:
14717 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14718 exp, target, false);
14719 case ALTIVEC_BUILTIN_LVX_V1TI:
14720 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14721 exp, target, false);
14722 case ALTIVEC_BUILTIN_LVX_V2DF:
14723 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14724 exp, target, false);
14725 case ALTIVEC_BUILTIN_LVX_V2DI:
14726 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14727 exp, target, false);
14728 case ALTIVEC_BUILTIN_LVX_V4SF:
14729 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14730 exp, target, false);
14731 case ALTIVEC_BUILTIN_LVX:
14732 case ALTIVEC_BUILTIN_LVX_V4SI:
14733 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14734 exp, target, false);
14735 case ALTIVEC_BUILTIN_LVX_V8HI:
14736 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14737 exp, target, false);
14738 case ALTIVEC_BUILTIN_LVX_V16QI:
14739 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
14740 exp, target, false);
14741 case ALTIVEC_BUILTIN_LVLX:
14742 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14743 exp, target, true);
14744 case ALTIVEC_BUILTIN_LVLXL:
14745 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14746 exp, target, true);
14747 case ALTIVEC_BUILTIN_LVRX:
14748 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14749 exp, target, true);
14750 case ALTIVEC_BUILTIN_LVRXL:
14751 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14752 exp, target, true);
14753 case VSX_BUILTIN_LXVD2X_V1TI:
14754 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14755 exp, target, false);
14756 case VSX_BUILTIN_LXVD2X_V2DF:
14757 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14758 exp, target, false);
14759 case VSX_BUILTIN_LXVD2X_V2DI:
14760 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14761 exp, target, false);
14762 case VSX_BUILTIN_LXVW4X_V4SF:
14763 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14764 exp, target, false);
14765 case VSX_BUILTIN_LXVW4X_V4SI:
14766 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14767 exp, target, false);
14768 case VSX_BUILTIN_LXVW4X_V8HI:
14769 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14770 exp, target, false);
14771 case VSX_BUILTIN_LXVW4X_V16QI:
14772 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14773 exp, target, false);
14774 /* For the following on big endian, it's ok to use any appropriate
14775 unaligned-supporting load, so use a generic expander. For
14776 little-endian, the exact element-reversing instruction must
14777 be used. */
14778 case VSX_BUILTIN_LD_ELEMREV_V2DF:
14779 {
14780 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
14781 : CODE_FOR_vsx_ld_elemrev_v2df);
14782 return altivec_expand_lv_builtin (code, exp, target, false);
14783 }
14784 case VSX_BUILTIN_LD_ELEMREV_V1TI:
14785 {
14786 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
14787 : CODE_FOR_vsx_ld_elemrev_v1ti);
14788 return altivec_expand_lv_builtin (code, exp, target, false);
14789 }
14790 case VSX_BUILTIN_LD_ELEMREV_V2DI:
14791 {
14792 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
14793 : CODE_FOR_vsx_ld_elemrev_v2di);
14794 return altivec_expand_lv_builtin (code, exp, target, false);
14795 }
14796 case VSX_BUILTIN_LD_ELEMREV_V4SF:
14797 {
14798 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
14799 : CODE_FOR_vsx_ld_elemrev_v4sf);
14800 return altivec_expand_lv_builtin (code, exp, target, false);
14801 }
14802 case VSX_BUILTIN_LD_ELEMREV_V4SI:
14803 {
14804 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
14805 : CODE_FOR_vsx_ld_elemrev_v4si);
14806 return altivec_expand_lv_builtin (code, exp, target, false);
14807 }
14808 case VSX_BUILTIN_LD_ELEMREV_V8HI:
14809 {
14810 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
14811 : CODE_FOR_vsx_ld_elemrev_v8hi);
14812 return altivec_expand_lv_builtin (code, exp, target, false);
14813 }
14814 case VSX_BUILTIN_LD_ELEMREV_V16QI:
14815 {
14816 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
14817 : CODE_FOR_vsx_ld_elemrev_v16qi);
14818 return altivec_expand_lv_builtin (code, exp, target, false);
14819 }
14821 default:
14822 break;
14824 }
14825
14826 *expandedp = false;
14827 return NULL_RTX;
14828 }
14829
14830 /* Check whether a builtin function is supported in this target
14831 configuration. */
14832 bool
14833 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
14834 {
14835 HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
14836 return (fnmask & rs6000_builtin_mask) == fnmask;
14840 }
14841
14842 /* Raise an error message for a builtin function that is called without the
14843 appropriate target options being set. */
14844
14845 static void
14846 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14847 {
14848 size_t uns_fncode = (size_t) fncode;
14849 const char *name = rs6000_builtin_info[uns_fncode].name;
14850 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14851
14852 gcc_assert (name != NULL);
14853 if ((fnmask & RS6000_BTM_CELL) != 0)
14854 error ("builtin function %qs is only valid for the cell processor", name);
14855 else if ((fnmask & RS6000_BTM_VSX) != 0)
14856 error ("builtin function %qs requires the %qs option", name, "-mvsx");
14857 else if ((fnmask & RS6000_BTM_HTM) != 0)
14858 error ("builtin function %qs requires the %qs option", name, "-mhtm");
14859 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14860 error ("builtin function %qs requires the %qs option", name, "-maltivec");
14861 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14862 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14863 error ("builtin function %qs requires the %qs and %qs options",
14864 name, "-mhard-dfp", "-mpower8-vector");
14865 else if ((fnmask & RS6000_BTM_DFP) != 0)
14866 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
14867 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14868 error ("builtin function %qs requires the %qs option", name,
14869 "-mpower8-vector");
14870 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14871 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14872 error ("builtin function %qs requires the %qs and %qs options",
14873 name, "-mcpu=power9", "-m64");
14874 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
14875 error ("builtin function %qs requires the %qs option", name,
14876 "-mcpu=power9");
14877 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14878 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14879 error ("builtin function %qs requires the %qs and %qs options",
14880 name, "-mcpu=power9", "-m64");
14881 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
14882 error ("builtin function %qs requires the %qs option", name,
14883 "-mcpu=power9");
14884 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
14885 {
14886 if (!TARGET_HARD_FLOAT)
14887 error ("builtin function %qs requires the %qs option", name,
14888 "-mhard-float");
14889 else
14890 error ("builtin function %qs requires the %qs option", name,
14891 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
14892 }
14893 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
14894 error ("builtin function %qs requires the %qs option", name,
14895 "-mhard-float");
14896 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
14897 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
14898 name);
14899 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
14900 error ("builtin function %qs requires the %qs option", name,
14901 "%<-mfloat128%>");
14902 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14903 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14904 error ("builtin function %qs requires the %qs (or newer), and "
14905 "%qs or %qs options",
14906 name, "-mcpu=power7", "-m64", "-mpowerpc64");
14907 else
14908 error ("builtin function %qs is not supported with the current options",
14909 name);
14910 }
14911
14912 /* Target hook for early folding of built-ins, shamelessly stolen
14913 from ia64.c. */
14914
14915 static tree
14916 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
14917 int n_args ATTRIBUTE_UNUSED,
14918 tree *args ATTRIBUTE_UNUSED,
14919 bool ignore ATTRIBUTE_UNUSED)
14920 {
14921 #ifdef SUBTARGET_FOLD_BUILTIN
14922 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
14923 #else
14924 return NULL_TREE;
14925 #endif
14926 }
14927
14928 /* Helper function to sort out which built-ins may be valid without having
14929 a LHS. */
14930 static bool
14931 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
14932 {
14933 switch (fn_code)
14934 {
14935 case ALTIVEC_BUILTIN_STVX_V16QI:
14936 case ALTIVEC_BUILTIN_STVX_V8HI:
14937 case ALTIVEC_BUILTIN_STVX_V4SI:
14938 case ALTIVEC_BUILTIN_STVX_V4SF:
14939 case ALTIVEC_BUILTIN_STVX_V2DI:
14940 case ALTIVEC_BUILTIN_STVX_V2DF:
14941 case VSX_BUILTIN_STXVW4X_V16QI:
14942 case VSX_BUILTIN_STXVW4X_V8HI:
14943 case VSX_BUILTIN_STXVW4X_V4SF:
14944 case VSX_BUILTIN_STXVW4X_V4SI:
14945 case VSX_BUILTIN_STXVD2X_V2DF:
14946 case VSX_BUILTIN_STXVD2X_V2DI:
14947 return true;
14948 default:
14949 return false;
14950 }
14951 }
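
/* Stores are the canonical case: a vec_st expanded through, e.g.,
   ALTIVEC_BUILTIN_STVX_V4SI produces no LHS but must still be kept
   for its memory side effect.  */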
14952
14953 /* Helper function to handle the gimple folding of a vector compare
14954 operation. This sets up true/false vectors, and uses the
14955 VEC_COND_EXPR operation.
14956 CODE indicates which comparison is to be made. (EQ, GT, ...).
14957 TYPE indicates the type of the result. */
14958 static tree
14959 fold_build_vec_cmp (tree_code code, tree type,
14960 tree arg0, tree arg1)
14961 {
14962 tree cmp_type = build_same_sized_truth_vector_type (type);
14963 tree zero_vec = build_zero_cst (type);
14964 tree minus_one_vec = build_minus_one_cst (type);
14965 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
14966 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
14967 }
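
/* For example, folding vec_cmpeq (a, b) on V4SI operands builds,
   roughly:

     cmp = a == b;                          -- boolean vector compare
     lhs = cmp ? {-1,-1,-1,-1} : {0,0,0,0}  -- VEC_COND_EXPR

   so each result lane is all-ones where the compare holds and zero
   elsewhere.  */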
14968
14969 /* Helper function to handle the in-between steps for the
14970 vector compare built-ins. */
14971 static void
14972 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
14973 {
14974 tree arg0 = gimple_call_arg (stmt, 0);
14975 tree arg1 = gimple_call_arg (stmt, 1);
14976 tree lhs = gimple_call_lhs (stmt);
14977 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
14978 gimple *g = gimple_build_assign (lhs, cmp);
14979 gimple_set_location (g, gimple_location (stmt));
14980 gsi_replace (gsi, g, true);
14981 }
14982
14983 /* Helper function to map V2DF and V4SF types to their
14984 integral equivalents (V2DI and V4SI). */
14985 tree map_to_integral_tree_type (tree input_tree_type)
14986 {
14987 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
14988 return input_tree_type;
14989 else
14990 {
14991 if (types_compatible_p (TREE_TYPE (input_tree_type),
14992 TREE_TYPE (V2DF_type_node)))
14993 return V2DI_type_node;
14994 else if (types_compatible_p (TREE_TYPE (input_tree_type),
14995 TREE_TYPE (V4SF_type_node)))
14996 return V4SI_type_node;
14997 else
14998 gcc_unreachable ();
14999 }
15000 }
15001
15002 /* Helper function to handle the vector merge[hl] built-ins.  The
15003 implementation difference between the h and l versions is in the
15004 values used when building the permute vector for the high-word versus
15005 low-word merge.  The variance is keyed off the use_high parameter.  */
15006 static void
15007 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15008 {
15009 tree arg0 = gimple_call_arg (stmt, 0);
15010 tree arg1 = gimple_call_arg (stmt, 1);
15011 tree lhs = gimple_call_lhs (stmt);
15012 tree lhs_type = TREE_TYPE (lhs);
15013 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15014 int midpoint = n_elts / 2;
15015 int offset = 0;
15016
15017 if (use_high == 1)
15018 offset = midpoint;
15019
15020 /* The permute_type will match the lhs for integral types. For double and
15021 float types, the permute type needs to map to the V2 or V4 type that
15022 matches size. */
15023 tree permute_type;
15024 permute_type = map_to_integral_tree_type (lhs_type);
15025 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15026
15027 for (int i = 0; i < midpoint; i++)
15028 {
15029 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15030 offset + i));
15031 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15032 offset + n_elts + i));
15033 }
15034
15035 tree permute = elts.build ();
15036
15037 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15038 gimple_set_location (g, gimple_location (stmt));
15039 gsi_replace (gsi, g, true);
15040 }
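
/* For example, with V4SI inputs (n_elts == 4, midpoint == 2) the loop
   above yields the permute selector {0, 4, 1, 5} when use_high is 0
   and {2, 6, 3, 7} when use_high is 1, interleaving the chosen half
   of arg0 with the corresponding half of arg1.  */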
15041
15042 /* Helper function to handle the vector merge[eo] built-ins. */
15043 static void
15044 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15045 {
15046 tree arg0 = gimple_call_arg (stmt, 0);
15047 tree arg1 = gimple_call_arg (stmt, 1);
15048 tree lhs = gimple_call_lhs (stmt);
15049 tree lhs_type = TREE_TYPE (lhs);
15050 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15051
15052 /* The permute_type will match the lhs for integral types. For double and
15053 float types, the permute type needs to map to the V2 or V4 type that
15054 matches size. */
15055 tree permute_type;
15056 permute_type = map_to_integral_tree_type (lhs_type);
15057
15058 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15059
15060 /* Build the permute vector. */
15061 for (int i = 0; i < n_elts / 2; i++)
15062 {
15063 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15064 2*i + use_odd));
15065 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15066 2*i + use_odd + n_elts));
15067 }
15068
15069 tree permute = elts.build ();
15070
15071 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15072 gimple_set_location (g, gimple_location (stmt));
15073 gsi_replace (gsi, g, true);
15074 }
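
/* For example, with V4SI inputs the selector built above is
   {0, 4, 2, 6} for the even merge (use_odd == 0) and {1, 5, 3, 7}
   for the odd merge (use_odd == 1).  */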
15075
15076 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15077 a constant, use rs6000_fold_builtin.) */
15078
15079 bool
15080 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15081 {
15082 gimple *stmt = gsi_stmt (*gsi);
15083 tree fndecl = gimple_call_fndecl (stmt);
15084 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15085 enum rs6000_builtins fn_code
15086 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15087 tree arg0, arg1, lhs, temp;
15088 enum tree_code bcode;
15089 gimple *g;
15090
15091 size_t uns_fncode = (size_t) fn_code;
15092 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15093 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15094 const char *fn_name2 = (icode != CODE_FOR_nothing)
15095 ? get_insn_name ((int) icode)
15096 : "nothing";
15097
15098 if (TARGET_DEBUG_BUILTIN)
15099 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15100 fn_code, fn_name1, fn_name2);
15101
15102 if (!rs6000_fold_gimple)
15103 return false;
15104
15105 /* Prevent gimple folding for code that does not have a LHS, unless it is
15106 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15107 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15108 return false;
15109
15110 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15111 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15112 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15113 if (!func_valid_p)
15114 return false;
15115
15116 switch (fn_code)
15117 {
15118 /* Flavors of vec_add. We deliberately don't expand
15119 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15120 TImode, resulting in much poorer code generation. */
15121 case ALTIVEC_BUILTIN_VADDUBM:
15122 case ALTIVEC_BUILTIN_VADDUHM:
15123 case ALTIVEC_BUILTIN_VADDUWM:
15124 case P8V_BUILTIN_VADDUDM:
15125 case ALTIVEC_BUILTIN_VADDFP:
15126 case VSX_BUILTIN_XVADDDP:
15127 bcode = PLUS_EXPR;
15128 do_binary:
15129 arg0 = gimple_call_arg (stmt, 0);
15130 arg1 = gimple_call_arg (stmt, 1);
15131 lhs = gimple_call_lhs (stmt);
15132 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15133 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15134 {
15135 /* Ensure the binary operation is performed in a type
15136 that wraps if it is integral type. */
15137 gimple_seq stmts = NULL;
15138 tree type = unsigned_type_for (TREE_TYPE (lhs));
15139 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15140 type, arg0);
15141 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15142 type, arg1);
15143 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15144 type, uarg0, uarg1);
15145 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15146 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15147 build1 (VIEW_CONVERT_EXPR,
15148 TREE_TYPE (lhs), res));
15149 gsi_replace (gsi, g, true);
15150 return true;
15151 }
15152 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15153 gimple_set_location (g, gimple_location (stmt));
15154 gsi_replace (gsi, g, true);
15155 return true;
15156 /* Flavors of vec_sub. We deliberately don't expand
15157 P8V_BUILTIN_VSUBUQM. */
15158 case ALTIVEC_BUILTIN_VSUBUBM:
15159 case ALTIVEC_BUILTIN_VSUBUHM:
15160 case ALTIVEC_BUILTIN_VSUBUWM:
15161 case P8V_BUILTIN_VSUBUDM:
15162 case ALTIVEC_BUILTIN_VSUBFP:
15163 case VSX_BUILTIN_XVSUBDP:
15164 bcode = MINUS_EXPR;
15165 goto do_binary;
15166 case VSX_BUILTIN_XVMULSP:
15167 case VSX_BUILTIN_XVMULDP:
15168 arg0 = gimple_call_arg (stmt, 0);
15169 arg1 = gimple_call_arg (stmt, 1);
15170 lhs = gimple_call_lhs (stmt);
15171 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15172 gimple_set_location (g, gimple_location (stmt));
15173 gsi_replace (gsi, g, true);
15174 return true;
15175 /* Even element flavors of vec_mul (signed). */
15176 case ALTIVEC_BUILTIN_VMULESB:
15177 case ALTIVEC_BUILTIN_VMULESH:
15178 case P8V_BUILTIN_VMULESW:
15179 /* Even element flavors of vec_mul (unsigned). */
15180 case ALTIVEC_BUILTIN_VMULEUB:
15181 case ALTIVEC_BUILTIN_VMULEUH:
15182 case P8V_BUILTIN_VMULEUW:
15183 arg0 = gimple_call_arg (stmt, 0);
15184 arg1 = gimple_call_arg (stmt, 1);
15185 lhs = gimple_call_lhs (stmt);
15186 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15187 gimple_set_location (g, gimple_location (stmt));
15188 gsi_replace (gsi, g, true);
15189 return true;
15190 /* Odd element flavors of vec_mul (signed). */
15191 case ALTIVEC_BUILTIN_VMULOSB:
15192 case ALTIVEC_BUILTIN_VMULOSH:
15193 case P8V_BUILTIN_VMULOSW:
15194 /* Odd element flavors of vec_mul (unsigned). */
15195 case ALTIVEC_BUILTIN_VMULOUB:
15196 case ALTIVEC_BUILTIN_VMULOUH:
15197 case P8V_BUILTIN_VMULOUW:
15198 arg0 = gimple_call_arg (stmt, 0);
15199 arg1 = gimple_call_arg (stmt, 1);
15200 lhs = gimple_call_lhs (stmt);
15201 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15202 gimple_set_location (g, gimple_location (stmt));
15203 gsi_replace (gsi, g, true);
15204 return true;
15205 /* Flavors of vec_div (Integer). */
15206 case VSX_BUILTIN_DIV_V2DI:
15207 case VSX_BUILTIN_UDIV_V2DI:
15208 arg0 = gimple_call_arg (stmt, 0);
15209 arg1 = gimple_call_arg (stmt, 1);
15210 lhs = gimple_call_lhs (stmt);
15211 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15212 gimple_set_location (g, gimple_location (stmt));
15213 gsi_replace (gsi, g, true);
15214 return true;
15215 /* Flavors of vec_div (Float). */
15216 case VSX_BUILTIN_XVDIVSP:
15217 case VSX_BUILTIN_XVDIVDP:
15218 arg0 = gimple_call_arg (stmt, 0);
15219 arg1 = gimple_call_arg (stmt, 1);
15220 lhs = gimple_call_lhs (stmt);
15221 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15222 gimple_set_location (g, gimple_location (stmt));
15223 gsi_replace (gsi, g, true);
15224 return true;
15225 /* Flavors of vec_and. */
15226 case ALTIVEC_BUILTIN_VAND:
15227 arg0 = gimple_call_arg (stmt, 0);
15228 arg1 = gimple_call_arg (stmt, 1);
15229 lhs = gimple_call_lhs (stmt);
15230 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15231 gimple_set_location (g, gimple_location (stmt));
15232 gsi_replace (gsi, g, true);
15233 return true;
15234 /* Flavors of vec_andc. */
15235 case ALTIVEC_BUILTIN_VANDC:
15236 arg0 = gimple_call_arg (stmt, 0);
15237 arg1 = gimple_call_arg (stmt, 1);
15238 lhs = gimple_call_lhs (stmt);
15239 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15240 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15241 gimple_set_location (g, gimple_location (stmt));
15242 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15243 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15244 gimple_set_location (g, gimple_location (stmt));
15245 gsi_replace (gsi, g, true);
15246 return true;
15247 /* Flavors of vec_nand. */
15248 case P8V_BUILTIN_VEC_NAND:
15249 case P8V_BUILTIN_NAND_V16QI:
15250 case P8V_BUILTIN_NAND_V8HI:
15251 case P8V_BUILTIN_NAND_V4SI:
15252 case P8V_BUILTIN_NAND_V4SF:
15253 case P8V_BUILTIN_NAND_V2DF:
15254 case P8V_BUILTIN_NAND_V2DI:
15255 arg0 = gimple_call_arg (stmt, 0);
15256 arg1 = gimple_call_arg (stmt, 1);
15257 lhs = gimple_call_lhs (stmt);
15258 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15259 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15260 gimple_set_location (g, gimple_location (stmt));
15261 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15262 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15263 gimple_set_location (g, gimple_location (stmt));
15264 gsi_replace (gsi, g, true);
15265 return true;
15266 /* Flavors of vec_or. */
15267 case ALTIVEC_BUILTIN_VOR:
15268 arg0 = gimple_call_arg (stmt, 0);
15269 arg1 = gimple_call_arg (stmt, 1);
15270 lhs = gimple_call_lhs (stmt);
15271 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15272 gimple_set_location (g, gimple_location (stmt));
15273 gsi_replace (gsi, g, true);
15274 return true;
15275 /* Flavors of vec_orc.  */
15276 case P8V_BUILTIN_ORC_V16QI:
15277 case P8V_BUILTIN_ORC_V8HI:
15278 case P8V_BUILTIN_ORC_V4SI:
15279 case P8V_BUILTIN_ORC_V4SF:
15280 case P8V_BUILTIN_ORC_V2DF:
15281 case P8V_BUILTIN_ORC_V2DI:
15282 arg0 = gimple_call_arg (stmt, 0);
15283 arg1 = gimple_call_arg (stmt, 1);
15284 lhs = gimple_call_lhs (stmt);
15285 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15286 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15287 gimple_set_location (g, gimple_location (stmt));
15288 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15289 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15290 gimple_set_location (g, gimple_location (stmt));
15291 gsi_replace (gsi, g, true);
15292 return true;
15293 /* Flavors of vec_xor. */
15294 case ALTIVEC_BUILTIN_VXOR:
15295 arg0 = gimple_call_arg (stmt, 0);
15296 arg1 = gimple_call_arg (stmt, 1);
15297 lhs = gimple_call_lhs (stmt);
15298 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15299 gimple_set_location (g, gimple_location (stmt));
15300 gsi_replace (gsi, g, true);
15301 return true;
15302 /* Flavors of vec_nor. */
15303 case ALTIVEC_BUILTIN_VNOR:
15304 arg0 = gimple_call_arg (stmt, 0);
15305 arg1 = gimple_call_arg (stmt, 1);
15306 lhs = gimple_call_lhs (stmt);
15307 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15308 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15309 gimple_set_location (g, gimple_location (stmt));
15310 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15311 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15312 gimple_set_location (g, gimple_location (stmt));
15313 gsi_replace (gsi, g, true);
15314 return true;
15315 /* Flavors of vec_abs.  */
15316 case ALTIVEC_BUILTIN_ABS_V16QI:
15317 case ALTIVEC_BUILTIN_ABS_V8HI:
15318 case ALTIVEC_BUILTIN_ABS_V4SI:
15319 case ALTIVEC_BUILTIN_ABS_V4SF:
15320 case P8V_BUILTIN_ABS_V2DI:
15321 case VSX_BUILTIN_XVABSDP:
15322 arg0 = gimple_call_arg (stmt, 0);
15323 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15324 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15325 return false;
15326 lhs = gimple_call_lhs (stmt);
15327 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15328 gimple_set_location (g, gimple_location (stmt));
15329 gsi_replace (gsi, g, true);
15330 return true;
15331 /* Flavors of vec_min.  */
15332 case VSX_BUILTIN_XVMINDP:
15333 case P8V_BUILTIN_VMINSD:
15334 case P8V_BUILTIN_VMINUD:
15335 case ALTIVEC_BUILTIN_VMINSB:
15336 case ALTIVEC_BUILTIN_VMINSH:
15337 case ALTIVEC_BUILTIN_VMINSW:
15338 case ALTIVEC_BUILTIN_VMINUB:
15339 case ALTIVEC_BUILTIN_VMINUH:
15340 case ALTIVEC_BUILTIN_VMINUW:
15341 case ALTIVEC_BUILTIN_VMINFP:
15342 arg0 = gimple_call_arg (stmt, 0);
15343 arg1 = gimple_call_arg (stmt, 1);
15344 lhs = gimple_call_lhs (stmt);
15345 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15346 gimple_set_location (g, gimple_location (stmt));
15347 gsi_replace (gsi, g, true);
15348 return true;
15349 /* Flavors of vec_max.  */
15350 case VSX_BUILTIN_XVMAXDP:
15351 case P8V_BUILTIN_VMAXSD:
15352 case P8V_BUILTIN_VMAXUD:
15353 case ALTIVEC_BUILTIN_VMAXSB:
15354 case ALTIVEC_BUILTIN_VMAXSH:
15355 case ALTIVEC_BUILTIN_VMAXSW:
15356 case ALTIVEC_BUILTIN_VMAXUB:
15357 case ALTIVEC_BUILTIN_VMAXUH:
15358 case ALTIVEC_BUILTIN_VMAXUW:
15359 case ALTIVEC_BUILTIN_VMAXFP:
15360 arg0 = gimple_call_arg (stmt, 0);
15361 arg1 = gimple_call_arg (stmt, 1);
15362 lhs = gimple_call_lhs (stmt);
15363 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15364 gimple_set_location (g, gimple_location (stmt));
15365 gsi_replace (gsi, g, true);
15366 return true;
15367 /* Flavors of vec_eqv. */
15368 case P8V_BUILTIN_EQV_V16QI:
15369 case P8V_BUILTIN_EQV_V8HI:
15370 case P8V_BUILTIN_EQV_V4SI:
15371 case P8V_BUILTIN_EQV_V4SF:
15372 case P8V_BUILTIN_EQV_V2DF:
15373 case P8V_BUILTIN_EQV_V2DI:
15374 arg0 = gimple_call_arg (stmt, 0);
15375 arg1 = gimple_call_arg (stmt, 1);
15376 lhs = gimple_call_lhs (stmt);
15377 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15378 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15379 gimple_set_location (g, gimple_location (stmt));
15380 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15381 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15382 gimple_set_location (g, gimple_location (stmt));
15383 gsi_replace (gsi, g, true);
15384 return true;
15385 /* Flavors of vec_rotate_left. */
15386 case ALTIVEC_BUILTIN_VRLB:
15387 case ALTIVEC_BUILTIN_VRLH:
15388 case ALTIVEC_BUILTIN_VRLW:
15389 case P8V_BUILTIN_VRLD:
15390 arg0 = gimple_call_arg (stmt, 0);
15391 arg1 = gimple_call_arg (stmt, 1);
15392 lhs = gimple_call_lhs (stmt);
15393 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15394 gimple_set_location (g, gimple_location (stmt));
15395 gsi_replace (gsi, g, true);
15396 return true;
15397 /* Flavors of vector shift right algebraic.
15398 vec_sra{b,h,w} -> vsra{b,h,w}. */
15399 case ALTIVEC_BUILTIN_VSRAB:
15400 case ALTIVEC_BUILTIN_VSRAH:
15401 case ALTIVEC_BUILTIN_VSRAW:
15402 case P8V_BUILTIN_VSRAD:
15403 {
15404 arg0 = gimple_call_arg (stmt, 0);
15405 arg1 = gimple_call_arg (stmt, 1);
15406 lhs = gimple_call_lhs (stmt);
15407 tree arg1_type = TREE_TYPE (arg1);
15408 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15409 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15410 location_t loc = gimple_location (stmt);
15411       /* Force arg1 into the valid range matching the arg0 type.  */
15412 /* Build a vector consisting of the max valid bit-size values. */
15413 int n_elts = VECTOR_CST_NELTS (arg1);
15414 tree element_size = build_int_cst (unsigned_element_type,
15415 128 / n_elts);
15416 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15417 for (int i = 0; i < n_elts; i++)
15418 elts.safe_push (element_size);
15419 tree modulo_tree = elts.build ();
15420 /* Modulo the provided shift value against that vector. */
15421 gimple_seq stmts = NULL;
15422 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15423 unsigned_arg1_type, arg1);
15424 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15425 unsigned_arg1_type, unsigned_arg1,
15426 modulo_tree);
15427 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15428 /* And finally, do the shift. */
15429 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15430 gimple_set_location (g, loc);
15431 gsi_replace (gsi, g, true);
15432 return true;
15433 }
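      /* Illustrative sketch: for vec_sra on V4SI the shift counts are
         reduced modulo the element width (128 / 4 = 32 bits), so the fold
         above emits roughly
             b2 = (unsigned vector) b % { 32, 32, 32, 32 };
             d = a >> b2;
         where the cast and the names are made up for exposition.  */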
15434 /* Flavors of vector shift left.
15435 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15436 case ALTIVEC_BUILTIN_VSLB:
15437 case ALTIVEC_BUILTIN_VSLH:
15438 case ALTIVEC_BUILTIN_VSLW:
15439 case P8V_BUILTIN_VSLD:
15440 {
15441 location_t loc;
15442 gimple_seq stmts = NULL;
15443 arg0 = gimple_call_arg (stmt, 0);
15444 tree arg0_type = TREE_TYPE (arg0);
15445 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15446 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15447 return false;
15448 arg1 = gimple_call_arg (stmt, 1);
15449 tree arg1_type = TREE_TYPE (arg1);
15450 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15451 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15452 loc = gimple_location (stmt);
15453 lhs = gimple_call_lhs (stmt);
15454       /* Force arg1 into the valid range matching the arg0 type.  */
15455 /* Build a vector consisting of the max valid bit-size values. */
15456 int n_elts = VECTOR_CST_NELTS (arg1);
15457 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15458 * BITS_PER_UNIT;
15459 tree element_size = build_int_cst (unsigned_element_type,
15460 tree_size_in_bits / n_elts);
15461 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15462 for (int i = 0; i < n_elts; i++)
15463 elts.safe_push (element_size);
15464 tree modulo_tree = elts.build ();
15465 /* Modulo the provided shift value against that vector. */
15466 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15467 unsigned_arg1_type, arg1);
15468 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15469 unsigned_arg1_type, unsigned_arg1,
15470 modulo_tree);
15471 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15472 /* And finally, do the shift. */
15473 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15474 gimple_set_location (g, gimple_location (stmt));
15475 gsi_replace (gsi, g, true);
15476 return true;
15477 }
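      /* Illustrative sketch of the left-shift fold: vec_sl on V8HI emits
         roughly
             b2 = (unsigned vector) b % { 16, 16, ... };
             d = a << b2;
         (16 being the V8HI element width in bits).  Note the fold is
         skipped above for signed element types that do not wrap, to avoid
         introducing undefined overflow.  */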
15478 /* Flavors of vector shift right. */
15479 case ALTIVEC_BUILTIN_VSRB:
15480 case ALTIVEC_BUILTIN_VSRH:
15481 case ALTIVEC_BUILTIN_VSRW:
15482 case P8V_BUILTIN_VSRD:
15483 {
15484 arg0 = gimple_call_arg (stmt, 0);
15485 arg1 = gimple_call_arg (stmt, 1);
15486 lhs = gimple_call_lhs (stmt);
15487 tree arg1_type = TREE_TYPE (arg1);
15488 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15489 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15490 location_t loc = gimple_location (stmt);
15491 gimple_seq stmts = NULL;
15492 /* Convert arg0 to unsigned. */
15493 tree arg0_unsigned
15494 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15495 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15496       /* Force arg1 into the valid range matching the arg0 type.  */
15497 /* Build a vector consisting of the max valid bit-size values. */
15498 int n_elts = VECTOR_CST_NELTS (arg1);
15499 tree element_size = build_int_cst (unsigned_element_type,
15500 128 / n_elts);
15501 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15502 for (int i = 0; i < n_elts; i++)
15503 elts.safe_push (element_size);
15504 tree modulo_tree = elts.build ();
15505 /* Modulo the provided shift value against that vector. */
15506 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15507 unsigned_arg1_type, arg1);
15508 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15509 unsigned_arg1_type, unsigned_arg1,
15510 modulo_tree);
15511 /* Do the shift. */
15512 tree res
15513 = gimple_build (&stmts, RSHIFT_EXPR,
15514 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15515 /* Convert result back to the lhs type. */
15516 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15517 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15518 update_call_from_tree (gsi, res);
15519 return true;
15520 }
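      /* Illustrative sketch: a logical right shift must not sign-extend,
         so arg0 is view-converted to unsigned first; e.g. for V4SI roughly
             a2 = VIEW_CONVERT_EXPR<unsigned vector>(a);
             b2 = (unsigned vector) b % { 32, 32, 32, 32 };
             d = VIEW_CONVERT_EXPR<signed vector>(a2 >> b2);
         with the type names made up for exposition.  */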
15521 /* Vector loads. */
15522 case ALTIVEC_BUILTIN_LVX_V16QI:
15523 case ALTIVEC_BUILTIN_LVX_V8HI:
15524 case ALTIVEC_BUILTIN_LVX_V4SI:
15525 case ALTIVEC_BUILTIN_LVX_V4SF:
15526 case ALTIVEC_BUILTIN_LVX_V2DI:
15527 case ALTIVEC_BUILTIN_LVX_V2DF:
15528 case ALTIVEC_BUILTIN_LVX_V1TI:
15529 {
15530 arg0 = gimple_call_arg (stmt, 0); // offset
15531 arg1 = gimple_call_arg (stmt, 1); // address
15532 lhs = gimple_call_lhs (stmt);
15533 location_t loc = gimple_location (stmt);
15534 /* Since arg1 may be cast to a different type, just use ptr_type_node
15535 here instead of trying to enforce TBAA on pointer types. */
15536 tree arg1_type = ptr_type_node;
15537 tree lhs_type = TREE_TYPE (lhs);
15538 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15539 the tree using the value from arg0. The resulting type will match
15540 the type of arg1. */
15541 gimple_seq stmts = NULL;
15542 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15543 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15544 arg1_type, arg1, temp_offset);
15545 /* Mask off any lower bits from the address. */
15546 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15547 arg1_type, temp_addr,
15548 build_int_cst (arg1_type, -16));
15549 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15550 if (!is_gimple_mem_ref_addr (aligned_addr))
15551 {
15552 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15553 gimple *g = gimple_build_assign (t, aligned_addr);
15554 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15555 aligned_addr = t;
15556 }
15557 	/* Use the build2 helper to set up the MEM_REF.  The MEM_REF could also
15558 take an offset, but since we've already incorporated the offset
15559 above, here we just pass in a zero. */
15560 gimple *g
15561 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15562 build_int_cst (arg1_type, 0)));
15563 gimple_set_location (g, loc);
15564 gsi_replace (gsi, g, true);
15565 return true;
15566 }
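      /* Illustrative sketch: the fold makes the lvx semantics explicit, so
         d = vec_ld (off, p) becomes roughly
             addr = ((char *) p + off) & -16;
             d = MEM[addr];
         since lvx ignores the low four bits of the effective address.  */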
15567 /* Vector stores. */
15568 case ALTIVEC_BUILTIN_STVX_V16QI:
15569 case ALTIVEC_BUILTIN_STVX_V8HI:
15570 case ALTIVEC_BUILTIN_STVX_V4SI:
15571 case ALTIVEC_BUILTIN_STVX_V4SF:
15572 case ALTIVEC_BUILTIN_STVX_V2DI:
15573 case ALTIVEC_BUILTIN_STVX_V2DF:
15574 {
15575 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15576 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15577 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15578 location_t loc = gimple_location (stmt);
15579 tree arg0_type = TREE_TYPE (arg0);
15580 /* Use ptr_type_node (no TBAA) for the arg2_type.
15581 FIXME: (Richard) "A proper fix would be to transition this type as
15582 seen from the frontend to GIMPLE, for example in a similar way we
15583 do for MEM_REFs by piggy-backing that on an extra argument, a
15584 constant zero pointer of the alias pointer type to use (which would
15585 also serve as a type indicator of the store itself). I'd use a
15586 target specific internal function for this (not sure if we can have
15587 those target specific, but I guess if it's folded away then that's
15588 fine) and get away with the overload set." */
15589 tree arg2_type = ptr_type_node;
15590 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15591 	   the tree using the value from arg1.  The resulting type will match
15592 the type of arg2. */
15593 gimple_seq stmts = NULL;
15594 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15595 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15596 arg2_type, arg2, temp_offset);
15597 /* Mask off any lower bits from the address. */
15598 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15599 arg2_type, temp_addr,
15600 build_int_cst (arg2_type, -16));
15601 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15602 if (!is_gimple_mem_ref_addr (aligned_addr))
15603 {
15604 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15605 gimple *g = gimple_build_assign (t, aligned_addr);
15606 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15607 aligned_addr = t;
15608 }
15609 /* The desired gimple result should be similar to:
15610 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15611 gimple *g
15612 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15613 build_int_cst (arg2_type, 0)), arg0);
15614 gimple_set_location (g, loc);
15615 gsi_replace (gsi, g, true);
15616 return true;
15617 }
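      /* Illustrative sketch: vec_st (v, off, p) likewise becomes roughly
             addr = ((char *) p + off) & -16;
             MEM[addr] = v;
         mirroring stvx's truncation of the low address bits.  */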
15618
15619     /* Unaligned vector loads.  */
15620 case VSX_BUILTIN_LXVW4X_V16QI:
15621 case VSX_BUILTIN_LXVW4X_V8HI:
15622 case VSX_BUILTIN_LXVW4X_V4SF:
15623 case VSX_BUILTIN_LXVW4X_V4SI:
15624 case VSX_BUILTIN_LXVD2X_V2DF:
15625 case VSX_BUILTIN_LXVD2X_V2DI:
15626 {
15627 arg0 = gimple_call_arg (stmt, 0); // offset
15628 arg1 = gimple_call_arg (stmt, 1); // address
15629 lhs = gimple_call_lhs (stmt);
15630 location_t loc = gimple_location (stmt);
15631 /* Since arg1 may be cast to a different type, just use ptr_type_node
15632 here instead of trying to enforce TBAA on pointer types. */
15633 tree arg1_type = ptr_type_node;
15634 tree lhs_type = TREE_TYPE (lhs);
15637 	/* In GIMPLE the type of the MEM_REF specifies the alignment.  The
15638 	   required alignment (on POWER) is 4 bytes regardless of data type.  */
15637 tree align_ltype = build_aligned_type (lhs_type, 4);
15638 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15639 the tree using the value from arg0. The resulting type will match
15640 the type of arg1. */
15641 gimple_seq stmts = NULL;
15642 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15643 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15644 arg1_type, arg1, temp_offset);
15645 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15646 if (!is_gimple_mem_ref_addr (temp_addr))
15647 {
15648 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15649 gimple *g = gimple_build_assign (t, temp_addr);
15650 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15651 temp_addr = t;
15652 }
15653 	/* Use the build2 helper to set up the MEM_REF.  The MEM_REF could also
15654 take an offset, but since we've already incorporated the offset
15655 above, here we just pass in a zero. */
15656 gimple *g;
15657 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15658 build_int_cst (arg1_type, 0)));
15659 gimple_set_location (g, loc);
15660 gsi_replace (gsi, g, true);
15661 return true;
15662 }
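      /* Illustrative sketch: here no masking is applied; the effective
         address is used as-is, and the 4-byte-aligned MEM_REF type tells
         later passes the access may be less aligned than the vector size:
             addr = (char *) p + off;
             d = MEM[(aligned-4 vector type *) addr];
         (the type name is informal shorthand).  */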
15663
15664     /* Unaligned vector stores.  */
15665 case VSX_BUILTIN_STXVW4X_V16QI:
15666 case VSX_BUILTIN_STXVW4X_V8HI:
15667 case VSX_BUILTIN_STXVW4X_V4SF:
15668 case VSX_BUILTIN_STXVW4X_V4SI:
15669 case VSX_BUILTIN_STXVD2X_V2DF:
15670 case VSX_BUILTIN_STXVD2X_V2DI:
15671 {
15672 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15673 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15674 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15675 location_t loc = gimple_location (stmt);
15676 tree arg0_type = TREE_TYPE (arg0);
15677 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15678 tree arg2_type = ptr_type_node;
15681 	/* In GIMPLE the type of the MEM_REF specifies the alignment.  The
15682 	   required alignment (on POWER) is 4 bytes regardless of data type.  */
15681 tree align_stype = build_aligned_type (arg0_type, 4);
15682 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15683 the tree using the value from arg1. */
15684 gimple_seq stmts = NULL;
15685 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15686 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15687 arg2_type, arg2, temp_offset);
15688 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15689 if (!is_gimple_mem_ref_addr (temp_addr))
15690 {
15691 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15692 gimple *g = gimple_build_assign (t, temp_addr);
15693 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15694 temp_addr = t;
15695 }
15696 gimple *g;
15697 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15698 build_int_cst (arg2_type, 0)), arg0);
15699 gimple_set_location (g, loc);
15700 gsi_replace (gsi, g, true);
15701 return true;
15702 }
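      /* Illustrative sketch of the matching unaligned store:
             addr = (char *) p + off;
             MEM[(aligned-4 vector type *) addr] = v;
         again without masking the low address bits.  */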
15703
15704 /* Vector Fused multiply-add (fma). */
15705 case ALTIVEC_BUILTIN_VMADDFP:
15706 case VSX_BUILTIN_XVMADDDP:
15707 case ALTIVEC_BUILTIN_VMLADDUHM:
15708 {
15709 arg0 = gimple_call_arg (stmt, 0);
15710 arg1 = gimple_call_arg (stmt, 1);
15711 tree arg2 = gimple_call_arg (stmt, 2);
15712 lhs = gimple_call_lhs (stmt);
15713 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15714 gimple_call_set_lhs (g, lhs);
15715 gimple_call_set_nothrow (g, true);
15716 gimple_set_location (g, gimple_location (stmt));
15717 gsi_replace (gsi, g, true);
15718 return true;
15719 }
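      /* Illustrative sketch: d = vec_madd (a, b, c) becomes a call to the
         internal function, shown in gimple dumps as
             d = .FMA (a, b, c);
         which later passes and the expanders already understand.  */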
15720
15721 /* Vector compares; EQ, NE, GE, GT, LE. */
15722 case ALTIVEC_BUILTIN_VCMPEQUB:
15723 case ALTIVEC_BUILTIN_VCMPEQUH:
15724 case ALTIVEC_BUILTIN_VCMPEQUW:
15725 case P8V_BUILTIN_VCMPEQUD:
15726 fold_compare_helper (gsi, EQ_EXPR, stmt);
15727 return true;
15728
15729 case P9V_BUILTIN_CMPNEB:
15730 case P9V_BUILTIN_CMPNEH:
15731 case P9V_BUILTIN_CMPNEW:
15732 fold_compare_helper (gsi, NE_EXPR, stmt);
15733 return true;
15734
15735 case VSX_BUILTIN_CMPGE_16QI:
15736 case VSX_BUILTIN_CMPGE_U16QI:
15737 case VSX_BUILTIN_CMPGE_8HI:
15738 case VSX_BUILTIN_CMPGE_U8HI:
15739 case VSX_BUILTIN_CMPGE_4SI:
15740 case VSX_BUILTIN_CMPGE_U4SI:
15741 case VSX_BUILTIN_CMPGE_2DI:
15742 case VSX_BUILTIN_CMPGE_U2DI:
15743 fold_compare_helper (gsi, GE_EXPR, stmt);
15744 return true;
15745
15746 case ALTIVEC_BUILTIN_VCMPGTSB:
15747 case ALTIVEC_BUILTIN_VCMPGTUB:
15748 case ALTIVEC_BUILTIN_VCMPGTSH:
15749 case ALTIVEC_BUILTIN_VCMPGTUH:
15750 case ALTIVEC_BUILTIN_VCMPGTSW:
15751 case ALTIVEC_BUILTIN_VCMPGTUW:
15752 case P8V_BUILTIN_VCMPGTUD:
15753 case P8V_BUILTIN_VCMPGTSD:
15754 fold_compare_helper (gsi, GT_EXPR, stmt);
15755 return true;
15756
15757 case VSX_BUILTIN_CMPLE_16QI:
15758 case VSX_BUILTIN_CMPLE_U16QI:
15759 case VSX_BUILTIN_CMPLE_8HI:
15760 case VSX_BUILTIN_CMPLE_U8HI:
15761 case VSX_BUILTIN_CMPLE_4SI:
15762 case VSX_BUILTIN_CMPLE_U4SI:
15763 case VSX_BUILTIN_CMPLE_2DI:
15764 case VSX_BUILTIN_CMPLE_U2DI:
15765 fold_compare_helper (gsi, LE_EXPR, stmt);
15766 return true;
15767
15768     /* Flavors of vec_splat_[us]{8,16,32}.  */
15769 case ALTIVEC_BUILTIN_VSPLTISB:
15770 case ALTIVEC_BUILTIN_VSPLTISH:
15771 case ALTIVEC_BUILTIN_VSPLTISW:
15772 {
15773 arg0 = gimple_call_arg (stmt, 0);
15774 lhs = gimple_call_lhs (stmt);
15775
15778 	/* Only fold the vec_splat_*() if the low bits of arg0 form a
15779 	   5-bit signed constant in the range -16 to +15.  */
15778 if (TREE_CODE (arg0) != INTEGER_CST
15779 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
15780 return false;
15781 gimple_seq stmts = NULL;
15782 location_t loc = gimple_location (stmt);
15783 tree splat_value = gimple_convert (&stmts, loc,
15784 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15785 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15786 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15787 g = gimple_build_assign (lhs, splat_tree);
15788 gimple_set_location (g, gimple_location (stmt));
15789 gsi_replace (gsi, g, true);
15790 return true;
15791 }
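      /* Illustrative sketch: d = vec_splat_s32 (5) becomes the vector
         constant
             d = { 5, 5, 5, 5 };
         after converting the literal to the element type of the lhs.  */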
15792
15793 /* Flavors of vec_splat. */
15794     /* a = vec_splat (b, 0x3) becomes a = { b[3], b[3], b[3], ... };  */
15795 case ALTIVEC_BUILTIN_VSPLTB:
15796 case ALTIVEC_BUILTIN_VSPLTH:
15797 case ALTIVEC_BUILTIN_VSPLTW:
15798 case VSX_BUILTIN_XXSPLTD_V2DI:
15799 case VSX_BUILTIN_XXSPLTD_V2DF:
15800 {
15801 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15802 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
15805 	/* Only fold vec_splat () if arg1 is both a constant value
15806 	   and a valid index into the arg0 vector.  */
15805 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15806 if (TREE_CODE (arg1) != INTEGER_CST
15807 	    || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15808 return false;
15809 lhs = gimple_call_lhs (stmt);
15810 tree lhs_type = TREE_TYPE (lhs);
15811 tree arg0_type = TREE_TYPE (arg0);
15812 tree splat;
15813 if (TREE_CODE (arg0) == VECTOR_CST)
15814 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15815 else
15816 {
15817 /* Determine (in bits) the length and start location of the
15818 splat value for a call to the tree_vec_extract helper. */
15819 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15820 * BITS_PER_UNIT / n_elts;
15821 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
15822 tree len = build_int_cst (bitsizetype, splat_elem_size);
15823 tree start = build_int_cst (bitsizetype, splat_start_bit);
15824 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
15825 len, start);
15826 }
15827 /* And finally, build the new vector. */
15828 tree splat_tree = build_vector_from_val (lhs_type, splat);
15829 g = gimple_build_assign (lhs, splat_tree);
15830 gimple_set_location (g, gimple_location (stmt));
15831 gsi_replace (gsi, g, true);
15832 return true;
15833 }
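      /* Illustrative sketch: with a non-constant source, d = vec_splat (b, 1)
         on V4SI extracts the selected 32-bit element and broadcasts it,
         roughly
             s = BIT_FIELD_REF <b, 32, 32>;
             d = { s, s, s, s };
         via the tree_vec_extract helper.  */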
15834
15835 /* vec_mergel (integrals). */
15836 case ALTIVEC_BUILTIN_VMRGLH:
15837 case ALTIVEC_BUILTIN_VMRGLW:
15838 case VSX_BUILTIN_XXMRGLW_4SI:
15839 case ALTIVEC_BUILTIN_VMRGLB:
15840 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15841 case VSX_BUILTIN_XXMRGLW_4SF:
15842 case VSX_BUILTIN_VEC_MERGEL_V2DF:
15843 fold_mergehl_helper (gsi, stmt, 1);
15844 return true;
15845 /* vec_mergeh (integrals). */
15846 case ALTIVEC_BUILTIN_VMRGHH:
15847 case ALTIVEC_BUILTIN_VMRGHW:
15848 case VSX_BUILTIN_XXMRGHW_4SI:
15849 case ALTIVEC_BUILTIN_VMRGHB:
15850 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15851 case VSX_BUILTIN_XXMRGHW_4SF:
15852 case VSX_BUILTIN_VEC_MERGEH_V2DF:
15853 fold_mergehl_helper (gsi, stmt, 0);
15854 return true;
15855
15856 /* Flavors of vec_mergee. */
15857 case P8V_BUILTIN_VMRGEW_V4SI:
15858 case P8V_BUILTIN_VMRGEW_V2DI:
15859 case P8V_BUILTIN_VMRGEW_V4SF:
15860 case P8V_BUILTIN_VMRGEW_V2DF:
15861 fold_mergeeo_helper (gsi, stmt, 0);
15862 return true;
15863 /* Flavors of vec_mergeo. */
15864 case P8V_BUILTIN_VMRGOW_V4SI:
15865 case P8V_BUILTIN_VMRGOW_V2DI:
15866 case P8V_BUILTIN_VMRGOW_V4SF:
15867 case P8V_BUILTIN_VMRGOW_V2DF:
15868 fold_mergeeo_helper (gsi, stmt, 1);
15869 return true;
15870
15871 /* d = vec_pack (a, b) */
15872 case P8V_BUILTIN_VPKUDUM:
15873 case ALTIVEC_BUILTIN_VPKUHUM:
15874 case ALTIVEC_BUILTIN_VPKUWUM:
15875 {
15876 arg0 = gimple_call_arg (stmt, 0);
15877 arg1 = gimple_call_arg (stmt, 1);
15878 lhs = gimple_call_lhs (stmt);
15879 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
15880 gimple_set_location (g, gimple_location (stmt));
15881 gsi_replace (gsi, g, true);
15882 return true;
15883 }
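      /* Illustrative sketch: d = vec_pack (a, b) on two V2DI inputs narrows
         and concatenates the elements,
             d = VEC_PACK_TRUNC_EXPR <a, b>;
         yielding a V4SI result.  */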
15884
15885 /* d = vec_unpackh (a) */
15888     /* Note that the VEC_UNPACK_{HI,LO}_EXPR used in the gimple_build_assign
15889        call in this code is sensitive to endianness, and needs to be inverted
15890        to handle both LE and BE targets.  */
15889 case ALTIVEC_BUILTIN_VUPKHSB:
15890 case ALTIVEC_BUILTIN_VUPKHSH:
15891 case P8V_BUILTIN_VUPKHSW:
15892 {
15893 arg0 = gimple_call_arg (stmt, 0);
15894 lhs = gimple_call_lhs (stmt);
15895 if (BYTES_BIG_ENDIAN)
15896 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15897 else
15898 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15899 gimple_set_location (g, gimple_location (stmt));
15900 gsi_replace (gsi, g, true);
15901 return true;
15902 }
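      /* Illustrative note: on big-endian, vec_unpackh maps directly to
         VEC_UNPACK_HI_EXPR; on little-endian the register element order is
         reversed, so the LO variant implements the same semantics, as
         selected above (and vice versa for vec_unpackl below).  */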
15903 /* d = vec_unpackl (a) */
15904 case ALTIVEC_BUILTIN_VUPKLSB:
15905 case ALTIVEC_BUILTIN_VUPKLSH:
15906 case P8V_BUILTIN_VUPKLSW:
15907 {
15908 arg0 = gimple_call_arg (stmt, 0);
15909 lhs = gimple_call_lhs (stmt);
15910 if (BYTES_BIG_ENDIAN)
15911 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15912 else
15913 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15914 gimple_set_location (g, gimple_location (stmt));
15915 gsi_replace (gsi, g, true);
15916 return true;
15917 }
15918     /* There is no gimple type corresponding to pixel, so just return.  */
15919 case ALTIVEC_BUILTIN_VUPKHPX:
15920 case ALTIVEC_BUILTIN_VUPKLPX:
15921 return false;
15922
15923 /* vec_perm. */
15924 case ALTIVEC_BUILTIN_VPERM_16QI:
15925 case ALTIVEC_BUILTIN_VPERM_8HI:
15926 case ALTIVEC_BUILTIN_VPERM_4SI:
15927 case ALTIVEC_BUILTIN_VPERM_2DI:
15928 case ALTIVEC_BUILTIN_VPERM_4SF:
15929 case ALTIVEC_BUILTIN_VPERM_2DF:
15930 {
15931 arg0 = gimple_call_arg (stmt, 0);
15932 arg1 = gimple_call_arg (stmt, 1);
15933 tree permute = gimple_call_arg (stmt, 2);
15934 lhs = gimple_call_lhs (stmt);
15935 location_t loc = gimple_location (stmt);
15936 gimple_seq stmts = NULL;
15939 	// Convert arg0 and arg1 to match the type of the permute
15940 	// for the VEC_PERM_EXPR operation.
15941 	tree permute_type = TREE_TYPE (permute);
15940 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
15941 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
15942 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
15943 permute_type, arg0_ptype, arg1_ptype,
15944 permute);
15945 // Convert the result back to the desired lhs type upon completion.
15946 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
15947 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15948 g = gimple_build_assign (lhs, temp);
15949 gimple_set_location (g, loc);
15950 gsi_replace (gsi, g, true);
15951 return true;
15952 }
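      /* Illustrative sketch: d = vec_perm (a, b, p) with V4SF operands is
         performed on the byte vectors, roughly
             a2 = VIEW_CONVERT_EXPR<v16qi>(a);
             b2 = VIEW_CONVERT_EXPR<v16qi>(b);
             d = (v4sf) VEC_PERM_EXPR <a2, b2, p>;
         since the permute control vector p has a V16QI type.  */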
15953
15954 default:
15955 if (TARGET_DEBUG_BUILTIN)
15956 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
15957 fn_code, fn_name1, fn_name2);
15958 break;
15959 }
15960
15961 return false;
15962 }
15963
15964 /* Expand an expression EXP that calls a built-in function,
15965 with result going to TARGET if that's convenient
15966 (and in mode MODE if that's convenient).
15967 SUBTARGET may be used as the target for computing one of EXP's operands.
15968 IGNORE is nonzero if the value is to be ignored. */
15969
15970 static rtx
15971 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15972 machine_mode mode ATTRIBUTE_UNUSED,
15973 int ignore ATTRIBUTE_UNUSED)
15974 {
15975 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15976 enum rs6000_builtins fcode
15977 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
15978 size_t uns_fcode = (size_t)fcode;
15979 const struct builtin_description *d;
15980 size_t i;
15981 rtx ret;
15982 bool success;
15983 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
15984 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
15985 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
15986
15987 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
15988 floating point type, depending on whether long double is the IBM extended
15989 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
15990 we only define one variant of the built-in function, and switch the code
15991 when defining it, rather than defining two built-ins and using the
15992 overload table in rs6000-c.c to switch between the two. If we don't have
15993 the proper assembler, don't do this switch because CODE_FOR_*kf* and
15994 CODE_FOR_*tf* will be CODE_FOR_nothing. */
15995 if (FLOAT128_IEEE_P (TFmode))
15996 switch (icode)
15997 {
15998 default:
15999 break;
16000
16001 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16002 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16003 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16004 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16005 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16006 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16007 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16008 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16009 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16010 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16011 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16012 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16013 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16014 }
16015
16016 if (TARGET_DEBUG_BUILTIN)
16017 {
16018 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16019 const char *name2 = (icode != CODE_FOR_nothing)
16020 ? get_insn_name ((int) icode)
16021 : "nothing";
16022 const char *name3;
16023
16024 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16025 {
16026 default: name3 = "unknown"; break;
16027 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16028 case RS6000_BTC_UNARY: name3 = "unary"; break;
16029 case RS6000_BTC_BINARY: name3 = "binary"; break;
16030 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16031 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16032 case RS6000_BTC_ABS: name3 = "abs"; break;
16033 case RS6000_BTC_DST: name3 = "dst"; break;
16034 }
16035
16037 fprintf (stderr,
16038 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16039 (name1) ? name1 : "---", fcode,
16040 (name2) ? name2 : "---", (int) icode,
16041 name3,
16042 func_valid_p ? "" : ", not valid");
16043 }
16044
16045 if (!func_valid_p)
16046 {
16047 rs6000_invalid_builtin (fcode);
16048
16049 /* Given it is invalid, just generate a normal call. */
16050 return expand_call (exp, target, ignore);
16051 }
16052
16053 switch (fcode)
16054 {
16055 case RS6000_BUILTIN_RECIP:
16056 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16057
16058 case RS6000_BUILTIN_RECIPF:
16059 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16060
16061 case RS6000_BUILTIN_RSQRTF:
16062 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16063
16064 case RS6000_BUILTIN_RSQRT:
16065 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16066
16067 case POWER7_BUILTIN_BPERMD:
16068 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16069 ? CODE_FOR_bpermd_di
16070 : CODE_FOR_bpermd_si), exp, target);
16071
16072 case RS6000_BUILTIN_GET_TB:
16073 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16074 target);
16075
16076 case RS6000_BUILTIN_MFTB:
16077 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16078 ? CODE_FOR_rs6000_mftb_di
16079 : CODE_FOR_rs6000_mftb_si),
16080 target);
16081
16082 case RS6000_BUILTIN_MFFS:
16083 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16084
16085 case RS6000_BUILTIN_MTFSB0:
16086 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16087
16088 case RS6000_BUILTIN_MTFSB1:
16089 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16090
16091 case RS6000_BUILTIN_SET_FPSCR_RN:
16092 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16093 exp);
16094
16095 case RS6000_BUILTIN_SET_FPSCR_DRN:
16096 return
16097 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16098 exp);
16099
16100 case RS6000_BUILTIN_MFFSL:
16101 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16102
16103 case RS6000_BUILTIN_MTFSF:
16104 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16105
16106 case RS6000_BUILTIN_CPU_INIT:
16107 case RS6000_BUILTIN_CPU_IS:
16108 case RS6000_BUILTIN_CPU_SUPPORTS:
16109 return cpu_expand_builtin (fcode, exp, target);
16110
16111 case MISC_BUILTIN_SPEC_BARRIER:
16112 {
16113 emit_insn (gen_speculation_barrier ());
16114 return NULL_RTX;
16115 }
16116
16117 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16118 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16119 {
16120 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16121 : (int) CODE_FOR_altivec_lvsl_direct);
16122 machine_mode tmode = insn_data[icode2].operand[0].mode;
16123 machine_mode mode = insn_data[icode2].operand[1].mode;
16124 tree arg;
16125 rtx op, addr, pat;
16126
16127 gcc_assert (TARGET_ALTIVEC);
16128
16129 arg = CALL_EXPR_ARG (exp, 0);
16130 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16131 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16132 addr = memory_address (mode, op);
16133 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16134 op = addr;
16135 else
16136 {
16137 	    /* For the load case we need to negate the address.  */
16138 op = gen_reg_rtx (GET_MODE (addr));
16139 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16140 }
16141 op = gen_rtx_MEM (mode, op);
16142
16143 if (target == 0
16144 || GET_MODE (target) != tmode
16145 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16146 target = gen_reg_rtx (tmode);
16147
16148 pat = GEN_FCN (icode2) (target, op);
16149 if (!pat)
16150 return 0;
16151 emit_insn (pat);
16152
16153 return target;
16154 }
16155
16156 case ALTIVEC_BUILTIN_VCFUX:
16157 case ALTIVEC_BUILTIN_VCFSX:
16158 case ALTIVEC_BUILTIN_VCTUXS:
16159 case ALTIVEC_BUILTIN_VCTSXS:
16160 /* FIXME: There's got to be a nicer way to handle this case than
16161 constructing a new CALL_EXPR. */
16162 if (call_expr_nargs (exp) == 1)
16163 {
16164 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16165 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16166 }
16167 break;
16168
16169 /* For the pack and unpack int128 routines, fix up the builtin so it
16170 uses the correct IBM128 type. */
16171 case MISC_BUILTIN_PACK_IF:
16172 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16173 {
16174 icode = CODE_FOR_packtf;
16175 fcode = MISC_BUILTIN_PACK_TF;
16176 uns_fcode = (size_t)fcode;
16177 }
16178 break;
16179
16180 case MISC_BUILTIN_UNPACK_IF:
16181 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16182 {
16183 icode = CODE_FOR_unpacktf;
16184 fcode = MISC_BUILTIN_UNPACK_TF;
16185 uns_fcode = (size_t)fcode;
16186 }
16187 break;
16188
16189 default:
16190 break;
16191 }
16192
16193 if (TARGET_ALTIVEC)
16194 {
16195 ret = altivec_expand_builtin (exp, target, &success);
16196
16197 if (success)
16198 return ret;
16199 }
16200 if (TARGET_HTM)
16201 {
16202 ret = htm_expand_builtin (exp, target, &success);
16203
16204 if (success)
16205 return ret;
16206 }
16207
16208 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16209 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16210 gcc_assert (attr == RS6000_BTC_UNARY
16211 || attr == RS6000_BTC_BINARY
16212 || attr == RS6000_BTC_TERNARY
16213 || attr == RS6000_BTC_SPECIAL);
16214
16215 /* Handle simple unary operations. */
16216 d = bdesc_1arg;
16217 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16218 if (d->code == fcode)
16219 return rs6000_expand_unop_builtin (icode, exp, target);
16220
16221 /* Handle simple binary operations. */
16222 d = bdesc_2arg;
16223 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16224 if (d->code == fcode)
16225 return rs6000_expand_binop_builtin (icode, exp, target);
16226
16227 /* Handle simple ternary operations. */
16228 d = bdesc_3arg;
16229 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16230 if (d->code == fcode)
16231 return rs6000_expand_ternop_builtin (icode, exp, target);
16232
16233 /* Handle simple no-argument operations. */
16234 d = bdesc_0arg;
16235 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16236 if (d->code == fcode)
16237 return rs6000_expand_zeroop_builtin (icode, target);
16238
16239 gcc_unreachable ();
16240 }
16241
16244 /* Create a builtin vector type with a name, taking care not to give
16245    the canonical type a name.  */
16244
16245 static tree
16246 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16247 {
16248 tree result = build_vector_type (elt_type, num_elts);
16249
16250 /* Copy so we don't give the canonical type a name. */
16251 result = build_variant_type_copy (result);
16252
16253 add_builtin_type (name, result);
16254
16255 return result;
16256 }
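/* For example (illustrative), rs6000_vector_type ("__vector float",
   float_type_node, 4) returns a named variant of the canonical V4SF
   vector type, so the name shows up in diagnostics and debug info
   while the canonical type itself stays unnamed.  */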
16257
16258 static void
16259 rs6000_init_builtins (void)
16260 {
16261 tree tdecl;
16262 tree ftype;
16263 machine_mode mode;
16264
16265 if (TARGET_DEBUG_BUILTIN)
16266 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16267 (TARGET_ALTIVEC) ? ", altivec" : "",
16268 (TARGET_VSX) ? ", vsx" : "");
16269
16270 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16271 : "__vector long long",
16272 intDI_type_node, 2);
16273 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16274 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16275 intSI_type_node, 4);
16276 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16277 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16278 intHI_type_node, 8);
16279 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16280 intQI_type_node, 16);
16281
16282 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16283 unsigned_intQI_type_node, 16);
16284 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16285 unsigned_intHI_type_node, 8);
16286 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16287 unsigned_intSI_type_node, 4);
16288 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16289 ? "__vector unsigned long"
16290 : "__vector unsigned long long",
16291 unsigned_intDI_type_node, 2);
16292
16293 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16294
16295 const_str_type_node
16296 = build_pointer_type (build_qualified_type (char_type_node,
16297 TYPE_QUAL_CONST));
16298
16299 /* We use V1TI mode as a special container to hold __int128_t items that
16300 must live in VSX registers. */
16301 if (intTI_type_node)
16302 {
16303 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16304 intTI_type_node, 1);
16305 unsigned_V1TI_type_node
16306 = rs6000_vector_type ("__vector unsigned __int128",
16307 unsigned_intTI_type_node, 1);
16308 }
16309
16310 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16311 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16312 'vector unsigned short'. */
16313
16314 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16315 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16316 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16317 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16318 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16319
16320 long_integer_type_internal_node = long_integer_type_node;
16321 long_unsigned_type_internal_node = long_unsigned_type_node;
16322 long_long_integer_type_internal_node = long_long_integer_type_node;
16323 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16324 intQI_type_internal_node = intQI_type_node;
16325 uintQI_type_internal_node = unsigned_intQI_type_node;
16326 intHI_type_internal_node = intHI_type_node;
16327 uintHI_type_internal_node = unsigned_intHI_type_node;
16328 intSI_type_internal_node = intSI_type_node;
16329 uintSI_type_internal_node = unsigned_intSI_type_node;
16330 intDI_type_internal_node = intDI_type_node;
16331 uintDI_type_internal_node = unsigned_intDI_type_node;
16332 intTI_type_internal_node = intTI_type_node;
16333 uintTI_type_internal_node = unsigned_intTI_type_node;
16334 float_type_internal_node = float_type_node;
16335 double_type_internal_node = double_type_node;
16336 long_double_type_internal_node = long_double_type_node;
16337 dfloat64_type_internal_node = dfloat64_type_node;
16338 dfloat128_type_internal_node = dfloat128_type_node;
16339 void_type_internal_node = void_type_node;
16340
16341 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16342 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16343 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16344 format that uses a pair of doubles, depending on the switches and
16345 defaults.
16346
16349      If we don't support either 128-bit IBM double double or IEEE 128-bit
16350      floating point, we need to make sure the type is non-zero or else the
16351      self-tests fail during bootstrap.
16350
16351 Always create __ibm128 as a separate type, even if the current long double
16352 format is IBM extended double.
16353
16354 For IEEE 128-bit floating point, always create the type __ieee128. If the
16355 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16356 __ieee128. */
16357 if (TARGET_FLOAT128_TYPE)
16358 {
16359 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16360 ibm128_float_type_node = long_double_type_node;
16361 else
16362 {
16363 ibm128_float_type_node = make_node (REAL_TYPE);
16364 TYPE_PRECISION (ibm128_float_type_node) = 128;
16365 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16366 layout_type (ibm128_float_type_node);
16367 }
16368
16369 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16370 "__ibm128");
16371
16372 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16373 ieee128_float_type_node = long_double_type_node;
16374 else
16375 ieee128_float_type_node = float128_type_node;
16376
16377 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16378 "__ieee128");
16379 }
16380
16381 else
16382 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16383
16384 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16385 tree type node. */
16386 builtin_mode_to_type[QImode][0] = integer_type_node;
16387 builtin_mode_to_type[HImode][0] = integer_type_node;
16388 builtin_mode_to_type[SImode][0] = intSI_type_node;
16389 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16390 builtin_mode_to_type[DImode][0] = intDI_type_node;
16391 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16392 builtin_mode_to_type[TImode][0] = intTI_type_node;
16393 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16394 builtin_mode_to_type[SFmode][0] = float_type_node;
16395 builtin_mode_to_type[DFmode][0] = double_type_node;
16396 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16397 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16398 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16399 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16400 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16401 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16402 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16403 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16404 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16405 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16406 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16407 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16408 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16409 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16410 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16411 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16412 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
16413
16414 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16415 TYPE_NAME (bool_char_type_node) = tdecl;
16416
16417 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16418 TYPE_NAME (bool_short_type_node) = tdecl;
16419
16420 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16421 TYPE_NAME (bool_int_type_node) = tdecl;
16422
16423 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16424 TYPE_NAME (pixel_type_node) = tdecl;
16425
16426 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16427 bool_char_type_node, 16);
16428 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16429 bool_short_type_node, 8);
16430 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16431 bool_int_type_node, 4);
16432 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16433 ? "__vector __bool long"
16434 : "__vector __bool long long",
16435 bool_long_long_type_node, 2);
16436 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16437 pixel_type_node, 8);
16438
16439 /* Create Altivec and VSX builtins on machines with at least the
16440 general purpose extensions (970 and newer) to allow the use of
16441 the target attribute. */
16442 if (TARGET_EXTRA_BUILTINS)
16443 altivec_init_builtins ();
16444 if (TARGET_HTM)
16445 htm_init_builtins ();
16446
16447 if (TARGET_EXTRA_BUILTINS)
16448 rs6000_common_init_builtins ();
16449
16450 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16451 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16452 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16453
16454 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16455 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16456 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16457
16458 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16459 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16460 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16461
16462 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16463 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16464 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16465
16466 mode = (TARGET_64BIT) ? DImode : SImode;
16467 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16468 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16469 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16470
16471 ftype = build_function_type_list (unsigned_intDI_type_node,
16472 NULL_TREE);
16473 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16474
16475 if (TARGET_64BIT)
16476 ftype = build_function_type_list (unsigned_intDI_type_node,
16477 NULL_TREE);
16478 else
16479 ftype = build_function_type_list (unsigned_intSI_type_node,
16480 NULL_TREE);
16481 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16482
16483 ftype = build_function_type_list (double_type_node, NULL_TREE);
16484 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16485
16486 ftype = build_function_type_list (double_type_node, NULL_TREE);
16487 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16488
16489 ftype = build_function_type_list (void_type_node,
16490 intSI_type_node,
16491 NULL_TREE);
16492 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16493
16494 ftype = build_function_type_list (void_type_node,
16495 intSI_type_node,
16496 NULL_TREE);
16497 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16498
16499 ftype = build_function_type_list (void_type_node,
16500 intDI_type_node,
16501 NULL_TREE);
16502 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16503
16504 ftype = build_function_type_list (void_type_node,
16505 intDI_type_node,
16506 NULL_TREE);
16507 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16508
16509 ftype = build_function_type_list (void_type_node,
16510 intSI_type_node, double_type_node,
16511 NULL_TREE);
16512 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16513
16514 ftype = build_function_type_list (void_type_node, NULL_TREE);
16515 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16516 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16517 MISC_BUILTIN_SPEC_BARRIER);
16518
16519 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16520 NULL_TREE);
16521 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16522 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16523
16524 /* AIX libm provides clog as __clog. */
16527   if (TARGET_XCOFF
16528       && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16527 set_user_assembler_name (tdecl, "__clog");
16528
16529 #ifdef SUBTARGET_INIT_BUILTINS
16530 SUBTARGET_INIT_BUILTINS;
16531 #endif
16532 }
16533
16534 /* Returns the rs6000 builtin decl for CODE. */
16535
16536 static tree
16537 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16538 {
16539 HOST_WIDE_INT fnmask;
16540
16541 if (code >= RS6000_BUILTIN_COUNT)
16542 return error_mark_node;
16543
16544 fnmask = rs6000_builtin_info[code].mask;
16545 if ((fnmask & rs6000_builtin_mask) != fnmask)
16546 {
16547 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16548 return error_mark_node;
16549 }
16550
16551 return rs6000_builtin_decls[code];
16552 }
16553
16554 static void
16555 altivec_init_builtins (void)
16556 {
16557 const struct builtin_description *d;
16558 size_t i;
16559 tree ftype;
16560 tree decl;
16561 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16562
16563 tree pvoid_type_node = build_pointer_type (void_type_node);
16564
16565 tree pcvoid_type_node
16566 = build_pointer_type (build_qualified_type (void_type_node,
16567 TYPE_QUAL_CONST));
16568
16569 tree int_ftype_opaque
16570 = build_function_type_list (integer_type_node,
16571 opaque_V4SI_type_node, NULL_TREE);
16572 tree opaque_ftype_opaque
16573 = build_function_type_list (integer_type_node, NULL_TREE);
16574 tree opaque_ftype_opaque_int
16575 = build_function_type_list (opaque_V4SI_type_node,
16576 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16577 tree opaque_ftype_opaque_opaque_int
16578 = build_function_type_list (opaque_V4SI_type_node,
16579 opaque_V4SI_type_node, opaque_V4SI_type_node,
16580 integer_type_node, NULL_TREE);
16581 tree opaque_ftype_opaque_opaque_opaque
16582 = build_function_type_list (opaque_V4SI_type_node,
16583 opaque_V4SI_type_node, opaque_V4SI_type_node,
16584 opaque_V4SI_type_node, NULL_TREE);
16585 tree opaque_ftype_opaque_opaque
16586 = build_function_type_list (opaque_V4SI_type_node,
16587 opaque_V4SI_type_node, opaque_V4SI_type_node,
16588 NULL_TREE);
16589 tree int_ftype_int_opaque_opaque
16590 = build_function_type_list (integer_type_node,
16591 integer_type_node, opaque_V4SI_type_node,
16592 opaque_V4SI_type_node, NULL_TREE);
16593 tree int_ftype_int_v4si_v4si
16594 = build_function_type_list (integer_type_node,
16595 integer_type_node, V4SI_type_node,
16596 V4SI_type_node, NULL_TREE);
16597 tree int_ftype_int_v2di_v2di
16598 = build_function_type_list (integer_type_node,
16599 integer_type_node, V2DI_type_node,
16600 V2DI_type_node, NULL_TREE);
16601 tree void_ftype_v4si
16602 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16603 tree v8hi_ftype_void
16604 = build_function_type_list (V8HI_type_node, NULL_TREE);
16605 tree void_ftype_void
16606 = build_function_type_list (void_type_node, NULL_TREE);
16607 tree void_ftype_int
16608 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16609
16610 tree opaque_ftype_long_pcvoid
16611 = build_function_type_list (opaque_V4SI_type_node,
16612 long_integer_type_node, pcvoid_type_node,
16613 NULL_TREE);
16614 tree v16qi_ftype_long_pcvoid
16615 = build_function_type_list (V16QI_type_node,
16616 long_integer_type_node, pcvoid_type_node,
16617 NULL_TREE);
16618 tree v8hi_ftype_long_pcvoid
16619 = build_function_type_list (V8HI_type_node,
16620 long_integer_type_node, pcvoid_type_node,
16621 NULL_TREE);
16622 tree v4si_ftype_long_pcvoid
16623 = build_function_type_list (V4SI_type_node,
16624 long_integer_type_node, pcvoid_type_node,
16625 NULL_TREE);
16626 tree v4sf_ftype_long_pcvoid
16627 = build_function_type_list (V4SF_type_node,
16628 long_integer_type_node, pcvoid_type_node,
16629 NULL_TREE);
16630 tree v2df_ftype_long_pcvoid
16631 = build_function_type_list (V2DF_type_node,
16632 long_integer_type_node, pcvoid_type_node,
16633 NULL_TREE);
16634 tree v2di_ftype_long_pcvoid
16635 = build_function_type_list (V2DI_type_node,
16636 long_integer_type_node, pcvoid_type_node,
16637 NULL_TREE);
16638 tree v1ti_ftype_long_pcvoid
16639 = build_function_type_list (V1TI_type_node,
16640 long_integer_type_node, pcvoid_type_node,
16641 NULL_TREE);
16642
16643 tree void_ftype_opaque_long_pvoid
16644 = build_function_type_list (void_type_node,
16645 opaque_V4SI_type_node, long_integer_type_node,
16646 pvoid_type_node, NULL_TREE);
16647 tree void_ftype_v4si_long_pvoid
16648 = build_function_type_list (void_type_node,
16649 V4SI_type_node, long_integer_type_node,
16650 pvoid_type_node, NULL_TREE);
16651 tree void_ftype_v16qi_long_pvoid
16652 = build_function_type_list (void_type_node,
16653 V16QI_type_node, long_integer_type_node,
16654 pvoid_type_node, NULL_TREE);
16655
16656 tree void_ftype_v16qi_pvoid_long
16657 = build_function_type_list (void_type_node,
16658 V16QI_type_node, pvoid_type_node,
16659 long_integer_type_node, NULL_TREE);
16660
16661 tree void_ftype_v8hi_long_pvoid
16662 = build_function_type_list (void_type_node,
16663 V8HI_type_node, long_integer_type_node,
16664 pvoid_type_node, NULL_TREE);
16665 tree void_ftype_v4sf_long_pvoid
16666 = build_function_type_list (void_type_node,
16667 V4SF_type_node, long_integer_type_node,
16668 pvoid_type_node, NULL_TREE);
16669 tree void_ftype_v2df_long_pvoid
16670 = build_function_type_list (void_type_node,
16671 V2DF_type_node, long_integer_type_node,
16672 pvoid_type_node, NULL_TREE);
16673 tree void_ftype_v1ti_long_pvoid
16674 = build_function_type_list (void_type_node,
16675 V1TI_type_node, long_integer_type_node,
16676 pvoid_type_node, NULL_TREE);
16677 tree void_ftype_v2di_long_pvoid
16678 = build_function_type_list (void_type_node,
16679 V2DI_type_node, long_integer_type_node,
16680 pvoid_type_node, NULL_TREE);
16681 tree int_ftype_int_v8hi_v8hi
16682 = build_function_type_list (integer_type_node,
16683 integer_type_node, V8HI_type_node,
16684 V8HI_type_node, NULL_TREE);
16685 tree int_ftype_int_v16qi_v16qi
16686 = build_function_type_list (integer_type_node,
16687 integer_type_node, V16QI_type_node,
16688 V16QI_type_node, NULL_TREE);
16689 tree int_ftype_int_v4sf_v4sf
16690 = build_function_type_list (integer_type_node,
16691 integer_type_node, V4SF_type_node,
16692 V4SF_type_node, NULL_TREE);
16693 tree int_ftype_int_v2df_v2df
16694 = build_function_type_list (integer_type_node,
16695 integer_type_node, V2DF_type_node,
16696 V2DF_type_node, NULL_TREE);
16697 tree v2di_ftype_v2di
16698 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16699 tree v4si_ftype_v4si
16700 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16701 tree v8hi_ftype_v8hi
16702 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16703 tree v16qi_ftype_v16qi
16704 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16705 tree v4sf_ftype_v4sf
16706 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16707 tree v2df_ftype_v2df
16708 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16709 tree void_ftype_pcvoid_int_int
16710 = build_function_type_list (void_type_node,
16711 pcvoid_type_node, integer_type_node,
16712 integer_type_node, NULL_TREE);
16713
16714 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16715 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16716 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16717 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16718 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16719 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16720 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16721 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16722 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16723 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16724 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16725 ALTIVEC_BUILTIN_LVXL_V2DF);
16726 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16727 ALTIVEC_BUILTIN_LVXL_V2DI);
16728 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16729 ALTIVEC_BUILTIN_LVXL_V4SF);
16730 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16731 ALTIVEC_BUILTIN_LVXL_V4SI);
16732 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16733 ALTIVEC_BUILTIN_LVXL_V8HI);
16734 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16735 ALTIVEC_BUILTIN_LVXL_V16QI);
16736 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16737 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16738 ALTIVEC_BUILTIN_LVX_V1TI);
16739 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16740 ALTIVEC_BUILTIN_LVX_V2DF);
16741 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16742 ALTIVEC_BUILTIN_LVX_V2DI);
16743 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16744 ALTIVEC_BUILTIN_LVX_V4SF);
16745 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16746 ALTIVEC_BUILTIN_LVX_V4SI);
16747 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16748 ALTIVEC_BUILTIN_LVX_V8HI);
16749 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16750 ALTIVEC_BUILTIN_LVX_V16QI);
16751 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16752 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16753 ALTIVEC_BUILTIN_STVX_V2DF);
16754 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16755 ALTIVEC_BUILTIN_STVX_V2DI);
16756 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16757 ALTIVEC_BUILTIN_STVX_V4SF);
16758 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16759 ALTIVEC_BUILTIN_STVX_V4SI);
16760 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16761 ALTIVEC_BUILTIN_STVX_V8HI);
16762 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16763 ALTIVEC_BUILTIN_STVX_V16QI);
16764 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16765 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16766 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16767 ALTIVEC_BUILTIN_STVXL_V2DF);
16768 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16769 ALTIVEC_BUILTIN_STVXL_V2DI);
16770 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16771 ALTIVEC_BUILTIN_STVXL_V4SF);
16772 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16773 ALTIVEC_BUILTIN_STVXL_V4SI);
16774 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16775 ALTIVEC_BUILTIN_STVXL_V8HI);
16776 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16777 ALTIVEC_BUILTIN_STVXL_V16QI);
16778 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16779 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16780 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16781 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16782 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16783 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16784 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16785 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16786 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16787 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16788 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16789 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16790 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16791 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16792 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16793 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16794
16795 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16796 VSX_BUILTIN_LXVD2X_V2DF);
16797 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16798 VSX_BUILTIN_LXVD2X_V2DI);
16799 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16800 VSX_BUILTIN_LXVW4X_V4SF);
16801 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16802 VSX_BUILTIN_LXVW4X_V4SI);
16803 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16804 VSX_BUILTIN_LXVW4X_V8HI);
16805 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16806 VSX_BUILTIN_LXVW4X_V16QI);
16807 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16808 VSX_BUILTIN_STXVD2X_V2DF);
16809 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16810 VSX_BUILTIN_STXVD2X_V2DI);
16811 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16812 VSX_BUILTIN_STXVW4X_V4SF);
16813 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16814 VSX_BUILTIN_STXVW4X_V4SI);
16815 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16816 VSX_BUILTIN_STXVW4X_V8HI);
16817 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16818 VSX_BUILTIN_STXVW4X_V16QI);
16819
16820 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16821 VSX_BUILTIN_LD_ELEMREV_V2DF);
16822 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16823 VSX_BUILTIN_LD_ELEMREV_V2DI);
16824 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16825 VSX_BUILTIN_LD_ELEMREV_V4SF);
16826 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16827 VSX_BUILTIN_LD_ELEMREV_V4SI);
16828 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16829 VSX_BUILTIN_LD_ELEMREV_V8HI);
16830 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16831 VSX_BUILTIN_LD_ELEMREV_V16QI);
16832 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16833 VSX_BUILTIN_ST_ELEMREV_V2DF);
16834 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16835 VSX_BUILTIN_ST_ELEMREV_V1TI);
16836 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16837 VSX_BUILTIN_ST_ELEMREV_V2DI);
16838 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16839 VSX_BUILTIN_ST_ELEMREV_V4SF);
16840 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16841 VSX_BUILTIN_ST_ELEMREV_V4SI);
16842 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16843 VSX_BUILTIN_ST_ELEMREV_V8HI);
16844 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16845 VSX_BUILTIN_ST_ELEMREV_V16QI);
16846
16847 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16848 VSX_BUILTIN_VEC_LD);
16849 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16850 VSX_BUILTIN_VEC_ST);
16851 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16852 VSX_BUILTIN_VEC_XL);
16853 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16854 VSX_BUILTIN_VEC_XL_BE);
16855 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16856 VSX_BUILTIN_VEC_XST);
16857 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16858 VSX_BUILTIN_VEC_XST_BE);
16859
16860 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16861 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16862 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16863
16864 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16865 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16866 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16867 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16868 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16869 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16870 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16871 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16872 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16873 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16874 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16875 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16876
16877 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16878 ALTIVEC_BUILTIN_VEC_ADDE);
16879 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16880 ALTIVEC_BUILTIN_VEC_ADDEC);
16881 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16882 ALTIVEC_BUILTIN_VEC_CMPNE);
16883 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16884 ALTIVEC_BUILTIN_VEC_MUL);
16885 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16886 ALTIVEC_BUILTIN_VEC_SUBE);
16887 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16888 ALTIVEC_BUILTIN_VEC_SUBEC);
16889
16890 /* Cell builtins. */
16891 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16892 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16893 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16894 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16895
16896 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16897 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16898 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16899 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16900
16901 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16902 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16903 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16904 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16905
16906 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16907 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16908 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16909 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16910
16911 if (TARGET_P9_VECTOR)
16912 {
16913 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16914 P9V_BUILTIN_STXVL);
16915 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16916 P9V_BUILTIN_XST_LEN_R);
16917 }
16918
16919 /* Add the DST variants. */
16920 d = bdesc_dst;
16921 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16922 {
16923 HOST_WIDE_INT mask = d->mask;
16924
16925 /* It is expected that these dst built-in functions may have
16926 d->icode equal to CODE_FOR_nothing. */
16927 if ((mask & builtin_mask) != mask)
16928 {
16929 if (TARGET_DEBUG_BUILTIN)
16930 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16931 d->name);
16932 continue;
16933 }
16934 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16935 }
16936
16937 /* Initialize the predicates. */
16938 d = bdesc_altivec_preds;
16939 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16940 {
16941 machine_mode mode1;
16942 tree type;
16943 HOST_WIDE_INT mask = d->mask;
16944
16945 if ((mask & builtin_mask) != mask)
16946 {
16947 if (TARGET_DEBUG_BUILTIN)
16948 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16949 d->name);
16950 continue;
16951 }
16952
16953 if (rs6000_overloaded_builtin_p (d->code))
16954 mode1 = VOIDmode;
16955 else
16956 {
16957 /* Cannot define builtin if the instruction is disabled. */
16958 gcc_assert (d->icode != CODE_FOR_nothing);
16959 mode1 = insn_data[d->icode].operand[1].mode;
16960 }
16961
16962 switch (mode1)
16963 {
16964 case E_VOIDmode:
16965 type = int_ftype_int_opaque_opaque;
16966 break;
16967 case E_V2DImode:
16968 type = int_ftype_int_v2di_v2di;
16969 break;
16970 case E_V4SImode:
16971 type = int_ftype_int_v4si_v4si;
16972 break;
16973 case E_V8HImode:
16974 type = int_ftype_int_v8hi_v8hi;
16975 break;
16976 case E_V16QImode:
16977 type = int_ftype_int_v16qi_v16qi;
16978 break;
16979 case E_V4SFmode:
16980 type = int_ftype_int_v4sf_v4sf;
16981 break;
16982 case E_V2DFmode:
16983 type = int_ftype_int_v2df_v2df;
16984 break;
16985 default:
16986 gcc_unreachable ();
16987 }
16988
16989 def_builtin (d->name, type, d->code);
16990 }
16991
16992 /* Initialize the abs* operators. */
16993 d = bdesc_abs;
16994 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16995 {
16996 machine_mode mode0;
16997 tree type;
16998 HOST_WIDE_INT mask = d->mask;
16999
17000 if ((mask & builtin_mask) != mask)
17001 {
17002 if (TARGET_DEBUG_BUILTIN)
17003 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17004 d->name);
17005 continue;
17006 }
17007
17008 /* Cannot define builtin if the instruction is disabled. */
17009 gcc_assert (d->icode != CODE_FOR_nothing);
17010 mode0 = insn_data[d->icode].operand[0].mode;
17011
17012 switch (mode0)
17013 {
17014 case E_V2DImode:
17015 type = v2di_ftype_v2di;
17016 break;
17017 case E_V4SImode:
17018 type = v4si_ftype_v4si;
17019 break;
17020 case E_V8HImode:
17021 type = v8hi_ftype_v8hi;
17022 break;
17023 case E_V16QImode:
17024 type = v16qi_ftype_v16qi;
17025 break;
17026 case E_V4SFmode:
17027 type = v4sf_ftype_v4sf;
17028 break;
17029 case E_V2DFmode:
17030 type = v2df_ftype_v2df;
17031 break;
17032 default:
17033 gcc_unreachable ();
17034 }
17035
17036 def_builtin (d->name, type, d->code);
17037 }
17038
17039 /* Initialize target builtin that implements
17040 targetm.vectorize.builtin_mask_for_load. */
17041
17042 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17043 v16qi_ftype_long_pcvoid,
17044 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17045 BUILT_IN_MD, NULL, NULL_TREE);
17046 TREE_READONLY (decl) = 1;
17047 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17048 altivec_builtin_mask_for_load = decl;
17049
17050 /* Access to the vec_init patterns. */
17051 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17052 integer_type_node, integer_type_node,
17053 integer_type_node, NULL_TREE);
17054 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17055
17056 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17057 short_integer_type_node,
17058 short_integer_type_node,
17059 short_integer_type_node,
17060 short_integer_type_node,
17061 short_integer_type_node,
17062 short_integer_type_node,
17063 short_integer_type_node, NULL_TREE);
17064 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17065
17066 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17067 char_type_node, char_type_node,
17068 char_type_node, char_type_node,
17069 char_type_node, char_type_node,
17070 char_type_node, char_type_node,
17071 char_type_node, char_type_node,
17072 char_type_node, char_type_node,
17073 char_type_node, char_type_node,
17074 char_type_node, NULL_TREE);
17075 def_builtin ("__builtin_vec_init_v16qi", ftype,
17076 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17077
17078 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17079 float_type_node, float_type_node,
17080 float_type_node, NULL_TREE);
17081 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17082
17083 /* VSX builtins. */
17084 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17085 double_type_node, NULL_TREE);
17086 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17087
17088 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17089 intDI_type_node, NULL_TREE);
17090 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17091
17092 /* Access to the vec_set patterns. */
17093 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17094 intSI_type_node,
17095 integer_type_node, NULL_TREE);
17096 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17097
17098 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17099 intHI_type_node,
17100 integer_type_node, NULL_TREE);
17101 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17102
17103 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17104 intQI_type_node,
17105 integer_type_node, NULL_TREE);
17106 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17107
17108 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17109 float_type_node,
17110 integer_type_node, NULL_TREE);
17111 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17112
17113 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17114 double_type_node,
17115 integer_type_node, NULL_TREE);
17116 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17117
17118 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17119 intDI_type_node,
17120 integer_type_node, NULL_TREE);
17121 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17122
17123 /* Access to the vec_extract patterns. */
17124 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17125 integer_type_node, NULL_TREE);
17126 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17127
17128 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17129 integer_type_node, NULL_TREE);
17130 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17131
17132 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17133 integer_type_node, NULL_TREE);
17134 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17135
17136 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17137 integer_type_node, NULL_TREE);
17138 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17139
17140 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17141 integer_type_node, NULL_TREE);
17142 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17143
17144 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17145 integer_type_node, NULL_TREE);
17146 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17147
17148
17149 if (V1TI_type_node)
17150 {
17151 tree v1ti_ftype_long_pcvoid
17152 = build_function_type_list (V1TI_type_node,
17153 long_integer_type_node, pcvoid_type_node,
17154 NULL_TREE);
17155 tree void_ftype_v1ti_long_pvoid
17156 = build_function_type_list (void_type_node,
17157 V1TI_type_node, long_integer_type_node,
17158 pvoid_type_node, NULL_TREE);
17159 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17160 VSX_BUILTIN_LD_ELEMREV_V1TI);
17161 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17162 VSX_BUILTIN_LXVD2X_V1TI);
17163 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17164 VSX_BUILTIN_STXVD2X_V1TI);
17165 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17166 NULL_TREE, NULL_TREE);
17167 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17168 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17169 intTI_type_node,
17170 integer_type_node, NULL_TREE);
17171 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17172 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17173 integer_type_node, NULL_TREE);
17174 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17175 }
17176
17177 }
17178
17179 static void
17180 htm_init_builtins (void)
17181 {
17182 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17183 const struct builtin_description *d;
17184 size_t i;
17185
17186 d = bdesc_htm;
17187 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17188 {
17189 tree op[MAX_HTM_OPERANDS], type;
17190 HOST_WIDE_INT mask = d->mask;
17191 unsigned attr = rs6000_builtin_info[d->code].attr;
17192 bool void_func = (attr & RS6000_BTC_VOID);
17193 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17194 int nopnds = 0;
17195 tree gpr_type_node;
17196 tree rettype;
17197 tree argtype;
17198
17199 /* It is expected that these htm built-in functions may have
17200 d->icode equal to CODE_FOR_nothing. */
17201
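/* The SPR values are GPR-sized: with -m32 -mpowerpc64 the GPRs are 64 bits
   even though long is only 32 bits, so long long is needed there.  */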
17202 if (TARGET_32BIT && TARGET_POWERPC64)
17203 gpr_type_node = long_long_unsigned_type_node;
17204 else
17205 gpr_type_node = long_unsigned_type_node;
17206
17207 if (attr & RS6000_BTC_SPR)
17208 {
17209 rettype = gpr_type_node;
17210 argtype = gpr_type_node;
17211 }
17212 else if (d->code == HTM_BUILTIN_TABORTDC
17213 || d->code == HTM_BUILTIN_TABORTDCI)
17214 {
17215 rettype = unsigned_type_node;
17216 argtype = gpr_type_node;
17217 }
17218 else
17219 {
17220 rettype = unsigned_type_node;
17221 argtype = unsigned_type_node;
17222 }
17223
17224 if ((mask & builtin_mask) != mask)
17225 {
17226 if (TARGET_DEBUG_BUILTIN)
17227 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17228 continue;
17229 }
17230
17231 if (d->name == 0)
17232 {
17233 if (TARGET_DEBUG_BUILTIN)
17234 fprintf (stderr, "htm_builtin, bdesc_htm[%lu] no name\n",
17235 (long unsigned) i);
17236 continue;
17237 }
17238
17239 op[nopnds++] = (void_func) ? void_type_node : rettype;
17240
17241 if (attr_args == RS6000_BTC_UNARY)
17242 op[nopnds++] = argtype;
17243 else if (attr_args == RS6000_BTC_BINARY)
17244 {
17245 op[nopnds++] = argtype;
17246 op[nopnds++] = argtype;
17247 }
17248 else if (attr_args == RS6000_BTC_TERNARY)
17249 {
17250 op[nopnds++] = argtype;
17251 op[nopnds++] = argtype;
17252 op[nopnds++] = argtype;
17253 }
17254
17255 switch (nopnds)
17256 {
17257 case 1:
17258 type = build_function_type_list (op[0], NULL_TREE);
17259 break;
17260 case 2:
17261 type = build_function_type_list (op[0], op[1], NULL_TREE);
17262 break;
17263 case 3:
17264 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17265 break;
17266 case 4:
17267 type = build_function_type_list (op[0], op[1], op[2], op[3],
17268 NULL_TREE);
17269 break;
17270 default:
17271 gcc_unreachable ();
17272 }
17273
17274 def_builtin (d->name, type, d->code);
17275 }
17276 }
17277
17278 /* Hash function for builtin functions with up to 3 arguments and a return
17279 type. */
17280 hashval_t
17281 builtin_hasher::hash (builtin_hash_struct *bh)
17282 {
17283 unsigned ret = 0;
17284 int i;
17285
17286 for (i = 0; i < 4; i++)
17287 {
17288 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17289 ret = (ret * 2) + bh->uns_p[i];
17290 }
17291
17292 return ret;
17293 }
17294
17295 /* Compare builtin hash entries H1 and H2 for equivalence. */
17296 bool
17297 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17298 {
17299 return ((p1->mode[0] == p2->mode[0])
17300 && (p1->mode[1] == p2->mode[1])
17301 && (p1->mode[2] == p2->mode[2])
17302 && (p1->mode[3] == p2->mode[3])
17303 && (p1->uns_p[0] == p2->uns_p[0])
17304 && (p1->uns_p[1] == p2->uns_p[1])
17305 && (p1->uns_p[2] == p2->uns_p[2])
17306 && (p1->uns_p[3] == p2->uns_p[3]));
17307 }
17308
17309 /* Map types for builtin functions with an explicit return type and up to 3
17310 arguments.  Functions with fewer than 3 arguments use VOIDmode as the mode
17311 of each unused argument slot.  */
17312 static tree
17313 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17314 machine_mode mode_arg1, machine_mode mode_arg2,
17315 enum rs6000_builtins builtin, const char *name)
17316 {
17317 struct builtin_hash_struct h;
17318 struct builtin_hash_struct *h2;
17319 int num_args = 3;
17320 int i;
17321 tree ret_type = NULL_TREE;
17322 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17323
17324 /* Create builtin_hash_table. */
17325 if (builtin_hash_table == NULL)
17326 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17327
17328 h.type = NULL_TREE;
17329 h.mode[0] = mode_ret;
17330 h.mode[1] = mode_arg0;
17331 h.mode[2] = mode_arg1;
17332 h.mode[3] = mode_arg2;
17333 h.uns_p[0] = 0;
17334 h.uns_p[1] = 0;
17335 h.uns_p[2] = 0;
17336 h.uns_p[3] = 0;
17337
17338 /* If the builtin produces unsigned results or takes unsigned arguments,
17339 and it may be returned as a decl for the vectorizer (such as the widening
17340 multiplies and permutes), make sure the arguments and the return value
17341 carry the correct signedness.  */
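/* An illustrative case (a sketch): ALTIVEC_BUILTIN_VMULEUB, which multiplies
   the even unsigned bytes, is listed below among the unsigned 2 argument
   functions, so h.uns_p[0], h.uns_p[1] and h.uns_p[2] are set and the
   resulting signature is
     vector unsigned short (vector unsigned char, vector unsigned char)
   rather than the signed variant the raw modes alone would produce.  */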
17342 switch (builtin)
17343 {
17344 /* unsigned 1 argument functions. */
17345 case CRYPTO_BUILTIN_VSBOX:
17346 case CRYPTO_BUILTIN_VSBOX_BE:
17347 case P8V_BUILTIN_VGBBD:
17348 case MISC_BUILTIN_CDTBCD:
17349 case MISC_BUILTIN_CBCDTD:
17350 h.uns_p[0] = 1;
17351 h.uns_p[1] = 1;
17352 break;
17353
17354 /* unsigned 2 argument functions. */
17355 case ALTIVEC_BUILTIN_VMULEUB:
17356 case ALTIVEC_BUILTIN_VMULEUH:
17357 case P8V_BUILTIN_VMULEUW:
17358 case ALTIVEC_BUILTIN_VMULOUB:
17359 case ALTIVEC_BUILTIN_VMULOUH:
17360 case P8V_BUILTIN_VMULOUW:
17361 case CRYPTO_BUILTIN_VCIPHER:
17362 case CRYPTO_BUILTIN_VCIPHER_BE:
17363 case CRYPTO_BUILTIN_VCIPHERLAST:
17364 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17365 case CRYPTO_BUILTIN_VNCIPHER:
17366 case CRYPTO_BUILTIN_VNCIPHER_BE:
17367 case CRYPTO_BUILTIN_VNCIPHERLAST:
17368 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17369 case CRYPTO_BUILTIN_VPMSUMB:
17370 case CRYPTO_BUILTIN_VPMSUMH:
17371 case CRYPTO_BUILTIN_VPMSUMW:
17372 case CRYPTO_BUILTIN_VPMSUMD:
17373 case CRYPTO_BUILTIN_VPMSUM:
17374 case MISC_BUILTIN_ADDG6S:
17375 case MISC_BUILTIN_DIVWEU:
17376 case MISC_BUILTIN_DIVDEU:
17377 case VSX_BUILTIN_UDIV_V2DI:
17378 case ALTIVEC_BUILTIN_VMAXUB:
17379 case ALTIVEC_BUILTIN_VMINUB:
17380 case ALTIVEC_BUILTIN_VMAXUH:
17381 case ALTIVEC_BUILTIN_VMINUH:
17382 case ALTIVEC_BUILTIN_VMAXUW:
17383 case ALTIVEC_BUILTIN_VMINUW:
17384 case P8V_BUILTIN_VMAXUD:
17385 case P8V_BUILTIN_VMINUD:
17386 h.uns_p[0] = 1;
17387 h.uns_p[1] = 1;
17388 h.uns_p[2] = 1;
17389 break;
17390
17391 /* unsigned 3 argument functions. */
17392 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17393 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17394 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17395 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17396 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17397 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17398 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17399 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17400 case VSX_BUILTIN_VPERM_16QI_UNS:
17401 case VSX_BUILTIN_VPERM_8HI_UNS:
17402 case VSX_BUILTIN_VPERM_4SI_UNS:
17403 case VSX_BUILTIN_VPERM_2DI_UNS:
17404 case VSX_BUILTIN_XXSEL_16QI_UNS:
17405 case VSX_BUILTIN_XXSEL_8HI_UNS:
17406 case VSX_BUILTIN_XXSEL_4SI_UNS:
17407 case VSX_BUILTIN_XXSEL_2DI_UNS:
17408 case CRYPTO_BUILTIN_VPERMXOR:
17409 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17410 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17411 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17412 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17413 case CRYPTO_BUILTIN_VSHASIGMAW:
17414 case CRYPTO_BUILTIN_VSHASIGMAD:
17415 case CRYPTO_BUILTIN_VSHASIGMA:
17416 h.uns_p[0] = 1;
17417 h.uns_p[1] = 1;
17418 h.uns_p[2] = 1;
17419 h.uns_p[3] = 1;
17420 break;
17421
17422 /* signed permute functions with unsigned char mask. */
17423 case ALTIVEC_BUILTIN_VPERM_16QI:
17424 case ALTIVEC_BUILTIN_VPERM_8HI:
17425 case ALTIVEC_BUILTIN_VPERM_4SI:
17426 case ALTIVEC_BUILTIN_VPERM_4SF:
17427 case ALTIVEC_BUILTIN_VPERM_2DI:
17428 case ALTIVEC_BUILTIN_VPERM_2DF:
17429 case VSX_BUILTIN_VPERM_16QI:
17430 case VSX_BUILTIN_VPERM_8HI:
17431 case VSX_BUILTIN_VPERM_4SI:
17432 case VSX_BUILTIN_VPERM_4SF:
17433 case VSX_BUILTIN_VPERM_2DI:
17434 case VSX_BUILTIN_VPERM_2DF:
17435 h.uns_p[3] = 1;
17436 break;
17437
17438 /* unsigned args, signed return. */
17439 case VSX_BUILTIN_XVCVUXDSP:
17440 case VSX_BUILTIN_XVCVUXDDP_UNS:
17441 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17442 h.uns_p[1] = 1;
17443 break;
17444
17445 /* signed args, unsigned return. */
17446 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17447 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17448 case MISC_BUILTIN_UNPACK_TD:
17449 case MISC_BUILTIN_UNPACK_V1TI:
17450 h.uns_p[0] = 1;
17451 break;
17452
17453 /* unsigned arguments, bool return (compares). */
17454 case ALTIVEC_BUILTIN_VCMPEQUB:
17455 case ALTIVEC_BUILTIN_VCMPEQUH:
17456 case ALTIVEC_BUILTIN_VCMPEQUW:
17457 case P8V_BUILTIN_VCMPEQUD:
17458 case VSX_BUILTIN_CMPGE_U16QI:
17459 case VSX_BUILTIN_CMPGE_U8HI:
17460 case VSX_BUILTIN_CMPGE_U4SI:
17461 case VSX_BUILTIN_CMPGE_U2DI:
17462 case ALTIVEC_BUILTIN_VCMPGTUB:
17463 case ALTIVEC_BUILTIN_VCMPGTUH:
17464 case ALTIVEC_BUILTIN_VCMPGTUW:
17465 case P8V_BUILTIN_VCMPGTUD:
17466 h.uns_p[1] = 1;
17467 h.uns_p[2] = 1;
17468 break;
17469
17470 /* unsigned arguments for 128-bit pack instructions. */
17471 case MISC_BUILTIN_PACK_TD:
17472 case MISC_BUILTIN_PACK_V1TI:
17473 h.uns_p[1] = 1;
17474 h.uns_p[2] = 1;
17475 break;
17476
17477 /* unsigned second arguments (vector shift right). */
17478 case ALTIVEC_BUILTIN_VSRB:
17479 case ALTIVEC_BUILTIN_VSRH:
17480 case ALTIVEC_BUILTIN_VSRW:
17481 case P8V_BUILTIN_VSRD:
17482 h.uns_p[2] = 1;
17483 break;
17484
17485 default:
17486 break;
17487 }
17488
17489 /* Figure out how many args are present. */
17490 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17491 num_args--;
17492
17493 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17494 if (!ret_type && h.uns_p[0])
17495 ret_type = builtin_mode_to_type[h.mode[0]][0];
17496
17497 if (!ret_type)
17498 fatal_error (input_location,
17499 "internal error: builtin function %qs had an unexpected "
17500 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17501
17502 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17503 arg_type[i] = NULL_TREE;
17504
17505 for (i = 0; i < num_args; i++)
17506 {
17507 int m = (int) h.mode[i+1];
17508 int uns_p = h.uns_p[i+1];
17509
17510 arg_type[i] = builtin_mode_to_type[m][uns_p];
17511 if (!arg_type[i] && uns_p)
17512 arg_type[i] = builtin_mode_to_type[m][0];
17513
17514 if (!arg_type[i])
17515 fatal_error (input_location,
17516 "internal error: builtin function %qs, argument %d "
17517 "had unexpected argument type %qs", name, i,
17518 GET_MODE_NAME (m));
17519 }
17520
17521 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17522 if (*found == NULL)
17523 {
17524 h2 = ggc_alloc<builtin_hash_struct> ();
17525 *h2 = h;
17526 *found = h2;
17527
17528 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17529 arg_type[2], NULL_TREE);
17530 }
17531
17532 return (*found)->type;
17533 }
17534
17535 static void
17536 rs6000_common_init_builtins (void)
17537 {
17538 const struct builtin_description *d;
17539 size_t i;
17540
17541 tree opaque_ftype_opaque = NULL_TREE;
17542 tree opaque_ftype_opaque_opaque = NULL_TREE;
17543 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17544 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17545
17546 /* Create Altivec and VSX builtins on machines with at least the
17547 general purpose extensions (970 and newer) to allow the use of
17548 the target attribute. */
17549
17550 if (TARGET_EXTRA_BUILTINS)
17551 builtin_mask |= RS6000_BTM_COMMON;
17552
17553 /* Add the ternary operators. */
17554 d = bdesc_3arg;
17555 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17556 {
17557 tree type;
17558 HOST_WIDE_INT mask = d->mask;
17559
17560 if ((mask & builtin_mask) != mask)
17561 {
17562 if (TARGET_DEBUG_BUILTIN)
17563 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17564 continue;
17565 }
17566
17567 if (rs6000_overloaded_builtin_p (d->code))
17568 {
17569 if (! (type = opaque_ftype_opaque_opaque_opaque))
17570 type = opaque_ftype_opaque_opaque_opaque
17571 = build_function_type_list (opaque_V4SI_type_node,
17572 opaque_V4SI_type_node,
17573 opaque_V4SI_type_node,
17574 opaque_V4SI_type_node,
17575 NULL_TREE);
17576 }
17577 else
17578 {
17579 enum insn_code icode = d->icode;
17580 if (d->name == 0)
17581 {
17582 if (TARGET_DEBUG_BUILTIN)
17583 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%lu] no name\n",
17584 (long unsigned)i);
17585
17586 continue;
17587 }
17588
17589 if (icode == CODE_FOR_nothing)
17590 {
17591 if (TARGET_DEBUG_BUILTIN)
17592 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17593 d->name);
17594
17595 continue;
17596 }
17597
17598 type = builtin_function_type (insn_data[icode].operand[0].mode,
17599 insn_data[icode].operand[1].mode,
17600 insn_data[icode].operand[2].mode,
17601 insn_data[icode].operand[3].mode,
17602 d->code, d->name);
17603 }
17604
17605 def_builtin (d->name, type, d->code);
17606 }
17607
17608 /* Add the binary operators. */
17609 d = bdesc_2arg;
17610 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17611 {
17612 machine_mode mode0, mode1, mode2;
17613 tree type;
17614 HOST_WIDE_INT mask = d->mask;
17615
17616 if ((mask & builtin_mask) != mask)
17617 {
17618 if (TARGET_DEBUG_BUILTIN)
17619 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17620 continue;
17621 }
17622
17623 if (rs6000_overloaded_builtin_p (d->code))
17624 {
17625 if (! (type = opaque_ftype_opaque_opaque))
17626 type = opaque_ftype_opaque_opaque
17627 = build_function_type_list (opaque_V4SI_type_node,
17628 opaque_V4SI_type_node,
17629 opaque_V4SI_type_node,
17630 NULL_TREE);
17631 }
17632 else
17633 {
17634 enum insn_code icode = d->icode;
17635 if (d->name == 0)
17636 {
17637 if (TARGET_DEBUG_BUILTIN)
17638 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%lu] no name\n",
17639 (long unsigned)i);
17640
17641 continue;
17642 }
17643
17644 if (icode == CODE_FOR_nothing)
17645 {
17646 if (TARGET_DEBUG_BUILTIN)
17647 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17648 d->name);
17649
17650 continue;
17651 }
17652
17653 mode0 = insn_data[icode].operand[0].mode;
17654 mode1 = insn_data[icode].operand[1].mode;
17655 mode2 = insn_data[icode].operand[2].mode;
17656
17657 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17658 d->code, d->name);
17659 }
17660
17661 def_builtin (d->name, type, d->code);
17662 }
17663
17664 /* Add the simple unary operators. */
17665 d = bdesc_1arg;
17666 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17667 {
17668 machine_mode mode0, mode1;
17669 tree type;
17670 HOST_WIDE_INT mask = d->mask;
17671
17672 if ((mask & builtin_mask) != mask)
17673 {
17674 if (TARGET_DEBUG_BUILTIN)
17675 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17676 continue;
17677 }
17678
17679 if (rs6000_overloaded_builtin_p (d->code))
17680 {
17681 if (! (type = opaque_ftype_opaque))
17682 type = opaque_ftype_opaque
17683 = build_function_type_list (opaque_V4SI_type_node,
17684 opaque_V4SI_type_node,
17685 NULL_TREE);
17686 }
17687 else
17688 {
17689 enum insn_code icode = d->icode;
17690 if (d->name == 0)
17691 {
17692 if (TARGET_DEBUG_BUILTIN)
17693 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%lu] no name\n",
17694 (long unsigned)i);
17695
17696 continue;
17697 }
17698
17699 if (icode == CODE_FOR_nothing)
17700 {
17701 if (TARGET_DEBUG_BUILTIN)
17702 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17703 d->name);
17704
17705 continue;
17706 }
17707
17708 mode0 = insn_data[icode].operand[0].mode;
17709 mode1 = insn_data[icode].operand[1].mode;
17710
17711 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17712 d->code, d->name);
17713 }
17714
17715 def_builtin (d->name, type, d->code);
17716 }
17717
17718 /* Add the simple no-argument operators. */
17719 d = bdesc_0arg;
17720 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17721 {
17722 machine_mode mode0;
17723 tree type;
17724 HOST_WIDE_INT mask = d->mask;
17725
17726 if ((mask & builtin_mask) != mask)
17727 {
17728 if (TARGET_DEBUG_BUILTIN)
17729 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17730 continue;
17731 }
17732 if (rs6000_overloaded_builtin_p (d->code))
17733 {
17734 if (!opaque_ftype_opaque)
17735 opaque_ftype_opaque
17736 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17737 type = opaque_ftype_opaque;
17738 }
17739 else
17740 {
17741 enum insn_code icode = d->icode;
17742 if (d->name == 0)
17743 {
17744 if (TARGET_DEBUG_BUILTIN)
17745 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17746 (long unsigned) i);
17747 continue;
17748 }
17749 if (icode == CODE_FOR_nothing)
17750 {
17751 if (TARGET_DEBUG_BUILTIN)
17752 fprintf (stderr,
17753 "rs6000_builtin, skip no-argument %s (no code)\n",
17754 d->name);
17755 continue;
17756 }
17757 mode0 = insn_data[icode].operand[0].mode;
17758 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17759 d->code, d->name);
17760 }
17761 def_builtin (d->name, type, d->code);
17762 }
17763 }
17764
17765 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
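/* For example, with the default libcalls below a TFmode add becomes a call
   to __gcc_qadd, while under -mxl-compat it becomes _xlqadd instead.  */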
17766 static void
17767 init_float128_ibm (machine_mode mode)
17768 {
17769 if (!TARGET_XL_COMPAT)
17770 {
17771 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17772 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17773 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17774 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17775
17776 if (!TARGET_HARD_FLOAT)
17777 {
17778 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17779 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17780 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17781 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17782 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17783 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17784 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17785 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17786
17787 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17788 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17789 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17790 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17791 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17792 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17793 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17794 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17795 }
17796 }
17797 else
17798 {
17799 set_optab_libfunc (add_optab, mode, "_xlqadd");
17800 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17801 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17802 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17803 }
17804
17805 /* Add various conversions for IFmode to use the traditional TFmode
17806 names. */
17807 if (mode == IFmode)
17808 {
17809 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17810 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17811 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17812 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17813 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17814 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17815
17816 if (TARGET_POWERPC64)
17817 {
17818 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17819 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17820 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17821 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17822 }
17823 }
17824 }
17825
17826 /* Create a decl for either complex long double multiply or complex long double
17827 divide when long double is IEEE 128-bit floating point.  We can't use
17828 __multc3 and __divtc3 because those names were already taken by the original
17829 IBM extended double long double.  The complex multiply/divide functions are
17830 encoded as builtin functions with a complex result and 4 scalar inputs.  */
17831
17832 static void
17833 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17834 {
17835 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17836 name, NULL_TREE);
17837
17838 set_builtin_decl (fncode, fndecl, true);
17839
17840 if (TARGET_DEBUG_BUILTIN)
17841 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17842
17843 return;
17844 }
17845
17846 /* Set up IEEE 128-bit floating point routines. Use different names if the
17847 arguments can be passed in a vector register. The historical PowerPC
17848 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17849 continue to use that if we aren't using vector registers to pass IEEE
17850 128-bit floating point. */
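/* For example, a KFmode add becomes a call to __addkf3 when IEEE 128-bit
   values are passed in vector registers, but _q_add under the historical
   convention.  */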
17851
17852 static void
17853 init_float128_ieee (machine_mode mode)
17854 {
17855 if (FLOAT128_VECTOR_P (mode))
17856 {
17857 static bool complex_muldiv_init_p = false;
17858
17859 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
17860 we have clone or target attributes, this will be called a second
17861 time. We want to create the built-in function only once. */
17862 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
17863 {
17864 complex_muldiv_init_p = true;
17865 built_in_function fncode_mul =
17866 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17867 - MIN_MODE_COMPLEX_FLOAT);
17868 built_in_function fncode_div =
17869 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17870 - MIN_MODE_COMPLEX_FLOAT);
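/* The generic complex multiply/divide built-ins are laid out one per
   complex float mode, so the offset of TCmode from MIN_MODE_COMPLEX_FLOAT
   selects the slot for complex long double.  */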
17871
17872 tree fntype = build_function_type_list (complex_long_double_type_node,
17873 long_double_type_node,
17874 long_double_type_node,
17875 long_double_type_node,
17876 long_double_type_node,
17877 NULL_TREE);
17878
17879 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17880 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17881 }
17882
17883 set_optab_libfunc (add_optab, mode, "__addkf3");
17884 set_optab_libfunc (sub_optab, mode, "__subkf3");
17885 set_optab_libfunc (neg_optab, mode, "__negkf2");
17886 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17887 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17888 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17889 set_optab_libfunc (abs_optab, mode, "__abskf2");
17890 set_optab_libfunc (powi_optab, mode, "__powikf2");
17891
17892 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17893 set_optab_libfunc (ne_optab, mode, "__nekf2");
17894 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17895 set_optab_libfunc (ge_optab, mode, "__gekf2");
17896 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17897 set_optab_libfunc (le_optab, mode, "__lekf2");
17898 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17899
17900 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17901 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17902 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17903 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17904
17905 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17906 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17907 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17908
17909 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17910 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17911 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17912
17913 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
17914 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
17915 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
17916 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
17917 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
17918 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
17919
17920 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17921 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17922 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17923 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17924
17925 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17926 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17927 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17928 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17929
17930 if (TARGET_POWERPC64)
17931 {
17932 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17933 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17934 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17935 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17936 }
17937 }
17938
17939 else
17940 {
17941 set_optab_libfunc (add_optab, mode, "_q_add");
17942 set_optab_libfunc (sub_optab, mode, "_q_sub");
17943 set_optab_libfunc (neg_optab, mode, "_q_neg");
17944 set_optab_libfunc (smul_optab, mode, "_q_mul");
17945 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17946 if (TARGET_PPC_GPOPT)
17947 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17948
17949 set_optab_libfunc (eq_optab, mode, "_q_feq");
17950 set_optab_libfunc (ne_optab, mode, "_q_fne");
17951 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17952 set_optab_libfunc (ge_optab, mode, "_q_fge");
17953 set_optab_libfunc (lt_optab, mode, "_q_flt");
17954 set_optab_libfunc (le_optab, mode, "_q_fle");
17955
17956 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17957 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17958 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17959 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17960 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17961 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17962 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17963 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17964 }
17965 }
17966
17967 static void
17968 rs6000_init_libfuncs (void)
17969 {
17970 /* __float128 support. */
17971 if (TARGET_FLOAT128_TYPE)
17972 {
17973 init_float128_ibm (IFmode);
17974 init_float128_ieee (KFmode);
17975 }
17976
17977 /* AIX/Darwin/64-bit Linux quad floating point routines. */
17978 if (TARGET_LONG_DOUBLE_128)
17979 {
17980 if (!TARGET_IEEEQUAD)
17981 init_float128_ibm (TFmode);
17982
17983 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
17984 else
17985 init_float128_ieee (TFmode);
17986 }
17987 }
17988
17989 /* Emit a potentially record-form instruction, setting DST from SRC.
17990 If DOT is 0, that is all; otherwise, set CCREG to the result of the
17991 signed comparison of DST with zero. If DOT is 1, the generated RTL
17992 doesn't care about the DST result; if DOT is 2, it does. If CCREG
17993 is CR0, do a single dot insn (as a PARALLEL); otherwise, do a SET and
17994 a separate COMPARE. */
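/* A sketch of the RTL this emits (illustrative only): with DOT == 1 we
   generate
     (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                (clobber DST)])
   and with DOT == 2
     (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                (set DST SRC)])
   matching the record-form patterns in the machine description.  */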
17995
17996 void
17997 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
17998 {
17999 if (dot == 0)
18000 {
18001 emit_move_insn (dst, src);
18002 return;
18003 }
18004
18005 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18006 {
18007 emit_move_insn (dst, src);
18008 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18009 return;
18010 }
18011
18012 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18013 if (dot == 1)
18014 {
18015 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18016 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18017 }
18018 else
18019 {
18020 rtx set = gen_rtx_SET (dst, src);
18021 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18022 }
18023 }
18024
18025 \f
18026 /* A validation routine: say whether CODE, a condition code, and MODE
18027 match. The other alternatives either don't make sense or should
18028 never be generated. */
18029
18030 void
18031 validate_condition_mode (enum rtx_code code, machine_mode mode)
18032 {
18033 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18034 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18035 && GET_MODE_CLASS (mode) == MODE_CC);
18036
18037 /* These don't make sense. */
18038 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18039 || mode != CCUNSmode);
18040
18041 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18042 || mode == CCUNSmode);
18043
18044 gcc_assert (mode == CCFPmode
18045 || (code != ORDERED && code != UNORDERED
18046 && code != UNEQ && code != LTGT
18047 && code != UNGT && code != UNLT
18048 && code != UNGE && code != UNLE));
18049
18050 /* These should never be generated except for
18051 flag_finite_math_only. */
18052 gcc_assert (mode != CCFPmode
18053 || flag_finite_math_only
18054 || (code != LE && code != GE
18055 && code != UNEQ && code != LTGT
18056 && code != UNGT && code != UNLT));
18057
18058 /* These are invalid; the information is not there. */
18059 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18060 }
18061
18062 \f
18063 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18064 rldicl, rldicr, or rldic instruction in mode MODE. If so, if E is
18065 not zero, store there the bit offset (counted from the right) where
18066 the single stretch of 1 bits begins; and similarly for B, the bit
18067 offset where it ends. */
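/* Worked example (a sketch): in SImode, 0x00ffff00 has its single run of
   ones beginning at bit 8 and ending at bit 23, so *E is set to 8 and *B
   to 23.  A wrap-around mask such as 0xff0000ff is also accepted: the run
   begins at bit 24 and wraps around through bits 31 and 0 to end at bit 7,
   giving *E == 24 and *B == 7 (*B < *E marks the wrap).  */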
18068
18069 bool
18070 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18071 {
18072 unsigned HOST_WIDE_INT val = INTVAL (mask);
18073 unsigned HOST_WIDE_INT bit;
18074 int nb, ne;
18075 int n = GET_MODE_PRECISION (mode);
18076
18077 if (mode != DImode && mode != SImode)
18078 return false;
18079
18080 if (INTVAL (mask) >= 0)
18081 {
18082 bit = val & -val;
18083 ne = exact_log2 (bit);
18084 nb = exact_log2 (val + bit);
18085 }
18086 else if (val + 1 == 0)
18087 {
18088 nb = n;
18089 ne = 0;
18090 }
18091 else if (val & 1)
18092 {
18093 val = ~val;
18094 bit = val & -val;
18095 nb = exact_log2 (bit);
18096 ne = exact_log2 (val + bit);
18097 }
18098 else
18099 {
18100 bit = val & -val;
18101 ne = exact_log2 (bit);
18102 if (val + bit == 0)
18103 nb = n;
18104 else
18105 nb = 0;
18106 }
18107
18108 nb--;
18109
18110 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18111 return false;
18112
18113 if (b)
18114 *b = nb;
18115 if (e)
18116 *e = ne;
18117
18118 return true;
18119 }
18120
18121 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18122 or rldicr instruction, to implement an AND with it in mode MODE. */
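/* For instance (a sketch): in DImode, 0x00000000ffffffff is fine (rldicl,
   ne == 0), 0xffffffff00000000 is fine (rldicr, nb == 63), and
   0x00000000ffff0000 is fine (rlwinm, the run sits in the low 32 bits and
   does not wrap), but a wrap-around mask like 0xff000000000000ff is not.  */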
18123
18124 bool
18125 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18126 {
18127 int nb, ne;
18128
18129 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18130 return false;
18131
18132 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18133 does not wrap. */
18134 if (mode == DImode)
18135 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18136
18137 /* For SImode, rlwinm can do everything. */
18138 if (mode == SImode)
18139 return (nb < 32 && ne < 32);
18140
18141 return false;
18142 }
18143
18144 /* Return the instruction template for an AND with mask in mode MODE, with
18145 operands OPERANDS. If DOT is true, make it a record-form instruction. */
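/* For example (a sketch): a DImode AND with 0x00000000ffffffff (ne == 0,
   nb == 31) yields "rldicl %0,%1,0,32", i.e. clear the high 32 bits.  */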
18146
18147 const char *
18148 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18149 {
18150 int nb, ne;
18151
18152 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18153 gcc_unreachable ();
18154
18155 if (mode == DImode && ne == 0)
18156 {
18157 operands[3] = GEN_INT (63 - nb);
18158 if (dot)
18159 return "rldicl. %0,%1,0,%3";
18160 return "rldicl %0,%1,0,%3";
18161 }
18162
18163 if (mode == DImode && nb == 63)
18164 {
18165 operands[3] = GEN_INT (63 - ne);
18166 if (dot)
18167 return "rldicr. %0,%1,0,%3";
18168 return "rldicr %0,%1,0,%3";
18169 }
18170
18171 if (nb < 32 && ne < 32)
18172 {
18173 operands[3] = GEN_INT (31 - nb);
18174 operands[4] = GEN_INT (31 - ne);
18175 if (dot)
18176 return "rlwinm. %0,%1,0,%3,%4";
18177 return "rlwinm %0,%1,0,%3,%4";
18178 }
18179
18180 gcc_unreachable ();
18181 }
18182
18183 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18184 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18185 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
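/* Example (a sketch): in SImode, an ASHIFT by 4 under the mask 0x0000fff0
   qualifies: the run of ones goes from bit 4 to bit 15 and the bits the
   shift zeroes lie outside the mask, so one rlwinm does the whole
   shift-and-mask.  */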
18186
18187 bool
18188 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18189 {
18190 int nb, ne;
18191
18192 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18193 return false;
18194
18195 int n = GET_MODE_PRECISION (mode);
18196 int sh = -1;
18197
18198 if (CONST_INT_P (XEXP (shift, 1)))
18199 {
18200 sh = INTVAL (XEXP (shift, 1));
18201 if (sh < 0 || sh >= n)
18202 return false;
18203 }
18204
18205 rtx_code code = GET_CODE (shift);
18206
18207 /* Convert any shift by 0 to a rotate, to simplify the code below.  */
18208 if (sh == 0)
18209 code = ROTATE;
18210
18211 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18212 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18213 code = ASHIFT;
18214 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18215 {
18216 code = LSHIFTRT;
18217 sh = n - sh;
18218 }
18219
18220 /* DImode rotates need rld*. */
18221 if (mode == DImode && code == ROTATE)
18222 return (nb == 63 || ne == 0 || ne == sh);
18223
18224 /* SImode rotates need rlw*. */
18225 if (mode == SImode && code == ROTATE)
18226 return (nb < 32 && ne < 32 && sh < 32);
18227
18228 /* Wrap-around masks are only okay for rotates. */
18229 if (ne > nb)
18230 return false;
18231
18232 /* Variable shifts are only okay for rotates. */
18233 if (sh < 0)
18234 return false;
18235
18236 /* Don't allow ASHIFT if the mask is wrong for that. */
18237 if (code == ASHIFT && ne < sh)
18238 return false;
18239
18240 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18241 if the mask is wrong for that. */
18242 if (nb < 32 && ne < 32 && sh < 32
18243 && !(code == LSHIFTRT && nb >= 32 - sh))
18244 return true;
18245
18246 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18247 if the mask is wrong for that. */
18248 if (code == LSHIFTRT)
18249 sh = 64 - sh;
18250 if (nb == 63 || ne == 0 || ne == sh)
18251 return !(code == LSHIFTRT && nb >= sh);
18252
18253 return false;
18254 }
18255
18256 /* Return the instruction template for a shift with mask in mode MODE, with
18257 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18258
18259 const char *
18260 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18261 {
18262 int nb, ne;
18263
18264 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18265 gcc_unreachable ();
18266
18267 if (mode == DImode && ne == 0)
18268 {
18269 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18270 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18271 operands[3] = GEN_INT (63 - nb);
18272 if (dot)
18273 return "rld%I2cl. %0,%1,%2,%3";
18274 return "rld%I2cl %0,%1,%2,%3";
18275 }
18276
18277 if (mode == DImode && nb == 63)
18278 {
18279 operands[3] = GEN_INT (63 - ne);
18280 if (dot)
18281 return "rld%I2cr. %0,%1,%2,%3";
18282 return "rld%I2cr %0,%1,%2,%3";
18283 }
18284
18285 if (mode == DImode
18286 && GET_CODE (operands[4]) != LSHIFTRT
18287 && CONST_INT_P (operands[2])
18288 && ne == INTVAL (operands[2]))
18289 {
18290 operands[3] = GEN_INT (63 - nb);
18291 if (dot)
18292 return "rld%I2c. %0,%1,%2,%3";
18293 return "rld%I2c %0,%1,%2,%3";
18294 }
18295
18296 if (nb < 32 && ne < 32)
18297 {
18298 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18299 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18300 operands[3] = GEN_INT (31 - nb);
18301 operands[4] = GEN_INT (31 - ne);
18302 /* This insn can also be a 64-bit rotate with a mask that really makes
18303 it just a shift right (with mask); the %h in the templates below
18304 adjusts for that situation (the shift count is >= 32 in that case).  */
18305 if (dot)
18306 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18307 return "rlw%I2nm %0,%1,%h2,%3,%4";
18308 }
18309
18310 gcc_unreachable ();
18311 }
18312
18313 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18314 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18315 ASHIFT, or LSHIFTRT) in mode MODE. */
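/* Example (a sketch): inserting (y << 8) under the SImode mask 0x00ffff00
   qualifies (ne == sh == 8, nb == 23), so a single rlwimi performs the
   shift-and-insert.  */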
18316
18317 bool
18318 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18319 {
18320 int nb, ne;
18321
18322 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18323 return false;
18324
18325 int n = GET_MODE_PRECISION (mode);
18326
18327 int sh = INTVAL (XEXP (shift, 1));
18328 if (sh < 0 || sh >= n)
18329 return false;
18330
18331 rtx_code code = GET_CODE (shift);
18332
18333 /* Convert any shift by 0 to a rotate, to simplify the code below.  */
18334 if (sh == 0)
18335 code = ROTATE;
18336
18337 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18338 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18339 code = ASHIFT;
18340 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18341 {
18342 code = LSHIFTRT;
18343 sh = n - sh;
18344 }
18345
18346 /* DImode rotates need rldimi. */
18347 if (mode == DImode && code == ROTATE)
18348 return (ne == sh);
18349
18350 /* SImode rotates need rlwimi. */
18351 if (mode == SImode && code == ROTATE)
18352 return (nb < 32 && ne < 32 && sh < 32);
18353
18354 /* Wrap-around masks are only okay for rotates. */
18355 if (ne > nb)
18356 return false;
18357
18358 /* Don't allow ASHIFT if the mask is wrong for that. */
18359 if (code == ASHIFT && ne < sh)
18360 return false;
18361
18362 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18363 if the mask is wrong for that. */
18364 if (nb < 32 && ne < 32 && sh < 32
18365 && !(code == LSHIFTRT && nb >= 32 - sh))
18366 return true;
18367
18368 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18369 if the mask is wrong for that. */
18370 if (code == LSHIFTRT)
18371 sh = 64 - sh;
18372 if (ne == sh)
18373 return !(code == LSHIFTRT && nb >= sh);
18374
18375 return false;
18376 }
18377
18378 /* Return the instruction template for an insert with mask in mode MODE, with
18379 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18380
18381 const char *
18382 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18383 {
18384 int nb, ne;
18385
18386 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18387 gcc_unreachable ();
18388
18389 /* Prefer rldimi because rlwimi is cracked. */
18390 if (TARGET_POWERPC64
18391 && (!dot || mode == DImode)
18392 && GET_CODE (operands[4]) != LSHIFTRT
18393 && ne == INTVAL (operands[2]))
18394 {
18395 operands[3] = GEN_INT (63 - nb);
18396 if (dot)
18397 return "rldimi. %0,%1,%2,%3";
18398 return "rldimi %0,%1,%2,%3";
18399 }
18400
18401 if (nb < 32 && ne < 32)
18402 {
18403 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18404 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18405 operands[3] = GEN_INT (31 - nb);
18406 operands[4] = GEN_INT (31 - ne);
18407 if (dot)
18408 return "rlwimi. %0,%1,%2,%3,%4";
18409 return "rlwimi %0,%1,%2,%3,%4";
18410 }
18411
18412 gcc_unreachable ();
18413 }
18414
18415 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18416 using two machine instructions. */
18417
18418 bool
18419 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18420 {
18421 /* There are two kinds of AND we can handle with two insns:
18422 1) those we can do with two rl* insns;
18423 2) ori[s];xori[s].
18424
18425 We do not handle that last case yet. */
18426
18427 /* If there is just one stretch of ones, we can do it. */
18428 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18429 return true;
18430
18431 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18432 one insn, we can do the whole thing with two. */
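/* Worked example (illustrative): for val = 0xf3c (binary 1111 0011 1100)
   we get bit1 = 0x004 (the lowest set bit), bit2 = 0x040 (the lowest bit
   of the hole), val1 = 0xf00 (val with its bottom run of ones cleared)
   and bit3 = 0x100 (the lowest bit above the hole).  Then
   val + bit3 - bit2 = 0xffc, a single stretch of ones, so 0xf3c is a
   valid two-insn AND mask.  */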
18433 unsigned HOST_WIDE_INT val = INTVAL (c);
18434 unsigned HOST_WIDE_INT bit1 = val & -val;
18435 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18436 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18437 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18438 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18439 }
18440
18441 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18442 If EXPAND is true, split rotate-and-mask instructions we generate to
18443 their constituent parts as well (this is used during expand); if DOT
18444 is 1, make the last insn a record-form instruction clobbering the
18445 destination GPR and setting the CC reg (from operands[3]); if 2, set
18446 that GPR as well as the CC reg. */
18447
18448 void
18449 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18450 {
18451 gcc_assert (!(expand && dot));
18452
18453 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18454
18455 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18456 shift right. This generates better code than doing the masks without
18457 shifts, or shifting first right and then left. */
18458 int nb, ne;
18459 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18460 {
18461 gcc_assert (mode == DImode);
18462
18463 int shift = 63 - nb;
18464 if (expand)
18465 {
18466 rtx tmp1 = gen_reg_rtx (DImode);
18467 rtx tmp2 = gen_reg_rtx (DImode);
18468 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18469 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18470 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18471 }
18472 else
18473 {
18474 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18475 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18476 emit_move_insn (operands[0], tmp);
18477 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18478 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18479 }
18480 return;
18481 }
18482
18483 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18484 that does the rest. */
18485 unsigned HOST_WIDE_INT bit1 = val & -val;
18486 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18487 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18488 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18489
18490 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18491 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
18492
18493 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18494
18495 /* Two "no-rotate"-and-mask instructions, for SImode. */
18496 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18497 {
18498 gcc_assert (mode == SImode);
18499
18500 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18501 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18502 emit_move_insn (reg, tmp);
18503 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18504 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18505 return;
18506 }
18507
18508 gcc_assert (mode == DImode);
18509
18510 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18511 insns; we have to do the first in SImode, because it wraps. */
18512 if (mask2 <= 0xffffffff
18513 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18514 {
18515 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18516 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18517 GEN_INT (mask1));
18518 rtx reg_low = gen_lowpart (SImode, reg);
18519 emit_move_insn (reg_low, tmp);
18520 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18521 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18522 return;
18523 }
18524
18525 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18526 at the top end), rotate back and clear the other hole. */
18527 int right = exact_log2 (bit3);
18528 int left = 64 - right;
18529
18530 /* Rotate the mask too. */
18531 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
18532
18533 if (expand)
18534 {
18535 rtx tmp1 = gen_reg_rtx (DImode);
18536 rtx tmp2 = gen_reg_rtx (DImode);
18537 rtx tmp3 = gen_reg_rtx (DImode);
18538 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18539 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18540 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18541 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18542 }
18543 else
18544 {
18545 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18546 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18547 emit_move_insn (operands[0], tmp);
18548 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18549 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18550 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18551 }
18552 }
18553 \f
18554 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18555 for lfq and stfq insns, iff the registers are hard registers. */
18556
18557 int
18558 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18559 {
18560 /* We might have been passed a SUBREG. */
18561 if (!REG_P (reg1) || !REG_P (reg2))
18562 return 0;
18563
18564 /* We might have been passed non-floating-point registers. */
18565 if (!FP_REGNO_P (REGNO (reg1))
18566 || !FP_REGNO_P (REGNO (reg2)))
18567 return 0;
18568
18569 return (REGNO (reg1) == REGNO (reg2) - 1);
18570 }
18571
18572 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18573 addr1 and addr2 must be in consecutive memory locations
18574 (addr2 == addr1 + 8). */
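/* For example (illustrative RTL): (plus (reg r9) (const_int 16)) and
   (plus (reg r9) (const_int 24)) qualify, since they share a base
   register and differ by 8; (reg r9) and (plus (reg r10) (const_int 8))
   do not, because the base registers differ.  */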
18575
18576 int
18577 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18578 {
18579 rtx addr1, addr2;
18580 unsigned int reg1, reg2;
18581 int offset1, offset2;
18582
18583 /* The mems cannot be volatile. */
18584 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18585 return 0;
18586
18587 addr1 = XEXP (mem1, 0);
18588 addr2 = XEXP (mem2, 0);
18589
18590 /* Extract an offset (if used) from the first addr. */
18591 if (GET_CODE (addr1) == PLUS)
18592 {
18593 /* If not a REG, return zero. */
18594 if (!REG_P (XEXP (addr1, 0)))
18595 return 0;
18596 else
18597 {
18598 reg1 = REGNO (XEXP (addr1, 0));
18599 /* The offset must be constant! */
18600 if (!CONST_INT_P (XEXP (addr1, 1)))
18601 return 0;
18602 offset1 = INTVAL (XEXP (addr1, 1));
18603 }
18604 }
18605 else if (!REG_P (addr1))
18606 return 0;
18607 else
18608 {
18609 reg1 = REGNO (addr1);
18610 /* This was a simple (mem (reg)) expression. Offset is 0. */
18611 offset1 = 0;
18612 }
18613
18614 /* And now for the second addr. */
18615 if (GET_CODE (addr2) == PLUS)
18616 {
18617 /* If not a REG, return zero. */
18618 if (!REG_P (XEXP (addr2, 0)))
18619 return 0;
18620 else
18621 {
18622 reg2 = REGNO (XEXP (addr2, 0));
18623 /* The offset must be constant. */
18624 if (!CONST_INT_P (XEXP (addr2, 1)))
18625 return 0;
18626 offset2 = INTVAL (XEXP (addr2, 1));
18627 }
18628 }
18629 else if (!REG_P (addr2))
18630 return 0;
18631 else
18632 {
18633 reg2 = REGNO (addr2);
18634 /* This was a simple (mem (reg)) expression. Offset is 0. */
18635 offset2 = 0;
18636 }
18637
18638 /* Both of these must have the same base register. */
18639 if (reg1 != reg2)
18640 return 0;
18641
18642 /* The offset for the second addr must be 8 more than the first addr. */
18643 if (offset2 != offset1 + 8)
18644 return 0;
18645
18646 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18647 instructions. */
18648 return 1;
18649 }
18650 \f
18651 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18652 need to use DDmode, in all other cases we can use the same mode. */
18653 static machine_mode
18654 rs6000_secondary_memory_needed_mode (machine_mode mode)
18655 {
18656 if (lra_in_progress && mode == SDmode)
18657 return DDmode;
18658 return mode;
18659 }
18660
18661 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18662 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18663 only work on the traditional altivec registers, note if an altivec register
18664 was chosen. */
18665
18666 static enum rs6000_reg_type
18667 register_to_reg_type (rtx reg, bool *is_altivec)
18668 {
18669 HOST_WIDE_INT regno;
18670 enum reg_class rclass;
18671
18672 if (SUBREG_P (reg))
18673 reg = SUBREG_REG (reg);
18674
18675 if (!REG_P (reg))
18676 return NO_REG_TYPE;
18677
18678 regno = REGNO (reg);
18679 if (!HARD_REGISTER_NUM_P (regno))
18680 {
18681 if (!lra_in_progress && !reload_completed)
18682 return PSEUDO_REG_TYPE;
18683
18684 regno = true_regnum (reg);
18685 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
18686 return PSEUDO_REG_TYPE;
18687 }
18688
18689 gcc_assert (regno >= 0);
18690
18691 if (is_altivec && ALTIVEC_REGNO_P (regno))
18692 *is_altivec = true;
18693
18694 rclass = rs6000_regno_regclass[regno];
18695 return reg_class_to_reg_type[(int)rclass];
18696 }
18697
18698 /* Helper function to return the cost of adding a TOC entry address. */
18699
18700 static inline int
18701 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18702 {
18703 int ret;
18704
18705 if (TARGET_CMODEL != CMODEL_SMALL)
18706 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18707
18708 else
18709 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18710
18711 return ret;
18712 }
18713
18714 /* Helper function for rs6000_secondary_reload to determine whether the memory
18715 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18716 needs reloading. Return negative if the memory is not handled by the memory
18717 helper functions and a different reload method should be tried, 0 if no
18718 additional instructions are needed, and positive to give the extra cost of
18719 accessing the memory. */
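/* Some illustrative outcomes, assuming typical address masks: a plain
   (reg) base address that the class handles directly costs 0; a PRE_INC
   form the class cannot auto-update costs 1 (an explicit add is emitted
   at reload time); an address the helpers cannot handle at all yields
   -1.  */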
18720
18721 static int
18722 rs6000_secondary_reload_memory (rtx addr,
18723 enum reg_class rclass,
18724 machine_mode mode)
18725 {
18726 int extra_cost = 0;
18727 rtx reg, and_arg, plus_arg0, plus_arg1;
18728 addr_mask_type addr_mask;
18729 const char *type = NULL;
18730 const char *fail_msg = NULL;
18731
18732 if (GPR_REG_CLASS_P (rclass))
18733 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18734
18735 else if (rclass == FLOAT_REGS)
18736 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18737
18738 else if (rclass == ALTIVEC_REGS)
18739 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18740
18741 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18742 else if (rclass == VSX_REGS)
18743 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18744 & ~RELOAD_REG_AND_M16);
18745
18746 /* If the register allocator hasn't made up its mind yet on the register
18747 class to use, settle on reasonable defaults. */
18748 else if (rclass == NO_REGS)
18749 {
18750 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18751 & ~RELOAD_REG_AND_M16);
18752
18753 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18754 addr_mask &= ~(RELOAD_REG_INDEXED
18755 | RELOAD_REG_PRE_INCDEC
18756 | RELOAD_REG_PRE_MODIFY);
18757 }
18758
18759 else
18760 addr_mask = 0;
18761
18762 /* If the register isn't valid in this register class, just return now. */
18763 if ((addr_mask & RELOAD_REG_VALID) == 0)
18764 {
18765 if (TARGET_DEBUG_ADDR)
18766 {
18767 fprintf (stderr,
18768 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18769 "not valid in class\n",
18770 GET_MODE_NAME (mode), reg_class_names[rclass]);
18771 debug_rtx (addr);
18772 }
18773
18774 return -1;
18775 }
18776
18777 switch (GET_CODE (addr))
18778 {
18779 /* Does the register class support auto update forms for this mode? We
18780 don't need a scratch register, since the powerpc only supports
18781 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18782 case PRE_INC:
18783 case PRE_DEC:
18784 reg = XEXP (addr, 0);
18785 if (!base_reg_operand (reg, GET_MODE (reg)))
18786 {
18787 fail_msg = "no base register #1";
18788 extra_cost = -1;
18789 }
18790
18791 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18792 {
18793 extra_cost = 1;
18794 type = "update";
18795 }
18796 break;
18797
18798 case PRE_MODIFY:
18799 reg = XEXP (addr, 0);
18800 plus_arg1 = XEXP (addr, 1);
18801 if (!base_reg_operand (reg, GET_MODE (reg))
18802 || GET_CODE (plus_arg1) != PLUS
18803 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18804 {
18805 fail_msg = "bad PRE_MODIFY";
18806 extra_cost = -1;
18807 }
18808
18809 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18810 {
18811 extra_cost = 1;
18812 type = "update";
18813 }
18814 break;
18815
18816 /* Do we need to simulate AND -16 to clear the bottom address bits used
18817 in VMX load/stores? Only allow the AND for vector sizes. */
18818 case AND:
18819 and_arg = XEXP (addr, 0);
18820 if (GET_MODE_SIZE (mode) != 16
18821 || !CONST_INT_P (XEXP (addr, 1))
18822 || INTVAL (XEXP (addr, 1)) != -16)
18823 {
18824 fail_msg = "bad Altivec AND #1";
18825 extra_cost = -1;
18826 }
18827
18828 if (rclass != ALTIVEC_REGS)
18829 {
18830 if (legitimate_indirect_address_p (and_arg, false))
18831 extra_cost = 1;
18832
18833 else if (legitimate_indexed_address_p (and_arg, false))
18834 extra_cost = 2;
18835
18836 else
18837 {
18838 fail_msg = "bad Altivec AND #2";
18839 extra_cost = -1;
18840 }
18841
18842 type = "and";
18843 }
18844 break;
18845
18846 /* If this is an indirect address, make sure it is a base register. */
18847 case REG:
18848 case SUBREG:
18849 if (!legitimate_indirect_address_p (addr, false))
18850 {
18851 extra_cost = 1;
18852 type = "move";
18853 }
18854 break;
18855
18856 /* If this is an indexed address, make sure the register class can handle
18857 indexed addresses for this mode. */
18858 case PLUS:
18859 plus_arg0 = XEXP (addr, 0);
18860 plus_arg1 = XEXP (addr, 1);
18861
18862 /* (plus (plus (reg) (constant)) (constant)) is generated during
18863 push_reload processing, so handle it now. */
18864 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18865 {
18866 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18867 {
18868 extra_cost = 1;
18869 type = "offset";
18870 }
18871 }
18872
18873 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18874 push_reload processing, so handle it now. */
18875 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18876 {
18877 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18878 {
18879 extra_cost = 1;
18880 type = "indexed #2";
18881 }
18882 }
18883
18884 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18885 {
18886 fail_msg = "no base register #2";
18887 extra_cost = -1;
18888 }
18889
18890 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18891 {
18892 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18893 || !legitimate_indexed_address_p (addr, false))
18894 {
18895 extra_cost = 1;
18896 type = "indexed";
18897 }
18898 }
18899
18900 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18901 && CONST_INT_P (plus_arg1))
18902 {
18903 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18904 {
18905 extra_cost = 1;
18906 type = "vector d-form offset";
18907 }
18908 }
18909
18910 /* Make sure the register class can handle offset addresses. */
18911 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18912 {
18913 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18914 {
18915 extra_cost = 1;
18916 type = "offset #2";
18917 }
18918 }
18919
18920 else
18921 {
18922 fail_msg = "bad PLUS";
18923 extra_cost = -1;
18924 }
18925
18926 break;
18927
18928 case LO_SUM:
18929 /* Quad offsets are restricted and can't handle normal addresses. */
18930 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18931 {
18932 extra_cost = -1;
18933 type = "vector d-form lo_sum";
18934 }
18935
18936 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18937 {
18938 fail_msg = "bad LO_SUM";
18939 extra_cost = -1;
18940 }
18941
18942 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18943 {
18944 extra_cost = 1;
18945 type = "lo_sum";
18946 }
18947 break;
18948
18949 /* Static addresses need to create a TOC entry. */
18950 case CONST:
18951 case SYMBOL_REF:
18952 case LABEL_REF:
18953 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18954 {
18955 extra_cost = -1;
18956 type = "vector d-form lo_sum #2";
18957 }
18958
18959 else
18960 {
18961 type = "address";
18962 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18963 }
18964 break;
18965
18966 /* TOC references look like offsetable memory. */
18967 case UNSPEC:
18968 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18969 {
18970 fail_msg = "bad UNSPEC";
18971 extra_cost = -1;
18972 }
18973
18974 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18975 {
18976 extra_cost = -1;
18977 type = "vector d-form lo_sum #3";
18978 }
18979
18980 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18981 {
18982 extra_cost = 1;
18983 type = "toc reference";
18984 }
18985 break;
18986
18987 default:
18988 {
18989 fail_msg = "bad address";
18990 extra_cost = -1;
18991 }
18992 }
18993
18994 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
18995 {
18996 if (extra_cost < 0)
18997 fprintf (stderr,
18998 "rs6000_secondary_reload_memory error: mode = %s, "
18999 "class = %s, addr_mask = '%s', %s\n",
19000 GET_MODE_NAME (mode),
19001 reg_class_names[rclass],
19002 rs6000_debug_addr_mask (addr_mask, false),
19003 (fail_msg != NULL) ? fail_msg : "<bad address>");
19004
19005 else
19006 fprintf (stderr,
19007 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19008 "addr_mask = '%s', extra cost = %d, %s\n",
19009 GET_MODE_NAME (mode),
19010 reg_class_names[rclass],
19011 rs6000_debug_addr_mask (addr_mask, false),
19012 extra_cost,
19013 (type) ? type : "<none>");
19014
19015 debug_rtx (addr);
19016 }
19017
19018 return extra_cost;
19019 }
19020
19021 /* Helper function for rs6000_secondary_reload to return true if a move to a
19022 different register class is really a simple move. */
19023
19024 static bool
19025 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19026 enum rs6000_reg_type from_type,
19027 machine_mode mode)
19028 {
19029 int size = GET_MODE_SIZE (mode);
19030
19031 /* Add support for various direct moves available. In this function, we only
19032 look at cases where we don't need any extra registers, and one or more
19033 simple move insns are issued. Originally small integers are not allowed
19034 in FPR/VSX registers. Single precision binary floating point is not a simple
19035 move because we need to convert to the single precision memory layout.
19036 The 4-byte SDmode can be moved. TDmode values are disallowed since they
19037 need special direct move handling, which we do not support yet. */
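/* For example (illustrative): a DImode move between a GPR and a VSX
   register on a 64-bit ISA 2.07 target is a single mtvsrd or mfvsrd,
   so it counts as simple here.  */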
19038 if (TARGET_DIRECT_MOVE
19039 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19040 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19041 {
19042 if (TARGET_POWERPC64)
19043 {
19044 /* ISA 2.07: MTVSRD or MFVSRD. */
19045 if (size == 8)
19046 return true;
19047
19048 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19049 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19050 return true;
19051 }
19052
19053 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19054 if (TARGET_P8_VECTOR)
19055 {
19056 if (mode == SImode)
19057 return true;
19058
19059 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19060 return true;
19061 }
19062
19063 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19064 if (mode == SDmode)
19065 return true;
19066 }
19067
19068 /* Power6+: MFTGPR or MFFGPR. */
19069 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19070 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19071 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19072 return true;
19073
19074 /* Move to/from SPR. */
19075 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19076 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19077 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19078 return true;
19079
19080 return false;
19081 }
19082
19083 /* Direct move helper function for rs6000_secondary_reload; handle all of the
19084 special direct moves that involve allocating an extra register. Return
19085 true if there is a helper function for the move (recording its insn code
19086 and cost in SRI), and false if not. */
19087
19088 static bool
19089 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19090 enum rs6000_reg_type from_type,
19091 machine_mode mode,
19092 secondary_reload_info *sri,
19093 bool altivec_p)
19094 {
19095 bool ret = false;
19096 enum insn_code icode = CODE_FOR_nothing;
19097 int cost = 0;
19098 int size = GET_MODE_SIZE (mode);
19099
19100 if (TARGET_POWERPC64 && size == 16)
19101 {
19102 /* Handle moving 128-bit values from GPRs to VSX registers on
19103 ISA 2.07 (power8, power9) when running in 64-bit mode using
19104 XXPERMDI to glue the two 64-bit values back together. */
19105 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19106 {
19107 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19108 icode = reg_addr[mode].reload_vsx_gpr;
19109 }
19110
19111 /* Handle moving 128-bit values from VSX registers to GPRs on
19112 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19113 bottom 64-bit value. */
19114 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19115 {
19116 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19117 icode = reg_addr[mode].reload_gpr_vsx;
19118 }
19119 }
19120
19121 else if (TARGET_POWERPC64 && mode == SFmode)
19122 {
19123 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19124 {
19125 cost = 3; /* xscvdpspn, mfvsrd, and. */
19126 icode = reg_addr[mode].reload_gpr_vsx;
19127 }
19128
19129 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19130 {
19131 cost = 2; /* mtvsrz, xscvspdpn. */
19132 icode = reg_addr[mode].reload_vsx_gpr;
19133 }
19134 }
19135
19136 else if (!TARGET_POWERPC64 && size == 8)
19137 {
19138 /* Handle moving 64-bit values from GPRs to floating point registers on
19139 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19140 32-bit values back together. Altivec register classes must be handled
19141 specially since a different instruction is used, and the secondary
19142 reload support requires a single instruction class in the scratch
19143 register constraint. However, right now TFmode is not allowed in
19144 Altivec registers, so the pattern will never match. */
19145 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19146 {
19147 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19148 icode = reg_addr[mode].reload_fpr_gpr;
19149 }
19150 }
19151
19152 if (icode != CODE_FOR_nothing)
19153 {
19154 ret = true;
19155 if (sri)
19156 {
19157 sri->icode = icode;
19158 sri->extra_cost = cost;
19159 }
19160 }
19161
19162 return ret;
19163 }
19164
19165 /* Return whether a move between two register classes can be done either
19166 directly (simple move) or via a pattern that uses a single extra temporary
19167 (using ISA 2.07's direct move in this case). */
19168
19169 static bool
19170 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19171 enum rs6000_reg_type from_type,
19172 machine_mode mode,
19173 secondary_reload_info *sri,
19174 bool altivec_p)
19175 {
19176 /* Fall back to load/store reloads if either type is not a register. */
19177 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19178 return false;
19179
19180 /* If we haven't allocated registers yet, assume the move can be done for the
19181 standard register types. */
19182 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19183 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19184 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19185 return true;
19186
19187 /* A move within the same set of registers is a simple move for non-specialized
19188 registers. */
19189 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19190 return true;
19191
19192 /* Check whether a simple move can be done directly. */
19193 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19194 {
19195 if (sri)
19196 {
19197 sri->icode = CODE_FOR_nothing;
19198 sri->extra_cost = 0;
19199 }
19200 return true;
19201 }
19202
19203 /* Now check if we can do it in a few steps. */
19204 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19205 altivec_p);
19206 }
19207
19208 /* Inform reload about cases where moving X with a mode MODE to a register in
19209 RCLASS requires an extra scratch or immediate register. Return the class
19210 needed for the immediate register.
19211
19212 For VSX and Altivec, we may need a register to convert sp+offset into
19213 reg+sp.
19214
19215 For misaligned 64-bit gpr loads and stores we need a register to
19216 convert an offset address to indirect. */
19217
19218 static reg_class_t
19219 rs6000_secondary_reload (bool in_p,
19220 rtx x,
19221 reg_class_t rclass_i,
19222 machine_mode mode,
19223 secondary_reload_info *sri)
19224 {
19225 enum reg_class rclass = (enum reg_class) rclass_i;
19226 reg_class_t ret = ALL_REGS;
19227 enum insn_code icode;
19228 bool default_p = false;
19229 bool done_p = false;
19230
19231 /* Allow subreg of memory before/during reload. */
19232 bool memory_p = (MEM_P (x)
19233 || (!reload_completed && SUBREG_P (x)
19234 && MEM_P (SUBREG_REG (x))));
19235
19236 sri->icode = CODE_FOR_nothing;
19237 sri->t_icode = CODE_FOR_nothing;
19238 sri->extra_cost = 0;
19239 icode = ((in_p)
19240 ? reg_addr[mode].reload_load
19241 : reg_addr[mode].reload_store);
19242
19243 if (REG_P (x) || register_operand (x, mode))
19244 {
19245 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19246 bool altivec_p = (rclass == ALTIVEC_REGS);
19247 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19248
19249 if (!in_p)
19250 std::swap (to_type, from_type);
19251
19252 /* Can we do a direct move of some sort? */
19253 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19254 altivec_p))
19255 {
19256 icode = (enum insn_code)sri->icode;
19257 default_p = false;
19258 done_p = true;
19259 ret = NO_REGS;
19260 }
19261 }
19262
19263 /* Make sure 0.0 is not reloaded or forced into memory. */
19264 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19265 {
19266 ret = NO_REGS;
19267 default_p = false;
19268 done_p = true;
19269 }
19270
19271 /* If this is a scalar floating point value and we want to load it into the
19272 traditional Altivec registers, do it via a move via a traditional floating
19273 point register, unless we have D-form addressing. Also make sure that
19274 non-zero constants use a FPR. */
19275 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19276 && !mode_supports_vmx_dform (mode)
19277 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19278 && (memory_p || CONST_DOUBLE_P (x)))
19279 {
19280 ret = FLOAT_REGS;
19281 default_p = false;
19282 done_p = true;
19283 }
19284
19285 /* Handle reload of load/stores if we have reload helper functions. */
19286 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19287 {
19288 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19289 mode);
19290
19291 if (extra_cost >= 0)
19292 {
19293 done_p = true;
19294 ret = NO_REGS;
19295 if (extra_cost > 0)
19296 {
19297 sri->extra_cost = extra_cost;
19298 sri->icode = icode;
19299 }
19300 }
19301 }
19302
19303 /* Handle unaligned loads and stores of integer registers. */
19304 if (!done_p && TARGET_POWERPC64
19305 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19306 && memory_p
19307 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19308 {
19309 rtx addr = XEXP (x, 0);
19310 rtx off = address_offset (addr);
19311
19312 if (off != NULL_RTX)
19313 {
19314 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19315 unsigned HOST_WIDE_INT offset = INTVAL (off);
19316
19317 /* We need a secondary reload when our legitimate_address_p
19318 says the address is good (as otherwise the entire address
19319 will be reloaded), and the offset is not a multiple of
19320 four or we have an address wrap. Address wrap will only
19321 occur for LO_SUMs since legitimate_offset_address_p
19322 rejects addresses for 16-byte mems that will wrap. */
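/* For instance (illustrative): a DImode access at (plus rN 0x7ffd) has
   extra = 0 and offset & 3 == 1, so it takes the secondary reload path
   below even though the address itself is legitimate.  */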
19323 if (GET_CODE (addr) == LO_SUM
19324 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19325 && ((offset & 3) != 0
19326 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19327 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19328 && (offset & 3) != 0))
19329 {
19330 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19331 if (in_p)
19332 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19333 : CODE_FOR_reload_di_load);
19334 else
19335 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19336 : CODE_FOR_reload_di_store);
19337 sri->extra_cost = 2;
19338 ret = NO_REGS;
19339 done_p = true;
19340 }
19341 else
19342 default_p = true;
19343 }
19344 else
19345 default_p = true;
19346 }
19347
19348 if (!done_p && !TARGET_POWERPC64
19349 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19350 && memory_p
19351 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19352 {
19353 rtx addr = XEXP (x, 0);
19354 rtx off = address_offset (addr);
19355
19356 if (off != NULL_RTX)
19357 {
19358 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19359 unsigned HOST_WIDE_INT offset = INTVAL (off);
19360
19361 /* We need a secondary reload when our legitimate_address_p
19362 says the address is good (as otherwise the entire address
19363 will be reloaded), and we have a wrap.
19364
19365 legitimate_lo_sum_address_p allows LO_SUM addresses to
19366 have any offset so test for wrap in the low 16 bits.
19367
19368 legitimate_offset_address_p checks for the range
19369 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19370 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19371 [0x7ff4,0x7fff] respectively, so test for the
19372 intersection of these ranges, [0x7ffc,0x7fff] and
19373 [0x7ff4,0x7ff7] respectively.
19374
19375 Note that the address we see here may have been
19376 manipulated by legitimize_reload_address. */
19377 if (GET_CODE (addr) == LO_SUM
19378 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19379 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19380 {
19381 if (in_p)
19382 sri->icode = CODE_FOR_reload_si_load;
19383 else
19384 sri->icode = CODE_FOR_reload_si_store;
19385 sri->extra_cost = 2;
19386 ret = NO_REGS;
19387 done_p = true;
19388 }
19389 else
19390 default_p = true;
19391 }
19392 else
19393 default_p = true;
19394 }
19395
19396 if (!done_p)
19397 default_p = true;
19398
19399 if (default_p)
19400 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19401
19402 gcc_assert (ret != ALL_REGS);
19403
19404 if (TARGET_DEBUG_ADDR)
19405 {
19406 fprintf (stderr,
19407 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19408 "mode = %s",
19409 reg_class_names[ret],
19410 in_p ? "true" : "false",
19411 reg_class_names[rclass],
19412 GET_MODE_NAME (mode));
19413
19414 if (reload_completed)
19415 fputs (", after reload", stderr);
19416
19417 if (!done_p)
19418 fputs (", done_p not set", stderr);
19419
19420 if (default_p)
19421 fputs (", default secondary reload", stderr);
19422
19423 if (sri->icode != CODE_FOR_nothing)
19424 fprintf (stderr, ", reload func = %s, extra cost = %d",
19425 insn_data[sri->icode].name, sri->extra_cost);
19426
19427 else if (sri->extra_cost > 0)
19428 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19429
19430 fputs ("\n", stderr);
19431 debug_rtx (x);
19432 }
19433
19434 return ret;
19435 }
19436
19437 /* Better tracing for rs6000_secondary_reload_inner. */
19438
19439 static void
19440 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19441 bool store_p)
19442 {
19443 rtx set, clobber;
19444
19445 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19446
19447 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19448 store_p ? "store" : "load");
19449
19450 if (store_p)
19451 set = gen_rtx_SET (mem, reg);
19452 else
19453 set = gen_rtx_SET (reg, mem);
19454
19455 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19456 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19457 }
19458
19459 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19460 ATTRIBUTE_NORETURN;
19461
19462 static void
19463 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19464 bool store_p)
19465 {
19466 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19467 gcc_unreachable ();
19468 }
19469
19470 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19471 reload helper functions. These were identified in
19472 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19473 reload, it calls the insns:
19474 reload_<RELOAD:mode>_<P:mptrsize>_store
19475 reload_<RELOAD:mode>_<P:mptrsize>_load
19476
19477 which in turn calls this function, to do whatever is necessary to create
19478 valid addresses. */
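/* As an illustration (not an exhaustive list): an Altivec-style address
   (and (reg rB) (const_int -16)) in a class without the AND -16
   capability is rewritten below as scratch = rB & -16 (with a CC
   clobber), and the memory is then addressed through the scratch
   register.  */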
19479
19480 void
19481 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19482 {
19483 int regno = true_regnum (reg);
19484 machine_mode mode = GET_MODE (reg);
19485 addr_mask_type addr_mask;
19486 rtx addr;
19487 rtx new_addr;
19488 rtx op_reg, op0, op1;
19489 rtx and_op;
19490 rtx cc_clobber;
19491 rtvec rv;
19492
19493 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19494 || !base_reg_operand (scratch, GET_MODE (scratch)))
19495 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19496
19497 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19498 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19499
19500 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19501 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19502
19503 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19504 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19505
19506 else
19507 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19508
19509 /* Make sure the mode is valid in this register class. */
19510 if ((addr_mask & RELOAD_REG_VALID) == 0)
19511 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19512
19513 if (TARGET_DEBUG_ADDR)
19514 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19515
19516 new_addr = addr = XEXP (mem, 0);
19517 switch (GET_CODE (addr))
19518 {
19519 /* Does the register class support auto update forms for this mode? If
19520 not, do the update now. We don't need a scratch register, since the
19521 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19522 case PRE_INC:
19523 case PRE_DEC:
19524 op_reg = XEXP (addr, 0);
19525 if (!base_reg_operand (op_reg, Pmode))
19526 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19527
19528 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19529 {
19530 int delta = GET_MODE_SIZE (mode);
19531 if (GET_CODE (addr) == PRE_DEC)
19532 delta = -delta;
19533 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19534 new_addr = op_reg;
19535 }
19536 break;
19537
19538 case PRE_MODIFY:
19539 op0 = XEXP (addr, 0);
19540 op1 = XEXP (addr, 1);
19541 if (!base_reg_operand (op0, Pmode)
19542 || GET_CODE (op1) != PLUS
19543 || !rtx_equal_p (op0, XEXP (op1, 0)))
19544 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19545
19546 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19547 {
19548 emit_insn (gen_rtx_SET (op0, op1));
19549 new_addr = op0;
19550 }
19551 break;
19552
19553 /* Do we need to simulate AND -16 to clear the bottom address bits used
19554 in VMX load/stores? */
19555 case AND:
19556 op0 = XEXP (addr, 0);
19557 op1 = XEXP (addr, 1);
19558 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19559 {
19560 if (REG_P (op0) || SUBREG_P (op0))
19561 op_reg = op0;
19562
19563 else if (GET_CODE (op0) == PLUS)
19564 {
19565 emit_insn (gen_rtx_SET (scratch, op0));
19566 op_reg = scratch;
19567 }
19568
19569 else
19570 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19571
19572 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19573 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19574 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19575 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19576 new_addr = scratch;
19577 }
19578 break;
19579
19580 /* If this is an indirect address, make sure it is a base register. */
19581 case REG:
19582 case SUBREG:
19583 if (!base_reg_operand (addr, GET_MODE (addr)))
19584 {
19585 emit_insn (gen_rtx_SET (scratch, addr));
19586 new_addr = scratch;
19587 }
19588 break;
19589
19590 /* If this is an indexed address, make sure the register class can handle
19591 indexed addresses for this mode. */
19592 case PLUS:
19593 op0 = XEXP (addr, 0);
19594 op1 = XEXP (addr, 1);
19595 if (!base_reg_operand (op0, Pmode))
19596 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19597
19598 else if (int_reg_operand (op1, Pmode))
19599 {
19600 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19601 {
19602 emit_insn (gen_rtx_SET (scratch, addr));
19603 new_addr = scratch;
19604 }
19605 }
19606
19607 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19608 {
19609 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19610 || !quad_address_p (addr, mode, false))
19611 {
19612 emit_insn (gen_rtx_SET (scratch, addr));
19613 new_addr = scratch;
19614 }
19615 }
19616
19617 /* Make sure the register class can handle offset addresses. */
19618 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19619 {
19620 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19621 {
19622 emit_insn (gen_rtx_SET (scratch, addr));
19623 new_addr = scratch;
19624 }
19625 }
19626
19627 else
19628 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19629
19630 break;
19631
19632 case LO_SUM:
19633 op0 = XEXP (addr, 0);
19634 op1 = XEXP (addr, 1);
19635 if (!base_reg_operand (op0, Pmode))
19636 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19637
19638 else if (int_reg_operand (op1, Pmode))
19639 {
19640 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19641 {
19642 emit_insn (gen_rtx_SET (scratch, addr));
19643 new_addr = scratch;
19644 }
19645 }
19646
19647 /* Quad offsets are restricted and can't handle normal addresses. */
19648 else if (mode_supports_dq_form (mode))
19649 {
19650 emit_insn (gen_rtx_SET (scratch, addr));
19651 new_addr = scratch;
19652 }
19653
19654 /* Make sure the register class can handle offset addresses. */
19655 else if (legitimate_lo_sum_address_p (mode, addr, false))
19656 {
19657 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19658 {
19659 emit_insn (gen_rtx_SET (scratch, addr));
19660 new_addr = scratch;
19661 }
19662 }
19663
19664 else
19665 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19666
19667 break;
19668
19669 case SYMBOL_REF:
19670 case CONST:
19671 case LABEL_REF:
19672 rs6000_emit_move (scratch, addr, Pmode);
19673 new_addr = scratch;
19674 break;
19675
19676 default:
19677 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19678 }
19679
19680 /* Adjust the address if it changed. */
19681 if (addr != new_addr)
19682 {
19683 mem = replace_equiv_address_nv (mem, new_addr);
19684 if (TARGET_DEBUG_ADDR)
19685 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19686 }
19687
19688 /* Now create the move. */
19689 if (store_p)
19690 emit_insn (gen_rtx_SET (mem, reg));
19691 else
19692 emit_insn (gen_rtx_SET (reg, mem));
19693
19694 return;
19695 }
19696
19697 /* Convert reloads involving 64-bit gprs and misaligned offset
19698 addressing, or multiple 32-bit gprs and offsets that are too large,
19699 to use indirect addressing. */
19700
19701 void
19702 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19703 {
19704 int regno = true_regnum (reg);
19705 enum reg_class rclass;
19706 rtx addr;
19707 rtx scratch_or_premodify = scratch;
19708
19709 if (TARGET_DEBUG_ADDR)
19710 {
19711 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19712 store_p ? "store" : "load");
19713 fprintf (stderr, "reg:\n");
19714 debug_rtx (reg);
19715 fprintf (stderr, "mem:\n");
19716 debug_rtx (mem);
19717 fprintf (stderr, "scratch:\n");
19718 debug_rtx (scratch);
19719 }
19720
19721 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
19722 gcc_assert (MEM_P (mem));
19723 rclass = REGNO_REG_CLASS (regno);
19724 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19725 addr = XEXP (mem, 0);
19726
19727 if (GET_CODE (addr) == PRE_MODIFY)
19728 {
19729 gcc_assert (REG_P (XEXP (addr, 0))
19730 && GET_CODE (XEXP (addr, 1)) == PLUS
19731 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19732 scratch_or_premodify = XEXP (addr, 0);
19733 addr = XEXP (addr, 1);
19734 }
19735 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19736
19737 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19738
19739 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19740
19741 /* Now create the move. */
19742 if (store_p)
19743 emit_insn (gen_rtx_SET (mem, reg));
19744 else
19745 emit_insn (gen_rtx_SET (reg, mem));
19746
19747 return;
19748 }
19749
19750 /* Given an rtx X being reloaded into a reg required to be
19751 in class CLASS, return the class of reg to actually use.
19752 In general this is just CLASS; but on some machines
19753 in some cases it is preferable to use a more restrictive class.
19754
19755 On the RS/6000, we have to return NO_REGS when we want to reload a
19756 floating-point CONST_DOUBLE to force it to be copied to memory.
19757
19758 We also don't want to reload integer values into floating-point
19759 registers if we can at all help it. In fact, this can
19760 cause reload to die, if it tries to generate a reload of CTR
19761 into a FP register and discovers it doesn't have the memory location
19762 required.
19763
19764 ??? Would it be a good idea to have reload do the converse, that is
19765 try to reload floating modes into FP registers if possible?
19766 */
19767
19768 static enum reg_class
19769 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19770 {
19771 machine_mode mode = GET_MODE (x);
19772 bool is_constant = CONSTANT_P (x);
19773
19774 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19775 reload class for it. */
19776 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19777 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19778 return NO_REGS;
19779
19780 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19781 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19782 return NO_REGS;
19783
19784 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19785 the reloading of address expressions using PLUS into floating point
19786 registers. */
19787 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19788 {
19789 if (is_constant)
19790 {
19791 /* Zero is always allowed in all VSX registers. */
19792 if (x == CONST0_RTX (mode))
19793 return rclass;
19794
19795 /* If this is a vector constant that can be formed with a few Altivec
19796 instructions, we want altivec registers. */
19797 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19798 return ALTIVEC_REGS;
19799
19800 /* If this is an integer constant that can easily be loaded into
19801 vector registers, allow it. */
19802 if (CONST_INT_P (x))
19803 {
19804 HOST_WIDE_INT value = INTVAL (x);
19805
19806 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19807 2.06 can generate it in the Altivec registers with
19808 VSPLTI<x>. */
19809 if (value == -1)
19810 {
19811 if (TARGET_P8_VECTOR)
19812 return rclass;
19813 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19814 return ALTIVEC_REGS;
19815 else
19816 return NO_REGS;
19817 }
19818
19819 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19820 a sign extend in the Altivec registers. */
19821 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19822 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19823 return ALTIVEC_REGS;
19824 }
19825
19826 /* Force constant to memory. */
19827 return NO_REGS;
19828 }
19829
19830 /* D-form addressing can easily reload the value. */
19831 if (mode_supports_vmx_dform (mode)
19832 || mode_supports_dq_form (mode))
19833 return rclass;
19834
19835 /* If this is a scalar floating point value and we don't have D-form
19836 addressing, prefer the traditional floating point registers so that we
19837 can use D-form (register+offset) addressing. */
19838 if (rclass == VSX_REGS
19839 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19840 return FLOAT_REGS;
19841
19842 /* Prefer the Altivec registers if Altivec is handling the vector
19843 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19844 loads. */
19845 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19846 || mode == V1TImode)
19847 return ALTIVEC_REGS;
19848
19849 return rclass;
19850 }
19851
19852 if (is_constant || GET_CODE (x) == PLUS)
19853 {
19854 if (reg_class_subset_p (GENERAL_REGS, rclass))
19855 return GENERAL_REGS;
19856 if (reg_class_subset_p (BASE_REGS, rclass))
19857 return BASE_REGS;
19858 return NO_REGS;
19859 }
19860
19861 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
19862 return GENERAL_REGS;
19863
19864 return rclass;
19865 }
19866
19867 /* Debug version of rs6000_preferred_reload_class. */
19868 static enum reg_class
19869 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19870 {
19871 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19872
19873 fprintf (stderr,
19874 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19875 "mode = %s, x:\n",
19876 reg_class_names[ret], reg_class_names[rclass],
19877 GET_MODE_NAME (GET_MODE (x)));
19878 debug_rtx (x);
19879
19880 return ret;
19881 }
19882
19883 /* If we are copying between FP or AltiVec registers and anything else, we need
19884 a memory location. The exception is when we are targeting ppc64 and the
19885 fpr/gpr direct move instructions are available. Also, under VSX, you
19886 can copy vector registers from the FP register set to the Altivec register
19887 set and vice versa. */
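/* For example (illustrative): without direct moves, copying an SImode
   value between FLOAT_REGS and GENERAL_REGS must bounce through a stack
   slot, so this returns true; a GPR to GPR copy returns false.  */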
19888
19889 static bool
19890 rs6000_secondary_memory_needed (machine_mode mode,
19891 reg_class_t from_class,
19892 reg_class_t to_class)
19893 {
19894 enum rs6000_reg_type from_type, to_type;
19895 bool altivec_p = ((from_class == ALTIVEC_REGS)
19896 || (to_class == ALTIVEC_REGS));
19897
19898 /* If a simple/direct move is available, we don't need secondary memory. */
19899 from_type = reg_class_to_reg_type[(int)from_class];
19900 to_type = reg_class_to_reg_type[(int)to_class];
19901
19902 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19903 (secondary_reload_info *)0, altivec_p))
19904 return false;
19905
19906 /* If we have a floating point or vector register class, we need to use
19907 memory to transfer the data. */
19908 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19909 return true;
19910
19911 return false;
19912 }
19913
19914 /* Debug version of rs6000_secondary_memory_needed. */
19915 static bool
19916 rs6000_debug_secondary_memory_needed (machine_mode mode,
19917 reg_class_t from_class,
19918 reg_class_t to_class)
19919 {
19920 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19921
19922 fprintf (stderr,
19923 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19924 "to_class = %s, mode = %s\n",
19925 ret ? "true" : "false",
19926 reg_class_names[from_class],
19927 reg_class_names[to_class],
19928 GET_MODE_NAME (mode));
19929
19930 return ret;
19931 }
19932
19933 /* Return the register class of a scratch register needed to copy IN into
19934 or out of a register in RCLASS in MODE. If it can be done directly,
19935 NO_REGS is returned. */
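/* For instance (illustrative): copying a SYMBOL_REF into a
   floating-point register on ELF targets reports BASE_REGS, since the
   symbolic address must first be formed in a base register; a GPR to
   GPR copy reports NO_REGS.  */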
19936
19937 static enum reg_class
19938 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19939 rtx in)
19940 {
19941 int regno;
19942
19943 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19944 #if TARGET_MACHO
19945 && MACHOPIC_INDIRECT
19946 #endif
19947 ))
19948 {
19949 /* We cannot copy a symbolic operand directly into anything
19950 other than BASE_REGS for TARGET_ELF. So indicate that a
19951 register from BASE_REGS is needed as an intermediate
19952 register.
19953
19954 On Darwin, pic addresses require a load from memory, which
19955 needs a base register. */
19956 if (rclass != BASE_REGS
19957 && (SYMBOL_REF_P (in)
19958 || GET_CODE (in) == HIGH
19959 || GET_CODE (in) == LABEL_REF
19960 || GET_CODE (in) == CONST))
19961 return BASE_REGS;
19962 }
19963
19964 if (REG_P (in))
19965 {
19966 regno = REGNO (in);
19967 if (!HARD_REGISTER_NUM_P (regno))
19968 {
19969 regno = true_regnum (in);
19970 if (!HARD_REGISTER_NUM_P (regno))
19971 regno = -1;
19972 }
19973 }
19974 else if (SUBREG_P (in))
19975 {
19976 regno = true_regnum (in);
19977 if (!HARD_REGISTER_NUM_P (regno))
19978 regno = -1;
19979 }
19980 else
19981 regno = -1;
19982
19983 /* If we have VSX register moves, prefer moving scalar values between
19984 Altivec registers and GPRs by going via an FPR (and then via memory)
19985 instead of reloading the secondary memory address for Altivec moves. */
19986 if (TARGET_VSX
19987 && GET_MODE_SIZE (mode) < 16
19988 && !mode_supports_vmx_dform (mode)
19989 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
19990 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
19991 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19992 && (regno >= 0 && INT_REGNO_P (regno)))))
19993 return FLOAT_REGS;
19994
19995 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
19996 into anything. */
19997 if (rclass == GENERAL_REGS || rclass == BASE_REGS
19998 || (regno >= 0 && INT_REGNO_P (regno)))
19999 return NO_REGS;
20000
20001 /* Constants, memory, and VSX registers can go into VSX registers (both the
20002 traditional floating point and the altivec registers). */
20003 if (rclass == VSX_REGS
20004 && (regno == -1 || VSX_REGNO_P (regno)))
20005 return NO_REGS;
20006
20007 /* Constants, memory, and FP registers can go into FP registers. */
20008 if ((regno == -1 || FP_REGNO_P (regno))
20009 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
20010 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20011
20012 /* Memory and AltiVec registers can go into AltiVec registers. */
20013 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20014 && rclass == ALTIVEC_REGS)
20015 return NO_REGS;
20016
20017 /* We can copy among the CR registers. */
20018 if ((rclass == CR_REGS || rclass == CR0_REGS)
20019 && regno >= 0 && CR_REGNO_P (regno))
20020 return NO_REGS;
20021
20022 /* Otherwise, we need GENERAL_REGS. */
20023 return GENERAL_REGS;
20024 }
20025
20026 /* Debug version of rs6000_secondary_reload_class. */
20027 static enum reg_class
20028 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20029 machine_mode mode, rtx in)
20030 {
20031 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20032 fprintf (stderr,
20033 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20034 "mode = %s, input rtx:\n",
20035 reg_class_names[ret], reg_class_names[rclass],
20036 GET_MODE_NAME (mode));
20037 debug_rtx (in);
20038
20039 return ret;
20040 }
20041
20042 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20043
20044 static bool
20045 rs6000_can_change_mode_class (machine_mode from,
20046 machine_mode to,
20047 reg_class_t rclass)
20048 {
20049 unsigned from_size = GET_MODE_SIZE (from);
20050 unsigned to_size = GET_MODE_SIZE (to);
20051
20052 if (from_size != to_size)
20053 {
20054 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20055
20056 if (reg_classes_intersect_p (xclass, rclass))
20057 {
20058 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20059 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20060 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20061 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20062
20063 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20064 single register under VSX because the scalar part of the register
20065 is in the upper 64-bits, and not the lower 64-bits. Types like
20066 TFmode/TDmode that take 2 scalar register can overlap. 128-bit
20067 IEEE floating point can't overlap, and neither can small
20068 values. */
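/* Concretely (illustrative): KFmode may overlap KFmode, but a DImode
   subreg of a KFmode value is rejected here, since only 128-bit IEEE
   types may overlap each other.  */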
20069
20070 if (to_float128_vector_p && from_float128_vector_p)
20071 return true;
20072
20073 else if (to_float128_vector_p || from_float128_vector_p)
20074 return false;
20075
20076 /* TDmode in floating-mode registers must always go into a register
20077 pair with the most significant word in the even-numbered register
20078 to match ISA requirements. In little-endian mode, this does not
20079 match subreg numbering, so we cannot allow subregs. */
20080 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20081 return false;
20082
20083 if (from_size < 8 || to_size < 8)
20084 return false;
20085
20086 if (from_size == 8 && (8 * to_nregs) != to_size)
20087 return false;
20088
20089 if (to_size == 8 && (8 * from_nregs) != from_size)
20090 return false;
20091
20092 return true;
20093 }
20094 else
20095 return true;
20096 }
20097
20098 /* Since the VSX register set includes traditional floating point registers
20099 and altivec registers, just check for the size being different instead of
20100 trying to check whether the modes are vector modes. Otherwise it won't
20101 allow, say, DF and DI to change classes. For types like TFmode and TDmode
20102 that take 2 64-bit registers, rather than a single 128-bit register, don't
20103 allow subregs of those types to other 128-bit types. */
20104 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20105 {
20106 unsigned num_regs = (from_size + 15) / 16;
20107 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20108 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20109 return false;
20110
20111 return (from_size == 8 || from_size == 16);
20112 }
20113
20114 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20115 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20116 return false;
20117
20118 return true;
20119 }
20120
20121 /* Debug version of rs6000_can_change_mode_class. */
20122 static bool
20123 rs6000_debug_can_change_mode_class (machine_mode from,
20124 machine_mode to,
20125 reg_class_t rclass)
20126 {
20127 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20128
20129 fprintf (stderr,
20130 "rs6000_can_change_mode_class, return %s, from = %s, "
20131 "to = %s, rclass = %s\n",
20132 ret ? "true" : "false",
20133 GET_MODE_NAME (from), GET_MODE_NAME (to),
20134 reg_class_names[rclass]);
20135
20136 return ret;
20137 }
20138 \f
20139 /* Return a string to do a move operation of 128 bits of data. */
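/* A few sample templates this can return (illustrative, depending on
   target flags): "xxlor %x0,%x1,%x1" for a VSX to VSX register copy,
   "lq %0,%1" for a GPR quad load when TARGET_QUAD_MEMORY allows it, and
   "#" when the move must be split after register allocation.  */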
20140
20141 const char *
20142 rs6000_output_move_128bit (rtx operands[])
20143 {
20144 rtx dest = operands[0];
20145 rtx src = operands[1];
20146 machine_mode mode = GET_MODE (dest);
20147 int dest_regno;
20148 int src_regno;
20149 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20150 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20151
20152 if (REG_P (dest))
20153 {
20154 dest_regno = REGNO (dest);
20155 dest_gpr_p = INT_REGNO_P (dest_regno);
20156 dest_fp_p = FP_REGNO_P (dest_regno);
20157 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20158 dest_vsx_p = dest_fp_p | dest_vmx_p;
20159 }
20160 else
20161 {
20162 dest_regno = -1;
20163 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20164 }
20165
20166 if (REG_P (src))
20167 {
20168 src_regno = REGNO (src);
20169 src_gpr_p = INT_REGNO_P (src_regno);
20170 src_fp_p = FP_REGNO_P (src_regno);
20171 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20172 src_vsx_p = src_fp_p | src_vmx_p;
20173 }
20174 else
20175 {
20176 src_regno = -1;
20177 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20178 }
20179
20180 /* Register moves. */
20181 if (dest_regno >= 0 && src_regno >= 0)
20182 {
20183 if (dest_gpr_p)
20184 {
20185 if (src_gpr_p)
20186 return "#";
20187
20188 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20189 return (WORDS_BIG_ENDIAN
20190 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20191 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20192
20193 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20194 return "#";
20195 }
20196
20197 else if (TARGET_VSX && dest_vsx_p)
20198 {
20199 if (src_vsx_p)
20200 return "xxlor %x0,%x1,%x1";
20201
20202 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20203 return (WORDS_BIG_ENDIAN
20204 ? "mtvsrdd %x0,%1,%L1"
20205 : "mtvsrdd %x0,%L1,%1");
20206
20207 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20208 return "#";
20209 }
20210
20211 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20212 return "vor %0,%1,%1";
20213
20214 else if (dest_fp_p && src_fp_p)
20215 return "#";
20216 }
20217
20218 /* Loads. */
20219 else if (dest_regno >= 0 && MEM_P (src))
20220 {
20221 if (dest_gpr_p)
20222 {
20223 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20224 return "lq %0,%1";
20225 else
20226 return "#";
20227 }
20228
20229 else if (TARGET_ALTIVEC && dest_vmx_p
20230 && altivec_indexed_or_indirect_operand (src, mode))
20231 return "lvx %0,%y1";
20232
20233 else if (TARGET_VSX && dest_vsx_p)
20234 {
20235 if (mode_supports_dq_form (mode)
20236 && quad_address_p (XEXP (src, 0), mode, true))
20237 return "lxv %x0,%1";
20238
20239 else if (TARGET_P9_VECTOR)
20240 return "lxvx %x0,%y1";
20241
20242 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20243 return "lxvw4x %x0,%y1";
20244
20245 else
20246 return "lxvd2x %x0,%y1";
20247 }
20248
20249 else if (TARGET_ALTIVEC && dest_vmx_p)
20250 return "lvx %0,%y1";
20251
20252 else if (dest_fp_p)
20253 return "#";
20254 }
20255
20256 /* Stores. */
20257 else if (src_regno >= 0 && MEM_P (dest))
20258 {
20259 if (src_gpr_p)
20260 {
20261 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20262 return "stq %1,%0";
20263 else
20264 return "#";
20265 }
20266
20267 else if (TARGET_ALTIVEC && src_vmx_p
20268 && altivec_indexed_or_indirect_operand (dest, mode))
20269 return "stvx %1,%y0";
20270
20271 else if (TARGET_VSX && src_vsx_p)
20272 {
20273 if (mode_supports_dq_form (mode)
20274 && quad_address_p (XEXP (dest, 0), mode, true))
20275 return "stxv %x1,%0";
20276
20277 else if (TARGET_P9_VECTOR)
20278 return "stxvx %x1,%y0";
20279
20280 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20281 return "stxvw4x %x1,%y0";
20282
20283 else
20284 return "stxvd2x %x1,%y0";
20285 }
20286
20287 else if (TARGET_ALTIVEC && src_vmx_p)
20288 return "stvx %1,%y0";
20289
20290 else if (src_fp_p)
20291 return "#";
20292 }
20293
20294 /* Constants. */
20295 else if (dest_regno >= 0
20296 && (CONST_INT_P (src)
20297 || CONST_WIDE_INT_P (src)
20298 || CONST_DOUBLE_P (src)
20299 || GET_CODE (src) == CONST_VECTOR))
20300 {
20301 if (dest_gpr_p)
20302 return "#";
20303
20304 else if ((dest_vmx_p && TARGET_ALTIVEC)
20305 || (dest_vsx_p && TARGET_VSX))
20306 return output_vec_const_move (operands);
20307 }
20308
20309 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20310 }
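
/* Illustrative sketch (not part of the original source): for a 128-bit
   register-to-register move where both operands are VSX registers, the
   function above returns the "xxlor %x0,%x1,%x1" template, while a
   GPR-to-GPR move returns "#" so the insn is split into word-sized moves
   later.  A hypothetical caller:

     rtx ops[2] = { dest, src };                  /* V2DImode hard regs.  */
     const char *tmpl = rs6000_output_move_128bit (ops);
     /* tmpl == "xxlor %x0,%x1,%x1" when both regs are VSX registers.  */

   The %x and %y modifiers in the returned templates are the print_operand
   codes handled later in this file.  */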
20311
20312 /* Validate a 128-bit move. */
20313 bool
20314 rs6000_move_128bit_ok_p (rtx operands[])
20315 {
20316 machine_mode mode = GET_MODE (operands[0]);
20317 return (gpc_reg_operand (operands[0], mode)
20318 || gpc_reg_operand (operands[1], mode));
20319 }
20320
20321 /* Return true if a 128-bit move needs to be split. */
20322 bool
20323 rs6000_split_128bit_ok_p (rtx operands[])
20324 {
20325 if (!reload_completed)
20326 return false;
20327
20328 if (!gpr_or_gpr_p (operands[0], operands[1]))
20329 return false;
20330
20331 if (quad_load_store_p (operands[0], operands[1]))
20332 return false;
20333
20334 return true;
20335 }
20336
20337 \f
20338 /* Given a comparison operation, return the bit number in CCR to test. We
20339 know this is a valid comparison.
20340
20341 SCC_P is 1 if this is for an scc. That means that %D will have been
20342 used instead of %C, so the bits will be in different places.
20343
20344 Return -1 if OP isn't a valid comparison for some reason. */
20345
20346 int
20347 ccr_bit (rtx op, int scc_p)
20348 {
20349 enum rtx_code code = GET_CODE (op);
20350 machine_mode cc_mode;
20351 int cc_regnum;
20352 int base_bit;
20353 rtx reg;
20354
20355 if (!COMPARISON_P (op))
20356 return -1;
20357
20358 reg = XEXP (op, 0);
20359
20360 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20361 return -1;
20362
20363 cc_mode = GET_MODE (reg);
20364 cc_regnum = REGNO (reg);
20365 base_bit = 4 * (cc_regnum - CR0_REGNO);
20366
20367 validate_condition_mode (code, cc_mode);
20368
20369 /* When generating a sCOND operation, only positive conditions are
20370 allowed. */
20371 if (scc_p)
20372 switch (code)
20373 {
20374 case EQ:
20375 case GT:
20376 case LT:
20377 case UNORDERED:
20378 case GTU:
20379 case LTU:
20380 break;
20381 default:
20382 return -1;
20383 }
20384
20385 switch (code)
20386 {
20387 case NE:
20388 return scc_p ? base_bit + 3 : base_bit + 2;
20389 case EQ:
20390 return base_bit + 2;
20391 case GT: case GTU: case UNLE:
20392 return base_bit + 1;
20393 case LT: case LTU: case UNGE:
20394 return base_bit;
20395 case ORDERED: case UNORDERED:
20396 return base_bit + 3;
20397
20398 case GE: case GEU:
20399 /* If scc, we will have done a cror to put the bit in the
20400 unordered position. So test that bit. For integer, this is ! LT
20401 unless this is an scc insn. */
20402 return scc_p ? base_bit + 3 : base_bit;
20403
20404 case LE: case LEU:
20405 return scc_p ? base_bit + 3 : base_bit + 1;
20406
20407 default:
20408 return -1;
20409 }
20410 }
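
/* Example (illustrative, not in the original source): for a GT comparison
   against CR field 3, base_bit is 4 * 3 = 12 and ccr_bit returns 13; the
   bits within a CR field are LT (+0), GT (+1), EQ (+2) and SO/UNORDERED
   (+3).  With SCC_P nonzero, GE/LE/NE instead test the bit that the
   preceding cror left in the unordered position (+3).  */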
20411 \f
20412 /* Return the GOT register. */
20413
20414 rtx
20415 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20416 {
20417 /* The second flow pass currently (June 1999) can't update
20418 regs_ever_live without disturbing other parts of the compiler, so
20419 update it here to make the prolog/epilogue code happy. */
20420 if (!can_create_pseudo_p ()
20421 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20422 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20423
20424 crtl->uses_pic_offset_table = 1;
20425
20426 return pic_offset_table_rtx;
20427 }
20428 \f
20429 static rs6000_stack_t stack_info;
20430
20431 /* Function to init struct machine_function.
20432 This will be called, via a pointer variable,
20433 from push_function_context. */
20434
20435 static struct machine_function *
20436 rs6000_init_machine_status (void)
20437 {
20438 stack_info.reload_completed = 0;
20439 return ggc_cleared_alloc<machine_function> ();
20440 }
20441 \f
20442 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20443
20444 /* Write out a function code label. */
20445
20446 void
20447 rs6000_output_function_entry (FILE *file, const char *fname)
20448 {
20449 if (fname[0] != '.')
20450 {
20451 switch (DEFAULT_ABI)
20452 {
20453 default:
20454 gcc_unreachable ();
20455
20456 case ABI_AIX:
20457 if (DOT_SYMBOLS)
20458 putc ('.', file);
20459 else
20460 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20461 break;
20462
20463 case ABI_ELFv2:
20464 case ABI_V4:
20465 case ABI_DARWIN:
20466 break;
20467 }
20468 }
20469
20470 RS6000_OUTPUT_BASENAME (file, fname);
20471 }
20472
20473 /* Print an operand. Recognize special options, documented below. */
20474
20475 #if TARGET_ELF
20476 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20477 only introduced by the linker, when applying the sda21
20478 relocation. */
20479 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20480 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20481 #else
20482 #define SMALL_DATA_RELOC "sda21"
20483 #define SMALL_DATA_REG 0
20484 #endif
20485
20486 void
20487 print_operand (FILE *file, rtx x, int code)
20488 {
20489 int i;
20490 unsigned HOST_WIDE_INT uval;
20491
20492 switch (code)
20493 {
20494 /* %a is output_address. */
20495
20496 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20497 output_operand. */
20498
20499 case 'D':
20500 /* Like 'J' but get to the GT bit only. */
20501 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20502 {
20503 output_operand_lossage ("invalid %%D value");
20504 return;
20505 }
20506
20507 /* Bit 1 is GT bit. */
20508 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20509
20510 /* Add one for shift count in rlinm for scc. */
20511 fprintf (file, "%d", i + 1);
20512 return;
20513
20514 case 'e':
20515 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20516 if (! INT_P (x))
20517 {
20518 output_operand_lossage ("invalid %%e value");
20519 return;
20520 }
20521
20522 uval = INTVAL (x);
20523 if ((uval & 0xffff) == 0 && uval != 0)
20524 putc ('s', file);
20525 return;
20526
20527 case 'E':
20528 /* X is a CR register. Print the number of the EQ bit of the CR. */
20529 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20530 output_operand_lossage ("invalid %%E value");
20531 else
20532 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20533 return;
20534
20535 case 'f':
20536 /* X is a CR register. Print the shift count needed to move it
20537 to the high-order four bits. */
20538 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20539 output_operand_lossage ("invalid %%f value");
20540 else
20541 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20542 return;
20543
20544 case 'F':
20545 /* Similar, but print the count for the rotate in the opposite
20546 direction. */
20547 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20548 output_operand_lossage ("invalid %%F value");
20549 else
20550 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20551 return;
20552
20553 case 'G':
20554 /* X is a constant integer. If it is negative, print "m",
20555 otherwise print "z". This is to make an aze or ame insn. */
20556 if (!CONST_INT_P (x))
20557 output_operand_lossage ("invalid %%G value");
20558 else if (INTVAL (x) >= 0)
20559 putc ('z', file);
20560 else
20561 putc ('m', file);
20562 return;
20563
20564 case 'h':
20565 /* If constant, output low-order five bits. Otherwise, write
20566 normally. */
20567 if (INT_P (x))
20568 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20569 else
20570 print_operand (file, x, 0);
20571 return;
20572
20573 case 'H':
20574 /* If constant, output low-order six bits. Otherwise, write
20575 normally. */
20576 if (INT_P (x))
20577 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20578 else
20579 print_operand (file, x, 0);
20580 return;
20581
20582 case 'I':
20583 /* Print `i' if this is a constant, else nothing. */
20584 if (INT_P (x))
20585 putc ('i', file);
20586 return;
20587
20588 case 'j':
20589 /* Write the bit number in CCR for jump. */
20590 i = ccr_bit (x, 0);
20591 if (i == -1)
20592 output_operand_lossage ("invalid %%j code");
20593 else
20594 fprintf (file, "%d", i);
20595 return;
20596
20597 case 'J':
20598 /* Similar, but add one for shift count in rlinm for scc and pass
20599 scc flag to `ccr_bit'. */
20600 i = ccr_bit (x, 1);
20601 if (i == -1)
20602 output_operand_lossage ("invalid %%J code");
20603 else
20604 /* If we want bit 31, write a shift count of zero, not 32. */
20605 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20606 return;
20607
20608 case 'k':
20609 /* X must be a constant. Write the 1's complement of the
20610 constant. */
20611 if (! INT_P (x))
20612 output_operand_lossage ("invalid %%k value");
20613 else
20614 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20615 return;
20616
20617 case 'K':
20618 /* X must be a symbolic constant on ELF. Write an
20619 expression suitable for an 'addi' that adds in the low 16
20620 bits of the MEM. */
20621 if (GET_CODE (x) == CONST)
20622 {
20623 if (GET_CODE (XEXP (x, 0)) != PLUS
20624 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20625 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20626 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20627 output_operand_lossage ("invalid %%K value");
20628 }
20629 print_operand_address (file, x);
20630 fputs ("@l", file);
20631 return;
20632
20633 /* %l is output_asm_label. */
20634
20635 case 'L':
20636 /* Write second word of DImode or DFmode reference. Works on register
20637 or non-indexed memory only. */
20638 if (REG_P (x))
20639 fputs (reg_names[REGNO (x) + 1], file);
20640 else if (MEM_P (x))
20641 {
20642 machine_mode mode = GET_MODE (x);
20643 /* Handle possible auto-increment. Since it is pre-increment and
20644 we have already done it, we can just use an offset of word. */
20645 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20646 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20647 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20648 UNITS_PER_WORD));
20649 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20650 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20651 UNITS_PER_WORD));
20652 else
20653 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20654 UNITS_PER_WORD),
20655 0));
20656
20657 if (small_data_operand (x, GET_MODE (x)))
20658 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20659 reg_names[SMALL_DATA_REG]);
20660 }
20661 return;
20662
20663 case 'N': /* Unused */
20664 /* Write the number of elements in the vector times 4. */
20665 if (GET_CODE (x) != PARALLEL)
20666 output_operand_lossage ("invalid %%N value");
20667 else
20668 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20669 return;
20670
20671 case 'O': /* Unused */
20672 /* Similar, but subtract 1 first. */
20673 if (GET_CODE (x) != PARALLEL)
20674 output_operand_lossage ("invalid %%O value");
20675 else
20676 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20677 return;
20678
20679 case 'p':
20680 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20681 if (! INT_P (x)
20682 || INTVAL (x) < 0
20683 || (i = exact_log2 (INTVAL (x))) < 0)
20684 output_operand_lossage ("invalid %%p value");
20685 else
20686 fprintf (file, "%d", i);
20687 return;
20688
20689 case 'P':
20690 /* The operand must be an indirect memory reference. The result
20691 is the register name. */
20692 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
20693 || REGNO (XEXP (x, 0)) >= 32)
20694 output_operand_lossage ("invalid %%P value");
20695 else
20696 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20697 return;
20698
20699 case 'q':
20700 /* This outputs the logical code corresponding to a boolean
20701 expression. The expression may have one or both operands
20702 negated (if one, only the first one). For condition register
20703 logical operations, it will also treat the negated
20704 CR codes as NOTs, but not handle NOTs of them. */
20705 {
20706 const char *const *t = 0;
20707 const char *s;
20708 enum rtx_code code = GET_CODE (x);
20709 static const char * const tbl[3][3] = {
20710 { "and", "andc", "nor" },
20711 { "or", "orc", "nand" },
20712 { "xor", "eqv", "xor" } };
20713
20714 if (code == AND)
20715 t = tbl[0];
20716 else if (code == IOR)
20717 t = tbl[1];
20718 else if (code == XOR)
20719 t = tbl[2];
20720 else
20721 output_operand_lossage ("invalid %%q value");
20722
20723 if (GET_CODE (XEXP (x, 0)) != NOT)
20724 s = t[0];
20725 else
20726 {
20727 if (GET_CODE (XEXP (x, 1)) == NOT)
20728 s = t[2];
20729 else
20730 s = t[1];
20731 }
20732
20733 fputs (s, file);
20734 }
20735 return;
20736
20737 case 'Q':
20738 if (! TARGET_MFCRF)
20739 return;
20740 fputc (',', file);
20741 /* FALLTHRU */
20742
20743 case 'R':
20744 /* X is a CR register. Print the mask for `mtcrf'. */
20745 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20746 output_operand_lossage ("invalid %%R value");
20747 else
20748 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20749 return;
20750
20751 case 's':
20752 /* Low 5 bits of 32 - value */
20753 if (! INT_P (x))
20754 output_operand_lossage ("invalid %%s value");
20755 else
20756 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20757 return;
20758
20759 case 't':
20760 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20761 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20762 {
20763 output_operand_lossage ("invalid %%t value");
20764 return;
20765 }
20766
20767 /* Bit 3 is OV bit. */
20768 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20769
20770 /* If we want bit 31, write a shift count of zero, not 32. */
20771 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20772 return;
20773
20774 case 'T':
20775 /* Print the symbolic name of a branch target register. */
20776 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20777 x = XVECEXP (x, 0, 0);
20778 if (!REG_P (x) || (REGNO (x) != LR_REGNO
20779 && REGNO (x) != CTR_REGNO))
20780 output_operand_lossage ("invalid %%T value");
20781 else if (REGNO (x) == LR_REGNO)
20782 fputs ("lr", file);
20783 else
20784 fputs ("ctr", file);
20785 return;
20786
20787 case 'u':
20788 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20789 for use in unsigned operand. */
20790 if (! INT_P (x))
20791 {
20792 output_operand_lossage ("invalid %%u value");
20793 return;
20794 }
20795
20796 uval = INTVAL (x);
20797 if ((uval & 0xffff) == 0)
20798 uval >>= 16;
20799
20800 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20801 return;
20802
20803 case 'v':
20804 /* High-order 16 bits of constant for use in signed operand. */
20805 if (! INT_P (x))
20806 output_operand_lossage ("invalid %%v value");
20807 else
20808 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20809 (INTVAL (x) >> 16) & 0xffff);
20810 return;
20811
20812 case 'U':
20813 /* Print `u' if this has an auto-increment or auto-decrement. */
20814 if (MEM_P (x)
20815 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20816 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20817 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20818 putc ('u', file);
20819 return;
20820
20821 case 'V':
20822 /* Print the trap code for this operand. */
20823 switch (GET_CODE (x))
20824 {
20825 case EQ:
20826 fputs ("eq", file); /* 4 */
20827 break;
20828 case NE:
20829 fputs ("ne", file); /* 24 */
20830 break;
20831 case LT:
20832 fputs ("lt", file); /* 16 */
20833 break;
20834 case LE:
20835 fputs ("le", file); /* 20 */
20836 break;
20837 case GT:
20838 fputs ("gt", file); /* 8 */
20839 break;
20840 case GE:
20841 fputs ("ge", file); /* 12 */
20842 break;
20843 case LTU:
20844 fputs ("llt", file); /* 2 */
20845 break;
20846 case LEU:
20847 fputs ("lle", file); /* 6 */
20848 break;
20849 case GTU:
20850 fputs ("lgt", file); /* 1 */
20851 break;
20852 case GEU:
20853 fputs ("lge", file); /* 5 */
20854 break;
20855 default:
20856 output_operand_lossage ("invalid %%V value");
20857 }
20858 break;
20859
20860 case 'w':
20861 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20862 normally. */
20863 if (INT_P (x))
20864 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20865 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20866 else
20867 print_operand (file, x, 0);
20868 return;
20869
20870 case 'x':
20871 /* X is a FPR or Altivec register used in a VSX context. */
20872 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
20873 output_operand_lossage ("invalid %%x value");
20874 else
20875 {
20876 int reg = REGNO (x);
20877 int vsx_reg = (FP_REGNO_P (reg)
20878 ? reg - 32
20879 : reg - FIRST_ALTIVEC_REGNO + 32);
20880
20881 #ifdef TARGET_REGNAMES
20882 if (TARGET_REGNAMES)
20883 fprintf (file, "%%vs%d", vsx_reg);
20884 else
20885 #endif
20886 fprintf (file, "%d", vsx_reg);
20887 }
20888 return;
20889
20890 case 'X':
20891 if (MEM_P (x)
20892 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20893 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20894 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20895 putc ('x', file);
20896 return;
20897
20898 case 'Y':
20899 /* Like 'L', for third word of TImode/PTImode */
20900 if (REG_P (x))
20901 fputs (reg_names[REGNO (x) + 2], file);
20902 else if (MEM_P (x))
20903 {
20904 machine_mode mode = GET_MODE (x);
20905 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20906 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20907 output_address (mode, plus_constant (Pmode,
20908 XEXP (XEXP (x, 0), 0), 8));
20909 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20910 output_address (mode, plus_constant (Pmode,
20911 XEXP (XEXP (x, 0), 0), 8));
20912 else
20913 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20914 if (small_data_operand (x, GET_MODE (x)))
20915 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20916 reg_names[SMALL_DATA_REG]);
20917 }
20918 return;
20919
20920 case 'z':
20921 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20922 x = XVECEXP (x, 0, 1);
20923 /* X is a SYMBOL_REF. Write out the name preceded by a
20924 period and without any trailing data in brackets. Used for function
20925 names. If we are configured for System V (or the embedded ABI) on
20926 the PowerPC, do not emit the period, since those systems do not use
20927 TOCs and the like. */
20928 if (!SYMBOL_REF_P (x))
20929 {
20930 output_operand_lossage ("invalid %%z value");
20931 return;
20932 }
20933
20934 /* For macho, check to see if we need a stub. */
20935 if (TARGET_MACHO)
20936 {
20937 const char *name = XSTR (x, 0);
20938 #if TARGET_MACHO
20939 if (darwin_emit_branch_islands
20940 && MACHOPIC_INDIRECT
20941 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20942 name = machopic_indirection_name (x, /*stub_p=*/true);
20943 #endif
20944 assemble_name (file, name);
20945 }
20946 else if (!DOT_SYMBOLS)
20947 assemble_name (file, XSTR (x, 0));
20948 else
20949 rs6000_output_function_entry (file, XSTR (x, 0));
20950 return;
20951
20952 case 'Z':
20953 /* Like 'L', for last word of TImode/PTImode. */
20954 if (REG_P (x))
20955 fputs (reg_names[REGNO (x) + 3], file);
20956 else if (MEM_P (x))
20957 {
20958 machine_mode mode = GET_MODE (x);
20959 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20960 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20961 output_address (mode, plus_constant (Pmode,
20962 XEXP (XEXP (x, 0), 0), 12));
20963 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20964 output_address (mode, plus_constant (Pmode,
20965 XEXP (XEXP (x, 0), 0), 12));
20966 else
20967 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20968 if (small_data_operand (x, GET_MODE (x)))
20969 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20970 reg_names[SMALL_DATA_REG]);
20971 }
20972 return;
20973
20974 /* Print AltiVec memory operand. */
20975 case 'y':
20976 {
20977 rtx tmp;
20978
20979 gcc_assert (MEM_P (x));
20980
20981 tmp = XEXP (x, 0);
20982
20983 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
20984 && GET_CODE (tmp) == AND
20985 && CONST_INT_P (XEXP (tmp, 1))
20986 && INTVAL (XEXP (tmp, 1)) == -16)
20987 tmp = XEXP (tmp, 0);
20988 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
20989 && GET_CODE (tmp) == PRE_MODIFY)
20990 tmp = XEXP (tmp, 1);
20991 if (REG_P (tmp))
20992 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
20993 else
20994 {
20995 if (GET_CODE (tmp) != PLUS
20996 || !REG_P (XEXP (tmp, 0))
20997 || !REG_P (XEXP (tmp, 1)))
20998 {
20999 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21000 break;
21001 }
21002
21003 if (REGNO (XEXP (tmp, 0)) == 0)
21004 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21005 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21006 else
21007 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21008 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21009 }
21010 break;
21011 }
21012
21013 case 0:
21014 if (REG_P (x))
21015 fprintf (file, "%s", reg_names[REGNO (x)]);
21016 else if (MEM_P (x))
21017 {
21018 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21019 know the width from the mode. */
21020 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21021 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21022 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21023 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21024 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21025 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21026 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21027 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21028 else
21029 output_address (GET_MODE (x), XEXP (x, 0));
21030 }
21031 else if (toc_relative_expr_p (x, false,
21032 &tocrel_base_oac, &tocrel_offset_oac))
21033 /* This hack along with a corresponding hack in
21034 rs6000_output_addr_const_extra arranges to output addends
21035 where the assembler expects to find them. eg.
21036 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21037 without this hack would be output as "x@toc+4". We
21038 want "x+4@toc". */
21039 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21040 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21041 output_addr_const (file, XVECEXP (x, 0, 0));
21042 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21043 output_addr_const (file, XVECEXP (x, 0, 1));
21044 else
21045 output_addr_const (file, x);
21046 return;
21047
21048 case '&':
21049 if (const char *name = get_some_local_dynamic_name ())
21050 assemble_name (file, name);
21051 else
21052 output_operand_lossage ("'%%&' used without any "
21053 "local dynamic TLS references");
21054 return;
21055
21056 default:
21057 output_operand_lossage ("invalid %%xn code");
21058 }
21059 }
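
/* A few worked examples of the codes above (illustrative, not part of the
   original source): given (reg:DI 5), "%L0" prints the second-word
   register "6"; given (const_int 0x12340000), "%u0" prints "0x1234"
   because the low halfword is zero; given CR register cr2, "%R0" prints
   the mtcrf mask 128 >> 2 == 32.  */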
21060 \f
21061 /* Print the address of an operand. */
21062
21063 void
21064 print_operand_address (FILE *file, rtx x)
21065 {
21066 if (REG_P (x))
21067 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21068 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21069 || GET_CODE (x) == LABEL_REF)
21070 {
21071 output_addr_const (file, x);
21072 if (small_data_operand (x, GET_MODE (x)))
21073 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21074 reg_names[SMALL_DATA_REG]);
21075 else
21076 gcc_assert (!TARGET_TOC);
21077 }
21078 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21079 && REG_P (XEXP (x, 1)))
21080 {
21081 if (REGNO (XEXP (x, 0)) == 0)
21082 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21083 reg_names[ REGNO (XEXP (x, 0)) ]);
21084 else
21085 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21086 reg_names[ REGNO (XEXP (x, 1)) ]);
21087 }
21088 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21089 && CONST_INT_P (XEXP (x, 1)))
21090 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21091 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21092 #if TARGET_MACHO
21093 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21094 && CONSTANT_P (XEXP (x, 1)))
21095 {
21096 fprintf (file, "lo16(");
21097 output_addr_const (file, XEXP (x, 1));
21098 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21099 }
21100 #endif
21101 #if TARGET_ELF
21102 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21103 && CONSTANT_P (XEXP (x, 1)))
21104 {
21105 output_addr_const (file, XEXP (x, 1));
21106 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21107 }
21108 #endif
21109 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21110 {
21111 /* This hack along with a corresponding hack in
21112 rs6000_output_addr_const_extra arranges to output addends
21113 where the assembler expects to find them. eg.
21114 (lo_sum (reg 9)
21115 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21116 without this hack would be output as "x@toc+8@l(9)". We
21117 want "x+8@toc@l(9)". */
21118 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21119 if (GET_CODE (x) == LO_SUM)
21120 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21121 else
21122 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21123 }
21124 else
21125 output_addr_const (file, x);
21126 }
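
/* Example output (illustrative): (reg 9) prints as "0(9)",
   (plus (reg 9) (const_int 16)) as "16(9)", and an indexed
   (plus (reg 9) (reg 10)) as "9,10".  If the first register is r0, the
   operands are swapped, since r0 in the base slot of an indexed address
   reads as the literal value zero.  Register names assume the default
   (non -mregnames) spellings.  */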
21127 \f
21128 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21129
21130 static bool
21131 rs6000_output_addr_const_extra (FILE *file, rtx x)
21132 {
21133 if (GET_CODE (x) == UNSPEC)
21134 switch (XINT (x, 1))
21135 {
21136 case UNSPEC_TOCREL:
21137 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21138 && REG_P (XVECEXP (x, 0, 1))
21139 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21140 output_addr_const (file, XVECEXP (x, 0, 0));
21141 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21142 {
21143 if (INTVAL (tocrel_offset_oac) >= 0)
21144 fprintf (file, "+");
21145 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21146 }
21147 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21148 {
21149 putc ('-', file);
21150 assemble_name (file, toc_label_name);
21151 need_toc_init = 1;
21152 }
21153 else if (TARGET_ELF)
21154 fputs ("@toc", file);
21155 return true;
21156
21157 #if TARGET_MACHO
21158 case UNSPEC_MACHOPIC_OFFSET:
21159 output_addr_const (file, XVECEXP (x, 0, 0));
21160 putc ('-', file);
21161 machopic_output_function_base_name (file);
21162 return true;
21163 #endif
21164 }
21165 return false;
21166 }
21167 \f
21168 /* Target hook for assembling integer objects. The PowerPC version has
21169 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21170 is defined. It also needs to handle DI-mode objects on 64-bit
21171 targets. */
21172
21173 static bool
21174 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21175 {
21176 #ifdef RELOCATABLE_NEEDS_FIXUP
21177 /* Special handling for SI values. */
21178 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21179 {
21180 static int recurse = 0;
21181
21182 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21183 the .fixup section. Since the TOC section is already relocated, we
21184 don't need to mark it here. We used to skip the text section, but it
21185 should never be valid for relocated addresses to be placed in the text
21186 section. */
21187 if (DEFAULT_ABI == ABI_V4
21188 && (TARGET_RELOCATABLE || flag_pic > 1)
21189 && in_section != toc_section
21190 && !recurse
21191 && !CONST_SCALAR_INT_P (x)
21192 && CONSTANT_P (x))
21193 {
21194 char buf[256];
21195
21196 recurse = 1;
21197 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21198 fixuplabelno++;
21199 ASM_OUTPUT_LABEL (asm_out_file, buf);
21200 fprintf (asm_out_file, "\t.long\t(");
21201 output_addr_const (asm_out_file, x);
21202 fprintf (asm_out_file, ")@fixup\n");
21203 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21204 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21205 fprintf (asm_out_file, "\t.long\t");
21206 assemble_name (asm_out_file, buf);
21207 fprintf (asm_out_file, "\n\t.previous\n");
21208 recurse = 0;
21209 return true;
21210 }
21211 /* Remove initial .'s to turn a -mcall-aixdesc function
21212 address into the address of the descriptor, not the function
21213 itself. */
21214 else if (SYMBOL_REF_P (x)
21215 && XSTR (x, 0)[0] == '.'
21216 && DEFAULT_ABI == ABI_AIX)
21217 {
21218 const char *name = XSTR (x, 0);
21219 while (*name == '.')
21220 name++;
21221
21222 fprintf (asm_out_file, "\t.long\t%s\n", name);
21223 return true;
21224 }
21225 }
21226 #endif /* RELOCATABLE_NEEDS_FIXUP */
21227 return default_assemble_integer (x, size, aligned_p);
21228 }
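
/* Sketch of the -mrelocatable fixup output emitted above (the .LCP label
   number comes from fixuplabelno, so the exact name varies):

     .LCP0:
             .long (sym)@fixup
             .section ".fixup","aw"
             .align 2
             .long .LCP0
             .previous

   The startup code walks the .fixup entries to relocate the word at
   .LCP0 at load time.  */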
21229
21230 /* Return a template string for assembly to emit when making an
21231 external call. FUNOP is the call mem argument operand number. */
21232
21233 static const char *
21234 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21235 {
21236 /* -Wformat-overflow workaround, without which gcc thinks that %u
21237 might produce 10 digits. */
21238 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21239
21240 char arg[12];
21241 arg[0] = 0;
21242 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21243 {
21244 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21245 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21246 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21247 sprintf (arg, "(%%&@tlsld)");
21248 else
21249 gcc_unreachable ();
21250 }
21251
21252 /* The magic 32768 offset here corresponds to the offset of
21253 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21254 char z[11];
21255 sprintf (z, "%%z%u%s", funop,
21256 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21257 ? "+32768" : ""));
21258
21259 static char str[32]; /* 2 spare */
21260 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21261 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21262 sibcall ? "" : "\n\tnop");
21263 else if (DEFAULT_ABI == ABI_V4)
21264 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21265 flag_pic ? "@plt" : "");
21266 #if TARGET_MACHO
21267 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21268 else if (DEFAULT_ABI == ABI_DARWIN)
21269 {
21270 /* The cookie is in operand func+2. */
21271 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21272 int cookie = INTVAL (operands[funop + 2]);
21273 if (cookie & CALL_LONG)
21274 {
21275 tree funname = get_identifier (XSTR (operands[funop], 0));
21276 tree labelname = get_prev_label (funname);
21277 gcc_checking_assert (labelname && !sibcall);
21278
21279 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21280 instruction will reach 'foo', otherwise link as 'bl L42'".
21281 "L42" should be a 'branch island', that will do a far jump to
21282 'foo'. Branch islands are generated in
21283 macho_branch_islands(). */
21284 sprintf (str, "jbsr %%z%u,%.10s", funop,
21285 IDENTIFIER_POINTER (labelname));
21286 }
21287 else
21288 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21289 after the call. */
21290 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21291 }
21292 #endif
21293 else
21294 gcc_unreachable ();
21295 return str;
21296 }
21297
21298 const char *
21299 rs6000_call_template (rtx *operands, unsigned int funop)
21300 {
21301 return rs6000_call_template_1 (operands, funop, false);
21302 }
21303
21304 const char *
21305 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21306 {
21307 return rs6000_call_template_1 (operands, funop, true);
21308 }
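
/* Example templates (illustrative): with FUNOP == 0, an AIX/ELFv2 call
   yields "bl %z0\n\tnop" (the nop is the TOC-restore slot patched by the
   linker), an ABI_V4 -fPIC call yields "bl %z0@plt", and a sibcall drops
   both the "l" suffix and the nop, giving "b %z0".  */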
21309
21310 /* As above, for indirect calls. */
21311
21312 static const char *
21313 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21314 bool sibcall)
21315 {
21316 /* -Wformat-overflow workaround, without which gcc thinks that %u
21317 might produce 10 digits. Note that -Wformat-overflow will not
21318 currently warn here for str[], so do not rely on a warning to
21319 ensure str[] is correctly sized. */
21320 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21321
21322 /* Currently, funop is either 0 or 1. The maximum string is always
21323 a !speculate 64-bit __tls_get_addr call.
21324
21325 ABI_AIX:
21326 . 9 ld 2,%3\n\t
21327 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21328 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21329 . 9 crset 2\n\t
21330 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21331 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21332 . 10 beq%T1l-\n\t
21333 . 10 ld 2,%4(1)
21334 .---
21335 .151
21336
21337 ABI_ELFv2:
21338 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21339 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21340 . 9 crset 2\n\t
21341 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21342 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21343 . 10 beq%T1l-\n\t
21344 . 10 ld 2,%3(1)
21345 .---
21346 .142
21347
21348 ABI_V4:
21349 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21350 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21351 . 9 crset 2\n\t
21352 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21353 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21354 . 8 beq%T1l-
21355 .---
21356 .141 */
21357 static char str[160]; /* 8 spare */
21358 char *s = str;
21359 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21360
21361 if (DEFAULT_ABI == ABI_AIX)
21362 s += sprintf (s,
21363 "l%s 2,%%%u\n\t",
21364 ptrload, funop + 2);
21365
21366 /* We don't need the extra code to stop indirect call speculation if
21367 calling via LR. */
21368 bool speculate = (TARGET_MACHO
21369 || rs6000_speculate_indirect_jumps
21370 || (REG_P (operands[funop])
21371 && REGNO (operands[funop]) == LR_REGNO));
21372
21373 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21374 {
21375 const char *rel64 = TARGET_64BIT ? "64" : "";
21376 char tls[29];
21377 tls[0] = 0;
21378 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21379 {
21380 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21381 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21382 rel64, funop + 1);
21383 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21384 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21385 rel64);
21386 else
21387 gcc_unreachable ();
21388 }
21389
21390 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21391 && flag_pic == 2 ? "+32768" : "");
21392 if (!speculate)
21393 {
21394 s += sprintf (s,
21395 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21396 tls, rel64, funop, addend);
21397 s += sprintf (s, "crset 2\n\t");
21398 }
21399 s += sprintf (s,
21400 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21401 tls, rel64, funop, addend);
21402 }
21403 else if (!speculate)
21404 s += sprintf (s, "crset 2\n\t");
21405
21406 if (DEFAULT_ABI == ABI_AIX)
21407 {
21408 if (speculate)
21409 sprintf (s,
21410 "b%%T%ul\n\t"
21411 "l%s 2,%%%u(1)",
21412 funop, ptrload, funop + 3);
21413 else
21414 sprintf (s,
21415 "beq%%T%ul-\n\t"
21416 "l%s 2,%%%u(1)",
21417 funop, ptrload, funop + 3);
21418 }
21419 else if (DEFAULT_ABI == ABI_ELFv2)
21420 {
21421 if (speculate)
21422 sprintf (s,
21423 "b%%T%ul\n\t"
21424 "l%s 2,%%%u(1)",
21425 funop, ptrload, funop + 2);
21426 else
21427 sprintf (s,
21428 "beq%%T%ul-\n\t"
21429 "l%s 2,%%%u(1)",
21430 funop, ptrload, funop + 2);
21431 }
21432 else
21433 {
21434 if (speculate)
21435 sprintf (s,
21436 "b%%T%u%s",
21437 funop, sibcall ? "" : "l");
21438 else
21439 sprintf (s,
21440 "beq%%T%u%s-%s",
21441 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21442 }
21443 return str;
21444 }
21445
21446 const char *
21447 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21448 {
21449 return rs6000_indirect_call_template_1 (operands, funop, false);
21450 }
21451
21452 const char *
21453 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21454 {
21455 return rs6000_indirect_call_template_1 (operands, funop, true);
21456 }
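
/* Example (illustrative): for a 64-bit ELFv2 indirect call with FUNOP == 1
   and speculation allowed, the template is "b%T1l\n\tld 2,%3(1)", i.e. a
   bctrl (or blrl) followed by the TOC restore.  With
   -mno-speculate-indirect-jumps, "crset 2" plus the "beq%T1l-" form is
   used instead so the branch cannot be taken speculatively.  */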
21457
21458 #if HAVE_AS_PLTSEQ
21459 /* Output indirect call insns.
21460 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
21461 const char *
21462 rs6000_pltseq_template (rtx *operands, int which)
21463 {
21464 const char *rel64 = TARGET_64BIT ? "64" : "";
21465 char tls[28];
21466 tls[0] = 0;
21467 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21468 {
21469 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21470 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21471 rel64);
21472 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21473 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21474 rel64);
21475 else
21476 gcc_unreachable ();
21477 }
21478
21479 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21480 static char str[96]; /* 15 spare */
21481 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21482 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21483 && flag_pic == 2 ? "+32768" : "");
21484 switch (which)
21485 {
21486 case 0:
21487 sprintf (str,
21488 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21489 "st%s",
21490 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21491 break;
21492 case 1:
21493 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21494 sprintf (str,
21495 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21496 "lis %%0,0",
21497 tls, off, rel64);
21498 else
21499 sprintf (str,
21500 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21501 "addis %%0,%%1,0",
21502 tls, off, rel64, addend);
21503 break;
21504 case 2:
21505 sprintf (str,
21506 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21507 "l%s %%0,0(%%1)",
21508 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21509 TARGET_64BIT ? "d" : "wz");
21510 break;
21511 case 3:
21512 sprintf (str,
21513 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21514 "mtctr %%1",
21515 tls, rel64, addend);
21516 break;
21517 default:
21518 gcc_unreachable ();
21519 }
21520 return str;
21521 }
21522 #endif
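
/* Example (illustrative): for WHICH == 1 (plt16_ha), the code above emits
   ".reloc .,R_PPC64_PLT16_HA,%z2" (".+2" on big-endian, where the 16-bit
   immediate sits in the second halfword of the instruction) followed by
   "addis %0,%1,0"; the relocation supplies the real displacement and lets
   the linker relax the whole PLT sequence when the callee turns out to be
   local.  */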
21523
21524 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21525 /* Emit an assembler directive to set symbol visibility for DECL to
21526 VISIBILITY_TYPE. */
21527
21528 static void
21529 rs6000_assemble_visibility (tree decl, int vis)
21530 {
21531 if (TARGET_XCOFF)
21532 return;
21533
21534 /* Functions need to have their entry point symbol visibility set as
21535 well as their descriptor symbol visibility. */
21536 if (DEFAULT_ABI == ABI_AIX
21537 && DOT_SYMBOLS
21538 && TREE_CODE (decl) == FUNCTION_DECL)
21539 {
21540 static const char * const visibility_types[] = {
21541 NULL, "protected", "hidden", "internal"
21542 };
21543
21544 const char *name, *type;
21545
21546 name = ((* targetm.strip_name_encoding)
21547 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21548 type = visibility_types[vis];
21549
21550 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21551 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21552 }
21553 else
21554 default_assemble_visibility (decl, vis);
21555 }
21556 #endif
21557 \f
21558 enum rtx_code
21559 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21560 {
21561 /* Reversal of FP compares takes care -- an ordered compare
21562 becomes an unordered compare and vice versa. */
21563 if (mode == CCFPmode
21564 && (!flag_finite_math_only
21565 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21566 || code == UNEQ || code == LTGT))
21567 return reverse_condition_maybe_unordered (code);
21568 else
21569 return reverse_condition (code);
21570 }
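
/* Example (illustrative): reversing GE in CCFPmode without
   -ffinite-math-only must yield UNLT rather than LT, because
   "not (a >= b)" is also true when either operand is a NaN; in CCmode the
   plain reverse_condition (GE -> LT) suffices.  */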
21571
21572 /* Generate a compare for CODE. Return a brand-new rtx that
21573 represents the result of the compare. */
21574
21575 static rtx
21576 rs6000_generate_compare (rtx cmp, machine_mode mode)
21577 {
21578 machine_mode comp_mode;
21579 rtx compare_result;
21580 enum rtx_code code = GET_CODE (cmp);
21581 rtx op0 = XEXP (cmp, 0);
21582 rtx op1 = XEXP (cmp, 1);
21583
21584 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21585 comp_mode = CCmode;
21586 else if (FLOAT_MODE_P (mode))
21587 comp_mode = CCFPmode;
21588 else if (code == GTU || code == LTU
21589 || code == GEU || code == LEU)
21590 comp_mode = CCUNSmode;
21591 else if ((code == EQ || code == NE)
21592 && unsigned_reg_p (op0)
21593 && (unsigned_reg_p (op1)
21594 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21595 /* These are unsigned values; perhaps there will be a later
21596 ordering compare that can be shared with this one. */
21597 comp_mode = CCUNSmode;
21598 else
21599 comp_mode = CCmode;
21600
21601 /* If we have an unsigned compare, make sure we don't have a signed value as
21602 an immediate. */
21603 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21604 && INTVAL (op1) < 0)
21605 {
21606 op0 = copy_rtx_if_shared (op0);
21607 op1 = force_reg (GET_MODE (op0), op1);
21608 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21609 }
21610
21611 /* First, the compare. */
21612 compare_result = gen_reg_rtx (comp_mode);
21613
21614 /* IEEE 128-bit support in VSX registers when we do not have hardware
21615 support. */
21616 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21617 {
21618 rtx libfunc = NULL_RTX;
21619 bool check_nan = false;
21620 rtx dest;
21621
21622 switch (code)
21623 {
21624 case EQ:
21625 case NE:
21626 libfunc = optab_libfunc (eq_optab, mode);
21627 break;
21628
21629 case GT:
21630 case GE:
21631 libfunc = optab_libfunc (ge_optab, mode);
21632 break;
21633
21634 case LT:
21635 case LE:
21636 libfunc = optab_libfunc (le_optab, mode);
21637 break;
21638
21639 case UNORDERED:
21640 case ORDERED:
21641 libfunc = optab_libfunc (unord_optab, mode);
21642 code = (code == UNORDERED) ? NE : EQ;
21643 break;
21644
21645 case UNGE:
21646 case UNGT:
21647 check_nan = true;
21648 libfunc = optab_libfunc (ge_optab, mode);
21649 code = (code == UNGE) ? GE : GT;
21650 break;
21651
21652 case UNLE:
21653 case UNLT:
21654 check_nan = true;
21655 libfunc = optab_libfunc (le_optab, mode);
21656 code = (code == UNLE) ? LE : LT;
21657 break;
21658
21659 case UNEQ:
21660 case LTGT:
21661 check_nan = true;
21662 libfunc = optab_libfunc (eq_optab, mode);
21663 code = (code == UNEQ) ? EQ : NE;
21664 break;
21665
21666 default:
21667 gcc_unreachable ();
21668 }
21669
21670 gcc_assert (libfunc);
21671
21672 if (!check_nan)
21673 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21674 SImode, op0, mode, op1, mode);
21675
21676 /* The library signals an exception for signalling NaNs, so we need to
21677 handle isgreater, etc. by first checking isordered. */
21678 else
21679 {
21680 rtx ne_rtx, normal_dest, unord_dest;
21681 rtx unord_func = optab_libfunc (unord_optab, mode);
21682 rtx join_label = gen_label_rtx ();
21683 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21684 rtx unord_cmp = gen_reg_rtx (comp_mode);
21685
21686
21687 /* Test for either value being a NaN. */
21688 gcc_assert (unord_func);
21689 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21690 SImode, op0, mode, op1, mode);
21691
21692 /* Set value (1) if either value is a NaN, and jump to the join
21693 label. */
21694 dest = gen_reg_rtx (SImode);
21695 emit_move_insn (dest, const1_rtx);
21696 emit_insn (gen_rtx_SET (unord_cmp,
21697 gen_rtx_COMPARE (comp_mode, unord_dest,
21698 const0_rtx)));
21699
21700 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21701 emit_jump_insn (gen_rtx_SET (pc_rtx,
21702 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21703 join_ref,
21704 pc_rtx)));
21705
21706 /* Do the normal comparison, knowing that the values are not
21707 NaNs. */
21708 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21709 SImode, op0, mode, op1, mode);
21710
21711 emit_insn (gen_cstoresi4 (dest,
21712 gen_rtx_fmt_ee (code, SImode, normal_dest,
21713 const0_rtx),
21714 normal_dest, const0_rtx));
21715
21716 /* Join NaN and non-NaN paths. Compare dest against 0. */
21717 emit_label (join_label);
21718 code = NE;
21719 }
21720
21721 emit_insn (gen_rtx_SET (compare_result,
21722 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21723 }
21724
21725 else
21726 {
21727 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21728 CLOBBERs to match cmptf_internal2 pattern. */
21729 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21730 && FLOAT128_IBM_P (GET_MODE (op0))
21731 && TARGET_HARD_FLOAT)
21732 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21733 gen_rtvec (10,
21734 gen_rtx_SET (compare_result,
21735 gen_rtx_COMPARE (comp_mode, op0, op1)),
21736 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21737 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21738 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21739 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21740 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21741 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21742 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21743 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21744 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21745 else if (GET_CODE (op1) == UNSPEC
21746 && XINT (op1, 1) == UNSPEC_SP_TEST)
21747 {
21748 rtx op1b = XVECEXP (op1, 0, 0);
21749 comp_mode = CCEQmode;
21750 compare_result = gen_reg_rtx (CCEQmode);
21751 if (TARGET_64BIT)
21752 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21753 else
21754 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21755 }
21756 else
21757 emit_insn (gen_rtx_SET (compare_result,
21758 gen_rtx_COMPARE (comp_mode, op0, op1)));
21759 }
21760
21761 /* Some kinds of FP comparisons need an OR operation;
21762 under flag_finite_math_only we don't bother. */
21763 if (FLOAT_MODE_P (mode)
21764 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21765 && !flag_finite_math_only
21766 && (code == LE || code == GE
21767 || code == UNEQ || code == LTGT
21768 || code == UNGT || code == UNLT))
21769 {
21770 enum rtx_code or1, or2;
21771 rtx or1_rtx, or2_rtx, compare2_rtx;
21772 rtx or_result = gen_reg_rtx (CCEQmode);
21773
21774 switch (code)
21775 {
21776 case LE: or1 = LT; or2 = EQ; break;
21777 case GE: or1 = GT; or2 = EQ; break;
21778 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21779 case LTGT: or1 = LT; or2 = GT; break;
21780 case UNGT: or1 = UNORDERED; or2 = GT; break;
21781 case UNLT: or1 = UNORDERED; or2 = LT; break;
21782 default: gcc_unreachable ();
21783 }
21784 validate_condition_mode (or1, comp_mode);
21785 validate_condition_mode (or2, comp_mode);
21786 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21787 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21788 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21789 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21790 const_true_rtx);
21791 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21792
21793 compare_result = or_result;
21794 code = EQ;
21795 }
21796
21797 validate_condition_mode (code, GET_MODE (compare_result));
21798
21799 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21800 }
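
/* Example (illustrative): a floating-point LE comparison cannot be tested
   with a single CR bit, so the code above emits a second CCEQ compare of
   (ior (lt ...) (eq ...)) against const_true; on PowerPC that pattern is
   matched by a cror merging the LT and EQ bits, and the final result is
   an EQ test of the CCEQ register.  */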
21801
21802 \f
21803 /* Return the diagnostic message string if the binary operation OP is
21804 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21805
21806 static const char*
21807 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21808 const_tree type1,
21809 const_tree type2)
21810 {
21811 machine_mode mode1 = TYPE_MODE (type1);
21812 machine_mode mode2 = TYPE_MODE (type2);
21813
21814 /* For complex modes, use the inner type. */
21815 if (COMPLEX_MODE_P (mode1))
21816 mode1 = GET_MODE_INNER (mode1);
21817
21818 if (COMPLEX_MODE_P (mode2))
21819 mode2 = GET_MODE_INNER (mode2);
21820
21821 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21822 double to intermix unless -mfloat128-convert. */
21823 if (mode1 == mode2)
21824 return NULL;
21825
21826 if (!TARGET_FLOAT128_CVT)
21827 {
21828 if ((mode1 == KFmode && mode2 == IFmode)
21829 || (mode1 == IFmode && mode2 == KFmode))
21830 return N_("__float128 and __ibm128 cannot be used in the same "
21831 "expression");
21832
21833 if (TARGET_IEEEQUAD
21834 && ((mode1 == IFmode && mode2 == TFmode)
21835 || (mode1 == TFmode && mode2 == IFmode)))
21836 return N_("__ibm128 and long double cannot be used in the same "
21837 "expression");
21838
21839 if (!TARGET_IEEEQUAD
21840 && ((mode1 == KFmode && mode2 == TFmode)
21841 || (mode1 == TFmode && mode2 == KFmode)))
21842 return N_("__float128 and long double cannot be used in the same "
21843 "expression");
21844 }
21845
21846 return NULL;
21847 }
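
/* Example (illustrative): without -mfloat128-convert the following is
   rejected with "__float128 and __ibm128 cannot be used in the same
   expression":

     __float128 a;
     __ibm128 b;
     ... a + b ...

   KFmode and IFmode encode 128-bit floats incompatibly (IEEE binary128
   vs. a pair of doubles), so no implicit conversion is attempted.  */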
21848
21849 \f
21850 /* Expand floating point conversion to/from __float128 and __ibm128. */
21851
21852 void
21853 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21854 {
21855 machine_mode dest_mode = GET_MODE (dest);
21856 machine_mode src_mode = GET_MODE (src);
21857 convert_optab cvt = unknown_optab;
21858 bool do_move = false;
21859 rtx libfunc = NULL_RTX;
21860 rtx dest2;
21861 typedef rtx (*rtx_2func_t) (rtx, rtx);
21862 rtx_2func_t hw_convert = (rtx_2func_t)0;
21863 size_t kf_or_tf;
21864
21865 struct hw_conv_t {
21866 rtx_2func_t from_df;
21867 rtx_2func_t from_sf;
21868 rtx_2func_t from_si_sign;
21869 rtx_2func_t from_si_uns;
21870 rtx_2func_t from_di_sign;
21871 rtx_2func_t from_di_uns;
21872 rtx_2func_t to_df;
21873 rtx_2func_t to_sf;
21874 rtx_2func_t to_si_sign;
21875 rtx_2func_t to_si_uns;
21876 rtx_2func_t to_di_sign;
21877 rtx_2func_t to_di_uns;
21878 } hw_conversions[2] = {
21879 /* conversions to/from KFmode */
21880 {
21881 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21882 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21883 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21884 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21885 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21886 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21887 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21888 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21889 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21890 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21891 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21892 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21893 },
21894
21895 /* conversions to/from TFmode */
21896 {
21897 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21898 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21899 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21900 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21901 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21902 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21903 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21904 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21905 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21906 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21907 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21908 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21909 },
21910 };
21911
21912 if (dest_mode == src_mode)
21913 gcc_unreachable ();
21914
21915 /* Eliminate memory operations. */
21916 if (MEM_P (src))
21917 src = force_reg (src_mode, src);
21918
21919 if (MEM_P (dest))
21920 {
21921 rtx tmp = gen_reg_rtx (dest_mode);
21922 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21923 rs6000_emit_move (dest, tmp, dest_mode);
21924 return;
21925 }
21926
21927 /* Convert to IEEE 128-bit floating point. */
21928 if (FLOAT128_IEEE_P (dest_mode))
21929 {
21930 if (dest_mode == KFmode)
21931 kf_or_tf = 0;
21932 else if (dest_mode == TFmode)
21933 kf_or_tf = 1;
21934 else
21935 gcc_unreachable ();
21936
21937 switch (src_mode)
21938 {
21939 case E_DFmode:
21940 cvt = sext_optab;
21941 hw_convert = hw_conversions[kf_or_tf].from_df;
21942 break;
21943
21944 case E_SFmode:
21945 cvt = sext_optab;
21946 hw_convert = hw_conversions[kf_or_tf].from_sf;
21947 break;
21948
21949 case E_KFmode:
21950 case E_IFmode:
21951 case E_TFmode:
21952 if (FLOAT128_IBM_P (src_mode))
21953 cvt = sext_optab;
21954 else
21955 do_move = true;
21956 break;
21957
21958 case E_SImode:
21959 if (unsigned_p)
21960 {
21961 cvt = ufloat_optab;
21962 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21963 }
21964 else
21965 {
21966 cvt = sfloat_optab;
21967 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21968 }
21969 break;
21970
21971 case E_DImode:
21972 if (unsigned_p)
21973 {
21974 cvt = ufloat_optab;
21975 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
21976 }
21977 else
21978 {
21979 cvt = sfloat_optab;
21980 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
21981 }
21982 break;
21983
21984 default:
21985 gcc_unreachable ();
21986 }
21987 }
21988
21989 /* Convert from IEEE 128-bit floating point. */
21990 else if (FLOAT128_IEEE_P (src_mode))
21991 {
21992 if (src_mode == KFmode)
21993 kf_or_tf = 0;
21994 else if (src_mode == TFmode)
21995 kf_or_tf = 1;
21996 else
21997 gcc_unreachable ();
21998
21999 switch (dest_mode)
22000 {
22001 case E_DFmode:
22002 cvt = trunc_optab;
22003 hw_convert = hw_conversions[kf_or_tf].to_df;
22004 break;
22005
22006 case E_SFmode:
22007 cvt = trunc_optab;
22008 hw_convert = hw_conversions[kf_or_tf].to_sf;
22009 break;
22010
22011 case E_KFmode:
22012 case E_IFmode:
22013 case E_TFmode:
22014 if (FLOAT128_IBM_P (dest_mode))
22015 cvt = trunc_optab;
22016 else
22017 do_move = true;
22018 break;
22019
22020 case E_SImode:
22021 if (unsigned_p)
22022 {
22023 cvt = ufix_optab;
22024 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22025 }
22026 else
22027 {
22028 cvt = sfix_optab;
22029 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22030 }
22031 break;
22032
22033 case E_DImode:
22034 if (unsigned_p)
22035 {
22036 cvt = ufix_optab;
22037 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22038 }
22039 else
22040 {
22041 cvt = sfix_optab;
22042 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22043 }
22044 break;
22045
22046 default:
22047 gcc_unreachable ();
22048 }
22049 }
22050
22051 /* Both IBM format. */
22052 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22053 do_move = true;
22054
22055 else
22056 gcc_unreachable ();
22057
22058 /* Handle conversion between TFmode/KFmode/IFmode. */
22059 if (do_move)
22060 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22061
22062 /* Handle conversion if we have hardware support. */
22063 else if (TARGET_FLOAT128_HW && hw_convert)
22064 emit_insn ((hw_convert) (dest, src));
22065
22066 /* Call an external function to do the conversion. */
22067 else if (cvt != unknown_optab)
22068 {
22069 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22070 gcc_assert (libfunc != NULL_RTX);
22071
22072 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22073 src, src_mode);
22074
22075 gcc_assert (dest2 != NULL_RTX);
22076 if (!rtx_equal_p (dest, dest2))
22077 emit_move_insn (dest, dest2);
22078 }
22079
22080 else
22081 gcc_unreachable ();
22082
22083 return;
22084 }
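
/* Example (illustrative): converting DFmode to KFmode selects sext_optab
   and, with TARGET_FLOAT128_HW, the gen_extenddfkf2_hw expander from the
   table above; without hardware support the sext_optab libfunc is called
   instead (in libgcc this is __extenddfkf2).  */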
22085
22086 \f
22087 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22088 can be used as that dest register. Return the dest register. */
22089
22090 rtx
22091 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22092 {
22093 if (op2 == const0_rtx)
22094 return op1;
22095
22096 if (GET_CODE (scratch) == SCRATCH)
22097 scratch = gen_reg_rtx (mode);
22098
22099 if (logical_operand (op2, mode))
22100 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22101 else
22102 emit_insn (gen_rtx_SET (scratch,
22103 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22104
22105 return scratch;
22106 }
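/* Illustrative example (register numbers are arbitrary): comparing r3
   against 10 can emit "xori 0,3,10", since 10 is a valid logical immediate,
   while comparing against -5 emits "addi 0,3,5"; either way the scratch
   register becomes zero exactly when the operands are equal.  */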
22107
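/* Emit a store-flag (sCOND) sequence: set OPERANDS[0] to 1 if the comparison
   OPERANDS[1] holds and to 0 otherwise.  Condition codes with no direct
   encoding are first reversed into a CCEQ compare that is then tested
   against zero.  */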
22108 void
22109 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22110 {
22111 rtx condition_rtx;
22112 machine_mode op_mode;
22113 enum rtx_code cond_code;
22114 rtx result = operands[0];
22115
22116 condition_rtx = rs6000_generate_compare (operands[1], mode);
22117 cond_code = GET_CODE (condition_rtx);
22118
22119 if (cond_code == NE
22120 || cond_code == GE || cond_code == LE
22121 || cond_code == GEU || cond_code == LEU
22122 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22123 {
22124 rtx not_result = gen_reg_rtx (CCEQmode);
22125 rtx not_op, rev_cond_rtx;
22126 machine_mode cc_mode;
22127
22128 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22129
22130 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22131 SImode, XEXP (condition_rtx, 0), const0_rtx);
22132 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22133 emit_insn (gen_rtx_SET (not_result, not_op));
22134 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22135 }
22136
22137 op_mode = GET_MODE (XEXP (operands[1], 0));
22138 if (op_mode == VOIDmode)
22139 op_mode = GET_MODE (XEXP (operands[1], 1));
22140
22141 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22142 {
22143 PUT_MODE (condition_rtx, DImode);
22144 convert_move (result, condition_rtx, 0);
22145 }
22146 else
22147 {
22148 PUT_MODE (condition_rtx, SImode);
22149 emit_insn (gen_rtx_SET (result, condition_rtx));
22150 }
22151 }
22152
22153 /* Emit a conditional branch to the label in OPERANDS[3] when the comparison OPERANDS[0] holds.  */
22154
22155 void
22156 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22157 {
22158 rtx condition_rtx, loc_ref;
22159
22160 condition_rtx = rs6000_generate_compare (operands[0], mode);
22161 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22162 emit_jump_insn (gen_rtx_SET (pc_rtx,
22163 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22164 loc_ref, pc_rtx)));
22165 }
22166
22167 /* Return the string to output a conditional branch to LABEL, which is
22168 the operand template of the label, or NULL if the branch is really a
22169 conditional return.
22170
22171 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22172 condition code register and its mode specifies what kind of
22173 comparison we made.
22174
22175 REVERSED is nonzero if we should reverse the sense of the comparison.
22176
22177 INSN is the insn. */
22178
22179 char *
22180 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22181 {
22182 static char string[64];
22183 enum rtx_code code = GET_CODE (op);
22184 rtx cc_reg = XEXP (op, 0);
22185 machine_mode mode = GET_MODE (cc_reg);
22186 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22187 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22188 int really_reversed = reversed ^ need_longbranch;
22189 char *s = string;
22190 const char *ccode;
22191 const char *pred;
22192 rtx note;
22193
22194 validate_condition_mode (code, mode);
22195
22196   /* Work out which way this really branches.  We could always use
22197      reverse_condition_maybe_unordered here, but handling the CCFPmode
22198      case separately makes the resulting assembly clearer.  */
22199 if (really_reversed)
22200 {
22201 /* Reversal of FP compares takes care -- an ordered compare
22202 becomes an unordered compare and vice versa. */
22203 if (mode == CCFPmode)
22204 code = reverse_condition_maybe_unordered (code);
22205 else
22206 code = reverse_condition (code);
22207 }
22208
22209 switch (code)
22210 {
22211 /* Not all of these are actually distinct opcodes, but
22212 we distinguish them for clarity of the resulting assembler. */
22213 case NE: case LTGT:
22214 ccode = "ne"; break;
22215 case EQ: case UNEQ:
22216 ccode = "eq"; break;
22217 case GE: case GEU:
22218 ccode = "ge"; break;
22219 case GT: case GTU: case UNGT:
22220 ccode = "gt"; break;
22221 case LE: case LEU:
22222 ccode = "le"; break;
22223 case LT: case LTU: case UNLT:
22224 ccode = "lt"; break;
22225 case UNORDERED: ccode = "un"; break;
22226 case ORDERED: ccode = "nu"; break;
22227 case UNGE: ccode = "nl"; break;
22228 case UNLE: ccode = "ng"; break;
22229 default:
22230 gcc_unreachable ();
22231 }
22232
22233 /* Maybe we have a guess as to how likely the branch is. */
22234 pred = "";
22235 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22236 if (note != NULL_RTX)
22237 {
22238 /* PROB is the difference from 50%. */
22239 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22240 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22241
22242 /* Only hint for highly probable/improbable branches on newer cpus when
22243 we have real profile data, as static prediction overrides processor
22244 dynamic prediction. For older cpus we may as well always hint, but
22245 assume not taken for branches that are very close to 50% as a
22246 mispredicted taken branch is more expensive than a
22247 mispredicted not-taken branch. */
22248 if (rs6000_always_hint
22249 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22250 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22251 && br_prob_note_reliable_p (note)))
22252 {
22253 if (abs (prob) > REG_BR_PROB_BASE / 20
22254 && ((prob > 0) ^ need_longbranch))
22255 pred = "+";
22256 else
22257 pred = "-";
22258 }
22259 }
22260
22261 if (label == NULL)
22262 s += sprintf (s, "b%slr%s ", ccode, pred);
22263 else
22264 s += sprintf (s, "b%s%s ", ccode, pred);
22265
22266 /* We need to escape any '%' characters in the reg_names string.
22267 Assume they'd only be the first character.... */
22268 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22269 *s++ = '%';
22270 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22271
22272 if (label != NULL)
22273 {
22274 /* If the branch distance was too far, we may have to use an
22275 unconditional branch to go the distance. */
22276 if (need_longbranch)
22277 s += sprintf (s, ",$+8\n\tb %s", label);
22278 else
22279 s += sprintf (s, ",%s", label);
22280 }
22281
22282 return string;
22283 }
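/* The strings produced look like "beq cr0,L10", or "bne+ cr0,L10" with a
   prediction hint; when LABEL is NULL a conditional return such as
   "beqlr cr0" results, and an out-of-range target becomes the reversed
   branch over an unconditional "b" (the operand spellings here are
   illustrative and depend on reg_names).  */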
22284
22285 /* Emit a VSX or Altivec comparison and return the register holding the mask, or NULL_RTX if CODE cannot be handled directly.  */
22286
22287 static rtx
22288 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22289 {
22290 rtx mask;
22291 machine_mode mode = GET_MODE (op0);
22292
22293 switch (code)
22294 {
22295 default:
22296 break;
22297
22298 case GE:
22299 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22300 return NULL_RTX;
22301 /* FALLTHRU */
22302
22303 case EQ:
22304 case GT:
22305 case GTU:
22306 case ORDERED:
22307 case UNORDERED:
22308 case UNEQ:
22309 case LTGT:
22310 mask = gen_reg_rtx (mode);
22311 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22312 return mask;
22313 }
22314
22315 return NULL_RTX;
22316 }
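/* For instance, a GT comparison on V4SImode maps directly to vcmpgtsw and
   GTU to vcmpgtuw, while codes not listed above (such as LT) return NULL_RTX
   and are synthesized by the caller.  */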
22317
22318 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22319    DMODE is the expected destination mode.  This is a recursive function.  */
22320
22321 static rtx
22322 rs6000_emit_vector_compare (enum rtx_code rcode,
22323 rtx op0, rtx op1,
22324 machine_mode dmode)
22325 {
22326 rtx mask;
22327 bool swap_operands = false;
22328 bool try_again = false;
22329
22330 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22331 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22332
22333 /* See if the comparison works as is. */
22334 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22335 if (mask)
22336 return mask;
22337
22338 switch (rcode)
22339 {
22340 case LT:
22341 rcode = GT;
22342 swap_operands = true;
22343 try_again = true;
22344 break;
22345 case LTU:
22346 rcode = GTU;
22347 swap_operands = true;
22348 try_again = true;
22349 break;
22350 case NE:
22351 case UNLE:
22352 case UNLT:
22353 case UNGE:
22354 case UNGT:
22355 /* Invert condition and try again.
22356 e.g., A != B becomes ~(A==B). */
22357 {
22358 enum rtx_code rev_code;
22359 enum insn_code nor_code;
22360 rtx mask2;
22361
22362 rev_code = reverse_condition_maybe_unordered (rcode);
22363 if (rev_code == UNKNOWN)
22364 return NULL_RTX;
22365
22366 nor_code = optab_handler (one_cmpl_optab, dmode);
22367 if (nor_code == CODE_FOR_nothing)
22368 return NULL_RTX;
22369
22370 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22371 if (!mask2)
22372 return NULL_RTX;
22373
22374 mask = gen_reg_rtx (dmode);
22375 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22376 return mask;
22377 }
22378 break;
22379 case GE:
22380 case GEU:
22381 case LE:
22382 case LEU:
22383 /* Try GT/GTU/LT/LTU OR EQ */
22384 {
22385 rtx c_rtx, eq_rtx;
22386 enum insn_code ior_code;
22387 enum rtx_code new_code;
22388
22389 switch (rcode)
22390 {
22391 case GE:
22392 new_code = GT;
22393 break;
22394
22395 case GEU:
22396 new_code = GTU;
22397 break;
22398
22399 case LE:
22400 new_code = LT;
22401 break;
22402
22403 case LEU:
22404 new_code = LTU;
22405 break;
22406
22407 default:
22408 gcc_unreachable ();
22409 }
22410
22411 ior_code = optab_handler (ior_optab, dmode);
22412 if (ior_code == CODE_FOR_nothing)
22413 return NULL_RTX;
22414
22415 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22416 if (!c_rtx)
22417 return NULL_RTX;
22418
22419 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22420 if (!eq_rtx)
22421 return NULL_RTX;
22422
22423 mask = gen_reg_rtx (dmode);
22424 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22425 return mask;
22426 }
22427 break;
22428 default:
22429 return NULL_RTX;
22430 }
22431
22432 if (try_again)
22433 {
22434 if (swap_operands)
22435 std::swap (op0, op1);
22436
22437 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22438 if (mask)
22439 return mask;
22440 }
22441
22442 /* You only get two chances. */
22443 return NULL_RTX;
22444 }
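/* So, for example, a LE b is built as (a LT b) | (a EQ b) through the
   GE/GEU/LE/LEU arm above, a NE b as ~(a EQ b) through the one_cmpl path,
   and LT itself by retrying GT with the operands swapped.  */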
22445
22446 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22447 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22448 operands for the relation operation COND. */
22449
22450 int
22451 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22452 rtx cond, rtx cc_op0, rtx cc_op1)
22453 {
22454 machine_mode dest_mode = GET_MODE (dest);
22455 machine_mode mask_mode = GET_MODE (cc_op0);
22456 enum rtx_code rcode = GET_CODE (cond);
22457 machine_mode cc_mode = CCmode;
22458 rtx mask;
22459 rtx cond2;
22460 bool invert_move = false;
22461
22462 if (VECTOR_UNIT_NONE_P (dest_mode))
22463 return 0;
22464
22465 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22466 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22467
22468 switch (rcode)
22469 {
22470       /* Codes with no direct vector compare: rewrite the condition
22471 	 rather than emit a compound test.  */
22472 case NE:
22473 case UNLE:
22474 case UNLT:
22475 case UNGE:
22476 case UNGT:
22477 /* Invert condition and try again.
22478 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22479 invert_move = true;
22480 rcode = reverse_condition_maybe_unordered (rcode);
22481 if (rcode == UNKNOWN)
22482 return 0;
22483 break;
22484
22485 case GE:
22486 case LE:
22487 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22488 {
22489 /* Invert condition to avoid compound test. */
22490 invert_move = true;
22491 rcode = reverse_condition (rcode);
22492 }
22493 break;
22494
22495 case GTU:
22496 case GEU:
22497 case LTU:
22498 case LEU:
22499 /* Mark unsigned tests with CCUNSmode. */
22500 cc_mode = CCUNSmode;
22501
22502 /* Invert condition to avoid compound test if necessary. */
22503 if (rcode == GEU || rcode == LEU)
22504 {
22505 invert_move = true;
22506 rcode = reverse_condition (rcode);
22507 }
22508 break;
22509
22510 default:
22511 break;
22512 }
22513
22514 /* Get the vector mask for the given relational operations. */
22515 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22516
22517 if (!mask)
22518 return 0;
22519
22520 if (invert_move)
22521 std::swap (op_true, op_false);
22522
22523   /* Optimize the case where the mask itself is the answer: each element of the comparison mask is already -1 or 0, so we may avoid loading a constant vector.  */
22524 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22525 && (GET_CODE (op_true) == CONST_VECTOR
22526 || GET_CODE (op_false) == CONST_VECTOR))
22527 {
22528 rtx constant_0 = CONST0_RTX (dest_mode);
22529 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22530
22531 if (op_true == constant_m1 && op_false == constant_0)
22532 {
22533 emit_move_insn (dest, mask);
22534 return 1;
22535 }
22536
22537 else if (op_true == constant_0 && op_false == constant_m1)
22538 {
22539 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22540 return 1;
22541 }
22542
22543 /* If we can't use the vector comparison directly, perhaps we can use
22544 the mask for the true or false fields, instead of loading up a
22545 constant. */
22546 if (op_true == constant_m1)
22547 op_true = mask;
22548
22549 if (op_false == constant_0)
22550 op_false = mask;
22551 }
22552
22553 if (!REG_P (op_true) && !SUBREG_P (op_true))
22554 op_true = force_reg (dest_mode, op_true);
22555
22556 if (!REG_P (op_false) && !SUBREG_P (op_false))
22557 op_false = force_reg (dest_mode, op_false);
22558
22559 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22560 CONST0_RTX (dest_mode));
22561 emit_insn (gen_rtx_SET (dest,
22562 gen_rtx_IF_THEN_ELSE (dest_mode,
22563 cond2,
22564 op_true,
22565 op_false)));
22566 return 1;
22567 }
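/* The final if_then_else normally matches the vector select patterns
   (Altivec vsel / VSX xxsel): each element of MASK is all-ones or all-zeros,
   so a bitwise select picks OP_TRUE bits where the comparison held and
   OP_FALSE bits elsewhere.  */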
22568
22569 /* ISA 3.0 (power9) min/max subcase to emit an XSMAXCDP or XSMINCDP instruction
22570    for SF/DF scalars.  Move TRUE_COND to DEST if OP applied to the operands of
22571    the last comparison is nonzero/true, FALSE_COND if it is zero/false.  Return
22572    0 if the hardware has no such operation.  */
22573
22574 static int
22575 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22576 {
22577 enum rtx_code code = GET_CODE (op);
22578 rtx op0 = XEXP (op, 0);
22579 rtx op1 = XEXP (op, 1);
22580 machine_mode compare_mode = GET_MODE (op0);
22581 machine_mode result_mode = GET_MODE (dest);
22582 bool max_p = false;
22583
22584 if (result_mode != compare_mode)
22585 return 0;
22586
22587 if (code == GE || code == GT)
22588 max_p = true;
22589 else if (code == LE || code == LT)
22590 max_p = false;
22591 else
22592 return 0;
22593
22594 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22595 ;
22596
22597 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22598 max_p = !max_p;
22599
22600 else
22601 return 0;
22602
22603 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22604 return 1;
22605 }
22606
22607 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22608    XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if OP applied
22609    to the operands of the last comparison is nonzero/true, FALSE_COND if it is
22610    zero/false.  Return 0 if the hardware has no such operation.  */
22611
22612 static int
22613 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22614 {
22615 enum rtx_code code = GET_CODE (op);
22616 rtx op0 = XEXP (op, 0);
22617 rtx op1 = XEXP (op, 1);
22618 machine_mode result_mode = GET_MODE (dest);
22619 rtx compare_rtx;
22620 rtx cmove_rtx;
22621 rtx clobber_rtx;
22622
22623 if (!can_create_pseudo_p ())
22624 return 0;
22625
22626 switch (code)
22627 {
22628 case EQ:
22629 case GE:
22630 case GT:
22631 break;
22632
22633 case NE:
22634 case LT:
22635 case LE:
22636 code = swap_condition (code);
22637 std::swap (op0, op1);
22638 break;
22639
22640 default:
22641 return 0;
22642 }
22643
22644 /* Generate: [(parallel [(set (dest)
22645 (if_then_else (op (cmp1) (cmp2))
22646 (true)
22647 (false)))
22648 (clobber (scratch))])]. */
22649
22650 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22651 cmove_rtx = gen_rtx_SET (dest,
22652 gen_rtx_IF_THEN_ELSE (result_mode,
22653 compare_rtx,
22654 true_cond,
22655 false_cond));
22656
22657 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22658 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22659 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22660
22661 return 1;
22662 }
22663
22664 /* Emit a conditional move: move TRUE_COND to DEST if OP applied to the
22665    operands of the last comparison is nonzero/true, FALSE_COND if it
22666    is zero/false.  Return 0 if the hardware has no such operation.  */
22667
22668 int
22669 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22670 {
22671 enum rtx_code code = GET_CODE (op);
22672 rtx op0 = XEXP (op, 0);
22673 rtx op1 = XEXP (op, 1);
22674 machine_mode compare_mode = GET_MODE (op0);
22675 machine_mode result_mode = GET_MODE (dest);
22676 rtx temp;
22677 bool is_against_zero;
22678
22679 /* These modes should always match. */
22680 if (GET_MODE (op1) != compare_mode
22681 /* In the isel case however, we can use a compare immediate, so
22682 op1 may be a small constant. */
22683 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22684 return 0;
22685 if (GET_MODE (true_cond) != result_mode)
22686 return 0;
22687 if (GET_MODE (false_cond) != result_mode)
22688 return 0;
22689
22690 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22691 if (TARGET_P9_MINMAX
22692 && (compare_mode == SFmode || compare_mode == DFmode)
22693 && (result_mode == SFmode || result_mode == DFmode))
22694 {
22695 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22696 return 1;
22697
22698 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22699 return 1;
22700 }
22701
22702 /* Don't allow using floating point comparisons for integer results for
22703 now. */
22704 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22705 return 0;
22706
22707 /* First, work out if the hardware can do this at all, or
22708 if it's too slow.... */
22709 if (!FLOAT_MODE_P (compare_mode))
22710 {
22711 if (TARGET_ISEL)
22712 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22713 return 0;
22714 }
22715
22716 is_against_zero = op1 == CONST0_RTX (compare_mode);
22717
22718 /* A floating-point subtract might overflow, underflow, or produce
22719 an inexact result, thus changing the floating-point flags, so it
22720 can't be generated if we care about that. It's safe if one side
22721 of the construct is zero, since then no subtract will be
22722 generated. */
22723 if (SCALAR_FLOAT_MODE_P (compare_mode)
22724 && flag_trapping_math && ! is_against_zero)
22725 return 0;
22726
22727 /* Eliminate half of the comparisons by switching operands, this
22728 makes the remaining code simpler. */
22729 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22730 || code == LTGT || code == LT || code == UNLE)
22731 {
22732 code = reverse_condition_maybe_unordered (code);
22733 temp = true_cond;
22734 true_cond = false_cond;
22735 false_cond = temp;
22736 }
22737
22738   /* UNEQ and LTGT take four instructions for a comparison with zero,
22739      so it'll probably be faster to use a branch here too.  */
22740 if (code == UNEQ && HONOR_NANS (compare_mode))
22741 return 0;
22742
22743 /* We're going to try to implement comparisons by performing
22744 a subtract, then comparing against zero. Unfortunately,
22745      Inf - Inf is NaN, which is not zero, and so if we don't
22746      know that the operand is finite and the comparison
22747      would treat EQ differently from UNORDERED, we can't do it.  */
22748 if (HONOR_INFINITIES (compare_mode)
22749 && code != GT && code != UNGE
22750 && (!CONST_DOUBLE_P (op1)
22751 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22752 /* Constructs of the form (a OP b ? a : b) are safe. */
22753 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22754 || (! rtx_equal_p (op0, true_cond)
22755 && ! rtx_equal_p (op1, true_cond))))
22756 return 0;
22757
22758 /* At this point we know we can use fsel. */
22759
22760 /* Reduce the comparison to a comparison against zero. */
22761 if (! is_against_zero)
22762 {
22763 temp = gen_reg_rtx (compare_mode);
22764 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22765 op0 = temp;
22766 op1 = CONST0_RTX (compare_mode);
22767 }
22768
22769 /* If we don't care about NaNs we can reduce some of the comparisons
22770 down to faster ones. */
22771 if (! HONOR_NANS (compare_mode))
22772 switch (code)
22773 {
22774 case GT:
22775 code = LE;
22776 temp = true_cond;
22777 true_cond = false_cond;
22778 false_cond = temp;
22779 break;
22780 case UNGE:
22781 code = GE;
22782 break;
22783 case UNEQ:
22784 code = EQ;
22785 break;
22786 default:
22787 break;
22788 }
22789
22790 /* Now, reduce everything down to a GE. */
22791 switch (code)
22792 {
22793 case GE:
22794 break;
22795
22796 case LE:
22797 temp = gen_reg_rtx (compare_mode);
22798 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22799 op0 = temp;
22800 break;
22801
22802 case ORDERED:
22803 temp = gen_reg_rtx (compare_mode);
22804 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22805 op0 = temp;
22806 break;
22807
22808 case EQ:
22809 temp = gen_reg_rtx (compare_mode);
22810 emit_insn (gen_rtx_SET (temp,
22811 gen_rtx_NEG (compare_mode,
22812 gen_rtx_ABS (compare_mode, op0))));
22813 op0 = temp;
22814 break;
22815
22816 case UNGE:
22817 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22818 temp = gen_reg_rtx (result_mode);
22819 emit_insn (gen_rtx_SET (temp,
22820 gen_rtx_IF_THEN_ELSE (result_mode,
22821 gen_rtx_GE (VOIDmode,
22822 op0, op1),
22823 true_cond, false_cond)));
22824 false_cond = true_cond;
22825 true_cond = temp;
22826
22827 temp = gen_reg_rtx (compare_mode);
22828 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22829 op0 = temp;
22830 break;
22831
22832 case GT:
22833 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22834 temp = gen_reg_rtx (result_mode);
22835 emit_insn (gen_rtx_SET (temp,
22836 gen_rtx_IF_THEN_ELSE (result_mode,
22837 gen_rtx_GE (VOIDmode,
22838 op0, op1),
22839 true_cond, false_cond)));
22840 true_cond = false_cond;
22841 false_cond = temp;
22842
22843 temp = gen_reg_rtx (compare_mode);
22844 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22845 op0 = temp;
22846 break;
22847
22848 default:
22849 gcc_unreachable ();
22850 }
22851
22852 emit_insn (gen_rtx_SET (dest,
22853 gen_rtx_IF_THEN_ELSE (result_mode,
22854 gen_rtx_GE (VOIDmode,
22855 op0, op1),
22856 true_cond, false_cond)));
22857 return 1;
22858 }
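/* A sketch of the result for "a >= b ? x : y" in DFmode (register names
   illustrative):
	fsub  f0,a,b
	fsel  dest,f0,x,y	# dest = (f0 >= 0.0) ? x : y
   The reductions above exist precisely so that every supported case ends as
   this single GE-against-zero select.  */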
22859
22860 /* Same as above, but for ints (isel). */
22861
22862 int
22863 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22864 {
22865 rtx condition_rtx, cr;
22866 machine_mode mode = GET_MODE (dest);
22867 enum rtx_code cond_code;
22868 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22869 bool signedp;
22870
22871 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22872 return 0;
22873
22874   /* We still have to do the compare, because isel doesn't do a
22875      compare; it just looks at the CRx bits set by a previous compare
22876      instruction.  */
22877 condition_rtx = rs6000_generate_compare (op, mode);
22878 cond_code = GET_CODE (condition_rtx);
22879 cr = XEXP (condition_rtx, 0);
22880 signedp = GET_MODE (cr) == CCmode;
22881
22882 isel_func = (mode == SImode
22883 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22884 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22885
22886 switch (cond_code)
22887 {
22888 case LT: case GT: case LTU: case GTU: case EQ:
22889 /* isel handles these directly. */
22890 break;
22891
22892 default:
22893 /* We need to swap the sense of the comparison. */
22894 {
22895 std::swap (false_cond, true_cond);
22896 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22897 }
22898 break;
22899 }
22900
22901 false_cond = force_reg (mode, false_cond);
22902 if (true_cond != const0_rtx)
22903 true_cond = force_reg (mode, true_cond);
22904
22905 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22906
22907 return 1;
22908 }
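/* isel rD,rA,rB,BC copies rA into rD when CR bit BC is set and rB otherwise;
   encoding rA as r0 reads the constant 0, which is why TRUE_COND may be left
   as const0_rtx above while FALSE_COND is always forced into a register.  */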
22909
22910 void
22911 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22912 {
22913 machine_mode mode = GET_MODE (op0);
22914 enum rtx_code c;
22915 rtx target;
22916
22917 /* VSX/altivec have direct min/max insns. */
22918 if ((code == SMAX || code == SMIN)
22919 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22920 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22921 {
22922 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22923 return;
22924 }
22925
22926 if (code == SMAX || code == SMIN)
22927 c = GE;
22928 else
22929 c = GEU;
22930
22931 if (code == SMAX || code == UMAX)
22932 target = emit_conditional_move (dest, c, op0, op1, mode,
22933 op0, op1, mode, 0);
22934 else
22935 target = emit_conditional_move (dest, c, op0, op1, mode,
22936 op1, op0, mode, 0);
22937 gcc_assert (target);
22938 if (target != dest)
22939 emit_move_insn (dest, target);
22940 }
22941
22942 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22943 COND is true. Mark the jump as unlikely to be taken. */
22944
22945 static void
22946 emit_unlikely_jump (rtx cond, rtx label)
22947 {
22948 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22949 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22950 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22951 }
22952
22953 /* A subroutine of the atomic operation splitters. Emit a load-locked
22954    instruction in MODE.  For QI/HImode, possibly use a pattern that includes
22955    the zero_extend operation.  */
22956
22957 static void
22958 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22959 {
22960 rtx (*fn) (rtx, rtx) = NULL;
22961
22962 switch (mode)
22963 {
22964 case E_QImode:
22965 fn = gen_load_lockedqi;
22966 break;
22967 case E_HImode:
22968 fn = gen_load_lockedhi;
22969 break;
22970 case E_SImode:
22971 if (GET_MODE (mem) == QImode)
22972 fn = gen_load_lockedqi_si;
22973 else if (GET_MODE (mem) == HImode)
22974 fn = gen_load_lockedhi_si;
22975 else
22976 fn = gen_load_lockedsi;
22977 break;
22978 case E_DImode:
22979 fn = gen_load_lockeddi;
22980 break;
22981 case E_TImode:
22982 fn = gen_load_lockedti;
22983 break;
22984 default:
22985 gcc_unreachable ();
22986 }
22987 emit_insn (fn (reg, mem));
22988 }
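/* The patterns used here emit the load-and-reserve instructions lbarx and
   lharx (QI/HImode, when available), lwarx (SImode), ldarx (DImode) and
   lqarx (TImode); the *_si variants zero-extend the subword result into an
   SImode register.  */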
22989
22990 /* A subroutine of the atomic operation splitters. Emit a store-conditional
22991 instruction in MODE. */
22992
22993 static void
22994 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
22995 {
22996 rtx (*fn) (rtx, rtx, rtx) = NULL;
22997
22998 switch (mode)
22999 {
23000 case E_QImode:
23001 fn = gen_store_conditionalqi;
23002 break;
23003 case E_HImode:
23004 fn = gen_store_conditionalhi;
23005 break;
23006 case E_SImode:
23007 fn = gen_store_conditionalsi;
23008 break;
23009 case E_DImode:
23010 fn = gen_store_conditionaldi;
23011 break;
23012 case E_TImode:
23013 fn = gen_store_conditionalti;
23014 break;
23015 default:
23016 gcc_unreachable ();
23017 }
23018
23019   /* Emit sync before stwcx. to address PPC405 erratum 77.  */
23020 if (PPC405_ERRATUM77)
23021 emit_insn (gen_hwsync ());
23022
23023 emit_insn (fn (res, mem, val));
23024 }
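/* Correspondingly, the store-conditional patterns emit stbcx., sthcx.,
   stwcx., stdcx. and stqcx., each of which sets the EQ bit of CR0 when the
   reservation still held and the store succeeded.  */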
23025
23026 /* Expand barriers before and after a load_locked/store_cond sequence. */
23027
23028 static rtx
23029 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23030 {
23031 rtx addr = XEXP (mem, 0);
23032
23033 if (!legitimate_indirect_address_p (addr, reload_completed)
23034 && !legitimate_indexed_address_p (addr, reload_completed))
23035 {
23036 addr = force_reg (Pmode, addr);
23037 mem = replace_equiv_address_nv (mem, addr);
23038 }
23039
23040 switch (model)
23041 {
23042 case MEMMODEL_RELAXED:
23043 case MEMMODEL_CONSUME:
23044 case MEMMODEL_ACQUIRE:
23045 break;
23046 case MEMMODEL_RELEASE:
23047 case MEMMODEL_ACQ_REL:
23048 emit_insn (gen_lwsync ());
23049 break;
23050 case MEMMODEL_SEQ_CST:
23051 emit_insn (gen_hwsync ());
23052 break;
23053 default:
23054 gcc_unreachable ();
23055 }
23056 return mem;
23057 }
23058
23059 static void
23060 rs6000_post_atomic_barrier (enum memmodel model)
23061 {
23062 switch (model)
23063 {
23064 case MEMMODEL_RELAXED:
23065 case MEMMODEL_CONSUME:
23066 case MEMMODEL_RELEASE:
23067 break;
23068 case MEMMODEL_ACQUIRE:
23069 case MEMMODEL_ACQ_REL:
23070 case MEMMODEL_SEQ_CST:
23071 emit_insn (gen_isync ());
23072 break;
23073 default:
23074 gcc_unreachable ();
23075 }
23076 }
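/* Together these implement the usual PowerPC mapping: lwsync as the release
   fence, a full sync for seq_cst, and isync on the acquire side, with the
   ordering completed by the control dependency of the store-conditional
   loop.  */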
23077
23078 /* A subroutine of the various atomic expanders. For sub-word operations,
23079 we must adjust things to operate on SImode. Given the original MEM,
23080 return a new aligned memory. Also build and return the quantities by
23081 which to shift and mask. */
23082
23083 static rtx
23084 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23085 {
23086 rtx addr, align, shift, mask, mem;
23087 HOST_WIDE_INT shift_mask;
23088 machine_mode mode = GET_MODE (orig_mem);
23089
23090 /* For smaller modes, we have to implement this via SImode. */
23091 shift_mask = (mode == QImode ? 0x18 : 0x10);
23092
23093 addr = XEXP (orig_mem, 0);
23094 addr = force_reg (GET_MODE (addr), addr);
23095
23096 /* Aligned memory containing subword. Generate a new memory. We
23097 do not want any of the existing MEM_ATTR data, as we're now
23098 accessing memory outside the original object. */
23099 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23100 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23101 mem = gen_rtx_MEM (SImode, align);
23102 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23103 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23104 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23105
23106 /* Shift amount for subword relative to aligned word. */
23107 shift = gen_reg_rtx (SImode);
23108 addr = gen_lowpart (SImode, addr);
23109 rtx tmp = gen_reg_rtx (SImode);
23110 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23111 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23112 if (BYTES_BIG_ENDIAN)
23113 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23114 shift, 1, OPTAB_LIB_WIDEN);
23115 *pshift = shift;
23116
23117 /* Mask for insertion. */
23118 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23119 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23120 *pmask = mask;
23121
23122 return mem;
23123 }
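/* Worked example (illustrative): for a QImode access at address A with
   A & 3 == 3, the aligned word is at A & -4 and shift is (A & 3) * 8 = 24 on
   little-endian, or 24 ^ 0x18 = 0 on big-endian, giving mask = 0xff << shift;
   i.e. the byte at the highest address lands in the low-order bits of a
   big-endian word.  */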
23124
23125 /* A subroutine of the various atomic expanders. For sub-word operands,
23126    combine OLDVAL and NEWVAL via MASK, computing (OLDVAL & ~MASK) | NEWVAL with NEWVAL already shifted into position.  Returns a new pseudo.  */
23127
23128 static rtx
23129 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23130 {
23131 rtx x;
23132
23133 x = gen_reg_rtx (SImode);
23134 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23135 gen_rtx_NOT (SImode, mask),
23136 oldval)));
23137
23138 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23139
23140 return x;
23141 }
23142
23143 /* A subroutine of the various atomic expanders. For sub-word operands,
23144 extract WIDE to NARROW via SHIFT. */
23145
23146 static void
23147 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23148 {
23149 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23150 wide, 1, OPTAB_LIB_WIDEN);
23151 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23152 }
23153
23154 /* Expand an atomic compare and swap operation. */
23155
23156 void
23157 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23158 {
23159 rtx boolval, retval, mem, oldval, newval, cond;
23160 rtx label1, label2, x, mask, shift;
23161 machine_mode mode, orig_mode;
23162 enum memmodel mod_s, mod_f;
23163 bool is_weak;
23164
23165 boolval = operands[0];
23166 retval = operands[1];
23167 mem = operands[2];
23168 oldval = operands[3];
23169 newval = operands[4];
23170 is_weak = (INTVAL (operands[5]) != 0);
23171 mod_s = memmodel_base (INTVAL (operands[6]));
23172 mod_f = memmodel_base (INTVAL (operands[7]));
23173 orig_mode = mode = GET_MODE (mem);
23174
23175 mask = shift = NULL_RTX;
23176 if (mode == QImode || mode == HImode)
23177 {
23178       /* Before power8, we didn't have access to lbarx/lharx, so generate a
23179 	 lwarx plus shift/mask operations.  With power8, we need to do the
23180 	 comparison in SImode, but the store is still done in QI/HImode.  */
23181 oldval = convert_modes (SImode, mode, oldval, 1);
23182
23183 if (!TARGET_SYNC_HI_QI)
23184 {
23185 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23186
23187 	  /* Shift and mask OLDVAL into position within the word.  */
23188 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23189 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23190
23191 /* Shift and mask NEWVAL into position within the word. */
23192 newval = convert_modes (SImode, mode, newval, 1);
23193 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23194 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23195 }
23196
23197 /* Prepare to adjust the return value. */
23198 retval = gen_reg_rtx (SImode);
23199 mode = SImode;
23200 }
23201 else if (reg_overlap_mentioned_p (retval, oldval))
23202 oldval = copy_to_reg (oldval);
23203
23204 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23205 oldval = copy_to_mode_reg (mode, oldval);
23206
23207 if (reg_overlap_mentioned_p (retval, newval))
23208 newval = copy_to_reg (newval);
23209
23210 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23211
23212 label1 = NULL_RTX;
23213 if (!is_weak)
23214 {
23215 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23216 emit_label (XEXP (label1, 0));
23217 }
23218 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23219
23220 emit_load_locked (mode, retval, mem);
23221
23222 x = retval;
23223 if (mask)
23224 x = expand_simple_binop (SImode, AND, retval, mask,
23225 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23226
23227 cond = gen_reg_rtx (CCmode);
23228 /* If we have TImode, synthesize a comparison. */
23229 if (mode != TImode)
23230 x = gen_rtx_COMPARE (CCmode, x, oldval);
23231 else
23232 {
23233 rtx xor1_result = gen_reg_rtx (DImode);
23234 rtx xor2_result = gen_reg_rtx (DImode);
23235 rtx or_result = gen_reg_rtx (DImode);
23236 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23237 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23238 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23239 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23240
23241 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23242 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23243 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23244 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23245 }
23246
23247 emit_insn (gen_rtx_SET (cond, x));
23248
23249 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23250 emit_unlikely_jump (x, label2);
23251
23252 x = newval;
23253 if (mask)
23254 x = rs6000_mask_atomic_subword (retval, newval, mask);
23255
23256 emit_store_conditional (orig_mode, cond, mem, x);
23257
23258 if (!is_weak)
23259 {
23260 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23261 emit_unlikely_jump (x, label1);
23262 }
23263
23264 if (!is_mm_relaxed (mod_f))
23265 emit_label (XEXP (label2, 0));
23266
23267 rs6000_post_atomic_barrier (mod_s);
23268
23269 if (is_mm_relaxed (mod_f))
23270 emit_label (XEXP (label2, 0));
23271
23272 if (shift)
23273 rs6000_finish_atomic_subword (operands[1], retval, shift);
23274 else if (mode != GET_MODE (operands[1]))
23275 convert_move (operands[1], retval, 1);
23276
23277 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23278 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23279 emit_insn (gen_rtx_SET (boolval, x));
23280 }
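/* Roughly, a strong SImode compare-and-swap expands to (register names
   illustrative):
	.L1:	lwarx  r9,0,rMEM
		cmpw   cr0,r9,rOLD
		bne-   cr0,.L2
		stwcx. rNEW,0,rMEM
		bne-   cr0,.L1
	.L2:
   with the fences from rs6000_pre/post_atomic_barrier wrapped around the
   loop according to the memory model.  */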
23281
23282 /* Expand an atomic exchange operation. */
23283
23284 void
23285 rs6000_expand_atomic_exchange (rtx operands[])
23286 {
23287 rtx retval, mem, val, cond;
23288 machine_mode mode;
23289 enum memmodel model;
23290 rtx label, x, mask, shift;
23291
23292 retval = operands[0];
23293 mem = operands[1];
23294 val = operands[2];
23295 model = memmodel_base (INTVAL (operands[3]));
23296 mode = GET_MODE (mem);
23297
23298 mask = shift = NULL_RTX;
23299 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23300 {
23301 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23302
23303       /* Shift and mask VAL into position within the word.  */
23304 val = convert_modes (SImode, mode, val, 1);
23305 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23306 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23307
23308 /* Prepare to adjust the return value. */
23309 retval = gen_reg_rtx (SImode);
23310 mode = SImode;
23311 }
23312
23313 mem = rs6000_pre_atomic_barrier (mem, model);
23314
23315 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23316 emit_label (XEXP (label, 0));
23317
23318 emit_load_locked (mode, retval, mem);
23319
23320 x = val;
23321 if (mask)
23322 x = rs6000_mask_atomic_subword (retval, val, mask);
23323
23324 cond = gen_reg_rtx (CCmode);
23325 emit_store_conditional (mode, cond, mem, x);
23326
23327 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23328 emit_unlikely_jump (x, label);
23329
23330 rs6000_post_atomic_barrier (model);
23331
23332 if (shift)
23333 rs6000_finish_atomic_subword (operands[0], retval, shift);
23334 }
23335
23336 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23337 to perform. MEM is the memory on which to operate. VAL is the second
23338 operand of the binary operator. BEFORE and AFTER are optional locations to
23339 return the value of MEM either before of after the operation. MODEL_RTX
23340 is a CONST_INT containing the memory model to use. */
23341
23342 void
23343 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23344 rtx orig_before, rtx orig_after, rtx model_rtx)
23345 {
23346 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23347 machine_mode mode = GET_MODE (mem);
23348 machine_mode store_mode = mode;
23349 rtx label, x, cond, mask, shift;
23350 rtx before = orig_before, after = orig_after;
23351
23352 mask = shift = NULL_RTX;
23353   /* On power8, we want to use SImode for the operation directly.  On earlier
23354      systems, operate on the containing SImode word and shift/mask to isolate
23355      the proper byte or halfword.  */
23356 if (mode == QImode || mode == HImode)
23357 {
23358 if (TARGET_SYNC_HI_QI)
23359 {
23360 val = convert_modes (SImode, mode, val, 1);
23361
23362 /* Prepare to adjust the return value. */
23363 before = gen_reg_rtx (SImode);
23364 if (after)
23365 after = gen_reg_rtx (SImode);
23366 mode = SImode;
23367 }
23368 else
23369 {
23370 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23371
23372 	  /* Shift and mask VAL into position within the word.  */
23373 val = convert_modes (SImode, mode, val, 1);
23374 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23375 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23376
23377 switch (code)
23378 {
23379 case IOR:
23380 case XOR:
23381 /* We've already zero-extended VAL. That is sufficient to
23382 make certain that it does not affect other bits. */
23383 mask = NULL;
23384 break;
23385
23386 case AND:
23387 /* If we make certain that all of the other bits in VAL are
23388 set, that will be sufficient to not affect other bits. */
23389 x = gen_rtx_NOT (SImode, mask);
23390 x = gen_rtx_IOR (SImode, x, val);
23391 emit_insn (gen_rtx_SET (val, x));
23392 mask = NULL;
23393 break;
23394
23395 case NOT:
23396 case PLUS:
23397 case MINUS:
23398 /* These will all affect bits outside the field and need
23399 adjustment via MASK within the loop. */
23400 break;
23401
23402 default:
23403 gcc_unreachable ();
23404 }
23405
23406 /* Prepare to adjust the return value. */
23407 before = gen_reg_rtx (SImode);
23408 if (after)
23409 after = gen_reg_rtx (SImode);
23410 store_mode = mode = SImode;
23411 }
23412 }
23413
23414 mem = rs6000_pre_atomic_barrier (mem, model);
23415
23416 label = gen_label_rtx ();
23417 emit_label (label);
23418 label = gen_rtx_LABEL_REF (VOIDmode, label);
23419
23420 if (before == NULL_RTX)
23421 before = gen_reg_rtx (mode);
23422
23423 emit_load_locked (mode, before, mem);
23424
23425 if (code == NOT)
23426 {
23427 x = expand_simple_binop (mode, AND, before, val,
23428 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23429 after = expand_simple_unop (mode, NOT, x, after, 1);
23430 }
23431 else
23432 {
23433 after = expand_simple_binop (mode, code, before, val,
23434 after, 1, OPTAB_LIB_WIDEN);
23435 }
23436
23437 x = after;
23438 if (mask)
23439 {
23440 x = expand_simple_binop (SImode, AND, after, mask,
23441 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23442 x = rs6000_mask_atomic_subword (before, x, mask);
23443 }
23444 else if (store_mode != mode)
23445 x = convert_modes (store_mode, mode, x, 1);
23446
23447 cond = gen_reg_rtx (CCmode);
23448 emit_store_conditional (store_mode, cond, mem, x);
23449
23450 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23451 emit_unlikely_jump (x, label);
23452
23453 rs6000_post_atomic_barrier (model);
23454
23455 if (shift)
23456 {
23457 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23458 	 then do the calculations in a SImode register.  */
23459 if (orig_before)
23460 rs6000_finish_atomic_subword (orig_before, before, shift);
23461 if (orig_after)
23462 rs6000_finish_atomic_subword (orig_after, after, shift);
23463 }
23464 else if (store_mode != mode)
23465 {
23466 /* QImode/HImode on machines with lbarx/lharx where we do the native
23467 	 operation and then do the calculations in a SImode register.  */
23468 if (orig_before)
23469 convert_move (orig_before, before, 1);
23470 if (orig_after)
23471 convert_move (orig_after, after, 1);
23472 }
23473 else if (orig_after && after != orig_after)
23474 emit_move_insn (orig_after, after);
23475 }
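/* E.g. an SImode fetch-and-add becomes, roughly (register names
   illustrative):
	.L1:	lwarx  r9,0,rMEM
		add    r10,r9,rVAL
		stwcx. r10,0,rMEM
		bne-   .L1
   BEFORE receives r9 and AFTER r10; the NOT case instead emits an AND
   followed by a one's complement, giving fetch-and-nand semantics.  */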
23476
23477 /* Emit instructions to move SRC to DST. Called by splitters for
23478 multi-register moves. It will emit at most one instruction for
23479 each register that is accessed; that is, it won't emit li/lis pairs
23480 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23481 register. */
23482
23483 void
23484 rs6000_split_multireg_move (rtx dst, rtx src)
23485 {
23486 /* The register number of the first register being moved. */
23487 int reg;
23488 /* The mode that is to be moved. */
23489 machine_mode mode;
23490 /* The mode that the move is being done in, and its size. */
23491 machine_mode reg_mode;
23492 int reg_mode_size;
23493 /* The number of registers that will be moved. */
23494 int nregs;
23495
23496 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23497 mode = GET_MODE (dst);
23498 nregs = hard_regno_nregs (reg, mode);
23499 if (FP_REGNO_P (reg))
23500 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23501 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23502 else if (ALTIVEC_REGNO_P (reg))
23503 reg_mode = V16QImode;
23504 else
23505 reg_mode = word_mode;
23506 reg_mode_size = GET_MODE_SIZE (reg_mode);
23507
23508 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23509
23510 /* TDmode residing in FP registers is special, since the ISA requires that
23511 the lower-numbered word of a register pair is always the most significant
23512 word, even in little-endian mode. This does not match the usual subreg
23513    semantics, so we cannot use simplify_gen_subreg in those cases.  Access
23514 the appropriate constituent registers "by hand" in little-endian mode.
23515
23516 Note we do not need to check for destructive overlap here since TDmode
23517 can only reside in even/odd register pairs. */
23518 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23519 {
23520 rtx p_src, p_dst;
23521 int i;
23522
23523 for (i = 0; i < nregs; i++)
23524 {
23525 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23526 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23527 else
23528 p_src = simplify_gen_subreg (reg_mode, src, mode,
23529 i * reg_mode_size);
23530
23531 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23532 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23533 else
23534 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23535 i * reg_mode_size);
23536
23537 emit_insn (gen_rtx_SET (p_dst, p_src));
23538 }
23539
23540 return;
23541 }
23542
23543 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23544 {
23545 /* Move register range backwards, if we might have destructive
23546 overlap. */
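      /* E.g. on 32-bit, moving a DImode value from r3:r4 into r4:r5 must
	 copy r5 <- r4 before r4 <- r3, or the second source word would be
	 clobbered.  */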
23547 int i;
23548 for (i = nregs - 1; i >= 0; i--)
23549 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23550 i * reg_mode_size),
23551 simplify_gen_subreg (reg_mode, src, mode,
23552 i * reg_mode_size)));
23553 }
23554 else
23555 {
23556 int i;
23557 int j = -1;
23558 bool used_update = false;
23559 rtx restore_basereg = NULL_RTX;
23560
23561 if (MEM_P (src) && INT_REGNO_P (reg))
23562 {
23563 rtx breg;
23564
23565 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23566 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23567 {
23568 rtx delta_rtx;
23569 breg = XEXP (XEXP (src, 0), 0);
23570 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23571 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23572 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23573 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23574 src = replace_equiv_address (src, breg);
23575 }
23576 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23577 {
23578 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23579 {
23580 rtx basereg = XEXP (XEXP (src, 0), 0);
23581 if (TARGET_UPDATE)
23582 {
23583 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23584 emit_insn (gen_rtx_SET (ndst,
23585 gen_rtx_MEM (reg_mode,
23586 XEXP (src, 0))));
23587 used_update = true;
23588 }
23589 else
23590 emit_insn (gen_rtx_SET (basereg,
23591 XEXP (XEXP (src, 0), 1)));
23592 src = replace_equiv_address (src, basereg);
23593 }
23594 else
23595 {
23596 rtx basereg = gen_rtx_REG (Pmode, reg);
23597 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23598 src = replace_equiv_address (src, basereg);
23599 }
23600 }
23601
23602 breg = XEXP (src, 0);
23603 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23604 breg = XEXP (breg, 0);
23605
23606 /* If the base register we are using to address memory is
23607 also a destination reg, then change that register last. */
23608 if (REG_P (breg)
23609 && REGNO (breg) >= REGNO (dst)
23610 && REGNO (breg) < REGNO (dst) + nregs)
23611 j = REGNO (breg) - REGNO (dst);
23612 }
23613 else if (MEM_P (dst) && INT_REGNO_P (reg))
23614 {
23615 rtx breg;
23616
23617 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23618 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23619 {
23620 rtx delta_rtx;
23621 breg = XEXP (XEXP (dst, 0), 0);
23622 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23623 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23624 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23625
23626 /* We have to update the breg before doing the store.
23627 Use store with update, if available. */
23628
23629 if (TARGET_UPDATE)
23630 {
23631 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23632 emit_insn (TARGET_32BIT
23633 ? (TARGET_POWERPC64
23634 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23635 : gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
23636 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23637 used_update = true;
23638 }
23639 else
23640 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23641 dst = replace_equiv_address (dst, breg);
23642 }
23643 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23644 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23645 {
23646 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23647 {
23648 rtx basereg = XEXP (XEXP (dst, 0), 0);
23649 if (TARGET_UPDATE)
23650 {
23651 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23652 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23653 XEXP (dst, 0)),
23654 nsrc));
23655 used_update = true;
23656 }
23657 else
23658 emit_insn (gen_rtx_SET (basereg,
23659 XEXP (XEXP (dst, 0), 1)));
23660 dst = replace_equiv_address (dst, basereg);
23661 }
23662 else
23663 {
23664 rtx basereg = XEXP (XEXP (dst, 0), 0);
23665 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23666 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23667 && REG_P (basereg)
23668 && REG_P (offsetreg)
23669 && REGNO (basereg) != REGNO (offsetreg));
23670 if (REGNO (basereg) == 0)
23671 {
23672 rtx tmp = offsetreg;
23673 offsetreg = basereg;
23674 basereg = tmp;
23675 }
23676 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23677 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23678 dst = replace_equiv_address (dst, basereg);
23679 }
23680 }
23681 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23682 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23683 }
23684
23685 for (i = 0; i < nregs; i++)
23686 {
23687 /* Calculate index to next subword. */
23688 ++j;
23689 if (j == nregs)
23690 j = 0;
23691
23692 	  /* If the compiler already emitted the move of the first word by
23693 	     a store with update, there is no need to do anything.  */
23694 if (j == 0 && used_update)
23695 continue;
23696
23697 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23698 j * reg_mode_size),
23699 simplify_gen_subreg (reg_mode, src, mode,
23700 j * reg_mode_size)));
23701 }
23702 if (restore_basereg != NULL_RTX)
23703 emit_insn (restore_basereg);
23704 }
23705 }
23706
23707 \f
23708 /* This page contains routines that are used to determine what the
23709 function prologue and epilogue code will do and write them out. */
23710
23711 /* Determine whether register REG really needs to be saved.  */
23712
23713 static bool
23714 save_reg_p (int reg)
23715 {
23716 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23717 {
23718 /* When calling eh_return, we must return true for all the cases
23719 where conditional_register_usage marks the PIC offset reg
23720 call used or fixed. */
23721 if (crtl->calls_eh_return
23722 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23723 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23724 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23725 return true;
23726
23727 /* We need to mark the PIC offset register live for the same
23728 conditions as it is set up in rs6000_emit_prologue, or
23729 otherwise it won't be saved before we clobber it. */
23730 if (TARGET_TOC && TARGET_MINIMAL_TOC
23731 && !constant_pool_empty_p ())
23732 return true;
23733
23734 if (DEFAULT_ABI == ABI_V4
23735 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23736 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
23737 return true;
23738
23739 if (DEFAULT_ABI == ABI_DARWIN
23740 && flag_pic && crtl->uses_pic_offset_table)
23741 return true;
23742 }
23743
23744 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23745 }
23746
23747 /* Return the first fixed-point register that is required to be
23748 saved. 32 if none. */
23749
23750 int
23751 first_reg_to_save (void)
23752 {
23753 int first_reg;
23754
23755 /* Find lowest numbered live register. */
23756 for (first_reg = 13; first_reg <= 31; first_reg++)
23757 if (save_reg_p (first_reg))
23758 break;
23759
23760 return first_reg;
23761 }
23762
23763 /* Similar, for FP regs. */
23764
23765 int
23766 first_fp_reg_to_save (void)
23767 {
23768 int first_reg;
23769
23770 /* Find lowest numbered live register. */
23771 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23772 if (save_reg_p (first_reg))
23773 break;
23774
23775 return first_reg;
23776 }
23777
23778 /* Similar, for AltiVec regs. */
23779
23780 static int
23781 first_altivec_reg_to_save (void)
23782 {
23783 int i;
23784
23785 /* Stack frame remains as is unless we are in AltiVec ABI. */
23786 if (! TARGET_ALTIVEC_ABI)
23787 return LAST_ALTIVEC_REGNO + 1;
23788
23789 /* On Darwin, the unwind routines are compiled without
23790 TARGET_ALTIVEC, and use save_world to save/restore the
23791 altivec registers when necessary. */
23792 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23793 && ! TARGET_ALTIVEC)
23794 return FIRST_ALTIVEC_REGNO + 20;
23795
23796 /* Find lowest numbered live register. */
23797 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23798 if (save_reg_p (i))
23799 break;
23800
23801 return i;
23802 }
23803
23804 /* Return a 32-bit mask of the AltiVec registers we need to set in
23805 VRSAVE. Bit n of the return value is 1 if Vn is live. The MSB in
23806 the 32-bit word is 0. */
23807
23808 static unsigned int
23809 compute_vrsave_mask (void)
23810 {
23811 unsigned int i, mask = 0;
23812
23813 /* On Darwin, the unwind routines are compiled without
23814 TARGET_ALTIVEC, and use save_world to save/restore the
23815 call-saved altivec registers when necessary. */
23816 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23817 && ! TARGET_ALTIVEC)
23818 mask |= 0xFFF;
23819
23820 /* First, find out if we use _any_ altivec registers. */
23821 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23822 if (df_regs_ever_live_p (i))
23823 mask |= ALTIVEC_REG_BIT (i);
23824
23825 if (mask == 0)
23826 return mask;
23827
23828 /* Next, remove the argument registers from the set. These must
23829 be in the VRSAVE mask set by the caller, so we don't need to add
23830 them in again. More importantly, the mask we compute here is
23831 used to generate CLOBBERs in the set_vrsave insn, and we do not
23832 wish the argument registers to die. */
23833 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23834 mask &= ~ALTIVEC_REG_BIT (i);
23835
23836 /* Similarly, remove the return value from the set. */
23837 {
23838 bool yes = false;
23839 diddle_return_value (is_altivec_return_reg, &yes);
23840 if (yes)
23841 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23842 }
23843
23844 return mask;
23845 }
23846
23847 /* For a very restricted set of circumstances, we can cut down the
23848 size of prologues/epilogues by calling our own save/restore-the-world
23849 routines. */
23850
23851 static void
23852 compute_save_world_info (rs6000_stack_t *info)
23853 {
23854 info->world_save_p = 1;
23855 info->world_save_p
23856 = (WORLD_SAVE_P (info)
23857 && DEFAULT_ABI == ABI_DARWIN
23858 && !cfun->has_nonlocal_label
23859 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23860 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23861 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23862 && info->cr_save_p);
23863
23864 /* This will not work in conjunction with sibcalls. Make sure there
23865 are none. (This check is expensive, but seldom executed.) */
23866 if (WORLD_SAVE_P (info))
23867 {
23868 rtx_insn *insn;
23869 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23870 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23871 {
23872 info->world_save_p = 0;
23873 break;
23874 }
23875 }
23876
23877 if (WORLD_SAVE_P (info))
23878 {
23879 /* Even if we're not touching VRsave, make sure there's room on the
23880 stack for it, if it looks like we're calling SAVE_WORLD, which
23881 will attempt to save it. */
23882 info->vrsave_size = 4;
23883
23884 /* If we are going to save the world, we need to save the link register too. */
23885 info->lr_save_p = 1;
23886
23887 /* "Save" the VRsave register too if we're saving the world. */
23888 if (info->vrsave_mask == 0)
23889 info->vrsave_mask = compute_vrsave_mask ();
23890
23891 /* Because the Darwin register save/restore routines only handle
23892 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23893 check. */
23894 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23895 && (info->first_altivec_reg_save
23896 >= FIRST_SAVED_ALTIVEC_REGNO));
23897 }
23898
23899 return;
23900 }
23901
23902
23903 static void
23904 is_altivec_return_reg (rtx reg, void *xyes)
23905 {
23906 bool *yes = (bool *) xyes;
23907 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23908 *yes = true;
23909 }
23910
23911 \f
23912 /* Return whether REG is a global user reg or has been specified by
23913 -ffixed-REG. We should not restore these, and so cannot use
23914 lmw or out-of-line restore functions if there are any. We also
23915 can't save them (well, emit frame notes for them), because frame
23916 unwinding during exception handling will restore saved registers. */
23917
23918 static bool
23919 fixed_reg_p (int reg)
23920 {
23921 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23922 backend sets it, overriding anything the user might have given. */
23923 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23924 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23925 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23926 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23927 return false;
23928
23929 return fixed_regs[reg];
23930 }
23931
23932 /* Determine the strategy for saving/restoring registers.  */
23933
23934 enum {
23935 SAVE_MULTIPLE = 0x1,
23936 SAVE_INLINE_GPRS = 0x2,
23937 SAVE_INLINE_FPRS = 0x4,
23938 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
23939 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
23940 SAVE_INLINE_VRS = 0x20,
23941 REST_MULTIPLE = 0x100,
23942 REST_INLINE_GPRS = 0x200,
23943 REST_INLINE_FPRS = 0x400,
23944 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
23945 REST_INLINE_VRS = 0x1000
23946 };
23947
23948 static int
23949 rs6000_savres_strategy (rs6000_stack_t *info,
23950 bool using_static_chain_p)
23951 {
23952 int strategy = 0;
23953
23954 /* Select between in-line and out-of-line save and restore of regs.
23955 First, all the obvious cases where we don't use out-of-line. */
23956 if (crtl->calls_eh_return
23957 || cfun->machine->ra_need_lr)
23958 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
23959 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
23960 | SAVE_INLINE_VRS | REST_INLINE_VRS);
23961
23962 if (info->first_gp_reg_save == 32)
23963 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23964
23965 if (info->first_fp_reg_save == 64)
23966 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23967
23968 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
23969 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23970
23971 /* Define cutoff for using out-of-line functions to save registers. */
23972 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
23973 {
23974 if (!optimize_size)
23975 {
23976 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23977 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23978 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23979 }
23980 else
23981 {
23982 /* Prefer out-of-line restore if it will exit. */
23983 if (info->first_fp_reg_save > 61)
23984 strategy |= SAVE_INLINE_FPRS;
23985 if (info->first_gp_reg_save > 29)
23986 {
23987 if (info->first_fp_reg_save == 64)
23988 strategy |= SAVE_INLINE_GPRS;
23989 else
23990 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23991 }
23992 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
23993 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23994 }
23995 }
23996 else if (DEFAULT_ABI == ABI_DARWIN)
23997 {
23998 if (info->first_fp_reg_save > 60)
23999 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24000 if (info->first_gp_reg_save > 29)
24001 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24002 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24003 }
24004 else
24005 {
24006 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24007 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24008 || info->first_fp_reg_save > 61)
24009 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24010 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24011 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24012 }
24013
24014 /* Don't bother to try to save things out-of-line if r11 is occupied
24015 by the static chain. It would require too much fiddling and the
24016 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
24017 pointer on Darwin, and AIX uses r1 or r12. */
24018 if (using_static_chain_p
24019 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24020 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24021 | SAVE_INLINE_GPRS
24022 | SAVE_INLINE_VRS);
24023
24024 /* Don't ever restore fixed regs. That means we can't use the
24025 out-of-line register restore functions if a fixed reg is in the
24026 range of regs restored. */
24027 if (!(strategy & REST_INLINE_FPRS))
24028 for (int i = info->first_fp_reg_save; i < 64; i++)
24029 if (fixed_regs[i])
24030 {
24031 strategy |= REST_INLINE_FPRS;
24032 break;
24033 }
24034
24035 /* We can only use the out-of-line routines to restore fprs if we've
24036 saved all the registers from first_fp_reg_save in the prologue.
24037 Otherwise, we risk loading garbage. Of course, if we have saved
24038 out-of-line then we know we haven't skipped any fprs. */
24039 if ((strategy & SAVE_INLINE_FPRS)
24040 && !(strategy & REST_INLINE_FPRS))
24041 for (int i = info->first_fp_reg_save; i < 64; i++)
24042 if (!save_reg_p (i))
24043 {
24044 strategy |= REST_INLINE_FPRS;
24045 break;
24046 }
24047
24048 /* Similarly, for altivec regs. */
24049 if (!(strategy & REST_INLINE_VRS))
24050 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24051 if (fixed_regs[i])
24052 {
24053 strategy |= REST_INLINE_VRS;
24054 break;
24055 }
24056
24057 if ((strategy & SAVE_INLINE_VRS)
24058 && !(strategy & REST_INLINE_VRS))
24059 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24060 if (!save_reg_p (i))
24061 {
24062 strategy |= REST_INLINE_VRS;
24063 break;
24064 }
24065
24066 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24067 saved is an out-of-line save or restore. Set up the value for
24068 the next test (excluding out-of-line gprs). */
24069 bool lr_save_p = (info->lr_save_p
24070 || !(strategy & SAVE_INLINE_FPRS)
24071 || !(strategy & SAVE_INLINE_VRS)
24072 || !(strategy & REST_INLINE_FPRS)
24073 || !(strategy & REST_INLINE_VRS));
24074
24075 if (TARGET_MULTIPLE
24076 && !TARGET_POWERPC64
24077 && info->first_gp_reg_save < 31
24078 && !(flag_shrink_wrap
24079 && flag_shrink_wrap_separate
24080 && optimize_function_for_speed_p (cfun)))
24081 {
24082 int count = 0;
24083 for (int i = info->first_gp_reg_save; i < 32; i++)
24084 if (save_reg_p (i))
24085 count++;
24086
24087 if (count <= 1)
24088 /* Don't use store multiple if only one reg needs to be
24089 saved. This can occur for example when the ABI_V4 pic reg
24090 (r30) needs to be saved to make calls, but r31 is not
24091 used. */
24092 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24093 else
24094 {
24095 /* Prefer store multiple for saves over out-of-line
24096 routines, since the store-multiple instruction will
24097 always be smaller. */
24098 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24099
24100 /* The situation is more complicated with load multiple.
24101 We'd prefer to use the out-of-line routines for restores,
24102 since the "exit" out-of-line routines can handle the
24103 restore of LR and the frame teardown. However it doesn't
24104 make sense to use the out-of-line routine if that is the
24105 only reason we'd need to save LR, and we can't use the
24106 "exit" out-of-line gpr restore if we have saved some
24107 fprs; in those cases it is advantageous to use load
24108 multiple when available. */
24109 if (info->first_fp_reg_save != 64 || !lr_save_p)
24110 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24111 }
24112 }
24113
24114 /* Using the "exit" out-of-line routine does not improve code size
24115 if it would require lr to be saved and only one or two gprs
24116 need saving. */
24117 else if (!lr_save_p && info->first_gp_reg_save > 29)
24118 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24119
24120 /* Don't ever restore fixed regs. */
24121 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24122 for (int i = info->first_gp_reg_save; i < 32; i++)
24123 if (fixed_reg_p (i))
24124 {
24125 strategy |= REST_INLINE_GPRS;
24126 strategy &= ~REST_MULTIPLE;
24127 break;
24128 }
24129
24130 /* We can only use load multiple or the out-of-line routines to
24131 restore gprs if we've saved all the registers from
24132 first_gp_reg_save. Otherwise, we risk loading garbage.
24133 Of course, if we have saved out-of-line or used stmw then we know
24134 we haven't skipped any gprs. */
24135 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24136 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24137 for (int i = info->first_gp_reg_save; i < 32; i++)
24138 if (!save_reg_p (i))
24139 {
24140 strategy |= REST_INLINE_GPRS;
24141 strategy &= ~REST_MULTIPLE;
24142 break;
24143 }
24144
24145 if (TARGET_ELF && TARGET_64BIT)
24146 {
24147 if (!(strategy & SAVE_INLINE_FPRS))
24148 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24149 else if (!(strategy & SAVE_INLINE_GPRS)
24150 && info->first_fp_reg_save == 64)
24151 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24152 }
24153 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24154 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24155
24156 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24157 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24158
24159 return strategy;
24160 }
24161
24162 /* Calculate the stack information for the current function. This is
24163 complicated by having two separate calling sequences, the AIX calling
24164 sequence and the V.4 calling sequence.
24165
24166 AIX (and Darwin/Mac OS X) stack frames look like:
24167 32-bit 64-bit
24168 SP----> +---------------------------------------+
24169 | back chain to caller | 0 0
24170 +---------------------------------------+
24171 | saved CR | 4 8 (8-11)
24172 +---------------------------------------+
24173 | saved LR | 8 16
24174 +---------------------------------------+
24175 | reserved for compilers | 12 24
24176 +---------------------------------------+
24177 | reserved for binders | 16 32
24178 +---------------------------------------+
24179 | saved TOC pointer | 20 40
24180 +---------------------------------------+
24181 | Parameter save area (+padding*) (P) | 24 48
24182 +---------------------------------------+
24183 | Alloca space (A) | 24+P etc.
24184 +---------------------------------------+
24185 | Local variable space (L) | 24+P+A
24186 +---------------------------------------+
24187 | Float/int conversion temporary (X) | 24+P+A+L
24188 +---------------------------------------+
24189 | Save area for AltiVec registers (W) | 24+P+A+L+X
24190 +---------------------------------------+
24191 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24192 +---------------------------------------+
24193 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24194 +---------------------------------------+
24195 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
24196 +---------------------------------------+
24197 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24198 +---------------------------------------+
24199 old SP->| back chain to caller's caller |
24200 +---------------------------------------+
24201
24202 * If the alloca area is present, the parameter save area is
24203 padded so that the alloca area starts 16-byte aligned.
24204
24205 The required alignment for AIX configurations is two words (i.e., 8
24206 or 16 bytes).
24207
24208 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24209
24210 SP----> +---------------------------------------+
24211 | Back chain to caller | 0
24212 +---------------------------------------+
24213 | Save area for CR | 8
24214 +---------------------------------------+
24215 | Saved LR | 16
24216 +---------------------------------------+
24217 | Saved TOC pointer | 24
24218 +---------------------------------------+
24219 | Parameter save area (+padding*) (P) | 32
24220 +---------------------------------------+
24221 | Alloca space (A) | 32+P
24222 +---------------------------------------+
24223 | Local variable space (L) | 32+P+A
24224 +---------------------------------------+
24225 | Save area for AltiVec registers (W) | 32+P+A+L
24226 +---------------------------------------+
24227 | AltiVec alignment padding (Y) | 32+P+A+L+W
24228 +---------------------------------------+
24229 | Save area for GP registers (G) | 32+P+A+L+W+Y
24230 +---------------------------------------+
24231 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24232 +---------------------------------------+
24233 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24234 +---------------------------------------+
24235
24236 * If the alloca area is present, the parameter save area is
24237 padded so that the alloca area starts 16-byte aligned.
24238
24239 V.4 stack frames look like:
24240
24241 SP----> +---------------------------------------+
24242 | back chain to caller | 0
24243 +---------------------------------------+
24244 | caller's saved LR | 4
24245 +---------------------------------------+
24246 | Parameter save area (+padding*) (P) | 8
24247 +---------------------------------------+
24248 | Alloca space (A) | 8+P
24249 +---------------------------------------+
24250 | Varargs save area (V) | 8+P+A
24251 +---------------------------------------+
24252 | Local variable space (L) | 8+P+A+V
24253 +---------------------------------------+
24254 | Float/int conversion temporary (X) | 8+P+A+V+L
24255 +---------------------------------------+
24256 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24257 +---------------------------------------+
24258 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24259 +---------------------------------------+
24260 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24261 +---------------------------------------+
24262 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24263 +---------------------------------------+
24264 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24265 +---------------------------------------+
24266 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24267 +---------------------------------------+
24268 old SP->| back chain to caller's caller |
24269 +---------------------------------------+
24270
24271 * If the alloca area is present and the required alignment is
24272 16 bytes, the parameter save area is padded so that the
24273 alloca area starts 16-byte aligned.
24274
24275 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24276 given. (But note below and in sysv4.h that we require only 8 and
24277 may round up the size of our stack frame anyways. The historical
24278 reason is early versions of powerpc-linux which didn't properly
24279 align the stack at program startup. A happy side-effect is that
24280 -mno-eabi libraries can be used with -meabi programs.)
24281
24282 The EABI configuration defaults to the V.4 layout. However,
24283 the stack alignment requirements may differ. If -mno-eabi is not
24284 given, the required stack alignment is 8 bytes; if -mno-eabi is
24285 given, the required alignment is 16 bytes. (But see V.4 comment
24286 above.) */
24287
24288 #ifndef ABI_STACK_BOUNDARY
24289 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24290 #endif
24291
24292 static rs6000_stack_t *
24293 rs6000_stack_info (void)
24294 {
24295 /* We should never be called for thunks; we are not set up for that. */
24296 gcc_assert (!cfun->is_thunk);
24297
24298 rs6000_stack_t *info = &stack_info;
24299 int reg_size = TARGET_32BIT ? 4 : 8;
24300 int ehrd_size;
24301 int ehcr_size;
24302 int save_align;
24303 int first_gp;
24304 HOST_WIDE_INT non_fixed_size;
24305 bool using_static_chain_p;
24306
24307 if (reload_completed && info->reload_completed)
24308 return info;
24309
24310 memset (info, 0, sizeof (*info));
24311 info->reload_completed = reload_completed;
24312
24313 /* Select which calling sequence. */
24314 info->abi = DEFAULT_ABI;
24315
24316 /* Calculate which registers need to be saved & save area size. */
24317 info->first_gp_reg_save = first_reg_to_save ();
24318 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24319 even if it currently looks like we won't. Reload may need it to
24320 get at a constant; if so, it will have already created a constant
24321 pool entry for it. */
24322 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24323 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24324 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24325 && crtl->uses_const_pool
24326 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24327 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24328 else
24329 first_gp = info->first_gp_reg_save;
24330
24331 info->gp_size = reg_size * (32 - first_gp);
24332
24333 info->first_fp_reg_save = first_fp_reg_to_save ();
24334 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24335
24336 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24337 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24338 - info->first_altivec_reg_save);
24339
24340 /* Does this function call anything? */
24341 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24342
24343 /* Determine if we need to save the condition code registers. */
24344 if (save_reg_p (CR2_REGNO)
24345 || save_reg_p (CR3_REGNO)
24346 || save_reg_p (CR4_REGNO))
24347 {
24348 info->cr_save_p = 1;
24349 if (DEFAULT_ABI == ABI_V4)
24350 info->cr_size = reg_size;
24351 }
24352
24353 /* If the current function calls __builtin_eh_return, then we need
24354 to allocate stack space for registers that will hold data for
24355 the exception handler. */
24356 if (crtl->calls_eh_return)
24357 {
24358 unsigned int i;
24359 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24360 continue;
24361
24362 ehrd_size = i * UNITS_PER_WORD;
24363 }
24364 else
24365 ehrd_size = 0;
24366
24367 /* In the ELFv2 ABI, we also need to allocate space for separate
24368 CR field save areas if the function calls __builtin_eh_return. */
24369 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24370 {
24371 /* This hard-codes that we have three call-saved CR fields. */
24372 ehcr_size = 3 * reg_size;
24373 /* We do *not* use the regular CR save mechanism. */
24374 info->cr_save_p = 0;
24375 }
24376 else
24377 ehcr_size = 0;
24378
24379 /* Determine various sizes. */
24380 info->reg_size = reg_size;
24381 info->fixed_size = RS6000_SAVE_AREA;
24382 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24383 if (cfun->calls_alloca)
24384 info->parm_size =
24385 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24386 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24387 else
24388 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24389 TARGET_ALTIVEC ? 16 : 8);
24390 if (FRAME_GROWS_DOWNWARD)
24391 info->vars_size
24392 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24393 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24394 - (info->fixed_size + info->vars_size + info->parm_size);
24395
24396 if (TARGET_ALTIVEC_ABI)
24397 info->vrsave_mask = compute_vrsave_mask ();
24398
24399 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24400 info->vrsave_size = 4;
24401
24402 compute_save_world_info (info);
24403
24404 /* Calculate the offsets. */
24405 switch (DEFAULT_ABI)
24406 {
24407 case ABI_NONE:
24408 default:
24409 gcc_unreachable ();
24410
24411 case ABI_AIX:
24412 case ABI_ELFv2:
24413 case ABI_DARWIN:
24414 info->fp_save_offset = -info->fp_size;
24415 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24416
24417 if (TARGET_ALTIVEC_ABI)
24418 {
24419 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24420
24421 /* Align stack so vector save area is on a quadword boundary.
24422 The padding goes above the vectors. */
24423 if (info->altivec_size != 0)
24424 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24425
24426 info->altivec_save_offset = info->vrsave_save_offset
24427 - info->altivec_padding_size
24428 - info->altivec_size;
24429 gcc_assert (info->altivec_size == 0
24430 || info->altivec_save_offset % 16 == 0);
24431
24432 /* Adjust for AltiVec case. */
24433 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24434 }
24435 else
24436 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24437
24438 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24439 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24440 info->lr_save_offset = 2*reg_size;
24441 break;
24442
24443 case ABI_V4:
24444 info->fp_save_offset = -info->fp_size;
24445 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24446 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24447
24448 if (TARGET_ALTIVEC_ABI)
24449 {
24450 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24451
24452 /* Align stack so vector save area is on a quadword boundary. */
24453 if (info->altivec_size != 0)
24454 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24455
24456 info->altivec_save_offset = info->vrsave_save_offset
24457 - info->altivec_padding_size
24458 - info->altivec_size;
24459
24460 /* Adjust for AltiVec case. */
24461 info->ehrd_offset = info->altivec_save_offset;
24462 }
24463 else
24464 info->ehrd_offset = info->cr_save_offset;
24465
24466 info->ehrd_offset -= ehrd_size;
24467 info->lr_save_offset = reg_size;
24468 }
24469
24470 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24471 info->save_size = RS6000_ALIGN (info->fp_size
24472 + info->gp_size
24473 + info->altivec_size
24474 + info->altivec_padding_size
24475 + ehrd_size
24476 + ehcr_size
24477 + info->cr_size
24478 + info->vrsave_size,
24479 save_align);
24480
24481 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24482
24483 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24484 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24485
24486 /* Determine if we need to save the link register. */
24487 if (info->calls_p
24488 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24489 && crtl->profile
24490 && !TARGET_PROFILE_KERNEL)
24491 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24492 #ifdef TARGET_RELOCATABLE
24493 || (DEFAULT_ABI == ABI_V4
24494 && (TARGET_RELOCATABLE || flag_pic > 1)
24495 && !constant_pool_empty_p ())
24496 #endif
24497 || rs6000_ra_ever_killed ())
24498 info->lr_save_p = 1;
24499
24500 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24501 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24502 && call_used_regs[STATIC_CHAIN_REGNUM]);
24503 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24504
24505 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24506 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24507 || !(info->savres_strategy & SAVE_INLINE_VRS)
24508 || !(info->savres_strategy & REST_INLINE_GPRS)
24509 || !(info->savres_strategy & REST_INLINE_FPRS)
24510 || !(info->savres_strategy & REST_INLINE_VRS))
24511 info->lr_save_p = 1;
24512
24513 if (info->lr_save_p)
24514 df_set_regs_ever_live (LR_REGNO, true);
24515
24516 /* Determine if we need to allocate any stack frame:
24517
24518 For AIX we need to push the stack if a frame pointer is needed
24519 (because the stack might be dynamically adjusted), if we are
24520 debugging, if we make calls, or if the sum of fp_save, gp_save,
24521 and local variables is more than the space needed to save all
24522 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24523 + 18*8 = 288 (GPR13 reserved).
24524
24525 For V.4 we don't have the stack cushion that AIX uses, but assume
24526 that the debugger can handle stackless frames. */
24527
24528 if (info->calls_p)
24529 info->push_p = 1;
24530
24531 else if (DEFAULT_ABI == ABI_V4)
24532 info->push_p = non_fixed_size != 0;
24533
24534 else if (frame_pointer_needed)
24535 info->push_p = 1;
24536
24537 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24538 info->push_p = 1;
24539
24540 else
24541 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24542
24543 return info;
24544 }
24545
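/* Dump INFO (or, if INFO is null, the current function's stack info)
   to stderr for debugging.  */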
24546 static void
24547 debug_stack_info (rs6000_stack_t *info)
24548 {
24549 const char *abi_string;
24550
24551 if (! info)
24552 info = rs6000_stack_info ();
24553
24554 fprintf (stderr, "\nStack information for function %s:\n",
24555 ((current_function_decl && DECL_NAME (current_function_decl))
24556 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24557 : "<unknown>"));
24558
24559 switch (info->abi)
24560 {
24561 default: abi_string = "Unknown"; break;
24562 case ABI_NONE: abi_string = "NONE"; break;
24563 case ABI_AIX: abi_string = "AIX"; break;
24564 case ABI_ELFv2: abi_string = "ELFv2"; break;
24565 case ABI_DARWIN: abi_string = "Darwin"; break;
24566 case ABI_V4: abi_string = "V.4"; break;
24567 }
24568
24569 fprintf (stderr, "\tABI = %5s\n", abi_string);
24570
24571 if (TARGET_ALTIVEC_ABI)
24572 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24573
24574 if (info->first_gp_reg_save != 32)
24575 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24576
24577 if (info->first_fp_reg_save != 64)
24578 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24579
24580 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24581 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24582 info->first_altivec_reg_save);
24583
24584 if (info->lr_save_p)
24585 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24586
24587 if (info->cr_save_p)
24588 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24589
24590 if (info->vrsave_mask)
24591 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24592
24593 if (info->push_p)
24594 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24595
24596 if (info->calls_p)
24597 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24598
24599 if (info->gp_size)
24600 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24601
24602 if (info->fp_size)
24603 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24604
24605 if (info->altivec_size)
24606 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24607 info->altivec_save_offset);
24608
24609 if (info->vrsave_size)
24610 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24611 info->vrsave_save_offset);
24612
24613 if (info->lr_save_p)
24614 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24615
24616 if (info->cr_save_p)
24617 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24618
24619 if (info->varargs_save_offset)
24620 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24621
24622 if (info->total_size)
24623 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24624 info->total_size);
24625
24626 if (info->vars_size)
24627 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24628 info->vars_size);
24629
24630 if (info->parm_size)
24631 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24632
24633 if (info->fixed_size)
24634 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24635
24636 if (info->gp_size)
24637 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24638
24639 if (info->fp_size)
24640 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24641
24642 if (info->altivec_size)
24643 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24644
24645 if (info->vrsave_size)
24646 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24647
24648 if (info->altivec_padding_size)
24649 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24650 info->altivec_padding_size);
24651
24652 if (info->cr_size)
24653 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24654
24655 if (info->save_size)
24656 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24657
24658 if (info->reg_size != 4)
24659 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24660
24661 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24662
24663 if (info->abi == ABI_DARWIN)
24664 fprintf (stderr, "\tWORLD_SAVE_P = %5d\n", WORLD_SAVE_P (info));
24665
24666 fprintf (stderr, "\n");
24667 }
24668
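/* Implement RETURN_ADDR_RTX: return an rtx for the return address of
   the frame COUNT steps up from the current frame, where FRAME is the
   frame address supplied by the generic code.  */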
24669 rtx
24670 rs6000_return_addr (int count, rtx frame)
24671 {
24672 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24673 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24674 if (count != 0
24675 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24676 {
24677 cfun->machine->ra_needs_full_frame = 1;
24678
24679 if (count == 0)
24680 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24681 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24682 frame = stack_pointer_rtx;
24683 rtx prev_frame_addr = memory_address (Pmode, frame);
24684 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24685 rtx lr_save_off = plus_constant (Pmode,
24686 prev_frame, RETURN_ADDRESS_OFFSET);
24687 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24688 return gen_rtx_MEM (Pmode, lr_save_addr);
24689 }
24690
24691 cfun->machine->ra_need_lr = 1;
24692 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24693 }
24694
24695 /* Say whether a function is a candidate for sibcall handling or not. */
24696
24697 static bool
24698 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24699 {
24700 tree fntype;
24701
24702 /* The sibcall epilogue may clobber the static chain register.
24703 ??? We could work harder and avoid that, but it's probably
24704 not worth the hassle in practice. */
24705 if (CALL_EXPR_STATIC_CHAIN (exp))
24706 return false;
24707
24708 if (decl)
24709 fntype = TREE_TYPE (decl);
24710 else
24711 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24712
24713 /* We can't do it if the called function has more vector parameters
24714 than the current function; there's nowhere to put the VRsave code. */
24715 if (TARGET_ALTIVEC_ABI
24716 && TARGET_ALTIVEC_VRSAVE
24717 && !(decl && decl == current_function_decl))
24718 {
24719 function_args_iterator args_iter;
24720 tree type;
24721 int nvreg = 0;
24722
24723 /* Functions with vector parameters are required to have a
24724 prototype, so the argument type info must be available
24725 here. */
24726 FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
24727 if (TREE_CODE (type) == VECTOR_TYPE
24728 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24729 nvreg++;
24730
24731 FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
24732 if (TREE_CODE (type) == VECTOR_TYPE
24733 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24734 nvreg--;
24735
24736 if (nvreg > 0)
24737 return false;
24738 }
24739
24740 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
24741 functions, because the callee may have a different TOC pointer from
24742 the caller and there's no way to ensure we restore the TOC when
24743 we return. With the secure-plt SYSV ABI we can't make non-local
24744 calls when -fpic/PIC because the plt call stubs use r30. */
24745 if (DEFAULT_ABI == ABI_DARWIN
24746 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24747 && decl
24748 && !DECL_EXTERNAL (decl)
24749 && !DECL_WEAK (decl)
24750 && (*targetm.binds_local_p) (decl))
24751 || (DEFAULT_ABI == ABI_V4
24752 && (!TARGET_SECURE_PLT
24753 || !flag_pic
24754 || (decl
24755 && (*targetm.binds_local_p) (decl)))))
24756 {
24757 tree attr_list = TYPE_ATTRIBUTES (fntype);
24758
24759 if (!lookup_attribute ("longcall", attr_list)
24760 || lookup_attribute ("shortcall", attr_list))
24761 return true;
24762 }
24763
24764 return false;
24765 }
24766
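/* Return nonzero if something other than prologue/epilogue code or a
   sibcall clobbers or stores LR in the current function, i.e. if LR
   must be saved. If cfun->machine->lr_save_state has been set (see
   rs6000_emit_eh_reg_restore), return the answer frozen there.  */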
24767 static int
24768 rs6000_ra_ever_killed (void)
24769 {
24770 rtx_insn *top;
24771 rtx reg;
24772 rtx_insn *insn;
24773
24774 if (cfun->is_thunk)
24775 return 0;
24776
24777 if (cfun->machine->lr_save_state)
24778 return cfun->machine->lr_save_state - 1;
24779
24780 /* regs_ever_live has LR marked as used if any sibcalls are present,
24781 but this should not force saving and restoring in the
24782 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24783 clobbers LR, so that is inappropriate. */
24784
24785 /* Also, the prologue can generate a store into LR that
24786 doesn't really count, like this:
24787
24788 move LR->R0
24789 bcl to set PIC register
24790 move LR->R31
24791 move R0->LR
24792
24793 When we're called from the epilogue, we need to avoid counting
24794 this as a store. */
24795
24796 push_topmost_sequence ();
24797 top = get_insns ();
24798 pop_topmost_sequence ();
24799 reg = gen_rtx_REG (Pmode, LR_REGNO);
24800
24801 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24802 {
24803 if (INSN_P (insn))
24804 {
24805 if (CALL_P (insn))
24806 {
24807 if (!SIBLING_CALL_P (insn))
24808 return 1;
24809 }
24810 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24811 return 1;
24812 else if (set_of (reg, insn) != NULL_RTX
24813 && !prologue_epilogue_contains (insn))
24814 return 1;
24815 }
24816 }
24817 return 0;
24818 }
24819 \f
24820 /* Emit instructions needed to load the TOC register.
24821 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
24822 a constant pool; or for SVR4 -fpic. */
24823
24824 void
24825 rs6000_emit_load_toc_table (int fromprolog)
24826 {
24827 rtx dest;
24828 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24829
24830 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24831 {
24832 char buf[30];
24833 rtx lab, tmp1, tmp2, got;
24834
24835 lab = gen_label_rtx ();
24836 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24837 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24838 if (flag_pic == 2)
24839 {
24840 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24841 need_toc_init = 1;
24842 }
24843 else
24844 got = rs6000_got_sym ();
24845 tmp1 = tmp2 = dest;
24846 if (!fromprolog)
24847 {
24848 tmp1 = gen_reg_rtx (Pmode);
24849 tmp2 = gen_reg_rtx (Pmode);
24850 }
24851 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24852 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24853 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24854 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24855 }
24856 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24857 {
24858 emit_insn (gen_load_toc_v4_pic_si ());
24859 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24860 }
24861 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24862 {
24863 char buf[30];
24864 rtx temp0 = (fromprolog
24865 ? gen_rtx_REG (Pmode, 0)
24866 : gen_reg_rtx (Pmode));
24867
24868 if (fromprolog)
24869 {
24870 rtx symF, symL;
24871
24872 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24873 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24874
24875 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24876 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24877
24878 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24879 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24880 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24881 }
24882 else
24883 {
24884 rtx tocsym, lab;
24885
24886 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24887 need_toc_init = 1;
24888 lab = gen_label_rtx ();
24889 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24890 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24891 if (TARGET_LINK_STACK)
24892 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24893 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24894 }
24895 emit_insn (gen_addsi3 (dest, temp0, dest));
24896 }
24897 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24898 {
24899 /* This is for AIX code running in non-PIC ELF32. */
24900 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24901
24902 need_toc_init = 1;
24903 emit_insn (gen_elf_high (dest, realsym));
24904 emit_insn (gen_elf_low (dest, dest, realsym));
24905 }
24906 else
24907 {
24908 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24909
24910 if (TARGET_32BIT)
24911 emit_insn (gen_load_toc_aix_si (dest));
24912 else
24913 emit_insn (gen_load_toc_aix_di (dest));
24914 }
24915 }
24916
24917 /* Emit instructions to restore the link register after determining where
24918 its value has been stored. */
24919
24920 void
24921 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24922 {
24923 rs6000_stack_t *info = rs6000_stack_info ();
24924 rtx operands[2];
24925
24926 operands[0] = source;
24927 operands[1] = scratch;
24928
24929 if (info->lr_save_p)
24930 {
24931 rtx frame_rtx = stack_pointer_rtx;
24932 HOST_WIDE_INT sp_offset = 0;
24933 rtx tmp;
24934
24935 if (frame_pointer_needed
24936 || cfun->calls_alloca
24937 || info->total_size > 32767)
24938 {
24939 tmp = gen_frame_mem (Pmode, frame_rtx);
24940 emit_move_insn (operands[1], tmp);
24941 frame_rtx = operands[1];
24942 }
24943 else if (info->push_p)
24944 sp_offset = info->total_size;
24945
24946 tmp = plus_constant (Pmode, frame_rtx,
24947 info->lr_save_offset + sp_offset);
24948 tmp = gen_frame_mem (Pmode, tmp);
24949 emit_move_insn (tmp, operands[0]);
24950 }
24951 else
24952 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
24953
24954 /* Freeze lr_save_p. We've just emitted rtl that depends on the
24955 state of lr_save_p so any change from here on would be a bug. In
24956 particular, stop rs6000_ra_ever_killed from considering the SET
24957 of lr we may have added just above. */
24958 cfun->machine->lr_save_state = info->lr_save_p + 1;
24959 }
24960
24961 static GTY(()) alias_set_type set = -1;
24962
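/* Return the alias set used for TOC references, creating it on first
   use.  */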
24963 alias_set_type
24964 get_TOC_alias_set (void)
24965 {
24966 if (set == -1)
24967 set = new_alias_set ();
24968 return set;
24969 }
24970
24971 /* This returns nonzero if the current function uses the TOC. This is
24972 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
24973 is generated by the ABI_V4 load_toc_* patterns.
24974 Return 2 instead of 1 if the load_toc_* pattern is in the function
24975 partition that doesn't start the function. */
24976 #if TARGET_ELF
24977 static int
24978 uses_TOC (void)
24979 {
24980 rtx_insn *insn;
24981 int ret = 1;
24982
24983 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24984 {
24985 if (INSN_P (insn))
24986 {
24987 rtx pat = PATTERN (insn);
24988 int i;
24989
24990 if (GET_CODE (pat) == PARALLEL)
24991 for (i = 0; i < XVECLEN (pat, 0); i++)
24992 {
24993 rtx sub = XVECEXP (pat, 0, i);
24994 if (GET_CODE (sub) == USE)
24995 {
24996 sub = XEXP (sub, 0);
24997 if (GET_CODE (sub) == UNSPEC
24998 && XINT (sub, 1) == UNSPEC_TOC)
24999 return ret;
25000 }
25001 }
25002 }
25003 else if (crtl->has_bb_partition
25004 && NOTE_P (insn)
25005 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25006 ret = 2;
25007 }
25008 return 0;
25009 }
25010 #endif
25011
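/* Return a TOC-relative reference (an UNSPEC_TOCREL) for SYMBOL. When
   pseudos are no longer available and the small code model form cannot
   be used, split the reference into a HIGH/LO_SUM pair, using
   LARGETOC_REG (if non-null) to hold the HIGH part.  */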
25012 rtx
25013 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25014 {
25015 rtx tocrel, tocreg, hi;
25016
25017 if (TARGET_DEBUG_ADDR)
25018 {
25019 if (SYMBOL_REF_P (symbol))
25020 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25021 XSTR (symbol, 0));
25022 else
25023 {
25024 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25025 GET_RTX_NAME (GET_CODE (symbol)));
25026 debug_rtx (symbol);
25027 }
25028 }
25029
25030 if (!can_create_pseudo_p ())
25031 df_set_regs_ever_live (TOC_REGISTER, true);
25032
25033 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25034 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25035 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25036 return tocrel;
25037
25038 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25039 if (largetoc_reg != NULL)
25040 {
25041 emit_move_insn (largetoc_reg, hi);
25042 hi = largetoc_reg;
25043 }
25044 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25045 }
25046
25047 /* Issue assembly directives that create a reference to the given DWARF
25048 FRAME_TABLE_LABEL from the current function section. */
25049 void
25050 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25051 {
25052 fprintf (asm_out_file, "\t.ref %s\n",
25053 (* targetm.strip_name_encoding) (frame_table_label));
25054 }
25055 \f
25056 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25057 and the change to the stack pointer. */
25058
25059 static void
25060 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25061 {
25062 rtvec p;
25063 int i;
25064 rtx regs[3];
25065
25066 i = 0;
25067 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25068 if (hard_frame_needed)
25069 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25070 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25071 || (hard_frame_needed
25072 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25073 regs[i++] = fp;
25074
25075 p = rtvec_alloc (i);
25076 while (--i >= 0)
25077 {
25078 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25079 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25080 }
25081
25082 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25083 }
25084
25085 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25086 and set the appropriate attributes for the generated insn. Return the
25087 first insn which adjusts the stack pointer or the last insn before
25088 the stack adjustment loop.
25089
25090 SIZE_INT is used to create the CFI note for the allocation.
25091
25092 Internally the adjustment is emitted as an rtx holding -SIZE_INT,
25093 since the stack grows towards lower addresses.
25094
25095 ORIG_SP contains the backchain value that must be stored at *sp. */
25096
25097 static rtx_insn *
25098 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25099 {
25100 rtx_insn *insn;
25101
25102 rtx size_rtx = GEN_INT (-size_int);
25103 if (size_int > 32767)
25104 {
25105 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25106 /* Need a note here so that try_split doesn't get confused. */
25107 if (get_last_insn () == NULL_RTX)
25108 emit_note (NOTE_INSN_DELETED);
25109 insn = emit_move_insn (tmp_reg, size_rtx);
25110 try_split (PATTERN (insn), insn, 0);
25111 size_rtx = tmp_reg;
25112 }
25113
25114 if (TARGET_32BIT)
25115 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25116 stack_pointer_rtx,
25117 size_rtx,
25118 orig_sp));
25119 else
25120 insn = emit_insn (gen_movdi_update_stack (stack_pointer_rtx,
25121 stack_pointer_rtx,
25122 size_rtx,
25123 orig_sp));
25124 rtx par = PATTERN (insn);
25125 gcc_assert (GET_CODE (par) == PARALLEL);
25126 rtx set = XVECEXP (par, 0, 0);
25127 gcc_assert (GET_CODE (set) == SET);
25128 rtx mem = SET_DEST (set);
25129 gcc_assert (MEM_P (mem));
25130 MEM_NOTRAP_P (mem) = 1;
25131 set_mem_alias_set (mem, get_frame_alias_set ());
25132
25133 RTX_FRAME_RELATED_P (insn) = 1;
25134 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25135 gen_rtx_SET (stack_pointer_rtx,
25136 gen_rtx_PLUS (Pmode,
25137 stack_pointer_rtx,
25138 GEN_INT (-size_int))));
25139
25140 /* Emit a blockage to ensure the allocation/probing insns are
25141 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25142 note for similar reasons. */
25143 if (flag_stack_clash_protection)
25144 {
25145 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25146 emit_insn (gen_blockage ());
25147 }
25148
25149 return insn;
25150 }
25151
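/* Return the interval between stack-clash probes, in bytes; the
   associated --param value is its log2.  */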
25152 static HOST_WIDE_INT
25153 get_stack_clash_protection_probe_interval (void)
25154 {
25155 return (HOST_WIDE_INT_1U
25156 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25157 }
25158
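/* Likewise for the stack-clash guard size.  */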
25159 static HOST_WIDE_INT
25160 get_stack_clash_protection_guard_size (void)
25161 {
25162 return (HOST_WIDE_INT_1U
25163 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25164 }
25165
25166 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25167 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25168
25169 COPY_REG, if non-null, should contain a copy of the original
25170 stack pointer at exit from this function.
25171
25172 This is subtly different from the Ada probing in that it tries hard to
25173 prevent attacks that jump the stack guard. Thus it is never allowed to
25174 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25175 space without a suitable probe. */
25176 static rtx_insn *
25177 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25178 rtx copy_reg)
25179 {
25180 rtx orig_sp = copy_reg;
25181
25182 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25183
25184 /* Round the size down to a multiple of PROBE_INTERVAL. */
25185 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25186
25187 /* If explicitly requested,
25188 or the rounded size is not the same as the original size
25189 or the rounded size is greater than the probe interval,
25190 then we will need a copy of the original stack pointer. */
25191 if (rounded_size != orig_size
25192 || rounded_size > probe_interval
25193 || copy_reg)
25194 {
25195 /* If the caller did not request a copy of the incoming stack
25196 pointer, then we use r0 to hold the copy. */
25197 if (!copy_reg)
25198 orig_sp = gen_rtx_REG (Pmode, 0);
25199 emit_move_insn (orig_sp, stack_pointer_rtx);
25200 }
25201
25202 /* There are three cases here.
25203
25204 One is a single probe, which is the most common and the most
25205 efficient, as it does not need a copy of the original stack
25206 pointer if there are no residuals.
25207
25208 Second is unrolled allocation/probes, which we use if there are
25209 just a few of them. It needs to save the original stack pointer
25210 into a temporary for use as a source register in the
25211 allocation/probe.
25212 Last is a loop; this is the least common and least efficient case. */
25213 rtx_insn *retval = NULL;
25214 if (rounded_size == probe_interval)
25215 {
25216 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25217
25218 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25219 }
25220 else if (rounded_size <= 8 * probe_interval)
25221 {
25222 /* The ABI requires using the store with update insns to allocate
25223 space and store the backchain into the stack.
25224
25225 So we save the current stack pointer into a temporary, then
25226 emit the store-with-update insns to store the saved stack pointer
25227 into the right location in each new page. */
25228 for (int i = 0; i < rounded_size; i += probe_interval)
25229 {
25230 rtx_insn *insn
25231 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25232
25233 /* Save the first stack adjustment in RETVAL. */
25234 if (i == 0)
25235 retval = insn;
25236 }
25237
25238 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25239 }
25240 else
25241 {
25242 /* Compute the ending address. */
25243 rtx end_addr
25244 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25245 rtx rs = GEN_INT (-rounded_size);
25246 rtx_insn *insn;
25247 if (add_operand (rs, Pmode))
25248 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25249 else
25250 {
25251 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25252 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25253 stack_pointer_rtx));
25254 /* Describe the effect of INSN to the CFI engine. */
25255 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25256 gen_rtx_SET (end_addr,
25257 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25258 rs)));
25259 }
25260 RTX_FRAME_RELATED_P (insn) = 1;
25261
25262 /* Emit the loop. */
25263 if (TARGET_64BIT)
25264 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25265 stack_pointer_rtx, orig_sp,
25266 end_addr));
25267 else
25268 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25269 stack_pointer_rtx, orig_sp,
25270 end_addr));
25271 RTX_FRAME_RELATED_P (retval) = 1;
25272 /* Describe the effect of INSN to the CFI engine. */
25273 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25274 gen_rtx_SET (stack_pointer_rtx, end_addr));
25275
25276 /* Emit a blockage to ensure the allocation/probing insns are
25277 not optimized, combined, removed, etc. Other cases handle this
25278 within their call to rs6000_emit_allocate_stack_1. */
25279 emit_insn (gen_blockage ());
25280
25281 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25282 }
25283
25284 if (orig_size != rounded_size)
25285 {
25286 /* Allocate (and implicitly probe) any residual space. */
25287 HOST_WIDE_INT residual = orig_size - rounded_size;
25288
25289 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25290
25291 /* If the residual was the only allocation, then we can return the
25292 allocating insn. */
25293 if (!retval)
25294 retval = insn;
25295 }
25296
25297 return retval;
25298 }
25299
25300 /* Emit the correct code for allocating stack space, as insns.
25301 If COPY_REG, make sure a copy of the old frame is left there.
25302 The generated code may use hard register 0 as a temporary. */
25303
25304 static rtx_insn *
25305 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25306 {
25307 rtx_insn *insn;
25308 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25309 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25310 rtx todec = gen_int_mode (-size, Pmode);
25311
25312 if (INTVAL (todec) != -size)
25313 {
25314 warning (0, "stack frame too large");
25315 emit_insn (gen_trap ());
25316 return 0;
25317 }
25318
25319 if (crtl->limit_stack)
25320 {
25321 if (REG_P (stack_limit_rtx)
25322 && REGNO (stack_limit_rtx) > 1
25323 && REGNO (stack_limit_rtx) <= 31)
25324 {
25325 rtx_insn *insn
25326 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25327 gcc_assert (insn);
25328 emit_insn (insn);
25329 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25330 }
25331 else if (SYMBOL_REF_P (stack_limit_rtx)
25332 && TARGET_32BIT
25333 && DEFAULT_ABI == ABI_V4
25334 && !flag_pic)
25335 {
25336 rtx toload = gen_rtx_CONST (VOIDmode,
25337 gen_rtx_PLUS (Pmode,
25338 stack_limit_rtx,
25339 GEN_INT (size)));
25340
25341 emit_insn (gen_elf_high (tmp_reg, toload));
25342 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25343 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25344 const0_rtx));
25345 }
25346 else
25347 warning (0, "stack limit expression is not supported");
25348 }
25349
25350 if (flag_stack_clash_protection)
25351 {
25352 if (size < get_stack_clash_protection_guard_size ())
25353 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25354 else
25355 {
25356 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25357 copy_reg);
25358
25359 /* If we asked for a copy with an offset, then we still need to add in
25360 the offset. */
25361 if (copy_reg && copy_off)
25362 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25363 return insn;
25364 }
25365 }
25366
25367 if (copy_reg)
25368 {
25369 if (copy_off != 0)
25370 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25371 else
25372 emit_move_insn (copy_reg, stack_reg);
25373 }
25374
25375 /* rs6000_emit_allocate_stack_1 emits the store-with-update insn
25376 that allocates the space and stores the backchain. It also sets
25377 the alias set and attributes on the MEM it creates, since that
25378 MEM was not generated with gen_frame_mem. */
25379 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25380 return insn;
25381 }
25382
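/* Byte interval between the probes emitted for -fstack-check.  */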
25383 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
25384
25385 #if PROBE_INTERVAL > 32768
25386 #error Cannot use indexed addressing mode for stack probing
25387 #endif
25388
25389 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25390 inclusive. These are offsets from the current stack pointer. */
25391
25392 static void
25393 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25394 {
25395 /* See if we have a constant small number of probes to generate. If so,
25396 that's the easy case. */
25397 if (first + size <= 32768)
25398 {
25399 HOST_WIDE_INT i;
25400
25401 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25402 it exceeds SIZE. If only one probe is needed, this will not
25403 generate any code. Then probe at FIRST + SIZE. */
25404 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25405 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25406 -(first + i)));
25407
25408 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25409 -(first + size)));
25410 }
25411
25412 /* Otherwise, do the same as above, but in a loop. Note that we must be
25413 extra careful with variables wrapping around because we might be at
25414 the very top (or the very bottom) of the address space and we have
25415 to be able to handle this case properly; in particular, we use an
25416 equality test for the loop condition. */
25417 else
25418 {
25419 HOST_WIDE_INT rounded_size;
25420 rtx r12 = gen_rtx_REG (Pmode, 12);
25421 rtx r0 = gen_rtx_REG (Pmode, 0);
25422
25423 /* Sanity check for the addressing mode we're going to use. */
25424 gcc_assert (first <= 32768);
25425
25426 /* Step 1: round SIZE to the previous multiple of the interval. */
25427
25428 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25429
25430
25431 /* Step 2: compute initial and final value of the loop counter. */
25432
25433 /* TEST_ADDR = SP + FIRST. */
25434 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25435 -first)));
25436
25437 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25438 if (rounded_size > 32768)
25439 {
25440 emit_move_insn (r0, GEN_INT (-rounded_size));
25441 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25442 }
25443 else
25444 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25445 -rounded_size)));
25446
25447
25448 /* Step 3: the loop
25449
25450 do
25451 {
25452 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25453 probe at TEST_ADDR
25454 }
25455 while (TEST_ADDR != LAST_ADDR)
25456
25457 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25458 until it is equal to ROUNDED_SIZE. */
25459
25460 if (TARGET_64BIT)
25461 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25462 else
25463 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25464
25465
25466 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25467 that SIZE is equal to ROUNDED_SIZE. */
25468
25469 if (size != rounded_size)
25470 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25471 }
25472 }
25473
25474 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25475 addresses, not offsets. */
25476
25477 static const char *
25478 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25479 {
25480 static int labelno = 0;
25481 char loop_lab[32];
25482 rtx xops[2];
25483
25484 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25485
25486 /* Loop. */
25487 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25488
25489 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25490 xops[0] = reg1;
25491 xops[1] = GEN_INT (-PROBE_INTERVAL);
25492 output_asm_insn ("addi %0,%0,%1", xops);
25493
25494 /* Probe at TEST_ADDR. */
25495 xops[1] = gen_rtx_REG (Pmode, 0);
25496 output_asm_insn ("stw %1,0(%0)", xops);
25497
25498 /* Test if TEST_ADDR == LAST_ADDR. */
25499 xops[1] = reg2;
25500 if (TARGET_64BIT)
25501 output_asm_insn ("cmpd 0,%0,%1", xops);
25502 else
25503 output_asm_insn ("cmpw 0,%0,%1", xops);
25504
25505 /* Branch. */
25506 fputs ("\tbne 0,", asm_out_file);
25507 assemble_name_raw (asm_out_file, loop_lab);
25508 fputc ('\n', asm_out_file);
25509
25510 return "";
25511 }
25512
25513 /* This function is called when rs6000_frame_related is processing
25514 SETs within a PARALLEL, and returns whether the REGNO save ought to
25515 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25516 for out-of-line register save functions, store multiple, and the
25517 Darwin world_save. They may contain registers that don't really
25518 need saving. */
25519
25520 static bool
25521 interesting_frame_related_regno (unsigned int regno)
25522 {
25523 /* Saves that appear to be of r0 are actually saving LR. It doesn't make
25524 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25525 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25526 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25527 as frame related. */
25528 if (regno == 0)
25529 return true;
25530 /* If we see CR2 then we are here on a Darwin world save. Saves of
25531 CR2 signify the whole CR is being saved. This is a long-standing
25532 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25533 that CR needs to be saved. */
25534 if (regno == CR2_REGNO)
25535 return true;
25536 /* Omit frame info for any user-defined global regs. If frame info
25537 is supplied for them, frame unwinding will restore a user reg.
25538 Also omit frame info for any reg we don't need to save, as that
25539 bloats frame info and can cause problems with shrink wrapping.
25540 Since global regs won't be seen as needing to be saved, both of
25541 these conditions are covered by save_reg_p. */
25542 return save_reg_p (regno);
25543 }
25544
25545 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25546 addresses, not offsets.
25547
25548 REG2 contains the backchain that must be stored into *sp at each allocation.
25549
25550 This is subtly different from the Ada probing above in that it tries hard
25551 to prevent attacks that jump the stack guard. Thus, it is never allowed
25552 to allocate more than PROBE_INTERVAL bytes of stack space without a
25553 suitable probe. */
25554
25555 static const char *
25556 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25557 {
25558 static int labelno = 0;
25559 char loop_lab[32];
25560 rtx xops[3];
25561
25562 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25563
25564 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25565
25566 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25567
25568 /* This allocates and probes. */
25569 xops[0] = reg1;
25570 xops[1] = reg2;
25571 xops[2] = GEN_INT (-probe_interval);
25572 if (TARGET_64BIT)
25573 output_asm_insn ("stdu %1,%2(%0)", xops);
25574 else
25575 output_asm_insn ("stwu %1,%2(%0)", xops);
25576
25577 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25578 xops[0] = reg1;
25579 xops[1] = reg3;
25580 if (TARGET_64BIT)
25581 output_asm_insn ("cmpd 0,%0,%1", xops);
25582 else
25583 output_asm_insn ("cmpw 0,%0,%1", xops);
25584
25585 fputs ("\tbne 0,", asm_out_file);
25586 assemble_name_raw (asm_out_file, loop_lab);
25587 fputc ('\n', asm_out_file);
25588
25589 return "";
25590 }
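/* A sketch of the 64-bit output, assuming a 4 KiB probe interval,
   REG1 in r12, the backchain (REG2) in r0 and the limit (REG3) in
   r11; all of these assignments are illustrative:

	.LPSRL0:
		stdu 0,-4096(12)
		cmpd 0,12,11
		bne 0,.LPSRL0

   The update-form store allocates the next chunk and probes it in a
   single instruction, so the stack pointer never moves more than one
   probe interval ahead of the probed area.  */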
25591
25592 /* Wrapper around the output_probe_stack_range routines. */
25593 const char *
25594 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25595 {
25596 if (flag_stack_clash_protection)
25597 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25598 else
25599 return output_probe_stack_range_1 (reg1, reg3);
25600 }
25601
25602 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25603 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25604 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25605 deduce these equivalences by itself so it wasn't necessary to hold
25606 its hand so much. Don't be tempted to always supply d2_f_d_e with
25607 the actual CFA register, i.e. r31 when we are using a hard frame
25608 pointer. That fails when saving regs off r1, and sched moves the
25609 r31 setup past the reg saves. */
25610
25611 static rtx_insn *
25612 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25613 rtx reg2, rtx repl2)
25614 {
25615 rtx repl;
25616
25617 if (REGNO (reg) == STACK_POINTER_REGNUM)
25618 {
25619 gcc_checking_assert (val == 0);
25620 repl = NULL_RTX;
25621 }
25622 else
25623 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25624 GEN_INT (val));
25625
25626 rtx pat = PATTERN (insn);
25627 if (!repl && !reg2)
25628 {
25629 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25630 if (GET_CODE (pat) == PARALLEL)
25631 for (int i = 0; i < XVECLEN (pat, 0); i++)
25632 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25633 {
25634 rtx set = XVECEXP (pat, 0, i);
25635
25636 if (!REG_P (SET_SRC (set))
25637 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25638 RTX_FRAME_RELATED_P (set) = 1;
25639 }
25640 RTX_FRAME_RELATED_P (insn) = 1;
25641 return insn;
25642 }
25643
25644 /* We expect that 'pat' is either a SET or a PARALLEL containing
25645 SETs (and possibly other stuff). In a PARALLEL, all the SETs that
25646 matter (see interesting_frame_related_regno) have to be marked RTX_FRAME_RELATED_P.
25647 Call simplify_replace_rtx on the SETs rather than the whole insn
25648 so as to leave the other stuff alone (for example USE of r12). */
25649
25650 set_used_flags (pat);
25651 if (GET_CODE (pat) == SET)
25652 {
25653 if (repl)
25654 pat = simplify_replace_rtx (pat, reg, repl);
25655 if (reg2)
25656 pat = simplify_replace_rtx (pat, reg2, repl2);
25657 }
25658 else if (GET_CODE (pat) == PARALLEL)
25659 {
25660 pat = shallow_copy_rtx (pat);
25661 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25662
25663 for (int i = 0; i < XVECLEN (pat, 0); i++)
25664 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25665 {
25666 rtx set = XVECEXP (pat, 0, i);
25667
25668 if (repl)
25669 set = simplify_replace_rtx (set, reg, repl);
25670 if (reg2)
25671 set = simplify_replace_rtx (set, reg2, repl2);
25672 XVECEXP (pat, 0, i) = set;
25673
25674 if (!REG_P (SET_SRC (set))
25675 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25676 RTX_FRAME_RELATED_P (set) = 1;
25677 }
25678 }
25679 else
25680 gcc_unreachable ();
25681
25682 RTX_FRAME_RELATED_P (insn) = 1;
25683 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25684
25685 return insn;
25686 }
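/* Worked example (register numbers illustrative): suppose INSN saves
   r30 through a frame pointer that was set up as r11 = r1 + 32, so
   its pattern contains (mem (plus (reg 11) (const_int 8))).  Calling
   this with REG = r11 and VAL = 32 attaches a REG_FRAME_RELATED_EXPR
   note in which that address is rewritten in terms of r1, typically
   simplified to (mem (plus (reg 1) (const_int 40))), which is a form
   dwarf2cfi.c can interpret directly.  */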
25687
25688 /* Return a PARALLEL rtx containing a VRSAVE set operation with the
25689 appropriate CLOBBERs, for the caller to emit. */
25690
25691 static rtx
25692 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25693 {
25694 int nclobs, i;
25695 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25696 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25697
25698 clobs[0]
25699 = gen_rtx_SET (vrsave,
25700 gen_rtx_UNSPEC_VOLATILE (SImode,
25701 gen_rtvec (2, reg, vrsave),
25702 UNSPECV_SET_VRSAVE));
25703
25704 nclobs = 1;
25705
25706 /* We need to clobber the registers in the mask so the scheduler
25707 does not move sets to VRSAVE before sets of AltiVec registers.
25708
25709 However, if the function receives nonlocal gotos, reload will set
25710 all call saved registers live. We will end up with:
25711
25712 (set (reg 999) (mem))
25713 (parallel [ (set (reg vrsave) (unspec blah))
25714 (clobber (reg 999))])
25715
25716 The clobber will cause the store into reg 999 to be dead, and
25717 flow will attempt to delete an epilogue insn. In this case, we
25718 need an unspec use/set of the register. */
25719
25720 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25721 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25722 {
25723 if (!epiloguep || call_used_regs[i])
25724 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
25725 else
25726 {
25727 rtx reg = gen_rtx_REG (V4SImode, i);
25728
25729 clobs[nclobs++]
25730 = gen_rtx_SET (reg,
25731 gen_rtx_UNSPEC (V4SImode,
25732 gen_rtvec (1, reg), 27));
25733 }
25734 }
25735
25736 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25737
25738 for (i = 0; i < nclobs; ++i)
25739 XVECEXP (insn, 0, i) = clobs[i];
25740
25741 return insn;
25742 }
25743
25744 static rtx
25745 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25746 {
25747 rtx addr, mem;
25748
25749 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25750 mem = gen_frame_mem (GET_MODE (reg), addr);
25751 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25752 }
25753
25754 static rtx
25755 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25756 {
25757 return gen_frame_set (reg, frame_reg, offset, false);
25758 }
25759
25760 static rtx
25761 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25762 {
25763 return gen_frame_set (reg, frame_reg, offset, true);
25764 }
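/* For example (offset chosen arbitrarily), in 64-bit mode a call
   gen_frame_store (REG, FRAME_REG, -8) with REG = r31 and
   FRAME_REG = r1 builds

	(set (mem:DI (plus:DI (reg:DI 1) (const_int -8))) (reg:DI 31))

   and gen_frame_load builds the mirror-image SET.  Neither emits
   anything; the caller wraps the SET in an insn.  */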
25765
25766 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25767 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25768
25769 static rtx_insn *
25770 emit_frame_save (rtx frame_reg, machine_mode mode,
25771 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25772 {
25773 rtx reg;
25774
25775 /* Reject modes that would need register indexed addressing; the reg+offset frame store below cannot express them. */
25776 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25777 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25778
25779 reg = gen_rtx_REG (mode, regno);
25780 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25781 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25782 NULL_RTX, NULL_RTX);
25783 }
25784
25785 /* Return an offset memory reference suitable for a frame load or
25786 store. */
25787
25788 static rtx
25789 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25790 {
25791 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25792 }
25793
25794 #ifndef TARGET_FIX_AND_CONTINUE
25795 #define TARGET_FIX_AND_CONTINUE 0
25796 #endif
25797
25798 /* The lowest register saved is really GPR 13 or 14, FPR 14, or VR 20; FIRST_SAVRES_REGISTER must be the smallest of these. */
25799 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25800 #define LAST_SAVRES_REGISTER 31
25801 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25802
25803 enum {
25804 SAVRES_LR = 0x1,
25805 SAVRES_SAVE = 0x2,
25806 SAVRES_REG = 0x0c,
25807 SAVRES_GPR = 0,
25808 SAVRES_FPR = 4,
25809 SAVRES_VR = 8
25810 };
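/* A SEL value is built by or'ing one register class into the flag
   bits, e.g. SAVRES_SAVE | SAVRES_FPR | SAVRES_LR asks for the FPR
   save routine that also saves the link register; consumers below
   recover the class with (sel & SAVRES_REG).  */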
25811
25812 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25813
25814 /* Temporary holding space for an out-of-line register save/restore
25815 routine name. */
25816 static char savres_routine_name[30];
25817
25818 /* Return the name for an out-of-line register save/restore routine.
25819 SEL encodes the register class and whether we save or restore; see the SAVRES_* flags above. */
25820
25821 static char *
25822 rs6000_savres_routine_name (int regno, int sel)
25823 {
25824 const char *prefix = "";
25825 const char *suffix = "";
25826
25827 /* Different targets are supposed to define
25828 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25829 routine name could be defined with:
25830
25831 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25832
25833 This is a nice idea in theory, but in reality, things are
25834 complicated in several ways:
25835
25836 - ELF targets have save/restore routines for GPRs.
25837
25838 - PPC64 ELF targets have routines for save/restore of GPRs that
25839 differ in what they do with the link register, so having a set
25840 prefix doesn't work. (We only use one of the save routines at
25841 the moment, though.)
25842
25843 - PPC32 ELF targets have "exit" versions of the restore routines
25844 that restore the link register and can save some extra space.
25845 These require an extra suffix. (There are also "tail" versions
25846 of the restore routines and "GOT" versions of the save routines,
25847 but we don't generate those at present. Same problems apply,
25848 though.)
25849
25850 We deal with all this by synthesizing our own prefix/suffix and
25851 using that for the simple sprintf call shown above. */
25852 if (DEFAULT_ABI == ABI_V4)
25853 {
25854 if (TARGET_64BIT)
25855 goto aix_names;
25856
25857 if ((sel & SAVRES_REG) == SAVRES_GPR)
25858 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25859 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25860 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25861 else if ((sel & SAVRES_REG) == SAVRES_VR)
25862 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25863 else
25864 abort ();
25865
25866 if ((sel & SAVRES_LR))
25867 suffix = "_x";
25868 }
25869 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25870 {
25871 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25872 /* No out-of-line save/restore routines for GPRs on AIX. */
25873 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25874 #endif
25875
25876 aix_names:
25877 if ((sel & SAVRES_REG) == SAVRES_GPR)
25878 prefix = ((sel & SAVRES_SAVE)
25879 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25880 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25881 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25882 {
25883 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25884 if ((sel & SAVRES_LR))
25885 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25886 else
25887 #endif
25888 {
25889 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25890 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25891 }
25892 }
25893 else if ((sel & SAVRES_REG) == SAVRES_VR)
25894 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25895 else
25896 abort ();
25897 }
25898
25899 if (DEFAULT_ABI == ABI_DARWIN)
25900 {
25901 /* The Darwin approach is (slightly) different, in order to be
25902 compatible with code generated by the system toolchain. There is a
25903 single symbol for the start of each save/restore sequence, and the
25904 code here computes an offset into that sequence based on the first
25905 register to be saved. */
25906 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
25907 if ((sel & SAVRES_REG) == SAVRES_GPR)
25908 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25909 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25910 (regno - 13) * 4, prefix, regno);
25911 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25912 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25913 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25914 else if ((sel & SAVRES_REG) == SAVRES_VR)
25915 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25916 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25917 else
25918 abort ();
25919 }
25920 else
25921 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25922
25923 return savres_routine_name;
25924 }
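/* By way of example (the start register is illustrative): a 32-bit
   SVR4 GPR save from r29 up that also saves LR, i.e.
   SAVRES_SAVE | SAVRES_GPR | SAVRES_LR, yields "_savegpr_29_x",
   while the same request under ELFv2 yields "_savegpr0_29", both
   straight from the prefixes and suffixes above.  */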
25925
25926 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25927 SEL has the same meaning as for rs6000_savres_routine_name above. */
25928
25929 static rtx
25930 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25931 {
25932 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25933 ? info->first_gp_reg_save
25934 : (sel & SAVRES_REG) == SAVRES_FPR
25935 ? info->first_fp_reg_save - 32
25936 : (sel & SAVRES_REG) == SAVRES_VR
25937 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25938 : -1);
25939 rtx sym;
25940 int select = sel;
25941
25942 /* Don't generate bogus routine names. */
25943 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25944 && regno <= LAST_SAVRES_REGISTER
25945 && select >= 0 && select < 12);
25946
25947 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25948
25949 if (sym == NULL)
25950 {
25951 char *name;
25952
25953 name = rs6000_savres_routine_name (regno, sel);
25954
25955 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25956 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25957 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25958 }
25959
25960 return sym;
25961 }
25962
25963 /* Emit a sequence of insns, including a stack tie if needed, for
25964 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25965 reset the stack pointer, but move the base of the frame into
25966 reg UPDT_REGNO for use by out-of-line register restore routines. */
25967
25968 static rtx
25969 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25970 unsigned updt_regno)
25971 {
25972 /* If there is nothing to do, don't do anything. */
25973 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
25974 return NULL_RTX;
25975
25976 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
25977
25978 /* This blockage is needed so that sched doesn't decide to move
25979 the sp change before the register restores. */
25980 if (DEFAULT_ABI == ABI_V4)
25981 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
25982 GEN_INT (frame_off)));
25983
25984 /* If we are restoring registers out-of-line, we will be using the
25985 "exit" variants of the restore routines, which will reset the
25986 stack for us. But we do need to point updt_reg into the
25987 right place for those routines. */
25988 if (frame_off != 0)
25989 return emit_insn (gen_add3_insn (updt_reg_rtx,
25990 frame_reg_rtx, GEN_INT (frame_off)));
25991 else
25992 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
25995 }
25996
25997 /* Return the register number used as a pointer by out-of-line
25998 save/restore functions. */
25999
26000 static inline unsigned
26001 ptr_regno_for_savres (int sel)
26002 {
26003 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26004 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26005 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26006 }
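/* In other words: AIX and ELFv2 hand the FPR and LR-saving variants
   r1 and everything else r12; Darwin uses r1 for FPRs and r11
   otherwise; and plain V.4 uses r11 throughout.  */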
26007
26008 /* Construct a parallel rtx describing the effect of a call to an
26009 out-of-line register save/restore routine, and emit the insn
26010 or jump_insn as appropriate. */
26011
26012 static rtx_insn *
26013 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26014 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26015 machine_mode reg_mode, int sel)
26016 {
26017 int i;
26018 int offset, start_reg, end_reg, n_regs, use_reg;
26019 int reg_size = GET_MODE_SIZE (reg_mode);
26020 rtx sym;
26021 rtvec p;
26022 rtx par;
26023 rtx_insn *insn;
26024
26025 offset = 0;
26026 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26027 ? info->first_gp_reg_save
26028 : (sel & SAVRES_REG) == SAVRES_FPR
26029 ? info->first_fp_reg_save
26030 : (sel & SAVRES_REG) == SAVRES_VR
26031 ? info->first_altivec_reg_save
26032 : -1);
26033 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26034 ? 32
26035 : (sel & SAVRES_REG) == SAVRES_FPR
26036 ? 64
26037 : (sel & SAVRES_REG) == SAVRES_VR
26038 ? LAST_ALTIVEC_REGNO + 1
26039 : -1);
26040 n_regs = end_reg - start_reg;
26041 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26042 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26043 + n_regs);
26044
26045 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26046 RTVEC_ELT (p, offset++) = ret_rtx;
26047
26048 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26049
26050 sym = rs6000_savres_routine_sym (info, sel);
26051 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26052
26053 use_reg = ptr_regno_for_savres (sel);
26054 if ((sel & SAVRES_REG) == SAVRES_VR)
26055 {
26056 /* Vector regs are saved/restored using [reg+reg] addressing. */
26057 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26058 RTVEC_ELT (p, offset++)
26059 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26060 }
26061 else
26062 RTVEC_ELT (p, offset++)
26063 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26064
26065 for (i = 0; i < end_reg - start_reg; i++)
26066 RTVEC_ELT (p, i + offset)
26067 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26068 frame_reg_rtx, save_area_offset + reg_size * i,
26069 (sel & SAVRES_SAVE) != 0);
26070
26071 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26072 RTVEC_ELT (p, i + offset)
26073 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26074
26075 par = gen_rtx_PARALLEL (VOIDmode, p);
26076
26077 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26078 {
26079 insn = emit_jump_insn (par);
26080 JUMP_LABEL (insn) = ret_rtx;
26081 }
26082 else
26083 insn = emit_insn (par);
26084 return insn;
26085 }
26086
26087 /* Emit prologue code to store CR fields that need to be saved into REG. This
26088 function should only be called when moving the non-volatile CRs to REG; it
26089 is not a general-purpose routine to move the entire set of CRs to REG.
26090 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26091 volatile CRs. */
26092
26093 static void
26094 rs6000_emit_prologue_move_from_cr (rtx reg)
26095 {
26096 /* Only the ELFv2 ABI allows storing only selected fields. */
26097 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26098 {
26099 int i, cr_reg[8], count = 0;
26100
26101 /* Collect CR fields that must be saved. */
26102 for (i = 0; i < 8; i++)
26103 if (save_reg_p (CR0_REGNO + i))
26104 cr_reg[count++] = i;
26105
26106 /* If it's just a single one, use mfcrf. */
26107 if (count == 1)
26108 {
26109 rtvec p = rtvec_alloc (1);
26110 rtvec r = rtvec_alloc (2);
26111 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26112 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26113 RTVEC_ELT (p, 0)
26114 = gen_rtx_SET (reg,
26115 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26116
26117 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26118 return;
26119 }
26120
26121 /* ??? It might be better to handle count == 2 / 3 cases here
26122 as well, using logical operations to combine the values. */
26123 }
26124
26125 emit_insn (gen_prologue_movesi_from_cr (reg));
26126 }
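/* A hedged sketch of the single-field case: if only CR2 needs
   saving, cr_reg[0] is 2 and the mask is 1 << (7 - 2) = 0x20, so the
   UNSPEC above is expected to end up as one field-selective move,
   something like "mfocrf 12,0x20" (the destination depends on REG,
   and the exact mnemonic on the pattern that matches).  */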
26127
26128 /* Return whether the split-stack arg pointer (r12) is used. */
26129
26130 static bool
26131 split_stack_arg_pointer_used_p (void)
26132 {
26133 /* If the pseudo holding the arg pointer is no longer a pseudo,
26134 then the arg pointer is used. */
26135 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26136 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26137 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26138 return true;
26139
26140 /* Unfortunately we also need to do some code scanning, since
26141 r12 may have been substituted for the pseudo. */
26142 rtx_insn *insn;
26143 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26144 FOR_BB_INSNS (bb, insn)
26145 if (NONDEBUG_INSN_P (insn))
26146 {
26147 /* A call destroys r12. */
26148 if (CALL_P (insn))
26149 return false;
26150
26151 df_ref use;
26152 FOR_EACH_INSN_USE (use, insn)
26153 {
26154 rtx x = DF_REF_REG (use);
26155 if (REG_P (x) && REGNO (x) == 12)
26156 return true;
26157 }
26158 df_ref def;
26159 FOR_EACH_INSN_DEF (def, insn)
26160 {
26161 rtx x = DF_REF_REG (def);
26162 if (REG_P (x) && REGNO (x) == 12)
26163 return false;
26164 }
26165 }
26166 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26167 }
26168
26169 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26170
26171 static bool
26172 rs6000_global_entry_point_needed_p (void)
26173 {
26174 /* Only needed for the ELFv2 ABI. */
26175 if (DEFAULT_ABI != ABI_ELFv2)
26176 return false;
26177
26178 /* With -msingle-pic-base, we assume the whole program shares the same
26179 TOC, so no global entry point prologues are needed anywhere. */
26180 if (TARGET_SINGLE_PIC_BASE)
26181 return false;
26182
26183 /* Ensure we have a global entry point for thunks. ??? We could
26184 avoid that if the target routine doesn't need a global entry point,
26185 but we do not know whether this is the case at this point. */
26186 if (cfun->is_thunk)
26187 return true;
26188
26189 /* For regular functions, rs6000_emit_prologue sets this flag if the
26190 routine ever uses the TOC pointer. */
26191 return cfun->machine->r2_setup_needed;
26192 }
26193
26194 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26195 static sbitmap
26196 rs6000_get_separate_components (void)
26197 {
26198 rs6000_stack_t *info = rs6000_stack_info ();
26199
26200 if (WORLD_SAVE_P (info))
26201 return NULL;
26202
26203 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26204 && !(info->savres_strategy & REST_MULTIPLE));
26205
26206 /* Component 0 is the save/restore of LR (done via GPR0).
26207 Component 2 is the save of the TOC (GPR2).
26208 Components 13..31 are the save/restore of GPR13..GPR31.
26209 Components 46..63 are the save/restore of FPR14..FPR31. */
26210
26211 cfun->machine->n_components = 64;
26212
26213 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26214 bitmap_clear (components);
26215
26216 int reg_size = TARGET_32BIT ? 4 : 8;
26217 int fp_reg_size = 8;
26218
26219 /* The GPRs we need saved to the frame. */
26220 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26221 && (info->savres_strategy & REST_INLINE_GPRS))
26222 {
26223 int offset = info->gp_save_offset;
26224 if (info->push_p)
26225 offset += info->total_size;
26226
26227 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26228 {
26229 if (IN_RANGE (offset, -0x8000, 0x7fff)
26230 && save_reg_p (regno))
26231 bitmap_set_bit (components, regno);
26232
26233 offset += reg_size;
26234 }
26235 }
26236
26237 /* Don't mess with the hard frame pointer. */
26238 if (frame_pointer_needed)
26239 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26240
26241 /* Don't mess with the fixed TOC register. */
26242 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26243 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26244 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26245 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26246
26247 /* The FPRs we need saved to the frame. */
26248 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26249 && (info->savres_strategy & REST_INLINE_FPRS))
26250 {
26251 int offset = info->fp_save_offset;
26252 if (info->push_p)
26253 offset += info->total_size;
26254
26255 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26256 {
26257 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26258 bitmap_set_bit (components, regno);
26259
26260 offset += fp_reg_size;
26261 }
26262 }
26263
26264 /* Optimize LR save and restore if we can. This is component 0. Any
26265 out-of-line register save/restore routines need LR. */
26266 if (info->lr_save_p
26267 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26268 && (info->savres_strategy & SAVE_INLINE_GPRS)
26269 && (info->savres_strategy & REST_INLINE_GPRS)
26270 && (info->savres_strategy & SAVE_INLINE_FPRS)
26271 && (info->savres_strategy & REST_INLINE_FPRS)
26272 && (info->savres_strategy & SAVE_INLINE_VRS)
26273 && (info->savres_strategy & REST_INLINE_VRS))
26274 {
26275 int offset = info->lr_save_offset;
26276 if (info->push_p)
26277 offset += info->total_size;
26278 if (IN_RANGE (offset, -0x8000, 0x7fff))
26279 bitmap_set_bit (components, 0);
26280 }
26281
26282 /* Optimize saving the TOC. This is component 2. */
26283 if (cfun->machine->save_toc_in_prologue)
26284 bitmap_set_bit (components, 2);
26285
26286 return components;
26287 }
26288
26289 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26290 static sbitmap
26291 rs6000_components_for_bb (basic_block bb)
26292 {
26293 rs6000_stack_t *info = rs6000_stack_info ();
26294
26295 bitmap in = DF_LIVE_IN (bb);
26296 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26297 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26298
26299 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26300 bitmap_clear (components);
26301
26302 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26303
26304 /* GPRs. */
26305 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26306 if (bitmap_bit_p (in, regno)
26307 || bitmap_bit_p (gen, regno)
26308 || bitmap_bit_p (kill, regno))
26309 bitmap_set_bit (components, regno);
26310
26311 /* FPRs. */
26312 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26313 if (bitmap_bit_p (in, regno)
26314 || bitmap_bit_p (gen, regno)
26315 || bitmap_bit_p (kill, regno))
26316 bitmap_set_bit (components, regno);
26317
26318 /* The link register. */
26319 if (bitmap_bit_p (in, LR_REGNO)
26320 || bitmap_bit_p (gen, LR_REGNO)
26321 || bitmap_bit_p (kill, LR_REGNO))
26322 bitmap_set_bit (components, 0);
26323
26324 /* The TOC save. */
26325 if (bitmap_bit_p (in, TOC_REGNUM)
26326 || bitmap_bit_p (gen, TOC_REGNUM)
26327 || bitmap_bit_p (kill, TOC_REGNUM))
26328 bitmap_set_bit (components, 2);
26329
26330 return components;
26331 }
26332
26333 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26334 static void
26335 rs6000_disqualify_components (sbitmap components, edge e,
26336 sbitmap edge_components, bool /*is_prologue*/)
26337 {
26338 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26339 live where we want to place that code. */
26340 if (bitmap_bit_p (edge_components, 0)
26341 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26342 {
26343 if (dump_file)
26344 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26345 "on entry to bb %d\n", e->dest->index);
26346 bitmap_clear_bit (components, 0);
26347 }
26348 }
26349
26350 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26351 static void
26352 rs6000_emit_prologue_components (sbitmap components)
26353 {
26354 rs6000_stack_t *info = rs6000_stack_info ();
26355 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26356 ? HARD_FRAME_POINTER_REGNUM
26357 : STACK_POINTER_REGNUM);
26358
26359 machine_mode reg_mode = Pmode;
26360 int reg_size = TARGET_32BIT ? 4 : 8;
26361 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26362 int fp_reg_size = 8;
26363
26364 /* Prologue for LR. */
26365 if (bitmap_bit_p (components, 0))
26366 {
26367 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26368 rtx reg = gen_rtx_REG (reg_mode, 0);
26369 rtx_insn *insn = emit_move_insn (reg, lr);
26370 RTX_FRAME_RELATED_P (insn) = 1;
26371 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26372
26373 int offset = info->lr_save_offset;
26374 if (info->push_p)
26375 offset += info->total_size;
26376
26377 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26378 RTX_FRAME_RELATED_P (insn) = 1;
26379 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26380 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26381 }
26382
26383 /* Prologue for TOC. */
26384 if (bitmap_bit_p (components, 2))
26385 {
26386 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26387 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26388 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26389 }
26390
26391 /* Prologue for the GPRs. */
26392 int offset = info->gp_save_offset;
26393 if (info->push_p)
26394 offset += info->total_size;
26395
26396 for (int i = info->first_gp_reg_save; i < 32; i++)
26397 {
26398 if (bitmap_bit_p (components, i))
26399 {
26400 rtx reg = gen_rtx_REG (reg_mode, i);
26401 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26402 RTX_FRAME_RELATED_P (insn) = 1;
26403 rtx set = copy_rtx (single_set (insn));
26404 add_reg_note (insn, REG_CFA_OFFSET, set);
26405 }
26406
26407 offset += reg_size;
26408 }
26409
26410 /* Prologue for the FPRs. */
26411 offset = info->fp_save_offset;
26412 if (info->push_p)
26413 offset += info->total_size;
26414
26415 for (int i = info->first_fp_reg_save; i < 64; i++)
26416 {
26417 if (bitmap_bit_p (components, i))
26418 {
26419 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26420 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26421 RTX_FRAME_RELATED_P (insn) = 1;
26422 rtx set = copy_rtx (single_set (insn));
26423 add_reg_note (insn, REG_CFA_OFFSET, set);
26424 }
26425
26426 offset += fp_reg_size;
26427 }
26428 }
26429
26430 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26431 static void
26432 rs6000_emit_epilogue_components (sbitmap components)
26433 {
26434 rs6000_stack_t *info = rs6000_stack_info ();
26435 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26436 ? HARD_FRAME_POINTER_REGNUM
26437 : STACK_POINTER_REGNUM);
26438
26439 machine_mode reg_mode = Pmode;
26440 int reg_size = TARGET_32BIT ? 4 : 8;
26441
26442 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26443 int fp_reg_size = 8;
26444
26445 /* Epilogue for the FPRs. */
26446 int offset = info->fp_save_offset;
26447 if (info->push_p)
26448 offset += info->total_size;
26449
26450 for (int i = info->first_fp_reg_save; i < 64; i++)
26451 {
26452 if (bitmap_bit_p (components, i))
26453 {
26454 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26455 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26456 RTX_FRAME_RELATED_P (insn) = 1;
26457 add_reg_note (insn, REG_CFA_RESTORE, reg);
26458 }
26459
26460 offset += fp_reg_size;
26461 }
26462
26463 /* Epilogue for the GPRs. */
26464 offset = info->gp_save_offset;
26465 if (info->push_p)
26466 offset += info->total_size;
26467
26468 for (int i = info->first_gp_reg_save; i < 32; i++)
26469 {
26470 if (bitmap_bit_p (components, i))
26471 {
26472 rtx reg = gen_rtx_REG (reg_mode, i);
26473 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26474 RTX_FRAME_RELATED_P (insn) = 1;
26475 add_reg_note (insn, REG_CFA_RESTORE, reg);
26476 }
26477
26478 offset += reg_size;
26479 }
26480
26481 /* Epilogue for LR. */
26482 if (bitmap_bit_p (components, 0))
26483 {
26484 int offset = info->lr_save_offset;
26485 if (info->push_p)
26486 offset += info->total_size;
26487
26488 rtx reg = gen_rtx_REG (reg_mode, 0);
26489 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26490
26491 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26492 insn = emit_move_insn (lr, reg);
26493 RTX_FRAME_RELATED_P (insn) = 1;
26494 add_reg_note (insn, REG_CFA_RESTORE, lr);
26495 }
26496 }
26497
26498 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26499 static void
26500 rs6000_set_handled_components (sbitmap components)
26501 {
26502 rs6000_stack_t *info = rs6000_stack_info ();
26503
26504 for (int i = info->first_gp_reg_save; i < 32; i++)
26505 if (bitmap_bit_p (components, i))
26506 cfun->machine->gpr_is_wrapped_separately[i] = true;
26507
26508 for (int i = info->first_fp_reg_save; i < 64; i++)
26509 if (bitmap_bit_p (components, i))
26510 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26511
26512 if (bitmap_bit_p (components, 0))
26513 cfun->machine->lr_is_wrapped_separately = true;
26514
26515 if (bitmap_bit_p (components, 2))
26516 cfun->machine->toc_is_wrapped_separately = true;
26517 }
26518
26519 /* VRSAVE is a bit vector representing which AltiVec registers
26520 are used. The OS uses this to determine which vector
26521 registers to save on a context switch. We need to save
26522 VRSAVE on the stack frame, add whatever AltiVec registers we
26523 used in this function, and do the corresponding magic in the
26524 epilogue. */
26525 static void
26526 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26527 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26528 {
26529 /* Get VRSAVE into a GPR. */
26530 rtx reg = gen_rtx_REG (SImode, save_regno);
26531 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26532 if (TARGET_MACHO)
26533 emit_insn (gen_get_vrsave_internal (reg));
26534 else
26535 emit_insn (gen_rtx_SET (reg, vrsave));
26536
26537 /* Save VRSAVE. */
26538 int offset = info->vrsave_save_offset + frame_off;
26539 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26540
26541 /* Include the registers in the mask. */
26542 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26543
26544 emit_insn (generate_set_vrsave (reg, info, 0));
26545 }
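/* Sketch of the sequence this emits on a non-Darwin target (register
   number and exact mnemonics illustrative; VRSAVE is SPR 256):

	mfspr 11,256		# copy VRSAVE into the GPR
	stw 11,OFF(1)		# save the old mask in the frame
	ori 11,11,MASK		# or in the regs used here (oris too if needed)
	mtspr 256,11		# install the new mask

   where OFF is vrsave_save_offset + frame_off and MASK is
   info->vrsave_mask.  */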
26546
26547 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26548 called, it left the arg pointer to the old stack in r29. Otherwise, the
26549 arg pointer is the top of the current frame. */
26550 static void
26551 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26552 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26553 {
26554 cfun->machine->split_stack_argp_used = true;
26555
26556 if (sp_adjust)
26557 {
26558 rtx r12 = gen_rtx_REG (Pmode, 12);
26559 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26560 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26561 emit_insn_before (set_r12, sp_adjust);
26562 }
26563 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26564 {
26565 rtx r12 = gen_rtx_REG (Pmode, 12);
26566 if (frame_off == 0)
26567 emit_move_insn (r12, frame_reg_rtx);
26568 else
26569 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26570 }
26571
26572 if (info->push_p)
26573 {
26574 rtx r12 = gen_rtx_REG (Pmode, 12);
26575 rtx r29 = gen_rtx_REG (Pmode, 29);
26576 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26577 rtx not_more = gen_label_rtx ();
26578 rtx jump;
26579
26580 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26581 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26582 gen_rtx_LABEL_REF (VOIDmode, not_more),
26583 pc_rtx);
26584 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26585 JUMP_LABEL (jump) = not_more;
26586 LABEL_NUSES (not_more) += 1;
26587 emit_move_insn (r12, r29);
26588 emit_label (not_more);
26589 }
26590 }
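/* A sketch of what the push_p test above becomes (label and register
   numbers illustrative; cr7 is assumed to still hold the result of
   the earlier split-stack comparison):

	bge 7,.Lnot_more
	mr 12,29
   .Lnot_more:

   so r12 is only overwritten with the old-stack arg pointer on the
   path where __morestack actually ran.  */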
26591
26592 /* Emit function prologue as insns. */
26593
26594 void
26595 rs6000_emit_prologue (void)
26596 {
26597 rs6000_stack_t *info = rs6000_stack_info ();
26598 machine_mode reg_mode = Pmode;
26599 int reg_size = TARGET_32BIT ? 4 : 8;
26600 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26601 int fp_reg_size = 8;
26602 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26603 rtx frame_reg_rtx = sp_reg_rtx;
26604 unsigned int cr_save_regno;
26605 rtx cr_save_rtx = NULL_RTX;
26606 rtx_insn *insn;
26607 int strategy;
26608 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26609 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26610 && call_used_regs[STATIC_CHAIN_REGNUM]);
26611 int using_split_stack = (flag_split_stack
26612 && (lookup_attribute ("no_split_stack",
26613 DECL_ATTRIBUTES (cfun->decl))
26614 == NULL));
26615
26616 /* Offset to top of frame for frame_reg and sp respectively. */
26617 HOST_WIDE_INT frame_off = 0;
26618 HOST_WIDE_INT sp_off = 0;
26619 /* sp_adjust is the stack adjusting instruction, tracked so that the
26620 insn setting up the split-stack arg pointer can be emitted just
26621 prior to it, when r12 is not used here for other purposes. */
26622 rtx_insn *sp_adjust = 0;
26623
26624 #if CHECKING_P
26625 /* Track and check usage of r0, r11, r12. */
26626 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26627 #define START_USE(R) do \
26628 { \
26629 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26630 reg_inuse |= 1 << (R); \
26631 } while (0)
26632 #define END_USE(R) do \
26633 { \
26634 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26635 reg_inuse &= ~(1 << (R)); \
26636 } while (0)
26637 #define NOT_INUSE(R) do \
26638 { \
26639 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26640 } while (0)
26641 #else
26642 #define START_USE(R) do {} while (0)
26643 #define END_USE(R) do {} while (0)
26644 #define NOT_INUSE(R) do {} while (0)
26645 #endif
26646
26647 if (DEFAULT_ABI == ABI_ELFv2
26648 && !TARGET_SINGLE_PIC_BASE)
26649 {
26650 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26651
26652 /* With -mminimal-toc we may generate an extra use of r2 below. */
26653 if (TARGET_TOC && TARGET_MINIMAL_TOC
26654 && !constant_pool_empty_p ())
26655 cfun->machine->r2_setup_needed = true;
26656 }
26657
26658
26659 if (flag_stack_usage_info)
26660 current_function_static_stack_size = info->total_size;
26661
26662 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26663 {
26664 HOST_WIDE_INT size = info->total_size;
26665
26666 if (crtl->is_leaf && !cfun->calls_alloca)
26667 {
26668 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26669 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26670 size - get_stack_check_protect ());
26671 }
26672 else if (size > 0)
26673 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26674 }
26675
26676 if (TARGET_FIX_AND_CONTINUE)
26677 {
26678 /* GDB on Darwin arranges to forward a function from the old
26679 address by modifying the first 5 instructions of the function
26680 to branch to the overriding function. This is necessary to
26681 permit function pointers that point to the old function to
26682 actually forward to the new function. */
26683 emit_insn (gen_nop ());
26684 emit_insn (gen_nop ());
26685 emit_insn (gen_nop ());
26686 emit_insn (gen_nop ());
26687 emit_insn (gen_nop ());
26688 }
26689
26690 /* Handle world saves specially here. */
26691 if (WORLD_SAVE_P (info))
26692 {
26693 int i, j, sz;
26694 rtx treg;
26695 rtvec p;
26696 rtx reg0;
26697
26698 /* save_world expects lr in r0. */
26699 reg0 = gen_rtx_REG (Pmode, 0);
26700 if (info->lr_save_p)
26701 {
26702 insn = emit_move_insn (reg0,
26703 gen_rtx_REG (Pmode, LR_REGNO));
26704 RTX_FRAME_RELATED_P (insn) = 1;
26705 }
26706
26707 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26708 assumptions about the offsets of various bits of the stack
26709 frame. */
26710 gcc_assert (info->gp_save_offset == -220
26711 && info->fp_save_offset == -144
26712 && info->lr_save_offset == 8
26713 && info->cr_save_offset == 4
26714 && info->push_p
26715 && info->lr_save_p
26716 && (!crtl->calls_eh_return
26717 || info->ehrd_offset == -432)
26718 && info->vrsave_save_offset == -224
26719 && info->altivec_save_offset == -416);
26720
26721 treg = gen_rtx_REG (SImode, 11);
26722 emit_move_insn (treg, GEN_INT (-info->total_size));
26723
26724 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26725 in R11. It also clobbers R12, so beware! */
26726
26727 /* Preserve CR2 for save_world prologues. */
26728 sz = 5;
26729 sz += 32 - info->first_gp_reg_save;
26730 sz += 64 - info->first_fp_reg_save;
26731 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26732 p = rtvec_alloc (sz);
26733 j = 0;
26734 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
26735 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26736 gen_rtx_SYMBOL_REF (Pmode,
26737 "*save_world"));
26738 /* We do floats first so that the instruction pattern matches
26739 properly. */
26740 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26741 RTVEC_ELT (p, j++)
26742 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26743 info->first_fp_reg_save + i),
26744 frame_reg_rtx,
26745 info->fp_save_offset + frame_off + 8 * i);
26746 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26747 RTVEC_ELT (p, j++)
26748 = gen_frame_store (gen_rtx_REG (V4SImode,
26749 info->first_altivec_reg_save + i),
26750 frame_reg_rtx,
26751 info->altivec_save_offset + frame_off + 16 * i);
26752 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26753 RTVEC_ELT (p, j++)
26754 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26755 frame_reg_rtx,
26756 info->gp_save_offset + frame_off + reg_size * i);
26757
26758 /* CR register traditionally saved as CR2. */
26759 RTVEC_ELT (p, j++)
26760 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26761 frame_reg_rtx, info->cr_save_offset + frame_off);
26762 /* Explain the use of R0. */
26763 if (info->lr_save_p)
26764 RTVEC_ELT (p, j++)
26765 = gen_frame_store (reg0,
26766 frame_reg_rtx, info->lr_save_offset + frame_off);
26767 /* Explain what happens to the stack pointer. */
26768 {
26769 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26770 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26771 }
26772
26773 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26774 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26775 treg, GEN_INT (-info->total_size));
26776 sp_off = frame_off = info->total_size;
26777 }
26778
26779 strategy = info->savres_strategy;
26780
26781 /* For V.4, update stack before we do any saving and set back pointer. */
26782 if (! WORLD_SAVE_P (info)
26783 && info->push_p
26784 && (DEFAULT_ABI == ABI_V4
26785 || crtl->calls_eh_return))
26786 {
26787 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26788 || !(strategy & SAVE_INLINE_GPRS)
26789 || !(strategy & SAVE_INLINE_VRS));
26790 int ptr_regno = -1;
26791 rtx ptr_reg = NULL_RTX;
26792 int ptr_off = 0;
26793
26794 if (info->total_size < 32767)
26795 frame_off = info->total_size;
26796 else if (need_r11)
26797 ptr_regno = 11;
26798 else if (info->cr_save_p
26799 || info->lr_save_p
26800 || info->first_fp_reg_save < 64
26801 || info->first_gp_reg_save < 32
26802 || info->altivec_size != 0
26803 || info->vrsave_size != 0
26804 || crtl->calls_eh_return)
26805 ptr_regno = 12;
26806 else
26807 {
26808 /* The prologue won't be saving any regs so there is no need
26809 to set up a frame register to access any frame save area.
26810 We also won't be using frame_off anywhere below, but set
26811 the correct value anyway to protect against future
26812 changes to this function. */
26813 frame_off = info->total_size;
26814 }
26815 if (ptr_regno != -1)
26816 {
26817 /* Set up the frame offset to that needed by the first
26818 out-of-line save function. */
26819 START_USE (ptr_regno);
26820 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26821 frame_reg_rtx = ptr_reg;
26822 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26823 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26824 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26825 ptr_off = info->gp_save_offset + info->gp_size;
26826 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26827 ptr_off = info->altivec_save_offset + info->altivec_size;
26828 frame_off = -ptr_off;
26829 }
26830 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26831 ptr_reg, ptr_off);
26832 if (REGNO (frame_reg_rtx) == 12)
26833 sp_adjust = 0;
26834 sp_off = info->total_size;
26835 if (frame_reg_rtx != sp_reg_rtx)
26836 rs6000_emit_stack_tie (frame_reg_rtx, false);
26837 }
26838
26839 /* If we use the link register, get it into r0. */
26840 if (!WORLD_SAVE_P (info) && info->lr_save_p
26841 && !cfun->machine->lr_is_wrapped_separately)
26842 {
26843 rtx addr, reg, mem;
26844
26845 reg = gen_rtx_REG (Pmode, 0);
26846 START_USE (0);
26847 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26848 RTX_FRAME_RELATED_P (insn) = 1;
26849
26850 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26851 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26852 {
26853 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26854 GEN_INT (info->lr_save_offset + frame_off));
26855 mem = gen_rtx_MEM (Pmode, addr);
26856 /* This should not be of rs6000_sr_alias_set, because of
26857 __builtin_return_address. */
26858
26859 insn = emit_move_insn (mem, reg);
26860 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26861 NULL_RTX, NULL_RTX);
26862 END_USE (0);
26863 }
26864 }
26865
26866 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26867 r12 will be needed by out-of-line gpr save. */
26868 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26869 && !(strategy & (SAVE_INLINE_GPRS
26870 | SAVE_NOINLINE_GPRS_SAVES_LR))
26871 ? 11 : 12);
26872 if (!WORLD_SAVE_P (info)
26873 && info->cr_save_p
26874 && REGNO (frame_reg_rtx) != cr_save_regno
26875 && !(using_static_chain_p && cr_save_regno == 11)
26876 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26877 {
26878 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26879 START_USE (cr_save_regno);
26880 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26881 }
26882
26883 /* Do any required saving of FPRs. If the inline strategy was chosen,
26884 do it ourselves. Otherwise, call an out-of-line routine. */
26885 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26886 {
26887 int offset = info->fp_save_offset + frame_off;
26888 for (int i = info->first_fp_reg_save; i < 64; i++)
26889 {
26890 if (save_reg_p (i)
26891 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26892 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26893 sp_off - frame_off);
26894
26895 offset += fp_reg_size;
26896 }
26897 }
26898 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26899 {
26900 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26901 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26902 unsigned ptr_regno = ptr_regno_for_savres (sel);
26903 rtx ptr_reg = frame_reg_rtx;
26904
26905 if (REGNO (frame_reg_rtx) == ptr_regno)
26906 gcc_checking_assert (frame_off == 0);
26907 else
26908 {
26909 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26910 NOT_INUSE (ptr_regno);
26911 emit_insn (gen_add3_insn (ptr_reg,
26912 frame_reg_rtx, GEN_INT (frame_off)));
26913 }
26914 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26915 info->fp_save_offset,
26916 info->lr_save_offset,
26917 DFmode, sel);
26918 rs6000_frame_related (insn, ptr_reg, sp_off,
26919 NULL_RTX, NULL_RTX);
26920 if (lr)
26921 END_USE (0);
26922 }
26923
26924 /* Save GPRs. This is done as a PARALLEL if we are using
26925 the store-multiple instructions. */
26926 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26927 {
26928 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26929 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26930 unsigned ptr_regno = ptr_regno_for_savres (sel);
26931 rtx ptr_reg = frame_reg_rtx;
26932 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26933 int end_save = info->gp_save_offset + info->gp_size;
26934 int ptr_off;
26935
26936 if (ptr_regno == 12)
26937 sp_adjust = 0;
26938 if (!ptr_set_up)
26939 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26940
26941 /* Need to adjust r11 (r12) if we saved any FPRs. */
26942 if (end_save + frame_off != 0)
26943 {
26944 rtx offset = GEN_INT (end_save + frame_off);
26945
26946 if (ptr_set_up)
26947 frame_off = -end_save;
26948 else
26949 NOT_INUSE (ptr_regno);
26950 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26951 }
26952 else if (!ptr_set_up)
26953 {
26954 NOT_INUSE (ptr_regno);
26955 emit_move_insn (ptr_reg, frame_reg_rtx);
26956 }
26957 ptr_off = -end_save;
26958 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26959 info->gp_save_offset + ptr_off,
26960 info->lr_save_offset + ptr_off,
26961 reg_mode, sel);
26962 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26963 NULL_RTX, NULL_RTX);
26964 if (lr)
26965 END_USE (0);
26966 }
26967 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26968 {
26969 rtvec p;
26970 int i;
26971 p = rtvec_alloc (32 - info->first_gp_reg_save);
26972 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26973 RTVEC_ELT (p, i)
26974 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26975 frame_reg_rtx,
26976 info->gp_save_offset + frame_off + reg_size * i);
26977 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26978 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26979 NULL_RTX, NULL_RTX);
26980 }
26981 else if (!WORLD_SAVE_P (info))
26982 {
26983 int offset = info->gp_save_offset + frame_off;
26984 for (int i = info->first_gp_reg_save; i < 32; i++)
26985 {
26986 if (save_reg_p (i)
26987 && !cfun->machine->gpr_is_wrapped_separately[i])
26988 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
26989 sp_off - frame_off);
26990
26991 offset += reg_size;
26992 }
26993 }
26994
26995 if (crtl->calls_eh_return)
26996 {
26997 unsigned int i;
26998 rtvec p;
26999
27000 for (i = 0; ; ++i)
27001 {
27002 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27003 if (regno == INVALID_REGNUM)
27004 break;
27005 }
27006
27007 p = rtvec_alloc (i);
27008
27009 for (i = 0; ; ++i)
27010 {
27011 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27012 if (regno == INVALID_REGNUM)
27013 break;
27014
27015 rtx set
27016 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27017 sp_reg_rtx,
27018 info->ehrd_offset + sp_off + reg_size * (int) i);
27019 RTVEC_ELT (p, i) = set;
27020 RTX_FRAME_RELATED_P (set) = 1;
27021 }
27022
27023 insn = emit_insn (gen_blockage ());
27024 RTX_FRAME_RELATED_P (insn) = 1;
27025 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27026 }
27027
27028 /* In the AIX ABI we need to make sure r2 is really saved. */
27029 if (TARGET_AIX && crtl->calls_eh_return)
27030 {
27031 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27032 rtx join_insn, note;
27033 rtx_insn *save_insn;
27034 long toc_restore_insn;
27035
27036 tmp_reg = gen_rtx_REG (Pmode, 11);
27037 tmp_reg_si = gen_rtx_REG (SImode, 11);
27038 if (using_static_chain_p)
27039 {
27040 START_USE (0);
27041 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27042 }
27043 else
27044 START_USE (11);
27045 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27046 /* Peek at instruction to which this function returns. If it's
27047 restoring r2, then we know we've already saved r2. We can't
27048 unconditionally save r2 because the value we have will already
27049 be updated if we arrived at this function via a PLT call or
27050 TOC-adjusting stub. */
27051 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27052 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27053 + RS6000_TOC_SAVE_SLOT);
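/* The constants above are instruction images: 0x80410000 encodes
   "lwz 2,0(1)" and 0xE8410000 encodes "ld 2,0(1)", so adding
   RS6000_TOC_SAVE_SLOT produces exactly the r2-restoring load we
   hope to find at the return address.  */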
27054 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27055 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27056 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27057 validate_condition_mode (EQ, CCUNSmode);
27058 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27059 emit_insn (gen_rtx_SET (compare_result,
27060 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27061 toc_save_done = gen_label_rtx ();
27062 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27063 gen_rtx_EQ (VOIDmode, compare_result,
27064 const0_rtx),
27065 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27066 pc_rtx);
27067 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27068 JUMP_LABEL (jump) = toc_save_done;
27069 LABEL_NUSES (toc_save_done) += 1;
27070
27071 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27072 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27073 sp_off - frame_off);
27074
27075 emit_label (toc_save_done);
27076
27077 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
27078 have a CFG that has different saves along different paths.
27079 Move the note to a dummy blockage insn, which describes that
27080 R2 is unconditionally saved after the label. */
27081 /* ??? An alternate representation might be a special insn pattern
27082 containing both the branch and the store. That might give the
27083 code that minimizes the number of DW_CFA_advance opcodes more
27084 freedom in placing the annotations. */
27085 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27086 if (note)
27087 remove_note (save_insn, note);
27088 else
27089 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27090 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27091 RTX_FRAME_RELATED_P (save_insn) = 0;
27092
27093 join_insn = emit_insn (gen_blockage ());
27094 REG_NOTES (join_insn) = note;
27095 RTX_FRAME_RELATED_P (join_insn) = 1;
27096
27097 if (using_static_chain_p)
27098 {
27099 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27100 END_USE (0);
27101 }
27102 else
27103 END_USE (11);
27104 }
27105
27106 /* Save CR if we use any that must be preserved. */
27107 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27108 {
27109 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27110 GEN_INT (info->cr_save_offset + frame_off));
27111 rtx mem = gen_frame_mem (SImode, addr);
27112
27113 /* If we didn't copy cr before, do so now using r0. */
27114 if (cr_save_rtx == NULL_RTX)
27115 {
27116 START_USE (0);
27117 cr_save_rtx = gen_rtx_REG (SImode, 0);
27118 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27119 }
27120
27121 /* Saving CR requires a two-instruction sequence: one instruction
27122 to move the CR to a general-purpose register, and a second
27123 instruction that stores the GPR to memory.
27124
27125 We do not emit any DWARF CFI records for the first of these,
27126 because we cannot properly represent the fact that CR is saved in
27127 a register. One reason is that we cannot express that multiple
27128 CR fields are saved; another reason is that on 64-bit, the size
27129 of the CR register in DWARF (4 bytes) differs from the size of
27130 a general-purpose register.
27131
27132 This means if any intervening instruction were to clobber one of
27133 the call-saved CR fields, we'd have incorrect CFI. To prevent
27134 this from happening, we mark the store to memory as a use of
27135 those CR fields, which prevents any such instruction from being
27136 scheduled in between the two instructions. */
27137 rtx crsave_v[9];
27138 int n_crsave = 0;
27139 int i;
27140
27141 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27142 for (i = 0; i < 8; i++)
27143 if (save_reg_p (CR0_REGNO + i))
27144 crsave_v[n_crsave++]
27145 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27146
27147 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27148 gen_rtvec_v (n_crsave, crsave_v)));
27149 END_USE (REGNO (cr_save_rtx));
27150
27151 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27152 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27153 so we need to construct a frame expression manually. */
27154 RTX_FRAME_RELATED_P (insn) = 1;
27155
27156 /* Update address to be stack-pointer relative, like
27157 rs6000_frame_related would do. */
27158 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27159 GEN_INT (info->cr_save_offset + sp_off));
27160 mem = gen_frame_mem (SImode, addr);
27161
27162 if (DEFAULT_ABI == ABI_ELFv2)
27163 {
27164 /* In the ELFv2 ABI we generate separate CFI records for each
27165 CR field that was actually saved. They all point to the
27166 same 32-bit stack slot. */
27167 rtx crframe[8];
27168 int n_crframe = 0;
27169
27170 for (i = 0; i < 8; i++)
27171 if (save_reg_p (CR0_REGNO + i))
27172 {
27173 crframe[n_crframe]
27174 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27175
27176 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27177 n_crframe++;
27178 }
27179
27180 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27181 gen_rtx_PARALLEL (VOIDmode,
27182 gen_rtvec_v (n_crframe, crframe)));
27183 }
27184 else
27185 {
27186 /* In other ABIs, by convention, we use a single CR regnum to
27187 represent the fact that all call-saved CR fields are saved.
27188 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27189 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27190 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27191 }
27192 }
27193
27194 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27195 *separate* slots if the routine calls __builtin_eh_return, so
27196 that they can be independently restored by the unwinder. */
27197 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27198 {
27199 int i, cr_off = info->ehcr_offset;
27200 rtx crsave;
27201
27202 /* ??? We might get better performance by using multiple mfocrf
27203 instructions. */
27204 crsave = gen_rtx_REG (SImode, 0);
27205 emit_insn (gen_prologue_movesi_from_cr (crsave));
27206
27207 for (i = 0; i < 8; i++)
27208 if (!call_used_regs[CR0_REGNO + i])
27209 {
27210 rtvec p = rtvec_alloc (2);
27211 RTVEC_ELT (p, 0)
27212 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27213 RTVEC_ELT (p, 1)
27214 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27215
27216 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27217
27218 RTX_FRAME_RELATED_P (insn) = 1;
27219 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27220 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27221 sp_reg_rtx, cr_off + sp_off));
27222
27223 cr_off += reg_size;
27224 }
27225 }
27226
27227 /* If we are emitting stack probes but the function allocates no
27228 stack, just note that in the dump file. */
27229 if (flag_stack_clash_protection
27230 && dump_file
27231 && !info->push_p)
27232 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27233
27234 /* Update stack and set back pointer unless this is V.4,
27235 for which it was done previously. */
27236 if (!WORLD_SAVE_P (info) && info->push_p
27237 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27238 {
27239 rtx ptr_reg = NULL;
27240 int ptr_off = 0;
27241
27242 /* If saving altivec regs we need to be able to address all save
27243 locations using a 16-bit offset. */
27244 if ((strategy & SAVE_INLINE_VRS) == 0
27245 || (info->altivec_size != 0
27246 && (info->altivec_save_offset + info->altivec_size - 16
27247 + info->total_size - frame_off) > 32767)
27248 || (info->vrsave_size != 0
27249 && (info->vrsave_save_offset
27250 + info->total_size - frame_off) > 32767))
27251 {
27252 int sel = SAVRES_SAVE | SAVRES_VR;
27253 unsigned ptr_regno = ptr_regno_for_savres (sel);
27254
27255 if (using_static_chain_p
27256 && ptr_regno == STATIC_CHAIN_REGNUM)
27257 ptr_regno = 12;
27258 if (REGNO (frame_reg_rtx) != ptr_regno)
27259 START_USE (ptr_regno);
27260 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27261 frame_reg_rtx = ptr_reg;
27262 ptr_off = info->altivec_save_offset + info->altivec_size;
27263 frame_off = -ptr_off;
27264 }
27265 else if (REGNO (frame_reg_rtx) == 1)
27266 frame_off = info->total_size;
27267 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27268 ptr_reg, ptr_off);
27269 if (REGNO (frame_reg_rtx) == 12)
27270 sp_adjust = 0;
27271 sp_off = info->total_size;
27272 if (frame_reg_rtx != sp_reg_rtx)
27273 rs6000_emit_stack_tie (frame_reg_rtx, false);
27274 }
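  /* For a small 64-bit frame, rs6000_emit_allocate_stack boils down to
     a single atomic update (size illustrative):

       stdu 1,-144(1)     # allocate and store the back chain

     while sizes that do not fit a 16-bit displacement first load the
     negated size into a scratch register and use stdux.  */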
27275
27276 /* Set frame pointer, if needed. */
27277 if (frame_pointer_needed)
27278 {
27279 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27280 sp_reg_rtx);
27281 RTX_FRAME_RELATED_P (insn) = 1;
27282 }
27283
27284 /* Save AltiVec registers if needed. Save here because the red zone does
27285 not always include AltiVec registers. */
27286 if (!WORLD_SAVE_P (info)
27287 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27288 {
27289 int end_save = info->altivec_save_offset + info->altivec_size;
27290 int ptr_off;
27291 /* Oddly, the vector save/restore functions point r0 at the end
27292 of the save area, then use r11 or r12 to load offsets for
27293 [reg+reg] addressing. */
27294 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27295 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27296 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27297
27298 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27299 NOT_INUSE (0);
27300 if (scratch_regno == 12)
27301 sp_adjust = 0;
27302 if (end_save + frame_off != 0)
27303 {
27304 rtx offset = GEN_INT (end_save + frame_off);
27305
27306 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27307 }
27308 else
27309 emit_move_insn (ptr_reg, frame_reg_rtx);
27310
27311 ptr_off = -end_save;
27312 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27313 info->altivec_save_offset + ptr_off,
27314 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27315 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27316 NULL_RTX, NULL_RTX);
27317 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27318 {
27319 /* The oddity mentioned above clobbered our frame reg. */
27320 emit_move_insn (frame_reg_rtx, ptr_reg);
27321 frame_off = ptr_off;
27322 }
27323 }
27324 else if (!WORLD_SAVE_P (info)
27325 && info->altivec_size != 0)
27326 {
27327 int i;
27328
27329 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27330 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27331 {
27332 rtx areg, savereg, mem;
27333 HOST_WIDE_INT offset;
27334
27335 offset = (info->altivec_save_offset + frame_off
27336 + 16 * (i - info->first_altivec_reg_save));
27337
27338 savereg = gen_rtx_REG (V4SImode, i);
27339
27340 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27341 {
27342 mem = gen_frame_mem (V4SImode,
27343 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27344 GEN_INT (offset)));
27345 insn = emit_insn (gen_rtx_SET (mem, savereg));
27346 areg = NULL_RTX;
27347 }
27348 else
27349 {
27350 NOT_INUSE (0);
27351 areg = gen_rtx_REG (Pmode, 0);
27352 emit_move_insn (areg, GEN_INT (offset));
27353
27354 /* AltiVec addressing mode is [reg+reg]. */
27355 mem = gen_frame_mem (V4SImode,
27356 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27357
27358 /* Rather than emitting a generic move, force use of the stvx
27359 instruction, which we always want on ISA 2.07 (power8) systems.
27360 In particular we don't want xxpermdi/stxvd2x for little
27361 endian. */
27362 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27363 }
27364
27365 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27366 areg, GEN_INT (offset));
27367 }
27368 }
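  /* Illustrative shapes of the inline vector saves emitted above
     (register numbers and offsets assumed):

       li 0,OFF           # AltiVec store is [reg+reg]
       stvx 20,31,0

     or, with TARGET_P9_VECTOR and a suitably aligned offset,

       stxv 52,OFF(31)    # vs52 aliases v20

     rather than the xxpermdi/stxvd2x pair a generic V4SImode move could
     give on little-endian.  */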
27369
27370 /* VRSAVE is a bit vector representing which AltiVec registers
27371 are used. The OS uses this to determine which vector
27372 registers to save on a context switch. We need to save
27373 VRSAVE on the stack frame, add whatever AltiVec registers we
27374 used in this function, and do the corresponding magic in the
27375 epilogue. */
27376
27377 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27378 {
27379 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27380 be using r12 as frame_reg_rtx and r11 as the static chain
27381 pointer for nested functions. */
27382 int save_regno = 12;
27383 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27384 && !using_static_chain_p)
27385 save_regno = 11;
27386 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27387 {
27388 save_regno = 11;
27389 if (using_static_chain_p)
27390 save_regno = 0;
27391 }
27392 NOT_INUSE (save_regno);
27393
27394 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27395 }
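  /* Roughly what emit_vrsave_prologue arranges (mask and offset
     assumed; VRSAVE is SPR 256):

       mfspr 11,256       # old VRSAVE
       stw 11,OFF(1)      # save it in the frame
       oris 11,11,MASK    # or ori, depending on which vregs we use
       mtspr 256,11  */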
27396
27397 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27398 if (!TARGET_SINGLE_PIC_BASE
27399 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27400 && !constant_pool_empty_p ())
27401 || (DEFAULT_ABI == ABI_V4
27402 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27403 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27404 {
27405 /* If emit_load_toc_table will use the link register, we need to save
27406 it. We use R12 for this purpose because emit_load_toc_table
27407 can use register 0. This allows us to use a plain 'blr' to return
27408 from the procedure more often. */
27409 int save_LR_around_toc_setup = (TARGET_ELF
27410 && DEFAULT_ABI == ABI_V4
27411 && flag_pic
27412 && ! info->lr_save_p
27413 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27414 if (save_LR_around_toc_setup)
27415 {
27416 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27417 rtx tmp = gen_rtx_REG (Pmode, 12);
27418
27419 sp_adjust = 0;
27420 insn = emit_move_insn (tmp, lr);
27421 RTX_FRAME_RELATED_P (insn) = 1;
27422
27423 rs6000_emit_load_toc_table (TRUE);
27424
27425 insn = emit_move_insn (lr, tmp);
27426 add_reg_note (insn, REG_CFA_RESTORE, lr);
27427 RTX_FRAME_RELATED_P (insn) = 1;
27428 }
27429 else
27430 rs6000_emit_load_toc_table (TRUE);
27431 }
27432
27433 #if TARGET_MACHO
27434 if (!TARGET_SINGLE_PIC_BASE
27435 && DEFAULT_ABI == ABI_DARWIN
27436 && flag_pic && crtl->uses_pic_offset_table)
27437 {
27438 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27439 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27440
27441 /* Save and restore LR locally around this call (in R0). */
27442 if (!info->lr_save_p)
27443 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27444
27445 emit_insn (gen_load_macho_picbase (src));
27446
27447 emit_move_insn (gen_rtx_REG (Pmode,
27448 RS6000_PIC_OFFSET_TABLE_REGNUM),
27449 lr);
27450
27451 if (!info->lr_save_p)
27452 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27453 }
27454 #endif
27455
27456 /* If we need to, save the TOC register after doing the stack setup.
27457 Do not emit eh frame info for this save. The unwinder wants info,
27458 conceptually attached to instructions in this function, about
27459 register values in the caller of this function. This R2 may have
27460 already been changed from the value in the caller.
27461 We don't attempt to write accurate DWARF EH frame info for R2
27462 because code emitted by gcc for a (non-pointer) function call
27463 doesn't save and restore R2. Instead, R2 is managed out-of-line
27464 by a linker generated plt call stub when the function resides in
27465 a shared library. This behavior is costly to describe in DWARF,
27466 both in terms of the size of DWARF info and the time taken in the
27467 unwinder to interpret it. R2 changes, apart from the
27468 calls_eh_return case earlier in this function, are handled by
27469 linux-unwind.h frob_update_context. */
27470 if (rs6000_save_toc_in_prologue_p ()
27471 && !cfun->machine->toc_is_wrapped_separately)
27472 {
27473 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27474 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27475 }
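  /* On 64-bit ELFv2 this is just (slot fixed by the ABI):

       std 2,24(1)

     with no CFI attached, for the reasons given above.  */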
27476
27477 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27478 if (using_split_stack && split_stack_arg_pointer_used_p ())
27479 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27480 }
27481
27482 /* Output .extern statements for the save/restore routines we use. */
27483
27484 static void
27485 rs6000_output_savres_externs (FILE *file)
27486 {
27487 rs6000_stack_t *info = rs6000_stack_info ();
27488
27489 if (TARGET_DEBUG_STACK)
27490 debug_stack_info (info);
27491
27492 /* Write .extern for any function we will call to save and restore
27493 fp values. */
27494 if (info->first_fp_reg_save < 64
27495 && !TARGET_MACHO
27496 && !TARGET_ELF)
27497 {
27498 char *name;
27499 int regno = info->first_fp_reg_save - 32;
27500
27501 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27502 {
27503 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27504 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27505 name = rs6000_savres_routine_name (regno, sel);
27506 fprintf (file, "\t.extern %s\n", name);
27507 }
27508 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27509 {
27510 bool lr = (info->savres_strategy
27511 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27512 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27513 name = rs6000_savres_routine_name (regno, sel);
27514 fprintf (file, "\t.extern %s\n", name);
27515 }
27516 }
27517 }
27518
27519 /* Write function prologue. */
27520
27521 static void
27522 rs6000_output_function_prologue (FILE *file)
27523 {
27524 if (!cfun->is_thunk)
27525 rs6000_output_savres_externs (file);
27526
27527 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27528 immediately after the global entry point label. */
27529 if (rs6000_global_entry_point_needed_p ())
27530 {
27531 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27532
27533 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27534
27535 if (TARGET_CMODEL != CMODEL_LARGE)
27536 {
27537 /* In the small and medium code models, we assume the TOC is less than
27538 2 GB away from the text section, so it can be computed via the
27539 following two-instruction sequence. */
27540 char buf[256];
27541
27542 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27543 fprintf (file, "0:\taddis 2,12,.TOC.-");
27544 assemble_name (file, buf);
27545 fprintf (file, "@ha\n");
27546 fprintf (file, "\taddi 2,2,.TOC.-");
27547 assemble_name (file, buf);
27548 fprintf (file, "@l\n");
27549 }
27550 else
27551 {
27552 /* In the large code model, we allow arbitrary offsets between the
27553 TOC and the text section, so we have to load the offset from
27554 memory. The data field is emitted directly before the global
27555 entry point in rs6000_elf_declare_function_name. */
27556 char buf[256];
27557
27558 #ifdef HAVE_AS_ENTRY_MARKERS
27559 /* If supported by the linker, emit a marker relocation. If the
27560 total code size of the final executable or shared library
27561 happens to fit into 2 GB after all, the linker will replace
27562 this code sequence with the sequence for the small or medium
27563 code model. */
27564 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27565 #endif
27566 fprintf (file, "\tld 2,");
27567 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27568 assemble_name (file, buf);
27569 fprintf (file, "-");
27570 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27571 assemble_name (file, buf);
27572 fprintf (file, "(12)\n");
27573 fprintf (file, "\tadd 2,2,12\n");
27574 }
27575
27576 fputs ("\t.localentry\t", file);
27577 assemble_name (file, name);
27578 fputs (",.-", file);
27579 assemble_name (file, name);
27580 fputs ("\n", file);
27581 }
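  /* Putting the pieces together, the small/medium code model case emits
     the canonical ELFv2 dual entry point, e.g. for a function foo:

     .LCF0:
     0:   addis 2,12,.TOC.-.LCF0@ha
          addi 2,2,.TOC.-.LCF0@l
          .localentry foo,.-foo

     relying on the ABI guarantee that r12 holds the global entry
     address on entry there.  */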
27582
27583 /* Output -mprofile-kernel code. This needs to be done here instead of
27584 in output_function_profile since it must go after the ELFv2 ABI
27585 local entry point. */
27586 if (TARGET_PROFILE_KERNEL && crtl->profile)
27587 {
27588 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27589 gcc_assert (!TARGET_32BIT);
27590
27591 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27592
27593 /* In the ELFv2 ABI we have no compiler stack word. It must be
27594 the responsibility of _mcount to preserve the static chain
27595 register if required. */
27596 if (DEFAULT_ABI != ABI_ELFv2
27597 && cfun->static_chain_decl != NULL)
27598 {
27599 asm_fprintf (file, "\tstd %s,24(%s)\n",
27600 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27601 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27602 asm_fprintf (file, "\tld %s,24(%s)\n",
27603 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27604 }
27605 else
27606 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27607 }
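  /* I.e. for ELFv2 without a static chain the profiling sequence is
     simply (RS6000_MCOUNT is "_mcount" on the Linux targets):

       mflr 0
       bl _mcount

     placed after the local entry point, so r2 is already set up.  */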
27608
27609 rs6000_pic_labelno++;
27610 }
27611
27612 /* -mprofile-kernel code calls mcount before the function prologue,
27613 so a profiled leaf function should stay a leaf function. */
27614 static bool
27615 rs6000_keep_leaf_when_profiled ()
27616 {
27617 return TARGET_PROFILE_KERNEL;
27618 }
27619
27620 /* Non-zero if vmx regs are restored before the frame pop, zero if
27621 we restore after the pop when possible. */
27622 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27623
27624 /* Restoring cr is a two step process: loading a reg from the frame
27625 save, then moving the reg to cr. For ABI_V4 we must let the
27626 unwinder know that the stack location is no longer valid at or
27627 before the stack deallocation, but we can't emit a cfa_restore for
27628 cr at the stack deallocation like we do for other registers.
27629 The trouble is that it is possible for the move to cr to be
27630 scheduled after the stack deallocation. So say exactly where cr
27631 is located on each of the two insns. */
27632
27633 static rtx
27634 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27635 {
27636 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27637 rtx reg = gen_rtx_REG (SImode, regno);
27638 rtx_insn *insn = emit_move_insn (reg, mem);
27639
27640 if (!exit_func && DEFAULT_ABI == ABI_V4)
27641 {
27642 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27643 rtx set = gen_rtx_SET (reg, cr);
27644
27645 add_reg_note (insn, REG_CFA_REGISTER, set);
27646 RTX_FRAME_RELATED_P (insn) = 1;
27647 }
27648 return reg;
27649 }
27650
27651 /* Reload CR from REG. */
27652
27653 static void
27654 restore_saved_cr (rtx reg, bool using_mfcr_multiple, bool exit_func)
27655 {
27656 int count = 0;
27657 int i;
27658
27659 if (using_mfcr_multiple)
27660 {
27661 for (i = 0; i < 8; i++)
27662 if (save_reg_p (CR0_REGNO + i))
27663 count++;
27664 gcc_assert (count);
27665 }
27666
27667 if (using_mfcr_multiple && count > 1)
27668 {
27669 rtx_insn *insn;
27670 rtvec p;
27671 int ndx;
27672
27673 p = rtvec_alloc (count);
27674
27675 ndx = 0;
27676 for (i = 0; i < 8; i++)
27677 if (save_reg_p (CR0_REGNO + i))
27678 {
27679 rtvec r = rtvec_alloc (2);
27680 RTVEC_ELT (r, 0) = reg;
27681 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27682 RTVEC_ELT (p, ndx) =
27683 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27684 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27685 ndx++;
27686 }
27687 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27688 gcc_assert (ndx == count);
27689
27690 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27691 CR field separately. */
27692 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27693 {
27694 for (i = 0; i < 8; i++)
27695 if (save_reg_p (CR0_REGNO + i))
27696 add_reg_note (insn, REG_CFA_RESTORE,
27697 gen_rtx_REG (SImode, CR0_REGNO + i));
27698
27699 RTX_FRAME_RELATED_P (insn) = 1;
27700 }
27701 }
27702 else
27703 for (i = 0; i < 8; i++)
27704 if (save_reg_p (CR0_REGNO + i))
27705 {
27706 rtx insn = emit_insn (gen_movsi_to_cr_one
27707 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27708
27709 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27710 CR field separately, attached to the insn that in fact
27711 restores this particular CR field. */
27712 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27713 {
27714 add_reg_note (insn, REG_CFA_RESTORE,
27715 gen_rtx_REG (SImode, CR0_REGNO + i));
27716
27717 RTX_FRAME_RELATED_P (insn) = 1;
27718 }
27719 }
27720
27721 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27722 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27723 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27724 {
27725 rtx_insn *insn = get_last_insn ();
27726 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27727
27728 add_reg_note (insn, REG_CFA_RESTORE, cr);
27729 RTX_FRAME_RELATED_P (insn) = 1;
27730 }
27731 }
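  /* Sketch of the two restore shapes, assuming CR2..CR4 were saved and
     the load above put the saved word in r12: the multiple-field form
     is a single

       mtcrf 56,12        # mask 32|16|8 selects cr2,cr3,cr4

     while the one-at-a-time form is

       mtcrf 32,12
       mtcrf 16,12
       mtcrf 8,12

     each insn carrying its own CFA_RESTORE note on ELFv2.  */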
27732
27733 /* Like cr, the move to lr instruction can be scheduled after the
27734 stack deallocation, but unlike cr, its stack frame save is still
27735 valid. So we only need to emit the cfa_restore on the correct
27736 instruction. */
27737
27738 static void
27739 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27740 {
27741 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27742 rtx reg = gen_rtx_REG (Pmode, regno);
27743
27744 emit_move_insn (reg, mem);
27745 }
27746
27747 static void
27748 restore_saved_lr (int regno, bool exit_func)
27749 {
27750 rtx reg = gen_rtx_REG (Pmode, regno);
27751 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27752 rtx_insn *insn = emit_move_insn (lr, reg);
27753
27754 if (!exit_func && flag_shrink_wrap)
27755 {
27756 add_reg_note (insn, REG_CFA_RESTORE, lr);
27757 RTX_FRAME_RELATED_P (insn) = 1;
27758 }
27759 }
27760
27761 static rtx
27762 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27763 {
27764 if (DEFAULT_ABI == ABI_ELFv2)
27765 {
27766 int i;
27767 for (i = 0; i < 8; i++)
27768 if (save_reg_p (CR0_REGNO + i))
27769 {
27770 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27771 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27772 cfa_restores);
27773 }
27774 }
27775 else if (info->cr_save_p)
27776 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27777 gen_rtx_REG (SImode, CR2_REGNO),
27778 cfa_restores);
27779
27780 if (info->lr_save_p)
27781 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27782 gen_rtx_REG (Pmode, LR_REGNO),
27783 cfa_restores);
27784 return cfa_restores;
27785 }
27786
27787 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27788 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27789 below the stack pointer that are not clobbered by signals. */
27790
27791 static inline bool
27792 offset_below_red_zone_p (HOST_WIDE_INT offset)
27793 {
27794 return offset < (DEFAULT_ABI == ABI_V4
27795 ? 0
27796 : TARGET_32BIT ? -220 : -288);
27797 }
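  /* E.g. on 64-bit AIX/ELF the cushion is 288 bytes, so
     offset_below_red_zone_p (-300) is true (a signal handler may
     clobber that slot) while offset_below_red_zone_p (-100) is false
     (the slot is inside the protected red zone).  */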
27798
27799 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27800
27801 static void
27802 emit_cfa_restores (rtx cfa_restores)
27803 {
27804 rtx_insn *insn = get_last_insn ();
27805 rtx *loc = &REG_NOTES (insn);
27806
27807 while (*loc)
27808 loc = &XEXP (*loc, 1);
27809 *loc = cfa_restores;
27810 RTX_FRAME_RELATED_P (insn) = 1;
27811 }
27812
27813 /* Emit function epilogue as insns. */
27814
27815 void
27816 rs6000_emit_epilogue (enum epilogue_type epilogue_type)
27817 {
27818 HOST_WIDE_INT frame_off = 0;
27819 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27820 rtx frame_reg_rtx = sp_reg_rtx;
27821 rtx cfa_restores = NULL_RTX;
27822 rtx insn;
27823 rtx cr_save_reg = NULL_RTX;
27824 machine_mode reg_mode = Pmode;
27825 int reg_size = TARGET_32BIT ? 4 : 8;
27826 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27827 int fp_reg_size = 8;
27828 int i;
27829 unsigned ptr_regno;
27830
27831 rs6000_stack_t *info = rs6000_stack_info ();
27832
27833 if (epilogue_type == EPILOGUE_TYPE_NORMAL && crtl->calls_eh_return)
27834 epilogue_type = EPILOGUE_TYPE_EH_RETURN;
27835
27836 int strategy = info->savres_strategy;
27837 bool using_load_multiple = !!(strategy & REST_MULTIPLE);
27838 bool restoring_GPRs_inline = !!(strategy & REST_INLINE_GPRS);
27839 bool restoring_FPRs_inline = !!(strategy & REST_INLINE_FPRS);
27840 if (epilogue_type == EPILOGUE_TYPE_SIBCALL)
27841 {
27842 restoring_GPRs_inline = true;
27843 restoring_FPRs_inline = true;
27844 }
27845
27846 bool using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27847 || rs6000_tune == PROCESSOR_PPC603
27848 || rs6000_tune == PROCESSOR_PPC750
27849 || optimize_size);
27850
27851 /* Restore via the backchain when we have a large frame, since this
27852 is more efficient than an addis, addi pair. The second condition
27853 here will not trigger at the moment; we don't actually need a
27854 frame pointer for alloca, but the generic parts of the compiler
27855 give us one anyway. */
27856 bool use_backchain_to_restore_sp
27857 = (info->total_size + (info->lr_save_p ? info->lr_save_offset : 0) > 32767
27858 || (cfun->calls_alloca && !frame_pointer_needed));
27859
27860 bool restore_lr = (info->lr_save_p
27861 && (restoring_FPRs_inline
27862 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27863 && (restoring_GPRs_inline
27864 || info->first_fp_reg_save < 64)
27865 && !cfun->machine->lr_is_wrapped_separately);
27866
27867
27868 if (WORLD_SAVE_P (info))
27869 {
27870 gcc_assert (epilogue_type != EPILOGUE_TYPE_SIBCALL);
27871
27872 /* eh_rest_world_r10 will return to the location saved in the LR
27873 stack slot (which is not likely to be our caller).
27874 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27875 rest_world is similar, except any R10 parameter is ignored.
27876 The exception-handling stuff that was here in 2.95 is no
27877 longer necessary. */
27878
27879 rtvec p;
27880 p = rtvec_alloc (9
27881 + 32 - info->first_gp_reg_save
27882 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27883 + 63 + 1 - info->first_fp_reg_save);
27884
27885 const char *rname;
27886 switch (epilogue_type)
27887 {
27888 case EPILOGUE_TYPE_NORMAL:
27889 rname = ggc_strdup ("*rest_world");
27890 break;
27891
27892 case EPILOGUE_TYPE_EH_RETURN:
27893 rname = ggc_strdup ("*eh_rest_world_r10");
27894 break;
27895
27896 default:
27897 gcc_unreachable ();
27898 }
27899
27900 int j = 0;
27901 RTVEC_ELT (p, j++) = ret_rtx;
27902 RTVEC_ELT (p, j++)
27903 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, rname));
27904 /* The instruction pattern requires a clobber here;
27905 it is shared with the restVEC helper. */
27906 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
27907
27908 {
27909 /* CR register traditionally saved as CR2. */
27910 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27911 RTVEC_ELT (p, j++)
27912 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27913 if (flag_shrink_wrap)
27914 {
27915 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27916 gen_rtx_REG (Pmode, LR_REGNO),
27917 cfa_restores);
27918 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27919 }
27920 }
27921
27922 int i;
27923 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27924 {
27925 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27926 RTVEC_ELT (p, j++)
27927 = gen_frame_load (reg,
27928 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27929 if (flag_shrink_wrap
27930 && save_reg_p (info->first_gp_reg_save + i))
27931 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27932 }
27933 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27934 {
27935 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27936 RTVEC_ELT (p, j++)
27937 = gen_frame_load (reg,
27938 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27939 if (flag_shrink_wrap
27940 && save_reg_p (info->first_altivec_reg_save + i))
27941 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27942 }
27943 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27944 {
27945 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27946 info->first_fp_reg_save + i);
27947 RTVEC_ELT (p, j++)
27948 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27949 if (flag_shrink_wrap
27950 && save_reg_p (info->first_fp_reg_save + i))
27951 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27952 }
27953 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
27954 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
27955 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
27956 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
27957 RTVEC_ELT (p, j++)
27958 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27959 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27960
27961 if (flag_shrink_wrap)
27962 {
27963 REG_NOTES (insn) = cfa_restores;
27964 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27965 RTX_FRAME_RELATED_P (insn) = 1;
27966 }
27967 return;
27968 }
27969
27970 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27971 if (info->push_p)
27972 frame_off = info->total_size;
27973
27974 /* Restore AltiVec registers if we must do so before adjusting the
27975 stack. */
27976 if (info->altivec_size != 0
27977 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
27978 || (DEFAULT_ABI != ABI_V4
27979 && offset_below_red_zone_p (info->altivec_save_offset))))
27980 {
27981 int i;
27982 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
27983
27984 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27985 if (use_backchain_to_restore_sp)
27986 {
27987 int frame_regno = 11;
27988
27989 if ((strategy & REST_INLINE_VRS) == 0)
27990 {
27991 /* Of r11 and r12, select the one not clobbered by an
27992 out-of-line restore function for the frame register. */
27993 frame_regno = 11 + 12 - scratch_regno;
27994 }
27995 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
27996 emit_move_insn (frame_reg_rtx,
27997 gen_rtx_MEM (Pmode, sp_reg_rtx));
27998 frame_off = 0;
27999 }
28000 else if (frame_pointer_needed)
28001 frame_reg_rtx = hard_frame_pointer_rtx;
28002
28003 if ((strategy & REST_INLINE_VRS) == 0)
28004 {
28005 int end_save = info->altivec_save_offset + info->altivec_size;
28006 int ptr_off;
28007 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28008 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28009
28010 if (end_save + frame_off != 0)
28011 {
28012 rtx offset = GEN_INT (end_save + frame_off);
28013
28014 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28015 }
28016 else
28017 emit_move_insn (ptr_reg, frame_reg_rtx);
28018
28019 ptr_off = -end_save;
28020 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28021 info->altivec_save_offset + ptr_off,
28022 0, V4SImode, SAVRES_VR);
28023 }
28024 else
28025 {
28026 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28027 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28028 {
28029 rtx addr, areg, mem, insn;
28030 rtx reg = gen_rtx_REG (V4SImode, i);
28031 HOST_WIDE_INT offset
28032 = (info->altivec_save_offset + frame_off
28033 + 16 * (i - info->first_altivec_reg_save));
28034
28035 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28036 {
28037 mem = gen_frame_mem (V4SImode,
28038 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28039 GEN_INT (offset)));
28040 insn = gen_rtx_SET (reg, mem);
28041 }
28042 else
28043 {
28044 areg = gen_rtx_REG (Pmode, 0);
28045 emit_move_insn (areg, GEN_INT (offset));
28046
28047 /* AltiVec addressing mode is [reg+reg]. */
28048 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28049 mem = gen_frame_mem (V4SImode, addr);
28050
28051 /* Rather than emitting a generic move, force use of the
28052 lvx instruction, which we always want. In particular we
28053 don't want lxvd2x/xxpermdi for little endian. */
28054 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28055 }
28056
28057 (void) emit_insn (insn);
28058 }
28059 }
28060
28061 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28062 if (((strategy & REST_INLINE_VRS) == 0
28063 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28064 && (flag_shrink_wrap
28065 || (offset_below_red_zone_p
28066 (info->altivec_save_offset
28067 + 16 * (i - info->first_altivec_reg_save))))
28068 && save_reg_p (i))
28069 {
28070 rtx reg = gen_rtx_REG (V4SImode, i);
28071 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28072 }
28073 }
28074
28075 /* Restore VRSAVE if we must do so before adjusting the stack. */
28076 if (info->vrsave_size != 0
28077 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28078 || (DEFAULT_ABI != ABI_V4
28079 && offset_below_red_zone_p (info->vrsave_save_offset))))
28080 {
28081 rtx reg;
28082
28083 if (frame_reg_rtx == sp_reg_rtx)
28084 {
28085 if (use_backchain_to_restore_sp)
28086 {
28087 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28088 emit_move_insn (frame_reg_rtx,
28089 gen_rtx_MEM (Pmode, sp_reg_rtx));
28090 frame_off = 0;
28091 }
28092 else if (frame_pointer_needed)
28093 frame_reg_rtx = hard_frame_pointer_rtx;
28094 }
28095
28096 reg = gen_rtx_REG (SImode, 12);
28097 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28098 info->vrsave_save_offset + frame_off));
28099
28100 emit_insn (generate_set_vrsave (reg, info, 1));
28101 }
28102
28103 insn = NULL_RTX;
28104 /* If we have a large stack frame, restore the old stack pointer
28105 using the backchain. */
28106 if (use_backchain_to_restore_sp)
28107 {
28108 if (frame_reg_rtx == sp_reg_rtx)
28109 {
28110 /* Under V.4, don't reset the stack pointer until after we're done
28111 loading the saved registers. */
28112 if (DEFAULT_ABI == ABI_V4)
28113 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28114
28115 insn = emit_move_insn (frame_reg_rtx,
28116 gen_rtx_MEM (Pmode, sp_reg_rtx));
28117 frame_off = 0;
28118 }
28119 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28120 && DEFAULT_ABI == ABI_V4)
28121 /* frame_reg_rtx has been set up by the altivec restore. */
28122 ;
28123 else
28124 {
28125 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28126 frame_reg_rtx = sp_reg_rtx;
28127 }
28128 }
28129 /* If we have a frame pointer, we can restore the old stack pointer
28130 from it. */
28131 else if (frame_pointer_needed)
28132 {
28133 frame_reg_rtx = sp_reg_rtx;
28134 if (DEFAULT_ABI == ABI_V4)
28135 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28136 /* Prevent reordering memory accesses against stack pointer restore. */
28137 else if (cfun->calls_alloca
28138 || offset_below_red_zone_p (-info->total_size))
28139 rs6000_emit_stack_tie (frame_reg_rtx, true);
28140
28141 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28142 GEN_INT (info->total_size)));
28143 frame_off = 0;
28144 }
28145 else if (info->push_p
28146 && DEFAULT_ABI != ABI_V4
28147 && epilogue_type != EPILOGUE_TYPE_EH_RETURN)
28148 {
28149 /* Prevent reordering memory accesses against stack pointer restore. */
28150 if (cfun->calls_alloca
28151 || offset_below_red_zone_p (-info->total_size))
28152 rs6000_emit_stack_tie (frame_reg_rtx, false);
28153 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28154 GEN_INT (info->total_size)));
28155 frame_off = 0;
28156 }
28157 if (insn && frame_reg_rtx == sp_reg_rtx)
28158 {
28159 if (cfa_restores)
28160 {
28161 REG_NOTES (insn) = cfa_restores;
28162 cfa_restores = NULL_RTX;
28163 }
28164 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28165 RTX_FRAME_RELATED_P (insn) = 1;
28166 }
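  /* The three shapes of the restore above (frame size illustrative):

       ld 1,0(1)          # large frame: reload sp from the back chain
       addi 1,31,144      # frame pointer available
       addi 1,1,144       # small frame: plain increment

     with the CFA note moving the unwinder's CFA back to r1 there.  */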
28167
28168 /* Restore AltiVec registers if we have not done so already. */
28169 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28170 && info->altivec_size != 0
28171 && (DEFAULT_ABI == ABI_V4
28172 || !offset_below_red_zone_p (info->altivec_save_offset)))
28173 {
28174 int i;
28175
28176 if ((strategy & REST_INLINE_VRS) == 0)
28177 {
28178 int end_save = info->altivec_save_offset + info->altivec_size;
28179 int ptr_off;
28180 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28181 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28182 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28183
28184 if (end_save + frame_off != 0)
28185 {
28186 rtx offset = GEN_INT (end_save + frame_off);
28187
28188 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28189 }
28190 else
28191 emit_move_insn (ptr_reg, frame_reg_rtx);
28192
28193 ptr_off = -end_save;
28194 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28195 info->altivec_save_offset + ptr_off,
28196 0, V4SImode, SAVRES_VR);
28197 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28198 {
28199 /* Frame reg was clobbered by out-of-line save. Restore it
28200 from ptr_reg, and if we are calling out-of-line gpr or
28201 fpr restore set up the correct pointer and offset. */
28202 unsigned newptr_regno = 1;
28203 if (!restoring_GPRs_inline)
28204 {
28205 bool lr = info->gp_save_offset + info->gp_size == 0;
28206 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28207 newptr_regno = ptr_regno_for_savres (sel);
28208 end_save = info->gp_save_offset + info->gp_size;
28209 }
28210 else if (!restoring_FPRs_inline)
28211 {
28212 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28213 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28214 newptr_regno = ptr_regno_for_savres (sel);
28215 end_save = info->fp_save_offset + info->fp_size;
28216 }
28217
28218 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28219 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28220
28221 if (end_save + ptr_off != 0)
28222 {
28223 rtx offset = GEN_INT (end_save + ptr_off);
28224
28225 frame_off = -end_save;
28226 if (TARGET_32BIT)
28227 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28228 ptr_reg, offset));
28229 else
28230 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28231 ptr_reg, offset));
28232 }
28233 else
28234 {
28235 frame_off = ptr_off;
28236 emit_move_insn (frame_reg_rtx, ptr_reg);
28237 }
28238 }
28239 }
28240 else
28241 {
28242 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28243 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28244 {
28245 rtx addr, areg, mem, insn;
28246 rtx reg = gen_rtx_REG (V4SImode, i);
28247 HOST_WIDE_INT offset
28248 = (info->altivec_save_offset + frame_off
28249 + 16 * (i - info->first_altivec_reg_save));
28250
28251 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28252 {
28253 mem = gen_frame_mem (V4SImode,
28254 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28255 GEN_INT (offset)));
28256 insn = gen_rtx_SET (reg, mem);
28257 }
28258 else
28259 {
28260 areg = gen_rtx_REG (Pmode, 0);
28261 emit_move_insn (areg, GEN_INT (offset));
28262
28263 /* AltiVec addressing mode is [reg+reg]. */
28264 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28265 mem = gen_frame_mem (V4SImode, addr);
28266
28267 /* Rather than emitting a generic move, force use of the
28268 lvx instruction, which we always want. In particular we
28269 don't want lxvd2x/xxpermdi for little endian. */
28270 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28271 }
28272
28273 (void) emit_insn (insn);
28274 }
28275 }
28276
28277 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28278 if (((strategy & REST_INLINE_VRS) == 0
28279 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28280 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28281 && save_reg_p (i))
28282 {
28283 rtx reg = gen_rtx_REG (V4SImode, i);
28284 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28285 }
28286 }
28287
28288 /* Restore VRSAVE if we have not done so already. */
28289 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28290 && info->vrsave_size != 0
28291 && (DEFAULT_ABI == ABI_V4
28292 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28293 {
28294 rtx reg;
28295
28296 reg = gen_rtx_REG (SImode, 12);
28297 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28298 info->vrsave_save_offset + frame_off));
28299
28300 emit_insn (generate_set_vrsave (reg, info, 1));
28301 }
28302
28303 /* If we exit by an out-of-line restore function on ABI_V4 then that
28304 function will deallocate the stack, so we don't need to worry
28305 about the unwinder restoring cr from an invalid stack frame
28306 location. */
28307 bool exit_func = (!restoring_FPRs_inline
28308 || (!restoring_GPRs_inline
28309 && info->first_fp_reg_save == 64));
28310
28311 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28312 *separate* slots if the routine calls __builtin_eh_return, so
28313 that they can be independently restored by the unwinder. */
28314 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28315 {
28316 int i, cr_off = info->ehcr_offset;
28317
28318 for (i = 0; i < 8; i++)
28319 if (!call_used_regs[CR0_REGNO + i])
28320 {
28321 rtx reg = gen_rtx_REG (SImode, 0);
28322 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28323 cr_off + frame_off));
28324
28325 insn = emit_insn (gen_movsi_to_cr_one
28326 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28327
28328 if (!exit_func && flag_shrink_wrap)
28329 {
28330 add_reg_note (insn, REG_CFA_RESTORE,
28331 gen_rtx_REG (SImode, CR0_REGNO + i));
28332
28333 RTX_FRAME_RELATED_P (insn) = 1;
28334 }
28335
28336 cr_off += reg_size;
28337 }
28338 }
28339
28340 /* Get the old lr if we saved it. If we are restoring registers
28341 out-of-line, then the out-of-line routines can do this for us. */
28342 if (restore_lr && restoring_GPRs_inline)
28343 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28344
28345 /* Get the old cr if we saved it. */
28346 if (info->cr_save_p)
28347 {
28348 unsigned cr_save_regno = 12;
28349
28350 if (!restoring_GPRs_inline)
28351 {
28352 /* Ensure we don't use the register used by the out-of-line
28353 gpr register restore below. */
28354 bool lr = info->gp_save_offset + info->gp_size == 0;
28355 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28356 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28357
28358 if (gpr_ptr_regno == 12)
28359 cr_save_regno = 11;
28360 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28361 }
28362 else if (REGNO (frame_reg_rtx) == 12)
28363 cr_save_regno = 11;
28364
28365 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28366 info->cr_save_offset + frame_off,
28367 exit_func);
28368 }
28369
28370 /* Set LR here to try to overlap restores below. */
28371 if (restore_lr && restoring_GPRs_inline)
28372 restore_saved_lr (0, exit_func);
28373
28374 /* Load exception handler data registers, if needed. */
28375 if (epilogue_type == EPILOGUE_TYPE_EH_RETURN)
28376 {
28377 unsigned int i, regno;
28378
28379 if (TARGET_AIX)
28380 {
28381 rtx reg = gen_rtx_REG (reg_mode, 2);
28382 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28383 frame_off + RS6000_TOC_SAVE_SLOT));
28384 }
28385
28386 for (i = 0; ; ++i)
28387 {
28388 rtx mem;
28389
28390 regno = EH_RETURN_DATA_REGNO (i);
28391 if (regno == INVALID_REGNUM)
28392 break;
28393
28394 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28395 info->ehrd_offset + frame_off
28396 + reg_size * (int) i);
28397
28398 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28399 }
28400 }
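  /* On this port the EH data registers are r3..r6, so for -m64 the
     loop comes out as (offsets and base register assumed):

       ld 3,EHRD+0(11)
       ld 4,EHRD+8(11)
       ld 5,EHRD+16(11)
       ld 6,EHRD+24(11)  */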
28401
28402 /* Restore GPRs. This is done as a PARALLEL if we are using
28403 the load-multiple instructions. */
28404 if (!restoring_GPRs_inline)
28405 {
28406 /* We are jumping to an out-of-line function. */
28407 rtx ptr_reg;
28408 int end_save = info->gp_save_offset + info->gp_size;
28409 bool can_use_exit = end_save == 0;
28410 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28411 int ptr_off;
28412
28413 /* Emit stack reset code if we need it. */
28414 ptr_regno = ptr_regno_for_savres (sel);
28415 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28416 if (can_use_exit)
28417 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28418 else if (end_save + frame_off != 0)
28419 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28420 GEN_INT (end_save + frame_off)));
28421 else if (REGNO (frame_reg_rtx) != ptr_regno)
28422 emit_move_insn (ptr_reg, frame_reg_rtx);
28423 if (REGNO (frame_reg_rtx) == ptr_regno)
28424 frame_off = -end_save;
28425
28426 if (can_use_exit && info->cr_save_p)
28427 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28428
28429 ptr_off = -end_save;
28430 rs6000_emit_savres_rtx (info, ptr_reg,
28431 info->gp_save_offset + ptr_off,
28432 info->lr_save_offset + ptr_off,
28433 reg_mode, sel);
28434 }
28435 else if (using_load_multiple)
28436 {
28437 rtvec p;
28438 p = rtvec_alloc (32 - info->first_gp_reg_save);
28439 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28440 RTVEC_ELT (p, i)
28441 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28442 frame_reg_rtx,
28443 info->gp_save_offset + frame_off + reg_size * i);
28444 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28445 }
28446 else
28447 {
28448 int offset = info->gp_save_offset + frame_off;
28449 for (i = info->first_gp_reg_save; i < 32; i++)
28450 {
28451 if (save_reg_p (i)
28452 && !cfun->machine->gpr_is_wrapped_separately[i])
28453 {
28454 rtx reg = gen_rtx_REG (reg_mode, i);
28455 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28456 }
28457
28458 offset += reg_size;
28459 }
28460 }
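  /* Sketch of the two inline forms (registers and offsets assumed):
     with REST_MULTIPLE a single load-multiple

       lmw 26,-24(11)     # reloads r26..r31 (32-bit only)

     otherwise one load per saved GPR:

       ld 26,-48(11)
       ...
       ld 31,-8(11)  */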
28461
28462 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28463 {
28464 /* If the frame pointer was used then we can't delay emitting
28465 a REG_CFA_DEF_CFA note. This must happen on the insn that
28466 restores the frame pointer, r31. We may have already emitted
28467 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28468 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28469 be harmless if emitted. */
28470 if (frame_pointer_needed)
28471 {
28472 insn = get_last_insn ();
28473 add_reg_note (insn, REG_CFA_DEF_CFA,
28474 plus_constant (Pmode, frame_reg_rtx, frame_off));
28475 RTX_FRAME_RELATED_P (insn) = 1;
28476 }
28477
28478 /* Set up cfa_restores. We always need these when
28479 shrink-wrapping. If not shrink-wrapping then we only need
28480 the cfa_restore when the stack location is no longer valid.
28481 The cfa_restores must be emitted on or before the insn that
28482 invalidates the stack, and of course must not be emitted
28483 before the insn that actually does the restore. The latter
28484 is why it is a bad idea to emit the cfa_restores as a group
28485 on the last instruction here that actually does a restore:
28486 That insn may be reordered with respect to others doing
28487 restores. */
28488 if (flag_shrink_wrap
28489 && !restoring_GPRs_inline
28490 && info->first_fp_reg_save == 64)
28491 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28492
28493 for (i = info->first_gp_reg_save; i < 32; i++)
28494 if (save_reg_p (i)
28495 && !cfun->machine->gpr_is_wrapped_separately[i])
28496 {
28497 rtx reg = gen_rtx_REG (reg_mode, i);
28498 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28499 }
28500 }
28501
28502 if (!restoring_GPRs_inline
28503 && info->first_fp_reg_save == 64)
28504 {
28505 /* We are jumping to an out-of-line function. */
28506 if (cfa_restores)
28507 emit_cfa_restores (cfa_restores);
28508 return;
28509 }
28510
28511 if (restore_lr && !restoring_GPRs_inline)
28512 {
28513 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28514 restore_saved_lr (0, exit_func);
28515 }
28516
28517 /* Restore fpr's if we need to do it without calling a function. */
28518 if (restoring_FPRs_inline)
28519 {
28520 int offset = info->fp_save_offset + frame_off;
28521 for (i = info->first_fp_reg_save; i < 64; i++)
28522 {
28523 if (save_reg_p (i)
28524 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28525 {
28526 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28527 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28528 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28529 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28530 cfa_restores);
28531 }
28532
28533 offset += fp_reg_size;
28534 }
28535 }
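  /* I.e. one plain load per FPR not handled by separate shrink-wrap
     components, e.g. (offset assumed)

       lfd 31,-8(1)

     queueing a CFA_RESTORE for it when shrink-wrapping.  */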
28536
28537 /* If we saved cr, restore it here. Just those that were used. */
28538 if (info->cr_save_p)
28539 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28540
28541 /* If this is V.4, unwind the stack pointer after all of the loads
28542 have been done, or set up r11 if we are restoring fp out of line. */
28543 ptr_regno = 1;
28544 if (!restoring_FPRs_inline)
28545 {
28546 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28547 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28548 ptr_regno = ptr_regno_for_savres (sel);
28549 }
28550
28551 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28552 if (REGNO (frame_reg_rtx) == ptr_regno)
28553 frame_off = 0;
28554
28555 if (insn && restoring_FPRs_inline)
28556 {
28557 if (cfa_restores)
28558 {
28559 REG_NOTES (insn) = cfa_restores;
28560 cfa_restores = NULL_RTX;
28561 }
28562 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28563 RTX_FRAME_RELATED_P (insn) = 1;
28564 }
28565
28566 if (epilogue_type == EPILOGUE_TYPE_EH_RETURN)
28567 {
28568 rtx sa = EH_RETURN_STACKADJ_RTX;
28569 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28570 }
28571
28572 if (epilogue_type != EPILOGUE_TYPE_SIBCALL && restoring_FPRs_inline)
28573 {
28574 if (cfa_restores)
28575 {
28576 /* We can't hang the cfa_restores off a simple return,
28577 since the shrink-wrap code sometimes uses an existing
28578 return. This means there might be a path from
28579 pre-prologue code to this return, and dwarf2cfi code
28580 wants the eh_frame unwinder state to be the same on
28581 all paths to any point. So we need to emit the
28582 cfa_restores before the return. For -m64 we really
28583 don't need epilogue cfa_restores at all, except for
28584 this irritating dwarf2cfi-with-shrink-wrap
28585 requirement; the stack red zone means eh_frame info
28586 from the prologue telling the unwinder to restore
28587 from the stack is perfectly good right to the end of
28588 the function. */
28589 emit_insn (gen_blockage ());
28590 emit_cfa_restores (cfa_restores);
28591 cfa_restores = NULL_RTX;
28592 }
28593
28594 emit_jump_insn (targetm.gen_simple_return ());
28595 }
28596
28597 if (epilogue_type != EPILOGUE_TYPE_SIBCALL && !restoring_FPRs_inline)
28598 {
28599 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28600 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28601 int elt = 0;
28602 RTVEC_ELT (p, elt++) = ret_rtx;
28603 if (lr)
28604 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28605
28606 /* We have to restore more than two FP registers, so branch to the
28607 restore function. It will return to our caller. */
28608 int i;
28609 int reg;
28610 rtx sym;
28611
28612 if (flag_shrink_wrap)
28613 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28614
28615 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28616 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28617 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28618 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28619
28620 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28621 {
28622 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28623
28624 RTVEC_ELT (p, elt++)
28625 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28626 if (flag_shrink_wrap
28627 && save_reg_p (info->first_fp_reg_save + i))
28628 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28629 }
28630
28631 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28632 }
28633
28634 if (cfa_restores)
28635 {
28636 if (epilogue_type == EPILOGUE_TYPE_SIBCALL)
28637 /* Ensure the cfa_restores are hung off an insn that won't
28638 be reordered above other restores. */
28639 emit_insn (gen_blockage ());
28640
28641 emit_cfa_restores (cfa_restores);
28642 }
28643 }
28644
28645 /* Write function epilogue. */
28646
28647 static void
28648 rs6000_output_function_epilogue (FILE *file)
28649 {
28650 #if TARGET_MACHO
28651 macho_branch_islands ();
28652
28653 {
28654 rtx_insn *insn = get_last_insn ();
28655 rtx_insn *deleted_debug_label = NULL;
28656
28657 /* Mach-O doesn't support labels at the end of objects, so if
28658 it looks like we might want one, take special action.
28659
28660 First, collect any sequence of deleted debug labels. */
28661 while (insn
28662 && NOTE_P (insn)
28663 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28664 {
28665 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28666 notes only, instead set their CODE_LABEL_NUMBER to -1,
28667 otherwise there would be code generation differences
28668 in between -g and -g0. */
28669 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28670 deleted_debug_label = insn;
28671 insn = PREV_INSN (insn);
28672 }
28673
28674 /* Second, if we have:
28675 label:
28676 barrier
28677 then this needs to be detected, so skip past the barrier. */
28678
28679 if (insn && BARRIER_P (insn))
28680 insn = PREV_INSN (insn);
28681
28682 /* Up to now we've only seen notes or barriers. */
28683 if (insn)
28684 {
28685 if (LABEL_P (insn)
28686 || (NOTE_P (insn)
28687 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28688 /* Trailing label: <barrier>. */
28689 fputs ("\tnop\n", file);
28690 else
28691 {
28692 /* Lastly, see if we have a completely empty function body. */
28693 while (insn && ! INSN_P (insn))
28694 insn = PREV_INSN (insn);
28695 /* If we don't find any insns, we've got an empty function body;
28696 i.e. completely empty, without a return or branch. This is
28697 taken as the case where a function body has been removed
28698 because it contains an inline __builtin_unreachable(). GCC
28699 states that reaching __builtin_unreachable() means UB so we're
28700 not obliged to do anything special; however, we want
28701 non-zero-sized function bodies. To meet this, and help the
28702 user out, let's trap the case. */
28703 if (insn == NULL)
28704 fputs ("\ttrap\n", file);
28705 }
28706 }
28707 else if (deleted_debug_label)
28708 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28709 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28710 CODE_LABEL_NUMBER (insn) = -1;
28711 }
28712 #endif
28713
28714 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28715 on its format.
28716
28717 We don't output a traceback table if -finhibit-size-directive was
28718 used. The documentation for -finhibit-size-directive reads
28719 ``don't output a @code{.size} assembler directive, or anything
28720 else that would cause trouble if the function is split in the
28721 middle, and the two halves are placed at locations far apart in
28722 memory.'' The traceback table has this property, since it
28723 includes the offset from the start of the function to the
28724 traceback table itself.
28725
28726 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28727 different traceback table. */
28728 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28729 && ! flag_inhibit_size_directive
28730 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28731 {
28732 const char *fname = NULL;
28733 const char *language_string = lang_hooks.name;
28734 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28735 int i;
28736 int optional_tbtab;
28737 rs6000_stack_t *info = rs6000_stack_info ();
28738
28739 if (rs6000_traceback == traceback_full)
28740 optional_tbtab = 1;
28741 else if (rs6000_traceback == traceback_part)
28742 optional_tbtab = 0;
28743 else
28744 optional_tbtab = !optimize_size && !TARGET_ELF;
28745
28746 if (optional_tbtab)
28747 {
28748 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28749 while (*fname == '.') /* V.4 encodes . in the name */
28750 fname++;
28751
28752 /* Need label immediately before tbtab, so we can compute
28753 its offset from the function start. */
28754 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28755 ASM_OUTPUT_LABEL (file, fname);
28756 }
28757
28758 /* The .tbtab pseudo-op can only be used for the first eight
28759 expressions, since it can't handle the possibly variable-length
28760 fields that follow. However, if you omit the optional fields,
28761 the assembler outputs zeros for all optional fields anyway,
28762 giving each variable-length field its minimum length (as defined
28763 in sys/debug.h). Thus we cannot use the .tbtab pseudo-op at
28764 all. */
28765
28766 /* An all-zero word flags the start of the tbtab, for debuggers
28767 that have to find it by searching forward from the entry
28768 point or from the current pc. */
28769 fputs ("\t.long 0\n", file);
28770
28771 /* Tbtab format type. Use format type 0. */
28772 fputs ("\t.byte 0,", file);
28773
28774 /* Language type. Unfortunately, there does not seem to be any
28775 official way to discover the language being compiled, so we
28776 use language_string.
28777 C is 0. Fortran is 1. Ada is 3. C++ is 9.
28778 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28779 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
28780 numbers either, so for now use 0. */
28781 if (lang_GNU_C ()
28782 || ! strcmp (language_string, "GNU GIMPLE")
28783 || ! strcmp (language_string, "GNU Go")
28784 || ! strcmp (language_string, "GNU D")
28785 || ! strcmp (language_string, "libgccjit"))
28786 i = 0;
28787 else if (! strcmp (language_string, "GNU F77")
28788 || lang_GNU_Fortran ())
28789 i = 1;
28790 else if (! strcmp (language_string, "GNU Ada"))
28791 i = 3;
28792 else if (lang_GNU_CXX ()
28793 || ! strcmp (language_string, "GNU Objective-C++"))
28794 i = 9;
28795 else if (! strcmp (language_string, "GNU Java"))
28796 i = 13;
28797 else if (! strcmp (language_string, "GNU Objective-C"))
28798 i = 14;
28799 else
28800 gcc_unreachable ();
28801 fprintf (file, "%d,", i);
28802
28803 /* 8 single bit fields: global linkage (not set for C extern linkage,
28804 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28805 from start of procedure stored in tbtab, internal function, function
28806 has controlled storage, function has no toc, function uses fp,
28807 function logs/aborts fp operations. */
28808 /* Assume that fp operations are used if any fp reg must be saved. */
28809 fprintf (file, "%d,",
28810 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
28811
28812 /* 6 bitfields: function is interrupt handler, name present in
28813 proc table, function calls alloca, on condition directives
28814 (controls stack walks, 3 bits), saves condition reg, saves
28815 link reg. */
28816 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28817 set up as a frame pointer, even when there is no alloca call. */
28818 fprintf (file, "%d,",
28819 ((optional_tbtab << 6)
28820 | ((optional_tbtab & frame_pointer_needed) << 5)
28821 | (info->cr_save_p << 1)
28822 | (info->lr_save_p)));
28823
28824 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28825 (6 bits). */
28826 fprintf (file, "%d,",
28827 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28828
28829 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28830 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28831
28832 if (optional_tbtab)
28833 {
28834 /* Compute the parameter info from the function decl argument
28835 list. */
28836 tree decl;
28837 int next_parm_info_bit = 31;
28838
28839 for (decl = DECL_ARGUMENTS (current_function_decl);
28840 decl; decl = DECL_CHAIN (decl))
28841 {
28842 rtx parameter = DECL_INCOMING_RTL (decl);
28843 machine_mode mode = GET_MODE (parameter);
28844
28845 if (REG_P (parameter))
28846 {
28847 if (SCALAR_FLOAT_MODE_P (mode))
28848 {
28849 int bits;
28850
28851 float_parms++;
28852
28853 switch (mode)
28854 {
28855 case E_SFmode:
28856 case E_SDmode:
28857 bits = 0x2;
28858 break;
28859
28860 case E_DFmode:
28861 case E_DDmode:
28862 case E_TFmode:
28863 case E_TDmode:
28864 case E_IFmode:
28865 case E_KFmode:
28866 bits = 0x3;
28867 break;
28868
28869 default:
28870 gcc_unreachable ();
28871 }
28872
28873 /* If only one bit will fit, don't or in this entry. */
28874 if (next_parm_info_bit > 0)
28875 parm_info |= (bits << (next_parm_info_bit - 1));
28876 next_parm_info_bit -= 2;
28877 }
28878 else
28879 {
28880 fixed_parms += ((GET_MODE_SIZE (mode)
28881 + (UNITS_PER_WORD - 1))
28882 / UNITS_PER_WORD);
28883 next_parm_info_bit -= 1;
28884 }
28885 }
28886 }
28887 }
28888
28889 /* Number of fixed point parameters. */
28890 /* This is actually the number of words of fixed-point parameters;
28891 an 8-byte struct counts as 2, so the maximum value is 8. */
28892 fprintf (file, "%d,", fixed_parms);
28893
28894 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28895 all on stack. */
28896 /* This is actually the number of fp registers that hold parameters;
28897 and thus the maximum value is 13. */
28898 /* Set parameters on stack bit if parameters are not in their original
28899 registers, regardless of whether they are on the stack? Xlc
28900 seems to set the bit when not optimizing. */
28901 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28902
28903 if (optional_tbtab)
28904 {
28905 /* Optional fields follow. Some are variable length. */
28906
28907 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28908 float, 11 double float. */
28909 /* There is an entry for each parameter in a register, in the order
28910 that they occur in the parameter list. Any intervening arguments
28911 on the stack are ignored. If the list overflows a long (max
28912 possible length 34 bits) then completely leave off all elements
28913 that don't fit. */
28914 /* Only emit this long if there was at least one parameter. */
28915 if (fixed_parms || float_parms)
28916 fprintf (file, "\t.long %d\n", parm_info);
28917
28918 /* Offset from start of code to tb table. */
28919 fputs ("\t.long ", file);
28920 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28921 RS6000_OUTPUT_BASENAME (file, fname);
28922 putc ('-', file);
28923 rs6000_output_function_entry (file, fname);
28924 putc ('\n', file);
28925
28926 /* Interrupt handler mask. */
28927 /* Omit this long, since we never set the interrupt handler bit
28928 above. */
28929
28930 /* Number of CTL (controlled storage) anchors. */
28931 /* Omit this long, since the has_ctl bit is never set above. */
28932
28933 /* Displacement into stack of each CTL anchor. */
28934 /* Omit this list of longs, because there are no CTL anchors. */
28935
28936 /* Length of function name. */
28937 if (*fname == '*')
28938 ++fname;
28939 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28940
28941 /* Function name. */
28942 assemble_string (fname, strlen (fname));
28943
28944 /* Register for alloca automatic storage; this is always reg 31.
28945 Only emit this if the alloca bit was set above. */
28946 if (frame_pointer_needed)
28947 fputs ("\t.byte 31\n", file);
28948
28949 fputs ("\t.align 2\n", file);
28950 }
28951 }
28952
28953 /* Arrange to define .LCTOC1 label, if not already done. */
28954 if (need_toc_init)
28955 {
28956 need_toc_init = 0;
28957 if (!toc_initialized)
28958 {
28959 switch_to_section (toc_section);
28960 switch_to_section (current_function_section ());
28961 }
28962 }
28963 }
28964
28965 /* -fsplit-stack support. */
28966
28967 /* A SYMBOL_REF for __morestack. */
28968 static GTY(()) rtx morestack_ref;
28969
28970 static rtx
28971 gen_add3_const (rtx rt, rtx ra, long c)
28972 {
28973 if (TARGET_64BIT)
28974 return gen_adddi3 (rt, ra, GEN_INT (c));
28975 else
28976 return gen_addsi3 (rt, ra, GEN_INT (c));
28977 }
28978
28979 /* Emit -fsplit-stack prologue, which goes before the regular function
28980 prologue (at local entry point in the case of ELFv2). */
28981
28982 void
28983 rs6000_expand_split_stack_prologue (void)
28984 {
28985 rs6000_stack_t *info = rs6000_stack_info ();
28986 unsigned HOST_WIDE_INT allocate;
28987 long alloc_hi, alloc_lo;
28988 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
28989 rtx_insn *insn;
28990
28991 gcc_assert (flag_split_stack && reload_completed);
28992
28993 if (!info->push_p)
28994 return;
28995
28996 if (global_regs[29])
28997 {
28998 error ("%qs uses register r29", "%<-fsplit-stack%>");
28999 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29000 "conflicts with %qD", global_regs_decl[29]);
29001 }
29002
29003 allocate = info->total_size;
29004 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29005 {
29006 sorry ("Stack frame larger than 2G is not supported for "
29007 "%<-fsplit-stack%>");
29008 return;
29009 }
29010 if (morestack_ref == NULL_RTX)
29011 {
29012 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29013 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29014 | SYMBOL_FLAG_FUNCTION);
29015 }
29016
29017 r0 = gen_rtx_REG (Pmode, 0);
29018 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29019 r12 = gen_rtx_REG (Pmode, 12);
29020 emit_insn (gen_load_split_stack_limit (r0));
29021 /* Always emit two insns here to calculate the requested stack,
29022 so that the linker can edit them when adjusting size for calling
29023 non-split-stack code. */
29024 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29025 alloc_lo = -allocate - alloc_hi;
29026 if (alloc_hi != 0)
29027 {
29028 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29029 if (alloc_lo != 0)
29030 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29031 else
29032 emit_insn (gen_nop ());
29033 }
29034 else
29035 {
29036 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29037 emit_insn (gen_nop ());
29038 }
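/* As a worked example, allocate == 0x12345 gives -allocate == -0x12345,
   which splits into alloc_hi == -0x10000 and alloc_lo == -0x2345, so we
   typically emit "addis r12,r1,-1" followed by "addi r12,r12,-9029";
   the rounding by 0x8000 guarantees both halves fit their signed
   16-bit immediate fields.  */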
29039
29040 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29041 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29042 ok_label = gen_label_rtx ();
29043 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29044 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29045 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29046 pc_rtx);
29047 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29048 JUMP_LABEL (insn) = ok_label;
29049 /* Mark the jump as very likely to be taken. */
29050 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29051
29052 lr = gen_rtx_REG (Pmode, LR_REGNO);
29053 insn = emit_move_insn (r0, lr);
29054 RTX_FRAME_RELATED_P (insn) = 1;
29055 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29056 RTX_FRAME_RELATED_P (insn) = 1;
29057
29058 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29059 const0_rtx, const0_rtx));
29060 call_fusage = NULL_RTX;
29061 use_reg (&call_fusage, r12);
29062 /* Say the call uses r0, even though it doesn't, to stop regrename
29063 from twiddling with the insns saving lr, trashing args for cfun.
29064 The insns restoring lr are similarly protected by making
29065 split_stack_return use r0. */
29066 use_reg (&call_fusage, r0);
29067 add_function_usage_to (insn, call_fusage);
29068 /* Indicate that this function can't jump to non-local gotos. */
29069 make_reg_eh_region_note_nothrow_nononlocal (insn);
29070 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29071 insn = emit_move_insn (lr, r0);
29072 add_reg_note (insn, REG_CFA_RESTORE, lr);
29073 RTX_FRAME_RELATED_P (insn) = 1;
29074 emit_insn (gen_split_stack_return ());
29075
29076 emit_label (ok_label);
29077 LABEL_NUSES (ok_label) = 1;
29078 }
29079
29080 /* Return the internal arg pointer used for function incoming
29081 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29082 to copy it to a pseudo in order for it to be preserved over calls
29083 and suchlike. We'd really like to use a pseudo here for the
29084 internal arg pointer but data-flow analysis is not prepared to
29085 accept pseudos as live at the beginning of a function. */
29086
29087 static rtx
29088 rs6000_internal_arg_pointer (void)
29089 {
29090 if (flag_split_stack
29091 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29092 == NULL))
29094 {
29095 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29096 {
29097 rtx pat;
29098
29099 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29100 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29101
29102 /* Put the pseudo initialization right after the note at the
29103 beginning of the function. */
29104 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29105 gen_rtx_REG (Pmode, 12));
29106 push_topmost_sequence ();
29107 emit_insn_after (pat, get_insns ());
29108 pop_topmost_sequence ();
29109 }
29110 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29111 FIRST_PARM_OFFSET (current_function_decl));
29112 return copy_to_reg (ret);
29113 }
29114 return virtual_incoming_args_rtx;
29115 }
29116
29117 /* We may have to tell the dataflow pass that the split stack prologue
29118 is initializing a register. */
29119
29120 static void
29121 rs6000_live_on_entry (bitmap regs)
29122 {
29123 if (flag_split_stack)
29124 bitmap_set_bit (regs, 12);
29125 }
29126
29127 /* Emit -fsplit-stack dynamic stack allocation space check. */
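/* The emitted sequence computes requested = sp - SIZE and branches to
   LABEL when requested >= limit (unsigned), i.e. when the allocation
   still fits within the current stack segment.  */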
29128
29129 void
29130 rs6000_split_stack_space_check (rtx size, rtx label)
29131 {
29132 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29133 rtx limit = gen_reg_rtx (Pmode);
29134 rtx requested = gen_reg_rtx (Pmode);
29135 rtx cmp = gen_reg_rtx (CCUNSmode);
29136 rtx jump;
29137
29138 emit_insn (gen_load_split_stack_limit (limit));
29139 if (CONST_INT_P (size))
29140 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29141 else
29142 {
29143 size = force_reg (Pmode, size);
29144 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29145 }
29146 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29147 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29148 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29149 gen_rtx_LABEL_REF (VOIDmode, label),
29150 pc_rtx);
29151 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29152 JUMP_LABEL (jump) = label;
29153 }
29154 \f
29155 /* A C compound statement that outputs the assembler code for a thunk
29156 function, used to implement C++ virtual function calls with
29157 multiple inheritance. The thunk acts as a wrapper around a virtual
29158 function, adjusting the implicit object parameter before handing
29159 control off to the real function.
29160
29161 First, emit code to add the integer DELTA to the location that
29162 contains the incoming first argument. Assume that this argument
29163 contains a pointer, and is the one used to pass the `this' pointer
29164 in C++. This is the incoming argument *before* the function
29165 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29166 values of all other incoming arguments.
29167
29168 After the addition, emit code to jump to FUNCTION, which is a
29169 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29170 not touch the return address. Hence returning from FUNCTION will
29171 return to whoever called the current `thunk'.
29172
29173 The effect must be as if FUNCTION had been called directly with the
29174 adjusted first argument. This macro is responsible for emitting
29175 all of the code for a thunk function; output_function_prologue()
29176 and output_function_epilogue() are not invoked.
29177
29178 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29179 been extracted from it.) It might possibly be useful on some
29180 targets, but probably not.
29181
29182 If you do not define this macro, the target-independent code in the
29183 C++ frontend will generate a less efficient heavyweight thunk that
29184 calls FUNCTION instead of jumping to it. The generic approach does
29185 not support varargs. */
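/* For instance, with DELTA == -8, no VCALL_OFFSET, and a 64-bit target,
   the emitted thunk is essentially "addi r3,r3,-8" followed by a direct
   branch to FUNCTION, modulo ABI-specific call sequence details.  */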
29186
29187 static void
29188 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29189 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29190 tree function)
29191 {
29192 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
29193 rtx this_rtx, funexp;
29194 rtx_insn *insn;
29195
29196 reload_completed = 1;
29197 epilogue_completed = 1;
29198
29199 /* Mark the end of the (empty) prologue. */
29200 emit_note (NOTE_INSN_PROLOGUE_END);
29201
29202 /* Find the "this" pointer. If the function returns a structure,
29203 the structure return pointer is in r3. */
29204 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29205 this_rtx = gen_rtx_REG (Pmode, 4);
29206 else
29207 this_rtx = gen_rtx_REG (Pmode, 3);
29208
29209 /* Apply the constant offset, if required. */
29210 if (delta)
29211 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29212
29213 /* Apply the offset from the vtable, if required. */
29214 if (vcall_offset)
29215 {
29216 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29217 rtx tmp = gen_rtx_REG (Pmode, 12);
29218
29219 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29220 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29221 {
29222 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29223 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29224 }
29225 else
29226 {
29227 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29228
29229 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29230 }
29231 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29232 }
29233
29234 /* Generate a tail call to the target function. */
29235 if (!TREE_USED (function))
29236 {
29237 assemble_external (function);
29238 TREE_USED (function) = 1;
29239 }
29240 funexp = XEXP (DECL_RTL (function), 0);
29241 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29242
29243 #if TARGET_MACHO
29244 if (MACHOPIC_INDIRECT)
29245 funexp = machopic_indirect_call_target (funexp);
29246 #endif
29247
29248 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29249 generate sibcall RTL explicitly. */
29250 insn = emit_call_insn (
29251 gen_rtx_PARALLEL (VOIDmode,
29252 gen_rtvec (3,
29253 gen_rtx_CALL (VOIDmode,
29254 funexp, const0_rtx),
29255 gen_rtx_USE (VOIDmode, const0_rtx),
29256 simple_return_rtx)));
29257 SIBLING_CALL_P (insn) = 1;
29258 emit_barrier ();
29259
29260 /* Run just enough of rest_of_compilation to get the insns emitted.
29261 There's not really enough bulk here to make other passes such as
29262 instruction scheduling worth while. Note that use_thunk calls
29263 assemble_start_function and assemble_end_function. */
29264 insn = get_insns ();
29265 shorten_branches (insn);
29266 assemble_start_function (thunk_fndecl, fnname);
29267 final_start_function (insn, file, 1);
29268 final (insn, file, 1);
29269 final_end_function ();
29270 assemble_end_function (thunk_fndecl, fnname);
29271
29272 reload_completed = 0;
29273 epilogue_completed = 0;
29274 }
29275 \f
29276 /* A quick summary of the various types of 'constant-pool tables'
29277 under PowerPC:
29278
29279 Target      Flags                Name             One table per
29280 AIX         (none)               AIX TOC          object file
29281 AIX         -mfull-toc           AIX TOC          object file
29282 AIX         -mminimal-toc        AIX minimal TOC  translation unit
29283 SVR4/EABI   (none)               SVR4 SDATA       object file
29284 SVR4/EABI   -fpic                SVR4 pic         object file
29285 SVR4/EABI   -fPIC                SVR4 PIC         translation unit
29286 SVR4/EABI   -mrelocatable        EABI TOC         function
29287 SVR4/EABI   -maix                AIX TOC          object file
29288 SVR4/EABI   -maix -mminimal-toc  AIX minimal TOC  translation unit
29289 
29290 "Set by" tells who initializes the register, "Made by" who creates the
29291 entries; Addrs?/FP?/Sum? say whether entries contain addresses, FP values, and sums.
29292 
29293 Name             Reg.  Set by  Made by  Addrs?  FP?      Sum?
29294 AIX TOC          2     crt0    as       Y       option   option
29295 AIX minimal TOC  30    prolog  gcc      Y       Y        option
29296 SVR4 SDATA       13    crt0    gcc      N       Y        N
29297 SVR4 pic         30    prolog  ld       Y       not yet  N
29298 SVR4 PIC         30    prolog  gcc      Y       option   option
29299 EABI TOC         30    prolog  gcc      Y       option   option
29300
29301 */
29302
29303 /* Hash functions for the hash table. */
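/* The multipliers 613 and 1231 used below are odd primes; multiplying
   by a prime before adding each component mixes the bits of successive
   fields into the running hash.  */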
29304
29305 static unsigned
29306 rs6000_hash_constant (rtx k)
29307 {
29308 enum rtx_code code = GET_CODE (k);
29309 machine_mode mode = GET_MODE (k);
29310 unsigned result = (code << 3) ^ mode;
29311 const char *format;
29312 int flen, fidx;
29313
29314 format = GET_RTX_FORMAT (code);
29315 flen = strlen (format);
29316 fidx = 0;
29317
29318 switch (code)
29319 {
29320 case LABEL_REF:
29321 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29322
29323 case CONST_WIDE_INT:
29324 {
29325 int i;
29326 flen = CONST_WIDE_INT_NUNITS (k);
29327 for (i = 0; i < flen; i++)
29328 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29329 return result;
29330 }
29331
29332 case CONST_DOUBLE:
29333 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29334
29335 case CODE_LABEL:
29336 fidx = 3;
29337 break;
29338
29339 default:
29340 break;
29341 }
29342
29343 for (; fidx < flen; fidx++)
29344 switch (format[fidx])
29345 {
29346 case 's':
29347 {
29348 unsigned i, len;
29349 const char *str = XSTR (k, fidx);
29350 len = strlen (str);
29351 result = result * 613 + len;
29352 for (i = 0; i < len; i++)
29353 result = result * 613 + (unsigned) str[i];
29354 break;
29355 }
29356 case 'u':
29357 case 'e':
29358 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29359 break;
29360 case 'i':
29361 case 'n':
29362 result = result * 613 + (unsigned) XINT (k, fidx);
29363 break;
29364 case 'w':
29365 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29366 result = result * 613 + (unsigned) XWINT (k, fidx);
29367 else
29368 {
29369 size_t i;
29370 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29371 result = result * 613 + (unsigned) (XWINT (k, fidx)
29372 >> CHAR_BIT * i);
29373 }
29374 break;
29375 case '0':
29376 break;
29377 default:
29378 gcc_unreachable ();
29379 }
29380
29381 return result;
29382 }
29383
29384 hashval_t
29385 toc_hasher::hash (toc_hash_struct *thc)
29386 {
29387 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29388 }
29389
29390 /* Compare H1 and H2 for equivalence. */
29391
29392 bool
29393 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29394 {
29395 rtx r1 = h1->key;
29396 rtx r2 = h2->key;
29397
29398 if (h1->key_mode != h2->key_mode)
29399 return 0;
29400
29401 return rtx_equal_p (r1, r2);
29402 }
29403
29404 /* These are the names given by the C++ front-end to vtables and
29405 vtable-like objects. Ideally, this logic should not be here;
29406 instead, there should be some programmatic way of inquiring as
29407 to whether or not an object is a vtable. */
29408
29409 #define VTABLE_NAME_P(NAME) \
29410 (strncmp ("_vt.", NAME, strlen ("_vt.")) == 0 \
29411 || strncmp ("_ZTV", NAME, strlen ("_ZTV")) == 0 \
29412 || strncmp ("_ZTT", NAME, strlen ("_ZTT")) == 0 \
29413 || strncmp ("_ZTI", NAME, strlen ("_ZTI")) == 0 \
29414 || strncmp ("_ZTC", NAME, strlen ("_ZTC")) == 0)
29415
29416 #ifdef NO_DOLLAR_IN_LABEL
29417 /* Return a GGC-allocated character string translating dollar signs in
29418 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
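/* For example, "foo$stub" becomes "foo_stub".  A '$' in the first
   position is left alone, since the q == name check below returns
   early.  */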
29419
29420 const char *
29421 rs6000_xcoff_strip_dollar (const char *name)
29422 {
29423 char *strip, *p;
29424 const char *q;
29425 size_t len;
29426
29427 q = (const char *) strchr (name, '$');
29428
29429 if (q == 0 || q == name)
29430 return name;
29431
29432 len = strlen (name);
29433 strip = XALLOCAVEC (char, len + 1);
29434 strcpy (strip, name);
29435 p = strip + (q - name);
29436 while (p)
29437 {
29438 *p = '_';
29439 p = strchr (p + 1, '$');
29440 }
29441
29442 return ggc_alloc_string (strip, len);
29443 }
29444 #endif
29445
29446 void
29447 rs6000_output_symbol_ref (FILE *file, rtx x)
29448 {
29449 const char *name = XSTR (x, 0);
29450
29451 /* Currently C++ toc references to vtables can be emitted before it
29452 is decided whether the vtable is public or private. If this is
29453 the case, then the linker will eventually complain that there is
29454 a reference to an unknown section. Thus, for vtables only,
29455 we emit the TOC reference to reference the identifier and not the
29456 symbol. */
29457 if (VTABLE_NAME_P (name))
29458 {
29459 RS6000_OUTPUT_BASENAME (file, name);
29460 }
29461 else
29462 assemble_name (file, name);
29463 }
29464
29465 /* Output a TOC entry. We derive the entry name from what is being
29466 written. */
29467
29468 void
29469 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29470 {
29471 char buf[256];
29472 const char *name = buf;
29473 rtx base = x;
29474 HOST_WIDE_INT offset = 0;
29475
29476 gcc_assert (!TARGET_NO_TOC);
29477
29478 /* When the linker won't eliminate them, don't output duplicate
29479 TOC entries (this happens on AIX if there is any kind of TOC,
29480 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29481 CODE_LABELs. */
29482 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29483 {
29484 struct toc_hash_struct *h;
29485
29486 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29487 time because GGC is not initialized at that point. */
29488 if (toc_hash_table == NULL)
29489 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29490
29491 h = ggc_alloc<toc_hash_struct> ();
29492 h->key = x;
29493 h->key_mode = mode;
29494 h->labelno = labelno;
29495
29496 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29497 if (*found == NULL)
29498 *found = h;
29499 else /* This is indeed a duplicate.
29500 Set this label equal to that label. */
29501 {
29502 fputs ("\t.set ", file);
29503 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29504 fprintf (file, "%d,", labelno);
29505 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29506 fprintf (file, "%d\n", ((*found)->labelno));
29507
29508 #ifdef HAVE_AS_TLS
29509 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29510 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29511 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29512 {
29513 fputs ("\t.set ", file);
29514 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29515 fprintf (file, "%d,", labelno);
29516 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29517 fprintf (file, "%d\n", ((*found)->labelno));
29518 }
29519 #endif
29520 return;
29521 }
29522 }
29523
29524 /* If we're going to put a double constant in the TOC, make sure it's
29525 aligned properly when strict alignment is on. */
29526 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29527 && STRICT_ALIGNMENT
29528 && GET_MODE_BITSIZE (mode) >= 64
29529 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
29530 ASM_OUTPUT_ALIGN (file, 3);
29531 }
29532
29533 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29534
29535 /* Handle FP constants specially. Note that if we have a minimal
29536 TOC, things we put here aren't actually in the TOC, so we can allow
29537 FP constants. */
29538 if (CONST_DOUBLE_P (x)
29539 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29540 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29541 {
29542 long k[4];
29543
29544 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29545 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29546 else
29547 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29548
29549 if (TARGET_64BIT)
29550 {
29551 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29552 fputs (DOUBLE_INT_ASM_OP, file);
29553 else
29554 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29555 k[0] & 0xffffffff, k[1] & 0xffffffff,
29556 k[2] & 0xffffffff, k[3] & 0xffffffff);
29557 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29558 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29559 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29560 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29561 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29562 return;
29563 }
29564 else
29565 {
29566 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29567 fputs ("\t.long ", file);
29568 else
29569 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29570 k[0] & 0xffffffff, k[1] & 0xffffffff,
29571 k[2] & 0xffffffff, k[3] & 0xffffffff);
29572 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29573 k[0] & 0xffffffff, k[1] & 0xffffffff,
29574 k[2] & 0xffffffff, k[3] & 0xffffffff);
29575 return;
29576 }
29577 }
29578 else if (CONST_DOUBLE_P (x)
29579 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29580 {
29581 long k[2];
29582
29583 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29584 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29585 else
29586 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29587
29588 if (TARGET_64BIT)
29589 {
29590 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29591 fputs (DOUBLE_INT_ASM_OP, file);
29592 else
29593 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29594 k[0] & 0xffffffff, k[1] & 0xffffffff);
29595 fprintf (file, "0x%lx%08lx\n",
29596 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29597 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29598 return;
29599 }
29600 else
29601 {
29602 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29603 fputs ("\t.long ", file);
29604 else
29605 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29606 k[0] & 0xffffffff, k[1] & 0xffffffff);
29607 fprintf (file, "0x%lx,0x%lx\n",
29608 k[0] & 0xffffffff, k[1] & 0xffffffff);
29609 return;
29610 }
29611 }
29612 else if (CONST_DOUBLE_P (x)
29613 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29614 {
29615 long l;
29616
29617 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29618 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29619 else
29620 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29621
29622 if (TARGET_64BIT)
29623 {
29624 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29625 fputs (DOUBLE_INT_ASM_OP, file);
29626 else
29627 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29628 if (WORDS_BIG_ENDIAN)
29629 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29630 else
29631 fprintf (file, "0x%lx\n", l & 0xffffffff);
29632 return;
29633 }
29634 else
29635 {
29636 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29637 fputs ("\t.long ", file);
29638 else
29639 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29640 fprintf (file, "0x%lx\n", l & 0xffffffff);
29641 return;
29642 }
29643 }
29644 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
29645 {
29646 unsigned HOST_WIDE_INT low;
29647 HOST_WIDE_INT high;
29648
29649 low = INTVAL (x) & 0xffffffff;
29650 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29651
29652 /* TOC entries are always Pmode-sized, so when big-endian
29653 smaller integer constants in the TOC need to be padded.
29654 (This is still a win over putting the constants in
29655 a separate constant pool, because then we'd have
29656 to have both a TOC entry _and_ the actual constant.)
29657
29658 For a 32-bit target, CONST_INT values are loaded and shifted
29659 entirely within `low' and can be stored in one TOC entry. */
29660
29661 /* It would be easy to make this work, but it doesn't now. */
29662 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29663
29664 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29665 {
29666 low |= high << 32;
29667 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29668 high = (HOST_WIDE_INT) low >> 32;
29669 low &= 0xffffffff;
29670 }
29671
29672 if (TARGET_64BIT)
29673 {
29674 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29675 fputs (DOUBLE_INT_ASM_OP, file);
29676 else
29677 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29678 (long) high & 0xffffffff, (long) low & 0xffffffff);
29679 fprintf (file, "0x%lx%08lx\n",
29680 (long) high & 0xffffffff, (long) low & 0xffffffff);
29681 return;
29682 }
29683 else
29684 {
29685 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29686 {
29687 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29688 fputs ("\t.long ", file);
29689 else
29690 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29691 (long) high & 0xffffffff, (long) low & 0xffffffff);
29692 fprintf (file, "0x%lx,0x%lx\n",
29693 (long) high & 0xffffffff, (long) low & 0xffffffff);
29694 }
29695 else
29696 {
29697 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29698 fputs ("\t.long ", file);
29699 else
29700 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29701 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29702 }
29703 return;
29704 }
29705 }
29706
29707 if (GET_CODE (x) == CONST)
29708 {
29709 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29710 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
29711
29712 base = XEXP (XEXP (x, 0), 0);
29713 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29714 }
29715
29716 switch (GET_CODE (base))
29717 {
29718 case SYMBOL_REF:
29719 name = XSTR (base, 0);
29720 break;
29721
29722 case LABEL_REF:
29723 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29724 CODE_LABEL_NUMBER (XEXP (base, 0)));
29725 break;
29726
29727 case CODE_LABEL:
29728 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29729 break;
29730
29731 default:
29732 gcc_unreachable ();
29733 }
29734
29735 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29736 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29737 else
29738 {
29739 fputs ("\t.tc ", file);
29740 RS6000_OUTPUT_BASENAME (file, name);
29741
29742 if (offset < 0)
29743 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29744 else if (offset)
29745 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29746
29747 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29748 after other TOC symbols, reducing overflow of small TOC access
29749 to [TC] symbols. */
29750 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29751 ? "[TE]," : "[TC],", file);
29752 }
29753
29754 /* Currently C++ toc references to vtables can be emitted before it
29755 is decided whether the vtable is public or private. If this is
29756 the case, then the linker will eventually complain that there is
29757 a TOC reference to an unknown section. Thus, for vtables only,
29758 we emit the TOC reference to reference the symbol and not the
29759 section. */
29760 if (VTABLE_NAME_P (name))
29761 {
29762 RS6000_OUTPUT_BASENAME (file, name);
29763 if (offset < 0)
29764 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29765 else if (offset > 0)
29766 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29767 }
29768 else
29769 output_addr_const (file, x);
29770
29771 #if HAVE_AS_TLS
29772 if (TARGET_XCOFF && SYMBOL_REF_P (base))
29773 {
29774 switch (SYMBOL_REF_TLS_MODEL (base))
29775 {
29776 case 0:
29777 break;
29778 case TLS_MODEL_LOCAL_EXEC:
29779 fputs ("@le", file);
29780 break;
29781 case TLS_MODEL_INITIAL_EXEC:
29782 fputs ("@ie", file);
29783 break;
29784 /* Use global-dynamic for local-dynamic. */
29785 case TLS_MODEL_GLOBAL_DYNAMIC:
29786 case TLS_MODEL_LOCAL_DYNAMIC:
29787 putc ('\n', file);
29788 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29789 fputs ("\t.tc .", file);
29790 RS6000_OUTPUT_BASENAME (file, name);
29791 fputs ("[TC],", file);
29792 output_addr_const (file, x);
29793 fputs ("@m", file);
29794 break;
29795 default:
29796 gcc_unreachable ();
29797 }
29798 }
29799 #endif
29800
29801 putc ('\n', file);
29802 }
29803 \f
29804 /* Output an assembler pseudo-op to write an ASCII string of N characters
29805 starting at P to FILE.
29806
29807 On the RS/6000, we have to do this using the .byte operation and
29808 write out special characters outside the quoted string.
29809 Also, the assembler is broken; very long strings are truncated,
29810 so we must artificially break them up early. */
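/* For example, the three input bytes 'a', '\n', 'b' come out as:
	.byte "a"
	.byte 10
	.byte "b"  */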
29811
29812 void
29813 output_ascii (FILE *file, const char *p, int n)
29814 {
29815 char c;
29816 int i, count_string;
29817 const char *for_string = "\t.byte \"";
29818 const char *for_decimal = "\t.byte ";
29819 const char *to_close = NULL;
29820
29821 count_string = 0;
29822 for (i = 0; i < n; i++)
29823 {
29824 c = *p++;
29825 if (c >= ' ' && c < 0177)
29826 {
29827 if (for_string)
29828 fputs (for_string, file);
29829 putc (c, file);
29830
29831 /* Write two quotes to get one. */
29832 if (c == '"')
29833 {
29834 putc (c, file);
29835 ++count_string;
29836 }
29837
29838 for_string = NULL;
29839 for_decimal = "\"\n\t.byte ";
29840 to_close = "\"\n";
29841 ++count_string;
29842
29843 if (count_string >= 512)
29844 {
29845 fputs (to_close, file);
29846
29847 for_string = "\t.byte \"";
29848 for_decimal = "\t.byte ";
29849 to_close = NULL;
29850 count_string = 0;
29851 }
29852 }
29853 else
29854 {
29855 if (for_decimal)
29856 fputs (for_decimal, file);
29857 fprintf (file, "%d", c);
29858
29859 for_string = "\n\t.byte \"";
29860 for_decimal = ", ";
29861 to_close = "\n";
29862 count_string = 0;
29863 }
29864 }
29865
29866 /* Now close the string if we have written one. Then end the line. */
29867 if (to_close)
29868 fputs (to_close, file);
29869 }
29870 \f
29871 /* Generate a unique section name for FILENAME for a section type
29872 represented by SECTION_DESC. Output goes into BUF.
29873
29874 SECTION_DESC can be any string, as long as it is different for each
29875 possible section type.
29876
29877 We name the section in the same manner as xlc. The name begins with an
29878 underscore followed by the filename (after stripping any leading directory
29879 names) with the last period replaced by the string SECTION_DESC. If
29880 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29881 the name. */
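/* For example, FILENAME "dir/foo.c" with SECTION_DESC "data" produces
   "_foodata"; note that the characters after the last period are
   dropped, not kept.  */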
29882
29883 void
29884 rs6000_gen_section_name (char **buf, const char *filename,
29885 const char *section_desc)
29886 {
29887 const char *q, *after_last_slash, *last_period = 0;
29888 char *p;
29889 int len;
29890
29891 after_last_slash = filename;
29892 for (q = filename; *q; q++)
29893 {
29894 if (*q == '/')
29895 after_last_slash = q + 1;
29896 else if (*q == '.')
29897 last_period = q;
29898 }
29899
29900 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29901 *buf = (char *) xmalloc (len);
29902
29903 p = *buf;
29904 *p++ = '_';
29905
29906 for (q = after_last_slash; *q; q++)
29907 {
29908 if (q == last_period)
29909 {
29910 strcpy (p, section_desc);
29911 p += strlen (section_desc);
29912 break;
29913 }
29914
29915 else if (ISALNUM (*q))
29916 *p++ = *q;
29917 }
29918
29919 if (last_period == 0)
29920 strcpy (p, section_desc);
29921 else
29922 *p = '\0';
29923 }
29924 \f
29925 /* Emit profile function. */
29926
29927 void
29928 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29929 {
29930 /* Non-standard profiling for kernels, which just saves LR then calls
29931 _mcount without worrying about arg saves. The idea is to change
29932 the function prologue as little as possible as it isn't easy to
29933 account for arg save/restore code added just for _mcount. */
29934 if (TARGET_PROFILE_KERNEL)
29935 return;
29936
29937 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29938 {
29939 #ifndef NO_PROFILE_COUNTERS
29940 # define NO_PROFILE_COUNTERS 0
29941 #endif
29942 if (NO_PROFILE_COUNTERS)
29943 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29944 LCT_NORMAL, VOIDmode);
29945 else
29946 {
29947 char buf[30];
29948 const char *label_name;
29949 rtx fun;
29950
29951 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29952 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29953 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29954
29955 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29956 LCT_NORMAL, VOIDmode, fun, Pmode);
29957 }
29958 }
29959 else if (DEFAULT_ABI == ABI_DARWIN)
29960 {
29961 const char *mcount_name = RS6000_MCOUNT;
29962 int caller_addr_regno = LR_REGNO;
29963
29964 /* Be conservative and always set this, at least for now. */
29965 crtl->uses_pic_offset_table = 1;
29966
29967 #if TARGET_MACHO
29968 /* For PIC code, set up a stub and collect the caller's address
29969 from r0, which is where the prologue puts it. */
29970 if (MACHOPIC_INDIRECT
29971 && crtl->uses_pic_offset_table)
29972 caller_addr_regno = 0;
29973 #endif
29974 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
29975 LCT_NORMAL, VOIDmode,
29976 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
29977 }
29978 }
29979
29980 /* Write function profiler code. */
29981
29982 void
29983 output_function_profiler (FILE *file, int labelno)
29984 {
29985 char buf[100];
29986
29987 switch (DEFAULT_ABI)
29988 {
29989 default:
29990 gcc_unreachable ();
29991
29992 case ABI_V4:
29993 if (!TARGET_32BIT)
29994 {
29995 warning (0, "no profiling of 64-bit code for this ABI");
29996 return;
29997 }
29998 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29999 fprintf (file, "\tmflr %s\n", reg_names[0]);
30000 if (NO_PROFILE_COUNTERS)
30001 {
30002 asm_fprintf (file, "\tstw %s,4(%s)\n",
30003 reg_names[0], reg_names[1]);
30004 }
30005 else if (TARGET_SECURE_PLT && flag_pic)
30006 {
30007 if (TARGET_LINK_STACK)
30008 {
30009 char name[32];
30010 get_ppc476_thunk_name (name);
30011 asm_fprintf (file, "\tbl %s\n", name);
30012 }
30013 else
30014 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30015 asm_fprintf (file, "\tstw %s,4(%s)\n",
30016 reg_names[0], reg_names[1]);
30017 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30018 asm_fprintf (file, "\taddis %s,%s,",
30019 reg_names[12], reg_names[12]);
30020 assemble_name (file, buf);
30021 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30022 assemble_name (file, buf);
30023 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30024 }
30025 else if (flag_pic == 1)
30026 {
30027 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30028 asm_fprintf (file, "\tstw %s,4(%s)\n",
30029 reg_names[0], reg_names[1]);
30030 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30031 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30032 assemble_name (file, buf);
30033 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30034 }
30035 else if (flag_pic > 1)
30036 {
30037 asm_fprintf (file, "\tstw %s,4(%s)\n",
30038 reg_names[0], reg_names[1]);
30039 /* Now, we need to get the address of the label. */
30040 if (TARGET_LINK_STACK)
30041 {
30042 char name[32];
30043 get_ppc476_thunk_name (name);
30044 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30045 assemble_name (file, buf);
30046 fputs ("-.\n1:", file);
30047 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30048 asm_fprintf (file, "\taddi %s,%s,4\n",
30049 reg_names[11], reg_names[11]);
30050 }
30051 else
30052 {
30053 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30054 assemble_name (file, buf);
30055 fputs ("-.\n1:", file);
30056 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30057 }
30058 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30059 reg_names[0], reg_names[11]);
30060 asm_fprintf (file, "\tadd %s,%s,%s\n",
30061 reg_names[0], reg_names[0], reg_names[11]);
30062 }
30063 else
30064 {
30065 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30066 assemble_name (file, buf);
30067 fputs ("@ha\n", file);
30068 asm_fprintf (file, "\tstw %s,4(%s)\n",
30069 reg_names[0], reg_names[1]);
30070 asm_fprintf (file, "\tla %s,", reg_names[0]);
30071 assemble_name (file, buf);
30072 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30073 }
30074
30075 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30076 fprintf (file, "\tbl %s%s\n",
30077 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30078 break;
30079
30080 case ABI_AIX:
30081 case ABI_ELFv2:
30082 case ABI_DARWIN:
30083 /* Don't do anything, done in output_profile_hook (). */
30084 break;
30085 }
30086 }
30087
30088 \f
30089
30090 /* The following variable holds the last issued insn. */
30091
30092 static rtx_insn *last_scheduled_insn;
30093
30094 /* The following variable helps to balance issuing of load and
30095 store instructions. */
30096
30097 static int load_store_pendulum;
30098
30099 /* The following variable helps pair divide insns during scheduling. */
30100 static int divide_cnt;
30101 /* The following variable helps pair and alternate vector and vector load
30102 insns during scheduling. */
30103 static int vec_pairing;
30104
30105
30106 /* Power4 load update and store update instructions are cracked into a
30107 load or store and an integer insn which are executed in the same cycle.
30108 Branches have their own dispatch slot which does not count against the
30109 GCC issue rate, but it changes the program flow so there are no other
30110 instructions to issue in this cycle. */
30111
30112 static int
30113 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30114 {
30115 last_scheduled_insn = insn;
30116 if (GET_CODE (PATTERN (insn)) == USE
30117 || GET_CODE (PATTERN (insn)) == CLOBBER)
30118 {
30119 cached_can_issue_more = more;
30120 return cached_can_issue_more;
30121 }
30122
30123 if (insn_terminates_group_p (insn, current_group))
30124 {
30125 cached_can_issue_more = 0;
30126 return cached_can_issue_more;
30127 }
30128
30129 /* If the insn has no reservation but we got here anyway, leave the issue count unchanged. */
30130 if (recog_memoized (insn) < 0)
30131 return more;
30132
30133 if (rs6000_sched_groups)
30134 {
30135 if (is_microcoded_insn (insn))
30136 cached_can_issue_more = 0;
30137 else if (is_cracked_insn (insn))
30138 cached_can_issue_more = more > 2 ? more - 2 : 0;
30139 else
30140 cached_can_issue_more = more - 1;
30141
30142 return cached_can_issue_more;
30143 }
30144
30145 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30146 return 0;
30147
30148 cached_can_issue_more = more - 1;
30149 return cached_can_issue_more;
30150 }
30151
30152 static int
30153 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30154 {
30155 int r = rs6000_variable_issue_1 (insn, more);
30156 if (verbose)
30157 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30158 return r;
30159 }
30160
30161 /* Adjust the cost of a scheduling dependency. Return the new cost of
30162 the dependency of kind DEP_TYPE between INSN and DEP_INSN. COST is the current cost. */
30163
30164 static int
30165 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30166 unsigned int)
30167 {
30168 enum attr_type attr_type;
30169
30170 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30171 return cost;
30172
30173 switch (dep_type)
30174 {
30175 case REG_DEP_TRUE:
30176 {
30177 /* Data dependency; DEP_INSN writes a register that INSN reads
30178 some cycles later. */
30179
30180 /* Separate a load from a narrower, dependent store. */
30181 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30182 && GET_CODE (PATTERN (insn)) == SET
30183 && GET_CODE (PATTERN (dep_insn)) == SET
30184 && MEM_P (XEXP (PATTERN (insn), 1))
30185 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30186 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30187 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30188 return cost + 14;
30189
30190 attr_type = get_attr_type (insn);
30191
30192 switch (attr_type)
30193 {
30194 case TYPE_JMPREG:
30195 /* Tell the first scheduling pass about the latency between
30196 a mtctr and bctr (and mtlr and br/blr). The first
30197 scheduling pass will not know about this latency since
30198 the mtctr instruction, which has the latency associated
30199 to it, will be generated by reload. */
30200 return 4;
30201 case TYPE_BRANCH:
30202 /* Leave some extra cycles between a compare and its
30203 dependent branch, to inhibit expensive mispredicts. */
30204 if ((rs6000_tune == PROCESSOR_PPC603
30205 || rs6000_tune == PROCESSOR_PPC604
30206 || rs6000_tune == PROCESSOR_PPC604e
30207 || rs6000_tune == PROCESSOR_PPC620
30208 || rs6000_tune == PROCESSOR_PPC630
30209 || rs6000_tune == PROCESSOR_PPC750
30210 || rs6000_tune == PROCESSOR_PPC7400
30211 || rs6000_tune == PROCESSOR_PPC7450
30212 || rs6000_tune == PROCESSOR_PPCE5500
30213 || rs6000_tune == PROCESSOR_PPCE6500
30214 || rs6000_tune == PROCESSOR_POWER4
30215 || rs6000_tune == PROCESSOR_POWER5
30216 || rs6000_tune == PROCESSOR_POWER7
30217 || rs6000_tune == PROCESSOR_POWER8
30218 || rs6000_tune == PROCESSOR_POWER9
30219 || rs6000_tune == PROCESSOR_CELL)
30220 && recog_memoized (dep_insn)
30221 && (INSN_CODE (dep_insn) >= 0))
30222
30223 switch (get_attr_type (dep_insn))
30224 {
30225 case TYPE_CMP:
30226 case TYPE_FPCOMPARE:
30227 case TYPE_CR_LOGICAL:
30228 return cost + 2;
30229 case TYPE_EXTS:
30230 case TYPE_MUL:
30231 if (get_attr_dot (dep_insn) == DOT_YES)
30232 return cost + 2;
30233 else
30234 break;
30235 case TYPE_SHIFT:
30236 if (get_attr_dot (dep_insn) == DOT_YES
30237 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30238 return cost + 2;
30239 else
30240 break;
30241 default:
30242 break;
30243 }
30244 break;
30245
30246 case TYPE_STORE:
30247 case TYPE_FPSTORE:
30248 if ((rs6000_tune == PROCESSOR_POWER6)
30249 && recog_memoized (dep_insn)
30250 && (INSN_CODE (dep_insn) >= 0))
30251 {
30252
30253 if (GET_CODE (PATTERN (insn)) != SET)
30254 /* If this happens, we have to extend this to schedule
30255 optimally. Return default for now. */
30256 return cost;
30257
30258 /* Adjust the cost for the case where the value written
30259 by a fixed point operation is used as the address
30260 gen value on a store. */
30261 switch (get_attr_type (dep_insn))
30262 {
30263 case TYPE_LOAD:
30264 case TYPE_CNTLZ:
30265 {
30266 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30267 return get_attr_sign_extend (dep_insn)
30268 == SIGN_EXTEND_YES ? 6 : 4;
30269 break;
30270 }
30271 case TYPE_SHIFT:
30272 {
30273 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30274 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30275 6 : 3;
30276 break;
30277 }
30278 case TYPE_INTEGER:
30279 case TYPE_ADD:
30280 case TYPE_LOGICAL:
30281 case TYPE_EXTS:
30282 case TYPE_INSERT:
30283 {
30284 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30285 return 3;
30286 break;
30287 }
30288 case TYPE_STORE:
30289 case TYPE_FPLOAD:
30290 case TYPE_FPSTORE:
30291 {
30292 if (get_attr_update (dep_insn) == UPDATE_YES
30293 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30294 return 3;
30295 break;
30296 }
30297 case TYPE_MUL:
30298 {
30299 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30300 return 17;
30301 break;
30302 }
30303 case TYPE_DIV:
30304 {
30305 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30306 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30307 break;
30308 }
30309 default:
30310 break;
30311 }
30312 }
30313 break;
30314
30315 case TYPE_LOAD:
30316 if ((rs6000_tune == PROCESSOR_POWER6)
30317 && recog_memoized (dep_insn)
30318 && (INSN_CODE (dep_insn) >= 0))
30319 {
30320
30321 /* Adjust the cost for the case where the value written
30322 by a fixed point instruction is used within the address
30323 gen portion of a subsequent load(u)(x) */
30324 switch (get_attr_type (dep_insn))
30325 {
30326 case TYPE_LOAD:
30327 case TYPE_CNTLZ:
30328 {
30329 if (set_to_load_agen (dep_insn, insn))
30330 return get_attr_sign_extend (dep_insn)
30331 == SIGN_EXTEND_YES ? 6 : 4;
30332 break;
30333 }
30334 case TYPE_SHIFT:
30335 {
30336 if (set_to_load_agen (dep_insn, insn))
30337 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30338 6 : 3;
30339 break;
30340 }
30341 case TYPE_INTEGER:
30342 case TYPE_ADD:
30343 case TYPE_LOGICAL:
30344 case TYPE_EXTS:
30345 case TYPE_INSERT:
30346 {
30347 if (set_to_load_agen (dep_insn, insn))
30348 return 3;
30349 break;
30350 }
30351 case TYPE_STORE:
30352 case TYPE_FPLOAD:
30353 case TYPE_FPSTORE:
30354 {
30355 if (get_attr_update (dep_insn) == UPDATE_YES
30356 && set_to_load_agen (dep_insn, insn))
30357 return 3;
30358 break;
30359 }
30360 case TYPE_MUL:
30361 {
30362 if (set_to_load_agen (dep_insn, insn))
30363 return 17;
30364 break;
30365 }
30366 case TYPE_DIV:
30367 {
30368 if (set_to_load_agen (dep_insn, insn))
30369 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30370 break;
30371 }
30372 default:
30373 break;
30374 }
30375 }
30376 break;
30377
30378 case TYPE_FPLOAD:
30379 if ((rs6000_tune == PROCESSOR_POWER6)
30380 && get_attr_update (insn) == UPDATE_NO
30381 && recog_memoized (dep_insn)
30382 && (INSN_CODE (dep_insn) >= 0)
30383 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30384 return 2;
30385
30386 default:
30387 break;
30388 }
30389
30390 /* Fall out to return default cost. */
30391 }
30392 break;
30393
30394 case REG_DEP_OUTPUT:
30395 /* Output dependency; DEP_INSN writes a register that INSN writes some
30396 cycles later. */
30397 if ((rs6000_tune == PROCESSOR_POWER6)
30398 && recog_memoized (dep_insn)
30399 && (INSN_CODE (dep_insn) >= 0))
30400 {
30401 attr_type = get_attr_type (insn);
30402
30403 switch (attr_type)
30404 {
30405 case TYPE_FP:
30406 case TYPE_FPSIMPLE:
30407 if (get_attr_type (dep_insn) == TYPE_FP
30408 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30409 return 1;
30410 break;
30411 case TYPE_FPLOAD:
30412 if (get_attr_update (insn) == UPDATE_NO
30413 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30414 return 2;
30415 break;
30416 default:
30417 break;
30418 }
30419 }
30420 /* Fall through, no cost for output dependency. */
30421 /* FALLTHRU */
30422
30423 case REG_DEP_ANTI:
30424 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30425 cycles later. */
30426 return 0;
30427
30428 default:
30429 gcc_unreachable ();
30430 }
30431
30432 return cost;
30433 }
30434
30435 /* Debug version of rs6000_adjust_cost. */
30436
30437 static int
30438 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30439 int cost, unsigned int dw)
30440 {
30441 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30442
30443 if (ret != cost)
30444 {
30445 const char *dep;
30446
30447 switch (dep_type)
30448 {
30449 default: dep = "unknown dependency"; break;
30450 case REG_DEP_TRUE: dep = "data dependency"; break;
30451 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30452 case REG_DEP_ANTI: dep = "anti dependency"; break;
30453 }
30454
30455 fprintf (stderr,
30456 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30457 "%s, insn:\n", ret, cost, dep);
30458
30459 debug_rtx (insn);
30460 }
30461
30462 return ret;
30463 }
30464
30465 /* Return true if INSN is microcoded, false otherwise. */
30467
30468 static bool
30469 is_microcoded_insn (rtx_insn *insn)
30470 {
30471 if (!insn || !NONDEBUG_INSN_P (insn)
30472 || GET_CODE (PATTERN (insn)) == USE
30473 || GET_CODE (PATTERN (insn)) == CLOBBER)
30474 return false;
30475
30476 if (rs6000_tune == PROCESSOR_CELL)
30477 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30478
30479 if (rs6000_sched_groups
30480 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30481 {
30482 enum attr_type type = get_attr_type (insn);
30483 if ((type == TYPE_LOAD
30484 && get_attr_update (insn) == UPDATE_YES
30485 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30486 || ((type == TYPE_LOAD || type == TYPE_STORE)
30487 && get_attr_update (insn) == UPDATE_YES
30488 && get_attr_indexed (insn) == INDEXED_YES)
30489 || type == TYPE_MFCR)
30490 return true;
30491 }
30492
30493 return false;
30494 }
30495
30496 /* The function returns true if INSN is cracked into 2 instructions
30497 by the processor (and therefore occupies 2 issue slots). */
30498
30499 static bool
30500 is_cracked_insn (rtx_insn *insn)
30501 {
30502 if (!insn || !NONDEBUG_INSN_P (insn)
30503 || GET_CODE (PATTERN (insn)) == USE
30504 || GET_CODE (PATTERN (insn)) == CLOBBER)
30505 return false;
30506
30507 if (rs6000_sched_groups
30508 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30509 {
30510 enum attr_type type = get_attr_type (insn);
30511 if ((type == TYPE_LOAD
30512 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30513 && get_attr_update (insn) == UPDATE_NO)
30514 || (type == TYPE_LOAD
30515 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30516 && get_attr_update (insn) == UPDATE_YES
30517 && get_attr_indexed (insn) == INDEXED_NO)
30518 || (type == TYPE_STORE
30519 && get_attr_update (insn) == UPDATE_YES
30520 && get_attr_indexed (insn) == INDEXED_NO)
30521 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30522 && get_attr_update (insn) == UPDATE_YES)
30523 || (type == TYPE_CR_LOGICAL
30524 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30525 || (type == TYPE_EXTS
30526 && get_attr_dot (insn) == DOT_YES)
30527 || (type == TYPE_SHIFT
30528 && get_attr_dot (insn) == DOT_YES
30529 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30530 || (type == TYPE_MUL
30531 && get_attr_dot (insn) == DOT_YES)
30532 || type == TYPE_DIV
30533 || (type == TYPE_INSERT
30534 && get_attr_size (insn) == SIZE_32))
30535 return true;
30536 }
30537
30538 return false;
30539 }
30540
30541 /* The function returns true if INSN can be issued only from
30542 the branch slot. */
30543
30544 static bool
30545 is_branch_slot_insn (rtx_insn *insn)
30546 {
30547 if (!insn || !NONDEBUG_INSN_P (insn)
30548 || GET_CODE (PATTERN (insn)) == USE
30549 || GET_CODE (PATTERN (insn)) == CLOBBER)
30550 return false;
30551
30552 if (rs6000_sched_groups)
30553 {
30554 enum attr_type type = get_attr_type (insn);
30555 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30556 return true;
30557 return false;
30558 }
30559
30560 return false;
30561 }
30562
30563 /* Return true if OUT_INSN sets a value that is used in the
30564 address generation computation of IN_INSN. */
30565 static bool
30566 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30567 {
30568 rtx out_set, in_set;
30569
30570 /* For performance reasons, only handle the simple case where
30571 both insns are a single_set. */
30572 out_set = single_set (out_insn);
30573 if (out_set)
30574 {
30575 in_set = single_set (in_insn);
30576 if (in_set)
30577 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30578 }
30579
30580 return false;
30581 }
30582
30583 /* Try to determine base/offset/size parts of the given MEM.
30584 Return true if successful, false if any of the values couldn't
30585 be determined.
30586
30587 This function only looks for REG or REG+CONST address forms.
30588 REG+REG address form will return false. */
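/* For example, a 4-byte load from 16(r9) gives *BASE == r9,
   *OFFSET == 16 and *SIZE == 4; for a PRE_MODIFY address the
   new-value expression is decomposed instead.  */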
30589
30590 static bool
30591 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30592 HOST_WIDE_INT *size)
30593 {
30594 rtx addr_rtx;
30595 if (MEM_SIZE_KNOWN_P (mem))
30596 *size = MEM_SIZE (mem);
30597 else
30598 return false;
30599
30600 addr_rtx = XEXP (mem, 0);
30601 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30602 addr_rtx = XEXP (addr_rtx, 1);
30603
30604 *offset = 0;
30605 while (GET_CODE (addr_rtx) == PLUS
30606 && CONST_INT_P (XEXP (addr_rtx, 1)))
30607 {
30608 *offset += INTVAL (XEXP (addr_rtx, 1));
30609 addr_rtx = XEXP (addr_rtx, 0);
30610 }
30611 if (!REG_P (addr_rtx))
30612 return false;
30613
30614 *base = addr_rtx;
30615 return true;
30616 }
30617
30618 /* Return true if the target storage location of MEM1 is adjacent
30619 to the target storage location of MEM2. */
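/* For example, an 8-byte store at 0(r9) and an 8-byte store at 8(r9)
   are adjacent; accesses off different base registers are never
   considered adjacent, even if the addresses happen to coincide.  */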
30621
30622 static bool
30623 adjacent_mem_locations (rtx mem1, rtx mem2)
30624 {
30625 rtx reg1, reg2;
30626 HOST_WIDE_INT off1, size1, off2, size2;
30627
30628 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30629 && get_memref_parts (mem2, &reg2, &off2, &size2))
30630 return ((REGNO (reg1) == REGNO (reg2))
30631 && ((off1 + size1 == off2)
30632 || (off2 + size2 == off1)));
30633
30634 return false;
30635 }
30636
30637 /* This function returns true if it can be determined that the two MEM
30638 locations overlap by at least 1 byte based on base reg/offset/size. */
30639
30640 static bool
30641 mem_locations_overlap (rtx mem1, rtx mem2)
30642 {
30643 rtx reg1, reg2;
30644 HOST_WIDE_INT off1, size1, off2, size2;
30645
30646 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30647 && get_memref_parts (mem2, &reg2, &off2, &size2))
30648 return ((REGNO (reg1) == REGNO (reg2))
30649 && (((off1 <= off2) && (off1 + size1 > off2))
30650 || ((off2 <= off1) && (off2 + size2 > off1))));
30651
30652 return false;
30653 }
30654
30655 /* A C statement (sans semicolon) to update the integer scheduling
30656 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30657 INSN earlier, reduce the priority to execute INSN later. Do not
30658 define this macro if you do not need to adjust the scheduling
30659 priorities of insns. */
30660
30661 static int
30662 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30663 {
30664 rtx load_mem, str_mem;
30665 /* On machines (like the 750) which have asymmetric integer units,
30666 where one integer unit can do multiply and divides and the other
30667 can't, reduce the priority of multiply/divide so it is scheduled
30668 before other integer operations. */
30669
30670 #if 0
30671 if (! INSN_P (insn))
30672 return priority;
30673
30674 if (GET_CODE (PATTERN (insn)) == USE)
30675 return priority;
30676
30677 switch (rs6000_tune) {
30678 case PROCESSOR_PPC750:
30679 switch (get_attr_type (insn))
30680 {
30681 default:
30682 break;
30683
30684 case TYPE_MUL:
30685 case TYPE_DIV:
30686 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30687 priority, priority);
30688 if (priority >= 0 && priority < 0x01000000)
30689 priority >>= 3;
30690 break;
30691 }
30692 }
30693 #endif
30694
30695 if (insn_must_be_first_in_group (insn)
30696 && reload_completed
30697 && current_sched_info->sched_max_insns_priority
30698 && rs6000_sched_restricted_insns_priority)
30699 {
30700
30701 /* Prioritize insns that can be dispatched only in the first
30702 dispatch slot. */
30703 if (rs6000_sched_restricted_insns_priority == 1)
30704 /* Attach highest priority to insn. This means that in
30705 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30706 precede 'priority' (critical path) considerations. */
30707 return current_sched_info->sched_max_insns_priority;
30708 else if (rs6000_sched_restricted_insns_priority == 2)
30709 /* Increase priority of insn by a minimal amount. This means that in
30710 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30711 considerations precede dispatch-slot restriction considerations. */
30712 return (priority + 1);
30713 }
30714
30715 if (rs6000_tune == PROCESSOR_POWER6
30716 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30717 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30718 /* Attach highest priority to insn if the scheduler has just issued two
30719 stores and this instruction is a load, or two loads and this instruction
30720 is a store. Power6 wants loads and stores scheduled alternately
30721 when possible */
30722 return current_sched_info->sched_max_insns_priority;
30723
30724 return priority;
30725 }
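/* Example (a sketch of the logic above, not observed output): with
   rs6000_sched_restricted_insns_priority == 2, a must-be-first-in-group
   insn of priority P is returned as P + 1, so it wins ties against
   equal-priority insns without overriding critical-path ordering.  */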
30726
30727 /* Return true if the instruction is nonpipelined on the Cell. */
30728 static bool
30729 is_nonpipeline_insn (rtx_insn *insn)
30730 {
30731 enum attr_type type;
30732 if (!insn || !NONDEBUG_INSN_P (insn)
30733 || GET_CODE (PATTERN (insn)) == USE
30734 || GET_CODE (PATTERN (insn)) == CLOBBER)
30735 return false;
30736
30737 type = get_attr_type (insn);
30738 if (type == TYPE_MUL
30739 || type == TYPE_DIV
30740 || type == TYPE_SDIV
30741 || type == TYPE_DDIV
30742 || type == TYPE_SSQRT
30743 || type == TYPE_DSQRT
30744 || type == TYPE_MFCR
30745 || type == TYPE_MFCRF
30746 || type == TYPE_MFJMPR)
30747 {
30748 return true;
30749 }
30750 return false;
30751 }
30752
30753
30754 /* Return how many instructions the machine can issue per cycle. */
30755
30756 static int
30757 rs6000_issue_rate (void)
30758 {
30759 /* Unless scheduling for register pressure, use issue rate of 1 for
30760 first scheduling pass to decrease degradation. */
30761 if (!reload_completed && !flag_sched_pressure)
30762 return 1;
30763
30764 switch (rs6000_tune) {
30765 case PROCESSOR_RS64A:
30766 case PROCESSOR_PPC601: /* ? */
30767 case PROCESSOR_PPC7450:
30768 return 3;
30769 case PROCESSOR_PPC440:
30770 case PROCESSOR_PPC603:
30771 case PROCESSOR_PPC750:
30772 case PROCESSOR_PPC7400:
30773 case PROCESSOR_PPC8540:
30774 case PROCESSOR_PPC8548:
30775 case PROCESSOR_CELL:
30776 case PROCESSOR_PPCE300C2:
30777 case PROCESSOR_PPCE300C3:
30778 case PROCESSOR_PPCE500MC:
30779 case PROCESSOR_PPCE500MC64:
30780 case PROCESSOR_PPCE5500:
30781 case PROCESSOR_PPCE6500:
30782 case PROCESSOR_TITAN:
30783 return 2;
30784 case PROCESSOR_PPC476:
30785 case PROCESSOR_PPC604:
30786 case PROCESSOR_PPC604e:
30787 case PROCESSOR_PPC620:
30788 case PROCESSOR_PPC630:
30789 return 4;
30790 case PROCESSOR_POWER4:
30791 case PROCESSOR_POWER5:
30792 case PROCESSOR_POWER6:
30793 case PROCESSOR_POWER7:
30794 return 5;
30795 case PROCESSOR_POWER8:
30796 return 7;
30797 case PROCESSOR_POWER9:
30798 return 6;
30799 default:
30800 return 1;
30801 }
30802 }
30803
30804 /* Return how many instructions to look ahead for better insn
30805 scheduling. */
30806
30807 static int
30808 rs6000_use_sched_lookahead (void)
30809 {
30810 switch (rs6000_tune)
30811 {
30812 case PROCESSOR_PPC8540:
30813 case PROCESSOR_PPC8548:
30814 return 4;
30815
30816 case PROCESSOR_CELL:
30817 return (reload_completed ? 8 : 0);
30818
30819 default:
30820 return 0;
30821 }
30822 }
30823
30824 /* We are choosing insn from the ready queue. Return zero if INSN can be
30825 chosen. */
30826 static int
30827 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30828 {
30829 if (ready_index == 0)
30830 return 0;
30831
30832 if (rs6000_tune != PROCESSOR_CELL)
30833 return 0;
30834
30835 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30836
30837 if (!reload_completed
30838 || is_nonpipeline_insn (insn)
30839 || is_microcoded_insn (insn))
30840 return 1;
30841
30842 return 0;
30843 }
30844
30845 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30846 and return true. */
30847
30848 static bool
30849 find_mem_ref (rtx pat, rtx *mem_ref)
30850 {
30851 const char * fmt;
30852 int i, j;
30853
30854 /* stack_tie does not produce any real memory traffic. */
30855 if (tie_operand (pat, VOIDmode))
30856 return false;
30857
30858 if (MEM_P (pat))
30859 {
30860 *mem_ref = pat;
30861 return true;
30862 }
30863
30864 /* Recursively process the pattern. */
30865 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30866
30867 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30868 {
30869 if (fmt[i] == 'e')
30870 {
30871 if (find_mem_ref (XEXP (pat, i), mem_ref))
30872 return true;
30873 }
30874 else if (fmt[i] == 'E')
30875 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30876 {
30877 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30878 return true;
30879 }
30880 }
30881
30882 return false;
30883 }
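/* For example, given PAT = (set (reg:SI r) (plus:SI (mem:SI a) (const_int 4))),
   the recursion over the 'e' operands finds the inner MEM and stores it
   in *MEM_REF; a stack_tie pattern is rejected up front.  */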
30884
30885 /* Determine if PAT is a PATTERN of a load insn. */
30886
30887 static bool
30888 is_load_insn1 (rtx pat, rtx *load_mem)
30889 {
30890 if (!pat)
30891 return false;
30892
30893 if (GET_CODE (pat) == SET)
30894 return find_mem_ref (SET_SRC (pat), load_mem);
30895
30896 if (GET_CODE (pat) == PARALLEL)
30897 {
30898 int i;
30899
30900 for (i = 0; i < XVECLEN (pat, 0); i++)
30901 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30902 return true;
30903 }
30904
30905 return false;
30906 }
30907
30908 /* Determine if INSN loads from memory. */
30909
30910 static bool
30911 is_load_insn (rtx insn, rtx *load_mem)
30912 {
30913 if (!insn || !INSN_P (insn))
30914 return false;
30915
30916 if (CALL_P (insn))
30917 return false;
30918
30919 return is_load_insn1 (PATTERN (insn), load_mem);
30920 }
30921
30922 /* Determine if PAT is a PATTERN of a store insn. */
30923
30924 static bool
30925 is_store_insn1 (rtx pat, rtx *str_mem)
30926 {
30927 if (!pat)
30928 return false;
30929
30930 if (GET_CODE (pat) == SET)
30931 return find_mem_ref (SET_DEST (pat), str_mem);
30932
30933 if (GET_CODE (pat) == PARALLEL)
30934 {
30935 int i;
30936
30937 for (i = 0; i < XVECLEN (pat, 0); i++)
30938 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30939 return true;
30940 }
30941
30942 return false;
30943 }
30944
30945 /* Determine if INSN stores to memory. */
30946
30947 static bool
30948 is_store_insn (rtx insn, rtx *str_mem)
30949 {
30950 if (!insn || !INSN_P (insn))
30951 return false;
30952
30953 return is_store_insn1 (PATTERN (insn), str_mem);
30954 }
30955
30956 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30957
30958 static bool
30959 is_power9_pairable_vec_type (enum attr_type type)
30960 {
30961 switch (type)
30962 {
30963 case TYPE_VECSIMPLE:
30964 case TYPE_VECCOMPLEX:
30965 case TYPE_VECDIV:
30966 case TYPE_VECCMP:
30967 case TYPE_VECPERM:
30968 case TYPE_VECFLOAT:
30969 case TYPE_VECFDIV:
30970 case TYPE_VECDOUBLE:
30971 return true;
30972 default:
30973 break;
30974 }
30975 return false;
30976 }
30977
30978 /* Returns whether the dependence between INSN and NEXT is considered
30979 costly by the given target. */
30980
30981 static bool
30982 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
30983 {
30984 rtx insn;
30985 rtx next;
30986 rtx load_mem, str_mem;
30987
30988 /* If the flag is not enabled - no dependence is considered costly;
30989 allow all dependent insns in the same group.
30990 This is the most aggressive option. */
30991 if (rs6000_sched_costly_dep == no_dep_costly)
30992 return false;
30993
30994 /* If the flag is set to 1 - a dependence is always considered costly;
30995 do not allow dependent instructions in the same group.
30996 This is the most conservative option. */
30997 if (rs6000_sched_costly_dep == all_deps_costly)
30998 return true;
30999
31000 insn = DEP_PRO (dep);
31001 next = DEP_CON (dep);
31002
31003 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31004 && is_load_insn (next, &load_mem)
31005 && is_store_insn (insn, &str_mem))
31006 /* Prevent load after store in the same group. */
31007 return true;
31008
31009 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31010 && is_load_insn (next, &load_mem)
31011 && is_store_insn (insn, &str_mem)
31012 && DEP_TYPE (dep) == REG_DEP_TRUE
31013 && mem_locations_overlap(str_mem, load_mem))
31014 /* Prevent load after store in the same group if it is a true
31015 dependence. */
31016 return true;
31017
31018 /* The flag is set to X; dependences with latency >= X are considered costly,
31019 and will not be scheduled in the same group. */
31020 if (rs6000_sched_costly_dep <= max_dep_latency
31021 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31022 return true;
31023
31024 return false;
31025 }
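/* Numeric example (hypothetical): if rs6000_sched_costly_dep is 3, a
   dependence with cost 4 between insns at distance 0 satisfies
   cost - distance >= 3 and is treated as costly, so the two insns are
   kept in separate dispatch groups.  */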
31026
31027 /* Return the next insn after INSN that is found before TAIL is reached,
31028 skipping any "non-active" insns - insns that will not actually occupy
31029 an issue slot. Return NULL_RTX if such an insn is not found. */
31030
31031 static rtx_insn *
31032 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31033 {
31034 if (insn == NULL_RTX || insn == tail)
31035 return NULL;
31036
31037 while (1)
31038 {
31039 insn = NEXT_INSN (insn);
31040 if (insn == NULL_RTX || insn == tail)
31041 return NULL;
31042
31043 if (CALL_P (insn)
31044 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31045 || (NONJUMP_INSN_P (insn)
31046 && GET_CODE (PATTERN (insn)) != USE
31047 && GET_CODE (PATTERN (insn)) != CLOBBER
31048 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31049 break;
31050 }
31051 return insn;
31052 }
31053
31054 /* Do Power9-specific sched_reorder2 reordering of the ready list.  */
31055
31056 static int
31057 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31058 {
31059 int pos;
31060 int i;
31061 rtx_insn *tmp;
31062 enum attr_type type, type2;
31063
31064 type = get_attr_type (last_scheduled_insn);
31065
31066 /* Try to issue fixed point divides back-to-back in pairs so they will be
31067 routed to separate execution units and execute in parallel. */
31068 if (type == TYPE_DIV && divide_cnt == 0)
31069 {
31070 /* First divide has been scheduled. */
31071 divide_cnt = 1;
31072
31073 /* Scan the ready list looking for another divide, if found move it
31074 to the end of the list so it is chosen next. */
31075 pos = lastpos;
31076 while (pos >= 0)
31077 {
31078 if (recog_memoized (ready[pos]) >= 0
31079 && get_attr_type (ready[pos]) == TYPE_DIV)
31080 {
31081 tmp = ready[pos];
31082 for (i = pos; i < lastpos; i++)
31083 ready[i] = ready[i + 1];
31084 ready[lastpos] = tmp;
31085 break;
31086 }
31087 pos--;
31088 }
31089 }
31090 else
31091 {
31092 /* Last insn was the 2nd divide or not a divide, reset the counter. */
31093 divide_cnt = 0;
31094
31095 /* The best dispatch throughput for vector and vector load insns can be
31096 achieved by interleaving a vector and vector load such that they'll
31097 dispatch to the same superslice. If this pairing cannot be achieved
31098 then it is best to pair vector insns together and vector load insns
31099 together.
31100
31101 To aid in this pairing, vec_pairing maintains the current state with
31102 the following values:
31103
31104 0 : Initial state, no vecload/vector pairing has been started.
31105
31106 1 : A vecload or vector insn has been issued and a candidate for
31107 pairing has been found and moved to the end of the ready
31108 list. */
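/* Example walk-through (hypothetical ready list): after a vecload
   issues with vec_pairing == 0, the first pairable vector insn found
   below is moved to ready[lastpos] so it issues next; failing that,
   the first vecload seen is moved there instead, and vec_pairing is
   set to 1 until the pair completes.  */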
31109 if (type == TYPE_VECLOAD)
31110 {
31111 /* Issued a vecload. */
31112 if (vec_pairing == 0)
31113 {
31114 int vecload_pos = -1;
31115 /* We issued a single vecload, look for a vector insn to pair it
31116 with. If one isn't found, try to pair another vecload. */
31117 pos = lastpos;
31118 while (pos >= 0)
31119 {
31120 if (recog_memoized (ready[pos]) >= 0)
31121 {
31122 type2 = get_attr_type (ready[pos]);
31123 if (is_power9_pairable_vec_type (type2))
31124 {
31125 /* Found a vector insn to pair with, move it to the
31126 end of the ready list so it is scheduled next. */
31127 tmp = ready[pos];
31128 for (i = pos; i < lastpos; i++)
31129 ready[i] = ready[i + 1];
31130 ready[lastpos] = tmp;
31131 vec_pairing = 1;
31132 return cached_can_issue_more;
31133 }
31134 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31135 /* Remember position of first vecload seen. */
31136 vecload_pos = pos;
31137 }
31138 pos--;
31139 }
31140 if (vecload_pos >= 0)
31141 {
31142 /* Didn't find a vector to pair with but did find a vecload,
31143 move it to the end of the ready list. */
31144 tmp = ready[vecload_pos];
31145 for (i = vecload_pos; i < lastpos; i++)
31146 ready[i] = ready[i + 1];
31147 ready[lastpos] = tmp;
31148 vec_pairing = 1;
31149 return cached_can_issue_more;
31150 }
31151 }
31152 }
31153 else if (is_power9_pairable_vec_type (type))
31154 {
31155 /* Issued a vector operation. */
31156 if (vec_pairing == 0)
31157 {
31158 int vec_pos = -1;
31159 /* We issued a single vector insn, look for a vecload to pair it
31160 with. If one isn't found, try to pair another vector. */
31161 pos = lastpos;
31162 while (pos >= 0)
31163 {
31164 if (recog_memoized (ready[pos]) >= 0)
31165 {
31166 type2 = get_attr_type (ready[pos]);
31167 if (type2 == TYPE_VECLOAD)
31168 {
31169 /* Found a vecload insn to pair with, move it to the
31170 end of the ready list so it is scheduled next. */
31171 tmp = ready[pos];
31172 for (i = pos; i < lastpos; i++)
31173 ready[i] = ready[i + 1];
31174 ready[lastpos] = tmp;
31175 vec_pairing = 1;
31176 return cached_can_issue_more;
31177 }
31178 else if (is_power9_pairable_vec_type (type2)
31179 && vec_pos == -1)
31180 /* Remember position of first vector insn seen. */
31181 vec_pos = pos;
31182 }
31183 pos--;
31184 }
31185 if (vec_pos >= 0)
31186 {
31187 /* Didn't find a vecload to pair with but did find a vector
31188 insn, move it to the end of the ready list. */
31189 tmp = ready[vec_pos];
31190 for (i = vec_pos; i < lastpos; i++)
31191 ready[i] = ready[i + 1];
31192 ready[lastpos] = tmp;
31193 vec_pairing = 1;
31194 return cached_can_issue_more;
31195 }
31196 }
31197 }
31198
31199 /* We've either finished a vec/vecload pair, couldn't find an insn to
31200 continue the current pair, or the last insn had nothing to do
31201 with pairing.  In any case, reset the state.  */
31202 vec_pairing = 0;
31203 }
31204
31205 return cached_can_issue_more;
31206 }
31207
31208 /* We are about to begin issuing insns for this clock cycle. */
31209
31210 static int
31211 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31212 rtx_insn **ready ATTRIBUTE_UNUSED,
31213 int *pn_ready ATTRIBUTE_UNUSED,
31214 int clock_var ATTRIBUTE_UNUSED)
31215 {
31216 int n_ready = *pn_ready;
31217
31218 if (sched_verbose)
31219 fprintf (dump, "// rs6000_sched_reorder :\n");
31220
31221 /* Reorder the ready list, if the second to last ready insn
31222 is a nonpipelined insn.  */
31223 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31224 {
31225 if (is_nonpipeline_insn (ready[n_ready - 1])
31226 && (recog_memoized (ready[n_ready - 2]) > 0))
31227 /* Simply swap first two insns. */
31228 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31229 }
31230
31231 if (rs6000_tune == PROCESSOR_POWER6)
31232 load_store_pendulum = 0;
31233
31234 return rs6000_issue_rate ();
31235 }
31236
31237 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31238
31239 static int
31240 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31241 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31242 {
31243 if (sched_verbose)
31244 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31245
31246 /* For Power6, we need to handle some special cases to try to keep the
31247 store queue from overflowing and triggering expensive flushes.
31248
31249 This code monitors how load and store instructions are being issued
31250 and skews the ready list one way or the other to increase the likelihood
31251 that a desired instruction is issued at the proper time.
31252
31253 A couple of things are done. First, we maintain a "load_store_pendulum"
31254 to track the current state of load/store issue.
31255
31256 - If the pendulum is at zero, then no loads or stores have been
31257 issued in the current cycle so we do nothing.
31258
31259 - If the pendulum is 1, then a single load has been issued in this
31260 cycle and we attempt to locate another load in the ready list to
31261 issue with it.
31262
31263 - If the pendulum is -2, then two stores have already been
31264 issued in this cycle, so we increase the priority of the first load
31265 in the ready list to increase its likelihood of being chosen first
31266 in the next cycle.
31267
31268 - If the pendulum is -1, then a single store has been issued in this
31269 cycle and we attempt to locate another store in the ready list to
31270 issue with it, preferring a store to an adjacent memory location to
31271 facilitate store pairing in the store queue.
31272
31273 - If the pendulum is 2, then two loads have already been
31274 issued in this cycle, so we increase the priority of the first store
31275 in the ready list to increase its likelihood of being chosen first
31276 in the next cycle.
31277
31278 - If the pendulum < -2 or > 2, then do nothing.
31279
31280 Note: This code covers the most common scenarios. There exist non
31281 load/store instructions which make use of the LSU and which
31282 would need to be accounted for to strictly model the behavior
31283 of the machine. Those instructions are currently unaccounted
31284 for to help minimize compile time overhead of this code.
31285 */
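/* Worked example (hypothetical issue sequence): starting from 0, a
   store swings the pendulum to -1 and we look for a second store to
   pair with it, preferring an adjacent one; a second store swings it
   to -2, at which point the first load on the ready list gets a
   priority boost so loads are favored in the next cycle.  */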
31286 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31287 {
31288 int pos;
31289 int i;
31290 rtx_insn *tmp;
31291 rtx load_mem, str_mem;
31292
31293 if (is_store_insn (last_scheduled_insn, &str_mem))
31294 /* Issuing a store, swing the load_store_pendulum to the left */
31295 load_store_pendulum--;
31296 else if (is_load_insn (last_scheduled_insn, &load_mem))
31297 /* Issuing a load, swing the load_store_pendulum to the right */
31298 load_store_pendulum++;
31299 else
31300 return cached_can_issue_more;
31301
31302 /* If the pendulum is balanced, or there is only one instruction on
31303 the ready list, then all is well, so return. */
31304 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31305 return cached_can_issue_more;
31306
31307 if (load_store_pendulum == 1)
31308 {
31309 /* A load has been issued in this cycle. Scan the ready list
31310 for another load to issue with it */
31311 pos = *pn_ready-1;
31312
31313 while (pos >= 0)
31314 {
31315 if (is_load_insn (ready[pos], &load_mem))
31316 {
31317 /* Found a load.  Move it to the head of the ready list,
31318 and adjust its priority so that it is more likely to
31319 stay there.  */
31320 tmp = ready[pos];
31321 for (i = pos; i < *pn_ready - 1; i++)
31322 ready[i] = ready[i + 1];
31323 ready[*pn_ready-1] = tmp;
31324
31325 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31326 INSN_PRIORITY (tmp)++;
31327 break;
31328 }
31329 pos--;
31330 }
31331 }
31332 else if (load_store_pendulum == -2)
31333 {
31334 /* Two stores have been issued in this cycle. Increase the
31335 priority of the first load in the ready list to favor it for
31336 issuing in the next cycle. */
31337 pos = *pn_ready-1;
31338
31339 while (pos >= 0)
31340 {
31341 if (is_load_insn (ready[pos], &load_mem)
31342 && !sel_sched_p ()
31343 && INSN_PRIORITY_KNOWN (ready[pos]))
31344 {
31345 INSN_PRIORITY (ready[pos])++;
31346
31347 /* Adjust the pendulum to account for the fact that a load
31348 was found and increased in priority. This is to prevent
31349 increasing the priority of multiple loads */
31350 load_store_pendulum--;
31351
31352 break;
31353 }
31354 pos--;
31355 }
31356 }
31357 else if (load_store_pendulum == -1)
31358 {
31359 /* A store has been issued in this cycle. Scan the ready list for
31360 another store to issue with it, preferring a store to an adjacent
31361 memory location */
31362 int first_store_pos = -1;
31363
31364 pos = *pn_ready-1;
31365
31366 while (pos >= 0)
31367 {
31368 if (is_store_insn (ready[pos], &str_mem))
31369 {
31370 rtx str_mem2;
31371 /* Maintain the index of the first store found on the
31372 list */
31373 if (first_store_pos == -1)
31374 first_store_pos = pos;
31375
31376 if (is_store_insn (last_scheduled_insn, &str_mem2)
31377 && adjacent_mem_locations (str_mem, str_mem2))
31378 {
31379 /* Found an adjacent store.  Move it to the head of the
31380 ready list, and adjust its priority so that it is
31381 more likely to stay there.  */
31382 tmp = ready[pos];
31383 for (i = pos; i < *pn_ready - 1; i++)
31384 ready[i] = ready[i + 1];
31385 ready[*pn_ready-1] = tmp;
31386
31387 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31388 INSN_PRIORITY (tmp)++;
31389
31390 first_store_pos = -1;
31391
31392 break;
31393 }
31394 }
31395 pos--;
31396 }
31397
31398 if (first_store_pos >= 0)
31399 {
31400 /* An adjacent store wasn't found, but a non-adjacent store was,
31401 so move the non-adjacent store to the front of the ready
31402 list, and adjust its priority so that it is more likely to
31403 stay there. */
31404 tmp = ready[first_store_pos];
31405 for (i = first_store_pos; i < *pn_ready - 1; i++)
31406 ready[i] = ready[i + 1];
31407 ready[*pn_ready-1] = tmp;
31408 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31409 INSN_PRIORITY (tmp)++;
31410 }
31411 }
31412 else if (load_store_pendulum == 2)
31413 {
31414 /* Two loads have been issued in this cycle. Increase the priority
31415 of the first store in the ready list to favor it for issuing in
31416 the next cycle. */
31417 pos = *pn_ready-1;
31418
31419 while (pos >= 0)
31420 {
31421 if (is_store_insn (ready[pos], &str_mem)
31422 && !sel_sched_p ()
31423 && INSN_PRIORITY_KNOWN (ready[pos]))
31424 {
31425 INSN_PRIORITY (ready[pos])++;
31426
31427 /* Adjust the pendulum to account for the fact that a store
31428 was found and increased in priority. This is to prevent
31429 increasing the priority of multiple stores */
31430 load_store_pendulum++;
31431
31432 break;
31433 }
31434 pos--;
31435 }
31436 }
31437 }
31438
31439 /* Do Power9 dependent reordering if necessary. */
31440 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31441 && recog_memoized (last_scheduled_insn) >= 0)
31442 return power9_sched_reorder2 (ready, *pn_ready - 1);
31443
31444 return cached_can_issue_more;
31445 }
31446
31447 /* Return whether the presence of INSN causes a dispatch group termination
31448 of group WHICH_GROUP.
31449
31450 If WHICH_GROUP == current_group, this function will return true if INSN
31451 causes the termination of the current group (i.e., the dispatch group to
31452 which INSN belongs). This means that INSN will be the last insn in the
31453 group it belongs to.
31454
31455 If WHICH_GROUP == previous_group, this function will return true if INSN
31456 causes the termination of the previous group (i.e., the dispatch group that
31457 precedes the group to which INSN belongs). This means that INSN will be
31458 the first insn in the group it belongs to.  */
31459
31460 static bool
31461 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31462 {
31463 bool first, last;
31464
31465 if (! insn)
31466 return false;
31467
31468 first = insn_must_be_first_in_group (insn);
31469 last = insn_must_be_last_in_group (insn);
31470
31471 if (first && last)
31472 return true;
31473
31474 if (which_group == current_group)
31475 return last;
31476 else if (which_group == previous_group)
31477 return first;
31478
31479 return false;
31480 }
31481
31482
31483 static bool
31484 insn_must_be_first_in_group (rtx_insn *insn)
31485 {
31486 enum attr_type type;
31487
31488 if (!insn
31489 || NOTE_P (insn)
31490 || DEBUG_INSN_P (insn)
31491 || GET_CODE (PATTERN (insn)) == USE
31492 || GET_CODE (PATTERN (insn)) == CLOBBER)
31493 return false;
31494
31495 switch (rs6000_tune)
31496 {
31497 case PROCESSOR_POWER5:
31498 if (is_cracked_insn (insn))
31499 return true;
31500 /* FALLTHRU */
31501 case PROCESSOR_POWER4:
31502 if (is_microcoded_insn (insn))
31503 return true;
31504
31505 if (!rs6000_sched_groups)
31506 return false;
31507
31508 type = get_attr_type (insn);
31509
31510 switch (type)
31511 {
31512 case TYPE_MFCR:
31513 case TYPE_MFCRF:
31514 case TYPE_MTCR:
31515 case TYPE_CR_LOGICAL:
31516 case TYPE_MTJMPR:
31517 case TYPE_MFJMPR:
31518 case TYPE_DIV:
31519 case TYPE_LOAD_L:
31520 case TYPE_STORE_C:
31521 case TYPE_ISYNC:
31522 case TYPE_SYNC:
31523 return true;
31524 default:
31525 break;
31526 }
31527 break;
31528 case PROCESSOR_POWER6:
31529 type = get_attr_type (insn);
31530
31531 switch (type)
31532 {
31533 case TYPE_EXTS:
31534 case TYPE_CNTLZ:
31535 case TYPE_TRAP:
31536 case TYPE_MUL:
31537 case TYPE_INSERT:
31538 case TYPE_FPCOMPARE:
31539 case TYPE_MFCR:
31540 case TYPE_MTCR:
31541 case TYPE_MFJMPR:
31542 case TYPE_MTJMPR:
31543 case TYPE_ISYNC:
31544 case TYPE_SYNC:
31545 case TYPE_LOAD_L:
31546 case TYPE_STORE_C:
31547 return true;
31548 case TYPE_SHIFT:
31549 if (get_attr_dot (insn) == DOT_NO
31550 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31551 return true;
31552 else
31553 break;
31554 case TYPE_DIV:
31555 if (get_attr_size (insn) == SIZE_32)
31556 return true;
31557 else
31558 break;
31559 case TYPE_LOAD:
31560 case TYPE_STORE:
31561 case TYPE_FPLOAD:
31562 case TYPE_FPSTORE:
31563 if (get_attr_update (insn) == UPDATE_YES)
31564 return true;
31565 else
31566 break;
31567 default:
31568 break;
31569 }
31570 break;
31571 case PROCESSOR_POWER7:
31572 type = get_attr_type (insn);
31573
31574 switch (type)
31575 {
31576 case TYPE_CR_LOGICAL:
31577 case TYPE_MFCR:
31578 case TYPE_MFCRF:
31579 case TYPE_MTCR:
31580 case TYPE_DIV:
31581 case TYPE_ISYNC:
31582 case TYPE_LOAD_L:
31583 case TYPE_STORE_C:
31584 case TYPE_MFJMPR:
31585 case TYPE_MTJMPR:
31586 return true;
31587 case TYPE_MUL:
31588 case TYPE_SHIFT:
31589 case TYPE_EXTS:
31590 if (get_attr_dot (insn) == DOT_YES)
31591 return true;
31592 else
31593 break;
31594 case TYPE_LOAD:
31595 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31596 || get_attr_update (insn) == UPDATE_YES)
31597 return true;
31598 else
31599 break;
31600 case TYPE_STORE:
31601 case TYPE_FPLOAD:
31602 case TYPE_FPSTORE:
31603 if (get_attr_update (insn) == UPDATE_YES)
31604 return true;
31605 else
31606 break;
31607 default:
31608 break;
31609 }
31610 break;
31611 case PROCESSOR_POWER8:
31612 type = get_attr_type (insn);
31613
31614 switch (type)
31615 {
31616 case TYPE_CR_LOGICAL:
31617 case TYPE_MFCR:
31618 case TYPE_MFCRF:
31619 case TYPE_MTCR:
31620 case TYPE_SYNC:
31621 case TYPE_ISYNC:
31622 case TYPE_LOAD_L:
31623 case TYPE_STORE_C:
31624 case TYPE_VECSTORE:
31625 case TYPE_MFJMPR:
31626 case TYPE_MTJMPR:
31627 return true;
31628 case TYPE_SHIFT:
31629 case TYPE_EXTS:
31630 case TYPE_MUL:
31631 if (get_attr_dot (insn) == DOT_YES)
31632 return true;
31633 else
31634 break;
31635 case TYPE_LOAD:
31636 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31637 || get_attr_update (insn) == UPDATE_YES)
31638 return true;
31639 else
31640 break;
31641 case TYPE_STORE:
31642 if (get_attr_update (insn) == UPDATE_YES
31643 && get_attr_indexed (insn) == INDEXED_YES)
31644 return true;
31645 else
31646 break;
31647 default:
31648 break;
31649 }
31650 break;
31651 default:
31652 break;
31653 }
31654
31655 return false;
31656 }
31657
31658 static bool
31659 insn_must_be_last_in_group (rtx_insn *insn)
31660 {
31661 enum attr_type type;
31662
31663 if (!insn
31664 || NOTE_P (insn)
31665 || DEBUG_INSN_P (insn)
31666 || GET_CODE (PATTERN (insn)) == USE
31667 || GET_CODE (PATTERN (insn)) == CLOBBER)
31668 return false;
31669
31670 switch (rs6000_tune) {
31671 case PROCESSOR_POWER4:
31672 case PROCESSOR_POWER5:
31673 if (is_microcoded_insn (insn))
31674 return true;
31675
31676 if (is_branch_slot_insn (insn))
31677 return true;
31678
31679 break;
31680 case PROCESSOR_POWER6:
31681 type = get_attr_type (insn);
31682
31683 switch (type)
31684 {
31685 case TYPE_EXTS:
31686 case TYPE_CNTLZ:
31687 case TYPE_TRAP:
31688 case TYPE_MUL:
31689 case TYPE_FPCOMPARE:
31690 case TYPE_MFCR:
31691 case TYPE_MTCR:
31692 case TYPE_MFJMPR:
31693 case TYPE_MTJMPR:
31694 case TYPE_ISYNC:
31695 case TYPE_SYNC:
31696 case TYPE_LOAD_L:
31697 case TYPE_STORE_C:
31698 return true;
31699 case TYPE_SHIFT:
31700 if (get_attr_dot (insn) == DOT_NO
31701 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31702 return true;
31703 else
31704 break;
31705 case TYPE_DIV:
31706 if (get_attr_size (insn) == SIZE_32)
31707 return true;
31708 else
31709 break;
31710 default:
31711 break;
31712 }
31713 break;
31714 case PROCESSOR_POWER7:
31715 type = get_attr_type (insn);
31716
31717 switch (type)
31718 {
31719 case TYPE_ISYNC:
31720 case TYPE_SYNC:
31721 case TYPE_LOAD_L:
31722 case TYPE_STORE_C:
31723 return true;
31724 case TYPE_LOAD:
31725 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31726 && get_attr_update (insn) == UPDATE_YES)
31727 return true;
31728 else
31729 break;
31730 case TYPE_STORE:
31731 if (get_attr_update (insn) == UPDATE_YES
31732 && get_attr_indexed (insn) == INDEXED_YES)
31733 return true;
31734 else
31735 break;
31736 default:
31737 break;
31738 }
31739 break;
31740 case PROCESSOR_POWER8:
31741 type = get_attr_type (insn);
31742
31743 switch (type)
31744 {
31745 case TYPE_MFCR:
31746 case TYPE_MTCR:
31747 case TYPE_ISYNC:
31748 case TYPE_SYNC:
31749 case TYPE_LOAD_L:
31750 case TYPE_STORE_C:
31751 return true;
31752 case TYPE_LOAD:
31753 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31754 && get_attr_update (insn) == UPDATE_YES)
31755 return true;
31756 else
31757 break;
31758 case TYPE_STORE:
31759 if (get_attr_update (insn) == UPDATE_YES
31760 && get_attr_indexed (insn) == INDEXED_YES)
31761 return true;
31762 else
31763 break;
31764 default:
31765 break;
31766 }
31767 break;
31768 default:
31769 break;
31770 }
31771
31772 return false;
31773 }
31774
31775 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31776 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31777
31778 static bool
31779 is_costly_group (rtx *group_insns, rtx next_insn)
31780 {
31781 int i;
31782 int issue_rate = rs6000_issue_rate ();
31783
31784 for (i = 0; i < issue_rate; i++)
31785 {
31786 sd_iterator_def sd_it;
31787 dep_t dep;
31788 rtx insn = group_insns[i];
31789
31790 if (!insn)
31791 continue;
31792
31793 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31794 {
31795 rtx next = DEP_CON (dep);
31796
31797 if (next == next_insn
31798 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31799 return true;
31800 }
31801 }
31802
31803 return false;
31804 }
31805
31806 /* Utility of the function redefine_groups.
31807 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31808 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31809 to keep it "far" (in a separate group) from GROUP_INSNS, following
31810 one of the following schemes, depending on the value of the flag
31811 -minsert-sched-nops=X:
31812 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31813 in order to force NEXT_INSN into a separate group.
31814 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31815 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31816 insertion (has a group just ended, how many vacant issue slots remain in the
31817 last group, and how many dispatch groups were encountered so far). */
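/* For instance (a sketch, assuming the documented semantics above):
   with -minsert-sched-nops=2 exactly two nops are emitted before
   NEXT_INSN, while with -minsert-sched-nops=regroup_exact nops are
   emitted until the current group is closed (or a single group-ending
   nop on Power6/7/8).  */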
31818
31819 static int
31820 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31821 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31822 int *group_count)
31823 {
31824 rtx nop;
31825 bool force;
31826 int issue_rate = rs6000_issue_rate ();
31827 bool end = *group_end;
31828 int i;
31829
31830 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31831 return can_issue_more;
31832
31833 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31834 return can_issue_more;
31835
31836 force = is_costly_group (group_insns, next_insn);
31837 if (!force)
31838 return can_issue_more;
31839
31840 if (sched_verbose > 6)
31841 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31842 *group_count ,can_issue_more);
31843
31844 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31845 {
31846 if (*group_end)
31847 can_issue_more = 0;
31848
31849 /* Since only a branch can be issued in the last issue_slot, it is
31850 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31851 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31852 in this case the last nop will start a new group and the branch
31853 will be forced to the new group. */
31854 if (can_issue_more && !is_branch_slot_insn (next_insn))
31855 can_issue_more--;
31856
31857 /* Do we have a special group ending nop? */
31858 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31859 || rs6000_tune == PROCESSOR_POWER8)
31860 {
31861 nop = gen_group_ending_nop ();
31862 emit_insn_before (nop, next_insn);
31863 can_issue_more = 0;
31864 }
31865 else
31866 while (can_issue_more > 0)
31867 {
31868 nop = gen_nop ();
31869 emit_insn_before (nop, next_insn);
31870 can_issue_more--;
31871 }
31872
31873 *group_end = true;
31874 return 0;
31875 }
31876
31877 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31878 {
31879 int n_nops = rs6000_sched_insert_nops;
31880
31881 /* Nops can't be issued from the branch slot, so the effective
31882 issue_rate for nops is 'issue_rate - 1'. */
31883 if (can_issue_more == 0)
31884 can_issue_more = issue_rate;
31885 can_issue_more--;
31886 if (can_issue_more == 0)
31887 {
31888 can_issue_more = issue_rate - 1;
31889 (*group_count)++;
31890 end = true;
31891 for (i = 0; i < issue_rate; i++)
31892 {
31893 group_insns[i] = 0;
31894 }
31895 }
31896
31897 while (n_nops > 0)
31898 {
31899 nop = gen_nop ();
31900 emit_insn_before (nop, next_insn);
31901 if (can_issue_more == issue_rate - 1) /* new group begins */
31902 end = false;
31903 can_issue_more--;
31904 if (can_issue_more == 0)
31905 {
31906 can_issue_more = issue_rate - 1;
31907 (*group_count)++;
31908 end = true;
31909 for (i = 0; i < issue_rate; i++)
31910 {
31911 group_insns[i] = 0;
31912 }
31913 }
31914 n_nops--;
31915 }
31916
31917 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31918 can_issue_more++;
31919
31920 /* Is next_insn going to start a new group? */
31921 *group_end
31922 = (end
31923 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31924 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31925 || (can_issue_more < issue_rate
31926 && insn_terminates_group_p (next_insn, previous_group)));
31927 if (*group_end && end)
31928 (*group_count)--;
31929
31930 if (sched_verbose > 6)
31931 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31932 *group_count, can_issue_more);
31933 return can_issue_more;
31934 }
31935
31936 return can_issue_more;
31937 }
31938
31939 /* This function tries to synch the dispatch groups that the compiler "sees"
31940 with the dispatch groups that the processor dispatcher is expected to
31941 form in practice. It tries to achieve this synchronization by forcing the
31942 estimated processor grouping on the compiler (as opposed to the function
31943 'pad_groups' which tries to force the scheduler's grouping on the processor).
31944
31945 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31946 examines the (estimated) dispatch groups that will be formed by the processor
31947 dispatcher. It marks these group boundaries to reflect the estimated
31948 processor grouping, overriding the grouping that the scheduler had marked.
31949 Depending on the value of the flag '-minsert-sched-nops' this function can
31950 force certain insns into separate groups or force a certain distance between
31951 them by inserting nops, for example, if there exists a "costly dependence"
31952 between the insns.
31953
31954 The function estimates the group boundaries that the processor will form as
31955 follows: It keeps track of how many vacant issue slots are available after
31956 each insn. A subsequent insn will start a new group if one of the following
31957 4 cases applies:
31958 - no more vacant issue slots remain in the current dispatch group.
31959 - only the last issue slot, which is the branch slot, is vacant, but the next
31960 insn is not a branch.
31961 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31962 which means that a cracked insn (which occupies two issue slots) can't be
31963 issued in this group.
31964 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31965 start a new group. */
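/* Example (hypothetical, issue_rate == 5): if four slots of the current
   group are filled and the next insn is cracked, it needs two slots and
   cannot fit, so a new group is assumed to start there; likewise if only
   the branch slot remains and the next insn is not a branch.  */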
31966
31967 static int
31968 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31969 rtx_insn *tail)
31970 {
31971 rtx_insn *insn, *next_insn;
31972 int issue_rate;
31973 int can_issue_more;
31974 int slot, i;
31975 bool group_end;
31976 int group_count = 0;
31977 rtx *group_insns;
31978
31979 /* Initialize. */
31980 issue_rate = rs6000_issue_rate ();
31981 group_insns = XALLOCAVEC (rtx, issue_rate);
31982 for (i = 0; i < issue_rate; i++)
31983 {
31984 group_insns[i] = 0;
31985 }
31986 can_issue_more = issue_rate;
31987 slot = 0;
31988 insn = get_next_active_insn (prev_head_insn, tail);
31989 group_end = false;
31990
31991 while (insn != NULL_RTX)
31992 {
31993 slot = (issue_rate - can_issue_more);
31994 group_insns[slot] = insn;
31995 can_issue_more =
31996 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
31997 if (insn_terminates_group_p (insn, current_group))
31998 can_issue_more = 0;
31999
32000 next_insn = get_next_active_insn (insn, tail);
32001 if (next_insn == NULL_RTX)
32002 return group_count + 1;
32003
32004 /* Is next_insn going to start a new group? */
32005 group_end
32006 = (can_issue_more == 0
32007 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32008 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32009 || (can_issue_more < issue_rate
32010 && insn_terminates_group_p (next_insn, previous_group)));
32011
32012 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32013 next_insn, &group_end, can_issue_more,
32014 &group_count);
32015
32016 if (group_end)
32017 {
32018 group_count++;
32019 can_issue_more = 0;
32020 for (i = 0; i < issue_rate; i++)
32021 {
32022 group_insns[i] = 0;
32023 }
32024 }
32025
32026 if (GET_MODE (next_insn) == TImode && can_issue_more)
32027 PUT_MODE (next_insn, VOIDmode);
32028 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32029 PUT_MODE (next_insn, TImode);
32030
32031 insn = next_insn;
32032 if (can_issue_more == 0)
32033 can_issue_more = issue_rate;
32034 } /* while */
32035
32036 return group_count;
32037 }
32038
32039 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32040 dispatch group boundaries that the scheduler had marked. Pad with nops
32041 any dispatch groups which have vacant issue slots, in order to force the
32042 scheduler's grouping on the processor dispatcher. The function
32043 returns the number of dispatch groups found. */
32044
32045 static int
32046 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32047 rtx_insn *tail)
32048 {
32049 rtx_insn *insn, *next_insn;
32050 rtx nop;
32051 int issue_rate;
32052 int can_issue_more;
32053 int group_end;
32054 int group_count = 0;
32055
32056 /* Initialize issue_rate. */
32057 issue_rate = rs6000_issue_rate ();
32058 can_issue_more = issue_rate;
32059
32060 insn = get_next_active_insn (prev_head_insn, tail);
32061 next_insn = get_next_active_insn (insn, tail);
32062
32063 while (insn != NULL_RTX)
32064 {
32065 can_issue_more =
32066 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32067
32068 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32069
32070 if (next_insn == NULL_RTX)
32071 break;
32072
32073 if (group_end)
32074 {
32075 /* If the scheduler had marked group termination at this location
32076 (between insn and next_insn), and neither insn nor next_insn will
32077 force group termination, pad the group with nops to force group
32078 termination. */
32079 if (can_issue_more
32080 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32081 && !insn_terminates_group_p (insn, current_group)
32082 && !insn_terminates_group_p (next_insn, previous_group))
32083 {
32084 if (!is_branch_slot_insn (next_insn))
32085 can_issue_more--;
32086
32087 while (can_issue_more)
32088 {
32089 nop = gen_nop ();
32090 emit_insn_before (nop, next_insn);
32091 can_issue_more--;
32092 }
32093 }
32094
32095 can_issue_more = issue_rate;
32096 group_count++;
32097 }
32098
32099 insn = next_insn;
32100 next_insn = get_next_active_insn (insn, tail);
32101 }
32102
32103 return group_count;
32104 }
32105
32106 /* We're beginning a new block. Initialize data structures as necessary. */
32107
32108 static void
32109 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32110 int sched_verbose ATTRIBUTE_UNUSED,
32111 int max_ready ATTRIBUTE_UNUSED)
32112 {
32113 last_scheduled_insn = NULL;
32114 load_store_pendulum = 0;
32115 divide_cnt = 0;
32116 vec_pairing = 0;
32117 }
32118
32119 /* The following function is called at the end of scheduling BB.
32120 After reload, it inserts nops to enforce insn group bundling.  */
32121
32122 static void
32123 rs6000_sched_finish (FILE *dump, int sched_verbose)
32124 {
32125 int n_groups;
32126
32127 if (sched_verbose)
32128 fprintf (dump, "=== Finishing schedule.\n");
32129
32130 if (reload_completed && rs6000_sched_groups)
32131 {
32132 /* Do not run the sched_finish hook when selective scheduling is enabled.  */
32133 if (sel_sched_p ())
32134 return;
32135
32136 if (rs6000_sched_insert_nops == sched_finish_none)
32137 return;
32138
32139 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32140 n_groups = pad_groups (dump, sched_verbose,
32141 current_sched_info->prev_head,
32142 current_sched_info->next_tail);
32143 else
32144 n_groups = redefine_groups (dump, sched_verbose,
32145 current_sched_info->prev_head,
32146 current_sched_info->next_tail);
32147
32148 if (sched_verbose >= 6)
32149 {
32150 fprintf (dump, "ngroups = %d\n", n_groups);
32151 print_rtl (dump, current_sched_info->prev_head);
32152 fprintf (dump, "Done finish_sched\n");
32153 }
32154 }
32155 }
32156
32157 struct rs6000_sched_context
32158 {
32159 short cached_can_issue_more;
32160 rtx_insn *last_scheduled_insn;
32161 int load_store_pendulum;
32162 int divide_cnt;
32163 int vec_pairing;
32164 };
32165
32166 typedef struct rs6000_sched_context rs6000_sched_context_def;
32167 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32168
32169 /* Allocate storage for a new scheduling context.  */
32170 static void *
32171 rs6000_alloc_sched_context (void)
32172 {
32173 return xmalloc (sizeof (rs6000_sched_context_def));
32174 }
32175
32176 /* Initialize _SC with clean data if CLEAN_P is true, otherwise
32177 copy it from the global scheduling context.  */
32178 static void
32179 rs6000_init_sched_context (void *_sc, bool clean_p)
32180 {
32181 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32182
32183 if (clean_p)
32184 {
32185 sc->cached_can_issue_more = 0;
32186 sc->last_scheduled_insn = NULL;
32187 sc->load_store_pendulum = 0;
32188 sc->divide_cnt = 0;
32189 sc->vec_pairing = 0;
32190 }
32191 else
32192 {
32193 sc->cached_can_issue_more = cached_can_issue_more;
32194 sc->last_scheduled_insn = last_scheduled_insn;
32195 sc->load_store_pendulum = load_store_pendulum;
32196 sc->divide_cnt = divide_cnt;
32197 sc->vec_pairing = vec_pairing;
32198 }
32199 }
32200
32201 /* Sets the global scheduling context to the one pointed to by _SC. */
32202 static void
32203 rs6000_set_sched_context (void *_sc)
32204 {
32205 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32206
32207 gcc_assert (sc != NULL);
32208
32209 cached_can_issue_more = sc->cached_can_issue_more;
32210 last_scheduled_insn = sc->last_scheduled_insn;
32211 load_store_pendulum = sc->load_store_pendulum;
32212 divide_cnt = sc->divide_cnt;
32213 vec_pairing = sc->vec_pairing;
32214 }
32215
32216 /* Free _SC. */
32217 static void
32218 rs6000_free_sched_context (void *_sc)
32219 {
32220 gcc_assert (_sc != NULL);
32221
32222 free (_sc);
32223 }
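/* The four functions above implement the TARGET_SCHED_ALLOC_SCHED_CONTEXT,
   TARGET_SCHED_INIT_SCHED_CONTEXT, TARGET_SCHED_SET_SCHED_CONTEXT and
   TARGET_SCHED_FREE_SCHED_CONTEXT hooks, which the selective scheduler
   uses to checkpoint and restore this backend scheduling state.  */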
32224
32225 static bool
32226 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32227 {
32228 switch (get_attr_type (insn))
32229 {
32230 case TYPE_DIV:
32231 case TYPE_SDIV:
32232 case TYPE_DDIV:
32233 case TYPE_VECDIV:
32234 case TYPE_SSQRT:
32235 case TYPE_DSQRT:
32236 return false;
32237
32238 default:
32239 return true;
32240 }
32241 }
32242 \f
32243 /* Length in units of the trampoline for entering a nested function. */
32244
32245 int
32246 rs6000_trampoline_size (void)
32247 {
32248 int ret = 0;
32249
32250 switch (DEFAULT_ABI)
32251 {
32252 default:
32253 gcc_unreachable ();
32254
32255 case ABI_AIX:
32256 ret = (TARGET_32BIT) ? 12 : 24;
32257 break;
32258
32259 case ABI_ELFv2:
32260 gcc_assert (!TARGET_32BIT);
32261 ret = 32;
32262 break;
32263
32264 case ABI_DARWIN:
32265 case ABI_V4:
32266 ret = (TARGET_32BIT) ? 40 : 48;
32267 break;
32268 }
32269
32270 return ret;
32271 }
32272
32273 /* Emit RTL insns to initialize the variable parts of a trampoline.
32274 FNADDR is an RTX for the address of the function's pure code.
32275 CXT is an RTX for the static chain value for the function. */
32276
32277 static void
32278 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32279 {
32280 int regsize = (TARGET_32BIT) ? 4 : 8;
32281 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32282 rtx ctx_reg = force_reg (Pmode, cxt);
32283 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32284
32285 switch (DEFAULT_ABI)
32286 {
32287 default:
32288 gcc_unreachable ();
32289
32290 /* Under AIX, just build the 3-word function descriptor.  */
32291 case ABI_AIX:
32292 {
32293 rtx fnmem, fn_reg, toc_reg;
32294
32295 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32296 error ("you cannot take the address of a nested function if you use "
32297 "the %qs option", "-mno-pointers-to-nested-functions");
32298
32299 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32300 fn_reg = gen_reg_rtx (Pmode);
32301 toc_reg = gen_reg_rtx (Pmode);
32302
32303 /* Macro to shorten the code expansions below. */
32304 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32305
32306 m_tramp = replace_equiv_address (m_tramp, addr);
32307
32308 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32309 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32310 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32311 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32312 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32313
32314 # undef MEM_PLUS
32315 }
32316 break;
32317
32318 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32319 case ABI_ELFv2:
32320 case ABI_DARWIN:
32321 case ABI_V4:
32322 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32323 LCT_NORMAL, VOIDmode,
32324 addr, Pmode,
32325 GEN_INT (rs6000_trampoline_size ()), SImode,
32326 fnaddr, Pmode,
32327 ctx_reg, Pmode);
32328 break;
32329 }
32330 }
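/* For reference, the AIX descriptor built above is laid out as
   { function address, TOC pointer, static chain } at offsets 0,
   regsize and 2*regsize, matching the three emit_move_insn calls.  */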
32331
32332 \f
32333 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32334 identifier as an argument, so the front end shouldn't look it up. */
32335
32336 static bool
32337 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32338 {
32339 return is_attribute_p ("altivec", attr_id);
32340 }
32341
32342 /* Handle the "altivec" attribute. The attribute may have
32343 arguments as follows:
32344
32345 __attribute__((altivec(vector__)))
32346 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32347 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32348
32349 and may appear more than once (e.g., 'vector bool char') in a
32350 given declaration. */
32351
32352 static tree
32353 rs6000_handle_altivec_attribute (tree *node,
32354 tree name ATTRIBUTE_UNUSED,
32355 tree args,
32356 int flags ATTRIBUTE_UNUSED,
32357 bool *no_add_attrs)
32358 {
32359 tree type = *node, result = NULL_TREE;
32360 machine_mode mode;
32361 int unsigned_p;
32362 char altivec_type
32363 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32364 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32365 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32366 : '?');
32367
32368 while (POINTER_TYPE_P (type)
32369 || TREE_CODE (type) == FUNCTION_TYPE
32370 || TREE_CODE (type) == METHOD_TYPE
32371 || TREE_CODE (type) == ARRAY_TYPE)
32372 type = TREE_TYPE (type);
32373
32374 mode = TYPE_MODE (type);
32375
32376 /* Check for invalid AltiVec type qualifiers. */
32377 if (type == long_double_type_node)
32378 error ("use of %<long double%> in AltiVec types is invalid");
32379 else if (type == boolean_type_node)
32380 error ("use of boolean types in AltiVec types is invalid");
32381 else if (TREE_CODE (type) == COMPLEX_TYPE)
32382 error ("use of %<complex%> in AltiVec types is invalid");
32383 else if (DECIMAL_FLOAT_MODE_P (mode))
32384 error ("use of decimal floating point types in AltiVec types is invalid");
32385 else if (!TARGET_VSX)
32386 {
32387 if (type == long_unsigned_type_node || type == long_integer_type_node)
32388 {
32389 if (TARGET_64BIT)
32390 error ("use of %<long%> in AltiVec types is invalid for "
32391 "64-bit code without %qs", "-mvsx");
32392 else if (rs6000_warn_altivec_long)
32393 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32394 "use %<int%>");
32395 }
32396 else if (type == long_long_unsigned_type_node
32397 || type == long_long_integer_type_node)
32398 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32399 "-mvsx");
32400 else if (type == double_type_node)
32401 error ("use of %<double%> in AltiVec types is invalid without %qs",
32402 "-mvsx");
32403 }
32404
32405 switch (altivec_type)
32406 {
32407 case 'v':
32408 unsigned_p = TYPE_UNSIGNED (type);
32409 switch (mode)
32410 {
32411 case E_TImode:
32412 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32413 break;
32414 case E_DImode:
32415 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32416 break;
32417 case E_SImode:
32418 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32419 break;
32420 case E_HImode:
32421 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32422 break;
32423 case E_QImode:
32424 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32425 break;
32426 case E_SFmode: result = V4SF_type_node; break;
32427 case E_DFmode: result = V2DF_type_node; break;
32428 /* If the user says 'vector int bool', we may be handed the 'bool'
32429 attribute _before_ the 'vector' attribute, and so select the
32430 proper type in the 'b' case below. */
32431 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32432 case E_V2DImode: case E_V2DFmode:
32433 result = type;
32434 default: break;
32435 }
32436 break;
32437 case 'b':
32438 switch (mode)
32439 {
32440 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32441 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32442 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32443 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32444 default: break;
32445 }
32446 break;
32447 case 'p':
32448 switch (mode)
32449 {
32450 case E_V8HImode: result = pixel_V8HI_type_node;
32451 default: break;
32452 }
32453 default: break;
32454 }
32455
32456 /* Propagate qualifiers attached to the element type
32457 onto the vector type. */
32458 if (result && result != type && TYPE_QUALS (type))
32459 result = build_qualified_type (result, TYPE_QUALS (type));
32460
32461 *no_add_attrs = true; /* No need to hang on to the attribute. */
32462
32463 if (result)
32464 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32465
32466 return NULL_TREE;
32467 }
32468
32469 /* AltiVec defines five built-in scalar types that serve as vector
32470 elements; we must teach the compiler how to mangle them. The 128-bit
32471 floating point mangling is target-specific as well. */
32472
32473 static const char *
32474 rs6000_mangle_type (const_tree type)
32475 {
32476 type = TYPE_MAIN_VARIANT (type);
32477
32478 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32479 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32480 return NULL;
32481
32482 if (type == bool_char_type_node) return "U6__boolc";
32483 if (type == bool_short_type_node) return "U6__bools";
32484 if (type == pixel_type_node) return "u7__pixel";
32485 if (type == bool_int_type_node) return "U6__booli";
32486 if (type == bool_long_long_type_node) return "U6__boolx";
32487
32488 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32489 return "g";
32490 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32491 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32492
32493 /* For all other types, use the default mangling. */
32494 return NULL;
32495 }
32496
32497 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32498 struct attribute_spec.handler. */
32499
32500 static tree
32501 rs6000_handle_longcall_attribute (tree *node, tree name,
32502 tree args ATTRIBUTE_UNUSED,
32503 int flags ATTRIBUTE_UNUSED,
32504 bool *no_add_attrs)
32505 {
32506 if (TREE_CODE (*node) != FUNCTION_TYPE
32507 && TREE_CODE (*node) != FIELD_DECL
32508 && TREE_CODE (*node) != TYPE_DECL)
32509 {
32510 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32511 name);
32512 *no_add_attrs = true;
32513 }
32514
32515 return NULL_TREE;
32516 }
32517
32518 /* Set longcall attributes on all functions declared when
32519 rs6000_default_long_calls is true. */
32520 static void
32521 rs6000_set_default_type_attributes (tree type)
32522 {
32523 if (rs6000_default_long_calls
32524 && (TREE_CODE (type) == FUNCTION_TYPE
32525 || TREE_CODE (type) == METHOD_TYPE))
32526 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32527 NULL_TREE,
32528 TYPE_ATTRIBUTES (type));
32529
32530 #if TARGET_MACHO
32531 darwin_set_default_type_attributes (type);
32532 #endif
32533 }
32534
32535 /* Return a reference suitable for calling a function with the
32536 longcall attribute. */
32537
32538 static rtx
32539 rs6000_longcall_ref (rtx call_ref, rtx arg)
32540 {
32541 /* System V adds '.' to the internal name, so skip any leading dots.  */
32542 const char *call_name = XSTR (call_ref, 0);
32543 if (*call_name == '.')
32544 {
32545 while (*call_name == '.')
32546 call_name++;
32547
32548 tree node = get_identifier (call_name);
32549 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32550 }
32551
32552 if (TARGET_PLTSEQ)
32553 {
32554 rtx base = const0_rtx;
32555 int regno;
32556 if (DEFAULT_ABI == ABI_ELFv2)
32557 {
32558 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32559 regno = 12;
32560 }
32561 else
32562 {
32563 if (flag_pic)
32564 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32565 regno = 11;
32566 }
32567 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32568 may be used by a function global entry point. For SysV4, r11
32569 is used by __glink_PLTresolve lazy resolver entry. */
32570 rtx reg = gen_rtx_REG (Pmode, regno);
32571 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32572 UNSPEC_PLT16_HA);
32573 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32574 UNSPEC_PLT16_LO);
32575 emit_insn (gen_rtx_SET (reg, hi));
32576 emit_insn (gen_rtx_SET (reg, lo));
32577 return reg;
32578 }
32579
32580 return force_reg (Pmode, call_ref);
32581 }
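/* A sketch of the intended result (assumed, not verified against a
   particular assembler): the HA/LO UNSPEC pair above typically expands
   to an addis/addi sequence carrying PLT16_HA and PLT16_LO relocations
   against CALL_REF, leaving the resolved address in REG for the call.  */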
32582 \f
32583 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32584 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32585 #endif
32586
32587 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32588 struct attribute_spec.handler. */
32589 static tree
32590 rs6000_handle_struct_attribute (tree *node, tree name,
32591 tree args ATTRIBUTE_UNUSED,
32592 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32593 {
32594 tree *type = NULL;
32595 if (DECL_P (*node))
32596 {
32597 if (TREE_CODE (*node) == TYPE_DECL)
32598 type = &TREE_TYPE (*node);
32599 }
32600 else
32601 type = node;
32602
32603 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32604 || TREE_CODE (*type) == UNION_TYPE)))
32605 {
32606 warning (OPT_Wattributes, "%qE attribute ignored", name);
32607 *no_add_attrs = true;
32608 }
32609
32610 else if ((is_attribute_p ("ms_struct", name)
32611 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32612 || ((is_attribute_p ("gcc_struct", name)
32613 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32614 {
32615 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32616 name);
32617 *no_add_attrs = true;
32618 }
32619
32620 return NULL_TREE;
32621 }
32622
32623 static bool
32624 rs6000_ms_bitfield_layout_p (const_tree record_type)
32625 {
32626 return ((TARGET_USE_MS_BITFIELD_LAYOUT
32627 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32628 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32629 }
32630 \f
32631 #ifdef USING_ELFOS_H
32632
32633 /* A get_unnamed_section callback, used for switching to toc_section. */
32634
32635 static void
32636 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32637 {
32638 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32639 && TARGET_MINIMAL_TOC)
32640 {
32641 if (!toc_initialized)
32642 {
32643 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32644 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32645 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32646 fprintf (asm_out_file, "\t.tc ");
32647 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32648 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32649 fprintf (asm_out_file, "\n");
32650
32651 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32652 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32653 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32654 fprintf (asm_out_file, " = .+32768\n");
32655 toc_initialized = 1;
32656 }
32657 else
32658 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32659 }
32660 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32661 {
32662 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32663 if (!toc_initialized)
32664 {
32665 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32666 toc_initialized = 1;
32667 }
32668 }
32669 else
32670 {
32671 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32672 if (!toc_initialized)
32673 {
32674 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32675 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32676 fprintf (asm_out_file, " = .+32768\n");
32677 toc_initialized = 1;
32678 }
32679 }
32680 }
32681
32682 /* Implement TARGET_ASM_INIT_SECTIONS. */
32683
32684 static void
32685 rs6000_elf_asm_init_sections (void)
32686 {
32687 toc_section
32688 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32689
32690 sdata2_section
32691 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32692 SDATA2_SECTION_ASM_OP);
32693 }
32694
32695 /* Implement TARGET_SELECT_RTX_SECTION. */
32696
32697 static section *
32698 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32699 unsigned HOST_WIDE_INT align)
32700 {
32701 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32702 return toc_section;
32703 else
32704 return default_elf_select_rtx_section (mode, x, align);
32705 }
32706 \f
32707 /* For a SYMBOL_REF, set generic flags and then perform some
32708 target-specific processing.
32709
32710 When the AIX ABI is requested on a non-AIX system, replace the
32711 function name with the real name (with a leading .) rather than the
32712 function descriptor name. This saves a lot of overriding code to
32713 read the prefixes. */
32714
32715 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32716 static void
32717 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32718 {
32719 default_encode_section_info (decl, rtl, first);
32720
32721 if (first
32722 && TREE_CODE (decl) == FUNCTION_DECL
32723 && !TARGET_AIX
32724 && DEFAULT_ABI == ABI_AIX)
32725 {
32726 rtx sym_ref = XEXP (rtl, 0);
32727 size_t len = strlen (XSTR (sym_ref, 0));
32728 char *str = XALLOCAVEC (char, len + 2);
32729 str[0] = '.';
32730 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32731 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32732 }
32733 }
32734
32735 static inline bool
32736 compare_section_name (const char *section, const char *templ)
32737 {
32738 int len;
32739
32740 len = strlen (templ);
32741 return (strncmp (section, templ, len) == 0
32742 && (section[len] == 0 || section[len] == '.'));
32743 }
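32743 /* E.g. compare_section_name (".sdata.foo", ".sdata") and
32743 compare_section_name (".sdata", ".sdata") are true, while
32743 compare_section_name (".sdata2", ".sdata") is false: any text after
32743 the template must begin with a '.'. */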
32744
32745 bool
32746 rs6000_elf_in_small_data_p (const_tree decl)
32747 {
32748 if (rs6000_sdata == SDATA_NONE)
32749 return false;
32750
32751 /* We want to merge strings, so we never consider them small data. */
32752 if (TREE_CODE (decl) == STRING_CST)
32753 return false;
32754
32755 /* Functions are never in the small data area. */
32756 if (TREE_CODE (decl) == FUNCTION_DECL)
32757 return false;
32758
32759 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32760 {
32761 const char *section = DECL_SECTION_NAME (decl);
32762 if (compare_section_name (section, ".sdata")
32763 || compare_section_name (section, ".sdata2")
32764 || compare_section_name (section, ".gnu.linkonce.s")
32765 || compare_section_name (section, ".sbss")
32766 || compare_section_name (section, ".sbss2")
32767 || compare_section_name (section, ".gnu.linkonce.sb")
32768 || strcmp (section, ".PPC.EMB.sdata0") == 0
32769 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32770 return true;
32771 }
32772 else
32773 {
32774 /* If we are told not to put readonly data in sdata, then don't. */
32775 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32776 && !rs6000_readonly_in_sdata)
32777 return false;
32778
32779 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32780
32781 if (size > 0
32782 && size <= g_switch_value
32783 /* If it's not public, and we're not going to reference it there,
32784 there's no need to put it in the small data section. */
32785 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32786 return true;
32787 }
32788
32789 return false;
32790 }
32791
32792 #endif /* USING_ELFOS_H */
32793 \f
32794 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32795
32796 static bool
32797 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32798 {
32799 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32800 }
32801
32802 /* Do not place thread-local symbols refs in the object blocks. */
32803
32804 static bool
32805 rs6000_use_blocks_for_decl_p (const_tree decl)
32806 {
32807 return !DECL_THREAD_LOCAL_P (decl);
32808 }
32809 \f
32810 /* Return a REG that occurs in ADDR with coefficient 1.
32811 ADDR can be effectively incremented by incrementing REG.
32812
32813 r0 is special and we must not select it as an address
32814 register by this routine since our caller will try to
32815 increment the returned register via an "la" instruction. */
32816
32817 rtx
32818 find_addr_reg (rtx addr)
32819 {
32820 while (GET_CODE (addr) == PLUS)
32821 {
32822 if (REG_P (XEXP (addr, 0))
32823 && REGNO (XEXP (addr, 0)) != 0)
32824 addr = XEXP (addr, 0);
32825 else if (REG_P (XEXP (addr, 1))
32826 && REGNO (XEXP (addr, 1)) != 0)
32827 addr = XEXP (addr, 1);
32828 else if (CONSTANT_P (XEXP (addr, 0)))
32829 addr = XEXP (addr, 1);
32830 else if (CONSTANT_P (XEXP (addr, 1)))
32831 addr = XEXP (addr, 0);
32832 else
32833 gcc_unreachable ();
32834 }
32835 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
32836 return addr;
32837 }
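32837 /* E.g. for ADDR (plus (reg 9) (const_int 16)) the loop above discards
32837 the constant term and returns (reg 9), which the caller can then
32837 bump with "la 9,16(9)". */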
32838
32839 void
32840 rs6000_fatal_bad_address (rtx op)
32841 {
32842 fatal_insn ("bad address", op);
32843 }
32844
32845 #if TARGET_MACHO
32846
32847 typedef struct branch_island_d {
32848 tree function_name;
32849 tree label_name;
32850 int line_number;
32851 } branch_island;
32852
32853
32854 static vec<branch_island, va_gc> *branch_islands;
32855
32856 /* Remember to generate a branch island for far calls to the given
32857 function. */
32858
32859 static void
32860 add_compiler_branch_island (tree label_name, tree function_name,
32861 int line_number)
32862 {
32863 branch_island bi = {function_name, label_name, line_number};
32864 vec_safe_push (branch_islands, bi);
32865 }
32866
32867 /* Generate far-jump branch islands for everything recorded in
32868 branch_islands. Invoked immediately after the last instruction of
32869 the epilogue has been emitted; the branch islands must be appended
32870 to, and contiguous with, the function body. Mach-O stubs are
32871 generated in machopic_output_stub(). */
32872
32873 static void
32874 macho_branch_islands (void)
32875 {
32876 char tmp_buf[512];
32877
32878 while (!vec_safe_is_empty (branch_islands))
32879 {
32880 branch_island *bi = &branch_islands->last ();
32881 const char *label = IDENTIFIER_POINTER (bi->label_name);
32882 const char *name = IDENTIFIER_POINTER (bi->function_name);
32883 char name_buf[512];
32884 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32885 if (name[0] == '*' || name[0] == '&')
32886 strcpy (name_buf, name+1);
32887 else
32888 {
32889 name_buf[0] = '_';
32890 strcpy (name_buf+1, name);
32891 }
32892 strcpy (tmp_buf, "\n");
32893 strcat (tmp_buf, label);
32894 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32895 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32896 dbxout_stabd (N_SLINE, bi->line_number);
32897 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32898 if (flag_pic)
32899 {
32900 if (TARGET_LINK_STACK)
32901 {
32902 char name[32];
32903 get_ppc476_thunk_name (name);
32904 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32905 strcat (tmp_buf, name);
32906 strcat (tmp_buf, "\n");
32907 strcat (tmp_buf, label);
32908 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32909 }
32910 else
32911 {
32912 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32913 strcat (tmp_buf, label);
32914 strcat (tmp_buf, "_pic\n");
32915 strcat (tmp_buf, label);
32916 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32917 }
32918
32919 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32920 strcat (tmp_buf, name_buf);
32921 strcat (tmp_buf, " - ");
32922 strcat (tmp_buf, label);
32923 strcat (tmp_buf, "_pic)\n");
32924
32925 strcat (tmp_buf, "\tmtlr r0\n");
32926
32927 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32928 strcat (tmp_buf, name_buf);
32929 strcat (tmp_buf, " - ");
32930 strcat (tmp_buf, label);
32931 strcat (tmp_buf, "_pic)\n");
32932
32933 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32934 }
32935 else
32936 {
32937 strcat (tmp_buf, ":\n\tlis r12,hi16(");
32938 strcat (tmp_buf, name_buf);
32939 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32940 strcat (tmp_buf, name_buf);
32941 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32942 }
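32942 /* So for the non-PIC case just built, TMP_BUF holds something like
32942 (label and symbol names are illustrative):
32942
32942 L42$island:
32942 lis r12,hi16(_foo)
32942 ori r12,r12,lo16(_foo)
32942 mtctr r12
32942 bctr */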
32943 output_asm_insn (tmp_buf, 0);
32944 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32945 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32946 dbxout_stabd (N_SLINE, bi->line_number);
32947 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32948 branch_islands->pop ();
32949 }
32950 }
32951
32952 /* NO_PREVIOUS_DEF checks the list of branch islands to see whether
32953 the function name has already been recorded. */
32954
32955 static int
32956 no_previous_def (tree function_name)
32957 {
32958 branch_island *bi;
32959 unsigned ix;
32960
32961 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32962 if (function_name == bi->function_name)
32963 return 0;
32964 return 1;
32965 }
32966
32967 /* GET_PREV_LABEL gets the label name from the previous definition of
32968 the function. */
32969
32970 static tree
32971 get_prev_label (tree function_name)
32972 {
32973 branch_island *bi;
32974 unsigned ix;
32975
32976 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32977 if (function_name == bi->function_name)
32978 return bi->label_name;
32979 return NULL_TREE;
32980 }
32981
32982 /* Generate PIC and indirect symbol stubs. */
32983
32984 void
32985 machopic_output_stub (FILE *file, const char *symb, const char *stub)
32986 {
32987 unsigned int length;
32988 char *symbol_name, *lazy_ptr_name;
32989 char *local_label_0;
32990 static unsigned label = 0;
32991
32992 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
32993 symb = (*targetm.strip_name_encoding) (symb);
32994
32996 length = strlen (symb);
32997 symbol_name = XALLOCAVEC (char, length + 32);
32998 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
32999
33000 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33001 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33002
33003 if (flag_pic == 2)
33004 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33005 else
33006 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33007
33008 if (flag_pic == 2)
33009 {
33010 fprintf (file, "\t.align 5\n");
33011
33012 fprintf (file, "%s:\n", stub);
33013 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33014
33015 label++;
33016 local_label_0 = XALLOCAVEC (char, 16);
33017 sprintf (local_label_0, "L%u$spb", label);
33018
33019 fprintf (file, "\tmflr r0\n");
33020 if (TARGET_LINK_STACK)
33021 {
33022 char name[32];
33023 get_ppc476_thunk_name (name);
33024 fprintf (file, "\tbl %s\n", name);
33025 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33026 }
33027 else
33028 {
33029 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33030 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33031 }
33032 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33033 lazy_ptr_name, local_label_0);
33034 fprintf (file, "\tmtlr r0\n");
33035 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33036 (TARGET_64BIT ? "ldu" : "lwzu"),
33037 lazy_ptr_name, local_label_0);
33038 fprintf (file, "\tmtctr r12\n");
33039 fprintf (file, "\tbctr\n");
33040 }
33041 else
33042 {
33043 fprintf (file, "\t.align 4\n");
33044
33045 fprintf (file, "%s:\n", stub);
33046 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33047
33048 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33049 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33050 (TARGET_64BIT ? "ldu" : "lwzu"),
33051 lazy_ptr_name);
33052 fprintf (file, "\tmtctr r12\n");
33053 fprintf (file, "\tbctr\n");
33054 }
33055
33056 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33057 fprintf (file, "%s:\n", lazy_ptr_name);
33058 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33059 fprintf (file, "%sdyld_stub_binding_helper\n",
33060 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33061 }
33062
33063 /* Legitimize PIC addresses. If the address is already
33064 position-independent, we return ORIG. Newly generated
33065 position-independent addresses go into a reg. This is REG if
33066 nonzero; otherwise we allocate register(s) as necessary. */
33067
33068 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
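33068 /* I.e. INTVAL (X) lies in the signed 16-bit range -0x8000 .. 0x7fff
33068 that fits an immediate displacement. */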
33069
33070 rtx
33071 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33072 rtx reg)
33073 {
33074 rtx base, offset;
33075
33076 if (reg == NULL && !reload_completed)
33077 reg = gen_reg_rtx (Pmode);
33078
33079 if (GET_CODE (orig) == CONST)
33080 {
33081 rtx reg_temp;
33082
33083 if (GET_CODE (XEXP (orig, 0)) == PLUS
33084 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33085 return orig;
33086
33087 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33088
33089 /* Use a different reg for the intermediate value, as
33090 it will be marked UNCHANGING. */
33091 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33092 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33093 Pmode, reg_temp);
33094 offset =
33095 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33096 Pmode, reg);
33097
33098 if (CONST_INT_P (offset))
33099 {
33100 if (SMALL_INT (offset))
33101 return plus_constant (Pmode, base, INTVAL (offset));
33102 else if (!reload_completed)
33103 offset = force_reg (Pmode, offset);
33104 else
33105 {
33106 rtx mem = force_const_mem (Pmode, orig);
33107 return machopic_legitimize_pic_address (mem, Pmode, reg);
33108 }
33109 }
33110 return gen_rtx_PLUS (Pmode, base, offset);
33111 }
33112
33113 /* Fall back on generic machopic code. */
33114 return machopic_legitimize_pic_address (orig, mode, reg);
33115 }
33116
33117 /* Output a .machine directive for the Darwin assembler, and call
33118 the generic start_file routine. */
33119
33120 static void
33121 rs6000_darwin_file_start (void)
33122 {
33123 static const struct
33124 {
33125 const char *arg;
33126 const char *name;
33127 HOST_WIDE_INT if_set;
33128 } mapping[] = {
33129 { "ppc64", "ppc64", MASK_64BIT },
33130 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33131 { "power4", "ppc970", 0 },
33132 { "G5", "ppc970", 0 },
33133 { "7450", "ppc7450", 0 },
33134 { "7400", "ppc7400", MASK_ALTIVEC },
33135 { "G4", "ppc7400", 0 },
33136 { "750", "ppc750", 0 },
33137 { "740", "ppc750", 0 },
33138 { "G3", "ppc750", 0 },
33139 { "604e", "ppc604e", 0 },
33140 { "604", "ppc604", 0 },
33141 { "603e", "ppc603", 0 },
33142 { "603", "ppc603", 0 },
33143 { "601", "ppc601", 0 },
33144 { NULL, "ppc", 0 } };
33145 const char *cpu_id = "";
33146 size_t i;
33147
33148 rs6000_file_start ();
33149 darwin_file_start ();
33150
33151 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33152
33153 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33154 cpu_id = rs6000_default_cpu;
33155
33156 if (global_options_set.x_rs6000_cpu_index)
33157 cpu_id = processor_target_table[rs6000_cpu_index].name;
33158
33159 /* Look through the mapping array. Pick the first name that either
33160 matches the argument, has a bit set in IF_SET that is also set
33161 in the target flags, or has a NULL name. */
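33161 /* For example, a 64-bit compile stops at the very first entry and
33161 emits ".machine ppc64"; -mcpu=G5 on a 32-bit compile emits
33161 ".machine ppc970" (matched by ISA bits at the "970" entry or by
33161 name at the "G5" entry); the trailing NULL entry makes "ppc" the
33161 fallback. */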
33162
33163 i = 0;
33164 while (mapping[i].arg != NULL
33165 && strcmp (mapping[i].arg, cpu_id) != 0
33166 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33167 i++;
33168
33169 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33170 }
33171
33172 #endif /* TARGET_MACHO */
33173
33174 #if TARGET_ELF
33175 static int
33176 rs6000_elf_reloc_rw_mask (void)
33177 {
33178 if (flag_pic)
33179 return 3;
33180 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33181 return 2;
33182 else
33183 return 0;
33184 }
33185
33186 /* Record an element in the table of global constructors. SYMBOL is
33187 a SYMBOL_REF of the function to be called; PRIORITY is a number
33188 between 0 and MAX_INIT_PRIORITY.
33189
33190 This differs from default_named_section_asm_out_constructor in
33191 that we have special handling for -mrelocatable. */
33192
33193 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33194 static void
33195 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33196 {
33197 const char *section = ".ctors";
33198 char buf[18];
33199
33200 if (priority != DEFAULT_INIT_PRIORITY)
33201 {
33202 sprintf (buf, ".ctors.%.5u",
33203 /* Invert the numbering so the linker puts us in the proper
33204 order; constructors are run from right to left, and the
33205 linker sorts in increasing order. */
33206 MAX_INIT_PRIORITY - priority);
33207 section = buf;
33208 }
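33208 /* E.g. with MAX_INIT_PRIORITY of 65535, a priority 65535 constructor
33208 lands in ".ctors.00000" and a priority 1 constructor in
33208 ".ctors.65534", so the linker's increasing sort produces the
33208 right-to-left execution order described above. */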
33209
33210 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33211 assemble_align (POINTER_SIZE);
33212
33213 if (DEFAULT_ABI == ABI_V4
33214 && (TARGET_RELOCATABLE || flag_pic > 1))
33215 {
33216 fputs ("\t.long (", asm_out_file);
33217 output_addr_const (asm_out_file, symbol);
33218 fputs (")@fixup\n", asm_out_file);
33219 }
33220 else
33221 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33222 }
33223
33224 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33225 static void
33226 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33227 {
33228 const char *section = ".dtors";
33229 char buf[18];
33230
33231 if (priority != DEFAULT_INIT_PRIORITY)
33232 {
33233 sprintf (buf, ".dtors.%.5u",
33234 /* Invert the numbering so the linker puts us in the proper
33235 order; constructors are run from right to left, and the
33236 linker sorts in increasing order. */
33237 MAX_INIT_PRIORITY - priority);
33238 section = buf;
33239 }
33240
33241 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33242 assemble_align (POINTER_SIZE);
33243
33244 if (DEFAULT_ABI == ABI_V4
33245 && (TARGET_RELOCATABLE || flag_pic > 1))
33246 {
33247 fputs ("\t.long (", asm_out_file);
33248 output_addr_const (asm_out_file, symbol);
33249 fputs (")@fixup\n", asm_out_file);
33250 }
33251 else
33252 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33253 }
33254
33255 void
33256 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33257 {
33258 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33259 {
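33260 /* A sketch of the ELFv1 descriptor emitted below for a function
33260 "foo" (with dot-symbols, ".foo" is the code entry point):
33260
33260 .section ".opd","aw"
33260 .align 3
33260 foo:
33260 .quad .foo,.TOC.@tocbase,0
33260 .previous */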
33260 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33261 ASM_OUTPUT_LABEL (file, name);
33262 fputs (DOUBLE_INT_ASM_OP, file);
33263 rs6000_output_function_entry (file, name);
33264 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33265 if (DOT_SYMBOLS)
33266 {
33267 fputs ("\t.size\t", file);
33268 assemble_name (file, name);
33269 fputs (",24\n\t.type\t.", file);
33270 assemble_name (file, name);
33271 fputs (",@function\n", file);
33272 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33273 {
33274 fputs ("\t.globl\t.", file);
33275 assemble_name (file, name);
33276 putc ('\n', file);
33277 }
33278 }
33279 else
33280 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33281 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33282 rs6000_output_function_entry (file, name);
33283 fputs (":\n", file);
33284 return;
33285 }
33286
33287 int uses_toc;
33288 if (DEFAULT_ABI == ABI_V4
33289 && (TARGET_RELOCATABLE || flag_pic > 1)
33290 && !TARGET_SECURE_PLT
33291 && (!constant_pool_empty_p () || crtl->profile)
33292 && (uses_toc = uses_TOC ()))
33293 {
33294 char buf[256];
33295
33296 if (uses_toc == 2)
33297 switch_to_other_text_partition ();
33298 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33299
33300 fprintf (file, "\t.long ");
33301 assemble_name (file, toc_label_name);
33302 need_toc_init = 1;
33303 putc ('-', file);
33304 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33305 assemble_name (file, buf);
33306 putc ('\n', file);
33307 if (uses_toc == 2)
33308 switch_to_other_text_partition ();
33309 }
33310
33311 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33312 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33313
33314 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33315 {
33316 char buf[256];
33317
33318 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33319
33320 fprintf (file, "\t.quad .TOC.-");
33321 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33322 assemble_name (file, buf);
33323 putc ('\n', file);
33324 }
33325
33326 if (DEFAULT_ABI == ABI_AIX)
33327 {
33328 const char *desc_name, *orig_name;
33329
33330 orig_name = (*targetm.strip_name_encoding) (name);
33331 desc_name = orig_name;
33332 while (*desc_name == '.')
33333 desc_name++;
33334
33335 if (TREE_PUBLIC (decl))
33336 fprintf (file, "\t.globl %s\n", desc_name);
33337
33338 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33339 fprintf (file, "%s:\n", desc_name);
33340 fprintf (file, "\t.long %s\n", orig_name);
33341 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33342 fputs ("\t.long 0\n", file);
33343 fprintf (file, "\t.previous\n");
33344 }
33345 ASM_OUTPUT_LABEL (file, name);
33346 }
33347
33348 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33349 static void
33350 rs6000_elf_file_end (void)
33351 {
33352 #ifdef HAVE_AS_GNU_ATTRIBUTE
33353 /* ??? The value emitted depends on options active at file end.
33354 Assume anyone using #pragma or attributes that might change
33355 options knows what they are doing. */
33356 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33357 && rs6000_passes_float)
33358 {
33359 int fp;
33360
33361 if (TARGET_HARD_FLOAT)
33362 fp = 1;
33363 else
33364 fp = 2;
33365 if (rs6000_passes_long_double)
33366 {
33367 if (!TARGET_LONG_DOUBLE_128)
33368 fp |= 2 * 4;
33369 else if (TARGET_IEEEQUAD)
33370 fp |= 3 * 4;
33371 else
33372 fp |= 1 * 4;
33373 }
33374 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33375 }
33376 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33377 {
33378 if (rs6000_passes_vector)
33379 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33380 (TARGET_ALTIVEC_ABI ? 2 : 1));
33381 if (rs6000_returns_struct)
33382 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33383 aix_struct_return ? 2 : 1);
33384 }
33385 #endif
33386 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33387 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33388 file_end_indicate_exec_stack ();
33389 #endif
33390
33391 if (flag_split_stack)
33392 file_end_indicate_split_stack ();
33393
33394 if (cpu_builtin_p)
33395 {
33396 /* We have expanded a CPU builtin, so we need to emit a reference to
33397 the special symbol that LIBC uses to declare it supports the
33398 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33399 switch_to_section (data_section);
33400 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33401 fprintf (asm_out_file, "\t%s %s\n",
33402 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33403 }
33404 }
33405 #endif
33406
33407 #if TARGET_XCOFF
33408
33409 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33410 #define HAVE_XCOFF_DWARF_EXTRAS 0
33411 #endif
33412
33413 static enum unwind_info_type
33414 rs6000_xcoff_debug_unwind_info (void)
33415 {
33416 return UI_NONE;
33417 }
33418
33419 static void
33420 rs6000_xcoff_asm_output_anchor (rtx symbol)
33421 {
33422 char buffer[100];
33423
33424 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33425 SYMBOL_REF_BLOCK_OFFSET (symbol));
33426 fprintf (asm_out_file, "%s", SET_ASM_OP);
33427 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33428 fprintf (asm_out_file, ",");
33429 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33430 fprintf (asm_out_file, "\n");
33431 }
33432
33433 static void
33434 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33435 {
33436 fputs (GLOBAL_ASM_OP, stream);
33437 RS6000_OUTPUT_BASENAME (stream, name);
33438 putc ('\n', stream);
33439 }
33440
33441 /* A get_unnamed_section callback, used for read-only sections.
33442 DIRECTIVE points to the section string variable. */
33443
33444 static void
33445 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33446 {
33447 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33448 *(const char *const *) directive,
33449 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33450 }
33451
33452 /* Likewise for read-write sections. */
33453
33454 static void
33455 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33456 {
33457 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33458 *(const char *const *) directive,
33459 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33460 }
33461
33462 static void
33463 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33464 {
33465 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33466 *(const char *const *) directive,
33467 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33468 }
33469
33470 /* A get_unnamed_section callback, used for switching to toc_section. */
33471
33472 static void
33473 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33474 {
33475 if (TARGET_MINIMAL_TOC)
33476 {
33477 /* toc_section is always selected at least once from
33478 rs6000_xcoff_file_start, so this is guaranteed to be
33479 defined exactly once in each file. */
33480 if (!toc_initialized)
33481 {
33482 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33483 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33484 toc_initialized = 1;
33485 }
33486 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33487 (TARGET_32BIT ? "" : ",3"));
33488 }
33489 else
33490 fputs ("\t.toc\n", asm_out_file);
33491 }
33492
33493 /* Implement TARGET_ASM_INIT_SECTIONS. */
33494
33495 static void
33496 rs6000_xcoff_asm_init_sections (void)
33497 {
33498 read_only_data_section
33499 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33500 &xcoff_read_only_section_name);
33501
33502 private_data_section
33503 = get_unnamed_section (SECTION_WRITE,
33504 rs6000_xcoff_output_readwrite_section_asm_op,
33505 &xcoff_private_data_section_name);
33506
33507 read_only_private_data_section
33508 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33509 &xcoff_private_rodata_section_name);
33510
33511 tls_data_section
33512 = get_unnamed_section (SECTION_TLS,
33513 rs6000_xcoff_output_tls_section_asm_op,
33514 &xcoff_tls_data_section_name);
33515
33516 tls_private_data_section
33517 = get_unnamed_section (SECTION_TLS,
33518 rs6000_xcoff_output_tls_section_asm_op,
33519 &xcoff_private_data_section_name);
33520
33521 toc_section
33522 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33523
33524 readonly_data_section = read_only_data_section;
33525 }
33526
33527 static int
33528 rs6000_xcoff_reloc_rw_mask (void)
33529 {
33530 return 3;
33531 }
33532
33533 static void
33534 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33535 tree decl ATTRIBUTE_UNUSED)
33536 {
33537 int smclass;
33538 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33539
33540 if (flags & SECTION_EXCLUDE)
33541 smclass = 4;
33542 else if (flags & SECTION_DEBUG)
33543 {
33544 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33545 return;
33546 }
33547 else if (flags & SECTION_CODE)
33548 smclass = 0;
33549 else if (flags & SECTION_TLS)
33550 smclass = 3;
33551 else if (flags & SECTION_WRITE)
33552 smclass = 2;
33553 else
33554 smclass = 1;
33555
33556 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33557 (flags & SECTION_CODE) ? "." : "",
33558 name, suffix[smclass], flags & SECTION_ENTSIZE);
33559 }
33560
33561 #define IN_NAMED_SECTION(DECL) \
33562 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33563 && DECL_SECTION_NAME (DECL) != NULL)
33564
33565 static section *
33566 rs6000_xcoff_select_section (tree decl, int reloc,
33567 unsigned HOST_WIDE_INT align)
33568 {
33569 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33570 a named section. */
33571 if (align > BIGGEST_ALIGNMENT)
33572 {
33573 resolve_unique_section (decl, reloc, true);
33574 if (IN_NAMED_SECTION (decl))
33575 return get_named_section (decl, NULL, reloc);
33576 }
33577
33578 if (decl_readonly_section (decl, reloc))
33579 {
33580 if (TREE_PUBLIC (decl))
33581 return read_only_data_section;
33582 else
33583 return read_only_private_data_section;
33584 }
33585 else
33586 {
33587 #if HAVE_AS_TLS
33588 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33589 {
33590 if (TREE_PUBLIC (decl))
33591 return tls_data_section;
33592 else if (bss_initializer_p (decl))
33593 {
33594 /* Convert to COMMON to emit in BSS. */
33595 DECL_COMMON (decl) = 1;
33596 return tls_comm_section;
33597 }
33598 else
33599 return tls_private_data_section;
33600 }
33601 else
33602 #endif
33603 if (TREE_PUBLIC (decl))
33604 return data_section;
33605 else
33606 return private_data_section;
33607 }
33608 }
33609
33610 static void
33611 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33612 {
33613 const char *name;
33614
33615 /* Use select_section for private data and uninitialized data with
33616 alignment <= BIGGEST_ALIGNMENT. */
33617 if (!TREE_PUBLIC (decl)
33618 || DECL_COMMON (decl)
33619 || (DECL_INITIAL (decl) == NULL_TREE
33620 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33621 || DECL_INITIAL (decl) == error_mark_node
33622 || (flag_zero_initialized_in_bss
33623 && initializer_zerop (DECL_INITIAL (decl))))
33624 return;
33625
33626 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33627 name = (*targetm.strip_name_encoding) (name);
33628 set_decl_section_name (decl, name);
33629 }
33630
33631 /* Select section for constant in constant pool.
33632
33633 On RS/6000, all constants are in the private read-only data area.
33634 However, if this is being placed in the TOC it must be output as a
33635 toc entry. */
33636
33637 static section *
33638 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33639 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33640 {
33641 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33642 return toc_section;
33643 else
33644 return read_only_private_data_section;
33645 }
33646
33647 /* Remove any trailing [DS] or the like from the symbol name. */
33648
33649 static const char *
33650 rs6000_xcoff_strip_name_encoding (const char *name)
33651 {
33652 size_t len;
33653 if (*name == '*')
33654 name++;
33655 len = strlen (name);
33656 if (name[len - 1] == ']')
33657 return ggc_alloc_string (name, len - 4);
33658 else
33659 return name;
33660 }
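33660 /* E.g. "foo[DS]" and "*foo[DS]" both become "foo". Note this assumes
33660 a two-character mapping class: the last four characters are dropped
33660 whenever the name ends in ']'. */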
33661
33662 /* Section attributes. AIX is always PIC. */
33663
33664 static unsigned int
33665 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33666 {
33667 unsigned int align;
33668 unsigned int flags = default_section_type_flags (decl, name, reloc);
33669
33670 /* Align to at least UNIT size. */
33671 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33672 align = MIN_UNITS_PER_WORD;
33673 else
33674 /* Increase alignment of large objects if not already stricter. */
33675 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33676 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33677 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33678
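33678 /* The alignment is handed back encoded in the SECTION_ENTSIZE bits
33678 as a log2 byte count, e.g. 8-byte alignment is recorded as 3. */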
33679 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33680 }
33681
33682 /* Output at beginning of assembler file.
33683
33684 Initialize the section names for the RS/6000 at this point.
33685
33686 Specify filename, including full path, to assembler.
33687
33688 We want to go into the TOC section so at least one .toc will be emitted.
33689 Also, in order to output proper .bs/.es pairs, we need at least one static
33690 [RW] section emitted.
33691
33692 Finally, declare mcount when profiling to make the assembler happy. */
33693
33694 static void
33695 rs6000_xcoff_file_start (void)
33696 {
33697 rs6000_gen_section_name (&xcoff_bss_section_name,
33698 main_input_filename, ".bss_");
33699 rs6000_gen_section_name (&xcoff_private_data_section_name,
33700 main_input_filename, ".rw_");
33701 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
33702 main_input_filename, ".rop_");
33703 rs6000_gen_section_name (&xcoff_read_only_section_name,
33704 main_input_filename, ".ro_");
33705 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33706 main_input_filename, ".tls_");
33707 rs6000_gen_section_name (&xcoff_tbss_section_name,
33708 main_input_filename, ".tbss_[UL]");
33709
33710 fputs ("\t.file\t", asm_out_file);
33711 output_quoted_string (asm_out_file, main_input_filename);
33712 fputc ('\n', asm_out_file);
33713 if (write_symbols != NO_DEBUG)
33714 switch_to_section (private_data_section);
33715 switch_to_section (toc_section);
33716 switch_to_section (text_section);
33717 if (profile_flag)
33718 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33719 rs6000_file_start ();
33720 }
33721
33722 /* Output at end of assembler file.
33723 On the RS/6000, referencing data should automatically pull in text. */
33724
33725 static void
33726 rs6000_xcoff_file_end (void)
33727 {
33728 switch_to_section (text_section);
33729 fputs ("_section_.text:\n", asm_out_file);
33730 switch_to_section (data_section);
33731 fputs (TARGET_32BIT
33732 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33733 asm_out_file);
33734 }
33735
33736 struct declare_alias_data
33737 {
33738 FILE *file;
33739 bool function_descriptor;
33740 };
33741
33742 /* Declare alias N. A helper function for symtab_node::call_for_symbol_and_aliases. */
33743
33744 static bool
33745 rs6000_declare_alias (struct symtab_node *n, void *d)
33746 {
33747 struct declare_alias_data *data = (struct declare_alias_data *)d;
33748 /* Main symbol is output specially, because varasm machinery does part of
33749 the job for us - we do not need to declare .globl/lglobs and such. */
33750 if (!n->alias || n->weakref)
33751 return false;
33752
33753 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33754 return false;
33755
33756 /* Prevent assemble_alias from trying to use the .set pseudo-op,
33757 which does not behave as the middle-end expects. */
33758 TREE_ASM_WRITTEN (n->decl) = true;
33759
33760 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33761 char *buffer = (char *) alloca (strlen (name) + 2);
33762 char *p;
33763 int dollar_inside = 0;
33764
33765 strcpy (buffer, name);
33766 p = strchr (buffer, '$');
33767 while (p) {
33768 *p = '_';
33769 dollar_inside++;
33770 p = strchr (p + 1, '$');
33771 }
33772 if (TREE_PUBLIC (n->decl))
33773 {
33774 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33775 {
33776 if (dollar_inside) {
33777 if (data->function_descriptor)
33778 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33779 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33780 }
33781 if (data->function_descriptor)
33782 {
33783 fputs ("\t.globl .", data->file);
33784 RS6000_OUTPUT_BASENAME (data->file, buffer);
33785 putc ('\n', data->file);
33786 }
33787 fputs ("\t.globl ", data->file);
33788 RS6000_OUTPUT_BASENAME (data->file, buffer);
33789 putc ('\n', data->file);
33790 }
33791 #ifdef ASM_WEAKEN_DECL
33792 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33793 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33794 #endif
33795 }
33796 else
33797 {
33798 if (dollar_inside)
33799 {
33800 if (data->function_descriptor)
33801 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33802 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33803 }
33804 if (data->function_descriptor)
33805 {
33806 fputs ("\t.lglobl .", data->file);
33807 RS6000_OUTPUT_BASENAME (data->file, buffer);
33808 putc ('\n', data->file);
33809 }
33810 fputs ("\t.lglobl ", data->file);
33811 RS6000_OUTPUT_BASENAME (data->file, buffer);
33812 putc ('\n', data->file);
33813 }
33814 if (data->function_descriptor)
33815 fputs (".", data->file);
33816 RS6000_OUTPUT_BASENAME (data->file, buffer);
33817 fputs (":\n", data->file);
33818 return false;
33819 }
33820
33821
33822 #ifdef HAVE_GAS_HIDDEN
33823 /* Helper function to calculate visibility of a DECL
33824 and return the value as a const string. */
33825
33826 static const char *
33827 rs6000_xcoff_visibility (tree decl)
33828 {
33829 static const char * const visibility_types[] = {
33830 "", ",protected", ",hidden", ",internal"
33831 };
33832
33833 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33834 return visibility_types[vis];
33835 }
33836 #endif
33837
33838
33839 /* This macro produces the initial definition of a function name.
33840 On the RS/6000, we need to place an extra '.' in the function name and
33841 output the function descriptor.
33842 Dollar signs are converted to underscores.
33843
33844 The csect for the function will have already been created when
33845 text_section was selected. We do have to go back to that csect, however.
33846
33847 The third and fourth parameters to the .function pseudo-op (16 and 044)
33848 are placeholders which no longer have any use.
33849
33850 Because the AIX assembler's .set command has unexpected semantics, we
33851 output all aliases as alternative labels in front of the definition. */
33852
33853 void
33854 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33855 {
33856 char *buffer = (char *) alloca (strlen (name) + 1);
33857 char *p;
33858 int dollar_inside = 0;
33859 struct declare_alias_data data = {file, false};
33860
33861 strcpy (buffer, name);
33862 p = strchr (buffer, '$');
33863 while (p) {
33864 *p = '_';
33865 dollar_inside++;
33866 p = strchr (p + 1, '$');
33867 }
33868 if (TREE_PUBLIC (decl))
33869 {
33870 if (!RS6000_WEAK || !DECL_WEAK (decl))
33871 {
33872 if (dollar_inside) {
33873 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33874 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33875 }
33876 fputs ("\t.globl .", file);
33877 RS6000_OUTPUT_BASENAME (file, buffer);
33878 #ifdef HAVE_GAS_HIDDEN
33879 fputs (rs6000_xcoff_visibility (decl), file);
33880 #endif
33881 putc ('\n', file);
33882 }
33883 }
33884 else
33885 {
33886 if (dollar_inside) {
33887 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33888 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33889 }
33890 fputs ("\t.lglobl .", file);
33891 RS6000_OUTPUT_BASENAME (file, buffer);
33892 putc ('\n', file);
33893 }
33894 fputs ("\t.csect ", file);
33895 RS6000_OUTPUT_BASENAME (file, buffer);
33896 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33897 RS6000_OUTPUT_BASENAME (file, buffer);
33898 fputs (":\n", file);
33899 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33900 &data, true);
33901 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33902 RS6000_OUTPUT_BASENAME (file, buffer);
33903 fputs (", TOC[tc0], 0\n", file);
33904 in_section = NULL;
33905 switch_to_section (function_section (decl));
33906 putc ('.', file);
33907 RS6000_OUTPUT_BASENAME (file, buffer);
33908 fputs (":\n", file);
33909 data.function_descriptor = true;
33910 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33911 &data, true);
33912 if (!DECL_IGNORED_P (decl))
33913 {
33914 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33915 xcoffout_declare_function (file, decl, buffer);
33916 else if (write_symbols == DWARF2_DEBUG)
33917 {
33918 name = (*targetm.strip_name_encoding) (name);
33919 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33920 }
33921 }
33922 return;
33923 }
33924
33925
33926 /* Output assembly language to globalize a symbol from a DECL,
33927 possibly with visibility. */
33928
33929 void
33930 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33931 {
33932 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33933 fputs (GLOBAL_ASM_OP, stream);
33934 RS6000_OUTPUT_BASENAME (stream, name);
33935 #ifdef HAVE_GAS_HIDDEN
33936 fputs (rs6000_xcoff_visibility (decl), stream);
33937 #endif
33938 putc ('\n', stream);
33939 }
33940
33941 /* Output assembly language to define a symbol as COMMON from a DECL,
33942 possibly with visibility. */
33943
33944 void
33945 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33946 tree decl ATTRIBUTE_UNUSED,
33947 const char *name,
33948 unsigned HOST_WIDE_INT size,
33949 unsigned HOST_WIDE_INT align)
33950 {
33951 unsigned HOST_WIDE_INT align2 = 2;
33952
33953 if (align > 32)
33954 align2 = floor_log2 (align / BITS_PER_UNIT);
33955 else if (size > 4)
33956 align2 = 3;
33957
33958 fputs (COMMON_ASM_OP, stream);
33959 RS6000_OUTPUT_BASENAME (stream, name);
33960
33961 fprintf (stream,
33962 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33963 size, align2);
33964
33965 #ifdef HAVE_GAS_HIDDEN
33966 if (decl != NULL)
33967 fputs (rs6000_xcoff_visibility (decl), stream);
33968 #endif
33969 putc ('\n', stream);
33970 }
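33970 /* E.g. (a sketch) a 16-byte object with 128-bit alignment comes out
33970 as ".comm name,16,4", since align2 = floor_log2 (128 / 8) = 4; the
33970 alignment is expressed as a log2 byte count. */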
33971
33972 /* This macro produces the initial definition of an object (variable) name.
33973 Because the AIX assembler's .set command has unexpected semantics, we
33974 output all aliases as alternative labels in front of the definition. */
33975
33976 void
33977 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
33978 {
33979 struct declare_alias_data data = {file, false};
33980 RS6000_OUTPUT_BASENAME (file, name);
33981 fputs (":\n", file);
33982 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33983 &data, true);
33984 }
33985
33986 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
33987
33988 void
33989 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
33990 {
33991 fputs (integer_asm_op (size, FALSE), file);
33992 assemble_name (file, label);
33993 fputs ("-$", file);
33994 }
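33994 /* E.g. for SIZE 4 this typically emits "\t.long Label-$"; the AIX
33994 assembler resolves '$' as the current location, giving a PC-relative
33994 value. */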
33995
33996 /* Output a symbol offset relative to the dbase for the current object.
33997 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
33998 signed offsets.
33999
34000 __gcc_unwind_dbase is embedded in all executables/libraries through
34001 libgcc/config/rs6000/crtdbase.S. */
34002
34003 void
34004 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34005 {
34006 fputs (integer_asm_op (size, FALSE), file);
34007 assemble_name (file, label);
34008 fputs("-__gcc_unwind_dbase", file);
34009 }
34010
34011 #ifdef HAVE_AS_TLS
34012 static void
34013 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34014 {
34015 rtx symbol;
34016 int flags;
34017 const char *symname;
34018
34019 default_encode_section_info (decl, rtl, first);
34020
34021 /* Careful not to prod global register variables. */
34022 if (!MEM_P (rtl))
34023 return;
34024 symbol = XEXP (rtl, 0);
34025 if (!SYMBOL_REF_P (symbol))
34026 return;
34027
34028 flags = SYMBOL_REF_FLAGS (symbol);
34029
34030 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34031 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34032
34033 SYMBOL_REF_FLAGS (symbol) = flags;
34034
34035 /* Append mapping class to extern decls. */
34036 symname = XSTR (symbol, 0);
34037 if (decl /* sync condition with assemble_external () */
34038 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34039 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34040 || TREE_CODE (decl) == FUNCTION_DECL)
34041 && symname[strlen (symname) - 1] != ']')
34042 {
34043 char *newname = (char *) alloca (strlen (symname) + 5);
34044 strcpy (newname, symname);
34045 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34046 ? "[DS]" : "[UA]"));
34047 XSTR (symbol, 0) = ggc_strdup (newname);
34048 }
34049 }
34050 #endif /* HAVE_AS_TLS */
34051 #endif /* TARGET_XCOFF */
34052
34053 void
34054 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34055 const char *name, const char *val)
34056 {
34057 fputs ("\t.weak\t", stream);
34058 RS6000_OUTPUT_BASENAME (stream, name);
34059 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34060 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34061 {
34062 if (TARGET_XCOFF)
34063 fputs ("[DS]", stream);
34064 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34065 if (TARGET_XCOFF)
34066 fputs (rs6000_xcoff_visibility (decl), stream);
34067 #endif
34068 fputs ("\n\t.weak\t.", stream);
34069 RS6000_OUTPUT_BASENAME (stream, name);
34070 }
34071 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34072 if (TARGET_XCOFF)
34073 fputs (rs6000_xcoff_visibility (decl), stream);
34074 #endif
34075 fputc ('\n', stream);
34076 if (val)
34077 {
34078 #ifdef ASM_OUTPUT_DEF
34079 ASM_OUTPUT_DEF (stream, name, val);
34080 #endif
34081 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34082 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34083 {
34084 fputs ("\t.set\t.", stream);
34085 RS6000_OUTPUT_BASENAME (stream, name);
34086 fputs (",.", stream);
34087 RS6000_OUTPUT_BASENAME (stream, val);
34088 fputc ('\n', stream);
34089 }
34090 }
34091 }
34092
34093
34094 /* Return true if INSN should not be copied. */
34095
34096 static bool
34097 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34098 {
34099 return recog_memoized (insn) >= 0
34100 && get_attr_cannot_copy (insn);
34101 }
34102
34103 /* Compute a (partial) cost for rtx X. Return true if the complete
34104 cost has been computed, and false if subexpressions should be
34105 scanned. In either case, *TOTAL contains the cost result. */
34106
34107 static bool
34108 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34109 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34110 {
34111 int code = GET_CODE (x);
34112
34113 switch (code)
34114 {
34115 /* On the RS/6000, if it is valid in the insn, it is free. */
34116 case CONST_INT:
34117 if (((outer_code == SET
34118 || outer_code == PLUS
34119 || outer_code == MINUS)
34120 && (satisfies_constraint_I (x)
34121 || satisfies_constraint_L (x)))
34122 || (outer_code == AND
34123 && (satisfies_constraint_K (x)
34124 || (mode == SImode
34125 ? satisfies_constraint_L (x)
34126 : satisfies_constraint_J (x))))
34127 || ((outer_code == IOR || outer_code == XOR)
34128 && (satisfies_constraint_K (x)
34129 || (mode == SImode
34130 ? satisfies_constraint_L (x)
34131 : satisfies_constraint_J (x))))
34132 || outer_code == ASHIFT
34133 || outer_code == ASHIFTRT
34134 || outer_code == LSHIFTRT
34135 || outer_code == ROTATE
34136 || outer_code == ROTATERT
34137 || outer_code == ZERO_EXTRACT
34138 || (outer_code == MULT
34139 && satisfies_constraint_I (x))
34140 || ((outer_code == DIV || outer_code == UDIV
34141 || outer_code == MOD || outer_code == UMOD)
34142 && exact_log2 (INTVAL (x)) >= 0)
34143 || (outer_code == COMPARE
34144 && (satisfies_constraint_I (x)
34145 || satisfies_constraint_K (x)))
34146 || ((outer_code == EQ || outer_code == NE)
34147 && (satisfies_constraint_I (x)
34148 || satisfies_constraint_K (x)
34149 || (mode == SImode
34150 ? satisfies_constraint_L (x)
34151 : satisfies_constraint_J (x))))
34152 || (outer_code == GTU
34153 && satisfies_constraint_I (x))
34154 || (outer_code == LTU
34155 && satisfies_constraint_P (x)))
34156 {
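34157 /* E.g. the 5 in (plus (reg) (const_int 5)) satisfies the 'I'
34157 constraint and folds into a single addi, so the constant itself
34157 is free. */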
34157 *total = 0;
34158 return true;
34159 }
34160 else if ((outer_code == PLUS
34161 && reg_or_add_cint_operand (x, VOIDmode))
34162 || (outer_code == MINUS
34163 && reg_or_sub_cint_operand (x, VOIDmode))
34164 || ((outer_code == SET
34165 || outer_code == IOR
34166 || outer_code == XOR)
34167 && (INTVAL (x)
34168 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34169 {
34170 *total = COSTS_N_INSNS (1);
34171 return true;
34172 }
34173 /* FALLTHRU */
34174
34175 case CONST_DOUBLE:
34176 case CONST_WIDE_INT:
34177 case CONST:
34178 case HIGH:
34179 case SYMBOL_REF:
34180 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34181 return true;
34182
34183 case MEM:
34184 /* When optimizing for size, MEM should be slightly more expensive
34185 than generating the address, e.g., (plus (reg) (const)).
34186 L1 cache latency is about two instructions. */
34187 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34188 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34189 *total += COSTS_N_INSNS (100);
34190 return true;
34191
34192 case LABEL_REF:
34193 *total = 0;
34194 return true;
34195
34196 case PLUS:
34197 case MINUS:
34198 if (FLOAT_MODE_P (mode))
34199 *total = rs6000_cost->fp;
34200 else
34201 *total = COSTS_N_INSNS (1);
34202 return false;
34203
34204 case MULT:
34205 if (CONST_INT_P (XEXP (x, 1))
34206 && satisfies_constraint_I (XEXP (x, 1)))
34207 {
34208 if (INTVAL (XEXP (x, 1)) >= -256
34209 && INTVAL (XEXP (x, 1)) <= 255)
34210 *total = rs6000_cost->mulsi_const9;
34211 else
34212 *total = rs6000_cost->mulsi_const;
34213 }
34214 else if (mode == SFmode)
34215 *total = rs6000_cost->fp;
34216 else if (FLOAT_MODE_P (mode))
34217 *total = rs6000_cost->dmul;
34218 else if (mode == DImode)
34219 *total = rs6000_cost->muldi;
34220 else
34221 *total = rs6000_cost->mulsi;
34222 return false;
34223
34224 case FMA:
34225 if (mode == SFmode)
34226 *total = rs6000_cost->fp;
34227 else
34228 *total = rs6000_cost->dmul;
34229 break;
34230
34231 case DIV:
34232 case MOD:
34233 if (FLOAT_MODE_P (mode))
34234 {
34235 *total = mode == DFmode ? rs6000_cost->ddiv
34236 : rs6000_cost->sdiv;
34237 return false;
34238 }
34239 /* FALLTHRU */
34240
34241 case UDIV:
34242 case UMOD:
34243 if (CONST_INT_P (XEXP (x, 1))
34244 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34245 {
34246 if (code == DIV || code == MOD)
34247 /* Shift, addze */
34248 *total = COSTS_N_INSNS (2);
34249 else
34250 /* Shift */
34251 *total = COSTS_N_INSNS (1);
34252 }
34253 else
34254 {
34255 if (GET_MODE (XEXP (x, 1)) == DImode)
34256 *total = rs6000_cost->divdi;
34257 else
34258 *total = rs6000_cost->divsi;
34259 }
34260 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34261 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34262 *total += COSTS_N_INSNS (2);
34263 return false;
34264
34265 case CTZ:
34266 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34267 return false;
34268
34269 case FFS:
34270 *total = COSTS_N_INSNS (4);
34271 return false;
34272
34273 case POPCOUNT:
34274 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34275 return false;
34276
34277 case PARITY:
34278 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34279 return false;
34280
34281 case NOT:
34282 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34283 *total = 0;
34284 else
34285 *total = COSTS_N_INSNS (1);
34286 return false;
34287
34288 case AND:
34289 if (CONST_INT_P (XEXP (x, 1)))
34290 {
34291 rtx left = XEXP (x, 0);
34292 rtx_code left_code = GET_CODE (left);
34293
34294 /* rotate-and-mask: 1 insn. */
34295 if ((left_code == ROTATE
34296 || left_code == ASHIFT
34297 || left_code == LSHIFTRT)
34298 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34299 {
34300 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34301 if (!CONST_INT_P (XEXP (left, 1)))
34302 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34303 *total += COSTS_N_INSNS (1);
34304 return true;
34305 }
34306
34307 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34308 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34309 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34310 || (val & 0xffff) == val
34311 || (val & 0xffff0000) == val
34312 || ((val & 0xffff) == 0 && mode == SImode))
34313 {
34314 *total = rtx_cost (left, mode, AND, 0, speed);
34315 *total += COSTS_N_INSNS (1);
34316 return true;
34317 }
34318
34319 /* 2 insns. */
34320 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34321 {
34322 *total = rtx_cost (left, mode, AND, 0, speed);
34323 *total += COSTS_N_INSNS (2);
34324 return true;
34325 }
34326 }
34327
34328 *total = COSTS_N_INSNS (1);
34329 return false;
34330
34331 case IOR:
34332 /* FIXME */
34333 *total = COSTS_N_INSNS (1);
34334 return true;
34335
34336 case CLZ:
34337 case XOR:
34338 case ZERO_EXTRACT:
34339 *total = COSTS_N_INSNS (1);
34340 return false;
34341
34342 case ASHIFT:
34343 /* EXTSWSLI combines a sign extend with a shift; don't cost the
34344 sign extend and the shift separately within the insn. */
34345 if (TARGET_EXTSWSLI && mode == DImode
34346 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34347 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34348 {
34349 *total = 0;
34350 return false;
34351 }
34352 /* fall through */
34353
34354 case ASHIFTRT:
34355 case LSHIFTRT:
34356 case ROTATE:
34357 case ROTATERT:
34358 /* Handle mul_highpart. */
34359 if (outer_code == TRUNCATE
34360 && GET_CODE (XEXP (x, 0)) == MULT)
34361 {
34362 if (mode == DImode)
34363 *total = rs6000_cost->muldi;
34364 else
34365 *total = rs6000_cost->mulsi;
34366 return true;
34367 }
34368 else if (outer_code == AND)
34369 *total = 0;
34370 else
34371 *total = COSTS_N_INSNS (1);
34372 return false;
34373
34374 case SIGN_EXTEND:
34375 case ZERO_EXTEND:
34376 if (MEM_P (XEXP (x, 0)))
34377 *total = 0;
34378 else
34379 *total = COSTS_N_INSNS (1);
34380 return false;
34381
34382 case COMPARE:
34383 case NEG:
34384 case ABS:
34385 if (!FLOAT_MODE_P (mode))
34386 {
34387 *total = COSTS_N_INSNS (1);
34388 return false;
34389 }
34390 /* FALLTHRU */
34391
34392 case FLOAT:
34393 case UNSIGNED_FLOAT:
34394 case FIX:
34395 case UNSIGNED_FIX:
34396 case FLOAT_TRUNCATE:
34397 *total = rs6000_cost->fp;
34398 return false;
34399
34400 case FLOAT_EXTEND:
34401 if (mode == DFmode)
34402 *total = rs6000_cost->sfdf_convert;
34403 else
34404 *total = rs6000_cost->fp;
34405 return false;
34406
34407 case UNSPEC:
34408 switch (XINT (x, 1))
34409 {
34410 case UNSPEC_FRSP:
34411 *total = rs6000_cost->fp;
34412 return true;
34413
34414 default:
34415 break;
34416 }
34417 break;
34418
34419 case CALL:
34420 case IF_THEN_ELSE:
34421 if (!speed)
34422 {
34423 *total = COSTS_N_INSNS (1);
34424 return true;
34425 }
34426 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34427 {
34428 *total = rs6000_cost->fp;
34429 return false;
34430 }
34431 break;
34432
34433 case NE:
34434 case EQ:
34435 case GTU:
34436 case LTU:
34437 /* Carry bit requires mode == Pmode.
34438 NEG or PLUS already counted so only add one. */
34439 if (mode == Pmode
34440 && (outer_code == NEG || outer_code == PLUS))
34441 {
34442 *total = COSTS_N_INSNS (1);
34443 return true;
34444 }
34445 /* FALLTHRU */
34446
34447 case GT:
34448 case LT:
34449 case UNORDERED:
34450 if (outer_code == SET)
34451 {
34452 if (XEXP (x, 1) == const0_rtx)
34453 {
34454 *total = COSTS_N_INSNS (2);
34455 return true;
34456 }
34457 else
34458 {
34459 *total = COSTS_N_INSNS (3);
34460 return false;
34461 }
34462 }
34463 /* CC COMPARE. */
34464 if (outer_code == COMPARE)
34465 {
34466 *total = 0;
34467 return true;
34468 }
34469 break;
34470
34471 default:
34472 break;
34473 }
34474
34475 return false;
34476 }
34477
34478 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34479
34480 static bool
34481 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34482 int opno, int *total, bool speed)
34483 {
34484 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34485
34486 fprintf (stderr,
34487 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34488 "opno = %d, total = %d, speed = %s, x:\n",
34489 ret ? "complete" : "scan inner",
34490 GET_MODE_NAME (mode),
34491 GET_RTX_NAME (outer_code),
34492 opno,
34493 *total,
34494 speed ? "true" : "false");
34495
34496 debug_rtx (x);
34497
34498 return ret;
34499 }
34500
34501 static int
34502 rs6000_insn_cost (rtx_insn *insn, bool speed)
34503 {
34504 if (recog_memoized (insn) < 0)
34505 return 0;
34506
34507 if (!speed)
34508 return get_attr_length (insn);
34509
34510 int cost = get_attr_cost (insn);
34511 if (cost > 0)
34512 return cost;
34513
34514 int n = get_attr_length (insn) / 4;
34515 enum attr_type type = get_attr_type (insn);
34516
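34516 /* N is the insn's length in instructions; e.g. a plain 4-byte load
34516 (N == 1) below costs COSTS_N_INSNS (2), folding in a cycle of load
34516 latency. */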
34517 switch (type)
34518 {
34519 case TYPE_LOAD:
34520 case TYPE_FPLOAD:
34521 case TYPE_VECLOAD:
34522 cost = COSTS_N_INSNS (n + 1);
34523 break;
34524
34525 case TYPE_MUL:
34526 switch (get_attr_size (insn))
34527 {
34528 case SIZE_8:
34529 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34530 break;
34531 case SIZE_16:
34532 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34533 break;
34534 case SIZE_32:
34535 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34536 break;
34537 case SIZE_64:
34538 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34539 break;
34540 default:
34541 gcc_unreachable ();
34542 }
34543 break;
34544 case TYPE_DIV:
34545 switch (get_attr_size (insn))
34546 {
34547 case SIZE_32:
34548 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34549 break;
34550 case SIZE_64:
34551 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34552 break;
34553 default:
34554 gcc_unreachable ();
34555 }
34556 break;
34557
34558 case TYPE_FP:
34559 cost = n * rs6000_cost->fp;
34560 break;
34561 case TYPE_DMUL:
34562 cost = n * rs6000_cost->dmul;
34563 break;
34564 case TYPE_SDIV:
34565 cost = n * rs6000_cost->sdiv;
34566 break;
34567 case TYPE_DDIV:
34568 cost = n * rs6000_cost->ddiv;
34569 break;
34570
34571 case TYPE_SYNC:
34572 case TYPE_LOAD_L:
34573 case TYPE_MFCR:
34574 case TYPE_MFCRF:
34575 cost = COSTS_N_INSNS (n + 2);
34576 break;
34577
34578 default:
34579 cost = COSTS_N_INSNS (n);
34580 }
34581
34582 return cost;
34583 }
34584
34585 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34586
34587 static int
34588 rs6000_debug_address_cost (rtx x, machine_mode mode,
34589 addr_space_t as, bool speed)
34590 {
34591 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34592
34593 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34594 ret, speed ? "true" : "false");
34595 debug_rtx (x);
34596
34597 return ret;
34598 }
34599
34600
34601 /* A C expression returning the cost of moving data from a register of class
34602 CLASS1 to one of CLASS2. */
34603
34604 static int
34605 rs6000_register_move_cost (machine_mode mode,
34606 reg_class_t from, reg_class_t to)
34607 {
34608 int ret;
34609 reg_class_t rclass;
34610
34611 if (TARGET_DEBUG_COST)
34612 dbg_cost_ctrl++;
34613
34614 /* If we have VSX, we can easily move between FPR or Altivec registers,
34615 otherwise we can only easily move within classes.
34616 Do this first so we give best-case answers for union classes
34617 containing both gprs and vsx regs. */
34618 HARD_REG_SET to_vsx, from_vsx;
34619 COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
34620 AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
34621 COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
34622 AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
34623 if (!hard_reg_set_empty_p (to_vsx)
34624 && !hard_reg_set_empty_p (from_vsx)
34625 && (TARGET_VSX
34626 || hard_reg_set_intersect_p (to_vsx, from_vsx)))
34627 {
34628 int reg = FIRST_FPR_REGNO;
34629 if (TARGET_VSX
34630 || (TEST_HARD_REG_BIT (to_vsx, FIRST_ALTIVEC_REGNO)
34631 && TEST_HARD_REG_BIT (from_vsx, FIRST_ALTIVEC_REGNO)))
34632 reg = FIRST_ALTIVEC_REGNO;
34633 ret = 2 * hard_regno_nregs (reg, mode);
34634 }
34635
34636 /* Moves from/to GENERAL_REGS. */
34637 else if ((rclass = from, reg_classes_intersect_p (to, GENERAL_REGS))
34638 || (rclass = to, reg_classes_intersect_p (from, GENERAL_REGS)))
34639 {
34640 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34641 {
34642 if (TARGET_DIRECT_MOVE)
34643 {
34644 /* Keep the cost for direct moves above that for within
34645 a register class even if the actual processor cost is
34646 comparable. We do this because a direct move insn
34647 can't be a nop, whereas with ideal register
34648 allocation a move within the same class might turn
34649 out to be a nop. */
34650 if (rs6000_tune == PROCESSOR_POWER9)
34651 ret = 3 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34652 else
34653 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34654 /* SFmode requires a conversion when moving between gprs
34655 and vsx. */
34656 if (mode == SFmode)
34657 ret += 2;
34658 }
34659 else
34660 ret = (rs6000_memory_move_cost (mode, rclass, false)
34661 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34662 }
34663
34664 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34665 shift. */
34666 else if (rclass == CR_REGS)
34667 ret = 4;
34668
34669 /* For those processors that have slow LR/CTR moves, make them more
34670 expensive than memory in order to bias spills to memory.  */
34671 else if ((rs6000_tune == PROCESSOR_POWER6
34672 || rs6000_tune == PROCESSOR_POWER7
34673 || rs6000_tune == PROCESSOR_POWER8
34674 || rs6000_tune == PROCESSOR_POWER9)
34675 && reg_class_subset_p (rclass, SPECIAL_REGS))
34676 ret = 6 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34677
34678 else
34679 /* A move will cost one instruction per GPR moved. */
34680 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34681 }
34682
34683 /* Everything else has to go through GENERAL_REGS. */
34684 else
34685 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34686 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34687
34688 if (TARGET_DEBUG_COST)
34689 {
34690 if (dbg_cost_ctrl == 1)
34691 fprintf (stderr,
34692 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
34693 ret, GET_MODE_NAME (mode), reg_class_names[from],
34694 reg_class_names[to]);
34695 dbg_cost_ctrl--;
34696 }
34697
34698 return ret;
34699 }
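
/* Worked example (illustrative): for TImode, which occupies two GPRs,
   a GPR-to-GPR move costs 2*2 = 4 above, while a direct move between
   GPRs and VSX registers on power9 costs 3*2 = 6, deliberately above
   the within-class cost so it never looks like a possible nop.  */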
34700
34701 /* A C expression returning the cost of moving data of MODE from a register to
34702 or from memory. */
34703
34704 static int
34705 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34706 bool in ATTRIBUTE_UNUSED)
34707 {
34708 int ret;
34709
34710 if (TARGET_DEBUG_COST)
34711 dbg_cost_ctrl++;
34712
34713 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34714 ret = 4 * hard_regno_nregs (0, mode);
34715 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34716 || reg_classes_intersect_p (rclass, VSX_REGS)))
34717 ret = 4 * hard_regno_nregs (32, mode);
34718 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34719 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34720 else
34721 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34722
34723 if (TARGET_DEBUG_COST)
34724 {
34725 if (dbg_cost_ctrl == 1)
34726 fprintf (stderr,
34727 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34728 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34729 dbg_cost_ctrl--;
34730 }
34731
34732 return ret;
34733 }
34734
34735 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
34736
34737 The register allocator chooses GEN_OR_VSX_REGS for the allocno
34738 class if GENERAL_REGS and VSX_REGS cost is lower than the memory
34739 cost. This happens a lot when TARGET_DIRECT_MOVE makes the register
34740 move cost between GENERAL_REGS and VSX_REGS low.
34741
34742 It might seem reasonable to use a union class. After all, if usage
34743 of vsr is low and gpr high, it might make sense to spill gpr to vsr
34744 rather than memory. However, in cases where register pressure of
34745 both is high, like the cactus_adm spec test, allowing
34746 GEN_OR_VSX_REGS as the allocno class results in bad decisions in
34747 the first scheduling pass. This is partly due to an allocno of
34748 GEN_OR_VSX_REGS wrongly contributing to the GENERAL_REGS pressure
34749 class, which gives too high a pressure for GENERAL_REGS and too low
34750 for VSX_REGS. So, force a choice of the subclass here.
34751
34752 The best class is also the union if GENERAL_REGS and VSX_REGS have
34753 the same cost. In that case we do use GEN_OR_VSX_REGS as the
34754 allocno class, since trying to narrow down the class by regno mode
34755 is prone to error. For example, SImode is allowed in VSX regs and
34756 in some cases (e.g. gcc.target/powerpc/p9-xxbr-3.c do_bswap32_vect)
34757 it would be wrong to choose an allocno class of GENERAL_REGS based on
34758 SImode. */
34759
34760 static reg_class_t
34761 rs6000_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
34762 reg_class_t allocno_class,
34763 reg_class_t best_class)
34764 {
34765 switch (allocno_class)
34766 {
34767 case GEN_OR_VSX_REGS:
34768 /* best_class must be a subset of allocno_class. */
34769 gcc_checking_assert (best_class == GEN_OR_VSX_REGS
34770 || best_class == GEN_OR_FLOAT_REGS
34771 || best_class == VSX_REGS
34772 || best_class == ALTIVEC_REGS
34773 || best_class == FLOAT_REGS
34774 || best_class == GENERAL_REGS
34775 || best_class == BASE_REGS);
34776 /* Use best_class but choose wider classes when copying from the
34777 wider class to best_class is cheap. This mimics IRA choice
34778 of allocno class. */
34779 if (best_class == BASE_REGS)
34780 return GENERAL_REGS;
34781 if (TARGET_VSX
34782 && (best_class == FLOAT_REGS || best_class == ALTIVEC_REGS))
34783 return VSX_REGS;
34784 return best_class;
34785
34786 default:
34787 break;
34788 }
34789
34790 return allocno_class;
34791 }
34792
34793 /* Returns a code for a target-specific builtin that implements
34794 the reciprocal of the function, or NULL_TREE if not available. */
34795
34796 static tree
34797 rs6000_builtin_reciprocal (tree fndecl)
34798 {
34799 switch (DECL_FUNCTION_CODE (fndecl))
34800 {
34801 case VSX_BUILTIN_XVSQRTDP:
34802 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34803 return NULL_TREE;
34804
34805 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34806
34807 case VSX_BUILTIN_XVSQRTSP:
34808 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34809 return NULL_TREE;
34810
34811 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34812
34813 default:
34814 return NULL_TREE;
34815 }
34816 }
34817
34818 /* Load up a constant. If the mode is a vector mode, splat the value across
34819 all of the vector elements. */
34820
34821 static rtx
34822 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34823 {
34824 rtx reg;
34825
34826 if (mode == SFmode || mode == DFmode)
34827 {
34828 rtx d = const_double_from_real_value (dconst, mode);
34829 reg = force_reg (mode, d);
34830 }
34831 else if (mode == V4SFmode)
34832 {
34833 rtx d = const_double_from_real_value (dconst, SFmode);
34834 rtvec v = gen_rtvec (4, d, d, d, d);
34835 reg = gen_reg_rtx (mode);
34836 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34837 }
34838 else if (mode == V2DFmode)
34839 {
34840 rtx d = const_double_from_real_value (dconst, DFmode);
34841 rtvec v = gen_rtvec (2, d, d);
34842 reg = gen_reg_rtx (mode);
34843 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34844 }
34845 else
34846 gcc_unreachable ();
34847
34848 return reg;
34849 }
34850
34851 /* Generate an FMA instruction. */
34852
34853 static void
34854 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34855 {
34856 machine_mode mode = GET_MODE (target);
34857 rtx dst;
34858
34859 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34860 gcc_assert (dst != NULL);
34861
34862 if (dst != target)
34863 emit_move_insn (target, dst);
34864 }
34865
34866 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34867
34868 static void
34869 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34870 {
34871 machine_mode mode = GET_MODE (dst);
34872 rtx r;
34873
34874 /* This is a tad more complicated, since the fnma_optab is for
34875 a different expression: fma(-m1, m2, a), which is the same
34876 thing except in the case of signed zeros.
34877
34878 Fortunately we know that if FMA is supported that FNMSUB is
34879 also supported in the ISA. Just expand it directly. */
34880
34881 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34882
34883 r = gen_rtx_NEG (mode, a);
34884 r = gen_rtx_FMA (mode, m1, m2, r);
34885 r = gen_rtx_NEG (mode, r);
34886 emit_insn (gen_rtx_SET (dst, r));
34887 }
34888
34889 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34890 add a reg_note saying that this was a division. Support both scalar and
34891 vector divide. Assumes no trapping math and finite arguments. */
34892
34893 void
34894 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34895 {
34896 machine_mode mode = GET_MODE (dst);
34897 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34898 int i;
34899
34900 /* Low precision estimates guarantee 5 bits of accuracy. High
34901 precision estimates guarantee 14 bits of accuracy. SFmode
34902 requires 23 bits of accuracy. DFmode requires 52 bits of
34903 accuracy. Each pass at least doubles the accuracy, leading
34904 to the following. */
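/* Concretely: a 5-bit estimate doubles to 10, 20, then 40 bits after
   three passes (covering SFmode's 23), and to 80 after a fourth
   (covering DFmode's 52); a 14-bit estimate needs one pass for SFmode
   and two for DFmode.  */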
34905 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34906 if (mode == DFmode || mode == V2DFmode)
34907 passes++;
34908
34909 enum insn_code code = optab_handler (smul_optab, mode);
34910 insn_gen_fn gen_mul = GEN_FCN (code);
34911
34912 gcc_assert (code != CODE_FOR_nothing);
34913
34914 one = rs6000_load_constant_and_splat (mode, dconst1);
34915
34916 /* x0 = 1./d estimate */
34917 x0 = gen_reg_rtx (mode);
34918 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34919 UNSPEC_FRES)));
34920
34921 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34922 if (passes > 1) {
34923
34924 /* e0 = 1. - d * x0 */
34925 e0 = gen_reg_rtx (mode);
34926 rs6000_emit_nmsub (e0, d, x0, one);
34927
34928 /* x1 = x0 + e0 * x0 */
34929 x1 = gen_reg_rtx (mode);
34930 rs6000_emit_madd (x1, e0, x0, x0);
34931
34932 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34933 ++i, xprev = xnext, eprev = enext) {
34934
34935 /* enext = eprev * eprev */
34936 enext = gen_reg_rtx (mode);
34937 emit_insn (gen_mul (enext, eprev, eprev));
34938
34939 /* xnext = xprev + enext * xprev */
34940 xnext = gen_reg_rtx (mode);
34941 rs6000_emit_madd (xnext, enext, xprev, xprev);
34942 }
34943
34944 } else
34945 xprev = x0;
34946
34947 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34948
34949 /* u = n * xprev */
34950 u = gen_reg_rtx (mode);
34951 emit_insn (gen_mul (u, n, xprev));
34952
34953 /* v = n - (d * u) */
34954 v = gen_reg_rtx (mode);
34955 rs6000_emit_nmsub (v, d, u, n);
34956
34957 /* dst = (v * xprev) + u */
34958 rs6000_emit_madd (dst, v, xprev, u);
34959
34960 if (note_p)
34961 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34962 }
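
/* An illustrative sketch, kept out of the build: the same refinement
   sequence written in plain C on doubles, with 1.0/d standing in for
   the hardware fres estimate and a made-up function name.  */
#if 0
static double
swdiv_model (double n, double d)
{
  double x = 1.0 / d;		/* x0 = 1/d estimate (fres).  */
  double e = 1.0 - d * x;	/* e0 = 1 - d*x0 (fnmsub).  */
  x = x + e * x;		/* x1 = x0 + e0*x0 (fmadd).  */
  double u = n * x;		/* u = n * x1.  */
  double v = n - d * u;	/* v = n - d*u (fnmsub).  */
  return v * x + u;		/* dst = v*x1 + u (fmadd).  */
}
#endif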
34963
34964 /* Goldschmidt's Algorithm for single/double-precision floating point
34965 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34966
34967 void
34968 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34969 {
34970 machine_mode mode = GET_MODE (src);
34971 rtx e = gen_reg_rtx (mode);
34972 rtx g = gen_reg_rtx (mode);
34973 rtx h = gen_reg_rtx (mode);
34974
34975 /* Low precision estimates guarantee 5 bits of accuracy. High
34976 precision estimates guarantee 14 bits of accuracy. SFmode
34977 requires 23 bits of accuracy. DFmode requires 52 bits of
34978 accuracy. Each pass at least doubles the accuracy, leading
34979 to the following. */
34980 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34981 if (mode == DFmode || mode == V2DFmode)
34982 passes++;
34983
34984 int i;
34985 rtx mhalf;
34986 enum insn_code code = optab_handler (smul_optab, mode);
34987 insn_gen_fn gen_mul = GEN_FCN (code);
34988
34989 gcc_assert (code != CODE_FOR_nothing);
34990
34991 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
34992
34993 /* e = rsqrt estimate */
34994 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
34995 UNSPEC_RSQRT)));
34996
34997 /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0). */
34998 if (!recip)
34999 {
35000 rtx zero = force_reg (mode, CONST0_RTX (mode));
35001
35002 if (mode == SFmode)
35003 {
35004 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35005 e, zero, mode, 0);
35006 if (target != e)
35007 emit_move_insn (e, target);
35008 }
35009 else
35010 {
35011 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35012 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35013 }
35014 }
35015
35016 /* g = sqrt estimate. */
35017 emit_insn (gen_mul (g, e, src));
35018 /* h = 1/(2*sqrt) estimate. */
35019 emit_insn (gen_mul (h, e, mhalf));
35020
35021 if (recip)
35022 {
35023 if (passes == 1)
35024 {
35025 rtx t = gen_reg_rtx (mode);
35026 rs6000_emit_nmsub (t, g, h, mhalf);
35027 /* Apply correction directly to 1/rsqrt estimate. */
35028 rs6000_emit_madd (dst, e, t, e);
35029 }
35030 else
35031 {
35032 for (i = 0; i < passes; i++)
35033 {
35034 rtx t1 = gen_reg_rtx (mode);
35035 rtx g1 = gen_reg_rtx (mode);
35036 rtx h1 = gen_reg_rtx (mode);
35037
35038 rs6000_emit_nmsub (t1, g, h, mhalf);
35039 rs6000_emit_madd (g1, g, t1, g);
35040 rs6000_emit_madd (h1, h, t1, h);
35041
35042 g = g1;
35043 h = h1;
35044 }
35045 /* Multiply by 2 for 1/rsqrt. */
35046 emit_insn (gen_add3_insn (dst, h, h));
35047 }
35048 }
35049 else
35050 {
35051 rtx t = gen_reg_rtx (mode);
35052 rs6000_emit_nmsub (t, g, h, mhalf);
35053 rs6000_emit_madd (dst, g, t, g);
35054 }
35055
35056 return;
35057 }
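
/* An illustrative sketch, kept out of the build: the Goldschmidt
   iteration above in plain C on doubles, with 1/sqrt standing in for
   the hardware rsqrt estimate and a made-up function name.  */
#if 0
static double
swsqrt_model (double src, int recip)
{
  double e = 1.0 / __builtin_sqrt (src);	/* rsqrt estimate.  */
  double g = src * e;				/* sqrt estimate.  */
  double h = e * 0.5;				/* 1/(2*sqrt) estimate.  */
  if (recip)
    {
      for (int i = 0; i < 2; i++)	/* pass count varies, see above.  */
	{
	  double t = 0.5 - g * h;	/* fnmsub against mhalf.  */
	  g = g + g * t;		/* refine sqrt estimate.  */
	  h = h + h * t;		/* refine 1/(2*sqrt) estimate.  */
	}
      return h + h;			/* 2 * 1/(2*sqrt) = 1/sqrt.  */
    }
  double t = 0.5 - g * h;		/* single correction for sqrt.  */
  return g + g * t;
}
#endif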
35058
35059 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35060 (Power7) targets. DST is the target, and SRC is the argument operand. */
35061
35062 void
35063 rs6000_emit_popcount (rtx dst, rtx src)
35064 {
35065 machine_mode mode = GET_MODE (dst);
35066 rtx tmp1, tmp2;
35067
35068 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35069 if (TARGET_POPCNTD)
35070 {
35071 if (mode == SImode)
35072 emit_insn (gen_popcntdsi2 (dst, src));
35073 else
35074 emit_insn (gen_popcntddi2 (dst, src));
35075 return;
35076 }
35077
35078 tmp1 = gen_reg_rtx (mode);
35079
35080 if (mode == SImode)
35081 {
35082 emit_insn (gen_popcntbsi2 (tmp1, src));
35083 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35084 NULL_RTX, 0);
35085 tmp2 = force_reg (SImode, tmp2);
35086 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35087 }
35088 else
35089 {
35090 emit_insn (gen_popcntbdi2 (tmp1, src));
35091 tmp2 = expand_mult (DImode, tmp1,
35092 GEN_INT ((HOST_WIDE_INT)
35093 0x01010101 << 32 | 0x01010101),
35094 NULL_RTX, 0);
35095 tmp2 = force_reg (DImode, tmp2);
35096 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35097 }
35098 }
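
/* An illustrative sketch, kept out of the build: the DImode fallback
   above in plain C, with per-byte __builtin_popcountll standing in
   for the popcntb result.  */
#if 0
static unsigned int
popcount64_model (unsigned long long x)
{
  unsigned long long b = 0;

  /* B holds the per-byte population counts, as popcntb produces.  */
  for (int i = 0; i < 64; i += 8)
    b |= (unsigned long long) __builtin_popcountll ((x >> i) & 0xff) << i;

  /* Multiplying by 0x0101...01 sums every byte into the top byte.  */
  return (unsigned int) ((b * 0x0101010101010101ULL) >> 56);
}
#endif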
35099
35100
35101 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35102 target, and SRC is the argument operand. */
35103
35104 void
35105 rs6000_emit_parity (rtx dst, rtx src)
35106 {
35107 machine_mode mode = GET_MODE (dst);
35108 rtx tmp;
35109
35110 tmp = gen_reg_rtx (mode);
35111
35112 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35113 if (TARGET_CMPB)
35114 {
35115 if (mode == SImode)
35116 {
35117 emit_insn (gen_popcntbsi2 (tmp, src));
35118 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35119 }
35120 else
35121 {
35122 emit_insn (gen_popcntbdi2 (tmp, src));
35123 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35124 }
35125 return;
35126 }
35127
35128 if (mode == SImode)
35129 {
35130 /* Is mult+shift >= shift+xor+shift+xor? */
35131 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35132 {
35133 rtx tmp1, tmp2, tmp3, tmp4;
35134
35135 tmp1 = gen_reg_rtx (SImode);
35136 emit_insn (gen_popcntbsi2 (tmp1, src));
35137
35138 tmp2 = gen_reg_rtx (SImode);
35139 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35140 tmp3 = gen_reg_rtx (SImode);
35141 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35142
35143 tmp4 = gen_reg_rtx (SImode);
35144 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35145 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35146 }
35147 else
35148 rs6000_emit_popcount (tmp, src);
35149 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35150 }
35151 else
35152 {
35153 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35154 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35155 {
35156 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35157
35158 tmp1 = gen_reg_rtx (DImode);
35159 emit_insn (gen_popcntbdi2 (tmp1, src));
35160
35161 tmp2 = gen_reg_rtx (DImode);
35162 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35163 tmp3 = gen_reg_rtx (DImode);
35164 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35165
35166 tmp4 = gen_reg_rtx (DImode);
35167 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35168 tmp5 = gen_reg_rtx (DImode);
35169 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35170
35171 tmp6 = gen_reg_rtx (DImode);
35172 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35173 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35174 }
35175 else
35176 rs6000_emit_popcount (tmp, src);
35177 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35178 }
35179 }
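
/* An illustrative sketch, kept out of the build: the DImode shift+xor
   fallback above in plain C; B again stands in for the popcntb
   result.  */
#if 0
static unsigned int
parity64_model (unsigned long long x)
{
  unsigned long long b = 0;
  for (int i = 0; i < 64; i += 8)
    b |= (unsigned long long) __builtin_popcountll ((x >> i) & 0xff) << i;

  /* Each fold xors byte counts pairwise; the low bit of a count is
     its parity, and xor adds parities mod 2.  */
  b ^= b >> 32;
  b ^= b >> 16;
  b ^= b >> 8;
  return (unsigned int) (b & 1);
}
#endif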
35180
35181 /* Expand an Altivec constant permutation for little endian mode.
35182 OP0 and OP1 are the input vectors and TARGET is the output vector.
35183 SEL specifies the constant permutation vector.
35184
35185 There are two issues: First, the two input operands must be
35186 swapped so that together they form a double-wide array in LE
35187 order. Second, the vperm instruction has surprising behavior
35188 in LE mode: it interprets the elements of the source vectors
35189 in BE mode ("left to right") and interprets the elements of
35190 the destination vector in LE mode ("right to left"). To
35191 correct for this, we must subtract each element of the permute
35192 control vector from 31.
35193
35194 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35195 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35196 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35197 serve as the permute control vector. Then, in BE mode,
35198
35199 vperm 9,10,11,12
35200
35201 places the desired result in vr9. However, in LE mode the
35202 vector contents will be
35203
35204 vr10 = 00000003 00000002 00000001 00000000
35205 vr11 = 00000007 00000006 00000005 00000004
35206
35207 The result of the vperm using the same permute control vector is
35208
35209 vr9 = 05000000 07000000 01000000 03000000
35210
35211 That is, the leftmost 4 bytes of vr10 are interpreted as the
35212 source for the rightmost 4 bytes of vr9, and so on.
35213
35214 If we change the permute control vector to
35215
35216 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35217
35218 and issue
35219
35220 vperm 9,11,10,12
35221
35222 we get the desired
35223
35224 vr9 = 00000006 00000004 00000002 00000000. */
35225
35226 static void
35227 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35228 const vec_perm_indices &sel)
35229 {
35230 unsigned int i;
35231 rtx perm[16];
35232 rtx constv, unspec;
35233
35234 /* Unpack and adjust the constant selector. */
35235 for (i = 0; i < 16; ++i)
35236 {
35237 unsigned int elt = 31 - (sel[i] & 31);
35238 perm[i] = GEN_INT (elt);
35239 }
35240
35241 /* Expand to a permute, swapping the inputs and using the
35242 adjusted selector. */
35243 if (!REG_P (op0))
35244 op0 = force_reg (V16QImode, op0);
35245 if (!REG_P (op1))
35246 op1 = force_reg (V16QImode, op1);
35247
35248 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35249 constv = force_reg (V16QImode, constv);
35250 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35251 UNSPEC_VPERM);
35252 if (!REG_P (target))
35253 {
35254 rtx tmp = gen_reg_rtx (V16QImode);
35255 emit_move_insn (tmp, unspec);
35256 unspec = tmp;
35257 }
35258
35259 emit_move_insn (target, unspec);
35260 }
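
/* An illustrative sketch, kept out of the build: the selector
   adjustment performed above, in plain C with a made-up name.  */
#if 0
static void
vperm_le_adjust_model (const unsigned char sel[16], unsigned char adj[16])
{
  /* Reflect each index through 31; together with swapping the two
     input operands at the emit site, this recovers the BE vperm
     semantics in LE mode.  */
  for (int i = 0; i < 16; i++)
    adj[i] = 31 - (sel[i] & 31);
}
#endif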
35261
35262 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35263 permute control vector. But here it's not a constant, so we must
35264 generate a vector NAND or NOR to do the adjustment. */
35265
35266 void
35267 altivec_expand_vec_perm_le (rtx operands[4])
35268 {
35269 rtx notx, iorx, unspec;
35270 rtx target = operands[0];
35271 rtx op0 = operands[1];
35272 rtx op1 = operands[2];
35273 rtx sel = operands[3];
35274 rtx tmp = target;
35275 rtx norreg = gen_reg_rtx (V16QImode);
35276 machine_mode mode = GET_MODE (target);
35277
35278 /* Get everything in regs so the pattern matches. */
35279 if (!REG_P (op0))
35280 op0 = force_reg (mode, op0);
35281 if (!REG_P (op1))
35282 op1 = force_reg (mode, op1);
35283 if (!REG_P (sel))
35284 sel = force_reg (V16QImode, sel);
35285 if (!REG_P (target))
35286 tmp = gen_reg_rtx (mode);
35287
35288 if (TARGET_P9_VECTOR)
35289 {
35290 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35291 UNSPEC_VPERMR);
35292 }
35293 else
35294 {
35295 /* Invert the selector with a VNAND if available, else a VNOR.
35296 The VNAND is preferred for future fusion opportunities. */
35297 notx = gen_rtx_NOT (V16QImode, sel);
35298 iorx = (TARGET_P8_VECTOR
35299 ? gen_rtx_IOR (V16QImode, notx, notx)
35300 : gen_rtx_AND (V16QImode, notx, notx));
35301 emit_insn (gen_rtx_SET (norreg, iorx));
35302
35303 /* Permute with operands reversed and adjusted selector. */
35304 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35305 UNSPEC_VPERM);
35306 }
35307
35308 /* Copy into target, possibly by way of a register. */
35309 if (!REG_P (target))
35310 {
35311 emit_move_insn (tmp, unspec);
35312 unspec = tmp;
35313 }
35314
35315 emit_move_insn (target, unspec);
35316 }
35317
35318 /* Expand an Altivec constant permutation. Return true if we match
35319 an efficient implementation; false to fall back to VPERM.
35320
35321 OP0 and OP1 are the input vectors and TARGET is the output vector.
35322 SEL specifies the constant permutation vector. */
35323
35324 static bool
35325 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35326 const vec_perm_indices &sel)
35327 {
35328 struct altivec_perm_insn {
35329 HOST_WIDE_INT mask;
35330 enum insn_code impl;
35331 unsigned char perm[16];
35332 };
35333 static const struct altivec_perm_insn patterns[] = {
35334 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35335 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35336 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35337 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35338 { OPTION_MASK_ALTIVEC,
35339 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35340 : CODE_FOR_altivec_vmrglb_direct),
35341 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35342 { OPTION_MASK_ALTIVEC,
35343 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35344 : CODE_FOR_altivec_vmrglh_direct),
35345 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35346 { OPTION_MASK_ALTIVEC,
35347 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35348 : CODE_FOR_altivec_vmrglw_direct),
35349 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35350 { OPTION_MASK_ALTIVEC,
35351 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35352 : CODE_FOR_altivec_vmrghb_direct),
35353 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35354 { OPTION_MASK_ALTIVEC,
35355 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35356 : CODE_FOR_altivec_vmrghh_direct),
35357 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35358 { OPTION_MASK_ALTIVEC,
35359 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35360 : CODE_FOR_altivec_vmrghw_direct),
35361 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35362 { OPTION_MASK_P8_VECTOR,
35363 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35364 : CODE_FOR_p8_vmrgow_v4sf_direct),
35365 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35366 { OPTION_MASK_P8_VECTOR,
35367 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35368 : CODE_FOR_p8_vmrgew_v4sf_direct),
35369 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35370 };
35371
35372 unsigned int i, j, elt, which;
35373 unsigned char perm[16];
35374 rtx x;
35375 bool one_vec;
35376
35377 /* Unpack the constant selector. */
35378 for (i = which = 0; i < 16; ++i)
35379 {
35380 elt = sel[i] & 31;
35381 which |= (elt < 16 ? 1 : 2);
35382 perm[i] = elt;
35383 }
35384
35385 /* Simplify the constant selector based on operands. */
35386 switch (which)
35387 {
35388 default:
35389 gcc_unreachable ();
35390
35391 case 3:
35392 one_vec = false;
35393 if (!rtx_equal_p (op0, op1))
35394 break;
35395 /* FALLTHRU */
35396
35397 case 2:
35398 for (i = 0; i < 16; ++i)
35399 perm[i] &= 15;
35400 op0 = op1;
35401 one_vec = true;
35402 break;
35403
35404 case 1:
35405 op1 = op0;
35406 one_vec = true;
35407 break;
35408 }
35409
35410 /* Look for splat patterns. */
35411 if (one_vec)
35412 {
35413 elt = perm[0];
35414
35415 for (i = 0; i < 16; ++i)
35416 if (perm[i] != elt)
35417 break;
35418 if (i == 16)
35419 {
35420 if (!BYTES_BIG_ENDIAN)
35421 elt = 15 - elt;
35422 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35423 return true;
35424 }
35425
35426 if (elt % 2 == 0)
35427 {
35428 for (i = 0; i < 16; i += 2)
35429 if (perm[i] != elt || perm[i + 1] != elt + 1)
35430 break;
35431 if (i == 16)
35432 {
35433 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35434 x = gen_reg_rtx (V8HImode);
35435 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35436 GEN_INT (field)));
35437 emit_move_insn (target, gen_lowpart (V16QImode, x));
35438 return true;
35439 }
35440 }
35441
35442 if (elt % 4 == 0)
35443 {
35444 for (i = 0; i < 16; i += 4)
35445 if (perm[i] != elt
35446 || perm[i + 1] != elt + 1
35447 || perm[i + 2] != elt + 2
35448 || perm[i + 3] != elt + 3)
35449 break;
35450 if (i == 16)
35451 {
35452 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35453 x = gen_reg_rtx (V4SImode);
35454 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35455 GEN_INT (field)));
35456 emit_move_insn (target, gen_lowpart (V16QImode, x));
35457 return true;
35458 }
35459 }
35460 }
35461
35462 /* Look for merge and pack patterns. */
35463 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35464 {
35465 bool swapped;
35466
35467 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35468 continue;
35469
35470 elt = patterns[j].perm[0];
35471 if (perm[0] == elt)
35472 swapped = false;
35473 else if (perm[0] == elt + 16)
35474 swapped = true;
35475 else
35476 continue;
35477 for (i = 1; i < 16; ++i)
35478 {
35479 elt = patterns[j].perm[i];
35480 if (swapped)
35481 elt = (elt >= 16 ? elt - 16 : elt + 16);
35482 else if (one_vec && elt >= 16)
35483 elt -= 16;
35484 if (perm[i] != elt)
35485 break;
35486 }
35487 if (i == 16)
35488 {
35489 enum insn_code icode = patterns[j].impl;
35490 machine_mode omode = insn_data[icode].operand[0].mode;
35491 machine_mode imode = insn_data[icode].operand[1].mode;
35492
35493 /* For little-endian, don't use vpkuwum and vpkuhum if the
35494 underlying vector type is not V4SI and V8HI, respectively.
35495 For example, using vpkuwum with a V8HI picks up the even
35496 halfwords (BE numbering) when the even halfwords (LE
35497 numbering) are what we need. */
35498 if (!BYTES_BIG_ENDIAN
35499 && icode == CODE_FOR_altivec_vpkuwum_direct
35500 && ((REG_P (op0)
35501 && GET_MODE (op0) != V4SImode)
35502 || (SUBREG_P (op0)
35503 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35504 continue;
35505 if (!BYTES_BIG_ENDIAN
35506 && icode == CODE_FOR_altivec_vpkuhum_direct
35507 && ((REG_P (op0)
35508 && GET_MODE (op0) != V8HImode)
35509 || (SUBREG_P (op0)
35510 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35511 continue;
35512
35513 /* For little-endian, the two input operands must be swapped
35514 (or swapped back) to ensure proper right-to-left numbering
35515 from 0 to 2N-1. */
35516 if (swapped ^ !BYTES_BIG_ENDIAN)
35517 std::swap (op0, op1);
35518 if (imode != V16QImode)
35519 {
35520 op0 = gen_lowpart (imode, op0);
35521 op1 = gen_lowpart (imode, op1);
35522 }
35523 if (omode == V16QImode)
35524 x = target;
35525 else
35526 x = gen_reg_rtx (omode);
35527 emit_insn (GEN_FCN (icode) (x, op0, op1));
35528 if (omode != V16QImode)
35529 emit_move_insn (target, gen_lowpart (V16QImode, x));
35530 return true;
35531 }
35532 }
35533
35534 if (!BYTES_BIG_ENDIAN)
35535 {
35536 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35537 return true;
35538 }
35539
35540 return false;
35541 }
35542
35543 /* Expand a VSX Permute Doubleword constant permutation.
35544 Return true if we match an efficient implementation. */
35545
35546 static bool
35547 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35548 unsigned char perm0, unsigned char perm1)
35549 {
35550 rtx x;
35551
35552 /* If both selectors come from the same operand, fold to single op. */
35553 if ((perm0 & 2) == (perm1 & 2))
35554 {
35555 if (perm0 & 2)
35556 op0 = op1;
35557 else
35558 op1 = op0;
35559 }
35560 /* If both operands are equal, fold to a simpler permutation. */
35561 if (rtx_equal_p (op0, op1))
35562 {
35563 perm0 = perm0 & 1;
35564 perm1 = (perm1 & 1) + 2;
35565 }
35566 /* If the first selector comes from the second operand, swap. */
35567 else if (perm0 & 2)
35568 {
35569 if (perm1 & 2)
35570 return false;
35571 perm0 -= 2;
35572 perm1 += 2;
35573 std::swap (op0, op1);
35574 }
35575 /* If the second selector does not come from the second operand, fail. */
35576 else if ((perm1 & 2) == 0)
35577 return false;
35578
35579 /* Success! */
35580 if (target != NULL)
35581 {
35582 machine_mode vmode, dmode;
35583 rtvec v;
35584
35585 vmode = GET_MODE (target);
35586 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35587 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35588 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35589 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35590 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35591 emit_insn (gen_rtx_SET (target, x));
35592 }
35593 return true;
35594 }
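
/* For reference (illustrative): PERM0 and PERM1 each index one
   doubleword of the op0:op1 concatenation, so values 0 and 1 select
   from op0 and values 2 and 3 from op1; bit 1 of a selector is thus
   the operand it reads and bit 0 the doubleword within it.  */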
35595
35596 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35597
35598 static bool
35599 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35600 rtx op1, const vec_perm_indices &sel)
35601 {
35602 bool testing_p = !target;
35603
35604 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35605 if (TARGET_ALTIVEC && testing_p)
35606 return true;
35607
35608 /* Check for ps_merge* or xxpermdi insns. */
35609 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35610 {
35611 if (testing_p)
35612 {
35613 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35614 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35615 }
35616 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35617 return true;
35618 }
35619
35620 if (TARGET_ALTIVEC)
35621 {
35622 /* Force the target-independent code to lower to V16QImode. */
35623 if (vmode != V16QImode)
35624 return false;
35625 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35626 return true;
35627 }
35628
35629 return false;
35630 }
35631
35632 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35633 OP0 and OP1 are the input vectors and TARGET is the output vector.
35634 PERM specifies the constant permutation vector. */
35635
35636 static void
35637 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35638 machine_mode vmode, const vec_perm_builder &perm)
35639 {
35640 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35641 if (x != target)
35642 emit_move_insn (target, x);
35643 }
35644
35645 /* Expand an extract even operation. */
35646
35647 void
35648 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35649 {
35650 machine_mode vmode = GET_MODE (target);
35651 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35652 vec_perm_builder perm (nelt, nelt, 1);
35653
35654 for (i = 0; i < nelt; i++)
35655 perm.quick_push (i * 2);
35656
35657 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35658 }
35659
35660 /* Expand a vector interleave operation. */
35661
35662 void
35663 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35664 {
35665 machine_mode vmode = GET_MODE (target);
35666 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35667 vec_perm_builder perm (nelt, nelt, 1);
35668
35669 high = (highp ? 0 : nelt / 2);
35670 for (i = 0; i < nelt / 2; i++)
35671 {
35672 perm.quick_push (i + high);
35673 perm.quick_push (i + nelt + high);
35674 }
35675
35676 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35677 }
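
/* Worked example (illustrative): for V4SImode the extract-even
   selector built above is {0, 2, 4, 6}, and the interleave selector
   is {0, 4, 1, 5} when HIGHP and {2, 6, 3, 7} otherwise.  */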
35678
35679 /* Scale a V2DF vector SRC by 2**SCALE, placing the result in TGT. */
35680 void
35681 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35682 {
35683 HOST_WIDE_INT hwi_scale (scale);
35684 REAL_VALUE_TYPE r_pow;
35685 rtvec v = rtvec_alloc (2);
35686 rtx elt;
35687 rtx scale_vec = gen_reg_rtx (V2DFmode);
35688 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35689 elt = const_double_from_real_value (r_pow, DFmode);
35690 RTVEC_ELT (v, 0) = elt;
35691 RTVEC_ELT (v, 1) = elt;
35692 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35693 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35694 }
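
/* For example (illustrative), rs6000_scale_v2df (tgt, src, 4) splats
   2.0**4 = 16.0 into a V2DF constant and multiplies SRC by it.  */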
35695
35696 /* Return an RTX representing where to find the function value of a
35697 function returning MODE. */
35698 static rtx
35699 rs6000_complex_function_value (machine_mode mode)
35700 {
35701 unsigned int regno;
35702 rtx r1, r2;
35703 machine_mode inner = GET_MODE_INNER (mode);
35704 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35705
35706 if (TARGET_FLOAT128_TYPE
35707 && (mode == KCmode
35708 || (mode == TCmode && TARGET_IEEEQUAD)))
35709 regno = ALTIVEC_ARG_RETURN;
35710
35711 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35712 regno = FP_ARG_RETURN;
35713
35714 else
35715 {
35716 regno = GP_ARG_RETURN;
35717
35718 /* 32-bit is OK since it'll go in r3/r4. */
35719 if (TARGET_32BIT && inner_bytes >= 4)
35720 return gen_rtx_REG (mode, regno);
35721 }
35722
35723 if (inner_bytes >= 8)
35724 return gen_rtx_REG (mode, regno);
35725
35726 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35727 const0_rtx);
35728 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35729 GEN_INT (inner_bytes));
35730 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35731 }
35732
35733 /* Return an rtx describing a return value of MODE as a PARALLEL
35734 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35735 stride REG_STRIDE. */
35736
35737 static rtx
35738 rs6000_parallel_return (machine_mode mode,
35739 int n_elts, machine_mode elt_mode,
35740 unsigned int regno, unsigned int reg_stride)
35741 {
35742 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35743
35744 int i;
35745 for (i = 0; i < n_elts; i++)
35746 {
35747 rtx r = gen_rtx_REG (elt_mode, regno);
35748 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35749 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35750 regno += reg_stride;
35751 }
35752
35753 return par;
35754 }
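
/* Illustrative example: rs6000_parallel_return (DImode, 2, SImode,
   GP_ARG_RETURN, 1) describes a 64-bit value returned in r3:r4 as

     (parallel:DI [(expr_list (reg:SI 3) (const_int 0))
		   (expr_list (reg:SI 4) (const_int 4))])

   assuming GP_ARG_RETURN is r3.  */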
35755
35756 /* Target hook for TARGET_FUNCTION_VALUE.
35757
35758 An integer value is in r3 and a floating-point value is in fp1,
35759 unless -msoft-float. */
35760
35761 static rtx
35762 rs6000_function_value (const_tree valtype,
35763 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35764 bool outgoing ATTRIBUTE_UNUSED)
35765 {
35766 machine_mode mode;
35767 unsigned int regno;
35768 machine_mode elt_mode;
35769 int n_elts;
35770
35771 /* Special handling for structs in darwin64. */
35772 if (TARGET_MACHO
35773 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35774 {
35775 CUMULATIVE_ARGS valcum;
35776 rtx valret;
35777
35778 valcum.words = 0;
35779 valcum.fregno = FP_ARG_MIN_REG;
35780 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35781 /* Do a trial code generation as if this were going to be passed as
35782 an argument; if any part goes in memory, we return NULL. */
35783 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35784 if (valret)
35785 return valret;
35786 /* Otherwise fall through to standard ABI rules. */
35787 }
35788
35789 mode = TYPE_MODE (valtype);
35790
35791 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35792 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35793 {
35794 int first_reg, n_regs;
35795
35796 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35797 {
35798 /* _Decimal128 must use even/odd register pairs. */
35799 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35800 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35801 }
35802 else
35803 {
35804 first_reg = ALTIVEC_ARG_RETURN;
35805 n_regs = 1;
35806 }
35807
35808 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35809 }
35810
35811 /* Some return value types need to be split in -mpowerpc64, 32-bit ABI. */
35812 if (TARGET_32BIT && TARGET_POWERPC64)
35813 switch (mode)
35814 {
35815 default:
35816 break;
35817 case E_DImode:
35818 case E_SCmode:
35819 case E_DCmode:
35820 case E_TCmode:
35821 int count = GET_MODE_SIZE (mode) / 4;
35822 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35823 }
35824
35825 if ((INTEGRAL_TYPE_P (valtype)
35826 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35827 || POINTER_TYPE_P (valtype))
35828 mode = TARGET_32BIT ? SImode : DImode;
35829
35830 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35831 /* _Decimal128 must use an even/odd register pair. */
35832 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35833 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35834 && !FLOAT128_VECTOR_P (mode))
35835 regno = FP_ARG_RETURN;
35836 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35837 && targetm.calls.split_complex_arg)
35838 return rs6000_complex_function_value (mode);
35839 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35840 return register is used in both cases, and we won't see V2DImode/V2DFmode
35841 for pure altivec, combine the two cases. */
35842 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35843 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35844 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35845 regno = ALTIVEC_ARG_RETURN;
35846 else
35847 regno = GP_ARG_RETURN;
35848
35849 return gen_rtx_REG (mode, regno);
35850 }
35851
35852 /* Define how to find the value returned by a library function
35853 assuming the value has mode MODE. */
35854 rtx
35855 rs6000_libcall_value (machine_mode mode)
35856 {
35857 unsigned int regno;
35858
35859 /* Long long return values need to be split in -mpowerpc64, 32-bit ABI. */
35860 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35861 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35862
35863 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35864 /* _Decimal128 must use an even/odd register pair. */
35865 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35866 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35867 regno = FP_ARG_RETURN;
35868 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35869 return register is used in both cases, and we won't see V2DImode/V2DFmode
35870 for pure altivec, combine the two cases. */
35871 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35872 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35873 regno = ALTIVEC_ARG_RETURN;
35874 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35875 return rs6000_complex_function_value (mode);
35876 else
35877 regno = GP_ARG_RETURN;
35878
35879 return gen_rtx_REG (mode, regno);
35880 }
35881
35882 /* Compute register pressure classes. We implement the target hook to avoid
35883 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
35884 lead to incorrect estimates of the number of available registers and therefore
35885 increased register pressure/spill. */
35886 static int
35887 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35888 {
35889 int n;
35890
35891 n = 0;
35892 pressure_classes[n++] = GENERAL_REGS;
35893 if (TARGET_VSX)
35894 pressure_classes[n++] = VSX_REGS;
35895 else
35896 {
35897 if (TARGET_ALTIVEC)
35898 pressure_classes[n++] = ALTIVEC_REGS;
35899 if (TARGET_HARD_FLOAT)
35900 pressure_classes[n++] = FLOAT_REGS;
35901 }
35902 pressure_classes[n++] = CR_REGS;
35903 pressure_classes[n++] = SPECIAL_REGS;
35904
35905 return n;
35906 }
35907
35908 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35909 Frame pointer elimination is automatically handled.
35910
35911 For the RS/6000, if frame pointer elimination is being done, we would like
35912 to convert ap into fp, not sp.
35913
35914 We need r30 if -mminimal-toc was specified and there are constant pool
35915 references. */
35916
35917 static bool
35918 rs6000_can_eliminate (const int from, const int to)
35919 {
35920 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35921 ? ! frame_pointer_needed
35922 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35923 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35924 || constant_pool_empty_p ()
35925 : true);
35926 }
35927
35928 /* Define the offset between two registers, FROM to be eliminated and its
35929 replacement TO, at the start of a routine. */
35930 HOST_WIDE_INT
35931 rs6000_initial_elimination_offset (int from, int to)
35932 {
35933 rs6000_stack_t *info = rs6000_stack_info ();
35934 HOST_WIDE_INT offset;
35935
35936 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35937 offset = info->push_p ? 0 : -info->total_size;
35938 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35939 {
35940 offset = info->push_p ? 0 : -info->total_size;
35941 if (FRAME_GROWS_DOWNWARD)
35942 offset += info->fixed_size + info->vars_size + info->parm_size;
35943 }
35944 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35945 offset = FRAME_GROWS_DOWNWARD
35946 ? info->fixed_size + info->vars_size + info->parm_size
35947 : 0;
35948 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35949 offset = info->total_size;
35950 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35951 offset = info->push_p ? info->total_size : 0;
35952 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35953 offset = 0;
35954 else
35955 gcc_unreachable ();
35956
35957 return offset;
35958 }
35959
35960 /* Fill in sizes of registers used by unwinder. */
35961
35962 static void
35963 rs6000_init_dwarf_reg_sizes_extra (tree address)
35964 {
35965 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35966 {
35967 int i;
35968 machine_mode mode = TYPE_MODE (char_type_node);
35969 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35970 rtx mem = gen_rtx_MEM (BLKmode, addr);
35971 rtx value = gen_int_mode (16, mode);
35972
35973 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35974 The unwinder still needs to know the size of Altivec registers. */
35975
35976 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35977 {
35978 int column = DWARF_REG_TO_UNWIND_COLUMN
35979 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
35980 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
35981
35982 emit_move_insn (adjust_address (mem, mode, offset), value);
35983 }
35984 }
35985 }
35986
35987 /* Map internal gcc register numbers to debug format register numbers.
35988 FORMAT specifies the type of debug register number to use:
35989 0 -- debug information, except for frame-related sections
35990 1 -- DWARF .debug_frame section
35991 2 -- DWARF .eh_frame section */
35992
35993 unsigned int
35994 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
35995 {
35996 /* On some platforms, we use the standard DWARF register
35997 numbering for .debug_info and .debug_frame. */
35998 if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
35999 {
36000 #ifdef RS6000_USE_DWARF_NUMBERING
36001 if (regno <= 31)
36002 return regno;
36003 if (FP_REGNO_P (regno))
36004 return regno - FIRST_FPR_REGNO + 32;
36005 if (ALTIVEC_REGNO_P (regno))
36006 return regno - FIRST_ALTIVEC_REGNO + 1124;
36007 if (regno == LR_REGNO)
36008 return 108;
36009 if (regno == CTR_REGNO)
36010 return 109;
36011 if (regno == CA_REGNO)
36012 return 101; /* XER */
36013 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36014 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36015 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36016 to the DWARF reg for CR. */
36017 if (format == 1 && regno == CR2_REGNO)
36018 return 64;
36019 if (CR_REGNO_P (regno))
36020 return regno - CR0_REGNO + 86;
36021 if (regno == VRSAVE_REGNO)
36022 return 356;
36023 if (regno == VSCR_REGNO)
36024 return 67;
36025
36026 /* These do not make much sense. */
36027 if (regno == FRAME_POINTER_REGNUM)
36028 return 111;
36029 if (regno == ARG_POINTER_REGNUM)
36030 return 67;
36031 if (regno == 64)
36032 return 100;
36033
36034 gcc_unreachable ();
36035 #endif
36036 }
36037
36038 /* We use the GCC 7 (and before) internal number for non-DWARF debug
36039 information, and also for .eh_frame. */
36040 /* Translate the regnos to their numbers in GCC 7 (and before). */
36041 if (regno <= 31)
36042 return regno;
36043 if (FP_REGNO_P (regno))
36044 return regno - FIRST_FPR_REGNO + 32;
36045 if (ALTIVEC_REGNO_P (regno))
36046 return regno - FIRST_ALTIVEC_REGNO + 77;
36047 if (regno == LR_REGNO)
36048 return 65;
36049 if (regno == CTR_REGNO)
36050 return 66;
36051 if (regno == CA_REGNO)
36052 return 76; /* XER */
36053 if (CR_REGNO_P (regno))
36054 return regno - CR0_REGNO + 68;
36055 if (regno == VRSAVE_REGNO)
36056 return 109;
36057 if (regno == VSCR_REGNO)
36058 return 110;
36059
36060 if (regno == FRAME_POINTER_REGNUM)
36061 return 111;
36062 if (regno == ARG_POINTER_REGNUM)
36063 return 67;
36064 if (regno == 64)
36065 return 64;
36066
36067 gcc_unreachable ();
36068 }
36069
36070 /* Target hook for eh_return_filter_mode. */
36071 static scalar_int_mode
36072 rs6000_eh_return_filter_mode (void)
36073 {
36074 return TARGET_32BIT ? SImode : word_mode;
36075 }
36076
36077 /* Target hook for translate_mode_attribute. */
36078 static machine_mode
36079 rs6000_translate_mode_attribute (machine_mode mode)
36080 {
36081 if ((FLOAT128_IEEE_P (mode)
36082 && ieee128_float_type_node == long_double_type_node)
36083 || (FLOAT128_IBM_P (mode)
36084 && ibm128_float_type_node == long_double_type_node))
36085 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36086 return mode;
36087 }
36088
36089 /* Target hook for scalar_mode_supported_p. */
36090 static bool
36091 rs6000_scalar_mode_supported_p (scalar_mode mode)
36092 {
36093 /* -m32 does not support TImode. This is the default, from
36094 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36095 same ABI as for -m32. But default_scalar_mode_supported_p allows
36096 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36097 for -mpowerpc64. */
36098 if (TARGET_32BIT && mode == TImode)
36099 return false;
36100
36101 if (DECIMAL_FLOAT_MODE_P (mode))
36102 return default_decimal_float_supported_p ();
36103 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36104 return true;
36105 else
36106 return default_scalar_mode_supported_p (mode);
36107 }
36108
36109 /* Target hook for vector_mode_supported_p. */
36110 static bool
36111 rs6000_vector_mode_supported_p (machine_mode mode)
36112 {
36113 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36114 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36115 double-double. */
36116 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36117 return true;
36118
36119 else
36120 return false;
36121 }
36122
36123 /* Target hook for floatn_mode. */
36124 static opt_scalar_float_mode
36125 rs6000_floatn_mode (int n, bool extended)
36126 {
36127 if (extended)
36128 {
36129 switch (n)
36130 {
36131 case 32:
36132 return DFmode;
36133
36134 case 64:
36135 if (TARGET_FLOAT128_TYPE)
36136 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36137 else
36138 return opt_scalar_float_mode ();
36139
36140 case 128:
36141 return opt_scalar_float_mode ();
36142
36143 default:
36144 /* Those are the only valid _FloatNx types. */
36145 gcc_unreachable ();
36146 }
36147 }
36148 else
36149 {
36150 switch (n)
36151 {
36152 case 32:
36153 return SFmode;
36154
36155 case 64:
36156 return DFmode;
36157
36158 case 128:
36159 if (TARGET_FLOAT128_TYPE)
36160 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36161 else
36162 return opt_scalar_float_mode ();
36163
36164 default:
36165 return opt_scalar_float_mode ();
36166 }
36167 }
36168
36169 }
36170
36171 /* Target hook for c_mode_for_suffix. */
36172 static machine_mode
36173 rs6000_c_mode_for_suffix (char suffix)
36174 {
36175 if (TARGET_FLOAT128_TYPE)
36176 {
36177 if (suffix == 'q' || suffix == 'Q')
36178 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36179
36180 /* At the moment, we are not defining a suffix for IBM extended double.
36181 If/when the default for -mabi=ieeelongdouble is changed, and we want
36182 to support __ibm128 constants in legacy library code, we may need to
36183 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36184 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36185 __float80 constants. */
36186 }
36187
36188 return VOIDmode;
36189 }
36190
36191 /* Target hook for invalid_arg_for_unprototyped_fn. */
36192 static const char *
36193 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36194 {
36195 return (!rs6000_darwin64_abi
36196 && typelist == 0
36197 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36198 && (funcdecl == NULL_TREE
36199 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36200 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36201 ? N_("AltiVec argument passed to unprototyped function")
36202 : NULL;
36203 }
36204
36205 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36206 setup by using __stack_chk_fail_local hidden function instead of
36207 calling __stack_chk_fail directly. Otherwise it is better to call
36208 __stack_chk_fail directly. */
36209
36210 static tree ATTRIBUTE_UNUSED
36211 rs6000_stack_protect_fail (void)
36212 {
36213 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36214 ? default_hidden_stack_protect_fail ()
36215 : default_external_stack_protect_fail ();
36216 }
36217
36218 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36219
36220 #if TARGET_ELF
36221 static unsigned HOST_WIDE_INT
36222 rs6000_asan_shadow_offset (void)
36223 {
36224 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36225 }
36226 #endif
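
/* For reference: the AddressSanitizer runtime maps an address to its
   shadow as (addr >> 3) + offset, so the values above put the shadow
   region at 1<<41 on 64-bit and 1<<29 on 32-bit targets.  */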
36227 \f
36228 /* Mask options that we want to support inside of attribute((target)) and
36229 #pragma GCC target operations. Note, we do not include things like
36230 64/32-bit, endianness, hard/soft floating point, etc. that would have
36231 different calling sequences. */
36232
36233 struct rs6000_opt_mask {
36234 const char *name; /* option name */
36235 HOST_WIDE_INT mask; /* mask to set */
36236 bool invert; /* invert sense of mask */
36237 bool valid_target; /* option is a target option */
36238 };
36239
36240 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36241 {
36242 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36243 { "cmpb", OPTION_MASK_CMPB, false, true },
36244 { "crypto", OPTION_MASK_CRYPTO, false, true },
36245 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36246 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36247 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36248 false, true },
36249 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36250 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36251 { "fprnd", OPTION_MASK_FPRND, false, true },
36252 { "hard-dfp", OPTION_MASK_DFP, false, true },
36253 { "htm", OPTION_MASK_HTM, false, true },
36254 { "isel", OPTION_MASK_ISEL, false, true },
36255 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36256 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36257 { "modulo", OPTION_MASK_MODULO, false, true },
36258 { "mulhw", OPTION_MASK_MULHW, false, true },
36259 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36260 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36261 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36262 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36263 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36264 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36265 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36266 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36267 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36268 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36269 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36270 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36271 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36272 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36273 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36274 { "string", 0, false, true },
36275 { "update", OPTION_MASK_NO_UPDATE, true , true },
36276 { "vsx", OPTION_MASK_VSX, false, true },
36277 #ifdef OPTION_MASK_64BIT
36278 #if TARGET_AIX_OS
36279 { "aix64", OPTION_MASK_64BIT, false, false },
36280 { "aix32", OPTION_MASK_64BIT, true, false },
36281 #else
36282 { "64", OPTION_MASK_64BIT, false, false },
36283 { "32", OPTION_MASK_64BIT, true, false },
36284 #endif
36285 #endif
36286 #ifdef OPTION_MASK_EABI
36287 { "eabi", OPTION_MASK_EABI, false, false },
36288 #endif
36289 #ifdef OPTION_MASK_LITTLE_ENDIAN
36290 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36291 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36292 #endif
36293 #ifdef OPTION_MASK_RELOCATABLE
36294 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36295 #endif
36296 #ifdef OPTION_MASK_STRICT_ALIGN
36297 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36298 #endif
36299 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36300 { "string", 0, false, false },
36301 };
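/* Illustrative uses of the mask options above (hypothetical declarations):

     __attribute__((__target__("vsx,no-update"))) void f (void);
     #pragma GCC target ("power9-vector,htm")  */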
36302
36303 /* Builtin mask mapping for printing the flags. */
36304 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36305 {
36306 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36307 { "vsx", RS6000_BTM_VSX, false, false },
36308 { "fre", RS6000_BTM_FRE, false, false },
36309 { "fres", RS6000_BTM_FRES, false, false },
36310 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36311 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36312 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36313 { "cell", RS6000_BTM_CELL, false, false },
36314 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36315 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36316 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36317 { "crypto", RS6000_BTM_CRYPTO, false, false },
36318 { "htm", RS6000_BTM_HTM, false, false },
36319 { "hard-dfp", RS6000_BTM_DFP, false, false },
36320 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36321 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36322 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36323 { "float128", RS6000_BTM_FLOAT128, false, false },
36324 { "float128-hw", RS6000_BTM_FLOAT128_HW,false, false },
36325 };
36326
36327 /* Option variables that we want to support inside attribute((target)) and
36328 #pragma GCC target operations. */
36329
36330 struct rs6000_opt_var {
36331 const char *name; /* option name */
36332 size_t global_offset; /* offset of the option in global_options. */
36333 size_t target_offset; /* offset of the option in target options. */
36334 };
36335
36336 static struct rs6000_opt_var const rs6000_opt_vars[] =
36337 {
36338 { "friz",
36339 offsetof (struct gcc_options, x_TARGET_FRIZ),
36340 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36341 { "avoid-indexed-addresses",
36342 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36343 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36344 { "longcall",
36345 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36346 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36347 { "optimize-swaps",
36348 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36349 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36350 { "allow-movmisalign",
36351 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36352 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36353 { "sched-groups",
36354 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36355 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36356 { "always-hint",
36357 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36358 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36359 { "align-branch-targets",
36360 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36361 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36362 { "tls-markers",
36363 offsetof (struct gcc_options, x_tls_markers),
36364 offsetof (struct cl_target_option, x_tls_markers), },
36365 { "sched-prolog",
36366 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36367 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36368 { "sched-epilog",
36369 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36370 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36371 { "speculate-indirect-jumps",
36372 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36373 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36374 };
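/* Illustrative use of the boolean option variables above; a "no-" prefix
   negates them just as for the mask options (hypothetical declaration):

     __attribute__((__target__("friz,no-longcall"))) double g (double);  */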
36375
36376 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36377 parsing. Return true if there were no errors. */
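/* For example, the string "cpu=power9,no-vsx,htm" is split on commas;
   "cpu=" and "tune=" select a processor, a "no-" prefix inverts an option,
   and each remaining token is looked up first in rs6000_opt_masks and then
   in rs6000_opt_vars.  */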
36378
36379 static bool
36380 rs6000_inner_target_options (tree args, bool attr_p)
36381 {
36382 bool ret = true;
36383
36384 if (args == NULL_TREE)
36385 ;
36386
36387 else if (TREE_CODE (args) == STRING_CST)
36388 {
36389 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36390 char *q;
36391
36392 while ((q = strtok (p, ",")) != NULL)
36393 {
36394 bool error_p = false;
36395 bool not_valid_p = false;
36396 const char *cpu_opt = NULL;
36397
36398 p = NULL;
36399 if (strncmp (q, "cpu=", 4) == 0)
36400 {
36401 int cpu_index = rs6000_cpu_name_lookup (q+4);
36402 if (cpu_index >= 0)
36403 rs6000_cpu_index = cpu_index;
36404 else
36405 {
36406 error_p = true;
36407 cpu_opt = q+4;
36408 }
36409 }
36410 else if (strncmp (q, "tune=", 5) == 0)
36411 {
36412 int tune_index = rs6000_cpu_name_lookup (q+5);
36413 if (tune_index >= 0)
36414 rs6000_tune_index = tune_index;
36415 else
36416 {
36417 error_p = true;
36418 cpu_opt = q+5;
36419 }
36420 }
36421 else
36422 {
36423 size_t i;
36424 bool invert = false;
36425 char *r = q;
36426
36427 error_p = true;
36428 if (strncmp (r, "no-", 3) == 0)
36429 {
36430 invert = true;
36431 r += 3;
36432 }
36433
36434 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36435 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36436 {
36437 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36438
36439 if (!rs6000_opt_masks[i].valid_target)
36440 not_valid_p = true;
36441 else
36442 {
36443 error_p = false;
36444 rs6000_isa_flags_explicit |= mask;
36445
36446 /* VSX needs altivec, so -mvsx automagically sets
36447 altivec and disables -mavoid-indexed-addresses. */
36448 if (!invert)
36449 {
36450 if (mask == OPTION_MASK_VSX)
36451 {
36452 mask |= OPTION_MASK_ALTIVEC;
36453 TARGET_AVOID_XFORM = 0;
36454 }
36455 }
36456
36457 if (rs6000_opt_masks[i].invert)
36458 invert = !invert;
36459
36460 if (invert)
36461 rs6000_isa_flags &= ~mask;
36462 else
36463 rs6000_isa_flags |= mask;
36464 }
36465 break;
36466 }
36467
36468 if (error_p && !not_valid_p)
36469 {
36470 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36471 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36472 {
36473 size_t j = rs6000_opt_vars[i].global_offset;
36474 *((int *) ((char *)&global_options + j)) = !invert;
36475 error_p = false;
36476 not_valid_p = false;
36477 break;
36478 }
36479 }
36480 }
36481
36482 if (error_p)
36483 {
36484 const char *eprefix, *esuffix;
36485
36486 ret = false;
36487 if (attr_p)
36488 {
36489 eprefix = "__attribute__((__target__(";
36490 esuffix = ")))";
36491 }
36492 else
36493 {
36494 eprefix = "#pragma GCC target ";
36495 esuffix = "";
36496 }
36497
36498 if (cpu_opt)
36499 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36500 q, esuffix);
36501 else if (not_valid_p)
36502 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36503 else
36504 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36505 }
36506 }
36507 }
36508
36509 else if (TREE_CODE (args) == TREE_LIST)
36510 {
36511 do
36512 {
36513 tree value = TREE_VALUE (args);
36514 if (value)
36515 {
36516 bool ret2 = rs6000_inner_target_options (value, attr_p);
36517 if (!ret2)
36518 ret = false;
36519 }
36520 args = TREE_CHAIN (args);
36521 }
36522 while (args != NULL_TREE);
36523 }
36524
36525 else
36526 {
36527 error ("attribute %<target%> argument not a string");
36528 return false;
36529 }
36530
36531 return ret;
36532 }
36533
36534 /* Print out the target options as a list for -mdebug=target. */
36535
36536 static void
36537 rs6000_debug_target_options (tree args, const char *prefix)
36538 {
36539 if (args == NULL_TREE)
36540 fprintf (stderr, "%s<NULL>", prefix);
36541
36542 else if (TREE_CODE (args) == STRING_CST)
36543 {
36544 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36545 char *q;
36546
36547 while ((q = strtok (p, ",")) != NULL)
36548 {
36549 p = NULL;
36550 fprintf (stderr, "%s\"%s\"", prefix, q);
36551 prefix = ", ";
36552 }
36553 }
36554
36555 else if (TREE_CODE (args) == TREE_LIST)
36556 {
36557 do
36558 {
36559 tree value = TREE_VALUE (args);
36560 if (value)
36561 {
36562 rs6000_debug_target_options (value, prefix);
36563 prefix = ", ";
36564 }
36565 args = TREE_CHAIN (args);
36566 }
36567 while (args != NULL_TREE);
36568 }
36569
36570 else
36571 gcc_unreachable ();
36572
36573 return;
36574 }
36575
36576 \f
36577 /* Hook to validate attribute((target("..."))). */
36578
36579 static bool
36580 rs6000_valid_attribute_p (tree fndecl,
36581 tree ARG_UNUSED (name),
36582 tree args,
36583 int flags)
36584 {
36585 struct cl_target_option cur_target;
36586 bool ret;
36587 tree old_optimize;
36588 tree new_target, new_optimize;
36589 tree func_optimize;
36590
36591 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36592
36593 if (TARGET_DEBUG_TARGET)
36594 {
36595 tree tname = DECL_NAME (fndecl);
36596 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36597 if (tname)
36598 fprintf (stderr, "function: %.*s\n",
36599 (int) IDENTIFIER_LENGTH (tname),
36600 IDENTIFIER_POINTER (tname));
36601 else
36602 fprintf (stderr, "function: unknown\n");
36603
36604 fprintf (stderr, "args:");
36605 rs6000_debug_target_options (args, " ");
36606 fprintf (stderr, "\n");
36607
36608 if (flags)
36609 fprintf (stderr, "flags: 0x%x\n", flags);
36610
36611 fprintf (stderr, "--------------------\n");
36612 }
36613
36614 /* attribute((target("default"))) does nothing, beyond
36615 affecting multi-versioning. */
36616 if (TREE_VALUE (args)
36617 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36618 && TREE_CHAIN (args) == NULL_TREE
36619 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36620 return true;
36621
36622 old_optimize = build_optimization_node (&global_options);
36623 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36624
36625 /* If the function changed the optimization levels as well as setting target
36626 options, start with the optimizations specified. */
36627 if (func_optimize && func_optimize != old_optimize)
36628 cl_optimization_restore (&global_options,
36629 TREE_OPTIMIZATION (func_optimize));
36630
36631 /* The target attributes may also change some optimization flags, so update
36632 the optimization options if necessary. */
36633 cl_target_option_save (&cur_target, &global_options);
36634 rs6000_cpu_index = rs6000_tune_index = -1;
36635 ret = rs6000_inner_target_options (args, true);
36636
36637 /* Set up any additional state. */
36638 if (ret)
36639 {
36640 ret = rs6000_option_override_internal (false);
36641 new_target = build_target_option_node (&global_options);
36642 }
36643 else
36644 new_target = NULL;
36645
36646 new_optimize = build_optimization_node (&global_options);
36647
36648 if (!new_target)
36649 ret = false;
36650
36651 else if (fndecl)
36652 {
36653 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36654
36655 if (old_optimize != new_optimize)
36656 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36657 }
36658
36659 cl_target_option_restore (&global_options, &cur_target);
36660
36661 if (old_optimize != new_optimize)
36662 cl_optimization_restore (&global_options,
36663 TREE_OPTIMIZATION (old_optimize));
36664
36665 return ret;
36666 }
36667
36668 \f
36669 /* Hook to validate the current #pragma GCC target and set the state, and
36670 update the macros based on what was changed. If ARGS is NULL, then
36671 POP_TARGET is used to reset the options. */
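/* Illustrative use, showing when ARGS is NULL and POP_TARGET applies:

     #pragma GCC push_options
     #pragma GCC target ("cpu=power9")
     ...
     #pragma GCC pop_options      (parsed here with ARGS == NULL)  */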
36672
36673 bool
36674 rs6000_pragma_target_parse (tree args, tree pop_target)
36675 {
36676 tree prev_tree = build_target_option_node (&global_options);
36677 tree cur_tree;
36678 struct cl_target_option *prev_opt, *cur_opt;
36679 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36680 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36681
36682 if (TARGET_DEBUG_TARGET)
36683 {
36684 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36685 fprintf (stderr, "args:");
36686 rs6000_debug_target_options (args, " ");
36687 fprintf (stderr, "\n");
36688
36689 if (pop_target)
36690 {
36691 fprintf (stderr, "pop_target:\n");
36692 debug_tree (pop_target);
36693 }
36694 else
36695 fprintf (stderr, "pop_target: <NULL>\n");
36696
36697 fprintf (stderr, "--------------------\n");
36698 }
36699
36700 if (! args)
36701 {
36702 cur_tree = ((pop_target)
36703 ? pop_target
36704 : target_option_default_node);
36705 cl_target_option_restore (&global_options,
36706 TREE_TARGET_OPTION (cur_tree));
36707 }
36708 else
36709 {
36710 rs6000_cpu_index = rs6000_tune_index = -1;
36711 if (!rs6000_inner_target_options (args, false)
36712 || !rs6000_option_override_internal (false)
36713 || (cur_tree = build_target_option_node (&global_options))
36714 == NULL_TREE)
36715 {
36716 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36717 fprintf (stderr, "invalid pragma\n");
36718
36719 return false;
36720 }
36721 }
36722
36723 target_option_current_node = cur_tree;
36724 rs6000_activate_target_options (target_option_current_node);
36725
36726 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36727 change the macros that are defined. */
36728 if (rs6000_target_modify_macros_ptr)
36729 {
36730 prev_opt = TREE_TARGET_OPTION (prev_tree);
36731 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36732 prev_flags = prev_opt->x_rs6000_isa_flags;
36733
36734 cur_opt = TREE_TARGET_OPTION (cur_tree);
36735 cur_flags = cur_opt->x_rs6000_isa_flags;
36736 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36737
36738 diff_bumask = (prev_bumask ^ cur_bumask);
36739 diff_flags = (prev_flags ^ cur_flags);
36740
36741 if ((diff_flags != 0) || (diff_bumask != 0))
36742 {
36743 /* Delete old macros. */
36744 rs6000_target_modify_macros_ptr (false,
36745 prev_flags & diff_flags,
36746 prev_bumask & diff_bumask);
36747
36748 /* Define new macros. */
36749 rs6000_target_modify_macros_ptr (true,
36750 cur_flags & diff_flags,
36751 cur_bumask & diff_bumask);
36752 }
36753 }
36754
36755 return true;
36756 }
36757
36758 \f
36759 /* Remember the last target of rs6000_set_current_function. */
36760 static GTY(()) tree rs6000_previous_fndecl;
36761
36762 /* Restore target's globals from NEW_TREE and invalidate the
36763 rs6000_previous_fndecl cache. */
36764
36765 void
36766 rs6000_activate_target_options (tree new_tree)
36767 {
36768 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36769 if (TREE_TARGET_GLOBALS (new_tree))
36770 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36771 else if (new_tree == target_option_default_node)
36772 restore_target_globals (&default_target_globals);
36773 else
36774 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36775 rs6000_previous_fndecl = NULL_TREE;
36776 }
36777
36778 /* Establish appropriate back-end context for processing the function
36779 FNDECL. The argument might be NULL to indicate processing at top
36780 level, outside of any function scope. */
36781 static void
36782 rs6000_set_current_function (tree fndecl)
36783 {
36784 if (TARGET_DEBUG_TARGET)
36785 {
36786 fprintf (stderr, "\n==================== rs6000_set_current_function");
36787
36788 if (fndecl)
36789 fprintf (stderr, ", fndecl %s (%p)",
36790 (DECL_NAME (fndecl)
36791 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36792 : "<unknown>"), (void *)fndecl);
36793
36794 if (rs6000_previous_fndecl)
36795 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36796
36797 fprintf (stderr, "\n");
36798 }
36799
36800 /* Only change the context if the function changes. This hook is called
36801 several times in the course of compiling a function, and we don't want to
36802 slow things down too much or call target_reinit when it isn't safe. */
36803 if (fndecl == rs6000_previous_fndecl)
36804 return;
36805
36806 tree old_tree;
36807 if (rs6000_previous_fndecl == NULL_TREE)
36808 old_tree = target_option_current_node;
36809 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36810 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36811 else
36812 old_tree = target_option_default_node;
36813
36814 tree new_tree;
36815 if (fndecl == NULL_TREE)
36816 {
36817 if (old_tree != target_option_current_node)
36818 new_tree = target_option_current_node;
36819 else
36820 new_tree = NULL_TREE;
36821 }
36822 else
36823 {
36824 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36825 if (new_tree == NULL_TREE)
36826 new_tree = target_option_default_node;
36827 }
36828
36829 if (TARGET_DEBUG_TARGET)
36830 {
36831 if (new_tree)
36832 {
36833 fprintf (stderr, "\nnew fndecl target specific options:\n");
36834 debug_tree (new_tree);
36835 }
36836
36837 if (old_tree)
36838 {
36839 fprintf (stderr, "\nold fndecl target specific options:\n");
36840 debug_tree (old_tree);
36841 }
36842
36843 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36844 fprintf (stderr, "--------------------\n");
36845 }
36846
36847 if (new_tree && old_tree != new_tree)
36848 rs6000_activate_target_options (new_tree);
36849
36850 if (fndecl)
36851 rs6000_previous_fndecl = fndecl;
36852 }
36853
36854 \f
36855 /* Save the current options */
36856
36857 static void
36858 rs6000_function_specific_save (struct cl_target_option *ptr,
36859 struct gcc_options *opts)
36860 {
36861 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36862 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36863 }
36864
36865 /* Restore the current options */
36866
36867 static void
36868 rs6000_function_specific_restore (struct gcc_options *opts,
36869 struct cl_target_option *ptr)
36870
36871 {
36872 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36873 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36874 (void) rs6000_option_override_internal (false);
36875 }
36876
36877 /* Print the current options */
36878
36879 static void
36880 rs6000_function_specific_print (FILE *file, int indent,
36881 struct cl_target_option *ptr)
36882 {
36883 rs6000_print_isa_options (file, indent, "Isa options set",
36884 ptr->x_rs6000_isa_flags);
36885
36886 rs6000_print_isa_options (file, indent, "Isa options explicit",
36887 ptr->x_rs6000_isa_flags_explicit);
36888 }
36889
36890 /* Helper function to print the current isa or misc options on a line. */
36891
36892 static void
36893 rs6000_print_options_internal (FILE *file,
36894 int indent,
36895 const char *string,
36896 HOST_WIDE_INT flags,
36897 const char *prefix,
36898 const struct rs6000_opt_mask *opts,
36899 size_t num_elements)
36900 {
36901 size_t i;
36902 size_t start_column = 0;
36903 size_t cur_column;
36904 size_t max_column = 120;
36905 size_t prefix_len = strlen (prefix);
36906 size_t comma_len = 0;
36907 const char *comma = "";
36908
36909 if (indent)
36910 start_column += fprintf (file, "%*s", indent, "");
36911
36912 if (!flags)
36913 {
36914       fprintf (file, DEBUG_FMT_S, string, "<none>");
36915 return;
36916 }
36917
36918   start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36919
36920 /* Print the various mask options. */
36921 cur_column = start_column;
36922 for (i = 0; i < num_elements; i++)
36923 {
36924 bool invert = opts[i].invert;
36925 const char *name = opts[i].name;
36926 const char *no_str = "";
36927 HOST_WIDE_INT mask = opts[i].mask;
36928 size_t len = comma_len + prefix_len + strlen (name);
36929
36930 if (!invert)
36931 {
36932 if ((flags & mask) == 0)
36933 {
36934 no_str = "no-";
36935 len += sizeof ("no-") - 1;
36936 }
36937
36938 flags &= ~mask;
36939 }
36940
36941 else
36942 {
36943 if ((flags & mask) != 0)
36944 {
36945 no_str = "no-";
36946 len += sizeof ("no-") - 1;
36947 }
36948
36949 flags |= mask;
36950 }
36951
36952 cur_column += len;
36953 if (cur_column > max_column)
36954 {
36955 fprintf (stderr, ", \\\n%*s", (int)start_column, "");
36956 cur_column = start_column + len;
36957 comma = "";
36958 }
36959
36960 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36961 comma = ", ";
36962 comma_len = sizeof (", ") - 1;
36963 }
36964
36965 fputs ("\n", file);
36966 }
36967
36968 /* Helper function to print the current isa options on a line. */
36969
36970 static void
36971 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36972 HOST_WIDE_INT flags)
36973 {
36974 rs6000_print_options_internal (file, indent, string, flags, "-m",
36975 &rs6000_opt_masks[0],
36976 ARRAY_SIZE (rs6000_opt_masks));
36977 }
36978
36979 static void
36980 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
36981 HOST_WIDE_INT flags)
36982 {
36983 rs6000_print_options_internal (file, indent, string, flags, "",
36984 &rs6000_builtin_mask_names[0],
36985 ARRAY_SIZE (rs6000_builtin_mask_names));
36986 }
36987
36988 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
36989 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
36990 -mupper-regs-df, etc.).
36991
36992 If the user used -mno-power8-vector, we need to turn off all of the implicit
36993 ISA 2.07 and 3.0 options that relate to the vector unit.
36994
36995 If the user used -mno-power9-vector, we need to turn off all of the implicit
36996 ISA 3.0 options that relate to the vector unit.
36997
36998 This function does not handle explicit options such as the user specifying
36999 -mdirect-move. These are handled in rs6000_option_override_internal, and
37000 the appropriate error is given if needed.
37001
37002 We return a mask of all of the implicit options that should not be enabled
37003 by default. */
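/* For example, "-mno-vsx -mpower8-vector" makes the explicitly requested
   -mpower8-vector conflict with disabling VSX, producing the error
   "-mno-vsx turns off -mpower8-vector"; a power8-vector flag that was only
   implied (say, by -mcpu=power8) is silently cleared instead.  */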
37004
37005 static HOST_WIDE_INT
37006 rs6000_disable_incompatible_switches (void)
37007 {
37008 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37009 size_t i, j;
37010
37011 static const struct {
37012 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37013 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37014 const char *const name; /* name of the switch. */
37015 } flags[] = {
37016 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37017 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37018 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37019 };
37020
37021 for (i = 0; i < ARRAY_SIZE (flags); i++)
37022 {
37023 HOST_WIDE_INT no_flag = flags[i].no_flag;
37024
37025 if ((rs6000_isa_flags & no_flag) == 0
37026 && (rs6000_isa_flags_explicit & no_flag) != 0)
37027 {
37028 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37029 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37030 & rs6000_isa_flags
37031 & dep_flags);
37032
37033 if (set_flags)
37034 {
37035 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37036 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37037 {
37038 set_flags &= ~rs6000_opt_masks[j].mask;
37039 error ("%<-mno-%s%> turns off %<-m%s%>",
37040 flags[i].name,
37041 rs6000_opt_masks[j].name);
37042 }
37043
37044 gcc_assert (!set_flags);
37045 }
37046
37047 rs6000_isa_flags &= ~dep_flags;
37048 ignore_masks |= no_flag | dep_flags;
37049 }
37050 }
37051
37052 return ignore_masks;
37053 }
37054
37055 \f
37056 /* Helper function for printing the function name when debugging. */
37057
37058 static const char *
37059 get_decl_name (tree fn)
37060 {
37061 tree name;
37062
37063 if (!fn)
37064 return "<null>";
37065
37066 name = DECL_NAME (fn);
37067 if (!name)
37068 return "<no-name>";
37069
37070 return IDENTIFIER_POINTER (name);
37071 }
37072
37073 /* Return the clone id of the target we are compiling code for in a target
37074 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37075 the priority list for the target clones (ordered from lowest to
37076 highest). */
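/* Illustrative use (hypothetical function), assuming rs6000_clone_map has
   a power9/ISA 3.0 entry:

     __attribute__((target_clones("default","cpu=power9"))) int h (void);

   The "default" clone gets CLONE_DEFAULT and the power9 clone gets the
   higher ISA 3.0 priority; dispatch picks the highest priority supported
   at run time.  */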
37077
37078 static int
37079 rs6000_clone_priority (tree fndecl)
37080 {
37081 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37082 HOST_WIDE_INT isa_masks;
37083 int ret = CLONE_DEFAULT;
37084 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37085 const char *attrs_str = NULL;
37086
37087 attrs = TREE_VALUE (TREE_VALUE (attrs));
37088 attrs_str = TREE_STRING_POINTER (attrs);
37089
37090 /* Return priority zero for default function. Return the ISA needed for the
37091 function if it is not the default. */
37092 if (strcmp (attrs_str, "default") != 0)
37093 {
37094 if (fn_opts == NULL_TREE)
37095 fn_opts = target_option_default_node;
37096
37097 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37098 isa_masks = rs6000_isa_flags;
37099 else
37100 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37101
37102 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37103 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37104 break;
37105 }
37106
37107 if (TARGET_DEBUG_TARGET)
37108 fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
37109 get_decl_name (fndecl), ret);
37110
37111 return ret;
37112 }
37113
37114 /* This compares the priority of target features in function DECL1 and DECL2.
37115 It returns positive value if DECL1 is higher priority, negative value if
37116 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37117 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37118
37119 static int
37120 rs6000_compare_version_priority (tree decl1, tree decl2)
37121 {
37122 int priority1 = rs6000_clone_priority (decl1);
37123 int priority2 = rs6000_clone_priority (decl2);
37124 int ret = priority1 - priority2;
37125
37126 if (TARGET_DEBUG_TARGET)
37127 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37128 get_decl_name (decl1), get_decl_name (decl2), ret);
37129
37130 return ret;
37131 }
37132
37133 /* Make a dispatcher declaration for the multi-versioned function DECL.
37134 Calls to DECL function will be replaced with calls to the dispatcher
37135 by the front-end. Returns the decl of the dispatcher function. */
37136
37137 static tree
37138 rs6000_get_function_versions_dispatcher (void *decl)
37139 {
37140 tree fn = (tree) decl;
37141 struct cgraph_node *node = NULL;
37142 struct cgraph_node *default_node = NULL;
37143 struct cgraph_function_version_info *node_v = NULL;
37144 struct cgraph_function_version_info *first_v = NULL;
37145
37146 tree dispatch_decl = NULL;
37147
37148 struct cgraph_function_version_info *default_version_info = NULL;
37149 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37150
37151 if (TARGET_DEBUG_TARGET)
37152 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37153 get_decl_name (fn));
37154
37155 node = cgraph_node::get (fn);
37156 gcc_assert (node != NULL);
37157
37158 node_v = node->function_version ();
37159 gcc_assert (node_v != NULL);
37160
37161 if (node_v->dispatcher_resolver != NULL)
37162 return node_v->dispatcher_resolver;
37163
37164 /* Find the default version and make it the first node. */
37165 first_v = node_v;
37166 /* Go to the beginning of the chain. */
37167 while (first_v->prev != NULL)
37168 first_v = first_v->prev;
37169
37170 default_version_info = first_v;
37171 while (default_version_info != NULL)
37172 {
37173 const tree decl2 = default_version_info->this_node->decl;
37174 if (is_function_default_version (decl2))
37175 break;
37176 default_version_info = default_version_info->next;
37177 }
37178
37179 /* If there is no default node, just return NULL. */
37180 if (default_version_info == NULL)
37181 return NULL;
37182
37183 /* Make default info the first node. */
37184 if (first_v != default_version_info)
37185 {
37186 default_version_info->prev->next = default_version_info->next;
37187 if (default_version_info->next)
37188 default_version_info->next->prev = default_version_info->prev;
37189 first_v->prev = default_version_info;
37190 default_version_info->next = first_v;
37191 default_version_info->prev = NULL;
37192 }
37193
37194 default_node = default_version_info->this_node;
37195
37196 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37197 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37198 "%<target_clones%> attribute needs GLIBC (2.23 and newer) that "
37199 "exports hardware capability bits");
37200 #else
37201
37202 if (targetm.has_ifunc_p ())
37203 {
37204 struct cgraph_function_version_info *it_v = NULL;
37205 struct cgraph_node *dispatcher_node = NULL;
37206 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37207
37208 /* Right now, the dispatching is done via ifunc. */
37209 dispatch_decl = make_dispatcher_decl (default_node->decl);
37210
37211 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37212 gcc_assert (dispatcher_node != NULL);
37213 dispatcher_node->dispatcher_function = 1;
37214 dispatcher_version_info
37215 = dispatcher_node->insert_new_function_version ();
37216 dispatcher_version_info->next = default_version_info;
37217 dispatcher_node->definition = 1;
37218
37219 /* Set the dispatcher for all the versions. */
37220 it_v = default_version_info;
37221 while (it_v != NULL)
37222 {
37223 it_v->dispatcher_resolver = dispatch_decl;
37224 it_v = it_v->next;
37225 }
37226 }
37227 else
37228 {
37229 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37230 "multiversioning needs ifunc which is not supported "
37231 "on this target");
37232 }
37233 #endif
37234
37235 return dispatch_decl;
37236 }
37237
37238 /* Make the resolver function decl to dispatch the versions of a multi-
37239 versioned function, DEFAULT_DECL. Create an empty basic block in the
37240 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37241 function. */
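/* Roughly, the generated resolver has this shape (illustrative C; the
   actual assembler-level name is "fn.resolver"):

     static void *
     fn_resolver (void)
     {
       if (__builtin_cpu_supports ("isa-name"))
	 return fn_clone;
       ...
       return fn_default;
     }  */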
37242
37243 static tree
37244 make_resolver_func (const tree default_decl,
37245 const tree dispatch_decl,
37246 basic_block *empty_bb)
37247 {
37248 /* Make the resolver function static. The resolver function returns
37249 void *. */
37250 tree decl_name = clone_function_name (default_decl, "resolver");
37251 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37252 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37253 tree decl = build_fn_decl (resolver_name, type);
37254 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37255
37256 DECL_NAME (decl) = decl_name;
37257 TREE_USED (decl) = 1;
37258 DECL_ARTIFICIAL (decl) = 1;
37259 DECL_IGNORED_P (decl) = 0;
37260 TREE_PUBLIC (decl) = 0;
37261 DECL_UNINLINABLE (decl) = 1;
37262
37263 /* Resolver is not external, body is generated. */
37264 DECL_EXTERNAL (decl) = 0;
37265 DECL_EXTERNAL (dispatch_decl) = 0;
37266
37267 DECL_CONTEXT (decl) = NULL_TREE;
37268 DECL_INITIAL (decl) = make_node (BLOCK);
37269 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37270
37271 /* Build result decl and add to function_decl. */
37272 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37273 DECL_CONTEXT (t) = decl;
37274 DECL_ARTIFICIAL (t) = 1;
37275 DECL_IGNORED_P (t) = 1;
37276 DECL_RESULT (decl) = t;
37277
37278 gimplify_function_tree (decl);
37279 push_cfun (DECL_STRUCT_FUNCTION (decl));
37280 *empty_bb = init_lowered_empty_function (decl, false,
37281 profile_count::uninitialized ());
37282
37283 cgraph_node::add_new_function (decl, true);
37284 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37285
37286 pop_cfun ();
37287
37288 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37289 DECL_ATTRIBUTES (dispatch_decl)
37290 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37291
37292 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37293
37294 return decl;
37295 }
37296
37297 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37298 return a pointer to VERSION_DECL if we are running on a machine that
37299 supports the index CLONE_ISA hardware architecture bits. This function will
37300 be called during version dispatch to decide which function version to
37301 execute. It returns the basic block at the end, to which more conditions
37302 can be added. */
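/* Illustrative shape of the test emitted for a non-default clone:

     cond = __builtin_cpu_supports ("name");
     if (cond != 0)
       return (void *) version_decl;  */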
37303
37304 static basic_block
37305 add_condition_to_bb (tree function_decl, tree version_decl,
37306 int clone_isa, basic_block new_bb)
37307 {
37308 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37309
37310 gcc_assert (new_bb != NULL);
37311 gimple_seq gseq = bb_seq (new_bb);
37312
37313
37314 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37315 build_fold_addr_expr (version_decl));
37316 tree result_var = create_tmp_var (ptr_type_node);
37317 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37318 gimple *return_stmt = gimple_build_return (result_var);
37319
37320 if (clone_isa == CLONE_DEFAULT)
37321 {
37322 gimple_seq_add_stmt (&gseq, convert_stmt);
37323 gimple_seq_add_stmt (&gseq, return_stmt);
37324 set_bb_seq (new_bb, gseq);
37325 gimple_set_bb (convert_stmt, new_bb);
37326 gimple_set_bb (return_stmt, new_bb);
37327 pop_cfun ();
37328 return new_bb;
37329 }
37330
37331 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37332 tree cond_var = create_tmp_var (bool_int_type_node);
37333 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37334 const char *arg_str = rs6000_clone_map[clone_isa].name;
37335 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37336 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37337 gimple_call_set_lhs (call_cond_stmt, cond_var);
37338
37339 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37340 gimple_set_bb (call_cond_stmt, new_bb);
37341 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37342
37343 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37344 NULL_TREE, NULL_TREE);
37345 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37346 gimple_set_bb (if_else_stmt, new_bb);
37347 gimple_seq_add_stmt (&gseq, if_else_stmt);
37348
37349 gimple_seq_add_stmt (&gseq, convert_stmt);
37350 gimple_seq_add_stmt (&gseq, return_stmt);
37351 set_bb_seq (new_bb, gseq);
37352
37353 basic_block bb1 = new_bb;
37354 edge e12 = split_block (bb1, if_else_stmt);
37355 basic_block bb2 = e12->dest;
37356 e12->flags &= ~EDGE_FALLTHRU;
37357 e12->flags |= EDGE_TRUE_VALUE;
37358
37359 edge e23 = split_block (bb2, return_stmt);
37360 gimple_set_bb (convert_stmt, bb2);
37361 gimple_set_bb (return_stmt, bb2);
37362
37363 basic_block bb3 = e23->dest;
37364 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37365
37366 remove_edge (e23);
37367 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37368
37369 pop_cfun ();
37370 return bb3;
37371 }
37372
37373 /* This function generates the dispatch function for multi-versioned functions.
37374 DISPATCH_DECL is the function which will contain the dispatch logic.
37375 FNDECLS are the function choices for dispatch, and is a tree chain.
37376 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37377 code is generated. */
37378
37379 static int
37380 dispatch_function_versions (tree dispatch_decl,
37381 void *fndecls_p,
37382 basic_block *empty_bb)
37383 {
37384 int ix;
37385 tree ele;
37386 vec<tree> *fndecls;
37387 tree clones[CLONE_MAX];
37388
37389 if (TARGET_DEBUG_TARGET)
37390 fputs ("dispatch_function_versions, top\n", stderr);
37391
37392 gcc_assert (dispatch_decl != NULL
37393 && fndecls_p != NULL
37394 && empty_bb != NULL);
37395
37396 /* fndecls_p is actually a vector. */
37397 fndecls = static_cast<vec<tree> *> (fndecls_p);
37398
37399 /* At least one more version other than the default. */
37400 gcc_assert (fndecls->length () >= 2);
37401
37402 /* The first version in the vector is the default decl. */
37403 memset ((void *) clones, '\0', sizeof (clones));
37404 clones[CLONE_DEFAULT] = (*fndecls)[0];
37405
37406 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37407 on the PowerPC (on the x86_64, it is not a NOP). The builtin function
37408 __builtin_cpu_support ensures that the TOC fields are setup by requiring a
37409 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37410 to insert the code here to do the call. */
37411
37412 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37413 {
37414 int priority = rs6000_clone_priority (ele);
37415 if (!clones[priority])
37416 clones[priority] = ele;
37417 }
37418
37419 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37420 if (clones[ix])
37421 {
37422 if (TARGET_DEBUG_TARGET)
37423 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37424 ix, get_decl_name (clones[ix]));
37425
37426 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37427 *empty_bb);
37428 }
37429
37430 return 0;
37431 }
37432
37433 /* Generate the dispatching code body to dispatch multi-versioned function
37434 DECL. The target hook is called to process the "target" attributes and
37435 provide the code to dispatch the right function at run-time. NODE points
37436 to the dispatcher decl whose body will be created. */
37437
37438 static tree
37439 rs6000_generate_version_dispatcher_body (void *node_p)
37440 {
37441 tree resolver;
37442 basic_block empty_bb;
37443 struct cgraph_node *node = (cgraph_node *) node_p;
37444 struct cgraph_function_version_info *ninfo = node->function_version ();
37445
37446 if (ninfo->dispatcher_resolver)
37447 return ninfo->dispatcher_resolver;
37448
37449 /* node is going to be an alias, so remove the finalized bit. */
37450 node->definition = false;
37451
37452 /* The first version in the chain corresponds to the default version. */
37453 ninfo->dispatcher_resolver = resolver
37454 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37455
37456 if (TARGET_DEBUG_TARGET)
37457     fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37458 get_decl_name (resolver));
37459
37460 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37461 auto_vec<tree, 2> fn_ver_vec;
37462
37463 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37464 vinfo;
37465 vinfo = vinfo->next)
37466 {
37467 struct cgraph_node *version = vinfo->this_node;
37468 /* Check for virtual functions here again, as by this time it should
37469 have been determined if this function needs a vtable index or
37470 not. This happens for methods in derived classes that override
37471 virtual methods in base classes but are not explicitly marked as
37472 virtual. */
37473 if (DECL_VINDEX (version->decl))
37474 sorry ("Virtual function multiversioning not supported");
37475
37476 fn_ver_vec.safe_push (version->decl);
37477 }
37478
37479 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37480 cgraph_edge::rebuild_edges ();
37481 pop_cfun ();
37482 return resolver;
37483 }
37484
37485 \f
37486 /* Hook to determine if one function can safely inline another. */
37487
37488 static bool
37489 rs6000_can_inline_p (tree caller, tree callee)
37490 {
37491 bool ret = false;
37492 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37493 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37494
37495 /* If callee has no option attributes, then it is ok to inline. */
37496 if (!callee_tree)
37497 ret = true;
37498
37499 /* If caller has no option attributes, but callee does then it is not ok to
37500 inline. */
37501 else if (!caller_tree)
37502 ret = false;
37503
37504 else
37505 {
37506 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37507 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37508
37509       /* Callee's options should be a subset of the caller's, i.e. a vsx function
37510 can inline an altivec function but a non-vsx function can't inline a
37511 vsx function. */
37512 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37513 == callee_opts->x_rs6000_isa_flags)
37514 ret = true;
37515 }
37516
37517 if (TARGET_DEBUG_TARGET)
37518 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
37519 get_decl_name (caller), get_decl_name (callee),
37520 (ret ? "can" : "cannot"));
37521
37522 return ret;
37523 }
37524 \f
37525 /* Allocate a stack temp and fixup the address so it meets the particular
37526    memory requirements (either offsettable or REG+REG addressing). */
37527
37528 rtx
37529 rs6000_allocate_stack_temp (machine_mode mode,
37530 bool offsettable_p,
37531 bool reg_reg_p)
37532 {
37533 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37534 rtx addr = XEXP (stack, 0);
37535 int strict_p = reload_completed;
37536
37537 if (!legitimate_indirect_address_p (addr, strict_p))
37538 {
37539 if (offsettable_p
37540 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37541 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37542
37543 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37544 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37545 }
37546
37547 return stack;
37548 }
37549
37550 /* Given a memory reference, if it is not a reg or reg+reg addressing,
37551 convert to such a form to deal with memory reference instructions
37552 like STFIWX and LDBRX that only take reg+reg addressing. */
37553
37554 rtx
37555 rs6000_force_indexed_or_indirect_mem (rtx x)
37556 {
37557 machine_mode mode = GET_MODE (x);
37558
37559 gcc_assert (MEM_P (x));
37560 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37561 {
37562 rtx addr = XEXP (x, 0);
37563 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37564 {
37565 rtx reg = XEXP (addr, 0);
37566 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37567 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37568 gcc_assert (REG_P (reg));
37569 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37570 addr = reg;
37571 }
37572 else if (GET_CODE (addr) == PRE_MODIFY)
37573 {
37574 rtx reg = XEXP (addr, 0);
37575 rtx expr = XEXP (addr, 1);
37576 gcc_assert (REG_P (reg));
37577 gcc_assert (GET_CODE (expr) == PLUS);
37578 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37579 addr = reg;
37580 }
37581
37582 x = replace_equiv_address (x, force_reg (Pmode, addr));
37583 }
37584
37585 return x;
37586 }
37587
37588 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37589
37590 On the RS/6000, all integer constants are acceptable, most won't be valid
37591 for particular insns, though. Only easy FP constants are acceptable. */
37592
37593 static bool
37594 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37595 {
37596 if (TARGET_ELF && tls_referenced_p (x))
37597 return false;
37598
37599 if (CONST_DOUBLE_P (x))
37600 return easy_fp_constant (x, mode);
37601
37602 if (GET_CODE (x) == CONST_VECTOR)
37603 return easy_vector_constant (x, mode);
37604
37605 return true;
37606 }
37607
37608 \f
37609 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37610
37611 static bool
37612 chain_already_loaded (rtx_insn *last)
37613 {
37614 for (; last != NULL; last = PREV_INSN (last))
37615 {
37616 if (NONJUMP_INSN_P (last))
37617 {
37618 rtx patt = PATTERN (last);
37619
37620 if (GET_CODE (patt) == SET)
37621 {
37622 rtx lhs = XEXP (patt, 0);
37623
37624 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37625 return true;
37626 }
37627 }
37628 }
37629 return false;
37630 }
37631
37632 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37633
37634 void
37635 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37636 {
37637 rtx func = func_desc;
37638 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37639 rtx toc_load = NULL_RTX;
37640 rtx toc_restore = NULL_RTX;
37641 rtx func_addr;
37642 rtx abi_reg = NULL_RTX;
37643 rtx call[4];
37644 int n_call;
37645 rtx insn;
37646 bool is_pltseq_longcall;
37647
37648 if (global_tlsarg)
37649 tlsarg = global_tlsarg;
37650
37651 /* Handle longcall attributes. */
37652 is_pltseq_longcall = false;
37653 if ((INTVAL (cookie) & CALL_LONG) != 0
37654 && GET_CODE (func_desc) == SYMBOL_REF)
37655 {
37656 func = rs6000_longcall_ref (func_desc, tlsarg);
37657 if (TARGET_PLTSEQ)
37658 is_pltseq_longcall = true;
37659 }
37660
37661 /* Handle indirect calls. */
37662 if (!SYMBOL_REF_P (func)
37663 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37664 {
37665 /* Save the TOC into its reserved slot before the call,
37666 and prepare to restore it after the call. */
37667 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37668 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37669 gen_rtvec (1, stack_toc_offset),
37670 UNSPEC_TOCSLOT);
37671 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37672
37673 /* Can we optimize saving the TOC in the prologue or
37674 do we need to do it at every call? */
37675 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37676 cfun->machine->save_toc_in_prologue = true;
37677 else
37678 {
37679 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37680 rtx stack_toc_mem = gen_frame_mem (Pmode,
37681 gen_rtx_PLUS (Pmode, stack_ptr,
37682 stack_toc_offset));
37683 MEM_VOLATILE_P (stack_toc_mem) = 1;
37684 if (is_pltseq_longcall)
37685 {
37686 	      /* Use UNSPEC_PLTSEQ here to emit every instruction in an
37687 inline PLT call sequence with a reloc, enabling the
37688 linker to edit the sequence back to a direct call
37689 when that makes sense. */
37690 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37691 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37692 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37693 }
37694 else
37695 emit_move_insn (stack_toc_mem, toc_reg);
37696 }
37697
37698 if (DEFAULT_ABI == ABI_ELFv2)
37699 {
37700 /* A function pointer in the ELFv2 ABI is just a plain address, but
37701 the ABI requires it to be loaded into r12 before the call. */
37702 func_addr = gen_rtx_REG (Pmode, 12);
37703 if (!rtx_equal_p (func_addr, func))
37704 emit_move_insn (func_addr, func);
37705 abi_reg = func_addr;
37706 /* Indirect calls via CTR are strongly preferred over indirect
37707 calls via LR, so move the address there. Needed to mark
37708 this insn for linker plt sequence editing too. */
37709 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37710 if (is_pltseq_longcall)
37711 {
37712 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37713 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37714 emit_insn (gen_rtx_SET (func_addr, mark_func));
37715 v = gen_rtvec (2, func_addr, func_desc);
37716 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37717 }
37718 else
37719 emit_move_insn (func_addr, abi_reg);
37720 }
37721 else
37722 {
37723 /* A function pointer under AIX is a pointer to a data area whose
37724 first word contains the actual address of the function, whose
37725 second word contains a pointer to its TOC, and whose third word
37726 contains a value to place in the static chain register (r11).
37727 Note that if we load the static chain, our "trampoline" need
37728 not have any executable code. */
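	  /* An illustrative C view of that data area (the "function
	     descriptor") under AIX/ELFv1:

	       struct func_desc { void *entry; void *toc; void *static_chain; };  */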
37729
37730 /* Load up address of the actual function. */
37731 func = force_reg (Pmode, func);
37732 func_addr = gen_reg_rtx (Pmode);
37733 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37734
37735 /* Indirect calls via CTR are strongly preferred over indirect
37736 calls via LR, so move the address there. */
37737 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37738 emit_move_insn (ctr_reg, func_addr);
37739 func_addr = ctr_reg;
37740
37741 /* Prepare to load the TOC of the called function. Note that the
37742 TOC load must happen immediately before the actual call so
37743 that unwinding the TOC registers works correctly. See the
37744 comment in frob_update_context. */
37745 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37746 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37747 gen_rtx_PLUS (Pmode, func,
37748 func_toc_offset));
37749 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37750
37751 /* If we have a static chain, load it up. But, if the call was
37752 originally direct, the 3rd word has not been written since no
37753 trampoline has been built, so we ought not to load it, lest we
37754 override a static chain value. */
37755 if (!(GET_CODE (func_desc) == SYMBOL_REF
37756 && SYMBOL_REF_FUNCTION_P (func_desc))
37757 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37758 && !chain_already_loaded (get_current_sequence ()->next->last))
37759 {
37760 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37761 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37762 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37763 gen_rtx_PLUS (Pmode, func,
37764 func_sc_offset));
37765 emit_move_insn (sc_reg, func_sc_mem);
37766 abi_reg = sc_reg;
37767 }
37768 }
37769 }
37770 else
37771 {
37772 /* Direct calls use the TOC: for local calls, the callee will
37773 assume the TOC register is set; for non-local calls, the
37774 PLT stub needs the TOC register. */
37775 abi_reg = toc_reg;
37776 func_addr = func;
37777 }
37778
37779 /* Create the call. */
37780 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37781 if (value != NULL_RTX)
37782 call[0] = gen_rtx_SET (value, call[0]);
37783 n_call = 1;
37784
37785 if (toc_load)
37786 call[n_call++] = toc_load;
37787 if (toc_restore)
37788 call[n_call++] = toc_restore;
37789
37790 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37791
37792 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37793 insn = emit_call_insn (insn);
37794
37795 /* Mention all registers defined by the ABI to hold information
37796 as uses in CALL_INSN_FUNCTION_USAGE. */
37797 if (abi_reg)
37798 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37799 }
37800
37801 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37802
37803 void
37804 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37805 {
37806 rtx call[2];
37807 rtx insn;
37808
37809 gcc_assert (INTVAL (cookie) == 0);
37810
37811 if (global_tlsarg)
37812 tlsarg = global_tlsarg;
37813
37814 /* Create the call. */
37815 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37816 if (value != NULL_RTX)
37817 call[0] = gen_rtx_SET (value, call[0]);
37818
37819 call[1] = simple_return_rtx;
37820
37821 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37822 insn = emit_call_insn (insn);
37823
37824 /* Note use of the TOC register. */
37825 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37826 }
37827
37828 /* Expand code to perform a call under the SYSV4 ABI. */
37829
37830 void
37831 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37832 {
37833 rtx func = func_desc;
37834 rtx func_addr;
37835 rtx call[4];
37836 rtx insn;
37837 rtx abi_reg = NULL_RTX;
37838 int n;
37839
37840 if (global_tlsarg)
37841 tlsarg = global_tlsarg;
37842
37843 /* Handle longcall attributes. */
37844 if ((INTVAL (cookie) & CALL_LONG) != 0
37845 && GET_CODE (func_desc) == SYMBOL_REF)
37846 {
37847 func = rs6000_longcall_ref (func_desc, tlsarg);
37848 /* If the longcall was implemented as an inline PLT call using
37849 PLT unspecs then func will be REG:r11. If not, func will be
37850 a pseudo reg. The inline PLT call sequence supports lazy
37851 linking (and longcalls to functions in dlopen'd libraries).
37852 	 The other style of longcall doesn't. The lazy linking entry
37853 to the dynamic symbol resolver requires r11 be the function
37854 address (as it is for linker generated PLT stubs). Ensure
37855 r11 stays valid to the bctrl by marking r11 used by the call. */
37856 if (TARGET_PLTSEQ)
37857 abi_reg = func;
37858 }
37859
37860 /* Handle indirect calls. */
37861 if (GET_CODE (func) != SYMBOL_REF)
37862 {
37863 func = force_reg (Pmode, func);
37864
37865 /* Indirect calls via CTR are strongly preferred over indirect
37866 calls via LR, so move the address there. That can't be left
37867 to reload because we want to mark every instruction in an
37868 inline PLT call sequence with a reloc, enabling the linker to
37869 edit the sequence back to a direct call when that makes sense. */
37870 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37871 if (abi_reg)
37872 {
37873 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37874 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37875 emit_insn (gen_rtx_SET (func_addr, mark_func));
37876 v = gen_rtvec (2, func_addr, func_desc);
37877 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37878 }
37879 else
37880 emit_move_insn (func_addr, func);
37881 }
37882 else
37883 func_addr = func;
37884
37885 /* Create the call. */
37886 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37887 if (value != NULL_RTX)
37888 call[0] = gen_rtx_SET (value, call[0]);
37889
37890 call[1] = gen_rtx_USE (VOIDmode, cookie);
37891 n = 2;
37892 if (TARGET_SECURE_PLT
37893 && flag_pic
37894 && GET_CODE (func_addr) == SYMBOL_REF
37895 && !SYMBOL_REF_LOCAL_P (func_addr))
37896 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
37897
37898 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37899
37900 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
37901 insn = emit_call_insn (insn);
37902 if (abi_reg)
37903 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37904 }
37905
37906 /* Expand code to perform a sibling call under the SysV4 ABI. */
37907
37908 void
37909 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37910 {
37911 rtx func = func_desc;
37912 rtx func_addr;
37913 rtx call[3];
37914 rtx insn;
37915 rtx abi_reg = NULL_RTX;
37916
37917 if (global_tlsarg)
37918 tlsarg = global_tlsarg;
37919
37920 /* Handle longcall attributes. */
37921 if ((INTVAL (cookie) & CALL_LONG) != 0
37922 && GET_CODE (func_desc) == SYMBOL_REF)
37923 {
37924 func = rs6000_longcall_ref (func_desc, tlsarg);
37925 /* If the longcall was implemented as an inline PLT call using
37926 PLT unspecs then func will be REG:r11. If not, func will be
37927 a pseudo reg. The inline PLT call sequence supports lazy
37928 linking (and longcalls to functions in dlopen'd libraries).
37929 	 The other style of longcall doesn't. The lazy linking entry
37930 to the dynamic symbol resolver requires r11 be the function
37931 address (as it is for linker generated PLT stubs). Ensure
37932 r11 stays valid to the bctr by marking r11 used by the call. */
37933 if (TARGET_PLTSEQ)
37934 abi_reg = func;
37935 }
37936
37937 /* Handle indirect calls. */
37938 if (GET_CODE (func) != SYMBOL_REF)
37939 {
37940 func = force_reg (Pmode, func);
37941
37942 /* Indirect sibcalls must go via CTR. That can't be left to
37943 reload because we want to mark every instruction in an inline
37944 PLT call sequence with a reloc, enabling the linker to edit
37945 the sequence back to a direct call when that makes sense. */
37946 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37947 if (abi_reg)
37948 {
37949 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37950 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37951 emit_insn (gen_rtx_SET (func_addr, mark_func));
37952 v = gen_rtvec (2, func_addr, func_desc);
37953 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37954 }
37955 else
37956 emit_move_insn (func_addr, func);
37957 }
37958 else
37959 func_addr = func;
37960
37961 /* Create the call. */
37962 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37963 if (value != NULL_RTX)
37964 call[0] = gen_rtx_SET (value, call[0]);
37965
37966 call[1] = gen_rtx_USE (VOIDmode, cookie);
37967 call[2] = simple_return_rtx;
37968
37969 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
37970 insn = emit_call_insn (insn);
37971 if (abi_reg)
37972 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37973 }
37974
37975 #if TARGET_MACHO
37976
37977 /* Expand code to perform a call under the Darwin ABI.
37978 Modulo handling of mlongcall, this is much the same as sysv.
37979    If/when the longcall optimisation is removed, we could drop this
37980 code and use the sysv case (taking care to avoid the tls stuff).
37981
37982 We can use this for sibcalls too, if needed. */
37983
37984 void
37985 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
37986 rtx cookie, bool sibcall)
37987 {
37988 rtx func = func_desc;
37989 rtx func_addr;
37990 rtx call[3];
37991 rtx insn;
37992 int cookie_val = INTVAL (cookie);
37993 bool make_island = false;
37994
37995   /* Handle longcall attributes; there are two cases for Darwin:
37996 1) Newer linkers are capable of synthesising any branch islands needed.
37997 2) We need a helper branch island synthesised by the compiler.
37998 The second case has mostly been retired and we don't use it for m64.
37999      In fact, it is only an optimisation; we could just indirect as sysv
38000      does, but we retain it for backwards compatibility for now.
38001 If we're going to use this, then we need to keep the CALL_LONG bit set,
38002 so that we can pick up the special insn form later. */
38003 if ((cookie_val & CALL_LONG) != 0
38004 && GET_CODE (func_desc) == SYMBOL_REF)
38005 {
38006 if (darwin_emit_branch_islands && TARGET_32BIT)
38007 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38008 else
38009 {
38010 /* The linker is capable of doing this, but the user explicitly
38011 asked for -mlongcall, so we'll do the 'normal' version. */
38012 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38013 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38014 }
38015 }
38016
38017 /* Handle indirect calls. */
38018 if (GET_CODE (func) != SYMBOL_REF)
38019 {
38020 func = force_reg (Pmode, func);
38021
38022 /* Indirect calls via CTR are strongly preferred over indirect
38023 calls via LR, and are required for indirect sibcalls, so move
38024 the address there. */
38025 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38026 emit_move_insn (func_addr, func);
38027 }
38028 else
38029 func_addr = func;
38030
38031 /* Create the call. */
38032 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38033 if (value != NULL_RTX)
38034 call[0] = gen_rtx_SET (value, call[0]);
38035
38036 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38037
38038 if (sibcall)
38039 call[2] = simple_return_rtx;
38040 else
38041 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38042
38043 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38044 insn = emit_call_insn (insn);
38045 /* Now that we have the debug info in the insn, we can set up the branch island
38046 if we're using one. */
38047 if (make_island)
38048 {
38049 tree funname = get_identifier (XSTR (func_desc, 0));
38050
38051 if (no_previous_def (funname))
38052 {
38053 rtx label_rtx = gen_label_rtx ();
38054 char *label_buf, temp_buf[256];
38055 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38056 CODE_LABEL_NUMBER (label_rtx));
38057 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38058 tree labelname = get_identifier (label_buf);
38059 add_compiler_branch_island (labelname, funname,
38060 insn_line ((const rtx_insn*)insn));
38061 }
38062 }
38063 }
38064 #endif
38065
38066 void
38067 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38068 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38069 {
38070 #if TARGET_MACHO
38071 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38072 #else
38073 gcc_unreachable ();
38074 #endif
38075 }
38076
38077
38078 void
38079 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38080 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38081 {
38082 #if TARGET_MACHO
38083 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38084 #else
38085 gcc_unreachable ();
38086 #endif
38087 }
38088
38089
38090 /* Return whether we need to always update the saved TOC pointer when we update
38091 the stack pointer. */
38092
38093 static bool
38094 rs6000_save_toc_in_prologue_p (void)
38095 {
38096 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38097 }
38098
38099 #ifdef HAVE_GAS_HIDDEN
38100 # define USE_HIDDEN_LINKONCE 1
38101 #else
38102 # define USE_HIDDEN_LINKONCE 0
38103 #endif
38104
38105 /* Fills in the label name that should be used for a 476 link stack thunk. */
38106
38107 void
38108 get_ppc476_thunk_name (char name[32])
38109 {
38110 gcc_assert (TARGET_LINK_STACK);
38111
38112 if (USE_HIDDEN_LINKONCE)
38113 sprintf (name, "__ppc476.get_thunk");
38114 else
38115 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38116 }
38117
38118 /* This function emits the simple thunk routine that is used to preserve
38119 the link stack on the 476 cpu. */
38120
38121 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38122 static void
38123 rs6000_code_end (void)
38124 {
38125 char name[32];
38126 tree decl;
38127
38128 if (!TARGET_LINK_STACK)
38129 return;
38130
38131 get_ppc476_thunk_name (name);
38132
38133 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38134 build_function_type_list (void_type_node, NULL_TREE));
38135 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38136 NULL_TREE, void_type_node);
38137 TREE_PUBLIC (decl) = 1;
38138 TREE_STATIC (decl) = 1;
38139
38140 #if RS6000_WEAK
38141 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38142 {
38143 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38144 targetm.asm_out.unique_section (decl, 0);
38145 switch_to_section (get_named_section (decl, NULL, 0));
38146 DECL_WEAK (decl) = 1;
38147 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38148 targetm.asm_out.globalize_label (asm_out_file, name);
38149 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38150 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38151 }
38152 else
38153 #endif
38154 {
38155 switch_to_section (text_section);
38156 ASM_OUTPUT_LABEL (asm_out_file, name);
38157 }
38158
38159 DECL_INITIAL (decl) = make_node (BLOCK);
38160 current_function_decl = decl;
38161 allocate_struct_function (decl, false);
38162 init_function_start (decl);
38163 first_function_block_is_cold = false;
38164 /* Make sure unwind info is emitted for the thunk if needed. */
38165 final_start_function (emit_barrier (), asm_out_file, 1);
38166
38167 fputs ("\tblr\n", asm_out_file);
38168
38169 final_end_function ();
38170 init_insn_lengths ();
38171 free_after_compilation (cfun);
38172 set_cfun (NULL);
38173 current_function_decl = NULL;
38174 }
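
/* A sketch of what the above emits on ELF with hidden linkonce support
   (the section directive is abbreviated; it comes from
   targetm.asm_out.unique_section):

	.section <comdat text section for __ppc476.get_thunk>
	.weak	__ppc476.get_thunk
	.hidden	__ppc476.get_thunk
   __ppc476.get_thunk:
	blr

   Code that needs its own address can then use a matched bl/blr pair
   (bl __ppc476.get_thunk; mflr rN) instead of an unmatched bcl, which
   keeps the 476 link stack balanced.  */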
38175
38176 /* Add r30 to hard reg set if the prologue sets it up and it is not
38177 pic_offset_table_rtx. */
38178
38179 static void
38180 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38181 {
38182 if (!TARGET_SINGLE_PIC_BASE
38183 && TARGET_TOC
38184 && TARGET_MINIMAL_TOC
38185 && !constant_pool_empty_p ())
38186 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38187 if (cfun->machine->split_stack_argp_used)
38188 add_to_hard_reg_set (&set->set, Pmode, 12);
38189
38190 /* Make sure the hard reg set doesn't include r2, which was possibly added
38191 via PIC_OFFSET_TABLE_REGNUM. */
38192 if (TARGET_TOC)
38193 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38194 }
38195
38196 \f
38197 /* Helper function for rs6000_split_logical to emit a logical instruction after
38198 splitting the operation into single GPR registers.
38199
38200 DEST is the destination register.
38201 OP1 and OP2 are the input source registers.
38202 CODE is the base operation (AND, IOR, XOR, NOT).
38203 MODE is the machine mode.
38204 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38205 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38206 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38207
38208 static void
38209 rs6000_split_logical_inner (rtx dest,
38210 rtx op1,
38211 rtx op2,
38212 enum rtx_code code,
38213 machine_mode mode,
38214 bool complement_final_p,
38215 bool complement_op1_p,
38216 bool complement_op2_p)
38217 {
38218 rtx bool_rtx;
38219
38220 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38221 if (op2 && CONST_INT_P (op2)
38222 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38223 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38224 {
38225 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38226 HOST_WIDE_INT value = INTVAL (op2) & mask;
38227
38228 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38229 if (code == AND)
38230 {
38231 if (value == 0)
38232 {
38233 emit_insn (gen_rtx_SET (dest, const0_rtx));
38234 return;
38235 }
38236
38237 else if (value == mask)
38238 {
38239 if (!rtx_equal_p (dest, op1))
38240 emit_insn (gen_rtx_SET (dest, op1));
38241 return;
38242 }
38243 }
38244
38245 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38246 into separate ORI/ORIS or XORI/XORIS instructions. */
38247 else if (code == IOR || code == XOR)
38248 {
38249 if (value == 0)
38250 {
38251 if (!rtx_equal_p (dest, op1))
38252 emit_insn (gen_rtx_SET (dest, op1));
38253 return;
38254 }
38255 }
38256 }
38257
38258 if (code == AND && mode == SImode
38259 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38260 {
38261 emit_insn (gen_andsi3 (dest, op1, op2));
38262 return;
38263 }
38264
38265 if (complement_op1_p)
38266 op1 = gen_rtx_NOT (mode, op1);
38267
38268 if (complement_op2_p)
38269 op2 = gen_rtx_NOT (mode, op2);
38270
38271 /* For canonical RTL, if only one arm is inverted it is the first. */
38272 if (!complement_op1_p && complement_op2_p)
38273 std::swap (op1, op2);
38274
38275 bool_rtx = ((code == NOT)
38276 ? gen_rtx_NOT (mode, op1)
38277 : gen_rtx_fmt_ee (code, mode, op1, op2));
38278
38279 if (complement_final_p)
38280 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38281
38282 emit_insn (gen_rtx_SET (dest, bool_rtx));
38283 }
38284
38285 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38286 operations are split immediately during RTL generation to allow for more
38287 optimizations of the AND/IOR/XOR.
38288
38289 OPERANDS is an array containing the destination and two input operands.
38290 CODE is the base operation (AND, IOR, XOR, NOT).
38292 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38293 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38294 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38297
38298 static void
38299 rs6000_split_logical_di (rtx operands[3],
38300 enum rtx_code code,
38301 bool complement_final_p,
38302 bool complement_op1_p,
38303 bool complement_op2_p)
38304 {
38305 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38306 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38307 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38308 enum hi_lo { hi = 0, lo = 1 };
38309 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38310 size_t i;
38311
38312 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38313 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38314 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38315 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38316
38317 if (code == NOT)
38318 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38319 else
38320 {
38321 if (!CONST_INT_P (operands[2]))
38322 {
38323 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38324 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38325 }
38326 else
38327 {
38328 HOST_WIDE_INT value = INTVAL (operands[2]);
38329 HOST_WIDE_INT value_hi_lo[2];
38330
38331 gcc_assert (!complement_final_p);
38332 gcc_assert (!complement_op1_p);
38333 gcc_assert (!complement_op2_p);
38334
38335 value_hi_lo[hi] = value >> 32;
38336 value_hi_lo[lo] = value & lower_32bits;
38337
38338 for (i = 0; i < 2; i++)
38339 {
38340 HOST_WIDE_INT sub_value = value_hi_lo[i];
38341
38342 if (sub_value & sign_bit)
38343 sub_value |= upper_32bits;
38344
38345 op2_hi_lo[i] = GEN_INT (sub_value);
38346
38347 /* If this is an AND instruction, check to see if we need to load
38348 the value in a register. */
38349 if (code == AND && sub_value != -1 && sub_value != 0
38350 && !and_operand (op2_hi_lo[i], SImode))
38351 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38352 }
38353 }
38354 }
38355
38356 for (i = 0; i < 2; i++)
38357 {
38358 /* Split large IOR/XOR operations. */
38359 if ((code == IOR || code == XOR)
38360 && CONST_INT_P (op2_hi_lo[i])
38361 && !complement_final_p
38362 && !complement_op1_p
38363 && !complement_op2_p
38364 && !logical_const_operand (op2_hi_lo[i], SImode))
38365 {
38366 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38367 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38368 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38369 rtx tmp = gen_reg_rtx (SImode);
38370
38371 /* Make sure the constant is sign extended. */
38372 if ((hi_16bits & sign_bit) != 0)
38373 hi_16bits |= upper_32bits;
38374
38375 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38376 code, SImode, false, false, false);
38377
38378 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38379 code, SImode, false, false, false);
38380 }
38381 else
38382 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38383 code, SImode, complement_final_p,
38384 complement_op1_p, complement_op2_p);
38385 }
38386
38387 return;
38388 }
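
/* An illustrative sketch (register numbers are made up): on a 32-bit
   target, a DImode IOR with the constant 0x0001000200030004 has both
   16-bit halves of each word nonzero, so after register allocation the
   loop above amounts to an ORIS/ORI pair per 32-bit half:

	oris 30,30,0x1		# high word, upper 16 bits
	ori  30,30,0x2		# high word, lower 16 bits
	oris 31,31,0x3		# low word, upper 16 bits
	ori  31,31,0x4		# low word, lower 16 bits  */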
38389
38390 /* Split the insns that make up boolean operations operating on multiple GPR
38391 registers. The boolean MD patterns ensure that the inputs either are
38392 exactly the same as the output registers, or there is no overlap.
38393
38394 OPERANDS is an array containing the destination and two input operands.
38395 CODE is the base operation (AND, IOR, XOR, NOT).
38396 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38397 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38398 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38399
38400 void
38401 rs6000_split_logical (rtx operands[3],
38402 enum rtx_code code,
38403 bool complement_final_p,
38404 bool complement_op1_p,
38405 bool complement_op2_p)
38406 {
38407 machine_mode mode = GET_MODE (operands[0]);
38408 machine_mode sub_mode;
38409 rtx op0, op1, op2;
38410 int sub_size, regno0, regno1, nregs, i;
38411
38412 /* If this is DImode, use the specialized version that can run before
38413 register allocation. */
38414 if (mode == DImode && !TARGET_POWERPC64)
38415 {
38416 rs6000_split_logical_di (operands, code, complement_final_p,
38417 complement_op1_p, complement_op2_p);
38418 return;
38419 }
38420
38421 op0 = operands[0];
38422 op1 = operands[1];
38423 op2 = (code == NOT) ? NULL_RTX : operands[2];
38424 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38425 sub_size = GET_MODE_SIZE (sub_mode);
38426 regno0 = REGNO (op0);
38427 regno1 = REGNO (op1);
38428
38429 gcc_assert (reload_completed);
38430 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38431 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38432
38433 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38434 gcc_assert (nregs > 1);
38435
38436 if (op2 && REG_P (op2))
38437 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38438
38439 for (i = 0; i < nregs; i++)
38440 {
38441 int offset = i * sub_size;
38442 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38443 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38444 rtx sub_op2 = ((code == NOT)
38445 ? NULL_RTX
38446 : simplify_subreg (sub_mode, op2, mode, offset));
38447
38448 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38449 complement_final_p, complement_op1_p,
38450 complement_op2_p);
38451 }
38452
38453 return;
38454 }
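
/* For example (a sketch; register assignments are hypothetical): after
   reload, a TImode XOR held in GPR pairs r10:r11 and r8:r9 on a 64-bit
   target is decomposed by the loop above into two DImode insns:

	xor 10,10,8
	xor 11,11,9  */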
38455
38456 \f
38457 /* Return true if the peephole2 pass can combine an addis instruction and a
38458 load with an offset into a sequence that can be fused together on
38459 a power8. */
38460
38461 bool
38462 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38463 rtx addis_value, /* addis value. */
38464 rtx target, /* target register that is loaded. */
38465 rtx mem) /* bottom part of the memory addr. */
38466 {
38467 rtx addr;
38468 rtx base_reg;
38469
38470 /* Validate arguments. */
38471 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38472 return false;
38473
38474 if (!base_reg_operand (target, GET_MODE (target)))
38475 return false;
38476
38477 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38478 return false;
38479
38480 /* Allow sign/zero extension. */
38481 if (GET_CODE (mem) == ZERO_EXTEND
38482 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38483 mem = XEXP (mem, 0);
38484
38485 if (!MEM_P (mem))
38486 return false;
38487
38488 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38489 return false;
38490
38491 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38492 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38493 return false;
38494
38495 /* Validate that the register used to load the high value is either the
38496 register being loaded, or one whose use we can safely replace.
38497
38498 This function is only called from the peephole2 pass and we assume that
38499 there are 2 instructions in the peephole (addis and load), so we want to
38500 check if the target register was not used in the memory address and the
38501 register to hold the addis result is dead after the peephole. */
38502 if (REGNO (addis_reg) != REGNO (target))
38503 {
38504 if (reg_mentioned_p (target, mem))
38505 return false;
38506
38507 if (!peep2_reg_dead_p (2, addis_reg))
38508 return false;
38509
38510 /* If the target register being loaded is the stack pointer, we must
38511 avoid loading any other value into it, even temporarily. */
38512 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38513 return false;
38514 }
38515
38516 base_reg = XEXP (addr, 0);
38517 return REGNO (addis_reg) == REGNO (base_reg);
38518 }
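
/* To illustrate the transformation this function validates (made-up
   registers and symbol), the peephole2 turns:

	addis 9,2,sym@toc@ha	# addis_reg = r9
	lwz 3,sym@toc@l(9)	# target = r3, r9 dead afterwards

   into the sequence emitted by emit_fusion_gpr_load below, where both
   instructions use the target register:

	addis 3,2,sym@toc@ha
	lwz 3,sym@toc@l(3)

   which a power8 can fuse at dispatch.  */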
38519
38520 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38521 sequence. We adjust the addis register to use the target register. If the
38522 load sign-extends, we adjust the code to do a zero-extending load followed
38523 by an explicit sign extension, since the fusion only covers zero-extending
38524 loads.
38525
38526 The operands are:
38527 operands[0] register set with addis (to be replaced with target)
38528 operands[1] value set via addis
38529 operands[2] target register being loaded
38530 operands[3] D-form memory reference using operands[0]. */
38531
38532 void
38533 expand_fusion_gpr_load (rtx *operands)
38534 {
38535 rtx addis_value = operands[1];
38536 rtx target = operands[2];
38537 rtx orig_mem = operands[3];
38538 rtx new_addr, new_mem, orig_addr, offset;
38539 enum rtx_code plus_or_lo_sum;
38540 machine_mode target_mode = GET_MODE (target);
38541 machine_mode extend_mode = target_mode;
38542 machine_mode ptr_mode = Pmode;
38543 enum rtx_code extend = UNKNOWN;
38544
38545 if (GET_CODE (orig_mem) == ZERO_EXTEND
38546 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38547 {
38548 extend = GET_CODE (orig_mem);
38549 orig_mem = XEXP (orig_mem, 0);
38550 target_mode = GET_MODE (orig_mem);
38551 }
38552
38553 gcc_assert (MEM_P (orig_mem));
38554
38555 orig_addr = XEXP (orig_mem, 0);
38556 plus_or_lo_sum = GET_CODE (orig_addr);
38557 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38558
38559 offset = XEXP (orig_addr, 1);
38560 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38561 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38562
38563 if (extend != UNKNOWN)
38564 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38565
38566 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38567 UNSPEC_FUSION_GPR);
38568 emit_insn (gen_rtx_SET (target, new_mem));
38569
38570 if (extend == SIGN_EXTEND)
38571 {
38572 int sub_off = ((BYTES_BIG_ENDIAN)
38573 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38574 : 0);
38575 rtx sign_reg
38576 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38577
38578 emit_insn (gen_rtx_SET (target,
38579 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38580 }
38581
38582 return;
38583 }
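
/* Sketch of the sign-extending case (hypothetical registers and symbol):
   a peepholed "addis 9,2,sym@toc@ha ; lha 3,sym@toc@l(9)" becomes the
   zero-extending fused pair plus a separate sign-extension insn, roughly:

	addis 3,2,sym@toc@ha
	lhz 3,sym@toc@l(3)
	extsh 3,3  */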
38584
38585 /* Emit the addis instruction that will be part of a fused instruction
38586 sequence. */
38587
38588 void
38589 emit_fusion_addis (rtx target, rtx addis_value)
38590 {
38591 rtx fuse_ops[10];
38592 const char *addis_str = NULL;
38593
38594 /* Emit the addis instruction. */
38595 fuse_ops[0] = target;
38596 if (satisfies_constraint_L (addis_value))
38597 {
38598 fuse_ops[1] = addis_value;
38599 addis_str = "lis %0,%v1";
38600 }
38601
38602 else if (GET_CODE (addis_value) == PLUS)
38603 {
38604 rtx op0 = XEXP (addis_value, 0);
38605 rtx op1 = XEXP (addis_value, 1);
38606
38607 if (REG_P (op0) && CONST_INT_P (op1)
38608 && satisfies_constraint_L (op1))
38609 {
38610 fuse_ops[1] = op0;
38611 fuse_ops[2] = op1;
38612 addis_str = "addis %0,%1,%v2";
38613 }
38614 }
38615
38616 else if (GET_CODE (addis_value) == HIGH)
38617 {
38618 rtx value = XEXP (addis_value, 0);
38619 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38620 {
38621 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38622 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38623 if (TARGET_ELF)
38624 addis_str = "addis %0,%2,%1@toc@ha";
38625
38626 else if (TARGET_XCOFF)
38627 addis_str = "addis %0,%1@u(%2)";
38628
38629 else
38630 gcc_unreachable ();
38631 }
38632
38633 else if (GET_CODE (value) == PLUS)
38634 {
38635 rtx op0 = XEXP (value, 0);
38636 rtx op1 = XEXP (value, 1);
38637
38638 if (GET_CODE (op0) == UNSPEC
38639 && XINT (op0, 1) == UNSPEC_TOCREL
38640 && CONST_INT_P (op1))
38641 {
38642 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38643 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38644 fuse_ops[3] = op1;
38645 if (TARGET_ELF)
38646 addis_str = "addis %0,%2,%1+%3@toc@ha";
38647
38648 else if (TARGET_XCOFF)
38649 addis_str = "addis %0,%1+%3@u(%2)";
38650
38651 else
38652 gcc_unreachable ();
38653 }
38654 }
38655
38656 else if (satisfies_constraint_L (value))
38657 {
38658 fuse_ops[1] = value;
38659 addis_str = "lis %0,%v1";
38660 }
38661
38662 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38663 {
38664 fuse_ops[1] = value;
38665 addis_str = "lis %0,%1@ha";
38666 }
38667 }
38668
38669 if (!addis_str)
38670 fatal_insn ("Could not generate addis value for fusion", addis_value);
38671
38672 output_asm_insn (addis_str, fuse_ops);
38673 }
38674
38675 /* Emit a D-form load or store instruction that is the second instruction
38676 of a fusion sequence. */
38677
38678 static void
38679 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38680 {
38681 rtx fuse_ops[10];
38682 char insn_template[80];
38683
38684 fuse_ops[0] = load_reg;
38685 fuse_ops[1] = addis_reg;
38686
38687 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38688 {
38689 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38690 fuse_ops[2] = offset;
38691 output_asm_insn (insn_template, fuse_ops);
38692 }
38693
38694 else if (GET_CODE (offset) == UNSPEC
38695 && XINT (offset, 1) == UNSPEC_TOCREL)
38696 {
38697 if (TARGET_ELF)
38698 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38699
38700 else if (TARGET_XCOFF)
38701 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38702
38703 else
38704 gcc_unreachable ();
38705
38706 fuse_ops[2] = XVECEXP (offset, 0, 0);
38707 output_asm_insn (insn_template, fuse_ops);
38708 }
38709
38710 else if (GET_CODE (offset) == PLUS
38711 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38712 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38713 && CONST_INT_P (XEXP (offset, 1)))
38714 {
38715 rtx tocrel_unspec = XEXP (offset, 0);
38716 if (TARGET_ELF)
38717 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38718
38719 else if (TARGET_XCOFF)
38720 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38721
38722 else
38723 gcc_unreachable ();
38724
38725 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38726 fuse_ops[3] = XEXP (offset, 1);
38727 output_asm_insn (insn_template, fuse_ops);
38728 }
38729
38730 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38731 {
38732 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38733
38734 fuse_ops[2] = offset;
38735 output_asm_insn (insn_template, fuse_ops);
38736 }
38737
38738 else
38739 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38740
38741 return;
38742 }
38743
38744 /* Given an address, convert it into the addis and load offset parts. Addresses
38745 created during the peephole2 process look like:
38746 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38747 (unspec [(...)] UNSPEC_TOCREL)) */
38748
38749 static void
38750 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38751 {
38752 rtx hi, lo;
38753
38754 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38755 {
38756 hi = XEXP (addr, 0);
38757 lo = XEXP (addr, 1);
38758 }
38759 else
38760 gcc_unreachable ();
38761
38762 *p_hi = hi;
38763 *p_lo = lo;
38764 }
38765
38766 /* Return a string to fuse an addis instruction with a gpr load into the same
38767 register that the addis instruction set up. The address that is used
38768 is the logical address that was formed during peephole2:
38769 (lo_sum (high) (low-part))
38770
38771 The code is complicated, so we call output_asm_insn directly, and just
38772 return "". */
38773
38774 const char *
38775 emit_fusion_gpr_load (rtx target, rtx mem)
38776 {
38777 rtx addis_value;
38778 rtx addr;
38779 rtx load_offset;
38780 const char *load_str = NULL;
38781 machine_mode mode;
38782
38783 if (GET_CODE (mem) == ZERO_EXTEND)
38784 mem = XEXP (mem, 0);
38785
38786 gcc_assert (REG_P (target) && MEM_P (mem));
38787
38788 addr = XEXP (mem, 0);
38789 fusion_split_address (addr, &addis_value, &load_offset);
38790
38791 /* Now emit the load instruction to the same register. */
38792 mode = GET_MODE (mem);
38793 switch (mode)
38794 {
38795 case E_QImode:
38796 load_str = "lbz";
38797 break;
38798
38799 case E_HImode:
38800 load_str = "lhz";
38801 break;
38802
38803 case E_SImode:
38804 case E_SFmode:
38805 load_str = "lwz";
38806 break;
38807
38808 case E_DImode:
38809 case E_DFmode:
38810 gcc_assert (TARGET_POWERPC64);
38811 load_str = "ld";
38812 break;
38813
38814 default:
38815 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38816 }
38817
38818 /* Emit the addis instruction. */
38819 emit_fusion_addis (target, addis_value);
38820
38821 /* Emit the D-form load instruction. */
38822 emit_fusion_load (target, target, load_offset, load_str);
38823
38824 return "";
38825 }
38826 \f
38827
38828 #ifdef RS6000_GLIBC_ATOMIC_FENV
38829 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38830 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38831 #endif
38832
38833 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
38834
38835 static void
38836 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38837 {
38838 if (!TARGET_HARD_FLOAT)
38839 {
38840 #ifdef RS6000_GLIBC_ATOMIC_FENV
38841 if (atomic_hold_decl == NULL_TREE)
38842 {
38843 atomic_hold_decl
38844 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38845 get_identifier ("__atomic_feholdexcept"),
38846 build_function_type_list (void_type_node,
38847 double_ptr_type_node,
38848 NULL_TREE));
38849 TREE_PUBLIC (atomic_hold_decl) = 1;
38850 DECL_EXTERNAL (atomic_hold_decl) = 1;
38851 }
38852
38853 if (atomic_clear_decl == NULL_TREE)
38854 {
38855 atomic_clear_decl
38856 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38857 get_identifier ("__atomic_feclearexcept"),
38858 build_function_type_list (void_type_node,
38859 NULL_TREE));
38860 TREE_PUBLIC (atomic_clear_decl) = 1;
38861 DECL_EXTERNAL (atomic_clear_decl) = 1;
38862 }
38863
38864 tree const_double = build_qualified_type (double_type_node,
38865 TYPE_QUAL_CONST);
38866 tree const_double_ptr = build_pointer_type (const_double);
38867 if (atomic_update_decl == NULL_TREE)
38868 {
38869 atomic_update_decl
38870 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38871 get_identifier ("__atomic_feupdateenv"),
38872 build_function_type_list (void_type_node,
38873 const_double_ptr,
38874 NULL_TREE));
38875 TREE_PUBLIC (atomic_update_decl) = 1;
38876 DECL_EXTERNAL (atomic_update_decl) = 1;
38877 }
38878
38879 tree fenv_var = create_tmp_var_raw (double_type_node);
38880 TREE_ADDRESSABLE (fenv_var) = 1;
38881 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38882
38883 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38884 *clear = build_call_expr (atomic_clear_decl, 0);
38885 *update = build_call_expr (atomic_update_decl, 1,
38886 fold_convert (const_double_ptr, fenv_addr));
38887 #endif
38888 return;
38889 }
38890
38891 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38892 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38893 tree call_mffs = build_call_expr (mffs, 0);
38894
38895 /* Generates the equivalent of feholdexcept (&fenv_var)
38896
38897 *fenv_var = __builtin_mffs ();
38898 double fenv_hold;
38899 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
38900 __builtin_mtfsf (0xff, fenv_hold); */
38901
38902 /* Mask to clear everything except for the rounding modes and non-IEEE
38903 arithmetic flag. */
38904 const unsigned HOST_WIDE_INT hold_exception_mask =
38905 HOST_WIDE_INT_C (0xffffffff00000007);
38906
38907 tree fenv_var = create_tmp_var_raw (double_type_node);
38908
38909 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38910
38911 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38912 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38913 build_int_cst (uint64_type_node,
38914 hold_exception_mask));
38915
38916 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38917 fenv_llu_and);
38918
38919 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38920 build_int_cst (unsigned_type_node, 0xff),
38921 fenv_hold_mtfsf);
38922
38923 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38924
38925 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38926
38927 double fenv_clear = __builtin_mffs ();
38928 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38929 __builtin_mtfsf (0xff, fenv_clear); */
38930
38931 /* Mask that keeps only the upper 32 bits, clearing all of the FPSCR
38932 status, exception, and rounding mode bits in the lower word. */
38933 const unsigned HOST_WIDE_INT clear_exception_mask =
38934 HOST_WIDE_INT_C (0xffffffff00000000);
38935
38936 tree fenv_clear = create_tmp_var_raw (double_type_node);
38937
38938 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38939
38940 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38941 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38942 fenv_clean_llu,
38943 build_int_cst (uint64_type_node,
38944 clear_exception_mask));
38945
38946 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38947 fenv_clear_llu_and);
38948
38949 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38950 build_int_cst (unsigned_type_node, 0xff),
38951 fenv_clear_mtfsf);
38952
38953 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38954
38955 /* Generates the equivalent of feupdateenv (&fenv_var)
38956
38957 double old_fenv = __builtin_mffs ();
38958 double fenv_update;
38959 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38960 (*(uint64_t*)fenv_var & 0x1ff80fff);
38961 __builtin_mtfsf (0xff, fenv_update); */
38962
38963 const unsigned HOST_WIDE_INT update_exception_mask =
38964 HOST_WIDE_INT_C (0xffffffff1fffff00);
38965 const unsigned HOST_WIDE_INT new_exception_mask =
38966 HOST_WIDE_INT_C (0x1ff80fff);
38967
38968 tree old_fenv = create_tmp_var_raw (double_type_node);
38969 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38970
38971 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38972 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38973 build_int_cst (uint64_type_node,
38974 update_exception_mask));
38975
38976 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38977 build_int_cst (uint64_type_node,
38978 new_exception_mask));
38979
38980 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
38981 old_llu_and, new_llu_and);
38982
38983 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38984 new_llu_mask);
38985
38986 tree update_mtfsf = build_call_expr (mtfsf, 2,
38987 build_int_cst (unsigned_type_node, 0xff),
38988 fenv_update_mtfsf);
38989
38990 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
38991 }
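
/* A rough picture of how the middle end uses the sequences built above,
   per the TARGET_ATOMIC_ASSIGN_EXPAND_FENV contract: for a C11 atomic
   compound assignment such as

	_Atomic double a; ... a += b;

   *HOLD runs before the compare-and-exchange loop, *CLEAR runs when an
   iteration's result is discarded (so its spurious FP exceptions are
   dropped), and *UPDATE runs at the end to merge the exceptions of the
   successful iteration back into the saved environment.  */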
38992
38993 void
38994 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
38995 {
38996 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
38997
38998 rtx_tmp0 = gen_reg_rtx (V2DFmode);
38999 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39000
39001 /* The destination layout of the vmrgew instruction is:
39002 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39003 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39004 vmrgew instruction will be correct. */
39005 if (BYTES_BIG_ENDIAN)
39006 {
39007 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39008 GEN_INT (0)));
39009 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39010 GEN_INT (3)));
39011 }
39012 else
39013 {
39014 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39015 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39016 }
39017
39018 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39019 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39020
39021 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39022 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39023
39024 if (BYTES_BIG_ENDIAN)
39025 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39026 else
39027 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39028 }
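
/* Net effect, as a sketch (element numbering follows the array order of
   the vector type): the two V2DF inputs are narrowed and packed into a
   single V4SF as

	dst = { (float) src1[0], (float) src1[1],
		(float) src2[0], (float) src2[1] };  */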
39029
39030 void
39031 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39032 {
39033 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39034
39035 rtx_tmp0 = gen_reg_rtx (V2DImode);
39036 rtx_tmp1 = gen_reg_rtx (V2DImode);
39037
39038 /* The destination layout of the vmrgew instruction is:
39039 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39040 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39041 vmrgew instruction will be correct. */
39042 if (BYTES_BIG_ENDIAN)
39043 {
39044 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39045 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39046 }
39047 else
39048 {
39049 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39050 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39051 }
39052
39053 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39054 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39055
39056 if (signed_convert)
39057 {
39058 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39059 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39060 }
39061 else
39062 {
39063 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39064 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39065 }
39066
39067 if (BYTES_BIG_ENDIAN)
39068 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39069 else
39070 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39071 }
39072
39073 void
39074 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39075 rtx src2)
39076 {
39077 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39078
39079 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39080 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39081
39082 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39083 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39084
39085 rtx_tmp2 = gen_reg_rtx (V4SImode);
39086 rtx_tmp3 = gen_reg_rtx (V4SImode);
39087
39088 if (signed_convert)
39089 {
39090 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39091 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39092 }
39093 else
39094 {
39095 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39096 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39097 }
39098
39099 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39100 }
39101
39102 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39103
39104 static bool
39105 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39106 optimization_type opt_type)
39107 {
39108 switch (op)
39109 {
39110 case rsqrt_optab:
39111 return (opt_type == OPTIMIZE_FOR_SPEED
39112 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39113
39114 default:
39115 return true;
39116 }
39117 }
39118
39119 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39120
39121 static HOST_WIDE_INT
39122 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39123 {
39124 if (TREE_CODE (exp) == STRING_CST
39125 && (STRICT_ALIGNMENT || !optimize_size))
39126 return MAX (align, BITS_PER_WORD);
39127 return align;
39128 }
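
/* For example, this bumps a string constant such as "abc" from byte
   alignment to 4-byte alignment on 32-bit targets and 8-byte alignment
   on 64-bit targets, which helps word-oriented string and block-move
   code; the alignment is left alone only when optimizing for size on a
   non-strict-alignment target.  */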
39129
39130 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39131
39132 static HOST_WIDE_INT
39133 rs6000_starting_frame_offset (void)
39134 {
39135 if (FRAME_GROWS_DOWNWARD)
39136 return 0;
39137 return RS6000_STARTING_FRAME_OFFSET;
39138 }
39139 \f
39140
39141 /* Create an alias for a mangled name where we have changed the mangling (in
39142 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39143 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39144
39145 #if TARGET_ELF && RS6000_WEAK
39146 static void
39147 rs6000_globalize_decl_name (FILE * stream, tree decl)
39148 {
39149 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39150
39151 targetm.asm_out.globalize_label (stream, name);
39152
39153 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39154 {
39155 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39156 const char *old_name;
39157
39158 ieee128_mangling_gcc_8_1 = true;
39159 lang_hooks.set_decl_assembler_name (decl);
39160 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39161 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39162 ieee128_mangling_gcc_8_1 = false;
39163
39164 if (strcmp (name, old_name) != 0)
39165 {
39166 fprintf (stream, "\t.weak %s\n", old_name);
39167 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39168 }
39169 }
39170 }
39171 #endif
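
/* A sketch of the emitted alias, assuming a C++ function foo taking an
   __ieee128 argument whose GCC 8.1 mangling differed:

	.globl	_Z3foou9__ieee128
	.weak	_Z3fooU10__float128
	.set	_Z3fooU10__float128,_Z3foou9__ieee128

   so objects built against the GCC 8.1 mangling still link.  */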
39172
39173 \f
39174 /* On 64-bit Linux and FreeBSD systems, possibly switch the long double library
39175 function names from <foo>l to <foo>f128 if the default long double type is
39176 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39177 include file switches the names on systems that support long double as IEEE
39178 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39179 In the future, glibc will export names like __ieee128_sinf128 and we can
39180 switch to using those instead of using sinf128, which pollutes the user's
39181 namespace.
39182
39183 This will switch the names for Fortran math functions as well (which doesn't
39184 use math.h). However, Fortran needs other changes to the compiler and
39185 library before you can switch the real*16 type at compile time.
39186
39187 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39188 only do this if the default is that long double is IBM extended double, and
39189 the user asked for IEEE 128-bit. */
39190
39191 static tree
39192 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39193 {
39194 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39195 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39196 {
39197 size_t len = IDENTIFIER_LENGTH (id);
39198 const char *name = IDENTIFIER_POINTER (id);
39199
39200 if (name[len - 1] == 'l')
39201 {
39202 bool uses_ieee128_p = false;
39203 tree type = TREE_TYPE (decl);
39204 machine_mode ret_mode = TYPE_MODE (type);
39205
39206 /* See if the function returns an IEEE 128-bit floating point type or
39207 complex type. */
39208 if (ret_mode == TFmode || ret_mode == TCmode)
39209 uses_ieee128_p = true;
39210 else
39211 {
39212 function_args_iterator args_iter;
39213 tree arg;
39214
39215 /* See if the function passes an IEEE 128-bit floating point type
39216 or complex type. */
39217 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39218 {
39219 machine_mode arg_mode = TYPE_MODE (arg);
39220 if (arg_mode == TFmode || arg_mode == TCmode)
39221 {
39222 uses_ieee128_p = true;
39223 break;
39224 }
39225 }
39226 }
39227
39228 /* If we passed or returned an IEEE 128-bit floating point type,
39229 change the name. */
39230 if (uses_ieee128_p)
39231 {
39232 char *name2 = (char *) alloca (len + 4);
39233 memcpy (name2, name, len - 1);
39234 strcpy (name2 + len - 1, "f128");
39235 id = get_identifier (name2);
39236 }
39237 }
39238 }
39239
39240 return id;
39241 }
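
/* Worked example: with -mabi=ieeelongdouble on a target whose default
   long double is IBM extended double, a call to __builtin_sinl has its
   assembler name rewritten here from "sinl" to "sinf128", matching the
   IEEE 128-bit entry point in libm.  */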
39242
39243 \f
39244 struct gcc_target targetm = TARGET_INITIALIZER;
39245
39246 #include "gt-rs6000.h"