rs6000: Delete the "wy" constraint
gcc/config/rs6000/rs6000.c
1 /* Subroutines used for code generation on IBM RS/6000.
2 Copyright (C) 1991-2019 Free Software Foundation, Inc.
3 Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published
9 by the Free Software Foundation; either version 3, or (at your
10 option) any later version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
20
21 #define IN_TARGET_CODE 1
22
23 #include "config.h"
24 #include "system.h"
25 #include "coretypes.h"
26 #include "backend.h"
27 #include "rtl.h"
28 #include "tree.h"
29 #include "memmodel.h"
30 #include "gimple.h"
31 #include "cfghooks.h"
32 #include "cfgloop.h"
33 #include "df.h"
34 #include "tm_p.h"
35 #include "stringpool.h"
36 #include "expmed.h"
37 #include "optabs.h"
38 #include "regs.h"
39 #include "ira.h"
40 #include "recog.h"
41 #include "cgraph.h"
42 #include "diagnostic-core.h"
43 #include "insn-attr.h"
44 #include "flags.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "attribs.h"
48 #include "stor-layout.h"
49 #include "calls.h"
50 #include "print-tree.h"
51 #include "varasm.h"
52 #include "explow.h"
53 #include "expr.h"
54 #include "output.h"
55 #include "dbxout.h"
56 #include "common/common-target.h"
57 #include "langhooks.h"
58 #include "reload.h"
59 #include "sched-int.h"
60 #include "gimplify.h"
61 #include "gimple-fold.h"
62 #include "gimple-iterator.h"
63 #include "gimple-ssa.h"
64 #include "gimple-walk.h"
65 #include "intl.h"
66 #include "params.h"
67 #include "tm-constrs.h"
68 #include "tree-vectorizer.h"
69 #include "target-globals.h"
70 #include "builtins.h"
71 #include "tree-vector-builder.h"
72 #include "context.h"
73 #include "tree-pass.h"
74 #include "except.h"
75 #if TARGET_XCOFF
76 #include "xcoffout.h" /* get declarations of xcoff_*_section_name */
77 #endif
78 #if TARGET_MACHO
79 #include "gstab.h" /* for N_SLINE */
80 #endif
81 #include "case-cfn-macros.h"
82 #include "ppc-auxv.h"
83 #include "tree-ssa-propagate.h"
84 #include "tree-vrp.h"
85 #include "tree-ssanames.h"
86
87 /* This file should be included last. */
88 #include "target-def.h"
89
90 #ifndef TARGET_NO_PROTOTYPE
91 #define TARGET_NO_PROTOTYPE 0
92 #endif
93
94 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
95 systems will also set long double to be IEEE 128-bit. AIX and Darwin
96 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
97 those systems will not pick up this default. This needs to be after all
98 of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
99 properly defined. */
100 #ifndef TARGET_IEEEQUAD_DEFAULT
101 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
102 #define TARGET_IEEEQUAD_DEFAULT 1
103 #else
104 #define TARGET_IEEEQUAD_DEFAULT 0
105 #endif
106 #endif
107
108 static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
109
110 /* Structure used to define the rs6000 stack */
111 typedef struct rs6000_stack {
112 int reload_completed; /* stack info won't change from here on */
113 int first_gp_reg_save; /* first callee saved GP register used */
114 int first_fp_reg_save; /* first callee saved FP register used */
115 int first_altivec_reg_save; /* first callee saved AltiVec register used */
116 int lr_save_p; /* true if the link reg needs to be saved */
117 int cr_save_p; /* true if the CR reg needs to be saved */
118 unsigned int vrsave_mask; /* mask of vec registers to save */
119 int push_p; /* true if we need to allocate stack space */
120 int calls_p; /* true if the function makes any calls */
121 int world_save_p; /* true if we're saving *everything*:
122 r13-r31, cr, f14-f31, vrsave, v20-v31 */
123 enum rs6000_abi abi; /* which ABI to use */
124 int gp_save_offset; /* offset to save GP regs from initial SP */
125 int fp_save_offset; /* offset to save FP regs from initial SP */
126 int altivec_save_offset; /* offset to save AltiVec regs from initial SP */
127 int lr_save_offset; /* offset to save LR from initial SP */
128 int cr_save_offset; /* offset to save CR from initial SP */
129 int vrsave_save_offset; /* offset to save VRSAVE from initial SP */
130 int varargs_save_offset; /* offset to save the varargs registers */
131 int ehrd_offset; /* offset to EH return data */
132 int ehcr_offset; /* offset to EH CR field data */
133 int reg_size; /* register size (4 or 8) */
134 HOST_WIDE_INT vars_size; /* variable save area size */
135 int parm_size; /* outgoing parameter size */
136 int save_size; /* save area size */
137 int fixed_size; /* fixed size of stack frame */
138 int gp_size; /* size of saved GP registers */
139 int fp_size; /* size of saved FP registers */
140 int altivec_size; /* size of saved AltiVec registers */
141 int cr_size; /* size to hold CR if not in fixed area */
142 int vrsave_size; /* size to hold VRSAVE */
143 int altivec_padding_size; /* size of altivec alignment padding */
144 HOST_WIDE_INT total_size; /* total bytes allocated for stack */
145 int savres_strategy;
146 } rs6000_stack_t;
147
148 /* A C structure for machine-specific, per-function data.
149 This is added to the cfun structure. */
150 typedef struct GTY(()) machine_function
151 {
152 /* Flags if __builtin_return_address (n) with n >= 1 was used. */
153 int ra_needs_full_frame;
154 /* Flags if __builtin_return_address (0) was used. */
155 int ra_need_lr;
156 /* Cache lr_save_p after expansion of builtin_eh_return. */
157 int lr_save_state;
158 /* Whether we need to save the TOC to the reserved stack location in the
159 function prologue. */
160 bool save_toc_in_prologue;
161 /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
162 varargs save area. */
163 HOST_WIDE_INT varargs_save_offset;
164 /* Alternative internal arg pointer for -fsplit-stack. */
165 rtx split_stack_arg_pointer;
166 bool split_stack_argp_used;
167 /* Flag if r2 setup is needed with ELFv2 ABI. */
168 bool r2_setup_needed;
169 /* The number of components we use for separate shrink-wrapping. */
170 int n_components;
171 /* The components already handled by separate shrink-wrapping, which should
172 not be considered by the prologue and epilogue. */
173 bool gpr_is_wrapped_separately[32];
174 bool fpr_is_wrapped_separately[32];
175 bool lr_is_wrapped_separately;
176 bool toc_is_wrapped_separately;
177 } machine_function;
178
179 /* Support targetm.vectorize.builtin_mask_for_load. */
180 static GTY(()) tree altivec_builtin_mask_for_load;
181
182 /* Set to nonzero once AIX common-mode calls have been defined. */
183 static GTY(()) int common_mode_defined;
184
185 /* Label number of the label created for -mrelocatable; we call it so we
186 can get the address of the GOT section. */
187 static int rs6000_pic_labelno;
188
189 #ifdef USING_ELFOS_H
190 /* Counter for labels which are to be placed in .fixup. */
191 int fixuplabelno = 0;
192 #endif
193
194 /* Whether to use variant of AIX ABI for PowerPC64 Linux. */
195 int dot_symbols;
196
197 /* Specify the machine mode that pointers have. After generation of rtl, the
198 compiler makes no further distinction between pointers and any other objects
199 of this machine mode. */
200 scalar_int_mode rs6000_pmode;
201
202 #if TARGET_ELF
203 /* Note whether IEEE 128-bit floating point was passed or returned, either as
204 the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
205 floating point. We changed the default C++ mangling for these types and we
206 may want to generate a weak alias of the old mangling (U10__float128) to the
207 new mangling (u9__ieee128). */
208 static bool rs6000_passes_ieee128;
209 #endif
210
211 /* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
212 name used in current releases (i.e. u9__ieee128). */
213 static bool ieee128_mangling_gcc_8_1;
214
215 /* Width in bits of a pointer. */
216 unsigned rs6000_pointer_size;
217
218 #ifdef HAVE_AS_GNU_ATTRIBUTE
219 # ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
220 # define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
221 # endif
222 /* Flag whether floating point values have been passed/returned.
223 Note that this doesn't say whether fprs are used, since the
224 Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
225 should be set for soft-float values passed in gprs and ieee128
226 values passed in vsx registers. */
227 static bool rs6000_passes_float;
228 static bool rs6000_passes_long_double;
229 /* Flag whether vector values have been passed/returned. */
230 static bool rs6000_passes_vector;
231 /* Flag whether small (<= 8 byte) structures have been returned. */
232 static bool rs6000_returns_struct;
233 #endif
234
235 /* Value is TRUE if register/mode pair is acceptable. */
236 static bool rs6000_hard_regno_mode_ok_p
237 [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
238
239 /* Maximum number of registers needed for a given register class and mode. */
240 unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];
241
242 /* How many registers are needed for a given register and mode. */
243 unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
244
245 /* Map register number to register class. */
246 enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];
247
248 static int dbg_cost_ctrl;
249
250 /* Built in types. */
251 tree rs6000_builtin_types[RS6000_BTI_MAX];
252 tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];
253
254 /* Flag to say the TOC is initialized */
255 int toc_initialized, need_toc_init;
256 char toc_label_name[10];
257
258 /* Cached value of rs6000_variable_issue. This is cached in the
259 rs6000_variable_issue hook and returned from rs6000_sched_reorder2. */
260 static short cached_can_issue_more;
261
262 static GTY(()) section *read_only_data_section;
263 static GTY(()) section *private_data_section;
264 static GTY(()) section *tls_data_section;
265 static GTY(()) section *tls_private_data_section;
266 static GTY(()) section *read_only_private_data_section;
267 static GTY(()) section *sdata2_section;
268 static GTY(()) section *toc_section;
269
270 struct builtin_description
271 {
272 const HOST_WIDE_INT mask;
273 const enum insn_code icode;
274 const char *const name;
275 const enum rs6000_builtins code;
276 };
277
278 /* Describe the vector unit used for modes. */
279 enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
280 enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
281
282 /* Register classes for various constraints that are based on the target
283 switches. */
284 enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
285
286 /* Describe the alignment of a vector. */
287 int rs6000_vector_align[NUM_MACHINE_MODES];
288
289 /* Map selected modes to types for builtins. */
290 static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];
291
292 /* What modes to automatically generate reciprocal divide estimate (fre) and
293 reciprocal sqrt (frsqrte) for. */
294 unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
295
296 /* Masks to determine which reciprocal estimate instructions to generate
297 automatically. */
298 enum rs6000_recip_mask {
299 RECIP_SF_DIV = 0x001, /* Use divide estimate */
300 RECIP_DF_DIV = 0x002,
301 RECIP_V4SF_DIV = 0x004,
302 RECIP_V2DF_DIV = 0x008,
303
304 RECIP_SF_RSQRT = 0x010, /* Use reciprocal sqrt estimate. */
305 RECIP_DF_RSQRT = 0x020,
306 RECIP_V4SF_RSQRT = 0x040,
307 RECIP_V2DF_RSQRT = 0x080,
308
309 /* Various combinations of flags for -mrecip=xxx. */
310 RECIP_NONE = 0,
311 RECIP_ALL = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
312 | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
313 | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),
314
315 RECIP_HIGH_PRECISION = RECIP_ALL,
316
317 /* On low precision machines like the power5, don't enable double precision
318 reciprocal square root estimate, since it isn't accurate enough. */
319 RECIP_LOW_PRECISION = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
320 };
321
322 /* -mrecip options. */
323 static struct
324 {
325 const char *string; /* option name */
326 unsigned int mask; /* mask bits to set */
327 } recip_options[] = {
328 { "all", RECIP_ALL },
329 { "none", RECIP_NONE },
330 { "div", (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
331 | RECIP_V2DF_DIV) },
332 { "divf", (RECIP_SF_DIV | RECIP_V4SF_DIV) },
333 { "divd", (RECIP_DF_DIV | RECIP_V2DF_DIV) },
334 { "rsqrt", (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
335 | RECIP_V2DF_RSQRT) },
336 { "rsqrtf", (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
337 { "rsqrtd", (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
338 };
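
/* For illustration, option handling can map each comma-separated -mrecip=
   argument onto its RECIP_* mask bits with a linear search of the table
   above; a minimal sketch (hypothetical helper, not the actual
   option-override code):

     static unsigned int
     recip_mask_for_arg (const char *arg)
     {
       for (size_t i = 0; i < ARRAY_SIZE (recip_options); i++)
         if (strcmp (arg, recip_options[i].string) == 0)
           return recip_options[i].mask;
       return 0;
     }

   So -mrecip=divf would enable RECIP_SF_DIV | RECIP_V4SF_DIV.  */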
339
340 /* Used by __builtin_cpu_is(), mapping from PLATFORM names to values. */
341 static const struct
342 {
343 const char *cpu;
344 unsigned int cpuid;
345 } cpu_is_info[] = {
346 { "power9", PPC_PLATFORM_POWER9 },
347 { "power8", PPC_PLATFORM_POWER8 },
348 { "power7", PPC_PLATFORM_POWER7 },
349 { "power6x", PPC_PLATFORM_POWER6X },
350 { "power6", PPC_PLATFORM_POWER6 },
351 { "power5+", PPC_PLATFORM_POWER5_PLUS },
352 { "power5", PPC_PLATFORM_POWER5 },
353 { "ppc970", PPC_PLATFORM_PPC970 },
354 { "power4", PPC_PLATFORM_POWER4 },
355 { "ppca2", PPC_PLATFORM_PPCA2 },
356 { "ppc476", PPC_PLATFORM_PPC476 },
357 { "ppc464", PPC_PLATFORM_PPC464 },
358 { "ppc440", PPC_PLATFORM_PPC440 },
359 { "ppc405", PPC_PLATFORM_PPC405 },
360 { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
361 };
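
/* For example, user code can test the run-time platform with
   __builtin_cpu_is, whose string argument is matched against the names
   above (a usage sketch; the called functions are hypothetical):

     if (__builtin_cpu_is ("power9"))
       use_power9_path ();
     else
       use_generic_path ();
*/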
362
363 /* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks. */
364 static const struct
365 {
366 const char *hwcap;
367 int mask;
368 unsigned int id;
369 } cpu_supports_info[] = {
370 /* AT_HWCAP masks. */
371 { "4xxmac", PPC_FEATURE_HAS_4xxMAC, 0 },
372 { "altivec", PPC_FEATURE_HAS_ALTIVEC, 0 },
373 { "arch_2_05", PPC_FEATURE_ARCH_2_05, 0 },
374 { "arch_2_06", PPC_FEATURE_ARCH_2_06, 0 },
375 { "archpmu", PPC_FEATURE_PERFMON_COMPAT, 0 },
376 { "booke", PPC_FEATURE_BOOKE, 0 },
377 { "cellbe", PPC_FEATURE_CELL_BE, 0 },
378 { "dfp", PPC_FEATURE_HAS_DFP, 0 },
379 { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE, 0 },
380 { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE, 0 },
381 { "fpu", PPC_FEATURE_HAS_FPU, 0 },
382 { "ic_snoop", PPC_FEATURE_ICACHE_SNOOP, 0 },
383 { "mmu", PPC_FEATURE_HAS_MMU, 0 },
384 { "notb", PPC_FEATURE_NO_TB, 0 },
385 { "pa6t", PPC_FEATURE_PA6T, 0 },
386 { "power4", PPC_FEATURE_POWER4, 0 },
387 { "power5", PPC_FEATURE_POWER5, 0 },
388 { "power5+", PPC_FEATURE_POWER5_PLUS, 0 },
389 { "power6x", PPC_FEATURE_POWER6_EXT, 0 },
390 { "ppc32", PPC_FEATURE_32, 0 },
391 { "ppc601", PPC_FEATURE_601_INSTR, 0 },
392 { "ppc64", PPC_FEATURE_64, 0 },
393 { "ppcle", PPC_FEATURE_PPC_LE, 0 },
394 { "smt", PPC_FEATURE_SMT, 0 },
395 { "spe", PPC_FEATURE_HAS_SPE, 0 },
396 { "true_le", PPC_FEATURE_TRUE_LE, 0 },
397 { "ucache", PPC_FEATURE_UNIFIED_CACHE, 0 },
398 { "vsx", PPC_FEATURE_HAS_VSX, 0 },
399
400 /* AT_HWCAP2 masks. */
401 { "arch_2_07", PPC_FEATURE2_ARCH_2_07, 1 },
402 { "dscr", PPC_FEATURE2_HAS_DSCR, 1 },
403 { "ebb", PPC_FEATURE2_HAS_EBB, 1 },
404 { "htm", PPC_FEATURE2_HAS_HTM, 1 },
405 { "htm-nosc", PPC_FEATURE2_HTM_NOSC, 1 },
406 { "htm-no-suspend", PPC_FEATURE2_HTM_NO_SUSPEND, 1 },
407 { "isel", PPC_FEATURE2_HAS_ISEL, 1 },
408 { "tar", PPC_FEATURE2_HAS_TAR, 1 },
409 { "vcrypto", PPC_FEATURE2_HAS_VEC_CRYPTO, 1 },
410 { "arch_3_00", PPC_FEATURE2_ARCH_3_00, 1 },
411 { "ieee128", PPC_FEATURE2_HAS_IEEE128, 1 },
412 { "darn", PPC_FEATURE2_DARN, 1 },
413 { "scv", PPC_FEATURE2_SCV, 1 }
414 };
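
/* Likewise, __builtin_cpu_supports tests a feature bit by name, e.g.
   (illustrative only; the called function is hypothetical):

     if (__builtin_cpu_supports ("vsx"))
       use_vsx_path ();

   The id field above selects which word the builtin consults: 0 for
   AT_HWCAP, 1 for AT_HWCAP2.  */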
415
416 /* On PowerPC, we have a limited number of target clones that we care about,
417 which means we can use an array to hold the options, rather than having more
418 elaborate data structures to identify each possible variation. Order the
419 clones from the default to the highest ISA. */
420 enum {
421 CLONE_DEFAULT = 0, /* default clone. */
422 CLONE_ISA_2_05, /* ISA 2.05 (power6). */
423 CLONE_ISA_2_06, /* ISA 2.06 (power7). */
424 CLONE_ISA_2_07, /* ISA 2.07 (power8). */
425 CLONE_ISA_3_00, /* ISA 3.00 (power9). */
426 CLONE_MAX
427 };
428
429 /* Map compiler ISA bits into HWCAP names. */
430 struct clone_map {
431 HOST_WIDE_INT isa_mask; /* rs6000_isa mask */
432 const char *name; /* name to use in __builtin_cpu_supports. */
433 };
434
435 static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
436 { 0, "" }, /* Default options. */
437 { OPTION_MASK_CMPB, "arch_2_05" }, /* ISA 2.05 (power6). */
438 { OPTION_MASK_POPCNTD, "arch_2_06" }, /* ISA 2.06 (power7). */
439 { OPTION_MASK_P8_VECTOR, "arch_2_07" }, /* ISA 2.07 (power8). */
440 { OPTION_MASK_P9_VECTOR, "arch_3_00" }, /* ISA 3.00 (power9). */
441 };
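
/* As an illustration (hypothetical user code), a function declared

     __attribute__ ((target_clones ("cpu=power9,default")))
     double scale (double x) { return x * 2.0; }

   gets a CLONE_DEFAULT body and a CLONE_ISA_3_00 body; the resolver picks
   the power9 clone at run time via __builtin_cpu_supports ("arch_3_00"),
   using the name from the map above.  */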
442
443
444 /* Newer LIBCs explicitly export this symbol to declare that they provide
445 the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. We emit a
446 reference to this symbol whenever we expand a CPU builtin, so that
447 we never link against an old LIBC. */
448 const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";
449
450 /* True if we have expanded a CPU builtin. */
451 bool cpu_builtin_p;
452
453 /* Pointer to function (in rs6000-c.c) that can define or undefine target
454 macros that have changed. Languages that don't support the preprocessor
455 don't link in rs6000-c.c, so we can't call it directly. */
456 void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
457
458 /* Simplify register classes into simpler classifications. We assume
459 GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
460 check for standard register classes (gpr/floating/altivec/vsx) and
461 floating/vector classes (float/altivec/vsx). */
462
463 enum rs6000_reg_type {
464 NO_REG_TYPE,
465 PSEUDO_REG_TYPE,
466 GPR_REG_TYPE,
467 VSX_REG_TYPE,
468 ALTIVEC_REG_TYPE,
469 FPR_REG_TYPE,
470 SPR_REG_TYPE,
471 CR_REG_TYPE
472 };
473
474 /* Map register class to register type. */
475 static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
476
477 /* First/last register type for the 'normal' register types (i.e. general
478 purpose, floating point, altivec, and VSX registers). */
479 #define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)
480
481 #define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
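
/* Because the enum above orders GPR_REG_TYPE .. FPR_REG_TYPE with the
   vector types in between, each range check reduces to two compares;
   e.g. IS_FP_VECT_REG_TYPE (ALTIVEC_REG_TYPE) is true, while
   IS_FP_VECT_REG_TYPE (GPR_REG_TYPE) and IS_STD_REG_TYPE (SPR_REG_TYPE)
   are false.  */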
482
483
484 /* Register classes we care about in secondary reload or when checking for
485 a legitimate address. We only need to worry about GPR, FPR, and Altivec
486 registers here, along with an ANY field that is the OR of the 3 classes. */
487
488 enum rs6000_reload_reg_type {
489 RELOAD_REG_GPR, /* General purpose registers. */
490 RELOAD_REG_FPR, /* Traditional floating point regs. */
491 RELOAD_REG_VMX, /* Altivec (VMX) registers. */
492 RELOAD_REG_ANY, /* OR of GPR, FPR, Altivec masks. */
493 N_RELOAD_REG
494 };
495
496 /* For setting up register classes, loop through the 3 register classes mapping
497 into real registers, and skip the ANY class, which is just an OR of the
498 bits. */
499 #define FIRST_RELOAD_REG_CLASS RELOAD_REG_GPR
500 #define LAST_RELOAD_REG_CLASS RELOAD_REG_VMX
501
502 /* Map reload register type to a register in the register class. */
503 struct reload_reg_map_type {
504 const char *name; /* Register class name. */
505 int reg; /* Register in the register class. */
506 };
507
508 static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
509 { "Gpr", FIRST_GPR_REGNO }, /* RELOAD_REG_GPR. */
510 { "Fpr", FIRST_FPR_REGNO }, /* RELOAD_REG_FPR. */
511 { "VMX", FIRST_ALTIVEC_REGNO }, /* RELOAD_REG_VMX. */
512 { "Any", -1 }, /* RELOAD_REG_ANY. */
513 };
514
515 /* Mask bits for each register class, indexed per mode. Historically the
516 compiler has been more restrictive about which types can do PRE_MODIFY
517 instead of PRE_INC and PRE_DEC, so keep track of separate bits for these two. */
518 typedef unsigned char addr_mask_type;
519
520 #define RELOAD_REG_VALID 0x01 /* Mode valid in register. */
521 #define RELOAD_REG_MULTIPLE 0x02 /* Mode takes multiple registers. */
522 #define RELOAD_REG_INDEXED 0x04 /* Reg+reg addressing. */
523 #define RELOAD_REG_OFFSET 0x08 /* Reg+offset addressing. */
524 #define RELOAD_REG_PRE_INCDEC 0x10 /* PRE_INC/PRE_DEC valid. */
525 #define RELOAD_REG_PRE_MODIFY 0x20 /* PRE_MODIFY valid. */
526 #define RELOAD_REG_AND_M16 0x40 /* AND -16 addressing. */
527 #define RELOAD_REG_QUAD_OFFSET 0x80 /* quad offset is limited. */
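
/* These bits compose per mode and register type; e.g. a mode that is valid
   in GPRs with both reg+reg and reg+offset addressing would carry an
   addr_mask of (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET)
   in its RELOAD_REG_GPR slot (an illustrative value, not taken from any
   specific mode).  */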
528
529 /* Addressing and reload information for each mode, including the masks of
530 valid addressing modes per register type. */
530 struct rs6000_reg_addr {
531 enum insn_code reload_load; /* INSN to reload for loading. */
532 enum insn_code reload_store; /* INSN to reload for storing. */
533 enum insn_code reload_fpr_gpr; /* INSN to move from FPR to GPR. */
534 enum insn_code reload_gpr_vsx; /* INSN to move from GPR to VSX. */
535 enum insn_code reload_vsx_gpr; /* INSN to move from VSX to GPR. */
536 addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks. */
537 bool scalar_in_vmx_p; /* Scalar value can go in VMX. */
538 };
539
540 static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
541
542 /* Helper function to say whether a mode supports PRE_INC or PRE_DEC. */
543 static inline bool
544 mode_supports_pre_incdec_p (machine_mode mode)
545 {
546 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
547 != 0);
548 }
549
550 /* Helper function to say whether a mode supports PRE_MODIFY. */
551 static inline bool
552 mode_supports_pre_modify_p (machine_mode mode)
553 {
554 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
555 != 0);
556 }
557
558 /* Return true if we have D-form addressing in altivec registers. */
559 static inline bool
560 mode_supports_vmx_dform (machine_mode mode)
561 {
562 return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
563 }
564
565 /* Return true if we have D-form addressing in VSX registers. This addressing
566 is more limited than normal d-form addressing in that the offset must be
567 aligned on a 16-byte boundary. */
568 static inline bool
569 mode_supports_dq_form (machine_mode mode)
570 {
571 return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
572 != 0);
573 }
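
/* As a minimal illustration, assuming a power9-style target: ISA 3.0
   vector loads such as lxv use a DQ-form displacement that must be a
   multiple of 16, so a 16-byte vector mode may report
   mode_supports_dq_form () as true, and an unaligned offset such as 8(r9)
   then has to be reloaded into reg+reg form.  */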
574
575 /* Given that there exists at least one variable that is set (produced)
576 by OUT_INSN and read (consumed) by IN_INSN, return true iff
577 IN_INSN represents one or more memory store operations and none of
578 the variables set by OUT_INSN is used by IN_INSN as the address of a
579 store operation. If either IN_INSN or OUT_INSN does not represent
580 a "single" RTL SET expression (as loosely defined by the
581 implementation of the single_set function) or a PARALLEL with only
582 SETs, CLOBBERs, and USEs inside, this function returns false.
583
584 This rs6000-specific version of store_data_bypass_p checks for
585 certain conditions that result in assertion failures (and internal
586 compiler errors) in the generic store_data_bypass_p function and
587 returns false rather than calling store_data_bypass_p if one of the
588 problematic conditions is detected. */
589
590 int
591 rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
592 {
593 rtx out_set, in_set;
594 rtx out_pat, in_pat;
595 rtx out_exp, in_exp;
596 int i, j;
597
598 in_set = single_set (in_insn);
599 if (in_set)
600 {
601 if (MEM_P (SET_DEST (in_set)))
602 {
603 out_set = single_set (out_insn);
604 if (!out_set)
605 {
606 out_pat = PATTERN (out_insn);
607 if (GET_CODE (out_pat) == PARALLEL)
608 {
609 for (i = 0; i < XVECLEN (out_pat, 0); i++)
610 {
611 out_exp = XVECEXP (out_pat, 0, i);
612 if ((GET_CODE (out_exp) == CLOBBER)
613 || (GET_CODE (out_exp) == USE))
614 continue;
615 else if (GET_CODE (out_exp) != SET)
616 return false;
617 }
618 }
619 }
620 }
621 }
622 else
623 {
624 in_pat = PATTERN (in_insn);
625 if (GET_CODE (in_pat) != PARALLEL)
626 return false;
627
628 for (i = 0; i < XVECLEN (in_pat, 0); i++)
629 {
630 in_exp = XVECEXP (in_pat, 0, i);
631 if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
632 continue;
633 else if (GET_CODE (in_exp) != SET)
634 return false;
635
636 if (MEM_P (SET_DEST (in_exp)))
637 {
638 out_set = single_set (out_insn);
639 if (!out_set)
640 {
641 out_pat = PATTERN (out_insn);
642 if (GET_CODE (out_pat) != PARALLEL)
643 return false;
644 for (j = 0; j < XVECLEN (out_pat, 0); j++)
645 {
646 out_exp = XVECEXP (out_pat, 0, j);
647 if ((GET_CODE (out_exp) == CLOBBER)
648 || (GET_CODE (out_exp) == USE))
649 continue;
650 else if (GET_CODE (out_exp) != SET)
651 return false;
652 }
653 }
654 }
655 }
656 }
657 return store_data_bypass_p (out_insn, in_insn);
658 }
659
660 \f
661 /* Processor costs (relative to an add) */
662
663 const struct processor_costs *rs6000_cost;
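
/* All entries are in COSTS_N_INSNS units, i.e. multiples of the cost of a
   single add; e.g. power4_cost below rates an SImode divide at
   COSTS_N_INSNS (18), roughly eighteen adds.  */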
664
665 /* Instruction size costs on 32-bit processors. */
666 static const
667 struct processor_costs size32_cost = {
668 COSTS_N_INSNS (1), /* mulsi */
669 COSTS_N_INSNS (1), /* mulsi_const */
670 COSTS_N_INSNS (1), /* mulsi_const9 */
671 COSTS_N_INSNS (1), /* muldi */
672 COSTS_N_INSNS (1), /* divsi */
673 COSTS_N_INSNS (1), /* divdi */
674 COSTS_N_INSNS (1), /* fp */
675 COSTS_N_INSNS (1), /* dmul */
676 COSTS_N_INSNS (1), /* sdiv */
677 COSTS_N_INSNS (1), /* ddiv */
678 32, /* cache line size */
679 0, /* l1 cache */
680 0, /* l2 cache */
681 0, /* streams */
682 0, /* SF->DF convert */
683 };
684
685 /* Instruction size costs on 64-bit processors. */
686 static const
687 struct processor_costs size64_cost = {
688 COSTS_N_INSNS (1), /* mulsi */
689 COSTS_N_INSNS (1), /* mulsi_const */
690 COSTS_N_INSNS (1), /* mulsi_const9 */
691 COSTS_N_INSNS (1), /* muldi */
692 COSTS_N_INSNS (1), /* divsi */
693 COSTS_N_INSNS (1), /* divdi */
694 COSTS_N_INSNS (1), /* fp */
695 COSTS_N_INSNS (1), /* dmul */
696 COSTS_N_INSNS (1), /* sdiv */
697 COSTS_N_INSNS (1), /* ddiv */
698 128, /* cache line size */
699 0, /* l1 cache */
700 0, /* l2 cache */
701 0, /* streams */
702 0, /* SF->DF convert */
703 };
704
705 /* Instruction costs on RS64A processors. */
706 static const
707 struct processor_costs rs64a_cost = {
708 COSTS_N_INSNS (20), /* mulsi */
709 COSTS_N_INSNS (12), /* mulsi_const */
710 COSTS_N_INSNS (8), /* mulsi_const9 */
711 COSTS_N_INSNS (34), /* muldi */
712 COSTS_N_INSNS (65), /* divsi */
713 COSTS_N_INSNS (67), /* divdi */
714 COSTS_N_INSNS (4), /* fp */
715 COSTS_N_INSNS (4), /* dmul */
716 COSTS_N_INSNS (31), /* sdiv */
717 COSTS_N_INSNS (31), /* ddiv */
718 128, /* cache line size */
719 128, /* l1 cache */
720 2048, /* l2 cache */
721 1, /* streams */
722 0, /* SF->DF convert */
723 };
724
725 /* Instruction costs on MPCCORE processors. */
726 static const
727 struct processor_costs mpccore_cost = {
728 COSTS_N_INSNS (2), /* mulsi */
729 COSTS_N_INSNS (2), /* mulsi_const */
730 COSTS_N_INSNS (2), /* mulsi_const9 */
731 COSTS_N_INSNS (2), /* muldi */
732 COSTS_N_INSNS (6), /* divsi */
733 COSTS_N_INSNS (6), /* divdi */
734 COSTS_N_INSNS (4), /* fp */
735 COSTS_N_INSNS (5), /* dmul */
736 COSTS_N_INSNS (10), /* sdiv */
737 COSTS_N_INSNS (17), /* ddiv */
738 32, /* cache line size */
739 4, /* l1 cache */
740 16, /* l2 cache */
741 1, /* streams */
742 0, /* SF->DF convert */
743 };
744
745 /* Instruction costs on PPC403 processors. */
746 static const
747 struct processor_costs ppc403_cost = {
748 COSTS_N_INSNS (4), /* mulsi */
749 COSTS_N_INSNS (4), /* mulsi_const */
750 COSTS_N_INSNS (4), /* mulsi_const9 */
751 COSTS_N_INSNS (4), /* muldi */
752 COSTS_N_INSNS (33), /* divsi */
753 COSTS_N_INSNS (33), /* divdi */
754 COSTS_N_INSNS (11), /* fp */
755 COSTS_N_INSNS (11), /* dmul */
756 COSTS_N_INSNS (11), /* sdiv */
757 COSTS_N_INSNS (11), /* ddiv */
758 32, /* cache line size */
759 4, /* l1 cache */
760 16, /* l2 cache */
761 1, /* streams */
762 0, /* SF->DF convert */
763 };
764
765 /* Instruction costs on PPC405 processors. */
766 static const
767 struct processor_costs ppc405_cost = {
768 COSTS_N_INSNS (5), /* mulsi */
769 COSTS_N_INSNS (4), /* mulsi_const */
770 COSTS_N_INSNS (3), /* mulsi_const9 */
771 COSTS_N_INSNS (5), /* muldi */
772 COSTS_N_INSNS (35), /* divsi */
773 COSTS_N_INSNS (35), /* divdi */
774 COSTS_N_INSNS (11), /* fp */
775 COSTS_N_INSNS (11), /* dmul */
776 COSTS_N_INSNS (11), /* sdiv */
777 COSTS_N_INSNS (11), /* ddiv */
778 32, /* cache line size */
779 16, /* l1 cache */
780 128, /* l2 cache */
781 1, /* streams */
782 0, /* SF->DF convert */
783 };
784
785 /* Instruction costs on PPC440 processors. */
786 static const
787 struct processor_costs ppc440_cost = {
788 COSTS_N_INSNS (3), /* mulsi */
789 COSTS_N_INSNS (2), /* mulsi_const */
790 COSTS_N_INSNS (2), /* mulsi_const9 */
791 COSTS_N_INSNS (3), /* muldi */
792 COSTS_N_INSNS (34), /* divsi */
793 COSTS_N_INSNS (34), /* divdi */
794 COSTS_N_INSNS (5), /* fp */
795 COSTS_N_INSNS (5), /* dmul */
796 COSTS_N_INSNS (19), /* sdiv */
797 COSTS_N_INSNS (33), /* ddiv */
798 32, /* cache line size */
799 32, /* l1 cache */
800 256, /* l2 cache */
801 1, /* streams */
802 0, /* SF->DF convert */
803 };
804
805 /* Instruction costs on PPC476 processors. */
806 static const
807 struct processor_costs ppc476_cost = {
808 COSTS_N_INSNS (4), /* mulsi */
809 COSTS_N_INSNS (4), /* mulsi_const */
810 COSTS_N_INSNS (4), /* mulsi_const9 */
811 COSTS_N_INSNS (4), /* muldi */
812 COSTS_N_INSNS (11), /* divsi */
813 COSTS_N_INSNS (11), /* divdi */
814 COSTS_N_INSNS (6), /* fp */
815 COSTS_N_INSNS (6), /* dmul */
816 COSTS_N_INSNS (19), /* sdiv */
817 COSTS_N_INSNS (33), /* ddiv */
818 32, /* l1 cache line size */
819 32, /* l1 cache */
820 512, /* l2 cache */
821 1, /* streams */
822 0, /* SF->DF convert */
823 };
824
825 /* Instruction costs on PPC601 processors. */
826 static const
827 struct processor_costs ppc601_cost = {
828 COSTS_N_INSNS (5), /* mulsi */
829 COSTS_N_INSNS (5), /* mulsi_const */
830 COSTS_N_INSNS (5), /* mulsi_const9 */
831 COSTS_N_INSNS (5), /* muldi */
832 COSTS_N_INSNS (36), /* divsi */
833 COSTS_N_INSNS (36), /* divdi */
834 COSTS_N_INSNS (4), /* fp */
835 COSTS_N_INSNS (5), /* dmul */
836 COSTS_N_INSNS (17), /* sdiv */
837 COSTS_N_INSNS (31), /* ddiv */
838 32, /* cache line size */
839 32, /* l1 cache */
840 256, /* l2 cache */
841 1, /* streams */
842 0, /* SF->DF convert */
843 };
844
845 /* Instruction costs on PPC603 processors. */
846 static const
847 struct processor_costs ppc603_cost = {
848 COSTS_N_INSNS (5), /* mulsi */
849 COSTS_N_INSNS (3), /* mulsi_const */
850 COSTS_N_INSNS (2), /* mulsi_const9 */
851 COSTS_N_INSNS (5), /* muldi */
852 COSTS_N_INSNS (37), /* divsi */
853 COSTS_N_INSNS (37), /* divdi */
854 COSTS_N_INSNS (3), /* fp */
855 COSTS_N_INSNS (4), /* dmul */
856 COSTS_N_INSNS (18), /* sdiv */
857 COSTS_N_INSNS (33), /* ddiv */
858 32, /* cache line size */
859 8, /* l1 cache */
860 64, /* l2 cache */
861 1, /* streams */
862 0, /* SF->DF convert */
863 };
864
865 /* Instruction costs on PPC604 processors. */
866 static const
867 struct processor_costs ppc604_cost = {
868 COSTS_N_INSNS (4), /* mulsi */
869 COSTS_N_INSNS (4), /* mulsi_const */
870 COSTS_N_INSNS (4), /* mulsi_const9 */
871 COSTS_N_INSNS (4), /* muldi */
872 COSTS_N_INSNS (20), /* divsi */
873 COSTS_N_INSNS (20), /* divdi */
874 COSTS_N_INSNS (3), /* fp */
875 COSTS_N_INSNS (3), /* dmul */
876 COSTS_N_INSNS (18), /* sdiv */
877 COSTS_N_INSNS (32), /* ddiv */
878 32, /* cache line size */
879 16, /* l1 cache */
880 512, /* l2 cache */
881 1, /* streams */
882 0, /* SF->DF convert */
883 };
884
885 /* Instruction costs on PPC604e processors. */
886 static const
887 struct processor_costs ppc604e_cost = {
888 COSTS_N_INSNS (2), /* mulsi */
889 COSTS_N_INSNS (2), /* mulsi_const */
890 COSTS_N_INSNS (2), /* mulsi_const9 */
891 COSTS_N_INSNS (2), /* muldi */
892 COSTS_N_INSNS (20), /* divsi */
893 COSTS_N_INSNS (20), /* divdi */
894 COSTS_N_INSNS (3), /* fp */
895 COSTS_N_INSNS (3), /* dmul */
896 COSTS_N_INSNS (18), /* sdiv */
897 COSTS_N_INSNS (32), /* ddiv */
898 32, /* cache line size */
899 32, /* l1 cache */
900 1024, /* l2 cache */
901 1, /* streams */
902 0, /* SF->DF convert */
903 };
904
905 /* Instruction costs on PPC620 processors. */
906 static const
907 struct processor_costs ppc620_cost = {
908 COSTS_N_INSNS (5), /* mulsi */
909 COSTS_N_INSNS (4), /* mulsi_const */
910 COSTS_N_INSNS (3), /* mulsi_const9 */
911 COSTS_N_INSNS (7), /* muldi */
912 COSTS_N_INSNS (21), /* divsi */
913 COSTS_N_INSNS (37), /* divdi */
914 COSTS_N_INSNS (3), /* fp */
915 COSTS_N_INSNS (3), /* dmul */
916 COSTS_N_INSNS (18), /* sdiv */
917 COSTS_N_INSNS (32), /* ddiv */
918 128, /* cache line size */
919 32, /* l1 cache */
920 1024, /* l2 cache */
921 1, /* streams */
922 0, /* SF->DF convert */
923 };
924
925 /* Instruction costs on PPC630 processors. */
926 static const
927 struct processor_costs ppc630_cost = {
928 COSTS_N_INSNS (5), /* mulsi */
929 COSTS_N_INSNS (4), /* mulsi_const */
930 COSTS_N_INSNS (3), /* mulsi_const9 */
931 COSTS_N_INSNS (7), /* muldi */
932 COSTS_N_INSNS (21), /* divsi */
933 COSTS_N_INSNS (37), /* divdi */
934 COSTS_N_INSNS (3), /* fp */
935 COSTS_N_INSNS (3), /* dmul */
936 COSTS_N_INSNS (17), /* sdiv */
937 COSTS_N_INSNS (21), /* ddiv */
938 128, /* cache line size */
939 64, /* l1 cache */
940 1024, /* l2 cache */
941 1, /* streams */
942 0, /* SF->DF convert */
943 };
944
945 /* Instruction costs on Cell processor. */
946 /* COSTS_N_INSNS (1) ~ one add. */
947 static const
948 struct processor_costs ppccell_cost = {
949 COSTS_N_INSNS (9/2)+2, /* mulsi */
950 COSTS_N_INSNS (6/2), /* mulsi_const */
951 COSTS_N_INSNS (6/2), /* mulsi_const9 */
952 COSTS_N_INSNS (15/2)+2, /* muldi */
953 COSTS_N_INSNS (38/2), /* divsi */
954 COSTS_N_INSNS (70/2), /* divdi */
955 COSTS_N_INSNS (10/2), /* fp */
956 COSTS_N_INSNS (10/2), /* dmul */
957 COSTS_N_INSNS (74/2), /* sdiv */
958 COSTS_N_INSNS (74/2), /* ddiv */
959 128, /* cache line size */
960 32, /* l1 cache */
961 512, /* l2 cache */
962 6, /* streams */
963 0, /* SF->DF convert */
964 };
965
966 /* Instruction costs on PPC750 and PPC7400 processors. */
967 static const
968 struct processor_costs ppc750_cost = {
969 COSTS_N_INSNS (5), /* mulsi */
970 COSTS_N_INSNS (3), /* mulsi_const */
971 COSTS_N_INSNS (2), /* mulsi_const9 */
972 COSTS_N_INSNS (5), /* muldi */
973 COSTS_N_INSNS (17), /* divsi */
974 COSTS_N_INSNS (17), /* divdi */
975 COSTS_N_INSNS (3), /* fp */
976 COSTS_N_INSNS (3), /* dmul */
977 COSTS_N_INSNS (17), /* sdiv */
978 COSTS_N_INSNS (31), /* ddiv */
979 32, /* cache line size */
980 32, /* l1 cache */
981 512, /* l2 cache */
982 1, /* streams */
983 0, /* SF->DF convert */
984 };
985
986 /* Instruction costs on PPC7450 processors. */
987 static const
988 struct processor_costs ppc7450_cost = {
989 COSTS_N_INSNS (4), /* mulsi */
990 COSTS_N_INSNS (3), /* mulsi_const */
991 COSTS_N_INSNS (3), /* mulsi_const9 */
992 COSTS_N_INSNS (4), /* muldi */
993 COSTS_N_INSNS (23), /* divsi */
994 COSTS_N_INSNS (23), /* divdi */
995 COSTS_N_INSNS (5), /* fp */
996 COSTS_N_INSNS (5), /* dmul */
997 COSTS_N_INSNS (21), /* sdiv */
998 COSTS_N_INSNS (35), /* ddiv */
999 32, /* cache line size */
1000 32, /* l1 cache */
1001 1024, /* l2 cache */
1002 1, /* streams */
1003 0, /* SF->DF convert */
1004 };
1005
1006 /* Instruction costs on PPC8540 processors. */
1007 static const
1008 struct processor_costs ppc8540_cost = {
1009 COSTS_N_INSNS (4), /* mulsi */
1010 COSTS_N_INSNS (4), /* mulsi_const */
1011 COSTS_N_INSNS (4), /* mulsi_const9 */
1012 COSTS_N_INSNS (4), /* muldi */
1013 COSTS_N_INSNS (19), /* divsi */
1014 COSTS_N_INSNS (19), /* divdi */
1015 COSTS_N_INSNS (4), /* fp */
1016 COSTS_N_INSNS (4), /* dmul */
1017 COSTS_N_INSNS (29), /* sdiv */
1018 COSTS_N_INSNS (29), /* ddiv */
1019 32, /* cache line size */
1020 32, /* l1 cache */
1021 256, /* l2 cache */
1022 1, /* prefetch streams */
1023 0, /* SF->DF convert */
1024 };
1025
1026 /* Instruction costs on E300C2 and E300C3 cores. */
1027 static const
1028 struct processor_costs ppce300c2c3_cost = {
1029 COSTS_N_INSNS (4), /* mulsi */
1030 COSTS_N_INSNS (4), /* mulsi_const */
1031 COSTS_N_INSNS (4), /* mulsi_const9 */
1032 COSTS_N_INSNS (4), /* muldi */
1033 COSTS_N_INSNS (19), /* divsi */
1034 COSTS_N_INSNS (19), /* divdi */
1035 COSTS_N_INSNS (3), /* fp */
1036 COSTS_N_INSNS (4), /* dmul */
1037 COSTS_N_INSNS (18), /* sdiv */
1038 COSTS_N_INSNS (33), /* ddiv */
1039 32, /* cache line size */
1040 16, /* l1 cache */
1041 16, /* l2 cache */
1042 1, /* prefetch streams */
1043 0, /* SF->DF convert */
1044 };
1045
1046 /* Instruction costs on PPCE500MC processors. */
1047 static const
1048 struct processor_costs ppce500mc_cost = {
1049 COSTS_N_INSNS (4), /* mulsi */
1050 COSTS_N_INSNS (4), /* mulsi_const */
1051 COSTS_N_INSNS (4), /* mulsi_const9 */
1052 COSTS_N_INSNS (4), /* muldi */
1053 COSTS_N_INSNS (14), /* divsi */
1054 COSTS_N_INSNS (14), /* divdi */
1055 COSTS_N_INSNS (8), /* fp */
1056 COSTS_N_INSNS (10), /* dmul */
1057 COSTS_N_INSNS (36), /* sdiv */
1058 COSTS_N_INSNS (66), /* ddiv */
1059 64, /* cache line size */
1060 32, /* l1 cache */
1061 128, /* l2 cache */
1062 1, /* prefetch streams */
1063 0, /* SF->DF convert */
1064 };
1065
1066 /* Instruction costs on PPCE500MC64 processors. */
1067 static const
1068 struct processor_costs ppce500mc64_cost = {
1069 COSTS_N_INSNS (4), /* mulsi */
1070 COSTS_N_INSNS (4), /* mulsi_const */
1071 COSTS_N_INSNS (4), /* mulsi_const9 */
1072 COSTS_N_INSNS (4), /* muldi */
1073 COSTS_N_INSNS (14), /* divsi */
1074 COSTS_N_INSNS (14), /* divdi */
1075 COSTS_N_INSNS (4), /* fp */
1076 COSTS_N_INSNS (10), /* dmul */
1077 COSTS_N_INSNS (36), /* sdiv */
1078 COSTS_N_INSNS (66), /* ddiv */
1079 64, /* cache line size */
1080 32, /* l1 cache */
1081 128, /* l2 cache */
1082 1, /* prefetch streams */
1083 0, /* SF->DF convert */
1084 };
1085
1086 /* Instruction costs on PPCE5500 processors. */
1087 static const
1088 struct processor_costs ppce5500_cost = {
1089 COSTS_N_INSNS (5), /* mulsi */
1090 COSTS_N_INSNS (5), /* mulsi_const */
1091 COSTS_N_INSNS (4), /* mulsi_const9 */
1092 COSTS_N_INSNS (5), /* muldi */
1093 COSTS_N_INSNS (14), /* divsi */
1094 COSTS_N_INSNS (14), /* divdi */
1095 COSTS_N_INSNS (7), /* fp */
1096 COSTS_N_INSNS (10), /* dmul */
1097 COSTS_N_INSNS (36), /* sdiv */
1098 COSTS_N_INSNS (66), /* ddiv */
1099 64, /* cache line size */
1100 32, /* l1 cache */
1101 128, /* l2 cache */
1102 1, /* prefetch streams */
1103 0, /* SF->DF convert */
1104 };
1105
1106 /* Instruction costs on PPCE6500 processors. */
1107 static const
1108 struct processor_costs ppce6500_cost = {
1109 COSTS_N_INSNS (5), /* mulsi */
1110 COSTS_N_INSNS (5), /* mulsi_const */
1111 COSTS_N_INSNS (4), /* mulsi_const9 */
1112 COSTS_N_INSNS (5), /* muldi */
1113 COSTS_N_INSNS (14), /* divsi */
1114 COSTS_N_INSNS (14), /* divdi */
1115 COSTS_N_INSNS (7), /* fp */
1116 COSTS_N_INSNS (10), /* dmul */
1117 COSTS_N_INSNS (36), /* sdiv */
1118 COSTS_N_INSNS (66), /* ddiv */
1119 64, /* cache line size */
1120 32, /* l1 cache */
1121 128, /* l2 cache */
1122 1, /* prefetch streams */
1123 0, /* SF->DF convert */
1124 };
1125
1126 /* Instruction costs on AppliedMicro Titan processors. */
1127 static const
1128 struct processor_costs titan_cost = {
1129 COSTS_N_INSNS (5), /* mulsi */
1130 COSTS_N_INSNS (5), /* mulsi_const */
1131 COSTS_N_INSNS (5), /* mulsi_const9 */
1132 COSTS_N_INSNS (5), /* muldi */
1133 COSTS_N_INSNS (18), /* divsi */
1134 COSTS_N_INSNS (18), /* divdi */
1135 COSTS_N_INSNS (10), /* fp */
1136 COSTS_N_INSNS (10), /* dmul */
1137 COSTS_N_INSNS (46), /* sdiv */
1138 COSTS_N_INSNS (72), /* ddiv */
1139 32, /* cache line size */
1140 32, /* l1 cache */
1141 512, /* l2 cache */
1142 1, /* prefetch streams */
1143 0, /* SF->DF convert */
1144 };
1145
1146 /* Instruction costs on POWER4 and POWER5 processors. */
1147 static const
1148 struct processor_costs power4_cost = {
1149 COSTS_N_INSNS (3), /* mulsi */
1150 COSTS_N_INSNS (2), /* mulsi_const */
1151 COSTS_N_INSNS (2), /* mulsi_const9 */
1152 COSTS_N_INSNS (4), /* muldi */
1153 COSTS_N_INSNS (18), /* divsi */
1154 COSTS_N_INSNS (34), /* divdi */
1155 COSTS_N_INSNS (3), /* fp */
1156 COSTS_N_INSNS (3), /* dmul */
1157 COSTS_N_INSNS (17), /* sdiv */
1158 COSTS_N_INSNS (17), /* ddiv */
1159 128, /* cache line size */
1160 32, /* l1 cache */
1161 1024, /* l2 cache */
1162 8, /* prefetch streams */
1163 0, /* SF->DF convert */
1164 };
1165
1166 /* Instruction costs on POWER6 processors. */
1167 static const
1168 struct processor_costs power6_cost = {
1169 COSTS_N_INSNS (8), /* mulsi */
1170 COSTS_N_INSNS (8), /* mulsi_const */
1171 COSTS_N_INSNS (8), /* mulsi_const9 */
1172 COSTS_N_INSNS (8), /* muldi */
1173 COSTS_N_INSNS (22), /* divsi */
1174 COSTS_N_INSNS (28), /* divdi */
1175 COSTS_N_INSNS (3), /* fp */
1176 COSTS_N_INSNS (3), /* dmul */
1177 COSTS_N_INSNS (13), /* sdiv */
1178 COSTS_N_INSNS (16), /* ddiv */
1179 128, /* cache line size */
1180 64, /* l1 cache */
1181 2048, /* l2 cache */
1182 16, /* prefetch streams */
1183 0, /* SF->DF convert */
1184 };
1185
1186 /* Instruction costs on POWER7 processors. */
1187 static const
1188 struct processor_costs power7_cost = {
1189 COSTS_N_INSNS (2), /* mulsi */
1190 COSTS_N_INSNS (2), /* mulsi_const */
1191 COSTS_N_INSNS (2), /* mulsi_const9 */
1192 COSTS_N_INSNS (2), /* muldi */
1193 COSTS_N_INSNS (18), /* divsi */
1194 COSTS_N_INSNS (34), /* divdi */
1195 COSTS_N_INSNS (3), /* fp */
1196 COSTS_N_INSNS (3), /* dmul */
1197 COSTS_N_INSNS (13), /* sdiv */
1198 COSTS_N_INSNS (16), /* ddiv */
1199 128, /* cache line size */
1200 32, /* l1 cache */
1201 256, /* l2 cache */
1202 12, /* prefetch streams */
1203 COSTS_N_INSNS (3), /* SF->DF convert */
1204 };
1205
1206 /* Instruction costs on POWER8 processors. */
1207 static const
1208 struct processor_costs power8_cost = {
1209 COSTS_N_INSNS (3), /* mulsi */
1210 COSTS_N_INSNS (3), /* mulsi_const */
1211 COSTS_N_INSNS (3), /* mulsi_const9 */
1212 COSTS_N_INSNS (3), /* muldi */
1213 COSTS_N_INSNS (19), /* divsi */
1214 COSTS_N_INSNS (35), /* divdi */
1215 COSTS_N_INSNS (3), /* fp */
1216 COSTS_N_INSNS (3), /* dmul */
1217 COSTS_N_INSNS (14), /* sdiv */
1218 COSTS_N_INSNS (17), /* ddiv */
1219 128, /* cache line size */
1220 32, /* l1 cache */
1221 256, /* l2 cache */
1222 12, /* prefetch streams */
1223 COSTS_N_INSNS (3), /* SF->DF convert */
1224 };
1225
1226 /* Instruction costs on POWER9 processors. */
1227 static const
1228 struct processor_costs power9_cost = {
1229 COSTS_N_INSNS (3), /* mulsi */
1230 COSTS_N_INSNS (3), /* mulsi_const */
1231 COSTS_N_INSNS (3), /* mulsi_const9 */
1232 COSTS_N_INSNS (3), /* muldi */
1233 COSTS_N_INSNS (8), /* divsi */
1234 COSTS_N_INSNS (12), /* divdi */
1235 COSTS_N_INSNS (3), /* fp */
1236 COSTS_N_INSNS (3), /* dmul */
1237 COSTS_N_INSNS (13), /* sdiv */
1238 COSTS_N_INSNS (18), /* ddiv */
1239 128, /* cache line size */
1240 32, /* l1 cache */
1241 512, /* l2 cache */
1242 8, /* prefetch streams */
1243 COSTS_N_INSNS (3), /* SF->DF convert */
1244 };
1245
1246 /* Instruction costs on POWER A2 processors. */
1247 static const
1248 struct processor_costs ppca2_cost = {
1249 COSTS_N_INSNS (16), /* mulsi */
1250 COSTS_N_INSNS (16), /* mulsi_const */
1251 COSTS_N_INSNS (16), /* mulsi_const9 */
1252 COSTS_N_INSNS (16), /* muldi */
1253 COSTS_N_INSNS (22), /* divsi */
1254 COSTS_N_INSNS (28), /* divdi */
1255 COSTS_N_INSNS (3), /* fp */
1256 COSTS_N_INSNS (3), /* dmul */
1257 COSTS_N_INSNS (59), /* sdiv */
1258 COSTS_N_INSNS (72), /* ddiv */
1259 64, /* cache line size */
1260 16, /* l1 cache */
1261 2048, /* l2 cache */
1262 16, /* prefetch streams */
1263 0, /* SF->DF convert */
1264 };
1265
1266 \f
1267 /* Table that classifies rs6000 builtin functions (pure, const, etc.). */
1268 #undef RS6000_BUILTIN_0
1269 #undef RS6000_BUILTIN_1
1270 #undef RS6000_BUILTIN_2
1271 #undef RS6000_BUILTIN_3
1272 #undef RS6000_BUILTIN_A
1273 #undef RS6000_BUILTIN_D
1274 #undef RS6000_BUILTIN_H
1275 #undef RS6000_BUILTIN_P
1276 #undef RS6000_BUILTIN_X
1277
1278 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
1279 { NAME, ICODE, MASK, ATTR },
1280
1281 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
1282 { NAME, ICODE, MASK, ATTR },
1283
1284 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
1285 { NAME, ICODE, MASK, ATTR },
1286
1287 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
1288 { NAME, ICODE, MASK, ATTR },
1289
1290 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
1291 { NAME, ICODE, MASK, ATTR },
1292
1293 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
1294 { NAME, ICODE, MASK, ATTR },
1295
1296 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
1297 { NAME, ICODE, MASK, ATTR },
1298
1299 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
1300 { NAME, ICODE, MASK, ATTR },
1301
1302 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
1303 { NAME, ICODE, MASK, ATTR },
1304
1305 struct rs6000_builtin_info_type {
1306 const char *name;
1307 const enum insn_code icode;
1308 const HOST_WIDE_INT mask;
1309 const unsigned attr;
1310 };
1311
1312 static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
1313 {
1314 #include "rs6000-builtin.def"
1315 };
1316
1317 #undef RS6000_BUILTIN_0
1318 #undef RS6000_BUILTIN_1
1319 #undef RS6000_BUILTIN_2
1320 #undef RS6000_BUILTIN_3
1321 #undef RS6000_BUILTIN_A
1322 #undef RS6000_BUILTIN_D
1323 #undef RS6000_BUILTIN_H
1324 #undef RS6000_BUILTIN_P
1325 #undef RS6000_BUILTIN_X
1326
1327 /* Support for -mveclibabi=<xxx> to control which vector library to use. */
1328 static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
1329
1330 \f
1331 static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
1332 static struct machine_function * rs6000_init_machine_status (void);
1333 static int rs6000_ra_ever_killed (void);
1334 static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
1335 static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
1336 static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
1337 static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
1338 static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
1339 static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
1340 static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
1341 static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
1342 bool);
1343 static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
1344 unsigned int);
1345 static bool is_microcoded_insn (rtx_insn *);
1346 static bool is_nonpipeline_insn (rtx_insn *);
1347 static bool is_cracked_insn (rtx_insn *);
1348 static bool is_load_insn (rtx, rtx *);
1349 static bool is_store_insn (rtx, rtx *);
1350 static bool set_to_load_agen (rtx_insn *,rtx_insn *);
1351 static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
1352 static bool insn_must_be_first_in_group (rtx_insn *);
1353 static bool insn_must_be_last_in_group (rtx_insn *);
1354 static void altivec_init_builtins (void);
1355 static tree builtin_function_type (machine_mode, machine_mode,
1356 machine_mode, machine_mode,
1357 enum rs6000_builtins, const char *name);
1358 static void rs6000_common_init_builtins (void);
1359 static void htm_init_builtins (void);
1360 static rs6000_stack_t *rs6000_stack_info (void);
1361 static void is_altivec_return_reg (rtx, void *);
1362 int easy_vector_constant (rtx, machine_mode);
1363 static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
1364 static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
1365 static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
1366 bool, bool);
1367 #if TARGET_MACHO
1368 static void macho_branch_islands (void);
1369 static tree get_prev_label (tree);
1370 #endif
1371 static bool rs6000_mode_dependent_address (const_rtx);
1372 static bool rs6000_debug_mode_dependent_address (const_rtx);
1373 static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
1374 static enum reg_class rs6000_secondary_reload_class (enum reg_class,
1375 machine_mode, rtx);
1376 static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
1377 machine_mode,
1378 rtx);
1379 static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
1380 static enum reg_class rs6000_debug_preferred_reload_class (rtx,
1381 enum reg_class);
1382 static bool rs6000_debug_secondary_memory_needed (machine_mode,
1383 reg_class_t,
1384 reg_class_t);
1385 static bool rs6000_debug_can_change_mode_class (machine_mode,
1386 machine_mode,
1387 reg_class_t);
1388 static bool rs6000_save_toc_in_prologue_p (void);
1389 static rtx rs6000_internal_arg_pointer (void);
1390
1391 static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
1392 = rs6000_mode_dependent_address;
1393
1394 enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
1395 machine_mode, rtx)
1396 = rs6000_secondary_reload_class;
1397
1398 enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
1399 = rs6000_preferred_reload_class;
1400
1401 const int INSN_NOT_AVAILABLE = -1;
1402
1403 static void rs6000_print_isa_options (FILE *, int, const char *,
1404 HOST_WIDE_INT);
1405 static void rs6000_print_builtin_options (FILE *, int, const char *,
1406 HOST_WIDE_INT);
1407 static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);
1408
1409 static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
1410 static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
1411 enum rs6000_reg_type,
1412 machine_mode,
1413 secondary_reload_info *,
1414 bool);
1415 rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
1416 static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
1417 static tree rs6000_fold_builtin (tree, int, tree *, bool);
1418
1419 /* Hash table stuff for keeping track of TOC entries. */
1420
1421 struct GTY((for_user)) toc_hash_struct
1422 {
1423 /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
1424 ASM_OUTPUT_SPECIAL_POOL_ENTRY_P. */
1425 rtx key;
1426 machine_mode key_mode;
1427 int labelno;
1428 };
1429
1430 struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
1431 {
1432 static hashval_t hash (toc_hash_struct *);
1433 static bool equal (toc_hash_struct *, toc_hash_struct *);
1434 };
1435
1436 static GTY (()) hash_table<toc_hasher> *toc_hash_table;
1437
1438 /* Hash table to keep track of the argument types for builtin functions. */
1439
1440 struct GTY((for_user)) builtin_hash_struct
1441 {
1442 tree type;
1443 machine_mode mode[4]; /* return value + 3 arguments. */
1444 unsigned char uns_p[4]; /* and whether the types are unsigned. */
1445 };
1446
1447 struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
1448 {
1449 static hashval_t hash (builtin_hash_struct *);
1450 static bool equal (builtin_hash_struct *, builtin_hash_struct *);
1451 };
1452
1453 static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
1454
1455 \f
1456 /* Default register names. */
1457 char rs6000_reg_names[][8] =
1458 {
1459 /* GPRs */
1460 "0", "1", "2", "3", "4", "5", "6", "7",
1461 "8", "9", "10", "11", "12", "13", "14", "15",
1462 "16", "17", "18", "19", "20", "21", "22", "23",
1463 "24", "25", "26", "27", "28", "29", "30", "31",
1464 /* FPRs */
1465 "0", "1", "2", "3", "4", "5", "6", "7",
1466 "8", "9", "10", "11", "12", "13", "14", "15",
1467 "16", "17", "18", "19", "20", "21", "22", "23",
1468 "24", "25", "26", "27", "28", "29", "30", "31",
1469 /* VRs */
1470 "0", "1", "2", "3", "4", "5", "6", "7",
1471 "8", "9", "10", "11", "12", "13", "14", "15",
1472 "16", "17", "18", "19", "20", "21", "22", "23",
1473 "24", "25", "26", "27", "28", "29", "30", "31",
1474 /* lr ctr ca ap */
1475 "lr", "ctr", "ca", "ap",
1476 /* cr0..cr7 */
1477 "0", "1", "2", "3", "4", "5", "6", "7",
1478 /* vrsave vscr sfp */
1479 "vrsave", "vscr", "sfp",
1480 };
1481
1482 #ifdef TARGET_REGNAMES
1483 static const char alt_reg_names[][8] =
1484 {
1485 /* GPRs */
1486 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
1487 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
1488 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
1489 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
1490 /* FPRs */
1491 "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
1492 "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
1493 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
1494 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
1495 /* VRs */
1496 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
1497 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
1498 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
1499 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
1500 /* lr ctr ca ap */
1501 "lr", "ctr", "ca", "ap",
1502 /* cr0..cr7 */
1503 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
1504 /* vrsave vscr sfp */
1505 "vrsave", "vscr", "sfp",
1506 };
1507 #endif
1508
1509 /* Table of valid machine attributes. */
1510
1511 static const struct attribute_spec rs6000_attribute_table[] =
1512 {
1513 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
1514 affects_type_identity, handler, exclude } */
1515 { "altivec", 1, 1, false, true, false, false,
1516 rs6000_handle_altivec_attribute, NULL },
1517 { "longcall", 0, 0, false, true, true, false,
1518 rs6000_handle_longcall_attribute, NULL },
1519 { "shortcall", 0, 0, false, true, true, false,
1520 rs6000_handle_longcall_attribute, NULL },
1521 { "ms_struct", 0, 0, false, false, false, false,
1522 rs6000_handle_struct_attribute, NULL },
1523 { "gcc_struct", 0, 0, false, false, false, false,
1524 rs6000_handle_struct_attribute, NULL },
1525 #ifdef SUBTARGET_ATTRIBUTE_TABLE
1526 SUBTARGET_ATTRIBUTE_TABLE,
1527 #endif
1528 { NULL, 0, 0, false, false, false, false, NULL, NULL }
1529 };
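
/* For reference, these attributes appear in user code as, e.g.
   (illustrative declarations only):

     typedef int v4si __attribute__ ((altivec (vector__)));
     void far_away (void) __attribute__ ((longcall));

   "longcall" forces a call through a pointer so the target can be out of
   direct-branch range; "altivec" applies the AltiVec type semantics
   handled by rs6000_handle_altivec_attribute.  */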
1530 \f
1531 #ifndef TARGET_PROFILE_KERNEL
1532 #define TARGET_PROFILE_KERNEL 0
1533 #endif
1534
1535 /* The VRSAVE bitmask puts bit %v0 as the most significant bit. */
1536 #define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
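
/* Worked example: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) == 0x80000000 for
   %v0, and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 2) == 0x20000000 for
   %v2.  */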
1537 \f
1538 /* Initialize the GCC target structure. */
1539 #undef TARGET_ATTRIBUTE_TABLE
1540 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1541 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1542 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1543 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1544 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1545
1546 #undef TARGET_ASM_ALIGNED_DI_OP
1547 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1548
1549 /* Default unaligned ops are only provided for ELF. Find the ops needed
1550 for non-ELF systems. */
1551 #ifndef OBJECT_FORMAT_ELF
1552 #if TARGET_XCOFF
1553 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1554 64-bit targets. */
1555 #undef TARGET_ASM_UNALIGNED_HI_OP
1556 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1557 #undef TARGET_ASM_UNALIGNED_SI_OP
1558 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1559 #undef TARGET_ASM_UNALIGNED_DI_OP
1560 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1561 #else
1562 /* For Darwin. */
1563 #undef TARGET_ASM_UNALIGNED_HI_OP
1564 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1565 #undef TARGET_ASM_UNALIGNED_SI_OP
1566 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1567 #undef TARGET_ASM_UNALIGNED_DI_OP
1568 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1569 #undef TARGET_ASM_ALIGNED_DI_OP
1570 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1571 #endif
1572 #endif
1573
1574 /* This hook deals with fixups for relocatable code and DI-mode objects
1575 in 64-bit code. */
1576 #undef TARGET_ASM_INTEGER
1577 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1578
1579 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
1580 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1581 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1582 #endif
1583
1584 #undef TARGET_SET_UP_BY_PROLOGUE
1585 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1586
1587 #undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
1588 #define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
1589 #undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
1590 #define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
1591 #undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
1592 #define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
1593 #undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
1594 #define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
1595 #undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
1596 #define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
1597 #undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
1598 #define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components
1599
1600 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1601 #define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry
1602
1603 #undef TARGET_INTERNAL_ARG_POINTER
1604 #define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer
1605
1606 #undef TARGET_HAVE_TLS
1607 #define TARGET_HAVE_TLS HAVE_AS_TLS
1608
1609 #undef TARGET_CANNOT_FORCE_CONST_MEM
1610 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1611
1612 #undef TARGET_DELEGITIMIZE_ADDRESS
1613 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1614
1615 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1616 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1617
1618 #undef TARGET_LEGITIMATE_COMBINED_INSN
1619 #define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn
1620
1621 #undef TARGET_ASM_FUNCTION_PROLOGUE
1622 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1623 #undef TARGET_ASM_FUNCTION_EPILOGUE
1624 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1625
1626 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1627 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1628
1629 #undef TARGET_LEGITIMIZE_ADDRESS
1630 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1631
1632 #undef TARGET_SCHED_VARIABLE_ISSUE
1633 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1634
1635 #undef TARGET_SCHED_ISSUE_RATE
1636 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1637 #undef TARGET_SCHED_ADJUST_COST
1638 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1639 #undef TARGET_SCHED_ADJUST_PRIORITY
1640 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1641 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1642 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1643 #undef TARGET_SCHED_INIT
1644 #define TARGET_SCHED_INIT rs6000_sched_init
1645 #undef TARGET_SCHED_FINISH
1646 #define TARGET_SCHED_FINISH rs6000_sched_finish
1647 #undef TARGET_SCHED_REORDER
1648 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1649 #undef TARGET_SCHED_REORDER2
1650 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1651
1652 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1653 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1654
1655 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1656 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1657
1658 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1659 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1660 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1661 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1662 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1663 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1664 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1665 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1666
1667 #undef TARGET_SCHED_CAN_SPECULATE_INSN
1668 #define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn
1669
1670 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1671 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1672 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1673 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1674 rs6000_builtin_support_vector_misalignment
1675 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1676 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1677 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1678 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1679 rs6000_builtin_vectorization_cost
1680 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1681 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1682 rs6000_preferred_simd_mode
1683 #undef TARGET_VECTORIZE_INIT_COST
1684 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1685 #undef TARGET_VECTORIZE_ADD_STMT_COST
1686 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1687 #undef TARGET_VECTORIZE_FINISH_COST
1688 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1689 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1690 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1691
1692 #undef TARGET_INIT_BUILTINS
1693 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1694 #undef TARGET_BUILTIN_DECL
1695 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1696
1697 #undef TARGET_FOLD_BUILTIN
1698 #define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1699 #undef TARGET_GIMPLE_FOLD_BUILTIN
1700 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1701
1702 #undef TARGET_EXPAND_BUILTIN
1703 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1704
1705 #undef TARGET_MANGLE_TYPE
1706 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1707
1708 #undef TARGET_INIT_LIBFUNCS
1709 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1710
1711 #if TARGET_MACHO
1712 #undef TARGET_BINDS_LOCAL_P
1713 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1714 #endif
1715
1716 #undef TARGET_MS_BITFIELD_LAYOUT_P
1717 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1718
1719 #undef TARGET_ASM_OUTPUT_MI_THUNK
1720 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1721
1722 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1723 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1724
1725 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1726 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1727
1728 #undef TARGET_REGISTER_MOVE_COST
1729 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1730 #undef TARGET_MEMORY_MOVE_COST
1731 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1732 #undef TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS
1733 #define TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS \
1734 rs6000_ira_change_pseudo_allocno_class
1735 #undef TARGET_CANNOT_COPY_INSN_P
1736 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1737 #undef TARGET_RTX_COSTS
1738 #define TARGET_RTX_COSTS rs6000_rtx_costs
1739 #undef TARGET_ADDRESS_COST
1740 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1741 #undef TARGET_INSN_COST
1742 #define TARGET_INSN_COST rs6000_insn_cost
1743
1744 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1745 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1746
1747 #undef TARGET_PROMOTE_FUNCTION_MODE
1748 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1749
1750 #undef TARGET_RETURN_IN_MEMORY
1751 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1752
1753 #undef TARGET_RETURN_IN_MSB
1754 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1755
1756 #undef TARGET_SETUP_INCOMING_VARARGS
1757 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1758
1759 /* Always strict argument naming on rs6000. */
1760 #undef TARGET_STRICT_ARGUMENT_NAMING
1761 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1762 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1763 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1764 #undef TARGET_SPLIT_COMPLEX_ARG
1765 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1766 #undef TARGET_MUST_PASS_IN_STACK
1767 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1768 #undef TARGET_PASS_BY_REFERENCE
1769 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1770 #undef TARGET_ARG_PARTIAL_BYTES
1771 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1772 #undef TARGET_FUNCTION_ARG_ADVANCE
1773 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1774 #undef TARGET_FUNCTION_ARG
1775 #define TARGET_FUNCTION_ARG rs6000_function_arg
1776 #undef TARGET_FUNCTION_ARG_PADDING
1777 #define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
1778 #undef TARGET_FUNCTION_ARG_BOUNDARY
1779 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1780
1781 #undef TARGET_BUILD_BUILTIN_VA_LIST
1782 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1783
1784 #undef TARGET_EXPAND_BUILTIN_VA_START
1785 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1786
1787 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1788 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1789
1790 #undef TARGET_EH_RETURN_FILTER_MODE
1791 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1792
1793 #undef TARGET_TRANSLATE_MODE_ATTRIBUTE
1794 #define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute
1795
1796 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1797 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1798
1799 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1800 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1801
1802 #undef TARGET_FLOATN_MODE
1803 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1804
1805 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1806 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1807
1808 #undef TARGET_MD_ASM_ADJUST
1809 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1810
1811 #undef TARGET_OPTION_OVERRIDE
1812 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1813
1814 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1815 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1816 rs6000_builtin_vectorized_function
1817
1818 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1819 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1820 rs6000_builtin_md_vectorized_function
1821
1822 #undef TARGET_STACK_PROTECT_GUARD
1823 #define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
1824
1825 #if !TARGET_MACHO
1826 #undef TARGET_STACK_PROTECT_FAIL
1827 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1828 #endif
1829
1830 #ifdef HAVE_AS_TLS
1831 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1832 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1833 #endif
1834
1835 /* Use a 32-bit anchor range. This leads to sequences like:
1836
1837 addis tmp,anchor,high
1838 add dest,tmp,low
1839
1840 where tmp itself acts as an anchor, and can be shared between
1841 accesses to the same 64k page. */
1842 #undef TARGET_MIN_ANCHOR_OFFSET
1843 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1844 #undef TARGET_MAX_ANCHOR_OFFSET
1845 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1846 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1847 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1848 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1849 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1850
1851 #undef TARGET_BUILTIN_RECIPROCAL
1852 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1853
1854 #undef TARGET_SECONDARY_RELOAD
1855 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1856 #undef TARGET_SECONDARY_MEMORY_NEEDED
1857 #define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
1858 #undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
1859 #define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode
1860
1861 #undef TARGET_LEGITIMATE_ADDRESS_P
1862 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1863
1864 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1865 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1866
1867 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1868 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1869
1870 #undef TARGET_CAN_ELIMINATE
1871 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1872
1873 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1874 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1875
1876 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1877 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1878
1879 #undef TARGET_TRAMPOLINE_INIT
1880 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1881
1882 #undef TARGET_FUNCTION_VALUE
1883 #define TARGET_FUNCTION_VALUE rs6000_function_value
1884
1885 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1886 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1887
1888 #undef TARGET_OPTION_SAVE
1889 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1890
1891 #undef TARGET_OPTION_RESTORE
1892 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1893
1894 #undef TARGET_OPTION_PRINT
1895 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1896
1897 #undef TARGET_CAN_INLINE_P
1898 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1899
1900 #undef TARGET_SET_CURRENT_FUNCTION
1901 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1902
1903 #undef TARGET_LEGITIMATE_CONSTANT_P
1904 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1905
1906 #undef TARGET_VECTORIZE_VEC_PERM_CONST
1907 #define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const
1908
1909 #undef TARGET_CAN_USE_DOLOOP_P
1910 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1911
1912 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1913 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1914
1915 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1916 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1917 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1918 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1919 #undef TARGET_UNWIND_WORD_MODE
1920 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1921
1922 #undef TARGET_OFFLOAD_OPTIONS
1923 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1924
1925 #undef TARGET_C_MODE_FOR_SUFFIX
1926 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1927
1928 #undef TARGET_INVALID_BINARY_OP
1929 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1930
1931 #undef TARGET_OPTAB_SUPPORTED_P
1932 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1933
1934 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1935 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1936
1937 #undef TARGET_COMPARE_VERSION_PRIORITY
1938 #define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority
1939
1940 #undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
1941 #define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
1942 rs6000_generate_version_dispatcher_body
1943
1944 #undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
1945 #define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
1946 rs6000_get_function_versions_dispatcher
1947
1948 #undef TARGET_OPTION_FUNCTION_VERSIONS
1949 #define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions
1950
1951 #undef TARGET_HARD_REGNO_NREGS
1952 #define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
1953 #undef TARGET_HARD_REGNO_MODE_OK
1954 #define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok
1955
1956 #undef TARGET_MODES_TIEABLE_P
1957 #define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p
1958
1959 #undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
1960 #define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
1961 rs6000_hard_regno_call_part_clobbered
1962
1963 #undef TARGET_SLOW_UNALIGNED_ACCESS
1964 #define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access
1965
1966 #undef TARGET_CAN_CHANGE_MODE_CLASS
1967 #define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class
1968
1969 #undef TARGET_CONSTANT_ALIGNMENT
1970 #define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment
1971
1972 #undef TARGET_STARTING_FRAME_OFFSET
1973 #define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
1974
1975 #if TARGET_ELF && RS6000_WEAK
1976 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
1977 #define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
1978 #endif
1979
1980 #undef TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P
1981 #define TARGET_SETJMP_PRESERVES_NONVOLATILE_REGS_P hook_bool_void_true
1982
1983 #undef TARGET_MANGLE_DECL_ASSEMBLER_NAME
1984 #define TARGET_MANGLE_DECL_ASSEMBLER_NAME rs6000_mangle_decl_assembler_name
1985 \f
1986
1987 /* Processor table. */
1988 struct rs6000_ptt
1989 {
1990 const char *const name; /* Canonical processor name. */
1991 const enum processor_type processor; /* Processor type enum value. */
1992 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1993 };
1994
1995 static struct rs6000_ptt const processor_target_table[] =
1996 {
1997 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1998 #include "rs6000-cpus.def"
1999 #undef RS6000_CPU
2000 };
2001
2002 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
2003 name is invalid. */
2004
2005 static int
2006 rs6000_cpu_name_lookup (const char *name)
2007 {
2008 size_t i;
2009
2010 if (name != NULL)
2011 {
2012 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
2013 if (! strcmp (name, processor_target_table[i].name))
2014 return (int)i;
2015 }
2016
2017 return -1;
2018 }
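/* Illustrative use (valid names come from rs6000-cpus.def):

     int idx = rs6000_cpu_name_lookup ("power9");
     if (idx >= 0)
       ... use processor_target_table[idx] ...

   A NULL pointer or an unknown name yields -1.  */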
2019
2020 \f
2021 /* Return number of consecutive hard regs needed starting at reg REGNO
2022 to hold something of mode MODE.
2023 This is ordinarily the length in words of a value of mode MODE
2024 but can be less for certain modes in special long registers.
2025
2026 POWER and PowerPC GPRs hold 32 bits worth;
2027 PowerPC64 GPRs and FPRs hold 64 bits worth. */
2028
2029 static int
2030 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
2031 {
2032 unsigned HOST_WIDE_INT reg_size;
2033
2034 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
2035 128-bit floating point that can go in vector registers, which has VSX
2036 memory addressing. */
2037 if (FP_REGNO_P (regno))
2038 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
2039 ? UNITS_PER_VSX_WORD
2040 : UNITS_PER_FP_WORD);
2041
2042 else if (ALTIVEC_REGNO_P (regno))
2043 reg_size = UNITS_PER_ALTIVEC_WORD;
2044
2045 else
2046 reg_size = UNITS_PER_WORD;
2047
2048 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
2049 }
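/* Worked example (illustrative): with 32-bit GPRs (UNITS_PER_WORD == 4),
   DFmode (8 bytes) needs (8 + 4 - 1) / 4 = 2 GPRs, while a 64-bit FPR
   (UNITS_PER_FP_WORD == 8) holds it in (8 + 8 - 1) / 8 = 1 register.  */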
2050
2051 /* Value is 1 if hard register REGNO can hold a value of machine-mode
2052 MODE. */
2053 static int
2054 rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
2055 {
2056 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
2057
2058 if (COMPLEX_MODE_P (mode))
2059 mode = GET_MODE_INNER (mode);
2060
2061 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
2062 register combinations; PTImode is the mode we use for those operations.
2063 Don't allow quad words in the argument or frame pointer registers, just
2064 registers 0..31. */
2065 if (mode == PTImode)
2066 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2067 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
2068 && ((regno & 1) == 0));
2069
2070 /* VSX registers that overlap the FPR registers are wider than on non-VSX
2071 implementations. Don't allow an item to be split between a FP register
2072 and an Altivec register. Allow TImode in all VSX registers if the user
2073 asked for it. */
2074 if (TARGET_VSX && VSX_REGNO_P (regno)
2075 && (VECTOR_MEM_VSX_P (mode)
2076 || FLOAT128_VECTOR_P (mode)
2077 || reg_addr[mode].scalar_in_vmx_p
2078 || mode == TImode
2079 || (TARGET_VADDUQM && mode == V1TImode)))
2080 {
2081 if (FP_REGNO_P (regno))
2082 return FP_REGNO_P (last_regno);
2083
2084 if (ALTIVEC_REGNO_P (regno))
2085 {
2086 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
2087 return 0;
2088
2089 return ALTIVEC_REGNO_P (last_regno);
2090 }
2091 }
2092
2093 /* The GPRs can hold any mode, but values bigger than one register
2094 cannot go past R31. */
2095 if (INT_REGNO_P (regno))
2096 return INT_REGNO_P (last_regno);
2097
2098 /* The float registers (except for VSX vector modes) can only hold floating
2099 modes and DImode. */
2100 if (FP_REGNO_P (regno))
2101 {
2102 if (FLOAT128_VECTOR_P (mode))
2103 return false;
2104
2105 if (SCALAR_FLOAT_MODE_P (mode)
2106 && (mode != TDmode || (regno % 2) == 0)
2107 && FP_REGNO_P (last_regno))
2108 return 1;
2109
2110 if (GET_MODE_CLASS (mode) == MODE_INT)
2111 {
2112 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2113 return 1;
2114
2115 if (TARGET_P8_VECTOR && (mode == SImode))
2116 return 1;
2117
2118 if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
2119 return 1;
2120 }
2121
2122 return 0;
2123 }
2124
2125 /* The CR registers can only hold CC modes. */
2126 if (CR_REGNO_P (regno))
2127 return GET_MODE_CLASS (mode) == MODE_CC;
2128
2129 if (CA_REGNO_P (regno))
2130 return mode == Pmode || mode == SImode;
2131
2132 /* AltiVec modes can only go in AltiVec registers. */
2133 if (ALTIVEC_REGNO_P (regno))
2134 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2135 || mode == V1TImode);
2136
2137 /* We cannot put non-VSX TImode or PTImode anywhere except the general
2138 registers, and the value must fit within the register set. */
2139
2140 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2141 }
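/* Examples of the rules above (illustrative): CCmode is accepted only in
   the CR fields, and DFmode is OK in the FPRs and, as a two-register value
   in 32-bit mode, in the GPRs.  */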
2142
2143 /* Implement TARGET_HARD_REGNO_NREGS. */
2144
2145 static unsigned int
2146 rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
2147 {
2148 return rs6000_hard_regno_nregs[mode][regno];
2149 }
2150
2151 /* Implement TARGET_HARD_REGNO_MODE_OK. */
2152
2153 static bool
2154 rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
2155 {
2156 return rs6000_hard_regno_mode_ok_p[mode][regno];
2157 }
2158
2159 /* Implement TARGET_MODES_TIEABLE_P.
2160
2161 PTImode cannot tie with other modes because PTImode is restricted to even
2162 GPR registers, and TImode can go in any GPR as well as VSX registers (PR
2163 57744).
2164
2165 Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
2166 128-bit floating point on VSX systems ties with other vectors. */
2167
2168 static bool
2169 rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
2170 {
2171 if (mode1 == PTImode)
2172 return mode2 == PTImode;
2173 if (mode2 == PTImode)
2174 return false;
2175
2176 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
2177 return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
2178 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
2179 return false;
2180
2181 if (SCALAR_FLOAT_MODE_P (mode1))
2182 return SCALAR_FLOAT_MODE_P (mode2);
2183 if (SCALAR_FLOAT_MODE_P (mode2))
2184 return false;
2185
2186 if (GET_MODE_CLASS (mode1) == MODE_CC)
2187 return GET_MODE_CLASS (mode2) == MODE_CC;
2188 if (GET_MODE_CLASS (mode2) == MODE_CC)
2189 return false;
2190
2191 return true;
2192 }
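/* Illustrative results: SFmode and DFmode tie (both scalar float modes),
   V2DFmode and V4SImode tie (both Altivec/VSX vector modes), but DFmode
   does not tie with DImode, and PTImode ties only with itself.  */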
2193
2194 /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
2195
2196 static bool
2197 rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
2198 unsigned int regno, machine_mode mode)
2199 {
2200 if (TARGET_32BIT
2201 && TARGET_POWERPC64
2202 && GET_MODE_SIZE (mode) > 4
2203 && INT_REGNO_P (regno))
2204 return true;
2205
2206 if (TARGET_VSX
2207 && FP_REGNO_P (regno)
2208 && GET_MODE_SIZE (mode) > 8
2209 && !FLOAT128_2REG_P (mode))
2210 return true;
2211
2212 return false;
2213 }
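/* For example, with -m32 -mpowerpc64 a DImode value fits in one 64-bit
   GPR, but the 32-bit ABI only preserves the low 32 bits of the register
   across calls, so the register is reported as partially clobbered.  */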
2214
2215 /* Print interesting facts about registers. */
2216 static void
2217 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2218 {
2219 int r, m;
2220
2221 for (r = first_regno; r <= last_regno; ++r)
2222 {
2223 const char *comma = "";
2224 int len;
2225
2226 if (first_regno == last_regno)
2227 fprintf (stderr, "%s:\t", reg_name);
2228 else
2229 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2230
2231 len = 8;
2232 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2233 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2234 {
2235 if (len > 70)
2236 {
2237 fprintf (stderr, ",\n\t");
2238 len = 8;
2239 comma = "";
2240 }
2241
2242 if (rs6000_hard_regno_nregs[m][r] > 1)
2243 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2244 rs6000_hard_regno_nregs[m][r]);
2245 else
2246 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2247
2248 comma = ", ";
2249 }
2250
2251 if (call_used_regs[r])
2252 {
2253 if (len > 70)
2254 {
2255 fprintf (stderr, ",\n\t");
2256 len = 8;
2257 comma = "";
2258 }
2259
2260 len += fprintf (stderr, "%s%s", comma, "call-used");
2261 comma = ", ";
2262 }
2263
2264 if (fixed_regs[r])
2265 {
2266 if (len > 70)
2267 {
2268 fprintf (stderr, ",\n\t");
2269 len = 8;
2270 comma = "";
2271 }
2272
2273 len += fprintf (stderr, "%s%s", comma, "fixed");
2274 comma = ", ";
2275 }
2276
2277 if (len > 70)
2278 {
2279 fprintf (stderr, ",\n\t");
2280 comma = "";
2281 }
2282
2283 len += fprintf (stderr, "%sreg-class = %s", comma,
2284 reg_class_names[(int)rs6000_regno_regclass[r]]);
2285 comma = ", ";
2286
2287 if (len > 70)
2288 {
2289 fprintf (stderr, ",\n\t");
2290 comma = "";
2291 }
2292
2293 fprintf (stderr, "%sregno = %d\n", comma, r);
2294 }
2295 }
2296
2297 static const char *
2298 rs6000_debug_vector_unit (enum rs6000_vector v)
2299 {
2300 const char *ret;
2301
2302 switch (v)
2303 {
2304 case VECTOR_NONE: ret = "none"; break;
2305 case VECTOR_ALTIVEC: ret = "altivec"; break;
2306 case VECTOR_VSX: ret = "vsx"; break;
2307 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2308 default: ret = "unknown"; break;
2309 }
2310
2311 return ret;
2312 }
2313
2314 /* Inner function printing just the address mask for a particular reload
2315 register class. */
2316 DEBUG_FUNCTION char *
2317 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2318 {
2319 static char ret[8];
2320 char *p = ret;
2321
2322 if ((mask & RELOAD_REG_VALID) != 0)
2323 *p++ = 'v';
2324 else if (keep_spaces)
2325 *p++ = ' ';
2326
2327 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2328 *p++ = 'm';
2329 else if (keep_spaces)
2330 *p++ = ' ';
2331
2332 if ((mask & RELOAD_REG_INDEXED) != 0)
2333 *p++ = 'i';
2334 else if (keep_spaces)
2335 *p++ = ' ';
2336
2337 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2338 *p++ = 'O';
2339 else if ((mask & RELOAD_REG_OFFSET) != 0)
2340 *p++ = 'o';
2341 else if (keep_spaces)
2342 *p++ = ' ';
2343
2344 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2345 *p++ = '+';
2346 else if (keep_spaces)
2347 *p++ = ' ';
2348
2349 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2350 *p++ = '+';
2351 else if (keep_spaces)
2352 *p++ = ' ';
2353
2354 if ((mask & RELOAD_REG_AND_M16) != 0)
2355 *p++ = '&';
2356 else if (keep_spaces)
2357 *p++ = ' ';
2358
2359 *p = '\0';
2360
2361 return ret;
2362 }
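/* Illustrative rendering: a mask with RELOAD_REG_VALID, RELOAD_REG_INDEXED
   and RELOAD_REG_OFFSET set comes back as "vio" (or as "v io   " with
   KEEP_SPACES, which pads the unset positions so the columns line up).  */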
2363
2364 /* Print the address masks in a human readable fashion. */
2365 DEBUG_FUNCTION void
2366 rs6000_debug_print_mode (ssize_t m)
2367 {
2368 ssize_t rc;
2369 int spaces = 0;
2370
2371 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2372 for (rc = 0; rc < N_RELOAD_REG; rc++)
2373 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2374 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2375
2376 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2377 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2378 {
2379 fprintf (stderr, "%*s Reload=%c%c", spaces, "",
2380 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2381 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2382 spaces = 0;
2383 }
2384 else
2385 spaces += sizeof (" Reload=sl") - 1;
2386
2387 if (reg_addr[m].scalar_in_vmx_p)
2388 {
2389 fprintf (stderr, "%*s Upper=y", spaces, "");
2390 spaces = 0;
2391 }
2392 else
2393 spaces += sizeof (" Upper=y") - 1;
2394
2395 if (rs6000_vector_unit[m] != VECTOR_NONE
2396 || rs6000_vector_mem[m] != VECTOR_NONE)
2397 {
2398 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2399 spaces, "",
2400 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2401 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2402 }
2403
2404 fputs ("\n", stderr);
2405 }
2406
2407 #define DEBUG_FMT_ID "%-32s= "
2408 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2409 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2410 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
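/* For example (illustrative), fprintf (stderr, DEBUG_FMT_D, "tls_size", 16)
   prints "tls_size" left-justified in a 32-column field followed by "= 16",
   so the values of all the options line up.  */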
2411
2412 /* Print various interesting information with -mdebug=reg. */
2413 static void
2414 rs6000_debug_reg_global (void)
2415 {
2416 static const char *const tf[2] = { "false", "true" };
2417 const char *nl = (const char *)0;
2418 int m;
2419 size_t m1, m2, v;
2420 char costly_num[20];
2421 char nop_num[20];
2422 char flags_buffer[40];
2423 const char *costly_str;
2424 const char *nop_str;
2425 const char *trace_str;
2426 const char *abi_str;
2427 const char *cmodel_str;
2428 struct cl_target_option cl_opts;
2429
2430 /* Modes we want tieable information on. */
2431 static const machine_mode print_tieable_modes[] = {
2432 QImode,
2433 HImode,
2434 SImode,
2435 DImode,
2436 TImode,
2437 PTImode,
2438 SFmode,
2439 DFmode,
2440 TFmode,
2441 IFmode,
2442 KFmode,
2443 SDmode,
2444 DDmode,
2445 TDmode,
2446 V16QImode,
2447 V8HImode,
2448 V4SImode,
2449 V2DImode,
2450 V1TImode,
2451 V32QImode,
2452 V16HImode,
2453 V8SImode,
2454 V4DImode,
2455 V2TImode,
2456 V4SFmode,
2457 V2DFmode,
2458 V8SFmode,
2459 V4DFmode,
2460 CCmode,
2461 CCUNSmode,
2462 CCEQmode,
2463 };
2464
2465 /* Virtual regs we are interested in. */
2466 static const struct {
2467 int regno; /* register number. */
2468 const char *name; /* register name. */
2469 } virtual_regs[] = {
2470 { STACK_POINTER_REGNUM, "stack pointer:" },
2471 { TOC_REGNUM, "toc: " },
2472 { STATIC_CHAIN_REGNUM, "static chain: " },
2473 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2474 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2475 { ARG_POINTER_REGNUM, "arg pointer: " },
2476 { FRAME_POINTER_REGNUM, "frame pointer:" },
2477 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2478 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2479 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2480 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2481 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2482 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2483 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2484 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2485 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2486 };
2487
2488 fputs ("\nHard register information:\n", stderr);
2489 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2490 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2491 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2492 LAST_ALTIVEC_REGNO,
2493 "vs");
2494 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2495 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2496 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2497 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2498 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2499 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2500
2501 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2502 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2503 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2504
2505 fprintf (stderr,
2506 "\n"
2507 "d reg_class = %s\n"
2508 "f reg_class = %s\n"
2509 "v reg_class = %s\n"
2510 "wa reg_class = %s\n"
2511 "wd reg_class = %s\n"
2512 "we reg_class = %s\n"
2513 "wf reg_class = %s\n"
2514 "wg reg_class = %s\n"
2515 "wh reg_class = %s\n"
2516 "wi reg_class = %s\n"
2517 "wj reg_class = %s\n"
2518 "wk reg_class = %s\n"
2519 "wl reg_class = %s\n"
2520 "wm reg_class = %s\n"
2521 "wp reg_class = %s\n"
2522 "wq reg_class = %s\n"
2523 "wr reg_class = %s\n"
2524 "ws reg_class = %s\n"
2525 "wt reg_class = %s\n"
2526 "wv reg_class = %s\n"
2527 "ww reg_class = %s\n"
2528 "wx reg_class = %s\n"
2529 "wz reg_class = %s\n"
2530 "wA reg_class = %s\n"
2531 "wH reg_class = %s\n"
2532 "wI reg_class = %s\n"
2533 "\n",
2534 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2535 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2536 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2537 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2538 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2539 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2540 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2541 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2542 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2543 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2544 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2545 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2546 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2547 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2548 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2549 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2550 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2551 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2552 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2553 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2554 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2555 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2556 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2557 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
2558 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2559 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]]);
2560
2561 nl = "\n";
2562 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2563 rs6000_debug_print_mode (m);
2564
2565 fputs ("\n", stderr);
2566
2567 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2568 {
2569 machine_mode mode1 = print_tieable_modes[m1];
2570 bool first_time = true;
2571
2572 nl = (const char *)0;
2573 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2574 {
2575 machine_mode mode2 = print_tieable_modes[m2];
2576 if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
2577 {
2578 if (first_time)
2579 {
2580 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2581 nl = "\n";
2582 first_time = false;
2583 }
2584
2585 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2586 }
2587 }
2588
2589 if (!first_time)
2590 fputs ("\n", stderr);
2591 }
2592
2593 if (nl)
2594 fputs (nl, stderr);
2595
2596 if (rs6000_recip_control)
2597 {
2598 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2599
2600 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2601 if (rs6000_recip_bits[m])
2602 {
2603 fprintf (stderr,
2604 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2605 GET_MODE_NAME (m),
2606 (RS6000_RECIP_AUTO_RE_P (m)
2607 ? "auto"
2608 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2609 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2610 ? "auto"
2611 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2612 }
2613
2614 fputs ("\n", stderr);
2615 }
2616
2617 if (rs6000_cpu_index >= 0)
2618 {
2619 const char *name = processor_target_table[rs6000_cpu_index].name;
2620 HOST_WIDE_INT flags
2621 = processor_target_table[rs6000_cpu_index].target_enable;
2622
2623 sprintf (flags_buffer, "-mcpu=%s flags", name);
2624 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2625 }
2626 else
2627 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2628
2629 if (rs6000_tune_index >= 0)
2630 {
2631 const char *name = processor_target_table[rs6000_tune_index].name;
2632 HOST_WIDE_INT flags
2633 = processor_target_table[rs6000_tune_index].target_enable;
2634
2635 sprintf (flags_buffer, "-mtune=%s flags", name);
2636 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2637 }
2638 else
2639 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2640
2641 cl_target_option_save (&cl_opts, &global_options);
2642 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2643 rs6000_isa_flags);
2644
2645 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2646 rs6000_isa_flags_explicit);
2647
2648 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2649 rs6000_builtin_mask);
2650
2651 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2652
2653 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2654 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2655
2656 switch (rs6000_sched_costly_dep)
2657 {
2658 case max_dep_latency:
2659 costly_str = "max_dep_latency";
2660 break;
2661
2662 case no_dep_costly:
2663 costly_str = "no_dep_costly";
2664 break;
2665
2666 case all_deps_costly:
2667 costly_str = "all_deps_costly";
2668 break;
2669
2670 case true_store_to_load_dep_costly:
2671 costly_str = "true_store_to_load_dep_costly";
2672 break;
2673
2674 case store_to_load_dep_costly:
2675 costly_str = "store_to_load_dep_costly";
2676 break;
2677
2678 default:
2679 costly_str = costly_num;
2680 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2681 break;
2682 }
2683
2684 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2685
2686 switch (rs6000_sched_insert_nops)
2687 {
2688 case sched_finish_regroup_exact:
2689 nop_str = "sched_finish_regroup_exact";
2690 break;
2691
2692 case sched_finish_pad_groups:
2693 nop_str = "sched_finish_pad_groups";
2694 break;
2695
2696 case sched_finish_none:
2697 nop_str = "sched_finish_none";
2698 break;
2699
2700 default:
2701 nop_str = nop_num;
2702 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2703 break;
2704 }
2705
2706 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2707
2708 switch (rs6000_sdata)
2709 {
2710 default:
2711 case SDATA_NONE:
2712 break;
2713
2714 case SDATA_DATA:
2715 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2716 break;
2717
2718 case SDATA_SYSV:
2719 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2720 break;
2721
2722 case SDATA_EABI:
2723 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2724 break;
2725
2726 }
2727
2728 switch (rs6000_traceback)
2729 {
2730 case traceback_default: trace_str = "default"; break;
2731 case traceback_none: trace_str = "none"; break;
2732 case traceback_part: trace_str = "part"; break;
2733 case traceback_full: trace_str = "full"; break;
2734 default: trace_str = "unknown"; break;
2735 }
2736
2737 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2738
2739 switch (rs6000_current_cmodel)
2740 {
2741 case CMODEL_SMALL: cmodel_str = "small"; break;
2742 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2743 case CMODEL_LARGE: cmodel_str = "large"; break;
2744 default: cmodel_str = "unknown"; break;
2745 }
2746
2747 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2748
2749 switch (rs6000_current_abi)
2750 {
2751 case ABI_NONE: abi_str = "none"; break;
2752 case ABI_AIX: abi_str = "aix"; break;
2753 case ABI_ELFv2: abi_str = "ELFv2"; break;
2754 case ABI_V4: abi_str = "V4"; break;
2755 case ABI_DARWIN: abi_str = "darwin"; break;
2756 default: abi_str = "unknown"; break;
2757 }
2758
2759 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2760
2761 if (rs6000_altivec_abi)
2762 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2763
2764 if (rs6000_darwin64_abi)
2765 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2766
2767 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2768 (TARGET_SOFT_FLOAT ? "true" : "false"));
2769
2770 if (TARGET_LINK_STACK)
2771 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2772
2773 if (TARGET_P8_FUSION)
2774 {
2775 char options[80];
2776
2777 strcpy (options, "power8");
2778 if (TARGET_P8_FUSION_SIGN)
2779 strcat (options, ", sign");
2780
2781 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2782 }
2783
2784 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2785 TARGET_SECURE_PLT ? "secure" : "bss");
2786 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2787 aix_struct_return ? "aix" : "sysv");
2788 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2789 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2790 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2791 tf[!!rs6000_align_branch_targets]);
2792 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2793 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2794 rs6000_long_double_type_size);
2795 if (rs6000_long_double_type_size > 64)
2796 {
2797 fprintf (stderr, DEBUG_FMT_S, "long double type",
2798 TARGET_IEEEQUAD ? "IEEE" : "IBM");
2799 fprintf (stderr, DEBUG_FMT_S, "default long double type",
2800 TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
2801 }
2802 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2803 (int)rs6000_sched_restricted_insns_priority);
2804 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2805 (int)END_BUILTINS);
2806 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2807 (int)RS6000_BUILTIN_COUNT);
2808
2809 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2810 (int)TARGET_FLOAT128_ENABLE_TYPE);
2811
2812 if (TARGET_VSX)
2813 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2814 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2815
2816 if (TARGET_DIRECT_MOVE_128)
2817 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2818 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2819 }
2820
2821 \f
2822 /* Update the addr mask bits in reg_addr to help secondary reload and the
2823 legitimate address support figure out the appropriate addressing to
2824 use. */
2825
2826 static void
2827 rs6000_setup_reg_addr_masks (void)
2828 {
2829 ssize_t rc, reg, m, nregs;
2830 addr_mask_type any_addr_mask, addr_mask;
2831
2832 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2833 {
2834 machine_mode m2 = (machine_mode) m;
2835 bool complex_p = false;
2836 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2837 size_t msize;
2838
2839 if (COMPLEX_MODE_P (m2))
2840 {
2841 complex_p = true;
2842 m2 = GET_MODE_INNER (m2);
2843 }
2844
2845 msize = GET_MODE_SIZE (m2);
2846
2847 /* SDmode is special in that we want to access it only via REG+REG
2848 addressing on power7 and above, since we want to use the LFIWZX and
2849 STFIWX instructions to load and store it. */
2850 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2851
2852 any_addr_mask = 0;
2853 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2854 {
2855 addr_mask = 0;
2856 reg = reload_reg_map[rc].reg;
2857
2858 /* Can mode values go in the GPR/FPR/Altivec registers? */
2859 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2860 {
2861 bool small_int_vsx_p = (small_int_p
2862 && (rc == RELOAD_REG_FPR
2863 || rc == RELOAD_REG_VMX));
2864
2865 nregs = rs6000_hard_regno_nregs[m][reg];
2866 addr_mask |= RELOAD_REG_VALID;
2867
2868 /* Indicate if the mode takes more than 1 physical register. If
2869 it takes a single register, indicate it can do REG+REG
2870 addressing. Small integers in VSX registers can only do
2871 REG+REG addressing. */
2872 if (small_int_vsx_p)
2873 addr_mask |= RELOAD_REG_INDEXED;
2874 else if (nregs > 1 || m == BLKmode || complex_p)
2875 addr_mask |= RELOAD_REG_MULTIPLE;
2876 else
2877 addr_mask |= RELOAD_REG_INDEXED;
2878
2879 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2880 addressing. If we allow scalars into Altivec registers,
2881 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.
2882
2883 For VSX systems, we don't allow update addressing for
2884 DFmode/SFmode if those registers can go in both the
2885 traditional floating point registers and Altivec registers.
2886 The load/store instructions for the Altivec registers do not
2887 have update forms. If we allowed update addressing, it seems
2888 to break IV-OPT code using floating point if the index type is
2889 int instead of long (PR target/81550 and target/84042). */
2890
2891 if (TARGET_UPDATE
2892 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2893 && msize <= 8
2894 && !VECTOR_MODE_P (m2)
2895 && !FLOAT128_VECTOR_P (m2)
2896 && !complex_p
2897 && (m != E_DFmode || !TARGET_VSX)
2898 && (m != E_SFmode || !TARGET_P8_VECTOR)
2899 && !small_int_vsx_p)
2900 {
2901 addr_mask |= RELOAD_REG_PRE_INCDEC;
2902
2903 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2904 we don't allow PRE_MODIFY for some multi-register
2905 operations. */
2906 switch (m)
2907 {
2908 default:
2909 addr_mask |= RELOAD_REG_PRE_MODIFY;
2910 break;
2911
2912 case E_DImode:
2913 if (TARGET_POWERPC64)
2914 addr_mask |= RELOAD_REG_PRE_MODIFY;
2915 break;
2916
2917 case E_DFmode:
2918 case E_DDmode:
2919 if (TARGET_HARD_FLOAT)
2920 addr_mask |= RELOAD_REG_PRE_MODIFY;
2921 break;
2922 }
2923 }
2924 }
2925
2926 /* GPR and FPR registers can do REG+OFFSET addressing, except
2927 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2928 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2929 if ((addr_mask != 0) && !indexed_only_p
2930 && msize <= 8
2931 && (rc == RELOAD_REG_GPR
2932 || ((msize == 8 || m2 == SFmode)
2933 && (rc == RELOAD_REG_FPR
2934 || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
2935 addr_mask |= RELOAD_REG_OFFSET;
2936
2937 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2938 instructions are enabled. The offset for 128-bit VSX registers is
2939 only 12 bits. While GPRs can handle the full offset range, VSX
2940 registers can only handle the restricted range. */
2941 else if ((addr_mask != 0) && !indexed_only_p
2942 && msize == 16 && TARGET_P9_VECTOR
2943 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2944 || (m2 == TImode && TARGET_VSX)))
2945 {
2946 addr_mask |= RELOAD_REG_OFFSET;
2947 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2948 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2949 }
2950
2951 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2952 addressing on 128-bit types. */
2953 if (rc == RELOAD_REG_VMX && msize == 16
2954 && (addr_mask & RELOAD_REG_VALID) != 0)
2955 addr_mask |= RELOAD_REG_AND_M16;
2956
2957 reg_addr[m].addr_mask[rc] = addr_mask;
2958 any_addr_mask |= addr_mask;
2959 }
2960
2961 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2962 }
2963 }
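/* Illustrative outcome under power9 defaults: for SImode the GPR class is
   marked valid, indexed, offsettable and update-capable, while the FPR and
   Altivec classes are valid but indexed-only, because small integers in
   VSX registers can only use REG+REG addressing.  */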
2964
2965 \f
2966 /* Initialize the various global tables that are based on register size. */
2967 static void
2968 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2969 {
2970 ssize_t r, m, c;
2971 int align64;
2972 int align32;
2973
2974 /* Precalculate REGNO_REG_CLASS. */
2975 rs6000_regno_regclass[0] = GENERAL_REGS;
2976 for (r = 1; r < 32; ++r)
2977 rs6000_regno_regclass[r] = BASE_REGS;
2978
2979 for (r = 32; r < 64; ++r)
2980 rs6000_regno_regclass[r] = FLOAT_REGS;
2981
2982 for (r = 64; HARD_REGISTER_NUM_P (r); ++r)
2983 rs6000_regno_regclass[r] = NO_REGS;
2984
2985 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2986 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2987
2988 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2989 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2990 rs6000_regno_regclass[r] = CR_REGS;
2991
2992 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2993 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2994 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2995 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2996 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2997 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2998 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2999
3000 /* Precalculate register class to simpler reload register class. We don't
3001 need all of the register classes that are combinations of different
3002 classes, just the simple ones that have constraint letters. */
3003 for (c = 0; c < N_REG_CLASSES; c++)
3004 reg_class_to_reg_type[c] = NO_REG_TYPE;
3005
3006 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
3007 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
3008 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
3009 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
3010 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
3011 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
3012 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
3013 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
3014 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
3015 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
3016
3017 if (TARGET_VSX)
3018 {
3019 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
3020 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
3021 }
3022 else
3023 {
3024 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
3025 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
3026 }
3027
3028 /* Precalculate the valid memory formats as well as the vector information;
3029 this must be set up before the rs6000_hard_regno_nregs_internal calls
3030 below. */
3031 gcc_assert ((int)VECTOR_NONE == 0);
3032 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
3033 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
3034
3035 gcc_assert ((int)CODE_FOR_nothing == 0);
3036 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3037
3038 gcc_assert ((int)NO_REGS == 0);
3039 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3040
3041 /* The VSX hardware allows native alignment for vectors; control whether
3042 the compiler uses native alignment or must still use 128-bit alignment. */
3043 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3044 {
3045 align64 = 64;
3046 align32 = 32;
3047 }
3048 else
3049 {
3050 align64 = 128;
3051 align32 = 128;
3052 }
3053
3054 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3055 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3056 if (TARGET_FLOAT128_TYPE)
3057 {
3058 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3059 rs6000_vector_align[KFmode] = 128;
3060
3061 if (FLOAT128_IEEE_P (TFmode))
3062 {
3063 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3064 rs6000_vector_align[TFmode] = 128;
3065 }
3066 }
3067
3068 /* V2DF mode, VSX only. */
3069 if (TARGET_VSX)
3070 {
3071 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3072 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3073 rs6000_vector_align[V2DFmode] = align64;
3074 }
3075
3076 /* V4SF mode, either VSX or Altivec. */
3077 if (TARGET_VSX)
3078 {
3079 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3080 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3081 rs6000_vector_align[V4SFmode] = align32;
3082 }
3083 else if (TARGET_ALTIVEC)
3084 {
3085 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3086 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3087 rs6000_vector_align[V4SFmode] = align32;
3088 }
3089
3090 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
3091 and stores. */
3092 if (TARGET_ALTIVEC)
3093 {
3094 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3095 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3096 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3097 rs6000_vector_align[V4SImode] = align32;
3098 rs6000_vector_align[V8HImode] = align32;
3099 rs6000_vector_align[V16QImode] = align32;
3100
3101 if (TARGET_VSX)
3102 {
3103 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3104 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3105 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3106 }
3107 else
3108 {
3109 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3110 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3111 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3112 }
3113 }
3114
3115 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3116 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3117 if (TARGET_VSX)
3118 {
3119 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3120 rs6000_vector_unit[V2DImode]
3121 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3122 rs6000_vector_align[V2DImode] = align64;
3123
3124 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3125 rs6000_vector_unit[V1TImode]
3126 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3127 rs6000_vector_align[V1TImode] = 128;
3128 }
3129
3130 /* DFmode, see if we want to use the VSX unit. Memory is handled
3131 differently, so don't set rs6000_vector_mem. */
3132 if (TARGET_VSX)
3133 {
3134 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3135 rs6000_vector_align[DFmode] = 64;
3136 }
3137
3138 /* SFmode, see if we want to use the VSX unit. */
3139 if (TARGET_P8_VECTOR)
3140 {
3141 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3142 rs6000_vector_align[SFmode] = 32;
3143 }
3144
3145 /* Allow TImode in VSX register and set the VSX memory macros. */
3146 if (TARGET_VSX)
3147 {
3148 rs6000_vector_mem[TImode] = VECTOR_VSX;
3149 rs6000_vector_align[TImode] = align64;
3150 }
3151
3152 /* Register class constraints for the constraints that depend on compile
3153 switches. When the VSX code was added, different constraints were added
3154 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3155 of the VSX registers are used. The register classes for scalar floating
3156 point types are set, based on whether we allow that type into the upper
3157 (Altivec) registers. GCC has register classes to target the Altivec
3158 registers for load/store operations, to select using a VSX memory
3159 operation instead of the traditional floating point operation. The
3160 constraints are:
3161
3162 d - Register class to use with traditional DFmode instructions.
3163 f - Register class to use with traditional SFmode instructions.
3164 v - Altivec register.
3165 wa - Any VSX register.
3166 wc - Reserved to represent individual CR bits (used in LLVM).
3167 wd - Preferred register class for V2DFmode.
3168 wf - Preferred register class for V4SFmode.
3169 wg - Float register for power6x move insns.
3170 wh - FP register for direct move instructions.
3171 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3172 wj - FP or VSX register to hold 64-bit integers for direct moves.
3173 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3174 wl - Float register if we can do 32-bit signed int loads.
3175 wm - VSX register for ISA 2.07 direct move operations.
3176 wn - always NO_REGS.
3177 wr - GPR if 64-bit mode is permitted.
3178 ws - Register class to do ISA 2.06 DF operations.
3179 wt - VSX register for TImode in VSX registers.
3180 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3181 ww - Register class to do SF conversions in with VSX operations.
3182 wx - Float register if we can do 32-bit int stores.
3183 wz - Float register if we can do 32-bit unsigned int loads.
3184 wH - Altivec register if SImode is allowed in VSX registers.
3185 wI - Float register if SImode is allowed in VSX registers. */
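  /* As an illustrative (user-level, not from this file) use of these
     constraints, an inline asm can request any VSX register with "wa":

       double d;
       __asm__ ("xsabsdp %x0,%x1" : "=wa" (d) : "wa" (d));

     If the option a constraint depends on is not enabled, its entry stays
     NO_REGS and patterns using it will not match.  */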
3186
3187 if (TARGET_HARD_FLOAT)
3188 {
3189 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3190 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3191 }
3192
3193 if (TARGET_VSX)
3194 {
3195 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3196 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3197 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3198 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS; /* DFmode */
3199 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS; /* DFmode */
3200 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS; /* DImode */
3201 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3202 }
3203
3204 /* Add conditional constraints based on various options, to allow us to
3205 collapse multiple insn patterns. */
3206 if (TARGET_ALTIVEC)
3207 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3208
3209 if (TARGET_MFPGPR) /* DFmode */
3210 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3211
3212 if (TARGET_LFIWAX)
3213 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3214
3215 if (TARGET_DIRECT_MOVE)
3216 {
3217 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3218 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3219 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3220 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3221 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3222 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3223 }
3224
3225 if (TARGET_POWERPC64)
3226 {
3227 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3228 rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
3229 }
3230
3231 if (TARGET_P8_VECTOR) /* SFmode */
3232 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3233 else if (TARGET_VSX)
3234 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3235
3236 if (TARGET_STFIWX)
3237 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3238
3239 if (TARGET_LFIWZX)
3240 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3241
3242 if (TARGET_FLOAT128_TYPE)
3243 {
3244 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3245 if (FLOAT128_IEEE_P (TFmode))
3246 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3247 }
3248
3249 /* Support for new direct moves (ISA 3.0 + 64-bit). */
3250 if (TARGET_DIRECT_MOVE_128)
3251 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3252
3253 /* Support small integers in VSX registers. */
3254 if (TARGET_P8_VECTOR)
3255 {
3256 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3257 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3258 }
3259
3260 /* Set up the reload helper and direct move functions. */
3261 if (TARGET_VSX || TARGET_ALTIVEC)
3262 {
3263 if (TARGET_64BIT)
3264 {
3265 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3266 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3267 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3268 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3269 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3270 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3271 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3272 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3273 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3274 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3275 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3276 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3277 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3278 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3279 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3280 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3281 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3282 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3283 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3284 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3285
3286 if (FLOAT128_VECTOR_P (KFmode))
3287 {
3288 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3289 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3290 }
3291
3292 if (FLOAT128_VECTOR_P (TFmode))
3293 {
3294 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3295 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3296 }
3297
3298 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3299 available. */
3300 if (TARGET_NO_SDMODE_STACK)
3301 {
3302 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3303 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3304 }
3305
3306 if (TARGET_VSX)
3307 {
3308 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3309 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3310 }
3311
3312 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3313 {
3314 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3315 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3316 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3317 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3318 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3319 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3320 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3321 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3322 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3323
3324 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3325 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3326 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3327 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3328 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3329 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3330 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3331 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3332 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3333
3334 if (FLOAT128_VECTOR_P (KFmode))
3335 {
3336 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3337 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3338 }
3339
3340 if (FLOAT128_VECTOR_P (TFmode))
3341 {
3342 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3343 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3344 }
3345 }
3346 }
3347 else
3348 {
3349 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3350 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3351 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3352 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3353 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3354 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3355 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3356 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3357 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3358 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3359 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3360 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3361 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3362 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3363 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3364 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3365 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3366 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3367 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3368 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3369
3370 if (FLOAT128_VECTOR_P (KFmode))
3371 {
3372 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3373 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3374 }
3375
3376 if (FLOAT128_IEEE_P (TFmode))
3377 {
3378 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3379 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3380 }
3381
3382 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3383 available. */
3384 if (TARGET_NO_SDMODE_STACK)
3385 {
3386 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3387 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3388 }
3389
3390 if (TARGET_VSX)
3391 {
3392 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3393 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3394 }
3395
3396 if (TARGET_DIRECT_MOVE)
3397 {
3398 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3399 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3400 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3401 }
3402 }
3403
3404 reg_addr[DFmode].scalar_in_vmx_p = true;
3405 reg_addr[DImode].scalar_in_vmx_p = true;
3406
3407 if (TARGET_P8_VECTOR)
3408 {
3409 reg_addr[SFmode].scalar_in_vmx_p = true;
3410 reg_addr[SImode].scalar_in_vmx_p = true;
3411
3412 if (TARGET_P9_VECTOR)
3413 {
3414 reg_addr[HImode].scalar_in_vmx_p = true;
3415 reg_addr[QImode].scalar_in_vmx_p = true;
3416 }
3417 }
3418 }
3419
3420 /* Precalculate HARD_REGNO_NREGS. */
3421 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3422 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3423 rs6000_hard_regno_nregs[m][r]
3424 = rs6000_hard_regno_nregs_internal (r, (machine_mode) m);
3425
3426 /* Precalculate TARGET_HARD_REGNO_MODE_OK. */
3427 for (r = 0; HARD_REGISTER_NUM_P (r); ++r)
3428 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3429 rs6000_hard_regno_mode_ok_p[m][r]
3430 = rs6000_hard_regno_mode_ok_uncached (r, (machine_mode) m);
3431
3432 /* Precalculate CLASS_MAX_NREGS sizes. */
3433 for (c = 0; c < LIM_REG_CLASSES; ++c)
3434 {
3435 int reg_size;
3436
3437 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3438 reg_size = UNITS_PER_VSX_WORD;
3439
3440 else if (c == ALTIVEC_REGS)
3441 reg_size = UNITS_PER_ALTIVEC_WORD;
3442
3443 else if (c == FLOAT_REGS)
3444 reg_size = UNITS_PER_FP_WORD;
3445
3446 else
3447 reg_size = UNITS_PER_WORD;
3448
3449 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3450 {
3451 machine_mode m2 = (machine_mode)m;
3452 int reg_size2 = reg_size;
3453
3454 /* TDmode & IBM 128-bit floating point always take 2 registers, even
3455 in VSX. */
3456 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3457 reg_size2 = UNITS_PER_FP_WORD;
3458
3459 rs6000_class_max_nregs[m][c]
3460 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3461 }
3462 }
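/* Worked example of the computation above: with VSX, a 16-byte V2DFmode
   value needs (16 + 16 - 1) / 16 == 1 VSX register, while a
   FLOAT128_2REG_P mode such as TDmode still needs
   (16 + 8 - 1) / 8 == 2 registers, because reg_size2 falls back to the
   8-byte UNITS_PER_FP_WORD.  */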
3463
3464 /* Calculate for which modes to automatically generate code using the
3465 reciprocal divide and square root instructions. In the future, possibly
3466 automatically generate the instructions even if the user did not specify
3467 -mrecip. The double precision reciprocal sqrt estimate on older machines
3468 is not accurate enough. */
3469 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3470 if (TARGET_FRES)
3471 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3472 if (TARGET_FRE)
3473 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3474 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3475 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3476 if (VECTOR_UNIT_VSX_P (V2DFmode))
3477 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3478
3479 if (TARGET_FRSQRTES)
3480 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3481 if (TARGET_FRSQRTE)
3482 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3483 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3484 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3485 if (VECTOR_UNIT_VSX_P (V2DFmode))
3486 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3487
3488 if (rs6000_recip_control)
3489 {
3490 if (!flag_finite_math_only)
3491 warning (0, "%qs requires %qs or %qs", "-mrecip",
3492 "-ffinite-math-only", "-ffast-math");
3493 if (flag_trapping_math)
3494 warning (0, "%qs requires %qs or %qs", "-mrecip",
3495 "-fno-trapping-math", "-ffast-math");
3496 if (!flag_reciprocal_math)
3497 warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
3498 "-ffast-math");
3499 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3500 {
3501 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3502 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3503 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3504
3505 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3506 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3507 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3508
3509 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3510 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3511 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3512
3513 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3514 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3515 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3516
3517 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3518 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3519 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3520
3521 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3522 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3523 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3524
3525 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3526 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3527 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3528
3529 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3530 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3531 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3532 }
3533 }
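/* For example (an illustrative command line), "-mrecip -ffast-math" on
   a VSX target passes all three checks above, so the
   RS6000_RECIP_MASK_AUTO_* bits are set for SFmode, DFmode, V4SFmode
   and V2DFmode, and divides/square roots in those modes may be expanded
   with fre/frsqrte based estimate-and-refine sequences.  */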
3534
3535 /* Update the addr mask bits in reg_addr to help the secondary reload and
3536 legitimate-address support figure out the appropriate addressing to
3537 use. */
3538 rs6000_setup_reg_addr_masks ();
3539
3540 if (global_init_p || TARGET_DEBUG_TARGET)
3541 {
3542 if (TARGET_DEBUG_REG)
3543 rs6000_debug_reg_global ();
3544
3545 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3546 fprintf (stderr,
3547 "SImode variable mult cost = %d\n"
3548 "SImode constant mult cost = %d\n"
3549 "SImode short constant mult cost = %d\n"
3550 "DImode multipliciation cost = %d\n"
3551 "SImode division cost = %d\n"
3552 "DImode division cost = %d\n"
3553 "Simple fp operation cost = %d\n"
3554 "DFmode multiplication cost = %d\n"
3555 "SFmode division cost = %d\n"
3556 "DFmode division cost = %d\n"
3557 "cache line size = %d\n"
3558 "l1 cache size = %d\n"
3559 "l2 cache size = %d\n"
3560 "simultaneous prefetches = %d\n"
3561 "\n",
3562 rs6000_cost->mulsi,
3563 rs6000_cost->mulsi_const,
3564 rs6000_cost->mulsi_const9,
3565 rs6000_cost->muldi,
3566 rs6000_cost->divsi,
3567 rs6000_cost->divdi,
3568 rs6000_cost->fp,
3569 rs6000_cost->dmul,
3570 rs6000_cost->sdiv,
3571 rs6000_cost->ddiv,
3572 rs6000_cost->cache_line_size,
3573 rs6000_cost->l1_cache_size,
3574 rs6000_cost->l2_cache_size,
3575 rs6000_cost->simultaneous_prefetches);
3576 }
3577 }
3578
3579 #if TARGET_MACHO
3580 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3581
3582 static void
3583 darwin_rs6000_override_options (void)
3584 {
3585 /* The Darwin ABI always includes AltiVec, and cannot (validly) be
3586 turned off. */
3587 rs6000_altivec_abi = 1;
3588 TARGET_ALTIVEC_VRSAVE = 1;
3589 rs6000_current_abi = ABI_DARWIN;
3590
3591 if (DEFAULT_ABI == ABI_DARWIN
3592 && TARGET_64BIT)
3593 darwin_one_byte_bool = 1;
3594
3595 if (TARGET_64BIT && ! TARGET_POWERPC64)
3596 {
3597 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3598 warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
3599 }
3600 if (flag_mkernel)
3601 {
3602 rs6000_default_long_calls = 1;
3603 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3604 }
3605
3606 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3607 AltiVec. */
3608 if (!flag_mkernel && !flag_apple_kext
3609 && TARGET_64BIT
3610 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3611 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3612
3613 /* Unless the user (not the configurer) has explicitly overridden
3614 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3615 G4 unless targeting the kernel. */
3616 if (!flag_mkernel
3617 && !flag_apple_kext
3618 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3619 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3620 && ! global_options_set.x_rs6000_cpu_index)
3621 {
3622 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3623 }
3624 }
3625 #endif
3626
3627 /* If not otherwise specified by a target, make 'long double' equivalent to
3628 'double'. */
3629
3630 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3631 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3632 #endif
3633
3634 /* Return the builtin mask of the various options in use that could affect
3635 which builtins are available. In the past we used target_flags, but we've
3636 run out of bits, and some options are no longer in target_flags. */
3637
3638 HOST_WIDE_INT
3639 rs6000_builtin_mask_calculate (void)
3640 {
3641 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3642 | ((TARGET_CMPB) ? RS6000_BTM_CMPB : 0)
3643 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3644 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3645 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3646 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3647 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3648 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3649 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3650 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3651 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3652 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3653 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3654 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3655 | ((TARGET_POWERPC64) ? RS6000_BTM_POWERPC64 : 0)
3656 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3657 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3658 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3659 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3660 | ((TARGET_LONG_DOUBLE_128
3661 && TARGET_HARD_FLOAT
3662 && !TARGET_IEEEQUAD) ? RS6000_BTM_LDBL128 : 0)
3663 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0)
3664 | ((TARGET_FLOAT128_HW) ? RS6000_BTM_FLOAT128_HW : 0));
3665 }
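/* For example, when this mask is recomputed inside a
   "#pragma GCC target ("no-vsx")" region, RS6000_BTM_VSX is no longer
   set, so builtins gated on that bit are not accepted there
   (illustrative of how the mask is consumed).  */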
3666
3667 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3668 to clobber the XER[CA] bit because clobbering that bit without telling
3669 the compiler worked just fine with versions of GCC before GCC 5, and
3670 breaking a lot of older code in ways that are hard to track down is
3671 not such a great idea. */
3672
3673 static rtx_insn *
3674 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3675 vec<const char *> &/*constraints*/,
3676 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3677 {
3678 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3679 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3680 return NULL;
3681 }
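/* Illustrative example of the pre-GCC-5 code this protects: a sequence
   that propagates the carry, such as

     __asm__ ("addic %0,%1,-1\n\taddze %0,%0" : "=r" (x) : "r" (y));

   modifies XER[CA] without declaring it; the implicit clobber added
   above keeps such asm statements correct.  */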
3682
3683 /* Override command line options.
3684
3685 Combine build-specific configuration information with options
3686 specified on the command line to set various state variables which
3687 influence code generation, optimization, and expansion of built-in
3688 functions. Assure that command-line configuration preferences are
3689 compatible with each other and with the build configuration; issue
3690 warnings while adjusting configuration or error messages while
3691 rejecting configuration.
3692
3693 Upon entry to this function:
3694
3695 This function is called once at the beginning of
3696 compilation, and then again at the start and end of compiling
3697 each section of code that has a different configuration, as
3698 indicated, for example, by adding the
3699
3700 __attribute__((__target__("cpu=power9")))
3701
3702 qualifier to a function definition or, for example, by bracketing
3703 code between
3704
3705 #pragma GCC target("altivec")
3706
3707 and
3708
3709 #pragma GCC reset_options
3710
3711 directives. Parameter global_init_p is true for the initial
3712 invocation, which initializes global variables, and false for all
3713 subsequent invocations.
3714
3715
3716 Various global state information is assumed to be valid. This
3717 includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
3718 default CPU specified at build configure time, TARGET_DEFAULT,
3719 representing the default set of option flags for the default
3720 target, and global_options_set.x_rs6000_isa_flags, representing
3721 which options were requested on the command line.
3722
3723 Upon return from this function:
3724
3725 rs6000_isa_flags_explicit has a non-zero bit for each flag that
3726 was set by name on the command line. Additionally, if certain
3727 attributes are automatically enabled or disabled by this function
3728 in order to assure compatibility between options and
3729 configuration, the flags associated with those attributes are
3730 also set. By setting these "explicit bits", we avoid the risk
3731 that other code might accidentally overwrite these particular
3732 attributes with "default values".
3733
3734 The various bits of rs6000_isa_flags are set to indicate the
3735 target options that have been selected for the most current
3736 compilation efforts. This has the effect of also turning on the
3737 associated TARGET_XXX values since these are macros which are
3738 generally defined to test the corresponding bit of the
3739 rs6000_isa_flags variable.
3740
3741 The variable rs6000_builtin_mask is set to represent the target
3742 options for the most current compilation efforts, consistent with
3743 the current contents of rs6000_isa_flags. This variable controls
3744 expansion of built-in functions.
3745
3746 Various other global variables and fields of global structures
3747 (over 50 in all) are initialized to reflect the desired options
3748 for the most current compilation efforts. */
3749
3750 static bool
3751 rs6000_option_override_internal (bool global_init_p)
3752 {
3753 bool ret = true;
3754
3755 HOST_WIDE_INT set_masks;
3756 HOST_WIDE_INT ignore_masks;
3757 int cpu_index = -1;
3758 int tune_index;
3759 struct cl_target_option *main_target_opt
3760 = ((global_init_p || target_option_default_node == NULL)
3761 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3762
3763 /* Print defaults. */
3764 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3765 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3766
3767 /* Remember the explicit arguments. */
3768 if (global_init_p)
3769 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3770
3771 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3772 library functions, so warn about it. The flag may be useful for
3773 performance studies from time to time though, so don't disable it
3774 entirely. */
3775 if (global_options_set.x_rs6000_alignment_flags
3776 && rs6000_alignment_flags == MASK_ALIGN_POWER
3777 && DEFAULT_ABI == ABI_DARWIN
3778 && TARGET_64BIT)
3779 warning (0, "%qs is not supported for 64-bit Darwin;"
3780 " it is incompatible with the installed C and C++ libraries",
3781 "-malign-power");
3782
3783 /* Numerous experiments show that IRA-based loop pressure
3784 calculation works better for RTL loop invariant motion on targets
3785 with enough (>= 32) registers. It is an expensive optimization,
3786 so it is enabled only when optimizing for peak performance. */
3787 if (optimize >= 3 && global_init_p
3788 && !global_options_set.x_flag_ira_loop_pressure)
3789 flag_ira_loop_pressure = 1;
3790
3791 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3792 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3793 options were already specified. */
3794 if (flag_sanitize & SANITIZE_USER_ADDRESS
3795 && !global_options_set.x_flag_asynchronous_unwind_tables)
3796 flag_asynchronous_unwind_tables = 1;
3797
3798 /* Set the pointer size. */
3799 if (TARGET_64BIT)
3800 {
3801 rs6000_pmode = DImode;
3802 rs6000_pointer_size = 64;
3803 }
3804 else
3805 {
3806 rs6000_pmode = SImode;
3807 rs6000_pointer_size = 32;
3808 }
3809
3810 /* Some OSs don't support saving the high part of 64-bit registers on context
3811 switch. Other OSs don't support saving Altivec registers. On those OSs,
3812 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3813 if the user wants either, the user must explicitly specify them and we
3814 won't interfere with the user's specification. */
3815
3816 set_masks = POWERPC_MASKS;
3817 #ifdef OS_MISSING_POWERPC64
3818 if (OS_MISSING_POWERPC64)
3819 set_masks &= ~OPTION_MASK_POWERPC64;
3820 #endif
3821 #ifdef OS_MISSING_ALTIVEC
3822 if (OS_MISSING_ALTIVEC)
3823 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
3824 | OTHER_VSX_VECTOR_MASKS);
3825 #endif
3826
3827 /* Don't let the processor default override flags given explicitly. */
3828 set_masks &= ~rs6000_isa_flags_explicit;
3829
3830 if (global_init_p && rs6000_dejagnu_cpu_index >= 0)
3831 rs6000_cpu_index = rs6000_dejagnu_cpu_index;
3832
3833 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3834 the cpu in a target attribute or pragma, but did not specify a tuning
3835 option, use the cpu for the tuning option rather than the option specified
3836 with -mtune on the command line. Process a '--with-cpu' configuration
3837 request as an implicit --cpu. */
3838 if (rs6000_cpu_index >= 0)
3839 cpu_index = rs6000_cpu_index;
3840 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3841 cpu_index = main_target_opt->x_rs6000_cpu_index;
3842 else if (OPTION_TARGET_CPU_DEFAULT)
3843 cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
3844
3845 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3846 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3847 with those from the cpu, except for options that were explicitly set. If
3848 we don't have a cpu, do not override the target bits set in
3849 TARGET_DEFAULT. */
3850 if (cpu_index >= 0)
3851 {
3852 rs6000_cpu_index = cpu_index;
3853 rs6000_isa_flags &= ~set_masks;
3854 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3855 & set_masks);
3856 }
3857 else
3858 {
3859 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3860 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3861 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
3862 to using rs6000_isa_flags, we need to do the initialization here.
3863
3864 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
3865 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
3866 HOST_WIDE_INT flags;
3867 if (TARGET_DEFAULT)
3868 flags = TARGET_DEFAULT;
3869 else
3870 {
3871 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3872 const char *default_cpu = (!TARGET_POWERPC64
3873 ? "powerpc"
3874 : (BYTES_BIG_ENDIAN
3875 ? "powerpc64"
3876 : "powerpc64le"));
3877 int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
3878 flags = processor_target_table[default_cpu_index].target_enable;
3879 }
3880 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
3881 }
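/* For example, "-mcpu=power8 -mno-vsx" keeps VSX disabled: the VSX bit
   is in rs6000_isa_flags_explicit, so it was removed from set_masks
   above and the power8 target_enable bits cannot turn it back on.  */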
3882
3883 if (rs6000_tune_index >= 0)
3884 tune_index = rs6000_tune_index;
3885 else if (cpu_index >= 0)
3886 rs6000_tune_index = tune_index = cpu_index;
3887 else
3888 {
3889 size_t i;
3890 enum processor_type tune_proc
3891 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3892
3893 tune_index = -1;
3894 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3895 if (processor_target_table[i].processor == tune_proc)
3896 {
3897 tune_index = i;
3898 break;
3899 }
3900 }
3901
3902 if (cpu_index >= 0)
3903 rs6000_cpu = processor_target_table[cpu_index].processor;
3904 else
3905 rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;
3906
3907 gcc_assert (tune_index >= 0);
3908 rs6000_tune = processor_target_table[tune_index].processor;
3909
3910 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3911 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3912 || rs6000_cpu == PROCESSOR_PPCE5500)
3913 {
3914 if (TARGET_ALTIVEC)
3915 error ("AltiVec not supported in this target");
3916 }
3917
3918 /* If we are optimizing big endian systems for space, use the load/store
3919 multiple instructions. */
3920 if (BYTES_BIG_ENDIAN && optimize_size)
3921 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;
3922
3923 /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
3924 because the hardware doesn't support the instructions used in little
3925 endian mode, and they cause an alignment trap. The 750 does not cause an
3926 alignment trap (except when the target is unaligned). */
3927
3928 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
3929 {
3930 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3931 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3932 warning (0, "%qs is not supported on little endian systems",
3933 "-mmultiple");
3934 }
3935
3936 /* If little-endian, default to -mstrict-align on older processors.
3937 Testing for htm matches power8 and later. */
3938 if (!BYTES_BIG_ENDIAN
3939 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3940 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3941
3942 if (!rs6000_fold_gimple)
3943 fprintf (stderr,
3944 "gimple folding of rs6000 builtins has been disabled.\n");
3945
3946 /* Add some warnings for VSX. */
3947 if (TARGET_VSX)
3948 {
3949 const char *msg = NULL;
3950 if (!TARGET_HARD_FLOAT)
3951 {
3952 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3953 msg = N_("%<-mvsx%> requires hardware floating point");
3954 else
3955 {
3956 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3957 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3958 }
3959 }
3960 else if (TARGET_AVOID_XFORM > 0)
3961 msg = N_("%<-mvsx%> needs indexed addressing");
3962 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3963 & OPTION_MASK_ALTIVEC))
3964 {
3965 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3966 msg = N_("%<-mvsx%> and %<-mno-altivec%> are incompatible");
3967 else
3968 msg = N_("%<-mno-altivec%> disables vsx");
3969 }
3970
3971 if (msg)
3972 {
3973 warning (0, msg);
3974 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3975 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3976 }
3977 }
3978
3979 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3980 the -mcpu setting to enable options that conflict. */
3981 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3982 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3983 | OPTION_MASK_ALTIVEC
3984 | OPTION_MASK_VSX)) != 0)
3985 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3986 | OPTION_MASK_DIRECT_MOVE)
3987 & ~rs6000_isa_flags_explicit);
3988
3989 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3990 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3991
3992 /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
3993 off all of the options that depend on those flags. */
3994 ignore_masks = rs6000_disable_incompatible_switches ();
3995
3996 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3997 unless the user explicitly used the -mno-<option> to disable the code. */
3998 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
3999 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4000 else if (TARGET_P9_MINMAX)
4001 {
4002 if (cpu_index >= 0)
4003 {
4004 if (cpu_index == PROCESSOR_POWER9)
4005 {
4006 /* Legacy behavior: allow -mcpu=power9 with certain
4007 capabilities explicitly disabled. */
4008 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
4009 }
4010 else
4011 error ("power9 target option is incompatible with %<%s=<xxx>%> "
4012 "for <xxx> less than power9", "-mcpu");
4013 }
4014 else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
4015 != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
4016 & rs6000_isa_flags_explicit))
4017 /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
4018 were explicitly cleared. */
4019 error ("%qs incompatible with explicitly disabled options",
4020 "-mpower9-minmax");
4021 else
4022 rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
4023 }
4024 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4025 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
4026 else if (TARGET_VSX)
4027 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
4028 else if (TARGET_POPCNTD)
4029 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
4030 else if (TARGET_DFP)
4031 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
4032 else if (TARGET_CMPB)
4033 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
4034 else if (TARGET_FPRND)
4035 rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
4036 else if (TARGET_POPCNTB)
4037 rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
4038 else if (TARGET_ALTIVEC)
4039 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
4040
4041 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4042 {
4043 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4044 error ("%qs requires %qs", "-mcrypto", "-maltivec");
4045 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4046 }
4047
4048 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4049 {
4050 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4051 error ("%qs requires %qs", "-mdirect-move", "-mvsx");
4052 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4053 }
4054
4055 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4056 {
4057 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4058 error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
4059 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4060 }
4061
4062 if (TARGET_P8_VECTOR && !TARGET_VSX)
4063 {
4064 if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4065 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
4066 error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
4067 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
4068 {
4069 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4070 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4071 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4072 }
4073 else
4074 {
4075 /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
4076 not explicit. */
4077 rs6000_isa_flags |= OPTION_MASK_VSX;
4078 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4079 }
4080 }
4081
4082 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4083 {
4084 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4085 error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
4086 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4087 }
4088
4089 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4090 silently turn off quad memory mode. */
4091 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4092 {
4093 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4094 warning (0, N_("%<-mquad-memory%> requires 64-bit mode"));
4095
4096 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4097 warning (0, N_("%<-mquad-memory-atomic%> requires 64-bit mode"));
4098
4099 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4100 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4101 }
4102
4103 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4104 the words are reversed, but atomic operations can still be done by
4105 swapping the words. */
4106 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4107 {
4108 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4109 warning (0, N_("%<-mquad-memory%> is not available in little endian "
4110 "mode"));
4111
4112 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4113 }
4114
4115 /* Assume that if the user asked for normal quad memory instructions, they
4116 want the atomic versions as well, unless they explicitly told us not to
4117 use quad word atomic instructions. */
4118 if (TARGET_QUAD_MEMORY
4119 && !TARGET_QUAD_MEMORY_ATOMIC
4120 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4121 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
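/* For example, plain "-mquad-memory" on a 64-bit big-endian target thus
   behaves like "-mquad-memory -mquad-memory-atomic", while
   "-mquad-memory -mno-quad-memory-atomic" leaves the atomic forms
   disabled.  */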
4122
4123 /* If we can shrink-wrap the TOC register save separately, then use
4124 -msave-toc-indirect unless explicitly disabled. */
4125 if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
4126 && flag_shrink_wrap_separate
4127 && optimize_function_for_speed_p (cfun))
4128 rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
4129
4130 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4131 generating power8 instructions. Power9 does not optimize power8 fusion
4132 cases. */
4133 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4134 {
4135 if (processor_target_table[tune_index].processor == PROCESSOR_POWER8)
4136 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4137 else
4138 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4139 }
4140
4141 /* Setting additional fusion flags turns on base fusion. */
4142 if (!TARGET_P8_FUSION && TARGET_P8_FUSION_SIGN)
4143 {
4144 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4145 {
4146 if (TARGET_P8_FUSION_SIGN)
4147 error ("%qs requires %qs", "-mpower8-fusion-sign",
4148 "-mpower8-fusion");
4149
4150 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4151 }
4152 else
4153 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4154 }
4155
4156 /* Power8 does not fuse sign-extended loads with the addis. If we are
4157 optimizing at high levels for speed, convert a sign-extended load into a
4158 zero-extending load and an explicit sign extension. */
4159 if (TARGET_P8_FUSION
4160 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4161 && optimize_function_for_speed_p (cfun)
4162 && optimize >= 3)
4163 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
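/* Illustratively, instead of the unfusable "addis; lha" pair for a
   sign-extended halfword load, this generates the fusable
   zero-extending form plus an explicit extension: "addis; lhz;
   extsh".  */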
4164
4165 /* ISA 3.0 vector instructions include ISA 2.07. */
4166 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4167 {
4168 /* We prefer to not mention undocumented options in
4169 error messages. However, if users have managed to select
4170 power9-vector without selecting power8-vector, they
4171 already know about undocumented flags. */
4172 if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4173 && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
4174 error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
4175 else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
4176 {
4177 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4178 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4179 rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
4180 }
4181 else
4182 {
4183 /* OPTION_MASK_P9_VECTOR is explicit and
4184 OPTION_MASK_P8_VECTOR is not explicit. */
4185 rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
4186 rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
4187 }
4188 }
4189
4190 /* Set -mallow-movmisalign explicitly to on if we have full ISA 2.07
4191 support. If we only have ISA 2.06 support, and the user did not specify
4192 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4193 but we don't enable the full vectorization support. */
4194 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4195 TARGET_ALLOW_MOVMISALIGN = 1;
4196
4197 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4198 {
4199 if (TARGET_ALLOW_MOVMISALIGN > 0
4200 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4201 error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");
4202
4203 TARGET_ALLOW_MOVMISALIGN = 0;
4204 }
4205
4206 /* Determine when unaligned vector accesses are permitted, and when
4207 they are preferred over masked Altivec loads. Note that if
4208 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4209 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4210 not true. */
4211 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4212 {
4213 if (!TARGET_VSX)
4214 {
4215 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4216 error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");
4217
4218 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4219 }
4220
4221 else if (!TARGET_ALLOW_MOVMISALIGN)
4222 {
4223 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4224 error ("%qs requires %qs", "-munefficient-unaligned-vsx",
4225 "-mallow-movmisalign");
4226
4227 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4228 }
4229 }
4230
4231 /* Use long double size to select the appropriate long double. We use
4232 TYPE_PRECISION to differentiate the 3 different long double types. We map
4233 128 into the precision used for TFmode. */
4234 int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
4235 ? 64
4236 : FLOAT_PRECISION_TFmode);
4237
4238 /* Set long double size before the IEEE 128-bit tests. */
4239 if (!global_options_set.x_rs6000_long_double_type_size)
4240 {
4241 if (main_target_opt != NULL
4242 && (main_target_opt->x_rs6000_long_double_type_size
4243 != default_long_double_size))
4244 error ("target attribute or pragma changes long double size");
4245 else
4246 rs6000_long_double_type_size = default_long_double_size;
4247 }
4248 else if (rs6000_long_double_type_size == 128)
4249 rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
4250 else if (global_options_set.x_rs6000_ieeequad)
4251 {
4252 if (global_options.x_rs6000_ieeequad)
4253 error ("%qs requires %qs", "-mabi=ieeelongdouble", "-mlong-double-128");
4254 else
4255 error ("%qs requires %qs", "-mabi=ibmlongdouble", "-mlong-double-128");
4256 }
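/* For example, "-mabi=ieeelongdouble -mlong-double-128" selects the
   IEEE 128-bit format for long double, while "-mabi=ieeelongdouble
   -mlong-double-64" is rejected above because either -mabi=*longdouble
   option requires the 128-bit type.  */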
4257
4258 /* Set -mabi=ieeelongdouble on some old targets. In the future, power server
4259 systems will also set long double to be IEEE 128-bit. AIX and Darwin
4260 explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
4261 those systems will not pick up this default. Warn if the user changes the
4262 default unless -Wno-psabi. */
4263 if (!global_options_set.x_rs6000_ieeequad)
4264 rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;
4265
4266 else
4267 {
4268 if (global_options.x_rs6000_ieeequad
4269 && (!TARGET_POPCNTD || !TARGET_VSX))
4270 error ("%qs requires full ISA 2.06 support", "-mabi=ieeelongdouble");
4271
4272 if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
4273 {
4274 static bool warned_change_long_double;
4275 if (!warned_change_long_double)
4276 {
4277 warned_change_long_double = true;
4278 if (TARGET_IEEEQUAD)
4279 warning (OPT_Wpsabi, "using IEEE extended precision long double");
4280 else
4281 warning (OPT_Wpsabi, "using IBM extended precision long double");
4282 }
4283 }
4284 }
4285
4286 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4287 systems. In GCC 7, we would enable the IEEE 128-bit floating point
4288 infrastructure (-mfloat128-type) but not enable the actual __float128 type
4289 unless the user used the explicit -mfloat128. In GCC 8, we enable both
4290 the keyword and the type. */
4291 TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;
4292
4293 /* IEEE 128-bit floating point requires VSX support. */
4294 if (TARGET_FLOAT128_KEYWORD)
4295 {
4296 if (!TARGET_VSX)
4297 {
4298 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4299 error ("%qs requires VSX support", "%<-mfloat128%>");
4300
4301 TARGET_FLOAT128_TYPE = 0;
4302 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
4303 | OPTION_MASK_FLOAT128_HW);
4304 }
4305 else if (!TARGET_FLOAT128_TYPE)
4306 {
4307 TARGET_FLOAT128_TYPE = 1;
4308 warning (0, "The %<-mfloat128%> option may not be fully supported");
4309 }
4310 }
4311
4312 /* Enable the __float128 keyword under Linux by default. */
4313 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
4314 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4315 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
4316
4317 /* If we are supporting the float128 type and have full ISA 3.0 support,
4318 enable -mfloat128-hardware by default. However, don't enable the
4319 __float128 keyword if it was explicitly turned off. 64-bit mode is needed
4320 because sometimes the compiler wants to put things in an integer
4321 container, and if we don't have __int128 support, it is impossible. */
4322 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
4323 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4324 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4325 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4326
4327 if (TARGET_FLOAT128_HW
4328 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4329 {
4330 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4331 error ("%qs requires full ISA 3.0 support", "%<-mfloat128-hardware%>");
4332
4333 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4334 }
4335
4336 if (TARGET_FLOAT128_HW && !TARGET_64BIT)
4337 {
4338 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4339 error ("%qs requires %qs", "%<-mfloat128-hardware%>", "-m64");
4340
4341 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4342 }
4343
4344 /* Print the options after updating the defaults. */
4345 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4346 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4347
4348 /* E500mc does "better" if we inline more aggressively. Respect the
4349 user's opinion, though. */
4350 if (rs6000_block_move_inline_limit == 0
4351 && (rs6000_tune == PROCESSOR_PPCE500MC
4352 || rs6000_tune == PROCESSOR_PPCE500MC64
4353 || rs6000_tune == PROCESSOR_PPCE5500
4354 || rs6000_tune == PROCESSOR_PPCE6500))
4355 rs6000_block_move_inline_limit = 128;
4356
4357 /* store_one_arg depends on expand_block_move to handle at least the
4358 size of reg_parm_stack_space. */
4359 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4360 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4361
4362 if (global_init_p)
4363 {
4364 /* If the appropriate debug option is enabled, replace the target hooks
4365 with debug versions that call the real version and then prints
4366 debugging information. */
4367 if (TARGET_DEBUG_COST)
4368 {
4369 targetm.rtx_costs = rs6000_debug_rtx_costs;
4370 targetm.address_cost = rs6000_debug_address_cost;
4371 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4372 }
4373
4374 if (TARGET_DEBUG_ADDR)
4375 {
4376 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4377 targetm.legitimize_address = rs6000_debug_legitimize_address;
4378 rs6000_secondary_reload_class_ptr
4379 = rs6000_debug_secondary_reload_class;
4380 targetm.secondary_memory_needed
4381 = rs6000_debug_secondary_memory_needed;
4382 targetm.can_change_mode_class
4383 = rs6000_debug_can_change_mode_class;
4384 rs6000_preferred_reload_class_ptr
4385 = rs6000_debug_preferred_reload_class;
4386 rs6000_mode_dependent_address_ptr
4387 = rs6000_debug_mode_dependent_address;
4388 }
4389
4390 if (rs6000_veclibabi_name)
4391 {
4392 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4393 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4394 else
4395 {
4396 error ("unknown vectorization library ABI type (%qs) for "
4397 "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
4398 ret = false;
4399 }
4400 }
4401 }
4402
4403 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4404 target attribute or pragma which automatically enables both options,
4405 unless the altivec ABI was set. This is set by default for 64-bit, but
4406 not for 32-bit. */
4407 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4408 {
4409 TARGET_FLOAT128_TYPE = 0;
4410 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4411 | OPTION_MASK_FLOAT128_KEYWORD)
4412 & ~rs6000_isa_flags_explicit);
4413 }
4414
4415 /* Enable Altivec ABI for AIX -maltivec. */
4416 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4417 {
4418 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4419 error ("target attribute or pragma changes AltiVec ABI");
4420 else
4421 rs6000_altivec_abi = 1;
4422 }
4423
4424 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4425 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4426 be explicitly overridden in either case. */
4427 if (TARGET_ELF)
4428 {
4429 if (!global_options_set.x_rs6000_altivec_abi
4430 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4431 {
4432 if (main_target_opt != NULL
4433 && !main_target_opt->x_rs6000_altivec_abi)
4434 error ("target attribute or pragma changes AltiVec ABI");
4435 else
4436 rs6000_altivec_abi = 1;
4437 }
4438 }
4439
4440 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4441 So far, the only darwin64 targets are also Mach-O. */
4442 if (TARGET_MACHO
4443 && DEFAULT_ABI == ABI_DARWIN
4444 && TARGET_64BIT)
4445 {
4446 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4447 error ("target attribute or pragma changes darwin64 ABI");
4448 else
4449 {
4450 rs6000_darwin64_abi = 1;
4451 /* Default to natural alignment, for better performance. */
4452 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4453 }
4454 }
4455
4456 /* Place FP constants in the constant pool instead of the TOC
4457 if section anchors are enabled. */
4458 if (flag_section_anchors
4459 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4460 TARGET_NO_FP_IN_TOC = 1;
4461
4462 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4463 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4464
4465 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4466 SUBTARGET_OVERRIDE_OPTIONS;
4467 #endif
4468 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4469 SUBSUBTARGET_OVERRIDE_OPTIONS;
4470 #endif
4471 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4472 SUB3TARGET_OVERRIDE_OPTIONS;
4473 #endif
4474
4475 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4476 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4477
4478 rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
4479 && rs6000_tune != PROCESSOR_POWER5
4480 && rs6000_tune != PROCESSOR_POWER6
4481 && rs6000_tune != PROCESSOR_POWER7
4482 && rs6000_tune != PROCESSOR_POWER8
4483 && rs6000_tune != PROCESSOR_POWER9
4484 && rs6000_tune != PROCESSOR_PPCA2
4485 && rs6000_tune != PROCESSOR_CELL
4486 && rs6000_tune != PROCESSOR_PPC476);
4487 rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
4488 || rs6000_tune == PROCESSOR_POWER5
4489 || rs6000_tune == PROCESSOR_POWER7
4490 || rs6000_tune == PROCESSOR_POWER8);
4491 rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
4492 || rs6000_tune == PROCESSOR_POWER5
4493 || rs6000_tune == PROCESSOR_POWER6
4494 || rs6000_tune == PROCESSOR_POWER7
4495 || rs6000_tune == PROCESSOR_POWER8
4496 || rs6000_tune == PROCESSOR_POWER9
4497 || rs6000_tune == PROCESSOR_PPCE500MC
4498 || rs6000_tune == PROCESSOR_PPCE500MC64
4499 || rs6000_tune == PROCESSOR_PPCE5500
4500 || rs6000_tune == PROCESSOR_PPCE6500);
4501
4502 /* Allow debug switches to override the above settings. These are set to -1
4503 in rs6000.opt to indicate the user hasn't directly set the switch. */
4504 if (TARGET_ALWAYS_HINT >= 0)
4505 rs6000_always_hint = TARGET_ALWAYS_HINT;
4506
4507 if (TARGET_SCHED_GROUPS >= 0)
4508 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4509
4510 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4511 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4512
4513 rs6000_sched_restricted_insns_priority
4514 = (rs6000_sched_groups ? 1 : 0);
4515
4516 /* Handle -msched-costly-dep option. */
4517 rs6000_sched_costly_dep
4518 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4519
4520 if (rs6000_sched_costly_dep_str)
4521 {
4522 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4523 rs6000_sched_costly_dep = no_dep_costly;
4524 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4525 rs6000_sched_costly_dep = all_deps_costly;
4526 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4527 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4528 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4529 rs6000_sched_costly_dep = store_to_load_dep_costly;
4530 else
4531 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4532 atoi (rs6000_sched_costly_dep_str));
4533 }
4534
4535 /* Handle -minsert-sched-nops option. */
4536 rs6000_sched_insert_nops
4537 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4538
4539 if (rs6000_sched_insert_nops_str)
4540 {
4541 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4542 rs6000_sched_insert_nops = sched_finish_none;
4543 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4544 rs6000_sched_insert_nops = sched_finish_pad_groups;
4545 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4546 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4547 else
4548 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4549 atoi (rs6000_sched_insert_nops_str));
4550 }
4551
4552 /* Handle the stack protector options. */
4553 if (!global_options_set.x_rs6000_stack_protector_guard)
4554 #ifdef TARGET_THREAD_SSP_OFFSET
4555 rs6000_stack_protector_guard = SSP_TLS;
4556 #else
4557 rs6000_stack_protector_guard = SSP_GLOBAL;
4558 #endif
4559
4560 #ifdef TARGET_THREAD_SSP_OFFSET
4561 rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
4562 rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
4563 #endif
4564
4565 if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
4566 {
4567 char *endp;
4568 const char *str = rs6000_stack_protector_guard_offset_str;
4569
4570 errno = 0;
4571 long offset = strtol (str, &endp, 0);
4572 if (!*str || *endp || errno)
4573 error ("%qs is not a valid number in %qs", str,
4574 "-mstack-protector-guard-offset=");
4575
4576 if (!IN_RANGE (offset, -0x8000, 0x7fff)
4577 || (TARGET_64BIT && (offset & 3)))
4578 error ("%qs is not a valid offset in %qs", str,
4579 "-mstack-protector-guard-offset=");
4580
4581 rs6000_stack_protector_guard_offset = offset;
4582 }
4583
4584 if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
4585 {
4586 const char *str = rs6000_stack_protector_guard_reg_str;
4587 int reg = decode_reg_name (str);
4588
4589 if (!IN_RANGE (reg, 1, 31))
4590 error ("%qs is not a valid base register in %qs", str,
4591 "-mstack-protector-guard-reg=");
4592
4593 rs6000_stack_protector_guard_reg = reg;
4594 }
4595
4596 if (rs6000_stack_protector_guard == SSP_TLS
4597 && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
4598 error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
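/* As an illustrative combination (the offset value here is
   hypothetical), the TLS guard can be requested explicitly with
   "-mstack-protector-guard=tls -mstack-protector-guard-reg=r13
   -mstack-protector-guard-offset=0x20"; the offset must fit in a
   signed 16-bit displacement and be word-aligned for -m64, per the
   checks above.  */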
4599
4600 if (global_init_p)
4601 {
4602 #ifdef TARGET_REGNAMES
4603 /* If the user desires alternate register names, copy in the
4604 alternate names now. */
4605 if (TARGET_REGNAMES)
4606 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4607 #endif
4608
4609 /* Set aix_struct_return last, after the ABI is determined.
4610 If -maix-struct-return or -msvr4-struct-return was explicitly
4611 used, don't override with the ABI default. */
4612 if (!global_options_set.x_aix_struct_return)
4613 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4614
4615 #if 0
4616 /* IBM XL compiler defaults to unsigned bitfields. */
4617 if (TARGET_XL_COMPAT)
4618 flag_signed_bitfields = 0;
4619 #endif
4620
4621 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4622 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4623
4624 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4625
4626 /* We can only guarantee the availability of DI pseudo-ops when
4627 assembling for 64-bit targets. */
4628 if (!TARGET_64BIT)
4629 {
4630 targetm.asm_out.aligned_op.di = NULL;
4631 targetm.asm_out.unaligned_op.di = NULL;
4632 }
4633
4634
4635 /* Set branch target alignment, if not optimizing for size. */
4636 if (!optimize_size)
4637 {
4638 /* Cell wants to be 8-byte aligned for dual issue. Titan wants to be
4639 8-byte aligned to avoid misprediction by the branch predictor. */
4640 if (rs6000_tune == PROCESSOR_TITAN
4641 || rs6000_tune == PROCESSOR_CELL)
4642 {
4643 if (flag_align_functions && !str_align_functions)
4644 str_align_functions = "8";
4645 if (flag_align_jumps && !str_align_jumps)
4646 str_align_jumps = "8";
4647 if (flag_align_loops && !str_align_loops)
4648 str_align_loops = "8";
4649 }
4650 if (rs6000_align_branch_targets)
4651 {
4652 if (flag_align_functions && !str_align_functions)
4653 str_align_functions = "16";
4654 if (flag_align_jumps && !str_align_jumps)
4655 str_align_jumps = "16";
4656 if (flag_align_loops && !str_align_loops)
4657 {
4658 can_override_loop_align = 1;
4659 str_align_loops = "16";
4660 }
4661 }
4662
4663 if (flag_align_jumps && !str_align_jumps)
4664 str_align_jumps = "16";
4665 if (flag_align_loops && !str_align_loops)
4666 str_align_loops = "16";
4667 }
4668
4669 /* Arrange to save and restore machine status around nested functions. */
4670 init_machine_status = rs6000_init_machine_status;
4671
4672 /* We should always be splitting complex arguments, but we can't break
4673 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
4674 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
4675 targetm.calls.split_complex_arg = NULL;
4676
4677 /* The AIX and ELFv1 ABIs define standard function descriptors. */
4678 if (DEFAULT_ABI == ABI_AIX)
4679 targetm.calls.custom_function_descriptors = 0;
4680 }
4681
4682 /* Initialize rs6000_cost with the appropriate target costs. */
4683 if (optimize_size)
4684 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
4685 else
4686 switch (rs6000_tune)
4687 {
4688 case PROCESSOR_RS64A:
4689 rs6000_cost = &rs64a_cost;
4690 break;
4691
4692 case PROCESSOR_MPCCORE:
4693 rs6000_cost = &mpccore_cost;
4694 break;
4695
4696 case PROCESSOR_PPC403:
4697 rs6000_cost = &ppc403_cost;
4698 break;
4699
4700 case PROCESSOR_PPC405:
4701 rs6000_cost = &ppc405_cost;
4702 break;
4703
4704 case PROCESSOR_PPC440:
4705 rs6000_cost = &ppc440_cost;
4706 break;
4707
4708 case PROCESSOR_PPC476:
4709 rs6000_cost = &ppc476_cost;
4710 break;
4711
4712 case PROCESSOR_PPC601:
4713 rs6000_cost = &ppc601_cost;
4714 break;
4715
4716 case PROCESSOR_PPC603:
4717 rs6000_cost = &ppc603_cost;
4718 break;
4719
4720 case PROCESSOR_PPC604:
4721 rs6000_cost = &ppc604_cost;
4722 break;
4723
4724 case PROCESSOR_PPC604e:
4725 rs6000_cost = &ppc604e_cost;
4726 break;
4727
4728 case PROCESSOR_PPC620:
4729 rs6000_cost = &ppc620_cost;
4730 break;
4731
4732 case PROCESSOR_PPC630:
4733 rs6000_cost = &ppc630_cost;
4734 break;
4735
4736 case PROCESSOR_CELL:
4737 rs6000_cost = &ppccell_cost;
4738 break;
4739
4740 case PROCESSOR_PPC750:
4741 case PROCESSOR_PPC7400:
4742 rs6000_cost = &ppc750_cost;
4743 break;
4744
4745 case PROCESSOR_PPC7450:
4746 rs6000_cost = &ppc7450_cost;
4747 break;
4748
4749 case PROCESSOR_PPC8540:
4750 case PROCESSOR_PPC8548:
4751 rs6000_cost = &ppc8540_cost;
4752 break;
4753
4754 case PROCESSOR_PPCE300C2:
4755 case PROCESSOR_PPCE300C3:
4756 rs6000_cost = &ppce300c2c3_cost;
4757 break;
4758
4759 case PROCESSOR_PPCE500MC:
4760 rs6000_cost = &ppce500mc_cost;
4761 break;
4762
4763 case PROCESSOR_PPCE500MC64:
4764 rs6000_cost = &ppce500mc64_cost;
4765 break;
4766
4767 case PROCESSOR_PPCE5500:
4768 rs6000_cost = &ppce5500_cost;
4769 break;
4770
4771 case PROCESSOR_PPCE6500:
4772 rs6000_cost = &ppce6500_cost;
4773 break;
4774
4775 case PROCESSOR_TITAN:
4776 rs6000_cost = &titan_cost;
4777 break;
4778
4779 case PROCESSOR_POWER4:
4780 case PROCESSOR_POWER5:
4781 rs6000_cost = &power4_cost;
4782 break;
4783
4784 case PROCESSOR_POWER6:
4785 rs6000_cost = &power6_cost;
4786 break;
4787
4788 case PROCESSOR_POWER7:
4789 rs6000_cost = &power7_cost;
4790 break;
4791
4792 case PROCESSOR_POWER8:
4793 rs6000_cost = &power8_cost;
4794 break;
4795
4796 case PROCESSOR_POWER9:
4797 rs6000_cost = &power9_cost;
4798 break;
4799
4800 case PROCESSOR_PPCA2:
4801 rs6000_cost = &ppca2_cost;
4802 break;
4803
4804 default:
4805 gcc_unreachable ();
4806 }
4807
4808 if (global_init_p)
4809 {
4810 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
4811 rs6000_cost->simultaneous_prefetches,
4812 global_options.x_param_values,
4813 global_options_set.x_param_values);
4814 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
4815 global_options.x_param_values,
4816 global_options_set.x_param_values);
4817 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
4818 rs6000_cost->cache_line_size,
4819 global_options.x_param_values,
4820 global_options_set.x_param_values);
4821 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
4822 global_options.x_param_values,
4823 global_options_set.x_param_values);
4824
4825 /* Increase loop peeling limits based on performance analysis. */
4826 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
4827 global_options.x_param_values,
4828 global_options_set.x_param_values);
4829 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
4830 global_options.x_param_values,
4831 global_options_set.x_param_values);
4832
4833 /* Use the 'model' -fsched-pressure algorithm by default. */
4834 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
4835 SCHED_PRESSURE_MODEL,
4836 global_options.x_param_values,
4837 global_options_set.x_param_values);
4838
4839 /* If using typedef char *va_list, signal that
4840 __builtin_va_start (&ap, 0) can be optimized to
4841 ap = __builtin_next_arg (0). */
4842 if (DEFAULT_ABI != ABI_V4)
4843 targetm.expand_builtin_va_start = NULL;
4844 }
4845
4846 /* If not explicitly specified via option, decide whether to generate indexed
4847 load/store instructions. A value of -1 indicates that the
4848 initial value of this variable has not been overwritten. During
4849 compilation, TARGET_AVOID_XFORM is either 0 or 1. */
4850 if (TARGET_AVOID_XFORM == -1)
4851 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4852 DERAT mispredict penalty. However the LVE and STVE altivec instructions
4853 need indexed accesses and the type used is the scalar type of the element
4854 being loaded or stored. */
4855 TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
4856 && !TARGET_ALTIVEC);
4857
4858 /* Set the -mrecip options. */
4859 if (rs6000_recip_name)
4860 {
4861 char *p = ASTRDUP (rs6000_recip_name);
4862 char *q;
4863 unsigned int mask, i;
4864 bool invert;
4865
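/* Each comma-separated token either ORs a mask into rs6000_recip_control
or, when prefixed with '!', clears it. For example, -mrecip=default,!NAME
first ORs in the precision-appropriate default mask and then clears the
bits for NAME, where NAME is any entry in the recip_options table. */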
4866 while ((q = strtok (p, ",")) != NULL)
4867 {
4868 p = NULL;
4869 if (*q == '!')
4870 {
4871 invert = true;
4872 q++;
4873 }
4874 else
4875 invert = false;
4876
4877 if (!strcmp (q, "default"))
4878 mask = ((TARGET_RECIP_PRECISION)
4879 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4880 else
4881 {
4882 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4883 if (!strcmp (q, recip_options[i].string))
4884 {
4885 mask = recip_options[i].mask;
4886 break;
4887 }
4888
4889 if (i == ARRAY_SIZE (recip_options))
4890 {
4891 error ("unknown option for %<%s=%s%>", "-mrecip", q);
4892 invert = false;
4893 mask = 0;
4894 ret = false;
4895 }
4896 }
4897
4898 if (invert)
4899 rs6000_recip_control &= ~mask;
4900 else
4901 rs6000_recip_control |= mask;
4902 }
4903 }
4904
4905 /* Set the builtin mask of the various options used that could affect which
4906 builtins were used. In the past we used target_flags, but we've run out
4907 of bits, and some options are no longer in target_flags. */
4908 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4909 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4910 rs6000_print_builtin_options (stderr, 0, "builtin mask",
4911 rs6000_builtin_mask);
4912
4913 /* Initialize all of the registers. */
4914 rs6000_init_hard_regno_mode_ok (global_init_p);
4915
4916 /* Save the initial options in case the user uses function-specific options. */
4917 if (global_init_p)
4918 target_option_default_node = target_option_current_node
4919 = build_target_option_node (&global_options);
4920
4921 /* If not explicitly specified via option, decide whether to generate the
4922 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4923 if (TARGET_LINK_STACK == -1)
4924 SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);
4925
4926 /* Deprecate use of -mno-speculate-indirect-jumps. */
4927 if (!rs6000_speculate_indirect_jumps)
4928 warning (0, "%qs is deprecated and not recommended in any circumstances",
4929 "-mno-speculate-indirect-jumps");
4930
4931 return ret;
4932 }
4933
4934 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4935 define the target cpu type. */
4936
4937 static void
4938 rs6000_option_override (void)
4939 {
4940 (void) rs6000_option_override_internal (true);
4941 }
4942
4943 \f
4944 /* Implement targetm.vectorize.builtin_mask_for_load. */
4945 static tree
4946 rs6000_builtin_mask_for_load (void)
4947 {
4948 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
4949 if ((TARGET_ALTIVEC && !TARGET_VSX)
4950 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
4951 return altivec_builtin_mask_for_load;
4952 else
4953 return 0;
4954 }
4955
4956 /* Implement LOOP_ALIGN. */
4957 align_flags
4958 rs6000_loop_align (rtx label)
4959 {
4960 basic_block bb;
4961 int ninsns;
4962
4963 /* Don't override loop alignment if -falign-loops was specified. */
4964 if (!can_override_loop_align)
4965 return align_loops;
4966
4967 bb = BLOCK_FOR_INSN (label);
4968 ninsns = num_loop_insns (bb->loop_father);
4969
4970 /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default. */
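/* align_flags takes a log2 value, so align_flags (5) below requests a
2**5 == 32 byte alignment boundary. */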
4971 if (ninsns > 4 && ninsns <= 8
4972 && (rs6000_tune == PROCESSOR_POWER4
4973 || rs6000_tune == PROCESSOR_POWER5
4974 || rs6000_tune == PROCESSOR_POWER6
4975 || rs6000_tune == PROCESSOR_POWER7
4976 || rs6000_tune == PROCESSOR_POWER8))
4977 return align_flags (5);
4978 else
4979 return align_loops;
4980 }
4981
4982 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4983 after applying N iterations. This routine does not determine how many
4984 iterations are required to reach the desired alignment. */
4985
4986 static bool
4987 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4988 {
4989 if (is_packed)
4990 return false;
4991
4992 if (TARGET_32BIT)
4993 {
4994 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4995 return true;
4996
4997 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4998 return true;
4999
5000 return false;
5001 }
5002 else
5003 {
5004 if (TARGET_MACHO)
5005 return false;
5006
5007 /* Assume that all other types are naturally aligned. CHECKME! */
5008 return true;
5009 }
5010 }
5011
5012 /* Return true if the vector misalignment factor is supported by the
5013 target. */
5014 static bool
5015 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5016 const_tree type,
5017 int misalignment,
5018 bool is_packed)
5019 {
5020 if (TARGET_VSX)
5021 {
5022 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5023 return true;
5024
5025 /* Return false if the movmisalign pattern is not supported for this mode. */
5026 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5027 return false;
5028
5029 if (misalignment == -1)
5030 {
5031 /* Misalignment factor is unknown at compile time but we know
5032 it's word aligned. */
5033 if (rs6000_vector_alignment_reachable (type, is_packed))
5034 {
5035 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5036
5037 if (element_size == 64 || element_size == 32)
5038 return true;
5039 }
5040
5041 return false;
5042 }
5043
5044 /* VSX supports word-aligned vectors. */
5045 if (misalignment % 4 == 0)
5046 return true;
5047 }
5048 return false;
5049 }
5050
5051 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5052 static int
5053 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5054 tree vectype, int misalign)
5055 {
5056 unsigned elements;
5057 tree elem_type;
5058
5059 switch (type_of_cost)
5060 {
5061 case scalar_stmt:
5062 case scalar_load:
5063 case scalar_store:
5064 case vector_stmt:
5065 case vector_load:
5066 case vector_store:
5067 case vec_to_scalar:
5068 case scalar_to_vec:
5069 case cond_branch_not_taken:
5070 return 1;
5071
5072 case vec_perm:
5073 if (TARGET_VSX)
5074 return 3;
5075 else
5076 return 1;
5077
5078 case vec_promote_demote:
5079 if (TARGET_VSX)
5080 return 4;
5081 else
5082 return 1;
5083
5084 case cond_branch_taken:
5085 return 3;
5086
5087 case unaligned_load:
5088 case vector_gather_load:
5089 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5090 return 1;
5091
5092 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5093 {
5094 elements = TYPE_VECTOR_SUBPARTS (vectype);
5095 if (elements == 2)
5096 /* Double word aligned. */
5097 return 2;
5098
5099 if (elements == 4)
5100 {
5101 switch (misalign)
5102 {
5103 case 8:
5104 /* Double word aligned. */
5105 return 2;
5106
5107 case -1:
5108 /* Unknown misalignment. */
5109 case 4:
5110 case 12:
5111 /* Word aligned. */
5112 return 22;
5113
5114 default:
5115 gcc_unreachable ();
5116 }
5117 }
5118 }
5119
5120 if (TARGET_ALTIVEC)
5121 /* Misaligned loads are not supported. */
5122 gcc_unreachable ();
5123
5124 return 2;
5125
5126 case unaligned_store:
5127 case vector_scatter_store:
5128 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5129 return 1;
5130
5131 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5132 {
5133 elements = TYPE_VECTOR_SUBPARTS (vectype);
5134 if (elements == 2)
5135 /* Double word aligned. */
5136 return 2;
5137
5138 if (elements == 4)
5139 {
5140 switch (misalign)
5141 {
5142 case 8:
5143 /* Double word aligned. */
5144 return 2;
5145
5146 case -1:
5147 /* Unknown misalignment. */
5148 case 4:
5149 case 12:
5150 /* Word aligned. */
5151 return 23;
5152
5153 default:
5154 gcc_unreachable ();
5155 }
5156 }
5157 }
5158
5159 if (TARGET_ALTIVEC)
5160 /* Misaligned stores are not supported. */
5161 gcc_unreachable ();
5162
5163 return 2;
5164
5165 case vec_construct:
5166 /* This is a rough approximation assuming non-constant elements
5167 constructed into a vector via element insertion. FIXME:
5168 vec_construct is not granular enough for uniformly good
5169 decisions. If the initialization is a splat, this is
5170 cheaper than we estimate. Improve this someday. */
5171 elem_type = TREE_TYPE (vectype);
5172 /* 32-bit vectors loaded into registers are stored as double
5173 precision, so we need 2 permutes, 2 converts, and 1 merge
5174 to construct a vector of short floats from them. */
5175 if (SCALAR_FLOAT_TYPE_P (elem_type)
5176 && TYPE_PRECISION (elem_type) == 32)
5177 return 5;
5178 /* On POWER9, integer vector types are built up in GPRs and then
5179 use a direct move (2 cycles). For POWER8 this is even worse,
5180 as we need two direct moves and a merge, and the direct moves
5181 are five cycles. */
5182 else if (INTEGRAL_TYPE_P (elem_type))
5183 {
5184 if (TARGET_P9_VECTOR)
5185 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
5186 else
5187 return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
5188 }
5189 else
5190 /* V2DFmode doesn't need a direct move. */
5191 return 2;
5192
5193 default:
5194 gcc_unreachable ();
5195 }
5196 }
5197
5198 /* Implement targetm.vectorize.preferred_simd_mode. */
5199
5200 static machine_mode
5201 rs6000_preferred_simd_mode (scalar_mode mode)
5202 {
5203 if (TARGET_VSX)
5204 switch (mode)
5205 {
5206 case E_DFmode:
5207 return V2DFmode;
5208 default:;
5209 }
5210 if (TARGET_ALTIVEC || TARGET_VSX)
5211 switch (mode)
5212 {
5213 case E_SFmode:
5214 return V4SFmode;
5215 case E_TImode:
5216 return V1TImode;
5217 case E_DImode:
5218 return V2DImode;
5219 case E_SImode:
5220 return V4SImode;
5221 case E_HImode:
5222 return V8HImode;
5223 case E_QImode:
5224 return V16QImode;
5225 default:;
5226 }
5227 return word_mode;
5228 }
5229
5230 typedef struct _rs6000_cost_data
5231 {
5232 struct loop *loop_info;
5233 unsigned cost[3];
5234 } rs6000_cost_data;
5235
5236 /* Test for likely overcommitment of vector hardware resources. If a
5237 loop iteration is relatively large, and too large a percentage of
5238 instructions in the loop are vectorized, the cost model may not
5239 adequately reflect delays from unavailable vector resources.
5240 Penalize the loop body cost for this case. */
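/* For example, with the thresholds below, a body whose vectorized
statements cost 90 and whose remaining statements cost 20 has a density of
9000/110 = 81%, under the 85% threshold, and escapes the penalty; at 95/10
the density is 90% on a size of 105, so the body cost is scaled by 110%. */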
5241
5242 static void
5243 rs6000_density_test (rs6000_cost_data *data)
5244 {
5245 const int DENSITY_PCT_THRESHOLD = 85;
5246 const int DENSITY_SIZE_THRESHOLD = 70;
5247 const int DENSITY_PENALTY = 10;
5248 struct loop *loop = data->loop_info;
5249 basic_block *bbs = get_loop_body (loop);
5250 int nbbs = loop->num_nodes;
5251 loop_vec_info loop_vinfo = loop_vec_info_for_loop (data->loop_info);
5252 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5253 int i, density_pct;
5254
5255 for (i = 0; i < nbbs; i++)
5256 {
5257 basic_block bb = bbs[i];
5258 gimple_stmt_iterator gsi;
5259
5260 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5261 {
5262 gimple *stmt = gsi_stmt (gsi);
5263 stmt_vec_info stmt_info = loop_vinfo->lookup_stmt (stmt);
5264
5265 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5266 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5267 not_vec_cost++;
5268 }
5269 }
5270
5271 free (bbs);
5272 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5273
5274 if (density_pct > DENSITY_PCT_THRESHOLD
5275 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5276 {
5277 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5278 if (dump_enabled_p ())
5279 dump_printf_loc (MSG_NOTE, vect_location,
5280 "density %d%%, cost %d exceeds threshold, penalizing "
5281 "loop body cost by %d%%", density_pct,
5282 vec_cost + not_vec_cost, DENSITY_PENALTY);
5283 }
5284 }
5285
5286 /* Implement targetm.vectorize.init_cost. */
5287
5288 /* For each vectorized loop, this var holds TRUE iff a non-memory vector
5289 instruction is needed by the vectorization. */
5290 static bool rs6000_vect_nonmem;
5291
5292 static void *
5293 rs6000_init_cost (struct loop *loop_info)
5294 {
5295 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5296 data->loop_info = loop_info;
5297 data->cost[vect_prologue] = 0;
5298 data->cost[vect_body] = 0;
5299 data->cost[vect_epilogue] = 0;
5300 rs6000_vect_nonmem = false;
5301 return data;
5302 }
5303
5304 /* Implement targetm.vectorize.add_stmt_cost. */
5305
5306 static unsigned
5307 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5308 struct _stmt_vec_info *stmt_info, int misalign,
5309 enum vect_cost_model_location where)
5310 {
5311 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5312 unsigned retval = 0;
5313
5314 if (flag_vect_cost_model)
5315 {
5316 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5317 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5318 misalign);
5319 /* Statements in an inner loop relative to the loop being
5320 vectorized are weighted more heavily. The value here is
5321 arbitrary and could potentially be improved with analysis. */
5322 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5323 count *= 50; /* FIXME. */
5324
5325 retval = (unsigned) (count * stmt_cost);
5326 cost_data->cost[where] += retval;
5327
5328 /* Check whether we're doing something other than just a copy loop.
5329 Not all such loops may be profitably vectorized; see
5330 rs6000_finish_cost. */
5331 if ((kind == vec_to_scalar || kind == vec_perm
5332 || kind == vec_promote_demote || kind == vec_construct
5333 || kind == scalar_to_vec)
5334 || (where == vect_body && kind == vector_stmt))
5335 rs6000_vect_nonmem = true;
5336 }
5337
5338 return retval;
5339 }
5340
5341 /* Implement targetm.vectorize.finish_cost. */
5342
5343 static void
5344 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5345 unsigned *body_cost, unsigned *epilogue_cost)
5346 {
5347 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5348
5349 if (cost_data->loop_info)
5350 rs6000_density_test (cost_data);
5351
5352 /* Don't vectorize minimum-vectorization-factor, simple copy loops
5353 that require versioning for any reason. The vectorization is at
5354 best a wash inside the loop, and the versioning checks make
5355 profitability highly unlikely and potentially quite harmful. */
5356 if (cost_data->loop_info)
5357 {
5358 loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
5359 if (!rs6000_vect_nonmem
5360 && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
5361 && LOOP_REQUIRES_VERSIONING (vec_info))
5362 cost_data->cost[vect_body] += 10000;
5363 }
5364
5365 *prologue_cost = cost_data->cost[vect_prologue];
5366 *body_cost = cost_data->cost[vect_body];
5367 *epilogue_cost = cost_data->cost[vect_epilogue];
5368 }
5369
5370 /* Implement targetm.vectorize.destroy_cost_data. */
5371
5372 static void
5373 rs6000_destroy_cost_data (void *data)
5374 {
5375 free (data);
5376 }
5377
5378 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5379 library with vectorized intrinsics. */
5380
5381 static tree
5382 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5383 tree type_in)
5384 {
5385 char name[32];
5386 const char *suffix = NULL;
5387 tree fntype, new_fndecl, bdecl = NULL_TREE;
5388 int n_args = 1;
5389 const char *bname;
5390 machine_mode el_mode, in_mode;
5391 int n, in_n;
5392
5393 /* Libmass is suitable for unsafe math only, as it does not correctly
5394 support parts of IEEE (such as denormals) with the required precision.
5395 Only support it if we have VSX, to use the simd d2 or f4 functions.
5396 XXX: Add variable length support. */
5397 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5398 return NULL_TREE;
5399
5400 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5401 n = TYPE_VECTOR_SUBPARTS (type_out);
5402 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5403 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5404 if (el_mode != in_mode
5405 || n != in_n)
5406 return NULL_TREE;
5407
5408 switch (fn)
5409 {
5410 CASE_CFN_ATAN2:
5411 CASE_CFN_HYPOT:
5412 CASE_CFN_POW:
5413 n_args = 2;
5414 gcc_fallthrough ();
5415
5416 CASE_CFN_ACOS:
5417 CASE_CFN_ACOSH:
5418 CASE_CFN_ASIN:
5419 CASE_CFN_ASINH:
5420 CASE_CFN_ATAN:
5421 CASE_CFN_ATANH:
5422 CASE_CFN_CBRT:
5423 CASE_CFN_COS:
5424 CASE_CFN_COSH:
5425 CASE_CFN_ERF:
5426 CASE_CFN_ERFC:
5427 CASE_CFN_EXP2:
5428 CASE_CFN_EXP:
5429 CASE_CFN_EXPM1:
5430 CASE_CFN_LGAMMA:
5431 CASE_CFN_LOG10:
5432 CASE_CFN_LOG1P:
5433 CASE_CFN_LOG2:
5434 CASE_CFN_LOG:
5435 CASE_CFN_SIN:
5436 CASE_CFN_SINH:
5437 CASE_CFN_SQRT:
5438 CASE_CFN_TAN:
5439 CASE_CFN_TANH:
5440 if (el_mode == DFmode && n == 2)
5441 {
5442 bdecl = mathfn_built_in (double_type_node, fn);
5443 suffix = "d2"; /* pow -> powd2 */
5444 }
5445 else if (el_mode == SFmode && n == 4)
5446 {
5447 bdecl = mathfn_built_in (float_type_node, fn);
5448 suffix = "4"; /* powf -> powf4 */
5449 }
5450 else
5451 return NULL_TREE;
5452 if (!bdecl)
5453 return NULL_TREE;
5454 break;
5455
5456 default:
5457 return NULL_TREE;
5458 }
5459
5460 gcc_assert (suffix != NULL);
5461 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5462 if (!bname)
5463 return NULL_TREE;
5464
5465 strcpy (name, bname + sizeof ("__builtin_") - 1);
5466 strcat (name, suffix);
5467
5468 if (n_args == 1)
5469 fntype = build_function_type_list (type_out, type_in, NULL);
5470 else if (n_args == 2)
5471 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5472 else
5473 gcc_unreachable ();
5474
5475 /* Build a function declaration for the vectorized function. */
5476 new_fndecl = build_decl (BUILTINS_LOCATION,
5477 FUNCTION_DECL, get_identifier (name), fntype);
5478 TREE_PUBLIC (new_fndecl) = 1;
5479 DECL_EXTERNAL (new_fndecl) = 1;
5480 DECL_IS_NOVOPS (new_fndecl) = 1;
5481 TREE_READONLY (new_fndecl) = 1;
5482
5483 return new_fndecl;
5484 }
5485
5486 /* Returns a function decl for a vectorized version of the builtin function
5487 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5488 if it is not available. */
5489
5490 static tree
5491 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5492 tree type_in)
5493 {
5494 machine_mode in_mode, out_mode;
5495 int in_n, out_n;
5496
5497 if (TARGET_DEBUG_BUILTIN)
5498 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5499 combined_fn_name (combined_fn (fn)),
5500 GET_MODE_NAME (TYPE_MODE (type_out)),
5501 GET_MODE_NAME (TYPE_MODE (type_in)));
5502
5503 if (TREE_CODE (type_out) != VECTOR_TYPE
5504 || TREE_CODE (type_in) != VECTOR_TYPE)
5505 return NULL_TREE;
5506
5507 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5508 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5509 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5510 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5511
5512 switch (fn)
5513 {
5514 CASE_CFN_COPYSIGN:
5515 if (VECTOR_UNIT_VSX_P (V2DFmode)
5516 && out_mode == DFmode && out_n == 2
5517 && in_mode == DFmode && in_n == 2)
5518 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5519 if (VECTOR_UNIT_VSX_P (V4SFmode)
5520 && out_mode == SFmode && out_n == 4
5521 && in_mode == SFmode && in_n == 4)
5522 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5523 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5524 && out_mode == SFmode && out_n == 4
5525 && in_mode == SFmode && in_n == 4)
5526 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5527 break;
5528 CASE_CFN_CEIL:
5529 if (VECTOR_UNIT_VSX_P (V2DFmode)
5530 && out_mode == DFmode && out_n == 2
5531 && in_mode == DFmode && in_n == 2)
5532 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5533 if (VECTOR_UNIT_VSX_P (V4SFmode)
5534 && out_mode == SFmode && out_n == 4
5535 && in_mode == SFmode && in_n == 4)
5536 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5537 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5538 && out_mode == SFmode && out_n == 4
5539 && in_mode == SFmode && in_n == 4)
5540 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5541 break;
5542 CASE_CFN_FLOOR:
5543 if (VECTOR_UNIT_VSX_P (V2DFmode)
5544 && out_mode == DFmode && out_n == 2
5545 && in_mode == DFmode && in_n == 2)
5546 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5547 if (VECTOR_UNIT_VSX_P (V4SFmode)
5548 && out_mode == SFmode && out_n == 4
5549 && in_mode == SFmode && in_n == 4)
5550 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5551 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5552 && out_mode == SFmode && out_n == 4
5553 && in_mode == SFmode && in_n == 4)
5554 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5555 break;
5556 CASE_CFN_FMA:
5557 if (VECTOR_UNIT_VSX_P (V2DFmode)
5558 && out_mode == DFmode && out_n == 2
5559 && in_mode == DFmode && in_n == 2)
5560 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5561 if (VECTOR_UNIT_VSX_P (V4SFmode)
5562 && out_mode == SFmode && out_n == 4
5563 && in_mode == SFmode && in_n == 4)
5564 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5565 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5566 && out_mode == SFmode && out_n == 4
5567 && in_mode == SFmode && in_n == 4)
5568 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5569 break;
5570 CASE_CFN_TRUNC:
5571 if (VECTOR_UNIT_VSX_P (V2DFmode)
5572 && out_mode == DFmode && out_n == 2
5573 && in_mode == DFmode && in_n == 2)
5574 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5575 if (VECTOR_UNIT_VSX_P (V4SFmode)
5576 && out_mode == SFmode && out_n == 4
5577 && in_mode == SFmode && in_n == 4)
5578 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5579 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5580 && out_mode == SFmode && out_n == 4
5581 && in_mode == SFmode && in_n == 4)
5582 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5583 break;
5584 CASE_CFN_NEARBYINT:
5585 if (VECTOR_UNIT_VSX_P (V2DFmode)
5586 && flag_unsafe_math_optimizations
5587 && out_mode == DFmode && out_n == 2
5588 && in_mode == DFmode && in_n == 2)
5589 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5590 if (VECTOR_UNIT_VSX_P (V4SFmode)
5591 && flag_unsafe_math_optimizations
5592 && out_mode == SFmode && out_n == 4
5593 && in_mode == SFmode && in_n == 4)
5594 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5595 break;
5596 CASE_CFN_RINT:
5597 if (VECTOR_UNIT_VSX_P (V2DFmode)
5598 && !flag_trapping_math
5599 && out_mode == DFmode && out_n == 2
5600 && in_mode == DFmode && in_n == 2)
5601 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5602 if (VECTOR_UNIT_VSX_P (V4SFmode)
5603 && !flag_trapping_math
5604 && out_mode == SFmode && out_n == 4
5605 && in_mode == SFmode && in_n == 4)
5606 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5607 break;
5608 default:
5609 break;
5610 }
5611
5612 /* Generate calls to libmass if appropriate. */
5613 if (rs6000_veclib_handler)
5614 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5615
5616 return NULL_TREE;
5617 }
5618
5619 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5620
5621 static tree
5622 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5623 tree type_in)
5624 {
5625 machine_mode in_mode, out_mode;
5626 int in_n, out_n;
5627
5628 if (TARGET_DEBUG_BUILTIN)
5629 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5630 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5631 GET_MODE_NAME (TYPE_MODE (type_out)),
5632 GET_MODE_NAME (TYPE_MODE (type_in)));
5633
5634 if (TREE_CODE (type_out) != VECTOR_TYPE
5635 || TREE_CODE (type_in) != VECTOR_TYPE)
5636 return NULL_TREE;
5637
5638 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5639 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5640 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5641 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5642
5643 enum rs6000_builtins fn
5644 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5645 switch (fn)
5646 {
5647 case RS6000_BUILTIN_RSQRTF:
5648 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5649 && out_mode == SFmode && out_n == 4
5650 && in_mode == SFmode && in_n == 4)
5651 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5652 break;
5653 case RS6000_BUILTIN_RSQRT:
5654 if (VECTOR_UNIT_VSX_P (V2DFmode)
5655 && out_mode == DFmode && out_n == 2
5656 && in_mode == DFmode && in_n == 2)
5657 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5658 break;
5659 case RS6000_BUILTIN_RECIPF:
5660 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5661 && out_mode == SFmode && out_n == 4
5662 && in_mode == SFmode && in_n == 4)
5663 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5664 break;
5665 case RS6000_BUILTIN_RECIP:
5666 if (VECTOR_UNIT_VSX_P (V2DFmode)
5667 && out_mode == DFmode && out_n == 2
5668 && in_mode == DFmode && in_n == 2)
5669 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5670 break;
5671 default:
5672 break;
5673 }
5674 return NULL_TREE;
5675 }
5676 \f
5677 /* Default CPU string for rs6000*_file_start functions. */
5678 static const char *rs6000_default_cpu;
5679
5680 /* Do anything needed at the start of the asm file. */
5681
5682 static void
5683 rs6000_file_start (void)
5684 {
5685 char buffer[80];
5686 const char *start = buffer;
5687 FILE *file = asm_out_file;
5688
5689 rs6000_default_cpu = TARGET_CPU_DEFAULT;
5690
5691 default_file_start ();
5692
5693 if (flag_verbose_asm)
5694 {
5695 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
5696
5697 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
5698 {
5699 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
5700 start = "";
5701 }
5702
5703 if (global_options_set.x_rs6000_cpu_index)
5704 {
5705 fprintf (file, "%s -mcpu=%s", start,
5706 processor_target_table[rs6000_cpu_index].name);
5707 start = "";
5708 }
5709
5710 if (global_options_set.x_rs6000_tune_index)
5711 {
5712 fprintf (file, "%s -mtune=%s", start,
5713 processor_target_table[rs6000_tune_index].name);
5714 start = "";
5715 }
5716
5717 if (PPC405_ERRATUM77)
5718 {
5719 fprintf (file, "%s PPC405CR_ERRATUM77", start);
5720 start = "";
5721 }
5722
5723 #ifdef USING_ELFOS_H
5724 switch (rs6000_sdata)
5725 {
5726 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
5727 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
5728 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
5729 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
5730 }
5731
5732 if (rs6000_sdata && g_switch_value)
5733 {
5734 fprintf (file, "%s -G %d", start,
5735 g_switch_value);
5736 start = "";
5737 }
5738 #endif
5739
5740 if (*start == '\0')
5741 putc ('\n', file);
5742 }
5743
5744 #ifdef USING_ELFOS_H
5745 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
5746 && !global_options_set.x_rs6000_cpu_index)
5747 {
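/* Use ISA flag bits as a proxy for the newest .machine level: each
OPTION_MASK_* tested below was introduced by the ISA level it selects
(modulo by power9, direct move by power8, popcntd by power7, and so on),
so test from newest to oldest. */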
5748 fputs ("\t.machine ", asm_out_file);
5749 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
5750 fputs ("power9\n", asm_out_file);
5751 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
5752 fputs ("power8\n", asm_out_file);
5753 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
5754 fputs ("power7\n", asm_out_file);
5755 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
5756 fputs ("power6\n", asm_out_file);
5757 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
5758 fputs ("power5\n", asm_out_file);
5759 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
5760 fputs ("power4\n", asm_out_file);
5761 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
5762 fputs ("ppc64\n", asm_out_file);
5763 else
5764 fputs ("ppc\n", asm_out_file);
5765 }
5766 #endif
5767
5768 if (DEFAULT_ABI == ABI_ELFv2)
5769 fprintf (file, "\t.abiversion 2\n");
5770 }
5771
5772 \f
5773 /* Return nonzero if this function is known to have a null epilogue. */
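/* That is, reload found no GPR, FPR or Altivec register saves, no LR or CR
save, no VRSAVE bytes, and no stack push, so the epilogue reduces to a
bare blr. */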
5774
5775 int
5776 direct_return (void)
5777 {
5778 if (reload_completed)
5779 {
5780 rs6000_stack_t *info = rs6000_stack_info ();
5781
5782 if (info->first_gp_reg_save == 32
5783 && info->first_fp_reg_save == 64
5784 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
5785 && ! info->lr_save_p
5786 && ! info->cr_save_p
5787 && info->vrsave_size == 0
5788 && ! info->push_p)
5789 return 1;
5790 }
5791
5792 return 0;
5793 }
5794
5795 /* Helper for num_insns_constant. Calculate number of instructions to
5796 load VALUE to a single gpr using combinations of addi, addis, ori,
5797 oris and sldi instructions. */
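/* For example, 0x12345678 needs lis+ori (2 insns), while a full 64-bit
constant such as 0x123456789abcdef0 is built as lis+ori for the high
word, sldi 32, then oris+ori for the low word (5 insns). */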
5798
5799 static int
5800 num_insns_constant_gpr (HOST_WIDE_INT value)
5801 {
5802 /* signed constant loadable with addi */
5803 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
5804 return 1;
5805
5806 /* constant loadable with addis */
5807 else if ((value & 0xffff) == 0
5808 && (value >> 31 == -1 || value >> 31 == 0))
5809 return 1;
5810
5811 else if (TARGET_POWERPC64)
5812 {
5813 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
5814 HOST_WIDE_INT high = value >> 31;
5815
5816 if (high == 0 || high == -1)
5817 return 2;
5818
5819 high >>= 1;
5820
5821 if (low == 0)
5822 return num_insns_constant_gpr (high) + 1;
5823 else if (high == 0)
5824 return num_insns_constant_gpr (low) + 1;
5825 else
5826 return (num_insns_constant_gpr (high)
5827 + num_insns_constant_gpr (low) + 1);
5828 }
5829
5830 else
5831 return 2;
5832 }
5833
5834 /* Helper for num_insns_constant. Allow constants formed by the
5835 num_insns_constant_gpr sequences, plus li -1, rldicl/rldicr/rlwinm,
5836 and handle modes that require multiple gprs. */
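/* For example, 0x0000ffffffffffff (the low 48 bits set) would cost 4 by
the gpr sequences above, but li -1 followed by rldicl produces it, so
its cost is capped at 2. */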
5837
5838 static int
5839 num_insns_constant_multi (HOST_WIDE_INT value, machine_mode mode)
5840 {
5841 int nregs = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
5842 int total = 0;
5843 while (nregs-- > 0)
5844 {
5845 HOST_WIDE_INT low = sext_hwi (value, BITS_PER_WORD);
5846 int insns = num_insns_constant_gpr (low);
5847 if (insns > 2
5848 /* We won't get more than 2 from num_insns_constant_gpr
5849 except when TARGET_POWERPC64 and mode is DImode or
5850 wider, so the register mode must be DImode. */
5851 && rs6000_is_valid_and_mask (GEN_INT (low), DImode))
5852 insns = 2;
5853 total += insns;
5854 value >>= BITS_PER_WORD;
5855 }
5856 return total;
5857 }
5858
5859 /* Return the number of instructions it takes to form a constant in as
5860 many gprs as are needed for MODE. */
5861
5862 int
5863 num_insns_constant (rtx op, machine_mode mode)
5864 {
5865 HOST_WIDE_INT val;
5866
5867 switch (GET_CODE (op))
5868 {
5869 case CONST_INT:
5870 val = INTVAL (op);
5871 break;
5872
5873 case CONST_WIDE_INT:
5874 {
5875 int insns = 0;
5876 for (int i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5877 insns += num_insns_constant_multi (CONST_WIDE_INT_ELT (op, i),
5878 DImode);
5879 return insns;
5880 }
5881
5882 case CONST_DOUBLE:
5883 {
5884 const struct real_value *rv = CONST_DOUBLE_REAL_VALUE (op);
5885
5886 if (mode == SFmode || mode == SDmode)
5887 {
5888 long l;
5889
5890 if (mode == SDmode)
5891 REAL_VALUE_TO_TARGET_DECIMAL32 (*rv, l);
5892 else
5893 REAL_VALUE_TO_TARGET_SINGLE (*rv, l);
5894 /* See the first define_split in rs6000.md handling a
5895 const_double_operand. */
5896 val = l;
5897 mode = SImode;
5898 }
5899 else if (mode == DFmode || mode == DDmode)
5900 {
5901 long l[2];
5902
5903 if (mode == DDmode)
5904 REAL_VALUE_TO_TARGET_DECIMAL64 (*rv, l);
5905 else
5906 REAL_VALUE_TO_TARGET_DOUBLE (*rv, l);
5907
5908 /* See the second (32-bit) and third (64-bit) define_split
5909 in rs6000.md handling a const_double_operand. */
5910 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 1] << 32;
5911 val |= l[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffffUL;
5912 mode = DImode;
5913 }
5914 else if (mode == TFmode || mode == TDmode
5915 || mode == KFmode || mode == IFmode)
5916 {
5917 long l[4];
5918 int insns;
5919
5920 if (mode == TDmode)
5921 REAL_VALUE_TO_TARGET_DECIMAL128 (*rv, l);
5922 else
5923 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*rv, l);
5924
5925 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 0 : 3] << 32;
5926 val |= l[WORDS_BIG_ENDIAN ? 1 : 2] & 0xffffffffUL;
5927 insns = num_insns_constant_multi (val, DImode);
5928 val = (unsigned HOST_WIDE_INT) l[WORDS_BIG_ENDIAN ? 2 : 1] << 32;
5929 val |= l[WORDS_BIG_ENDIAN ? 3 : 0] & 0xffffffffUL;
5930 insns += num_insns_constant_multi (val, DImode);
5931 return insns;
5932 }
5933 else
5934 gcc_unreachable ();
5935 }
5936 break;
5937
5938 default:
5939 gcc_unreachable ();
5940 }
5941
5942 return num_insns_constant_multi (val, mode);
5943 }
5944
5945 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5946 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5947 corresponding element of the vector, but for V4SFmode, the
5948 corresponding "float" is interpreted as an SImode integer. */
5949
5950 HOST_WIDE_INT
5951 const_vector_elt_as_int (rtx op, unsigned int elt)
5952 {
5953 rtx tmp;
5954
5955 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5956 gcc_assert (GET_MODE (op) != V2DImode
5957 && GET_MODE (op) != V2DFmode);
5958
5959 tmp = CONST_VECTOR_ELT (op, elt);
5960 if (GET_MODE (op) == V4SFmode)
5961 tmp = gen_lowpart (SImode, tmp);
5962 return INTVAL (tmp);
5963 }
5964
5965 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5966 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5967 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5968 all items are set to the same value and contain COPIES replicas of the
5969 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5970 operand and the others are set to the value of the operand's msb. */
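/* For example, the V8HImode constant { 5, 5, 5, 5, 5, 5, 5, 5 } matches
with STEP = 1, COPIES = 1 (vspltish 5), while { 0x0303, ..., 0x0303 }
matches with STEP = 1, COPIES = 2 (vspltisb 3, each halfword holding two
copies of the byte). */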
5971
5972 static bool
5973 vspltis_constant (rtx op, unsigned step, unsigned copies)
5974 {
5975 machine_mode mode = GET_MODE (op);
5976 machine_mode inner = GET_MODE_INNER (mode);
5977
5978 unsigned i;
5979 unsigned nunits;
5980 unsigned bitsize;
5981 unsigned mask;
5982
5983 HOST_WIDE_INT val;
5984 HOST_WIDE_INT splat_val;
5985 HOST_WIDE_INT msb_val;
5986
5987 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5988 return false;
5989
5990 nunits = GET_MODE_NUNITS (mode);
5991 bitsize = GET_MODE_BITSIZE (inner);
5992 mask = GET_MODE_MASK (inner);
5993
5994 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5995 splat_val = val;
5996 msb_val = val >= 0 ? 0 : -1;
5997
5998 /* Construct the value to be splatted, if possible. If not, return false. */
5999 for (i = 2; i <= copies; i *= 2)
6000 {
6001 HOST_WIDE_INT small_val;
6002 bitsize /= 2;
6003 small_val = splat_val >> bitsize;
6004 mask >>= bitsize;
6005 if (splat_val != ((HOST_WIDE_INT)
6006 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6007 | (small_val & mask)))
6008 return false;
6009 splat_val = small_val;
6010 }
6011
6012 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6013 if (EASY_VECTOR_15 (splat_val))
6014 ;
6015
6016 /* Also check if we can splat, and then add the result to itself. Do so if
6017 the value is positive, or if the splat instruction is using OP's mode;
6018 for splat_val < 0, the splat and the add should use the same mode. */
6019 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6020 && (splat_val >= 0 || (step == 1 && copies == 1)))
6021 ;
6022
6023 /* Also check if we are loading up the most significant bit, which can be
6024 done by loading up -1 and shifting the value left by -1. */
6025 else if (EASY_VECTOR_MSB (splat_val, inner))
6026 ;
6027
6028 else
6029 return false;
6030
6031 /* Check if VAL is present in every STEP-th element, and the
6032 other elements are filled with its most significant bit. */
6033 for (i = 1; i < nunits; ++i)
6034 {
6035 HOST_WIDE_INT desired_val;
6036 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6037 if ((i & (step - 1)) == 0)
6038 desired_val = val;
6039 else
6040 desired_val = msb_val;
6041
6042 if (desired_val != const_vector_elt_as_int (op, elt))
6043 return false;
6044 }
6045
6046 return true;
6047 }
6048
6049 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6050 instruction, filling in the bottom elements with 0 or -1.
6051
6052 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6053 for the number of zeroes to shift in, or negative for the number of 0xff
6054 bytes to shift in.
6055
6056 OP is a CONST_VECTOR. */
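/* For example, the V4SImode constant { 12, 0, 0, 0 } (in big-endian element
order) is vspltisw 12 followed by a VSLDOI that shifts in three zero
words, so the return value is 12 (the number of zero bytes). */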
6057
6058 int
6059 vspltis_shifted (rtx op)
6060 {
6061 machine_mode mode = GET_MODE (op);
6062 machine_mode inner = GET_MODE_INNER (mode);
6063
6064 unsigned i, j;
6065 unsigned nunits;
6066 unsigned mask;
6067
6068 HOST_WIDE_INT val;
6069
6070 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6071 return 0;
6072
6073 /* We need to create pseudo registers to do the shift, so don't recognize
6074 shift vector constants after reload. */
6075 if (!can_create_pseudo_p ())
6076 return 0;
6077
6078 nunits = GET_MODE_NUNITS (mode);
6079 mask = GET_MODE_MASK (inner);
6080
6081 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6082
6083 /* Check if the value can really be the operand of a vspltis[bhw]. */
6084 if (EASY_VECTOR_15 (val))
6085 ;
6086
6087 /* Also check if we are loading up the most significant bit, which can be
6088 done by loading up -1 and shifting the value left by -1. */
6089 else if (EASY_VECTOR_MSB (val, inner))
6090 ;
6091
6092 else
6093 return 0;
6094
6095 /* Check if VAL is present in every STEP-th element until we find elements
6096 that are 0 or all 1 bits. */
6097 for (i = 1; i < nunits; ++i)
6098 {
6099 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6100 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6101
6102 /* If the value isn't the splat value, check for the remaining elements
6103 being 0/-1. */
6104 if (val != elt_val)
6105 {
6106 if (elt_val == 0)
6107 {
6108 for (j = i+1; j < nunits; ++j)
6109 {
6110 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6111 if (const_vector_elt_as_int (op, elt2) != 0)
6112 return 0;
6113 }
6114
6115 return (nunits - i) * GET_MODE_SIZE (inner);
6116 }
6117
6118 else if ((elt_val & mask) == mask)
6119 {
6120 for (j = i+1; j < nunits; ++j)
6121 {
6122 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6123 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6124 return 0;
6125 }
6126
6127 return -((nunits - i) * GET_MODE_SIZE (inner));
6128 }
6129
6130 else
6131 return 0;
6132 }
6133 }
6134
6135 /* If all elements are equal, we don't need to do VSLDOI. */
6136 return 0;
6137 }
6138
6139
6140 /* Return true if OP is of the given MODE and can be synthesized
6141 with a vspltisb, vspltish or vspltisw. */
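/* The checks below start from vspltisw (STEP = nunits / 4, COPIES = 1) and
work towards vspltisb, halving STEP or doubling COPIES at each stage. */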
6142
6143 bool
6144 easy_altivec_constant (rtx op, machine_mode mode)
6145 {
6146 unsigned step, copies;
6147
6148 if (mode == VOIDmode)
6149 mode = GET_MODE (op);
6150 else if (mode != GET_MODE (op))
6151 return false;
6152
6153 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6154 constants. */
6155 if (mode == V2DFmode)
6156 return zero_constant (op, mode);
6157
6158 else if (mode == V2DImode)
6159 {
6160 if (!CONST_INT_P (CONST_VECTOR_ELT (op, 0))
6161 || !CONST_INT_P (CONST_VECTOR_ELT (op, 1)))
6162 return false;
6163
6164 if (zero_constant (op, mode))
6165 return true;
6166
6167 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6168 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6169 return true;
6170
6171 return false;
6172 }
6173
6174 /* V1TImode is a special container for TImode. Ignore for now. */
6175 else if (mode == V1TImode)
6176 return false;
6177
6178 /* Start with a vspltisw. */
6179 step = GET_MODE_NUNITS (mode) / 4;
6180 copies = 1;
6181
6182 if (vspltis_constant (op, step, copies))
6183 return true;
6184
6185 /* Then try with a vspltish. */
6186 if (step == 1)
6187 copies <<= 1;
6188 else
6189 step >>= 1;
6190
6191 if (vspltis_constant (op, step, copies))
6192 return true;
6193
6194 /* And finally a vspltisb. */
6195 if (step == 1)
6196 copies <<= 1;
6197 else
6198 step >>= 1;
6199
6200 if (vspltis_constant (op, step, copies))
6201 return true;
6202
6203 if (vspltis_shifted (op) != 0)
6204 return true;
6205
6206 return false;
6207 }
6208
6209 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6210 result is OP. Abort if it is not possible. */
6211
6212 rtx
6213 gen_easy_altivec_constant (rtx op)
6214 {
6215 machine_mode mode = GET_MODE (op);
6216 int nunits = GET_MODE_NUNITS (mode);
6217 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6218 unsigned step = nunits / 4;
6219 unsigned copies = 1;
6220
6221 /* Start with a vspltisw. */
6222 if (vspltis_constant (op, step, copies))
6223 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6224
6225 /* Then try with a vspltish. */
6226 if (step == 1)
6227 copies <<= 1;
6228 else
6229 step >>= 1;
6230
6231 if (vspltis_constant (op, step, copies))
6232 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6233
6234 /* And finally a vspltisb. */
6235 if (step == 1)
6236 copies <<= 1;
6237 else
6238 step >>= 1;
6239
6240 if (vspltis_constant (op, step, copies))
6241 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6242
6243 gcc_unreachable ();
6244 }
6245
6246 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6247 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6248
6249 Store the number of instructions needed (1 or 2) through
6250 NUM_INSNS_PTR.
6251
6252 Store the constant that is being split through CONSTANT_PTR. */
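/* For example, splatting 18 across V8HImode needs xxspltib followed by a
sign extend (2 insns), because 18 is outside the vspltish range, whereas a
V16QImode splat or a 0/-1 value needs only the xxspltib itself. */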
6253
6254 bool
6255 xxspltib_constant_p (rtx op,
6256 machine_mode mode,
6257 int *num_insns_ptr,
6258 int *constant_ptr)
6259 {
6260 size_t nunits = GET_MODE_NUNITS (mode);
6261 size_t i;
6262 HOST_WIDE_INT value;
6263 rtx element;
6264
6265 /* Set the returned values to out-of-bounds values. */
6266 *num_insns_ptr = -1;
6267 *constant_ptr = 256;
6268
6269 if (!TARGET_P9_VECTOR)
6270 return false;
6271
6272 if (mode == VOIDmode)
6273 mode = GET_MODE (op);
6274
6275 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6276 return false;
6277
6278 /* Handle (vec_duplicate <constant>). */
6279 if (GET_CODE (op) == VEC_DUPLICATE)
6280 {
6281 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6282 && mode != V2DImode)
6283 return false;
6284
6285 element = XEXP (op, 0);
6286 if (!CONST_INT_P (element))
6287 return false;
6288
6289 value = INTVAL (element);
6290 if (!IN_RANGE (value, -128, 127))
6291 return false;
6292 }
6293
6294 /* Handle (const_vector [...]). */
6295 else if (GET_CODE (op) == CONST_VECTOR)
6296 {
6297 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6298 && mode != V2DImode)
6299 return false;
6300
6301 element = CONST_VECTOR_ELT (op, 0);
6302 if (!CONST_INT_P (element))
6303 return false;
6304
6305 value = INTVAL (element);
6306 if (!IN_RANGE (value, -128, 127))
6307 return false;
6308
6309 for (i = 1; i < nunits; i++)
6310 {
6311 element = CONST_VECTOR_ELT (op, i);
6312 if (!CONST_INT_P (element))
6313 return false;
6314
6315 if (value != INTVAL (element))
6316 return false;
6317 }
6318 }
6319
6320 /* Handle integer constants being loaded into the upper part of the VSX
6321 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6322 can go in Altivec registers. Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6323 else if (CONST_INT_P (op))
6324 {
6325 if (!SCALAR_INT_MODE_P (mode))
6326 return false;
6327
6328 value = INTVAL (op);
6329 if (!IN_RANGE (value, -128, 127))
6330 return false;
6331
6332 if (!IN_RANGE (value, -1, 0))
6333 {
6334 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6335 return false;
6336
6337 if (EASY_VECTOR_15 (value))
6338 return false;
6339 }
6340 }
6341
6342 else
6343 return false;
6344
6345 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6346 sign extend. Special case 0/-1 to allow getting any VSX register instead
6347 of an Altivec register. */
6348 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6349 && EASY_VECTOR_15 (value))
6350 return false;
6351
6352 /* Return # of instructions and the constant byte for XXSPLTIB. */
6353 if (mode == V16QImode)
6354 *num_insns_ptr = 1;
6355
6356 else if (IN_RANGE (value, -1, 0))
6357 *num_insns_ptr = 1;
6358
6359 else
6360 *num_insns_ptr = 2;
6361
6362 *constant_ptr = (int) value;
6363 return true;
6364 }
6365
6366 const char *
6367 output_vec_const_move (rtx *operands)
6368 {
6369 int shift;
6370 machine_mode mode;
6371 rtx dest, vec;
6372
6373 dest = operands[0];
6374 vec = operands[1];
6375 mode = GET_MODE (dest);
6376
6377 if (TARGET_VSX)
6378 {
6379 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6380 int xxspltib_value = 256;
6381 int num_insns = -1;
6382
6383 if (zero_constant (vec, mode))
6384 {
6385 if (TARGET_P9_VECTOR)
6386 return "xxspltib %x0,0";
6387
6388 else if (dest_vmx_p)
6389 return "vspltisw %0,0";
6390
6391 else
6392 return "xxlxor %x0,%x0,%x0";
6393 }
6394
6395 if (all_ones_constant (vec, mode))
6396 {
6397 if (TARGET_P9_VECTOR)
6398 return "xxspltib %x0,255";
6399
6400 else if (dest_vmx_p)
6401 return "vspltisw %0,-1";
6402
6403 else if (TARGET_P8_VECTOR)
6404 return "xxlorc %x0,%x0,%x0";
6405
6406 else
6407 gcc_unreachable ();
6408 }
6409
6410 if (TARGET_P9_VECTOR
6411 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6412 {
6413 if (num_insns == 1)
6414 {
6415 operands[2] = GEN_INT (xxspltib_value & 0xff);
6416 return "xxspltib %x0,%2";
6417 }
6418
6419 return "#";
6420 }
6421 }
6422
6423 if (TARGET_ALTIVEC)
6424 {
6425 rtx splat_vec;
6426
6427 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6428 if (zero_constant (vec, mode))
6429 return "vspltisw %0,0";
6430
6431 if (all_ones_constant (vec, mode))
6432 return "vspltisw %0,-1";
6433
6434 /* Do we need to construct a value using VSLDOI? */
6435 shift = vspltis_shifted (vec);
6436 if (shift != 0)
6437 return "#";
6438
6439 splat_vec = gen_easy_altivec_constant (vec);
6440 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6441 operands[1] = XEXP (splat_vec, 0);
6442 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6443 return "#";
6444
6445 switch (GET_MODE (splat_vec))
6446 {
6447 case E_V4SImode:
6448 return "vspltisw %0,%1";
6449
6450 case E_V8HImode:
6451 return "vspltish %0,%1";
6452
6453 case E_V16QImode:
6454 return "vspltisb %0,%1";
6455
6456 default:
6457 gcc_unreachable ();
6458 }
6459 }
6460
6461 gcc_unreachable ();
6462 }
6463
6464 /* Initialize vector TARGET to VALS. */
6465
6466 void
6467 rs6000_expand_vector_init (rtx target, rtx vals)
6468 {
6469 machine_mode mode = GET_MODE (target);
6470 machine_mode inner_mode = GET_MODE_INNER (mode);
6471 int n_elts = GET_MODE_NUNITS (mode);
6472 int n_var = 0, one_var = -1;
6473 bool all_same = true, all_const_zero = true;
6474 rtx x, mem;
6475 int i;
6476
6477 for (i = 0; i < n_elts; ++i)
6478 {
6479 x = XVECEXP (vals, 0, i);
6480 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6481 ++n_var, one_var = i;
6482 else if (x != CONST0_RTX (inner_mode))
6483 all_const_zero = false;
6484
6485 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6486 all_same = false;
6487 }
6488
6489 if (n_var == 0)
6490 {
6491 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6492 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6493 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6494 {
6495 /* Zero register. */
6496 emit_move_insn (target, CONST0_RTX (mode));
6497 return;
6498 }
6499 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6500 {
6501 /* Splat immediate. */
6502 emit_insn (gen_rtx_SET (target, const_vec));
6503 return;
6504 }
6505 else
6506 {
6507 /* Load from constant pool. */
6508 emit_move_insn (target, const_vec);
6509 return;
6510 }
6511 }
6512
6513 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6514 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6515 {
6516 rtx op[2];
6517 size_t i;
6518 size_t num_elements = all_same ? 1 : 2;
6519 for (i = 0; i < num_elements; i++)
6520 {
6521 op[i] = XVECEXP (vals, 0, i);
6522 /* Just in case there is a SUBREG with a smaller mode, do a
6523 conversion. */
6524 if (GET_MODE (op[i]) != inner_mode)
6525 {
6526 rtx tmp = gen_reg_rtx (inner_mode);
6527 convert_move (tmp, op[i], 0);
6528 op[i] = tmp;
6529 }
6530 /* Allow load with splat double word. */
6531 else if (MEM_P (op[i]))
6532 {
6533 if (!all_same)
6534 op[i] = force_reg (inner_mode, op[i]);
6535 }
6536 else if (!REG_P (op[i]))
6537 op[i] = force_reg (inner_mode, op[i]);
6538 }
6539
6540 if (all_same)
6541 {
6542 if (mode == V2DFmode)
6543 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6544 else
6545 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6546 }
6547 else
6548 {
6549 if (mode == V2DFmode)
6550 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
6551 else
6552 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
6553 }
6554 return;
6555 }
6556
6557 /* Special case initializing vector int if we are on 64-bit systems with
6558 direct move or we have the ISA 3.0 instructions. */
6559 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
6560 && TARGET_DIRECT_MOVE_64BIT)
6561 {
6562 if (all_same)
6563 {
6564 rtx element0 = XVECEXP (vals, 0, 0);
6565 if (MEM_P (element0))
6566 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6567 else
6568 element0 = force_reg (SImode, element0);
6569
6570 if (TARGET_P9_VECTOR)
6571 emit_insn (gen_vsx_splat_v4si (target, element0));
6572 else
6573 {
6574 rtx tmp = gen_reg_rtx (DImode);
6575 emit_insn (gen_zero_extendsidi2 (tmp, element0));
6576 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
6577 }
6578 return;
6579 }
6580 else
6581 {
6582 rtx elements[4];
6583 size_t i;
6584
6585 for (i = 0; i < 4; i++)
6586 elements[i] = force_reg (SImode, XVECEXP (vals, 0, i));
6587
6588 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
6589 elements[2], elements[3]));
6590 return;
6591 }
6592 }
6593
6594 /* With single-precision floating point on VSX, we know that internally
6595 single precision is actually represented as a double. Either make two
6596 V2DF vectors and convert these to single precision, or do one
6597 conversion and splat the result to the other elements. */
6598 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
6599 {
6600 if (all_same)
6601 {
6602 rtx element0 = XVECEXP (vals, 0, 0);
6603
6604 if (TARGET_P9_VECTOR)
6605 {
6606 if (MEM_P (element0))
6607 element0 = rs6000_force_indexed_or_indirect_mem (element0);
6608
6609 emit_insn (gen_vsx_splat_v4sf (target, element0));
6610 }
6611
6612 else
6613 {
6614 rtx freg = gen_reg_rtx (V4SFmode);
6615 rtx sreg = force_reg (SFmode, element0);
6616 rtx cvt = (TARGET_XSCVDPSPN
6617 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
6618 : gen_vsx_xscvdpsp_scalar (freg, sreg));
6619
6620 emit_insn (cvt);
6621 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
6622 const0_rtx));
6623 }
6624 }
6625 else
6626 {
6627 rtx dbl_even = gen_reg_rtx (V2DFmode);
6628 rtx dbl_odd = gen_reg_rtx (V2DFmode);
6629 rtx flt_even = gen_reg_rtx (V4SFmode);
6630 rtx flt_odd = gen_reg_rtx (V4SFmode);
6631 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
6632 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
6633 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
6634 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
6635
6636 /* Use VMRGEW if we can instead of doing a permute. */
6637 if (TARGET_P8_VECTOR)
6638 {
6639 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
6640 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
6641 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6642 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6643 if (BYTES_BIG_ENDIAN)
6644 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
6645 else
6646 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
6647 }
6648 else
6649 {
6650 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
6651 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
6652 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
6653 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
6654 rs6000_expand_extract_even (target, flt_even, flt_odd);
6655 }
6656 }
6657 return;
6658 }
6659
6660 /* Special case initializing vector short/char splats if we are on 64-bit
6661 systems with direct move. */
6662 if (all_same && TARGET_DIRECT_MOVE_64BIT
6663 && (mode == V16QImode || mode == V8HImode))
6664 {
6665 rtx op0 = XVECEXP (vals, 0, 0);
6666 rtx di_tmp = gen_reg_rtx (DImode);
6667
6668 if (!REG_P (op0))
6669 op0 = force_reg (GET_MODE_INNER (mode), op0);
6670
6671 if (mode == V16QImode)
6672 {
6673 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
6674 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
6675 return;
6676 }
6677
6678 if (mode == V8HImode)
6679 {
6680 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
6681 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
6682 return;
6683 }
6684 }
6685
6686 /* Store value to stack temp. Load vector element. Splat. However, splat
6687 of 64-bit items is not supported on Altivec. */
6688 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
6689 {
6690 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6691 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
6692 XVECEXP (vals, 0, 0));
6693 x = gen_rtx_UNSPEC (VOIDmode,
6694 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6695 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6696 gen_rtvec (2,
6697 gen_rtx_SET (target, mem),
6698 x)));
6699 x = gen_rtx_VEC_SELECT (inner_mode, target,
6700 gen_rtx_PARALLEL (VOIDmode,
6701 gen_rtvec (1, const0_rtx)));
6702 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
6703 return;
6704 }
6705
6706 /* One field is non-constant. Load constant then overwrite
6707 varying field. */
6708 if (n_var == 1)
6709 {
6710 rtx copy = copy_rtx (vals);
6711
6712 /* Load constant part of vector, substitute neighboring value for
6713 varying element. */
6714 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
6715 rs6000_expand_vector_init (target, copy);
6716
6717 /* Insert variable. */
6718 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
6719 return;
6720 }
6721
6722 /* Construct the vector in memory one field at a time
6723 and load the whole vector. */
6724 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6725 for (i = 0; i < n_elts; i++)
6726 emit_move_insn (adjust_address_nv (mem, inner_mode,
6727 i * GET_MODE_SIZE (inner_mode)),
6728 XVECEXP (vals, 0, i));
6729 emit_move_insn (target, mem);
6730 }
6731
6732 /* Set field ELT of TARGET to VAL. */
6733
6734 void
6735 rs6000_expand_vector_set (rtx target, rtx val, int elt)
6736 {
6737 machine_mode mode = GET_MODE (target);
6738 machine_mode inner_mode = GET_MODE_INNER (mode);
6739 rtx reg = gen_reg_rtx (mode);
6740 rtx mask, mem, x;
6741 int width = GET_MODE_SIZE (inner_mode);
6742 int i;
6743
6744 val = force_reg (GET_MODE (val), val);
6745
6746 if (VECTOR_MEM_VSX_P (mode))
6747 {
6748 rtx insn = NULL_RTX;
6749 rtx elt_rtx = GEN_INT (elt);
6750
6751 if (mode == V2DFmode)
6752 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
6753
6754 else if (mode == V2DImode)
6755 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
6756
6757 else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
6758 {
6759 if (mode == V4SImode)
6760 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
6761 else if (mode == V8HImode)
6762 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
6763 else if (mode == V16QImode)
6764 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
6765 else if (mode == V4SFmode)
6766 insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
6767 }
6768
6769 if (insn)
6770 {
6771 emit_insn (insn);
6772 return;
6773 }
6774 }
6775
6776 /* Simplify setting single element vectors like V1TImode. */
6777 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
6778 {
6779 emit_move_insn (target, gen_lowpart (mode, val));
6780 return;
6781 }
6782
6783 /* Load single variable value. */
6784 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
6785 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
6786 x = gen_rtx_UNSPEC (VOIDmode,
6787 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
6788 emit_insn (gen_rtx_PARALLEL (VOIDmode,
6789 gen_rtvec (2,
6790 gen_rtx_SET (reg, mem),
6791 x)));
6792
6793 /* Linear sequence. */
6794 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
6795 for (i = 0; i < 16; ++i)
6796 XVECEXP (mask, 0, i) = GEN_INT (i);
6797
6798 /* Set permute mask to insert element into target. */
6799 for (i = 0; i < width; ++i)
6800 XVECEXP (mask, 0, elt*width + i)
6801 = GEN_INT (i + 0x10);
6802 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
6803
6804 if (BYTES_BIG_ENDIAN)
6805 x = gen_rtx_UNSPEC (mode,
6806 gen_rtvec (3, target, reg,
6807 force_reg (V16QImode, x)),
6808 UNSPEC_VPERM);
6809 else
6810 {
6811 if (TARGET_P9_VECTOR)
6812 x = gen_rtx_UNSPEC (mode,
6813 gen_rtvec (3, reg, target,
6814 force_reg (V16QImode, x)),
6815 UNSPEC_VPERMR);
6816 else
6817 {
6818 /* Invert selector. We prefer to generate VNAND on P8 so
6819 that future fusion opportunities can kick in, but must
6820 generate VNOR elsewhere. */
6821 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
6822 rtx iorx = (TARGET_P8_VECTOR
6823 ? gen_rtx_IOR (V16QImode, notx, notx)
6824 : gen_rtx_AND (V16QImode, notx, notx));
6825 rtx tmp = gen_reg_rtx (V16QImode);
6826 emit_insn (gen_rtx_SET (tmp, iorx));
6827
6828 /* Permute with operands reversed and adjusted selector. */
6829 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
6830 UNSPEC_VPERM);
6831 }
6832 }
6833
6834 emit_insn (gen_rtx_SET (target, x));
6835 }
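
/* Illustration of the permute mask built above: for a big-endian V4SI
insert at ELT == 2 (WIDTH == 4), the V16QI selector is
{ 0, 1, ..., 7, 0x10, 0x11, 0x12, 0x13, 12, 13, 14, 15 }, so the VPERM
copies bytes 8..11 from the register holding the scalar (its second
input) and every other byte from the original TARGET. */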
6836
6837 /* Extract field ELT from VEC into TARGET. */
6838
6839 void
6840 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
6841 {
6842 machine_mode mode = GET_MODE (vec);
6843 machine_mode inner_mode = GET_MODE_INNER (mode);
6844 rtx mem;
6845
6846 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
6847 {
6848 switch (mode)
6849 {
6850 default:
6851 break;
6852 case E_V1TImode:
6853 emit_move_insn (target, gen_lowpart (TImode, vec));
6854 break;
6855 case E_V2DFmode:
6856 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
6857 return;
6858 case E_V2DImode:
6859 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
6860 return;
6861 case E_V4SFmode:
6862 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
6863 return;
6864 case E_V16QImode:
6865 if (TARGET_DIRECT_MOVE_64BIT)
6866 {
6867 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
6868 return;
6869 }
6870 else
6871 break;
6872 case E_V8HImode:
6873 if (TARGET_DIRECT_MOVE_64BIT)
6874 {
6875 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
6876 return;
6877 }
6878 else
6879 break;
6880 case E_V4SImode:
6881 if (TARGET_DIRECT_MOVE_64BIT)
6882 {
6883 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
6884 return;
6885 }
6886 break;
6887 }
6888 }
6889 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
6890 && TARGET_DIRECT_MOVE_64BIT)
6891 {
6892 if (GET_MODE (elt) != DImode)
6893 {
6894 rtx tmp = gen_reg_rtx (DImode);
6895 convert_move (tmp, elt, 0);
6896 elt = tmp;
6897 }
6898 else if (!REG_P (elt))
6899 elt = force_reg (DImode, elt);
6900
6901 switch (mode)
6902 {
6903 case E_V1TImode:
6904 emit_move_insn (target, gen_lowpart (TImode, vec));
6905 return;
6906
6907 case E_V2DFmode:
6908 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
6909 return;
6910
6911 case E_V2DImode:
6912 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
6913 return;
6914
6915 case E_V4SFmode:
6916 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
6917 return;
6918
6919 case E_V4SImode:
6920 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
6921 return;
6922
6923 case E_V8HImode:
6924 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
6925 return;
6926
6927 case E_V16QImode:
6928 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
6929 return;
6930
6931 default:
6932 gcc_unreachable ();
6933 }
6934 }
6935
6936 /* Allocate mode-sized buffer. */
6937 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
6938
6939 emit_move_insn (mem, vec);
6940 if (CONST_INT_P (elt))
6941 {
6942 int modulo_elt = INTVAL (elt) % GET_MODE_NUNITS (mode);
6943
6944 /* Add offset to field within buffer matching vector element. */
6945 mem = adjust_address_nv (mem, inner_mode,
6946 modulo_elt * GET_MODE_SIZE (inner_mode));
6947 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
6948 }
6949 else
6950 {
6951 unsigned int ele_size = GET_MODE_SIZE (inner_mode);
6952 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
6953 rtx new_addr = gen_reg_rtx (Pmode);
6954
6955 elt = gen_rtx_AND (Pmode, elt, num_ele_m1);
6956 if (ele_size > 1)
6957 elt = gen_rtx_MULT (Pmode, elt, GEN_INT (ele_size));
6958 new_addr = gen_rtx_PLUS (Pmode, XEXP (mem, 0), elt);
6959 new_addr = change_address (mem, inner_mode, new_addr);
6960 emit_move_insn (target, new_addr);
6961 }
6962 }
6963
6964 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
6965 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
6966 temporary (BASE_TMP) to fix up the address. Return the new memory address
6967 that is valid for reads or writes to the given register (SCALAR_REG). */
6968
6969 rtx
6970 rs6000_adjust_vec_address (rtx scalar_reg,
6971 rtx mem,
6972 rtx element,
6973 rtx base_tmp,
6974 machine_mode scalar_mode)
6975 {
6976 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
6977 rtx addr = XEXP (mem, 0);
6978 rtx element_offset;
6979 rtx new_addr;
6980 bool valid_addr_p;
6981
6982 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
6983 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
6984
6985 /* Calculate what we need to add to the address to get the element
6986 address. */
6987 if (CONST_INT_P (element))
6988 element_offset = GEN_INT (INTVAL (element) * scalar_size);
6989 else
6990 {
6991 int byte_shift = exact_log2 (scalar_size);
6992 gcc_assert (byte_shift >= 0);
6993
6994 if (byte_shift == 0)
6995 element_offset = element;
6996
6997 else
6998 {
6999 if (TARGET_POWERPC64)
7000 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7001 else
7002 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7003
7004 element_offset = base_tmp;
7005 }
7006 }
7007
7008 /* Create the new address pointing to the element within the vector. If we
7009 are adding 0, we don't have to change the address. */
7010 if (element_offset == const0_rtx)
7011 new_addr = addr;
7012
7013 /* A simple indirect address can be converted into a reg + offset
7014 address. */
7015 else if (REG_P (addr) || SUBREG_P (addr))
7016 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7017
7018 /* Optimize D-FORM addresses with a constant offset and a constant element
7019 number, folding the element offset into the address directly. */
7020 else if (GET_CODE (addr) == PLUS)
7021 {
7022 rtx op0 = XEXP (addr, 0);
7023 rtx op1 = XEXP (addr, 1);
7024 rtx insn;
7025
7026 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7027 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7028 {
7029 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7030 rtx offset_rtx = GEN_INT (offset);
7031
7032 if (IN_RANGE (offset, -32768, 32767)
7033 && (scalar_size < 8 || (offset & 0x3) == 0))
7034 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7035 else
7036 {
7037 emit_move_insn (base_tmp, offset_rtx);
7038 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7039 }
7040 }
7041 else
7042 {
7043 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7044 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7045
7046 /* Note, ADDI requires the register being added to be a base
7047 register. If the register was R0, load it up into the temporary
7048 and do the add. */
7049 if (op1_reg_p
7050 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7051 {
7052 insn = gen_add3_insn (base_tmp, op1, element_offset);
7053 gcc_assert (insn != NULL_RTX);
7054 emit_insn (insn);
7055 }
7056
7057 else if (ele_reg_p
7058 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7059 {
7060 insn = gen_add3_insn (base_tmp, element_offset, op1);
7061 gcc_assert (insn != NULL_RTX);
7062 emit_insn (insn);
7063 }
7064
7065 else
7066 {
7067 emit_move_insn (base_tmp, op1);
7068 emit_insn (gen_add2_insn (base_tmp, element_offset));
7069 }
7070
7071 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7072 }
7073 }
7074
7075 else
7076 {
7077 emit_move_insn (base_tmp, addr);
7078 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7079 }
7080
7081 /* If we have a PLUS, we need to see whether the particular register class
7082 allows for D-FORM or X-FORM addressing. */
7083 if (GET_CODE (new_addr) == PLUS)
7084 {
7085 rtx op1 = XEXP (new_addr, 1);
7086 addr_mask_type addr_mask;
7087 unsigned int scalar_regno = reg_or_subregno (scalar_reg);
7088
7089 gcc_assert (HARD_REGISTER_NUM_P (scalar_regno));
7090 if (INT_REGNO_P (scalar_regno))
7091 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7092
7093 else if (FP_REGNO_P (scalar_regno))
7094 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7095
7096 else if (ALTIVEC_REGNO_P (scalar_regno))
7097 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7098
7099 else
7100 gcc_unreachable ();
7101
7102 if (REG_P (op1) || SUBREG_P (op1))
7103 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7104 else
7105 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7106 }
7107
7108 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7109 valid_addr_p = true;
7110
7111 else
7112 valid_addr_p = false;
7113
7114 if (!valid_addr_p)
7115 {
7116 emit_move_insn (base_tmp, new_addr);
7117 new_addr = base_tmp;
7118 }
7119
7120 return change_address (mem, scalar_mode, new_addr);
7121 }
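
/* Worked example of the D-form folding above, with hypothetical
registers: for a MEM address (plus r9 48), a constant ELEMENT of 2 and
a DImode scalar (SCALAR_SIZE == 8), element_offset is 16 and the
combined offset 64 is both in the signed 16-bit range and a multiple
of 4, so the new address is simply (plus r9 64) and no fixup
instructions are emitted. */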
7122
7123 /* Split a variable vec_extract operation into the component instructions. */
7124
7125 void
7126 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7127 rtx tmp_altivec)
7128 {
7129 machine_mode mode = GET_MODE (src);
7130 machine_mode scalar_mode = GET_MODE_INNER (GET_MODE (src));
7131 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7132 int byte_shift = exact_log2 (scalar_size);
7133
7134 gcc_assert (byte_shift >= 0);
7135
7136 /* If we are given a memory address, optimize to load just the element. We
7137 don't have to adjust the vector element number on little endian
7138 systems. */
7139 if (MEM_P (src))
7140 {
7141 int num_elements = GET_MODE_NUNITS (mode);
7142 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7143
7144 emit_insn (gen_anddi3 (element, element, num_ele_m1));
7145 gcc_assert (REG_P (tmp_gpr));
7146 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7147 tmp_gpr, scalar_mode));
7148 return;
7149 }
7150
7151 else if (REG_P (src) || SUBREG_P (src))
7152 {
7153 int num_elements = GET_MODE_NUNITS (mode);
7154 int bits_in_element = mode_to_bits (GET_MODE_INNER (mode));
7155 int bit_shift = 7 - exact_log2 (num_elements);
7156 rtx element2;
7157 unsigned int dest_regno = reg_or_subregno (dest);
7158 unsigned int src_regno = reg_or_subregno (src);
7159 unsigned int element_regno = reg_or_subregno (element);
7160
7161 gcc_assert (REG_P (tmp_gpr));
7162
7163 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7164 a general purpose register. */
7165 if (TARGET_P9_VECTOR
7166 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7167 && INT_REGNO_P (dest_regno)
7168 && ALTIVEC_REGNO_P (src_regno)
7169 && INT_REGNO_P (element_regno))
7170 {
7171 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7172 rtx element_si = gen_rtx_REG (SImode, element_regno);
7173
7174 if (mode == V16QImode)
7175 emit_insn (BYTES_BIG_ENDIAN
7176 ? gen_vextublx (dest_si, element_si, src)
7177 : gen_vextubrx (dest_si, element_si, src));
7178
7179 else if (mode == V8HImode)
7180 {
7181 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7182 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7183 emit_insn (BYTES_BIG_ENDIAN
7184 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7185 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7186 }
7187
7188
7189 else
7190 {
7191 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7192 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7193 emit_insn (BYTES_BIG_ENDIAN
7194 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7195 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7196 }
7197
7198 return;
7199 }
7200
7201
7202 gcc_assert (REG_P (tmp_altivec));
7203
7204 /* For little endian, adjust the element ordering. For V2DI/V2DF we can
7205 use an XOR; otherwise we need to subtract. The shift amount is chosen
7206 so that VSLO will shift the element into the upper position (adding 3
7207 converts a byte shift into a bit shift). */
7208 if (scalar_size == 8)
7209 {
7210 if (!BYTES_BIG_ENDIAN)
7211 {
7212 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7213 element2 = tmp_gpr;
7214 }
7215 else
7216 element2 = element;
7217
7218 /* Generate RLDIC directly: shift the element number left 6 bits to
7219 scale it by 64, and mask to the single meaningful bit. */
7220 emit_insn (gen_rtx_SET (tmp_gpr,
7221 gen_rtx_AND (DImode,
7222 gen_rtx_ASHIFT (DImode,
7223 element2,
7224 GEN_INT (6)),
7225 GEN_INT (64))));
7226 }
7227 else
7228 {
7229 if (!BYTES_BIG_ENDIAN)
7230 {
7231 rtx num_ele_m1 = GEN_INT (num_elements - 1);
7232
7233 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7234 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7235 element2 = tmp_gpr;
7236 }
7237 else
7238 element2 = element;
7239
7240 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7241 }
7242
7243 /* Get the value into the lower byte of the Altivec register where VSLO
7244 expects it. */
7245 if (TARGET_P9_VECTOR)
7246 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7247 else if (can_create_pseudo_p ())
7248 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7249 else
7250 {
7251 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7252 emit_move_insn (tmp_di, tmp_gpr);
7253 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7254 }
7255
7256 /* Do the VSLO to get the value into the final location. */
7257 switch (mode)
7258 {
7259 case E_V2DFmode:
7260 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7261 return;
7262
7263 case E_V2DImode:
7264 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7265 return;
7266
7267 case E_V4SFmode:
7268 {
7269 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7270 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7271 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7272 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7273 tmp_altivec));
7274
7275 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7276 return;
7277 }
7278
7279 case E_V4SImode:
7280 case E_V8HImode:
7281 case E_V16QImode:
7282 {
7283 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7284 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7285 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7286 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7287 tmp_altivec));
7288 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7289 emit_insn (gen_lshrdi3 (tmp_gpr_di, tmp_gpr_di,
7290 GEN_INT (64 - bits_in_element)));
7291 return;
7292 }
7293
7294 default:
7295 gcc_unreachable ();
7296 }
7297
7298 return;
7299 }
7300 else
7301 gcc_unreachable ();
7302 }
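
/* Worked example of the little-endian index adjustment above: for V8HI
(num_elements == 8, bit_shift == 7 - 3 == 4), a variable element number
e is masked to e & 7, subtracted from 7, then shifted left by 4,
yielding (7 - e) * 16 -- the bit count VSLO needs to move element e of
a little-endian vector into the leftmost position. */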
7303
7304 /* Return the alignment of TYPE. The existing alignment is ALIGN. HOW
7305 selects whether the alignment is ABI-mandated, optional, or
7306 both ABI and optional alignment. */
7307
7308 unsigned int
7309 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7310 {
7311 if (how != align_opt)
7312 {
7313 if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
7314 align = 128;
7315 }
7316
7317 if (how != align_abi)
7318 {
7319 if (TREE_CODE (type) == ARRAY_TYPE
7320 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7321 {
7322 if (align < BITS_PER_WORD)
7323 align = BITS_PER_WORD;
7324 }
7325 }
7326
7327 return align;
7328 }
7329
7330 /* Implement TARGET_SLOW_UNALIGNED_ACCESS. Altivec vector memory
7331 instructions simply ignore the low bits; VSX memory instructions
7332 are aligned to 4 or 8 bytes. */
7333
7334 static bool
7335 rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
7336 {
7337 return (STRICT_ALIGNMENT
7338 || (!TARGET_EFFICIENT_UNALIGNED_VSX
7339 && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
7340 || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
7341 && (int) align < VECTOR_ALIGN (mode)))));
7342 }
7343
7344 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7345
7346 bool
7347 rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
7348 {
7349 if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
7350 {
7351 if (computed != 128)
7352 {
7353 static bool warned;
7354 if (!warned && warn_psabi)
7355 {
7356 warned = true;
7357 inform (input_location,
7358 "the layout of aggregates containing vectors with"
7359 " %d-byte alignment has changed in GCC 5",
7360 computed / BITS_PER_UNIT);
7361 }
7362 }
7363 /* In current GCC there is no special case. */
7364 return false;
7365 }
7366
7367 return false;
7368 }
7369
7370 /* AIX increases natural record alignment to doubleword if the first
7371 field is an FP double, while the FP fields remain word-aligned. */
7372
7373 unsigned int
7374 rs6000_special_round_type_align (tree type, unsigned int computed,
7375 unsigned int specified)
7376 {
7377 unsigned int align = MAX (computed, specified);
7378 tree field = TYPE_FIELDS (type);
7379
7380 /* Skip all non-field decls. */
7381 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7382 field = DECL_CHAIN (field);
7383
7384 if (field != NULL && field != type)
7385 {
7386 type = TREE_TYPE (field);
7387 while (TREE_CODE (type) == ARRAY_TYPE)
7388 type = TREE_TYPE (type);
7389
7390 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7391 align = MAX (align, 64);
7392 }
7393
7394 return align;
7395 }
7396
7397 /* Darwin increases record alignment to the natural alignment of
7398 the first field. */
7399
7400 unsigned int
7401 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7402 unsigned int specified)
7403 {
7404 unsigned int align = MAX (computed, specified);
7405
7406 if (TYPE_PACKED (type))
7407 return align;
7408
7409 /* Find the first field, looking down into aggregates. */
7410 do {
7411 tree field = TYPE_FIELDS (type);
7412 /* Skip all non-field decls. */
7413 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7414 field = DECL_CHAIN (field);
7415 if (! field)
7416 break;
7417 /* A packed field does not contribute any extra alignment. */
7418 if (DECL_PACKED (field))
7419 return align;
7420 type = TREE_TYPE (field);
7421 while (TREE_CODE (type) == ARRAY_TYPE)
7422 type = TREE_TYPE (type);
7423 } while (AGGREGATE_TYPE_P (type));
7424
7425 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
7426 align = MAX (align, TYPE_ALIGN (type));
7427
7428 return align;
7429 }
7430
7431 /* Return 1 for an operand in small memory on V.4/eabi. */
7432
7433 int
7434 small_data_operand (rtx op ATTRIBUTE_UNUSED,
7435 machine_mode mode ATTRIBUTE_UNUSED)
7436 {
7437 #if TARGET_ELF
7438 rtx sym_ref;
7439
7440 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
7441 return 0;
7442
7443 if (DEFAULT_ABI != ABI_V4)
7444 return 0;
7445
7446 if (SYMBOL_REF_P (op))
7447 sym_ref = op;
7448
7449 else if (GET_CODE (op) != CONST
7450 || GET_CODE (XEXP (op, 0)) != PLUS
7451 || !SYMBOL_REF_P (XEXP (XEXP (op, 0), 0))
7452 || !CONST_INT_P (XEXP (XEXP (op, 0), 1)))
7453 return 0;
7454
7455 else
7456 {
7457 rtx sum = XEXP (op, 0);
7458 HOST_WIDE_INT summand;
7459
7460 /* We have to be careful here, because it is the referenced address
7461 that must be 32k from _SDA_BASE_, not just the symbol. */
7462 summand = INTVAL (XEXP (sum, 1));
7463 if (summand < 0 || summand > g_switch_value)
7464 return 0;
7465
7466 sym_ref = XEXP (sum, 0);
7467 }
7468
7469 return SYMBOL_REF_SMALL_P (sym_ref);
7470 #else
7471 return 0;
7472 #endif
7473 }
7474
7475 /* Return true if either operand is a general purpose register. */
7476
7477 bool
7478 gpr_or_gpr_p (rtx op0, rtx op1)
7479 {
7480 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
7481 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
7482 }
7483
7484 /* Return true if this is a move direct operation between GPR registers and
7485 floating point/VSX registers. */
7486
7487 bool
7488 direct_move_p (rtx op0, rtx op1)
7489 {
7490 int regno0, regno1;
7491
7492 if (!REG_P (op0) || !REG_P (op1))
7493 return false;
7494
7495 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
7496 return false;
7497
7498 regno0 = REGNO (op0);
7499 regno1 = REGNO (op1);
7500 if (!HARD_REGISTER_NUM_P (regno0) || !HARD_REGISTER_NUM_P (regno1))
7501 return false;
7502
7503 if (INT_REGNO_P (regno0))
7504 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
7505
7506 else if (INT_REGNO_P (regno1))
7507 {
7508 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
7509 return true;
7510
7511 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
7512 return true;
7513 }
7514
7515 return false;
7516 }
7517
7518 /* Return true if the OFFSET is valid for the quad address instructions that
7519 use d-form (register + offset) addressing. */
7520
7521 static inline bool
7522 quad_address_offset_p (HOST_WIDE_INT offset)
7523 {
7524 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
7525 }
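
/* For illustration: quad_address_offset_p accepts 0, 16 and 32752 (in
range and multiples of 16) but rejects 32760 (not a multiple of 16)
and 32768 (outside the signed 16-bit range). */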
7526
7527 /* Return true if ADDR is an acceptable address for a quad memory
7528 operation of mode MODE (either LQ/STQ for general purpose registers, or
7529 LXV/STXV for vector registers under ISA 3.0). STRICT selects whether
7530 strict or non-strict register number checking is applied to the base
7531 register. */
7532
7533 bool
7534 quad_address_p (rtx addr, machine_mode mode, bool strict)
7535 {
7536 rtx op0, op1;
7537
7538 if (GET_MODE_SIZE (mode) != 16)
7539 return false;
7540
7541 if (legitimate_indirect_address_p (addr, strict))
7542 return true;
7543
7544 if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
7545 return false;
7546
7547 if (GET_CODE (addr) != PLUS)
7548 return false;
7549
7550 op0 = XEXP (addr, 0);
7551 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
7552 return false;
7553
7554 op1 = XEXP (addr, 1);
7555 if (!CONST_INT_P (op1))
7556 return false;
7557
7558 return quad_address_offset_p (INTVAL (op1));
7559 }
7560
7561 /* Return true if this is a load or store quad operation. This function does
7562 not handle the atomic quad memory instructions. */
7563
7564 bool
7565 quad_load_store_p (rtx op0, rtx op1)
7566 {
7567 bool ret;
7568
7569 if (!TARGET_QUAD_MEMORY)
7570 ret = false;
7571
7572 else if (REG_P (op0) && MEM_P (op1))
7573 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
7574 && quad_memory_operand (op1, GET_MODE (op1))
7575 && !reg_overlap_mentioned_p (op0, op1));
7576
7577 else if (MEM_P (op0) && REG_P (op1))
7578 ret = (quad_memory_operand (op0, GET_MODE (op0))
7579 && quad_int_reg_operand (op1, GET_MODE (op1)));
7580
7581 else
7582 ret = false;
7583
7584 if (TARGET_DEBUG_ADDR)
7585 {
7586 fprintf (stderr, "\n========== quad_load_store, return %s\n",
7587 ret ? "true" : "false");
7588 debug_rtx (gen_rtx_SET (op0, op1));
7589 }
7590
7591 return ret;
7592 }
7593
7594 /* Given an address, return a constant offset term if one exists. */
7595
7596 static rtx
7597 address_offset (rtx op)
7598 {
7599 if (GET_CODE (op) == PRE_INC
7600 || GET_CODE (op) == PRE_DEC)
7601 op = XEXP (op, 0);
7602 else if (GET_CODE (op) == PRE_MODIFY
7603 || GET_CODE (op) == LO_SUM)
7604 op = XEXP (op, 1);
7605
7606 if (GET_CODE (op) == CONST)
7607 op = XEXP (op, 0);
7608
7609 if (GET_CODE (op) == PLUS)
7610 op = XEXP (op, 1);
7611
7612 if (CONST_INT_P (op))
7613 return op;
7614
7615 return NULL_RTX;
7616 }
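
/* For illustration: address_offset returns 16 for (plus r9 16), returns
8 for (lo_sum r9 (const (plus (symbol_ref "x") 8))), and returns
NULL_RTX for a plain (reg r9), which has no constant term. */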
7617
7618 /* Return true if the MEM operand is a memory operand suitable for use
7619 with a (full width, possibly multiple) gpr load/store. On
7620 powerpc64 this means the offset must be divisible by 4.
7621 Implements 'Y' constraint.
7622
7623 Accept direct, indexed, offset, lo_sum and tocref. Since this is
7624 a constraint function we know the operand has satisfied a suitable
7625 memory predicate.
7626
7627 Offsetting a lo_sum should not be allowed, except where we know by
7628 alignment that a 32k boundary is not crossed. Note that by
7629 "offsetting" here we mean a further offset to access parts of the
7630 MEM. It's fine to have a lo_sum where the inner address is offset
7631 from a sym, since the same sym+offset will appear in the high part
7632 of the address calculation. */
7633
7634 bool
7635 mem_operand_gpr (rtx op, machine_mode mode)
7636 {
7637 unsigned HOST_WIDE_INT offset;
7638 int extra;
7639 rtx addr = XEXP (op, 0);
7640
7641 /* PR85755: Allow PRE_INC and PRE_DEC addresses. */
7642 if (TARGET_UPDATE
7643 && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
7644 && mode_supports_pre_incdec_p (mode)
7645 && legitimate_indirect_address_p (XEXP (addr, 0), false))
7646 return true;
7647
7648 /* Don't allow non-offsettable addresses. See PRs 83969 and 84279. */
7649 if (!rs6000_offsettable_memref_p (op, mode, false))
7650 return false;
7651
7652 op = address_offset (addr);
7653 if (op == NULL_RTX)
7654 return true;
7655
7656 offset = INTVAL (op);
7657 if (TARGET_POWERPC64 && (offset & 3) != 0)
7658 return false;
7659
7660 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7661 if (extra < 0)
7662 extra = 0;
7663
7664 if (GET_CODE (addr) == LO_SUM)
7665 /* For lo_sum addresses, we must allow any offset except one that
7666 causes a wrap, so test only the low 16 bits. */
7667 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7668
7669 return offset + 0x8000 < 0x10000u - extra;
7670 }
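
/* Worked example of the final range test above: on powerpc64 a TImode
access (extra == 8) at offset 32756 passes, since 32756 + 0x8000 ==
65524 < 65528, while offset 32760 fails -- its second doubleword would
sit at 32768, outside the signed 16-bit displacement range. */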
7671
7672 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
7673 enforce an offset divisible by 4 even for 32-bit. */
7674
7675 bool
7676 mem_operand_ds_form (rtx op, machine_mode mode)
7677 {
7678 unsigned HOST_WIDE_INT offset;
7679 int extra;
7680 rtx addr = XEXP (op, 0);
7681
7682 if (!offsettable_address_p (false, mode, addr))
7683 return false;
7684
7685 op = address_offset (addr);
7686 if (op == NULL_RTX)
7687 return true;
7688
7689 offset = INTVAL (op);
7690 if ((offset & 3) != 0)
7691 return false;
7692
7693 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
7694 if (extra < 0)
7695 extra = 0;
7696
7697 if (GET_CODE (addr) == LO_SUM)
7698 /* For lo_sum addresses, we must allow any offset except one that
7699 causes a wrap, so test only the low 16 bits. */
7700 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
7701
7702 return offset + 0x8000 < 0x10000u - extra;
7703 }
7704 \f
7705 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
7706
7707 static bool
7708 reg_offset_addressing_ok_p (machine_mode mode)
7709 {
7710 switch (mode)
7711 {
7712 case E_V16QImode:
7713 case E_V8HImode:
7714 case E_V4SFmode:
7715 case E_V4SImode:
7716 case E_V2DFmode:
7717 case E_V2DImode:
7718 case E_V1TImode:
7719 case E_TImode:
7720 case E_TFmode:
7721 case E_KFmode:
7722 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
7723 ISA 3.0 vector d-form addressing mode was added. While TImode is not
7724 a vector mode, if we want to use the VSX registers to move it around,
7725 we need to restrict ourselves to reg+reg addressing. Similarly for
7726 IEEE 128-bit floating point that is passed in a single vector
7727 register. */
7728 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
7729 return mode_supports_dq_form (mode);
7730 break;
7731
7732 case E_SDmode:
7733 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
7734 addressing for the LFIWZX and STFIWX instructions. */
7735 if (TARGET_NO_SDMODE_STACK)
7736 return false;
7737 break;
7738
7739 default:
7740 break;
7741 }
7742
7743 return true;
7744 }
7745
7746 static bool
7747 virtual_stack_registers_memory_p (rtx op)
7748 {
7749 int regnum;
7750
7751 if (REG_P (op))
7752 regnum = REGNO (op);
7753
7754 else if (GET_CODE (op) == PLUS
7755 && REG_P (XEXP (op, 0))
7756 && CONST_INT_P (XEXP (op, 1)))
7757 regnum = REGNO (XEXP (op, 0));
7758
7759 else
7760 return false;
7761
7762 return (regnum >= FIRST_VIRTUAL_REGISTER
7763 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
7764 }
7765
7766 /* Return true if a MODE sized memory accesses to OP plus OFFSET
7767 is known to not straddle a 32k boundary. This function is used
7768 to determine whether -mcmodel=medium code can use TOC pointer
7769 relative addressing for OP. This means the alignment of the TOC
7770 pointer must also be taken into account, and unfortunately that is
7771 only 8 bytes. */
7772
7773 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
7774 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
7775 #endif
7776
7777 static bool
7778 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
7779 machine_mode mode)
7780 {
7781 tree decl;
7782 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
7783
7784 if (!SYMBOL_REF_P (op))
7785 return false;
7786
7787 /* ISA 3.0 vector d-form addressing is restricted; don't allow
7788 SYMBOL_REF. */
7789 if (mode_supports_dq_form (mode))
7790 return false;
7791
7792 dsize = GET_MODE_SIZE (mode);
7793 decl = SYMBOL_REF_DECL (op);
7794 if (!decl)
7795 {
7796 if (dsize == 0)
7797 return false;
7798
7799 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
7800 replacing memory addresses with an anchor plus offset. We
7801 could find the decl by rummaging around in the block->objects
7802 VEC for the given offset but that seems like too much work. */
7803 dalign = BITS_PER_UNIT;
7804 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
7805 && SYMBOL_REF_ANCHOR_P (op)
7806 && SYMBOL_REF_BLOCK (op) != NULL)
7807 {
7808 struct object_block *block = SYMBOL_REF_BLOCK (op);
7809
7810 dalign = block->alignment;
7811 offset += SYMBOL_REF_BLOCK_OFFSET (op);
7812 }
7813 else if (CONSTANT_POOL_ADDRESS_P (op))
7814 {
7815 /* It would be nice to have get_pool_align()... */
7816 machine_mode cmode = get_pool_mode (op);
7817
7818 dalign = GET_MODE_ALIGNMENT (cmode);
7819 }
7820 }
7821 else if (DECL_P (decl))
7822 {
7823 dalign = DECL_ALIGN (decl);
7824
7825 if (dsize == 0)
7826 {
7827 /* Allow BLKmode when the entire object is known to not
7828 cross a 32k boundary. */
7829 if (!DECL_SIZE_UNIT (decl))
7830 return false;
7831
7832 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
7833 return false;
7834
7835 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
7836 if (dsize > 32768)
7837 return false;
7838
7839 dalign /= BITS_PER_UNIT;
7840 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7841 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7842 return dalign >= dsize;
7843 }
7844 }
7845 else
7846 gcc_unreachable ();
7847
7848 /* Find how many bits of the alignment we know for this access. */
7849 dalign /= BITS_PER_UNIT;
7850 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
7851 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
7852 mask = dalign - 1;
7853 lsb = offset & -offset;
7854 mask &= lsb - 1;
7855 dalign = mask + 1;
7856
7857 return dalign >= dsize;
7858 }
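
/* Worked example of the lsb/mask arithmetic above: for a decl aligned
to 8 bytes and OFFSET == 4, the lowest set bit of the offset is 4, so
the access is only known to be 4-byte aligned; a DSIZE of 8 is then
rejected, since the access could straddle a 32k boundary, while a
DSIZE of 4 is accepted. */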
7859
7860 static bool
7861 constant_pool_expr_p (rtx op)
7862 {
7863 rtx base, offset;
7864
7865 split_const (op, &base, &offset);
7866 return (SYMBOL_REF_P (base)
7867 && CONSTANT_POOL_ADDRESS_P (base)
7868 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
7869 }
7870
7871 /* These are only used to pass through from print_operand/print_operand_address
7872 to rs6000_output_addr_const_extra over the intervening function
7873 output_addr_const, which is not target code. */
7874 static const_rtx tocrel_base_oac, tocrel_offset_oac;
7875
7876 /* Return true if OP is a toc pointer relative address (the output
7877 of create_TOC_reference). If STRICT, do not match non-split
7878 -mcmodel=large/medium toc pointer relative addresses. If the pointers
7879 are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
7880 TOCREL_OFFSET_RET respectively. */
7881
7882 bool
7883 toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
7884 const_rtx *tocrel_offset_ret)
7885 {
7886 if (!TARGET_TOC)
7887 return false;
7888
7889 if (TARGET_CMODEL != CMODEL_SMALL)
7890 {
7891 /* When strict, ensure we have everything tidy. */
7892 if (strict
7893 && !(GET_CODE (op) == LO_SUM
7894 && REG_P (XEXP (op, 0))
7895 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
7896 return false;
7897
7898 /* When not strict, allow non-split TOC addresses and also allow
7899 (lo_sum (high ..)) TOC addresses created during reload. */
7900 if (GET_CODE (op) == LO_SUM)
7901 op = XEXP (op, 1);
7902 }
7903
7904 const_rtx tocrel_base = op;
7905 const_rtx tocrel_offset = const0_rtx;
7906
7907 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
7908 {
7909 tocrel_base = XEXP (op, 0);
7910 tocrel_offset = XEXP (op, 1);
7911 }
7912
7913 if (tocrel_base_ret)
7914 *tocrel_base_ret = tocrel_base;
7915 if (tocrel_offset_ret)
7916 *tocrel_offset_ret = tocrel_offset;
7917
7918 return (GET_CODE (tocrel_base) == UNSPEC
7919 && XINT (tocrel_base, 1) == UNSPEC_TOCREL
7920 && REG_P (XVECEXP (tocrel_base, 0, 1))
7921 && REGNO (XVECEXP (tocrel_base, 0, 1)) == TOC_REGISTER);
7922 }
7923
7924 /* Return true if X is a constant pool address, and also for cmodel=medium
7925 if X is a toc-relative address known to be offsettable within MODE. */
7926
7927 bool
7928 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
7929 bool strict)
7930 {
7931 const_rtx tocrel_base, tocrel_offset;
7932 return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
7933 && (TARGET_CMODEL != CMODEL_MEDIUM
7934 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
7935 || mode == QImode
7936 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
7937 INTVAL (tocrel_offset), mode)));
7938 }
7939
7940 static bool
7941 legitimate_small_data_p (machine_mode mode, rtx x)
7942 {
7943 return (DEFAULT_ABI == ABI_V4
7944 && !flag_pic && !TARGET_TOC
7945 && (SYMBOL_REF_P (x) || GET_CODE (x) == CONST)
7946 && small_data_operand (x, mode));
7947 }
7948
7949 bool
7950 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
7951 bool strict, bool worst_case)
7952 {
7953 unsigned HOST_WIDE_INT offset;
7954 unsigned int extra;
7955
7956 if (GET_CODE (x) != PLUS)
7957 return false;
7958 if (!REG_P (XEXP (x, 0)))
7959 return false;
7960 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
7961 return false;
7962 if (mode_supports_dq_form (mode))
7963 return quad_address_p (x, mode, strict);
7964 if (!reg_offset_addressing_ok_p (mode))
7965 return virtual_stack_registers_memory_p (x);
7966 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
7967 return true;
7968 if (!CONST_INT_P (XEXP (x, 1)))
7969 return false;
7970
7971 offset = INTVAL (XEXP (x, 1));
7972 extra = 0;
7973 switch (mode)
7974 {
7975 case E_DFmode:
7976 case E_DDmode:
7977 case E_DImode:
7978 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
7979 addressing. */
7980 if (VECTOR_MEM_VSX_P (mode))
7981 return false;
7982
7983 if (!worst_case)
7984 break;
7985 if (!TARGET_POWERPC64)
7986 extra = 4;
7987 else if (offset & 3)
7988 return false;
7989 break;
7990
7991 case E_TFmode:
7992 case E_IFmode:
7993 case E_KFmode:
7994 case E_TDmode:
7995 case E_TImode:
7996 case E_PTImode:
7997 extra = 8;
7998 if (!worst_case)
7999 break;
8000 if (!TARGET_POWERPC64)
8001 extra = 12;
8002 else if (offset & 3)
8003 return false;
8004 break;
8005
8006 default:
8007 break;
8008 }
8009
8010 offset += 0x8000;
8011 return offset < 0x10000 - extra;
8012 }
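
/* Worked example of the EXTRA adjustment above: a worst-case TFmode
access on 32-bit (extra == 12) is rejected for offsets above 32755,
because the value is split into four word accesses at offset,
offset+4, offset+8 and offset+12, and the last displacement must still
fit in the signed 16-bit range. */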
8013
8014 bool
8015 legitimate_indexed_address_p (rtx x, int strict)
8016 {
8017 rtx op0, op1;
8018
8019 if (GET_CODE (x) != PLUS)
8020 return false;
8021
8022 op0 = XEXP (x, 0);
8023 op1 = XEXP (x, 1);
8024
8025 return (REG_P (op0) && REG_P (op1)
8026 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8027 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8028 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8029 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8030 }
8031
8032 bool
8033 avoiding_indexed_address_p (machine_mode mode)
8034 {
8035 /* Avoid indexed addressing for modes that have non-indexed
8036 load/store instruction forms. */
8037 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8038 }
8039
8040 bool
8041 legitimate_indirect_address_p (rtx x, int strict)
8042 {
8043 return REG_P (x) && INT_REG_OK_FOR_BASE_P (x, strict);
8044 }
8045
8046 bool
8047 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8048 {
8049 if (!TARGET_MACHO || !flag_pic
8050 || mode != SImode || !MEM_P (x))
8051 return false;
8052 x = XEXP (x, 0);
8053
8054 if (GET_CODE (x) != LO_SUM)
8055 return false;
8056 if (!REG_P (XEXP (x, 0)))
8057 return false;
8058 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8059 return false;
8060 x = XEXP (x, 1);
8061
8062 return CONSTANT_P (x);
8063 }
8064
8065 static bool
8066 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8067 {
8068 if (GET_CODE (x) != LO_SUM)
8069 return false;
8070 if (!REG_P (XEXP (x, 0)))
8071 return false;
8072 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8073 return false;
8074 /* Quad-word addresses are restricted, and we can't use LO_SUM. */
8075 if (mode_supports_dq_form (mode))
8076 return false;
8077 x = XEXP (x, 1);
8078
8079 if (TARGET_ELF || TARGET_MACHO)
8080 {
8081 bool large_toc_ok;
8082
8083 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8084 return false;
8085 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as that macro usually
8086 calls push_reload from old reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8087 recognizes some LO_SUM addresses as valid although this
8088 function says the opposite. In most cases LRA can generate correct
8089 code for address reloads through its own transformations; it is
8090 only some LO_SUM cases that it cannot manage. So we need to add
8091 code here saying that those addresses are still valid. */
8092 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8093 && small_toc_ref (x, VOIDmode));
8094 if (TARGET_TOC && ! large_toc_ok)
8095 return false;
8096 if (GET_MODE_NUNITS (mode) != 1)
8097 return false;
8098 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8099 && !(/* ??? Assume floating point reg based on mode? */
8100 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8101 return false;
8102
8103 return CONSTANT_P (x) || large_toc_ok;
8104 }
8105
8106 return false;
8107 }
8108
8109
8110 /* Try machine-dependent ways of modifying an illegitimate address
8111 to be legitimate. If we find one, return the new, valid address.
8112 This is used from only one place: `memory_address' in explow.c.
8113
8114 OLDX is the address as it was before break_out_memory_refs was
8115 called. In some cases it is useful to look at this to decide what
8116 needs to be done.
8117
8118 It is always safe for this function to do nothing. It exists to
8119 recognize opportunities to optimize the output.
8120
8121 On RS/6000, first check for the sum of a register with a constant
8122 integer that is out of range. If so, generate code to add the
8123 constant with the low-order 16 bits masked to the register and force
8124 this result into another register (this can be done with `cau').
8125 Then generate an address of REG+(CONST&0xffff), allowing for the
8126 possibility of bit 16 being a one.
8127
8128 Then check for the sum of a register and something not constant, try to
8129 load the other things into a register and return the sum. */
8130
8131 static rtx
8132 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8133 machine_mode mode)
8134 {
8135 unsigned int extra;
8136
8137 if (!reg_offset_addressing_ok_p (mode)
8138 || mode_supports_dq_form (mode))
8139 {
8140 if (virtual_stack_registers_memory_p (x))
8141 return x;
8142
8143 /* In theory we should not be seeing addresses of the form reg+0,
8144 but just in case it is generated, optimize it away. */
8145 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8146 return force_reg (Pmode, XEXP (x, 0));
8147
8148 /* For TImode with load/store quad, restrict addresses to just a single
8149 pointer, so it works with both GPRs and VSX registers. */
8150 /* Make sure both operands are registers. */
8151 else if (GET_CODE (x) == PLUS
8152 && (mode != TImode || !TARGET_VSX))
8153 return gen_rtx_PLUS (Pmode,
8154 force_reg (Pmode, XEXP (x, 0)),
8155 force_reg (Pmode, XEXP (x, 1)));
8156 else
8157 return force_reg (Pmode, x);
8158 }
8159 if (SYMBOL_REF_P (x))
8160 {
8161 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8162 if (model != 0)
8163 return rs6000_legitimize_tls_address (x, model);
8164 }
8165
8166 extra = 0;
8167 switch (mode)
8168 {
8169 case E_TFmode:
8170 case E_TDmode:
8171 case E_TImode:
8172 case E_PTImode:
8173 case E_IFmode:
8174 case E_KFmode:
8175 /* As in legitimate_offset_address_p we do not assume
8176 worst-case. The mode here is just a hint as to the registers
8177 used. A TImode is usually in gprs, but may actually be in
8178 fprs. Leave worst-case scenario for reload to handle via
8179 insn constraints. PTImode is only GPRs. */
8180 extra = 8;
8181 break;
8182 default:
8183 break;
8184 }
8185
8186 if (GET_CODE (x) == PLUS
8187 && REG_P (XEXP (x, 0))
8188 && CONST_INT_P (XEXP (x, 1))
8189 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8190 >= 0x10000 - extra))
8191 {
8192 HOST_WIDE_INT high_int, low_int;
8193 rtx sum;
8194 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8195 if (low_int >= 0x8000 - extra)
8196 low_int = 0;
8197 high_int = INTVAL (XEXP (x, 1)) - low_int;
8198 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8199 GEN_INT (high_int)), 0);
8200 return plus_constant (Pmode, sum, low_int);
8201 }
8202 else if (GET_CODE (x) == PLUS
8203 && REG_P (XEXP (x, 0))
8204 && !CONST_INT_P (XEXP (x, 1))
8205 && GET_MODE_NUNITS (mode) == 1
8206 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8207 || (/* ??? Assume floating point reg based on mode? */
8208 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
8209 && !avoiding_indexed_address_p (mode))
8210 {
8211 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8212 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8213 }
8214 else if ((TARGET_ELF
8215 #if TARGET_MACHO
8216 || !MACHO_DYNAMIC_NO_PIC_P
8217 #endif
8218 )
8219 && TARGET_32BIT
8220 && TARGET_NO_TOC
8221 && !flag_pic
8222 && !CONST_INT_P (x)
8223 && !CONST_WIDE_INT_P (x)
8224 && !CONST_DOUBLE_P (x)
8225 && CONSTANT_P (x)
8226 && GET_MODE_NUNITS (mode) == 1
8227 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8228 || (/* ??? Assume floating point reg based on mode? */
8229 TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
8230 {
8231 rtx reg = gen_reg_rtx (Pmode);
8232 if (TARGET_ELF)
8233 emit_insn (gen_elf_high (reg, x));
8234 else
8235 emit_insn (gen_macho_high (reg, x));
8236 return gen_rtx_LO_SUM (Pmode, reg, x);
8237 }
8238 else if (TARGET_TOC
8239 && SYMBOL_REF_P (x)
8240 && constant_pool_expr_p (x)
8241 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8242 return create_TOC_reference (x, NULL_RTX);
8243 else
8244 return x;
8245 }
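
/* Worked example of the high/low split above, with a hypothetical base
register: for x == (plus r3 70000), low_int becomes ((70000 & 0xffff)
^ 0x8000) - 0x8000 == 4464 and high_int == 65536; the high part is
added first (an addis on the base register), and the returned address
is (plus sum 4464), a valid 16-bit displacement. */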
8246
8247 /* Debug version of rs6000_legitimize_address. */
8248 static rtx
8249 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8250 {
8251 rtx ret;
8252 rtx_insn *insns;
8253
8254 start_sequence ();
8255 ret = rs6000_legitimize_address (x, oldx, mode);
8256 insns = get_insns ();
8257 end_sequence ();
8258
8259 if (ret != x)
8260 {
8261 fprintf (stderr,
8262 "\nrs6000_legitimize_address: mode %s, old code %s, "
8263 "new code %s, modified\n",
8264 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8265 GET_RTX_NAME (GET_CODE (ret)));
8266
8267 fprintf (stderr, "Original address:\n");
8268 debug_rtx (x);
8269
8270 fprintf (stderr, "oldx:\n");
8271 debug_rtx (oldx);
8272
8273 fprintf (stderr, "New address:\n");
8274 debug_rtx (ret);
8275
8276 if (insns)
8277 {
8278 fprintf (stderr, "Insns added:\n");
8279 debug_rtx_list (insns, 20);
8280 }
8281 }
8282 else
8283 {
8284 fprintf (stderr,
8285 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8286 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8287
8288 debug_rtx (x);
8289 }
8290
8291 if (insns)
8292 emit_insn (insns);
8293
8294 return ret;
8295 }
8296
8297 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8298 We need to emit DTP-relative relocations. */
8299
8300 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8301 static void
8302 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8303 {
8304 switch (size)
8305 {
8306 case 4:
8307 fputs ("\t.long\t", file);
8308 break;
8309 case 8:
8310 fputs (DOUBLE_INT_ASM_OP, file);
8311 break;
8312 default:
8313 gcc_unreachable ();
8314 }
8315 output_addr_const (file, x);
8316 if (TARGET_ELF)
8317 fputs ("@dtprel+0x8000", file);
8318 else if (TARGET_XCOFF && SYMBOL_REF_P (x))
8319 {
8320 switch (SYMBOL_REF_TLS_MODEL (x))
8321 {
8322 case 0:
8323 break;
8324 case TLS_MODEL_LOCAL_EXEC:
8325 fputs ("@le", file);
8326 break;
8327 case TLS_MODEL_INITIAL_EXEC:
8328 fputs ("@ie", file);
8329 break;
8330 case TLS_MODEL_GLOBAL_DYNAMIC:
8331 case TLS_MODEL_LOCAL_DYNAMIC:
8332 fputs ("@m", file);
8333 break;
8334 default:
8335 gcc_unreachable ();
8336 }
8337 }
8338 }
8339
8340 /* Return true if X is a symbol that refers to real (rather than emulated)
8341 TLS. */
8342
8343 static bool
8344 rs6000_real_tls_symbol_ref_p (rtx x)
8345 {
8346 return (SYMBOL_REF_P (x)
8347 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8348 }
8349
8350 /* In the name of slightly smaller debug output, and to cater to
8351 general assembler lossage, recognize various UNSPEC sequences
8352 and turn them back into a direct symbol reference. */
8353
8354 static rtx
8355 rs6000_delegitimize_address (rtx orig_x)
8356 {
8357 rtx x, y, offset;
8358
8359 if (GET_CODE (orig_x) == UNSPEC && XINT (orig_x, 1) == UNSPEC_FUSION_GPR)
8360 orig_x = XVECEXP (orig_x, 0, 0);
8361
8362 orig_x = delegitimize_mem_from_attrs (orig_x);
8363
8364 x = orig_x;
8365 if (MEM_P (x))
8366 x = XEXP (x, 0);
8367
8368 y = x;
8369 if (TARGET_CMODEL != CMODEL_SMALL && GET_CODE (y) == LO_SUM)
8370 y = XEXP (y, 1);
8371
8372 offset = NULL_RTX;
8373 if (GET_CODE (y) == PLUS
8374 && GET_MODE (y) == Pmode
8375 && CONST_INT_P (XEXP (y, 1)))
8376 {
8377 offset = XEXP (y, 1);
8378 y = XEXP (y, 0);
8379 }
8380
8381 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_TOCREL)
8382 {
8383 y = XVECEXP (y, 0, 0);
8384
8385 #ifdef HAVE_AS_TLS
8386 /* Do not associate thread-local symbols with the original
8387 constant pool symbol. */
8388 if (TARGET_XCOFF
8389 && SYMBOL_REF_P (y)
8390 && CONSTANT_POOL_ADDRESS_P (y)
8391 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
8392 return orig_x;
8393 #endif
8394
8395 if (offset != NULL_RTX)
8396 y = gen_rtx_PLUS (Pmode, y, offset);
8397 if (!MEM_P (orig_x))
8398 return y;
8399 else
8400 return replace_equiv_address_nv (orig_x, y);
8401 }
8402
8403 if (TARGET_MACHO
8404 && GET_CODE (orig_x) == LO_SUM
8405 && GET_CODE (XEXP (orig_x, 1)) == CONST)
8406 {
8407 y = XEXP (XEXP (orig_x, 1), 0);
8408 if (GET_CODE (y) == UNSPEC && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
8409 return XVECEXP (y, 0, 0);
8410 }
8411
8412 return orig_x;
8413 }
8414
8415 /* Return true if X shouldn't be emitted into the debug info.
8416 The linker doesn't like .toc section references from
8417 .debug_* sections, so reject .toc section symbols. */
8418
8419 static bool
8420 rs6000_const_not_ok_for_debug_p (rtx x)
8421 {
8422 if (GET_CODE (x) == UNSPEC)
8423 return true;
8424 if (SYMBOL_REF_P (x)
8425 && CONSTANT_POOL_ADDRESS_P (x))
8426 {
8427 rtx c = get_pool_constant (x);
8428 machine_mode cmode = get_pool_mode (x);
8429 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
8430 return true;
8431 }
8432
8433 return false;
8434 }
8435
8436 /* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook. */
8437
8438 static bool
8439 rs6000_legitimate_combined_insn (rtx_insn *insn)
8440 {
8441 int icode = INSN_CODE (insn);
8442
8443 /* Reject creating doloop insns. Combine should not be allowed
8444 to create these for a number of reasons:
8445 1) In a nested loop, if combine creates one of these in an
8446 outer loop and the register allocator happens to allocate ctr
8447 to the outer loop insn, then the inner loop can't use ctr.
8448 Inner loops ought to be more highly optimized.
8449 2) Combine often wants to create one of these from what was
8450 originally a three insn sequence, first combining the three
8451 insns to two, then to ctrsi/ctrdi. When ctrsi/ctrdi is not
8452 allocated ctr, the splitter takes us back to the three insn
8453 sequence. It's better to stop combine at the two insn
8454 sequence.
8455 3) Faced with not being able to allocate ctr for ctrsi/ctrdi
8456 insns, the register allocator sometimes uses floating point
8457 or vector registers for the pseudo. Since ctrsi/ctrdi is a
8458 jump insn and output reloads are not implemented for jumps,
8459 the ctrsi/ctrdi splitters need to handle all possible cases.
8460 That's a pain, and it gets to be seriously difficult when a
8461 splitter that runs after reload needs memory to transfer from
8462 a gpr to fpr. See PR70098 and PR71763 which are not fixed
8463 for the difficult case. It's better to not create problems
8464 in the first place. */
8465 if (icode != CODE_FOR_nothing
8466 && (icode == CODE_FOR_bdz_si
8467 || icode == CODE_FOR_bdz_di
8468 || icode == CODE_FOR_bdnz_si
8469 || icode == CODE_FOR_bdnz_di
8470 || icode == CODE_FOR_bdztf_si
8471 || icode == CODE_FOR_bdztf_di
8472 || icode == CODE_FOR_bdnztf_si
8473 || icode == CODE_FOR_bdnztf_di))
8474 return false;
8475
8476 return true;
8477 }
8478
8479 /* Construct the SYMBOL_REF for the tls_get_addr function. */
8480
8481 static GTY(()) rtx rs6000_tls_symbol;
8482 static rtx
8483 rs6000_tls_get_addr (void)
8484 {
8485 if (!rs6000_tls_symbol)
8486 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
8487
8488 return rs6000_tls_symbol;
8489 }
8490
8491 /* Construct the SYMBOL_REF for TLS GOT references. */
8492
8493 static GTY(()) rtx rs6000_got_symbol;
8494 static rtx
8495 rs6000_got_sym (void)
8496 {
8497 if (!rs6000_got_symbol)
8498 {
8499 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
8500 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
8501 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
8502 }
8503
8504 return rs6000_got_symbol;
8505 }
8506
8507 /* AIX Thread-Local Address support. */
8508
8509 static rtx
8510 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
8511 {
8512 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
8513 const char *name;
8514 char *tlsname;
8515
8516 name = XSTR (addr, 0);
8517 /* Append TLS CSECT qualifier, unless the symbol already is qualified
8518 or the symbol will be in TLS private data section. */
8519 if (name[strlen (name) - 1] != ']'
8520 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
8521 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
8522 {
8523 tlsname = XALLOCAVEC (char, strlen (name) + 4);
8524 strcpy (tlsname, name);
8525 strcat (tlsname,
8526 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
8527 tlsaddr = copy_rtx (addr);
8528 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
8529 }
8530 else
8531 tlsaddr = addr;
8532
8533 /* Place addr into TOC constant pool. */
8534 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
8535
8536 /* Output the TOC entry and create the MEM referencing the value. */
8537 if (constant_pool_expr_p (XEXP (sym, 0))
8538 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
8539 {
8540 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
8541 mem = gen_const_mem (Pmode, tocref);
8542 set_mem_alias_set (mem, get_TOC_alias_set ());
8543 }
8544 else
8545 return sym;
8546
8547 /* Use global-dynamic for local-dynamic. */
8548 if (model == TLS_MODEL_GLOBAL_DYNAMIC
8549 || model == TLS_MODEL_LOCAL_DYNAMIC)
8550 {
8551 /* Create new TOC reference for @m symbol. */
8552 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
8553 tlsname = XALLOCAVEC (char, strlen (name) + 1);
8554 strcpy (tlsname, "*LCM");
8555 strcat (tlsname, name + 3);
8556 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
8557 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
8558 tocref = create_TOC_reference (modaddr, NULL_RTX);
8559 rtx modmem = gen_const_mem (Pmode, tocref);
8560 set_mem_alias_set (modmem, get_TOC_alias_set ());
8561
8562 rtx modreg = gen_reg_rtx (Pmode);
8563 emit_insn (gen_rtx_SET (modreg, modmem));
8564
8565 tmpreg = gen_reg_rtx (Pmode);
8566 emit_insn (gen_rtx_SET (tmpreg, mem));
8567
8568 dest = gen_reg_rtx (Pmode);
8569 if (TARGET_32BIT)
8570 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
8571 else
8572 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
8573 return dest;
8574 }
8575 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
8576 else if (TARGET_32BIT)
8577 {
8578 tlsreg = gen_reg_rtx (SImode);
8579 emit_insn (gen_tls_get_tpointer (tlsreg));
8580 }
8581 else
8582 tlsreg = gen_rtx_REG (DImode, 13);
8583
8584 /* Load the TOC value into temporary register. */
8585 tmpreg = gen_reg_rtx (Pmode);
8586 emit_insn (gen_rtx_SET (tmpreg, mem));
8587 set_unique_reg_note (get_last_insn (), REG_EQUAL,
8588 gen_rtx_MINUS (Pmode, addr, tlsreg));
8589
8590 /* Add TOC symbol value to TLS pointer. */
8591 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
8592
8593 return dest;
8594 }
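
/* Illustration of the XCOFF name handling above, assuming constant-pool
names of the usual "*LC.." form: a public TLS symbol "foo" has the
CSECT qualifier appended to become "foo[TL]" ("foo[UL]" for a BSS
initializer), and for the dynamic models the module handle is read
from a companion TOC entry whose name inserts an "M" after the "*LC"
prefix of the original TOC symbol name. */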
8595
8596 /* Output arg setup instructions for a !TARGET_TLS_MARKERS
8597 __tls_get_addr call. */
8598
8599 void
8600 rs6000_output_tlsargs (rtx *operands)
8601 {
8602 /* Set up operands for output_asm_insn, without modifying OPERANDS. */
8603 rtx op[3];
8604
8605 /* The set dest of the call, i.e. r3, which is also the first arg reg. */
8606 op[0] = operands[0];
8607 /* The TLS symbol from global_tlsarg stashed as CALL operand 2. */
8608 op[1] = XVECEXP (operands[2], 0, 0);
8609 if (XINT (operands[2], 1) == UNSPEC_TLSGD)
8610 {
8611 /* The GOT register. */
8612 op[2] = XVECEXP (operands[2], 0, 1);
8613 if (TARGET_CMODEL != CMODEL_SMALL)
8614 output_asm_insn ("addis %0,%2,%1@got@tlsgd@ha\n\t"
8615 "addi %0,%0,%1@got@tlsgd@l", op);
8616 else
8617 output_asm_insn ("addi %0,%2,%1@got@tlsgd", op);
8618 }
8619 else if (XINT (operands[2], 1) == UNSPEC_TLSLD)
8620 {
8621 if (TARGET_CMODEL != CMODEL_SMALL)
8622 output_asm_insn ("addis %0,%1,%&@got@tlsld@ha\n\t"
8623 "addi %0,%0,%&@got@tlsld@l", op);
8624 else
8625 output_asm_insn ("addi %0,%1,%&@got@tlsld", op);
8626 }
8627 else
8628 gcc_unreachable ();
8629 }
8630
8631 /* Passes the TLS arg value from the global-dynamic and local-dynamic
8632 emit_library_call_value calls in rs6000_legitimize_tls_address on to
8633 rs6000_call_aix and rs6000_call_sysv. This is used to emit the
8634 marker relocs put on __tls_get_addr calls. */
8635 static rtx global_tlsarg;
8636
8637 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
8638 this (thread-local) address. */
8639
8640 static rtx
8641 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
8642 {
8643 rtx dest, insn;
8644
8645 if (TARGET_XCOFF)
8646 return rs6000_legitimize_tls_address_aix (addr, model);
8647
8648 dest = gen_reg_rtx (Pmode);
8649 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
8650 {
8651 rtx tlsreg;
8652
8653 if (TARGET_64BIT)
8654 {
8655 tlsreg = gen_rtx_REG (Pmode, 13);
8656 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
8657 }
8658 else
8659 {
8660 tlsreg = gen_rtx_REG (Pmode, 2);
8661 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
8662 }
8663 emit_insn (insn);
8664 }
8665 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
8666 {
8667 rtx tlsreg, tmp;
8668
8669 tmp = gen_reg_rtx (Pmode);
8670 if (TARGET_64BIT)
8671 {
8672 tlsreg = gen_rtx_REG (Pmode, 13);
8673 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
8674 }
8675 else
8676 {
8677 tlsreg = gen_rtx_REG (Pmode, 2);
8678 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
8679 }
8680 emit_insn (insn);
8681 if (TARGET_64BIT)
8682 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
8683 else
8684 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
8685 emit_insn (insn);
8686 }
8687 else
8688 {
8689 rtx got, tga, tmp1, tmp2;
8690
8691 /* We currently use relocations like @got@tlsgd for tls, which
8692 means the linker will handle allocation of tls entries, placing
8693 them in the .got section. So use a pointer to the .got section,
8694 not one to secondary TOC sections used by 64-bit -mminimal-toc,
8695 or to secondary GOT sections used by 32-bit -fPIC. */
8696 if (TARGET_64BIT)
8697 got = gen_rtx_REG (Pmode, 2);
8698 else
8699 {
8700 if (flag_pic == 1)
8701 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
8702 else
8703 {
8704 rtx gsym = rs6000_got_sym ();
8705 got = gen_reg_rtx (Pmode);
8706 if (flag_pic == 0)
8707 rs6000_emit_move (got, gsym, Pmode);
8708 else
8709 {
8710 rtx mem, lab;
8711
8712 tmp1 = gen_reg_rtx (Pmode);
8713 tmp2 = gen_reg_rtx (Pmode);
8714 mem = gen_const_mem (Pmode, tmp1);
8715 lab = gen_label_rtx ();
8716 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
8717 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
8718 if (TARGET_LINK_STACK)
8719 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
8720 emit_move_insn (tmp2, mem);
8721 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
8722 set_unique_reg_note (last, REG_EQUAL, gsym);
8723 }
8724 }
8725 }
8726
8727 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
8728 {
8729 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, addr, got),
8730 UNSPEC_TLSGD);
8731 tga = rs6000_tls_get_addr ();
8732 global_tlsarg = arg;
8733 if (TARGET_TLS_MARKERS)
8734 {
8735 rtx argreg = gen_rtx_REG (Pmode, 3);
8736 emit_insn (gen_rtx_SET (argreg, arg));
8737 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
8738 argreg, Pmode);
8739 }
8740 else
8741 emit_library_call_value (tga, dest, LCT_CONST, Pmode);
8742 global_tlsarg = NULL_RTX;
8743
8744 /* Make a note so that the result of this call can be CSEd. */
8745 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8746 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8747 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8748 }
8749 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
8750 {
8751 rtx arg = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, got), UNSPEC_TLSLD);
8752 tga = rs6000_tls_get_addr ();
8753 tmp1 = gen_reg_rtx (Pmode);
8754 global_tlsarg = arg;
8755 if (TARGET_TLS_MARKERS)
8756 {
8757 rtx argreg = gen_rtx_REG (Pmode, 3);
8758 emit_insn (gen_rtx_SET (argreg, arg));
8759 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
8760 argreg, Pmode);
8761 }
8762 else
8763 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);
8764 global_tlsarg = NULL_RTX;
8765
8766 /* Make a note so that the result of this call can be CSEd. */
8767 rtvec vec = gen_rtvec (1, copy_rtx (arg));
8768 rtx uns = gen_rtx_UNSPEC (Pmode, vec, UNSPEC_TLS_GET_ADDR);
8769 set_unique_reg_note (get_last_insn (), REG_EQUAL, uns);
8770
8771 if (rs6000_tls_size == 16)
8772 {
8773 if (TARGET_64BIT)
8774 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
8775 else
8776 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
8777 }
8778 else if (rs6000_tls_size == 32)
8779 {
8780 tmp2 = gen_reg_rtx (Pmode);
8781 if (TARGET_64BIT)
8782 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
8783 else
8784 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
8785 emit_insn (insn);
8786 if (TARGET_64BIT)
8787 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
8788 else
8789 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
8790 }
8791 else
8792 {
8793 tmp2 = gen_reg_rtx (Pmode);
8794 if (TARGET_64BIT)
8795 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
8796 else
8797 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
8798 emit_insn (insn);
8799 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
8800 }
8801 emit_insn (insn);
8802 }
8803 else
8804 {
8805 /* IE, or 64-bit offset LE. */
8806 tmp2 = gen_reg_rtx (Pmode);
8807 if (TARGET_64BIT)
8808 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
8809 else
8810 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
8811 emit_insn (insn);
8812 if (TARGET_64BIT)
8813 insn = gen_tls_tls_64 (dest, tmp2, addr);
8814 else
8815 insn = gen_tls_tls_32 (dest, tmp2, addr);
8816 emit_insn (insn);
8817 }
8818 }
8819
8820 return dest;
8821 }
8822
8823 /* Only create the global variable for the stack protect guard if we are using
8824 the global flavor of that guard. */
8825 static tree
8826 rs6000_init_stack_protect_guard (void)
8827 {
8828 if (rs6000_stack_protector_guard == SSP_GLOBAL)
8829 return default_stack_protect_guard ();
8830
8831 return NULL_TREE;
8832 }
8833
8834 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8835
8836 static bool
8837 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8838 {
8839 if (GET_CODE (x) == HIGH
8840 && GET_CODE (XEXP (x, 0)) == UNSPEC)
8841 return true;
8842
8843 /* A TLS symbol in the TOC cannot contain a sum. */
8844 if (GET_CODE (x) == CONST
8845 && GET_CODE (XEXP (x, 0)) == PLUS
8846 && SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
8847 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
8848 return true;
8849
8850 /* Do not place an ELF TLS symbol in the constant pool. */
8851 return TARGET_ELF && tls_referenced_p (x);
8852 }
8853
8854 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
8855 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
8856 can be addressed relative to the toc pointer. */
8857
8858 static bool
8859 use_toc_relative_ref (rtx sym, machine_mode mode)
8860 {
8861 return ((constant_pool_expr_p (sym)
8862 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
8863 get_pool_mode (sym)))
8864 || (TARGET_CMODEL == CMODEL_MEDIUM
8865 && SYMBOL_REF_LOCAL_P (sym)
8866 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
8867 }
8868
8869 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
8870 that is a valid memory address for an instruction.
8871 The MODE argument is the machine mode for the MEM expression
8872 that wants to use this address.
8873
8874 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
8875 refers to a constant pool entry holding an address (or the sum of such
8876 an entry plus a constant), a short (16-bit signed) constant plus a register,
8877 the sum of two registers, or a register indirect, possibly with an
8878 auto-increment. For DFmode, DDmode and DImode with a constant plus
8879 register, we must ensure that both words are addressable or PowerPC64
8880 with offset word aligned.
8881
8882 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
8883 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
8884 because adjacent memory cells are accessed by adding word-sized offsets
8885 during assembly output. */
8886 static bool
8887 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
8888 {
8889 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
8890 bool quad_offset_p = mode_supports_dq_form (mode);
8891
8892 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
8893 if (VECTOR_MEM_ALTIVEC_P (mode)
8894 && GET_CODE (x) == AND
8895 && CONST_INT_P (XEXP (x, 1))
8896 && INTVAL (XEXP (x, 1)) == -16)
8897 x = XEXP (x, 0);
8898
8899 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
8900 return 0;
8901 if (legitimate_indirect_address_p (x, reg_ok_strict))
8902 return 1;
8903 if (TARGET_UPDATE
8904 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
8905 && mode_supports_pre_incdec_p (mode)
8906 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
8907 return 1;
8908 /* Handle restricted vector d-form offsets in ISA 3.0. */
8909 if (quad_offset_p)
8910 {
8911 if (quad_address_p (x, mode, reg_ok_strict))
8912 return 1;
8913 }
8914 else if (virtual_stack_registers_memory_p (x))
8915 return 1;
8916
8917 else if (reg_offset_p)
8918 {
8919 if (legitimate_small_data_p (mode, x))
8920 return 1;
8921 if (legitimate_constant_pool_address_p (x, mode,
8922 reg_ok_strict || lra_in_progress))
8923 return 1;
8924 }
8925
8926 /* For TImode, if we have TImode in VSX registers, only allow register
8927 indirect addresses. This will allow the values to go in either GPRs
8928 or VSX registers without reloading. The vector types would tend to
8929 go into VSX registers, so we allow REG+REG, while TImode seems
8930 somewhat split, in that some uses are GPR based, and some VSX based. */
8931 /* FIXME: We could loosen this by changing the following to
8932 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
8933 but currently we cannot allow REG+REG addressing for TImode. See
8934 PR72827 for complete details on how this ends up hoodwinking DSE. */
8935 if (mode == TImode && TARGET_VSX)
8936 return 0;
8937 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
8938 if (! reg_ok_strict
8939 && reg_offset_p
8940 && GET_CODE (x) == PLUS
8941 && REG_P (XEXP (x, 0))
8942 && (XEXP (x, 0) == virtual_stack_vars_rtx
8943 || XEXP (x, 0) == arg_pointer_rtx)
8944 && CONST_INT_P (XEXP (x, 1)))
8945 return 1;
8946 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
8947 return 1;
8948 if (!FLOAT128_2REG_P (mode)
8949 && (TARGET_HARD_FLOAT
8950 || TARGET_POWERPC64
8951 || (mode != DFmode && mode != DDmode))
8952 && (TARGET_POWERPC64 || mode != DImode)
8953 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
8954 && mode != PTImode
8955 && !avoiding_indexed_address_p (mode)
8956 && legitimate_indexed_address_p (x, reg_ok_strict))
8957 return 1;
8958 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
8959 && mode_supports_pre_modify_p (mode)
8960 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
8961 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
8962 reg_ok_strict, false)
8963 || (!avoiding_indexed_address_p (mode)
8964 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
8965 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
8966 return 1;
8967 if (reg_offset_p && !quad_offset_p
8968 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
8969 return 1;
8970 return 0;
8971 }
8972
8973 /* Debug version of rs6000_legitimate_address_p. */
8974 static bool
8975 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
8976 bool reg_ok_strict)
8977 {
8978 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
8979 fprintf (stderr,
8980 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
8981 "strict = %d, reload = %s, code = %s\n",
8982 ret ? "true" : "false",
8983 GET_MODE_NAME (mode),
8984 reg_ok_strict,
8985 (reload_completed ? "after" : "before"),
8986 GET_RTX_NAME (GET_CODE (x)));
8987 debug_rtx (x);
8988
8989 return ret;
8990 }
8991
8992 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
8993
8994 static bool
8995 rs6000_mode_dependent_address_p (const_rtx addr,
8996 addr_space_t as ATTRIBUTE_UNUSED)
8997 {
8998 return rs6000_mode_dependent_address_ptr (addr);
8999 }
9000
9001 /* Go to LABEL if ADDR (a legitimate address expression)
9002 has an effect that depends on the machine mode it is used for.
9003
9004 On the RS/6000 this is true of all integral offsets (since AltiVec and
9005 VSX modes don't allow them) and of pre-increment and pre-decrement addresses.
9006
9007 ??? Except that due to conceptual problems in offsettable_address_p
9008 we can't really report the problems of integral offsets. So leave
9009 this assuming that the adjustable offset must be valid for the
9010 sub-words of a TFmode operand, which is what we had before. */
9011
9012 static bool
9013 rs6000_mode_dependent_address (const_rtx addr)
9014 {
9015 switch (GET_CODE (addr))
9016 {
9017 case PLUS:
9018 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9019 is considered a legitimate address before reload, so there
9020 are no offset restrictions in that case. Note that this
9021 condition is safe in strict mode because any address involving
9022 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9023 been rejected as illegitimate. */
9024 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9025 && XEXP (addr, 0) != arg_pointer_rtx
9026 && CONST_INT_P (XEXP (addr, 1)))
9027 {
9028 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9029 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9030 }
9031 break;
9032
9033 case LO_SUM:
9034 /* Anything in the constant pool is sufficiently aligned that
9035 all bytes have the same high part address. */
9036 return !legitimate_constant_pool_address_p (addr, QImode, false);
9037
9038 /* Auto-increment cases are now treated generically in recog.c. */
9039 case PRE_MODIFY:
9040 return TARGET_UPDATE;
9041
9042 /* AND is only allowed in Altivec loads. */
9043 case AND:
9044 return true;
9045
9046 default:
9047 break;
9048 }
9049
9050 return false;
9051 }
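
/* A worked example of the PLUS check above (illustrative): in 32-bit mode
   the widest access handled here may touch a word at offset VAL+12, so VAL
   is mode-independent only while VAL + 0x8000 < 0x10000 - 12.  VAL == 0x7ff0
   gives 0xfff0 < 0xfff4 and is accepted; VAL == 0x7ff8 gives 0xfff8 >= 0xfff4
   and is reported as mode dependent.  */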
9052
9053 /* Debug version of rs6000_mode_dependent_address. */
9054 static bool
9055 rs6000_debug_mode_dependent_address (const_rtx addr)
9056 {
9057 bool ret = rs6000_mode_dependent_address (addr);
9058
9059 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9060 ret ? "true" : "false");
9061 debug_rtx (addr);
9062
9063 return ret;
9064 }
9065
9066 /* Implement FIND_BASE_TERM. */
9067
9068 rtx
9069 rs6000_find_base_term (rtx op)
9070 {
9071 rtx base;
9072
9073 base = op;
9074 if (GET_CODE (base) == CONST)
9075 base = XEXP (base, 0);
9076 if (GET_CODE (base) == PLUS)
9077 base = XEXP (base, 0);
9078 if (GET_CODE (base) == UNSPEC)
9079 switch (XINT (base, 1))
9080 {
9081 case UNSPEC_TOCREL:
9082 case UNSPEC_MACHOPIC_OFFSET:
9083 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9084 for aliasing purposes. */
9085 return XVECEXP (base, 0, 0);
9086 }
9087
9088 return op;
9089 }
9090
9091 /* More elaborate version of recog's offsettable_memref_p predicate
9092 that works around the ??? note of rs6000_mode_dependent_address.
9093 In particular it accepts
9094
9095 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9096
9097 in 32-bit mode, which the recog predicate rejects.  */
9098
9099 static bool
9100 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
9101 {
9102 bool worst_case;
9103
9104 if (!MEM_P (op))
9105 return false;
9106
9107 /* First mimic offsettable_memref_p. */
9108 if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
9109 return true;
9110
9111 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9112 the latter predicate knows nothing about the mode of the memory
9113 reference and, therefore, assumes that it is the largest supported
9114 mode (TFmode). As a consequence, legitimate offsettable memory
9115 references are rejected. rs6000_legitimate_offset_address_p contains
9116 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9117 at least with a little bit of help here given that we know the
9118 actual registers used. */
9119 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9120 || GET_MODE_SIZE (reg_mode) == 4);
9121 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9122 strict, worst_case);
9123 }
9124
9125 /* Determine the reassociation width to be used in reassociate_bb.
9126 This takes into account how many parallel operations we
9127 can actually do of a given type, and also the latency.
9128 P8:
9129 int add/sub 6/cycle
9130 mul 2/cycle
9131 vect add/sub/mul 2/cycle
9132 fp add/sub/mul 2/cycle
9133 dfp 1/cycle
9134 */
9135
9136 static int
9137 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9138 machine_mode mode)
9139 {
9140 switch (rs6000_tune)
9141 {
9142 case PROCESSOR_POWER8:
9143 case PROCESSOR_POWER9:
9144 if (DECIMAL_FLOAT_MODE_P (mode))
9145 return 1;
9146 if (VECTOR_MODE_P (mode))
9147 return 4;
9148 if (INTEGRAL_MODE_P (mode))
9149 return 1;
9150 if (FLOAT_MODE_P (mode))
9151 return 4;
9152 break;
9153 default:
9154 break;
9155 }
9156 return 1;
9157 }
9158
9159 /* Change register usage conditional on target flags. */
9160 static void
9161 rs6000_conditional_register_usage (void)
9162 {
9163 int i;
9164
9165 if (TARGET_DEBUG_TARGET)
9166 fprintf (stderr, "rs6000_conditional_register_usage called\n");
9167
9168 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
9169 if (TARGET_64BIT)
9170 fixed_regs[13] = call_used_regs[13]
9171 = call_really_used_regs[13] = 1;
9172
9173 /* Conditionally disable FPRs. */
9174 if (TARGET_SOFT_FLOAT)
9175 for (i = 32; i < 64; i++)
9176 fixed_regs[i] = call_used_regs[i]
9177 = call_really_used_regs[i] = 1;
9178
9179 /* The TOC register is not killed across calls in a way that is
9180 visible to the compiler. */
9181 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9182 call_really_used_regs[2] = 0;
9183
9184 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
9185 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9186
9187 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
9188 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9189 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9190 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9191
9192 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
9193 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9194 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9195 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9196
9197 if (TARGET_TOC && TARGET_MINIMAL_TOC)
9198 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
9199 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
9200
9201 if (!TARGET_ALTIVEC && !TARGET_VSX)
9202 {
9203 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
9204 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9205 call_really_used_regs[VRSAVE_REGNO] = 1;
9206 }
9207
9208 if (TARGET_ALTIVEC || TARGET_VSX)
9209 global_regs[VSCR_REGNO] = 1;
9210
9211 if (TARGET_ALTIVEC_ABI)
9212 {
9213 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
9214 call_used_regs[i] = call_really_used_regs[i] = 1;
9215
9216 /* AIX reserves VR20:31 in non-extended ABI mode. */
9217 if (TARGET_XCOFF)
9218 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
9219 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
9220 }
9221 }
9222
9223 \f
9224 /* Output insns to set DEST equal to the constant SOURCE as a series of
9225 lis, ori and shl instructions and return TRUE. */
9226
9227 bool
9228 rs6000_emit_set_const (rtx dest, rtx source)
9229 {
9230 machine_mode mode = GET_MODE (dest);
9231 rtx temp, set;
9232 rtx_insn *insn;
9233 HOST_WIDE_INT c;
9234
9235 gcc_checking_assert (CONST_INT_P (source));
9236 c = INTVAL (source);
9237 switch (mode)
9238 {
9239 case E_QImode:
9240 case E_HImode:
9241 emit_insn (gen_rtx_SET (dest, source));
9242 return true;
9243
9244 case E_SImode:
9245 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
9246
9247 emit_insn (gen_rtx_SET (copy_rtx (temp),
9248 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
9249 emit_insn (gen_rtx_SET (dest,
9250 gen_rtx_IOR (SImode, copy_rtx (temp),
9251 GEN_INT (c & 0xffff))));
9252 break;
9253
9254 case E_DImode:
9255 if (!TARGET_POWERPC64)
9256 {
9257 rtx hi, lo;
9258
9259 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
9260 DImode);
9261 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
9262 DImode);
9263 emit_move_insn (hi, GEN_INT (c >> 32));
9264 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
9265 emit_move_insn (lo, GEN_INT (c));
9266 }
9267 else
9268 rs6000_emit_set_long_const (dest, c);
9269 break;
9270
9271 default:
9272 gcc_unreachable ();
9273 }
9274
9275 insn = get_last_insn ();
9276 set = single_set (insn);
9277 if (! CONSTANT_P (SET_SRC (set)))
9278 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
9279
9280 return true;
9281 }
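
/* For instance (a sketch of the typical SImode expansion, with rT/rD as
   placeholder registers): c == 0x12345678 becomes

	lis  rT,0x1234		# rT = c & ~0xffff
	ori  rD,rT,0x5678	# rD = rT | (c & 0xffff)

   with a REG_EQUAL note recording the full constant on the final insn.  */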
9282
9283 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
9284 Output insns to set DEST equal to the constant C as a series of
9285 lis, ori and shl instructions. */
9286
9287 static void
9288 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
9289 {
9290 rtx temp;
9291 HOST_WIDE_INT ud1, ud2, ud3, ud4;
9292
9293 ud1 = c & 0xffff;
9294 c = c >> 16;
9295 ud2 = c & 0xffff;
9296 c = c >> 16;
9297 ud3 = c & 0xffff;
9298 c = c >> 16;
9299 ud4 = c & 0xffff;
9300
9301 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
9302 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
9303 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
9304
9305 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
9306 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
9307 {
9308 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9309
9310 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9311 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9312 if (ud1 != 0)
9313 emit_move_insn (dest,
9314 gen_rtx_IOR (DImode, copy_rtx (temp),
9315 GEN_INT (ud1)));
9316 }
9317 else if (ud3 == 0 && ud4 == 0)
9318 {
9319 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9320
9321 gcc_assert (ud2 & 0x8000);
9322 emit_move_insn (copy_rtx (temp),
9323 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
9324 if (ud1 != 0)
9325 emit_move_insn (copy_rtx (temp),
9326 gen_rtx_IOR (DImode, copy_rtx (temp),
9327 GEN_INT (ud1)));
9328 emit_move_insn (dest,
9329 gen_rtx_ZERO_EXTEND (DImode,
9330 gen_lowpart (SImode,
9331 copy_rtx (temp))));
9332 }
9333 else if ((ud4 == 0xffff && (ud3 & 0x8000))
9334 || (ud4 == 0 && ! (ud3 & 0x8000)))
9335 {
9336 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9337
9338 emit_move_insn (copy_rtx (temp),
9339 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
9340 if (ud2 != 0)
9341 emit_move_insn (copy_rtx (temp),
9342 gen_rtx_IOR (DImode, copy_rtx (temp),
9343 GEN_INT (ud2)));
9344 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9345 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9346 GEN_INT (16)));
9347 if (ud1 != 0)
9348 emit_move_insn (dest,
9349 gen_rtx_IOR (DImode, copy_rtx (temp),
9350 GEN_INT (ud1)));
9351 }
9352 else
9353 {
9354 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
9355
9356 emit_move_insn (copy_rtx (temp),
9357 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
9358 if (ud3 != 0)
9359 emit_move_insn (copy_rtx (temp),
9360 gen_rtx_IOR (DImode, copy_rtx (temp),
9361 GEN_INT (ud3)));
9362
9363 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
9364 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
9365 GEN_INT (32)));
9366 if (ud2 != 0)
9367 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
9368 gen_rtx_IOR (DImode, copy_rtx (temp),
9369 GEN_INT (ud2 << 16)));
9370 if (ud1 != 0)
9371 emit_move_insn (dest,
9372 gen_rtx_IOR (DImode, copy_rtx (temp),
9373 GEN_INT (ud1)));
9374 }
9375 }
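
/* An illustrative worst-case sequence from the final arm above, where all
   four 16-bit halves are nonzero, e.g. c == 0x123456789abcdef0:

	lis   rT,0x1234		# ud4, shifted and sign-adjusted
	ori   rT,rT,0x5678	# | ud3
	sldi  rT,rT,32		# move into the high doubleword
	oris  rT,rT,0x9abc	# | (ud2 << 16)
	ori   rD,rT,0xdef0	# | ud1

   Five insns; the earlier special cases need fewer.  */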
9376
9377 /* Helper for the following. Get rid of [r+r] memory refs
9378 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
9379
9380 static void
9381 rs6000_eliminate_indexed_memrefs (rtx operands[2])
9382 {
9383 if (MEM_P (operands[0])
9384 && !REG_P (XEXP (operands[0], 0))
9385 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
9386 GET_MODE (operands[0]), false))
9387 operands[0]
9388 = replace_equiv_address (operands[0],
9389 copy_addr_to_reg (XEXP (operands[0], 0)));
9390
9391 if (MEM_P (operands[1])
9392 && !REG_P (XEXP (operands[1], 0))
9393 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
9394 GET_MODE (operands[1]), false))
9395 operands[1]
9396 = replace_equiv_address (operands[1],
9397 copy_addr_to_reg (XEXP (operands[1], 0)));
9398 }
9399
9400 /* Generate a vector of constants to permute MODE for a little-endian
9401 storage operation by swapping the two halves of a vector. */
9402 static rtvec
9403 rs6000_const_vec (machine_mode mode)
9404 {
9405 int i, subparts;
9406 rtvec v;
9407
9408 switch (mode)
9409 {
9410 case E_V1TImode:
9411 subparts = 1;
9412 break;
9413 case E_V2DFmode:
9414 case E_V2DImode:
9415 subparts = 2;
9416 break;
9417 case E_V4SFmode:
9418 case E_V4SImode:
9419 subparts = 4;
9420 break;
9421 case E_V8HImode:
9422 subparts = 8;
9423 break;
9424 case E_V16QImode:
9425 subparts = 16;
9426 break;
9427 default:
9428 gcc_unreachable();
9429 }
9430
9431 v = rtvec_alloc (subparts);
9432
9433 for (i = 0; i < subparts / 2; ++i)
9434 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
9435 for (i = subparts / 2; i < subparts; ++i)
9436 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
9437
9438 return v;
9439 }
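
/* For example, for V4SImode the loops above build the selector
   { 2, 3, 0, 1 }: each element index is rotated by half the vector
   length, so a VEC_SELECT using this parallel swaps the two 64-bit
   halves of the vector.  */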
9440
9441 /* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
9442 store operation. */
9443 void
9444 rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
9445 {
9446 /* Scalar permutations are easier to express in integer modes rather than
9447 floating-point modes, so cast them here. We use V1TImode instead
9448 of TImode to ensure that the values don't go through GPRs. */
9449 if (FLOAT128_VECTOR_P (mode))
9450 {
9451 dest = gen_lowpart (V1TImode, dest);
9452 source = gen_lowpart (V1TImode, source);
9453 mode = V1TImode;
9454 }
9455
9456 /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
9457 scalar. */
9458 if (mode == TImode || mode == V1TImode)
9459 emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
9460 GEN_INT (64))));
9461 else
9462 {
9463 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
9464 emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
9465 }
9466 }
9467
9468 /* Emit a little-endian load from vector memory location SOURCE to VSX
9469 register DEST in mode MODE. The load is done with two permuting
9470 insns that represent an lxvd2x and an xxpermdi.  */
9471 void
9472 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
9473 {
9474 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9475 V1TImode). */
9476 if (mode == TImode || mode == V1TImode)
9477 {
9478 mode = V2DImode;
9479 dest = gen_lowpart (V2DImode, dest);
9480 source = adjust_address (source, V2DImode, 0);
9481 }
9482
9483 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
9484 rs6000_emit_le_vsx_permute (tmp, source, mode);
9485 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9486 }
9487
9488 /* Emit a little-endian store to vector memory location DEST from VSX
9489 register SOURCE in mode MODE. The store is done with two permuting
9490 insns that represent an xxpermdi and an stxvd2x.  */
9491 void
9492 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
9493 {
9494 /* This should never be called during or after LRA, because it does
9495 not re-permute the source register. It is intended only for use
9496 during expand. */
9497 gcc_assert (!lra_in_progress && !reload_completed);
9498
9499 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
9500 V1TImode). */
9501 if (mode == TImode || mode == V1TImode)
9502 {
9503 mode = V2DImode;
9504 dest = adjust_address (dest, V2DImode, 0);
9505 source = gen_lowpart (V2DImode, source);
9506 }
9507
9508 rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
9509 rs6000_emit_le_vsx_permute (tmp, source, mode);
9510 rs6000_emit_le_vsx_permute (dest, tmp, mode);
9511 }
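
/* A sketch of the net effect for a little-endian V4SImode load of
   { 0, 1, 2, 3 } from memory (illustrative instruction selection, with
   vsT/vsD as placeholder VSX registers):

	lxvd2x   vsT,0,rA	# register holds { 2, 3, 0, 1 }
	xxpermdi vsD,vsT,vsT,2	# swap doublewords: { 0, 1, 2, 3 }

   The store path is the mirror image, xxpermdi then stxvd2x; because the
   doubleword swap is an involution, adjacent redundant pairs created at
   expand time can be removed by later passes.  */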
9512
9513 /* Emit a sequence representing a little-endian VSX load or store,
9514 moving data from SOURCE to DEST in mode MODE. This is done
9515 separately from rs6000_emit_move to ensure it is called only
9516 during expand. LE VSX loads and stores introduced later are
9517 handled with a split. The expand-time RTL generation allows
9518 us to optimize away redundant pairs of register-permutes. */
9519 void
9520 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
9521 {
9522 gcc_assert (!BYTES_BIG_ENDIAN
9523 && VECTOR_MEM_VSX_P (mode)
9524 && !TARGET_P9_VECTOR
9525 && !gpr_or_gpr_p (dest, source)
9526 && (MEM_P (source) ^ MEM_P (dest)));
9527
9528 if (MEM_P (source))
9529 {
9530 gcc_assert (REG_P (dest) || SUBREG_P (dest));
9531 rs6000_emit_le_vsx_load (dest, source, mode);
9532 }
9533 else
9534 {
9535 if (!REG_P (source))
9536 source = force_reg (mode, source);
9537 rs6000_emit_le_vsx_store (dest, source, mode);
9538 }
9539 }
9540
9541 /* Return whether a SFmode or SImode move can be done without converting one
9542 mode to another.  This arises when we have:
9543
9544 (SUBREG:SF (REG:SI ...))
9545 (SUBREG:SI (REG:SF ...))
9546
9547 and one of the values is in a floating point/vector register, where SFmode
9548 scalars are stored in DFmode format. */
9549
9550 bool
9551 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
9552 {
9553 if (TARGET_ALLOW_SF_SUBREG)
9554 return true;
9555
9556 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
9557 return true;
9558
9559 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
9560 return true;
9561
9562 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
9563 if (SUBREG_P (dest))
9564 {
9565 rtx dest_subreg = SUBREG_REG (dest);
9566 rtx src_subreg = SUBREG_REG (src);
9567 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
9568 }
9569
9570 return false;
9571 }
9572
9573
9574 /* Helper function to change moves with:
9575
9576 (SUBREG:SF (REG:SI)) and
9577 (SUBREG:SI (REG:SF))
9578
9579 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
9580 values are stored as DFmode values in the VSX registers. We need to convert
9581 the bits before we can use a direct move or operate on the bits in the
9582 vector register as an integer type.
9583
9584 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))).  */
9585
9586 static bool
9587 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
9588 {
9589 if (TARGET_DIRECT_MOVE_64BIT && !reload_completed
9590 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
9591 && SUBREG_P (source) && sf_subreg_operand (source, mode))
9592 {
9593 rtx inner_source = SUBREG_REG (source);
9594 machine_mode inner_mode = GET_MODE (inner_source);
9595
9596 if (mode == SImode && inner_mode == SFmode)
9597 {
9598 emit_insn (gen_movsi_from_sf (dest, inner_source));
9599 return true;
9600 }
9601
9602 if (mode == SFmode && inner_mode == SImode)
9603 {
9604 emit_insn (gen_movsf_from_si (dest, inner_source));
9605 return true;
9606 }
9607 }
9608
9609 return false;
9610 }
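
/* For example (illustrative RTL; register numbers are hypothetical), a
   move such as

	(set (reg:SI 3) (subreg:SI (reg:SF 33) 0))

   is rewritten here to use movsi_from_sf, since the SF value lives in its
   VSX register in DF format and the bits must be converted to the
   single-precision layout before the direct move.  */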
9611
9612 /* Emit a move from SOURCE to DEST in mode MODE. */
9613 void
9614 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
9615 {
9616 rtx operands[2];
9617 operands[0] = dest;
9618 operands[1] = source;
9619
9620 if (TARGET_DEBUG_ADDR)
9621 {
9622 fprintf (stderr,
9623 "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
9624 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
9625 GET_MODE_NAME (mode),
9626 lra_in_progress,
9627 reload_completed,
9628 can_create_pseudo_p ());
9629 debug_rtx (dest);
9630 fprintf (stderr, "source:\n");
9631 debug_rtx (source);
9632 }
9633
9634 /* Check that we get CONST_WIDE_INT only when we should. */
9635 if (CONST_WIDE_INT_P (operands[1])
9636 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
9637 gcc_unreachable ();
9638
9639 #ifdef HAVE_AS_GNU_ATTRIBUTE
9640 /* If we use a long double type, set the flags in .gnu_attribute that say
9641 what the long double type is. This is to allow the linker's warning
9642 message for the wrong long double to be useful, even if the function does
9643 not do a call (for example, doing a 128-bit add on power9 if the long
9644 double type is IEEE 128-bit. Do not set this if __ibm128 or __floa128 are
9645 used if they aren't the default long dobule type. */
9646 if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
9647 {
9648 if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
9649 rs6000_passes_float = rs6000_passes_long_double = true;
9650
9651 else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
9652 rs6000_passes_float = rs6000_passes_long_double = true;
9653 }
9654 #endif
9655
9656 /* See if we need to special case SImode/SFmode SUBREG moves. */
9657 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
9658 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
9659 return;
9660
9661 /* Check if GCC is setting up a block move that will end up using FP
9662 registers as temporaries. We must make sure this is acceptable. */
9663 if (MEM_P (operands[0])
9664 && MEM_P (operands[1])
9665 && mode == DImode
9666 && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
9667 || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
9668 && ! (rs6000_slow_unaligned_access (SImode,
9669 (MEM_ALIGN (operands[0]) > 32
9670 ? 32 : MEM_ALIGN (operands[0])))
9671 || rs6000_slow_unaligned_access (SImode,
9672 (MEM_ALIGN (operands[1]) > 32
9673 ? 32 : MEM_ALIGN (operands[1]))))
9674 && ! MEM_VOLATILE_P (operands [0])
9675 && ! MEM_VOLATILE_P (operands [1]))
9676 {
9677 emit_move_insn (adjust_address (operands[0], SImode, 0),
9678 adjust_address (operands[1], SImode, 0));
9679 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
9680 adjust_address (copy_rtx (operands[1]), SImode, 4));
9681 return;
9682 }
9683
9684 if (can_create_pseudo_p () && MEM_P (operands[0])
9685 && !gpc_reg_operand (operands[1], mode))
9686 operands[1] = force_reg (mode, operands[1]);
9687
9688 /* Recognize the case where operand[1] is a reference to thread-local
9689 data and load its address to a register. */
9690 if (tls_referenced_p (operands[1]))
9691 {
9692 enum tls_model model;
9693 rtx tmp = operands[1];
9694 rtx addend = NULL;
9695
9696 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
9697 {
9698 addend = XEXP (XEXP (tmp, 0), 1);
9699 tmp = XEXP (XEXP (tmp, 0), 0);
9700 }
9701
9702 gcc_assert (SYMBOL_REF_P (tmp));
9703 model = SYMBOL_REF_TLS_MODEL (tmp);
9704 gcc_assert (model != 0);
9705
9706 tmp = rs6000_legitimize_tls_address (tmp, model);
9707 if (addend)
9708 {
9709 tmp = gen_rtx_PLUS (mode, tmp, addend);
9710 tmp = force_operand (tmp, operands[0]);
9711 }
9712 operands[1] = tmp;
9713 }
9714
9715 /* 128-bit constant floating-point values on Darwin should really be loaded
9716 as two parts. However, this premature splitting is a problem when DFmode
9717 values can go into Altivec registers. */
9718 if (TARGET_MACHO && CONST_DOUBLE_P (operands[1]) && FLOAT128_IBM_P (mode)
9719 && !reg_addr[DFmode].scalar_in_vmx_p)
9720 {
9721 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
9722 simplify_gen_subreg (DFmode, operands[1], mode, 0),
9723 DFmode);
9724 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
9725 GET_MODE_SIZE (DFmode)),
9726 simplify_gen_subreg (DFmode, operands[1], mode,
9727 GET_MODE_SIZE (DFmode)),
9728 DFmode);
9729 return;
9730 }
9731
9732 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
9733 p1:SD) if p1 is not of floating point class and p0 is spilled as
9734 we can have no analogous movsd_store for this. */
9735 if (lra_in_progress && mode == DDmode
9736 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9737 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9738 && SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1]))
9739 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
9740 {
9741 enum reg_class cl;
9742 int regno = REGNO (SUBREG_REG (operands[1]));
9743
9744 if (!HARD_REGISTER_NUM_P (regno))
9745 {
9746 cl = reg_preferred_class (regno);
9747 regno = reg_renumber[regno];
9748 if (regno < 0)
9749 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
9750 }
9751 if (regno >= 0 && ! FP_REGNO_P (regno))
9752 {
9753 mode = SDmode;
9754 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
9755 operands[1] = SUBREG_REG (operands[1]);
9756 }
9757 }
9758 if (lra_in_progress
9759 && mode == SDmode
9760 && REG_P (operands[0]) && !HARD_REGISTER_P (operands[0])
9761 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
9762 && (REG_P (operands[1])
9763 || (SUBREG_P (operands[1]) && REG_P (SUBREG_REG (operands[1])))))
9764 {
9765 int regno = reg_or_subregno (operands[1]);
9766 enum reg_class cl;
9767
9768 if (!HARD_REGISTER_NUM_P (regno))
9769 {
9770 cl = reg_preferred_class (regno);
9771 gcc_assert (cl != NO_REGS);
9772 regno = reg_renumber[regno];
9773 if (regno < 0)
9774 regno = ira_class_hard_regs[cl][0];
9775 }
9776 if (FP_REGNO_P (regno))
9777 {
9778 if (GET_MODE (operands[0]) != DDmode)
9779 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
9780 emit_insn (gen_movsd_store (operands[0], operands[1]));
9781 }
9782 else if (INT_REGNO_P (regno))
9783 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9784 else
9785 gcc_unreachable();
9786 return;
9787 }
9788 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
9789 p:DD)) if p0 is not of floating point class and p1 is spilled as
9790 we can have no analogous movsd_load for this. */
9791 if (lra_in_progress && mode == DDmode
9792 && SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))
9793 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
9794 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9795 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9796 {
9797 enum reg_class cl;
9798 int regno = REGNO (SUBREG_REG (operands[0]));
9799
9800 if (!HARD_REGISTER_NUM_P (regno))
9801 {
9802 cl = reg_preferred_class (regno);
9803 regno = reg_renumber[regno];
9804 if (regno < 0)
9805 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
9806 }
9807 if (regno >= 0 && ! FP_REGNO_P (regno))
9808 {
9809 mode = SDmode;
9810 operands[0] = SUBREG_REG (operands[0]);
9811 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
9812 }
9813 }
9814 if (lra_in_progress
9815 && mode == SDmode
9816 && (REG_P (operands[0])
9817 || (SUBREG_P (operands[0]) && REG_P (SUBREG_REG (operands[0]))))
9818 && REG_P (operands[1]) && !HARD_REGISTER_P (operands[1])
9819 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
9820 {
9821 int regno = reg_or_subregno (operands[0]);
9822 enum reg_class cl;
9823
9824 if (!HARD_REGISTER_NUM_P (regno))
9825 {
9826 cl = reg_preferred_class (regno);
9827 gcc_assert (cl != NO_REGS);
9828 regno = reg_renumber[regno];
9829 if (regno < 0)
9830 regno = ira_class_hard_regs[cl][0];
9831 }
9832 if (FP_REGNO_P (regno))
9833 {
9834 if (GET_MODE (operands[1]) != DDmode)
9835 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
9836 emit_insn (gen_movsd_load (operands[0], operands[1]));
9837 }
9838 else if (INT_REGNO_P (regno))
9839 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
9840 else
9841 gcc_unreachable();
9842 return;
9843 }
9844
9845 /* FIXME: In the long term, this switch statement should go away
9846 and be replaced by a sequence of tests based on things like
9847 mode == Pmode. */
9848 switch (mode)
9849 {
9850 case E_HImode:
9851 case E_QImode:
9852 if (CONSTANT_P (operands[1])
9853 && !CONST_INT_P (operands[1]))
9854 operands[1] = force_const_mem (mode, operands[1]);
9855 break;
9856
9857 case E_TFmode:
9858 case E_TDmode:
9859 case E_IFmode:
9860 case E_KFmode:
9861 if (FLOAT128_2REG_P (mode))
9862 rs6000_eliminate_indexed_memrefs (operands);
9863 /* fall through */
9864
9865 case E_DFmode:
9866 case E_DDmode:
9867 case E_SFmode:
9868 case E_SDmode:
9869 if (CONSTANT_P (operands[1])
9870 && ! easy_fp_constant (operands[1], mode))
9871 operands[1] = force_const_mem (mode, operands[1]);
9872 break;
9873
9874 case E_V16QImode:
9875 case E_V8HImode:
9876 case E_V4SFmode:
9877 case E_V4SImode:
9878 case E_V2DFmode:
9879 case E_V2DImode:
9880 case E_V1TImode:
9881 if (CONSTANT_P (operands[1])
9882 && !easy_vector_constant (operands[1], mode))
9883 operands[1] = force_const_mem (mode, operands[1]);
9884 break;
9885
9886 case E_SImode:
9887 case E_DImode:
9888 /* Use default pattern for address of ELF small data */
9889 if (TARGET_ELF
9890 && mode == Pmode
9891 && DEFAULT_ABI == ABI_V4
9892 && (SYMBOL_REF_P (operands[1])
9893 || GET_CODE (operands[1]) == CONST)
9894 && small_data_operand (operands[1], mode))
9895 {
9896 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9897 return;
9898 }
9899
9900 if (DEFAULT_ABI == ABI_V4
9901 && mode == Pmode && mode == SImode
9902 && flag_pic == 1 && got_operand (operands[1], mode))
9903 {
9904 emit_insn (gen_movsi_got (operands[0], operands[1]));
9905 return;
9906 }
9907
9908 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
9909 && TARGET_NO_TOC
9910 && ! flag_pic
9911 && mode == Pmode
9912 && CONSTANT_P (operands[1])
9913 && GET_CODE (operands[1]) != HIGH
9914 && !CONST_INT_P (operands[1]))
9915 {
9916 rtx target = (!can_create_pseudo_p ()
9917 ? operands[0]
9918 : gen_reg_rtx (mode));
9919
9920 /* If this is a function address on -mcall-aixdesc,
9921 convert it to the address of the descriptor. */
9922 if (DEFAULT_ABI == ABI_AIX
9923 && SYMBOL_REF_P (operands[1])
9924 && XSTR (operands[1], 0)[0] == '.')
9925 {
9926 const char *name = XSTR (operands[1], 0);
9927 rtx new_ref;
9928 while (*name == '.')
9929 name++;
9930 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
9931 CONSTANT_POOL_ADDRESS_P (new_ref)
9932 = CONSTANT_POOL_ADDRESS_P (operands[1]);
9933 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
9934 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
9935 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
9936 operands[1] = new_ref;
9937 }
9938
9939 if (DEFAULT_ABI == ABI_DARWIN)
9940 {
9941 #if TARGET_MACHO
9942 if (MACHO_DYNAMIC_NO_PIC_P)
9943 {
9944 /* Take care of any required data indirection. */
9945 operands[1] = rs6000_machopic_legitimize_pic_address (
9946 operands[1], mode, operands[0]);
9947 if (operands[0] != operands[1])
9948 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9949 return;
9950 }
9951 #endif
9952 emit_insn (gen_macho_high (target, operands[1]));
9953 emit_insn (gen_macho_low (operands[0], target, operands[1]));
9954 return;
9955 }
9956
9957 emit_insn (gen_elf_high (target, operands[1]));
9958 emit_insn (gen_elf_low (operands[0], target, operands[1]));
9959 return;
9960 }
9961
9962 /* If this is a SYMBOL_REF that refers to a constant pool entry,
9963 and we have put it in the TOC, we just need to make a TOC-relative
9964 reference to it. */
9965 if (TARGET_TOC
9966 && SYMBOL_REF_P (operands[1])
9967 && use_toc_relative_ref (operands[1], mode))
9968 operands[1] = create_TOC_reference (operands[1], operands[0]);
9969 else if (mode == Pmode
9970 && CONSTANT_P (operands[1])
9971 && GET_CODE (operands[1]) != HIGH
9972 && ((REG_P (operands[0])
9973 && FP_REGNO_P (REGNO (operands[0])))
9974 || !CONST_INT_P (operands[1])
9975 || (num_insns_constant (operands[1], mode)
9976 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
9977 && !toc_relative_expr_p (operands[1], false, NULL, NULL)
9978 && (TARGET_CMODEL == CMODEL_SMALL
9979 || can_create_pseudo_p ()
9980 || (REG_P (operands[0])
9981 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
9982 {
9983
9984 #if TARGET_MACHO
9985 /* Darwin uses a special PIC legitimizer. */
9986 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
9987 {
9988 operands[1] =
9989 rs6000_machopic_legitimize_pic_address (operands[1], mode,
9990 operands[0]);
9991 if (operands[0] != operands[1])
9992 emit_insn (gen_rtx_SET (operands[0], operands[1]));
9993 return;
9994 }
9995 #endif
9996
9997 /* If we are to limit the number of things we put in the TOC and
9998 this is a symbol plus a constant we can add in one insn,
9999 just put the symbol in the TOC and add the constant. */
10000 if (GET_CODE (operands[1]) == CONST
10001 && TARGET_NO_SUM_IN_TOC
10002 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10003 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10004 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10005 || SYMBOL_REF_P (XEXP (XEXP (operands[1], 0), 0)))
10006 && ! side_effects_p (operands[0]))
10007 {
10008 rtx sym =
10009 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10010 rtx other = XEXP (XEXP (operands[1], 0), 1);
10011
10012 sym = force_reg (mode, sym);
10013 emit_insn (gen_add3_insn (operands[0], sym, other));
10014 return;
10015 }
10016
10017 operands[1] = force_const_mem (mode, operands[1]);
10018
10019 if (TARGET_TOC
10020 && SYMBOL_REF_P (XEXP (operands[1], 0))
10021 && use_toc_relative_ref (XEXP (operands[1], 0), mode))
10022 {
10023 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10024 operands[0]);
10025 operands[1] = gen_const_mem (mode, tocref);
10026 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10027 }
10028 }
10029 break;
10030
10031 case E_TImode:
10032 if (!VECTOR_MEM_VSX_P (TImode))
10033 rs6000_eliminate_indexed_memrefs (operands);
10034 break;
10035
10036 case E_PTImode:
10037 rs6000_eliminate_indexed_memrefs (operands);
10038 break;
10039
10040 default:
10041 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10042 }
10043
10044 /* Above, we may have called force_const_mem which may have returned
10045 an invalid address. If we can, fix this up; otherwise, reload will
10046 have to deal with it. */
10047 if (MEM_P (operands[1]))
10048 operands[1] = validize_mem (operands[1]);
10049
10050 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10051 }
10052 \f
10053 /* Nonzero if we can use a floating-point register to pass this arg. */
10054 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10055 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10056 && (CUM)->fregno <= FP_ARG_MAX_REG \
10057 && TARGET_HARD_FLOAT)
10058
10059 /* Nonzero if we can use an AltiVec register to pass this arg. */
10060 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
10061 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
10062 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
10063 && TARGET_ALTIVEC_ABI \
10064 && (NAMED))
10065
10066 /* Walk down the type tree of TYPE counting consecutive base elements.
10067 If *MODEP is VOIDmode, then set it to the first valid floating point
10068 or vector type. If a non-floating point or vector type is found, or
10069 if a floating point or vector type that doesn't match a non-VOIDmode
10070 *MODEP is found, then return -1, otherwise return the count in the
10071 sub-tree. */
10072
10073 static int
10074 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
10075 {
10076 machine_mode mode;
10077 HOST_WIDE_INT size;
10078
10079 switch (TREE_CODE (type))
10080 {
10081 case REAL_TYPE:
10082 mode = TYPE_MODE (type);
10083 if (!SCALAR_FLOAT_MODE_P (mode))
10084 return -1;
10085
10086 if (*modep == VOIDmode)
10087 *modep = mode;
10088
10089 if (*modep == mode)
10090 return 1;
10091
10092 break;
10093
10094 case COMPLEX_TYPE:
10095 mode = TYPE_MODE (TREE_TYPE (type));
10096 if (!SCALAR_FLOAT_MODE_P (mode))
10097 return -1;
10098
10099 if (*modep == VOIDmode)
10100 *modep = mode;
10101
10102 if (*modep == mode)
10103 return 2;
10104
10105 break;
10106
10107 case VECTOR_TYPE:
10108 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
10109 return -1;
10110
10111 /* Use V4SImode as representative of all 128-bit vector types. */
10112 size = int_size_in_bytes (type);
10113 switch (size)
10114 {
10115 case 16:
10116 mode = V4SImode;
10117 break;
10118 default:
10119 return -1;
10120 }
10121
10122 if (*modep == VOIDmode)
10123 *modep = mode;
10124
10125 /* Vector modes are considered to be opaque: two vectors are
10126 equivalent for the purposes of being homogeneous aggregates
10127 if they are the same size. */
10128 if (*modep == mode)
10129 return 1;
10130
10131 break;
10132
10133 case ARRAY_TYPE:
10134 {
10135 int count;
10136 tree index = TYPE_DOMAIN (type);
10137
10138 /* Can't handle incomplete types nor sizes that are not
10139 fixed. */
10140 if (!COMPLETE_TYPE_P (type)
10141 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10142 return -1;
10143
10144 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
10145 if (count == -1
10146 || !index
10147 || !TYPE_MAX_VALUE (index)
10148 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
10149 || !TYPE_MIN_VALUE (index)
10150 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
10151 || count < 0)
10152 return -1;
10153
10154 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
10155 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
10156
10157 /* There must be no padding. */
10158 if (wi::to_wide (TYPE_SIZE (type))
10159 != count * GET_MODE_BITSIZE (*modep))
10160 return -1;
10161
10162 return count;
10163 }
10164
10165 case RECORD_TYPE:
10166 {
10167 int count = 0;
10168 int sub_count;
10169 tree field;
10170
10171 /* Can't handle incomplete types nor sizes that are not
10172 fixed. */
10173 if (!COMPLETE_TYPE_P (type)
10174 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10175 return -1;
10176
10177 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10178 {
10179 if (TREE_CODE (field) != FIELD_DECL)
10180 continue;
10181
10182 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10183 if (sub_count < 0)
10184 return -1;
10185 count += sub_count;
10186 }
10187
10188 /* There must be no padding. */
10189 if (wi::to_wide (TYPE_SIZE (type))
10190 != count * GET_MODE_BITSIZE (*modep))
10191 return -1;
10192
10193 return count;
10194 }
10195
10196 case UNION_TYPE:
10197 case QUAL_UNION_TYPE:
10198 {
10199 /* These aren't very interesting except in a degenerate case. */
10200 int count = 0;
10201 int sub_count;
10202 tree field;
10203
10204 /* Can't handle incomplete types nor sizes that are not
10205 fixed. */
10206 if (!COMPLETE_TYPE_P (type)
10207 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
10208 return -1;
10209
10210 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
10211 {
10212 if (TREE_CODE (field) != FIELD_DECL)
10213 continue;
10214
10215 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
10216 if (sub_count < 0)
10217 return -1;
10218 count = count > sub_count ? count : sub_count;
10219 }
10220
10221 /* There must be no padding. */
10222 if (wi::to_wide (TYPE_SIZE (type))
10223 != count * GET_MODE_BITSIZE (*modep))
10224 return -1;
10225
10226 return count;
10227 }
10228
10229 default:
10230 break;
10231 }
10232
10233 return -1;
10234 }
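
/* Example (illustrative): for

	struct pt  { double x, y; };
	struct tri { struct pt v[3]; };

   the walk returns 6 with *MODEP == DFmode, so a 'struct tri' is a
   homogeneous aggregate of six doubles.  Adding, say, an 'int' field
   anywhere would make a sub-walk return -1 and disqualify the type.  */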
10235
10236 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
10237 float or vector aggregate that shall be passed in FP/vector registers
10238 according to the ELFv2 ABI, return the homogeneous element mode in
10239 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
10240
10241 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
10242
10243 static bool
10244 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
10245 machine_mode *elt_mode,
10246 int *n_elts)
10247 {
10248 /* Note that we do not accept complex types at the top level as
10249 homogeneous aggregates; these types are handled via the
10250 targetm.calls.split_complex_arg mechanism. Complex types
10251 can be elements of homogeneous aggregates, however. */
10252 if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
10253 && AGGREGATE_TYPE_P (type))
10254 {
10255 machine_mode field_mode = VOIDmode;
10256 int field_count = rs6000_aggregate_candidate (type, &field_mode);
10257
10258 if (field_count > 0)
10259 {
10260 int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
10261 int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);
10262
10263 /* The ELFv2 ABI allows homogeneous aggregates to occupy
10264 up to AGGR_ARG_NUM_REG registers. */
10265 if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
10266 {
10267 if (elt_mode)
10268 *elt_mode = field_mode;
10269 if (n_elts)
10270 *n_elts = field_count;
10271 return true;
10272 }
10273 }
10274 }
10275
10276 if (elt_mode)
10277 *elt_mode = mode;
10278 if (n_elts)
10279 *n_elts = 1;
10280 return false;
10281 }
10282
10283 /* Return a nonzero value to say to return the function value in
10284 memory, just as large structures are always returned. TYPE will be
10285 the data type of the value, and FNTYPE will be the type of the
10286 function doing the returning, or @code{NULL} for libcalls.
10287
10288 The AIX ABI for the RS/6000 specifies that all structures are
10289 returned in memory. The Darwin ABI does the same.
10290
10291 For the Darwin 64 Bit ABI, a function result can be returned in
10292 registers or in memory, depending on the size of the return data
10293 type. If it is returned in registers, the value occupies the same
10294 registers as it would if it were the first and only function
10295 argument. Otherwise, the function places its result in memory at
10296 the location pointed to by GPR3.
10297
10298 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
10299 but a draft put them in memory, and GCC used to implement the draft
10300 instead of the final standard. Therefore, aix_struct_return
10301 controls this instead of DEFAULT_ABI; V.4 targets needing backward
10302 compatibility can change DRAFT_V4_STRUCT_RET to override the
10303 default, and -m switches get the final word. See
10304 rs6000_option_override_internal for more details.
10305
10306 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
10307 long double support is enabled. These values are returned in memory.
10308
10309 int_size_in_bytes returns -1 for variable size objects, which go in
10310 memory always. The cast to unsigned makes -1 > 8. */
10311
10312 static bool
10313 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
10314 {
10315 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
10316 if (TARGET_MACHO
10317 && rs6000_darwin64_abi
10318 && TREE_CODE (type) == RECORD_TYPE
10319 && int_size_in_bytes (type) > 0)
10320 {
10321 CUMULATIVE_ARGS valcum;
10322 rtx valret;
10323
10324 valcum.words = 0;
10325 valcum.fregno = FP_ARG_MIN_REG;
10326 valcum.vregno = ALTIVEC_ARG_MIN_REG;
10327 /* Do a trial code generation as if this were going to be passed
10328 as an argument; if any part goes in memory, we return NULL. */
10329 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
10330 if (valret)
10331 return false;
10332 /* Otherwise fall through to more conventional ABI rules. */
10333 }
10334
10335 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
10336 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
10337 NULL, NULL))
10338 return false;
10339
10340 /* The ELFv2 ABI returns aggregates up to 16B in registers */
10341 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
10342 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
10343 return false;
10344
10345 if (AGGREGATE_TYPE_P (type)
10346 && (aix_struct_return
10347 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
10348 return true;
10349
10350 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10351 modes only exist for GCC vector types if -maltivec. */
10352 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
10353 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
10354 return false;
10355
10356 /* Return synthetic vectors in memory. */
10357 if (TREE_CODE (type) == VECTOR_TYPE
10358 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10359 {
10360 static bool warned_for_return_big_vectors = false;
10361 if (!warned_for_return_big_vectors)
10362 {
10363 warning (OPT_Wpsabi, "GCC vector returned by reference: "
10364 "non-standard ABI extension with no compatibility "
10365 "guarantee");
10366 warned_for_return_big_vectors = true;
10367 }
10368 return true;
10369 }
10370
10371 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
10372 && FLOAT128_IEEE_P (TYPE_MODE (type)))
10373 return true;
10374
10375 return false;
10376 }
10377
10378 /* Specify whether values returned in registers should be at the most
10379 significant end of a register. We want aggregates returned by
10380 value to match the way aggregates are passed to functions. */
10381
10382 static bool
10383 rs6000_return_in_msb (const_tree valtype)
10384 {
10385 return (DEFAULT_ABI == ABI_ELFv2
10386 && BYTES_BIG_ENDIAN
10387 && AGGREGATE_TYPE_P (valtype)
10388 && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
10389 == PAD_UPWARD));
10390 }
10391
10392 #ifdef HAVE_AS_GNU_ATTRIBUTE
10393 /* Return TRUE if a call to function FNDECL may be one that
10394 potentially affects the function calling ABI of the object file. */
10395
10396 static bool
10397 call_ABI_of_interest (tree fndecl)
10398 {
10399 if (rs6000_gnu_attr && symtab->state == EXPANSION)
10400 {
10401 struct cgraph_node *c_node;
10402
10403 /* Libcalls are always interesting. */
10404 if (fndecl == NULL_TREE)
10405 return true;
10406
10407 /* Any call to an external function is interesting. */
10408 if (DECL_EXTERNAL (fndecl))
10409 return true;
10410
10411 /* Interesting functions that we are emitting in this object file. */
10412 c_node = cgraph_node::get (fndecl);
10413 c_node = c_node->ultimate_alias_target ();
10414 return !c_node->only_called_directly_p ();
10415 }
10416 return false;
10417 }
10418 #endif
10419
10420 /* Initialize a variable CUM of type CUMULATIVE_ARGS
10421 for a call to a function whose data type is FNTYPE.
10422 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
10423
10424 For incoming args we set the number of arguments in the prototype to a
10425 large value so we never return a PARALLEL. */
10426
10427 void
10428 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
10429 rtx libname ATTRIBUTE_UNUSED, int incoming,
10430 int libcall, int n_named_args,
10431 tree fndecl,
10432 machine_mode return_mode ATTRIBUTE_UNUSED)
10433 {
10434 static CUMULATIVE_ARGS zero_cumulative;
10435
10436 *cum = zero_cumulative;
10437 cum->words = 0;
10438 cum->fregno = FP_ARG_MIN_REG;
10439 cum->vregno = ALTIVEC_ARG_MIN_REG;
10440 cum->prototype = (fntype && prototype_p (fntype));
10441 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
10442 ? CALL_LIBCALL : CALL_NORMAL);
10443 cum->sysv_gregno = GP_ARG_MIN_REG;
10444 cum->stdarg = stdarg_p (fntype);
10445 cum->libcall = libcall;
10446
10447 cum->nargs_prototype = 0;
10448 if (incoming || cum->prototype)
10449 cum->nargs_prototype = n_named_args;
10450
10451 /* Check for a longcall attribute. */
10452 if ((!fntype && rs6000_default_long_calls)
10453 || (fntype
10454 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
10455 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
10456 cum->call_cookie |= CALL_LONG;
10457 else if (DEFAULT_ABI != ABI_DARWIN)
10458 {
10459 bool is_local = (fndecl
10460 && !DECL_EXTERNAL (fndecl)
10461 && !DECL_WEAK (fndecl)
10462 && (*targetm.binds_local_p) (fndecl));
10463 if (is_local)
10464 ;
10465 else if (flag_plt)
10466 {
10467 if (fntype
10468 && lookup_attribute ("noplt", TYPE_ATTRIBUTES (fntype)))
10469 cum->call_cookie |= CALL_LONG;
10470 }
10471 else
10472 {
10473 if (!(fntype
10474 && lookup_attribute ("plt", TYPE_ATTRIBUTES (fntype))))
10475 cum->call_cookie |= CALL_LONG;
10476 }
10477 }
10478
10479 if (TARGET_DEBUG_ARG)
10480 {
10481 fprintf (stderr, "\ninit_cumulative_args:");
10482 if (fntype)
10483 {
10484 tree ret_type = TREE_TYPE (fntype);
10485 fprintf (stderr, " ret code = %s,",
10486 get_tree_code_name (TREE_CODE (ret_type)));
10487 }
10488
10489 if (cum->call_cookie & CALL_LONG)
10490 fprintf (stderr, " longcall,");
10491
10492 fprintf (stderr, " proto = %d, nargs = %d\n",
10493 cum->prototype, cum->nargs_prototype);
10494 }
10495
10496 #ifdef HAVE_AS_GNU_ATTRIBUTE
10497 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
10498 {
10499 cum->escapes = call_ABI_of_interest (fndecl);
10500 if (cum->escapes)
10501 {
10502 tree return_type;
10503
10504 if (fntype)
10505 {
10506 return_type = TREE_TYPE (fntype);
10507 return_mode = TYPE_MODE (return_type);
10508 }
10509 else
10510 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
10511
10512 if (return_type != NULL)
10513 {
10514 if (TREE_CODE (return_type) == RECORD_TYPE
10515 && TYPE_TRANSPARENT_AGGR (return_type))
10516 {
10517 return_type = TREE_TYPE (first_field (return_type));
10518 return_mode = TYPE_MODE (return_type);
10519 }
10520 if (AGGREGATE_TYPE_P (return_type)
10521 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
10522 <= 8))
10523 rs6000_returns_struct = true;
10524 }
10525 if (SCALAR_FLOAT_MODE_P (return_mode))
10526 {
10527 rs6000_passes_float = true;
10528 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10529 && (FLOAT128_IBM_P (return_mode)
10530 || FLOAT128_IEEE_P (return_mode)
10531 || (return_type != NULL
10532 && (TYPE_MAIN_VARIANT (return_type)
10533 == long_double_type_node))))
10534 rs6000_passes_long_double = true;
10535
10536 /* Note if we pass or return an IEEE 128-bit type. We changed
10537 the mangling for these types, and we may need to make an alias
10538 with the old mangling. */
10539 if (FLOAT128_IEEE_P (return_mode))
10540 rs6000_passes_ieee128 = true;
10541 }
10542 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
10543 rs6000_passes_vector = true;
10544 }
10545 }
10546 #endif
10547
10548 if (fntype
10549 && !TARGET_ALTIVEC
10550 && TARGET_ALTIVEC_ABI
10551 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
10552 {
10553 error ("cannot return value in vector register because"
10554 " altivec instructions are disabled, use %qs"
10555 " to enable them", "-maltivec");
10556 }
10557 }
10558 \f
10559 /* The mode the ABI uses for a word. This is not the same as word_mode
10560 for -m32 -mpowerpc64. This is used to implement various target hooks. */
10561
10562 static scalar_int_mode
10563 rs6000_abi_word_mode (void)
10564 {
10565 return TARGET_32BIT ? SImode : DImode;
10566 }
10567
10568 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
10569 static char *
10570 rs6000_offload_options (void)
10571 {
10572 if (TARGET_64BIT)
10573 return xstrdup ("-foffload-abi=lp64");
10574 else
10575 return xstrdup ("-foffload-abi=ilp32");
10576 }
10577
10578 /* On rs6000, function arguments are promoted, as are function return
10579 values. */
10580
10581 static machine_mode
10582 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
10583 machine_mode mode,
10584 int *punsignedp ATTRIBUTE_UNUSED,
10585 const_tree, int)
10586 {
10587 PROMOTE_MODE (mode, *punsignedp, type);
10588
10589 return mode;
10590 }
10591
10592 /* Return true if TYPE must be passed on the stack and not in registers. */
10593
10594 static bool
10595 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
10596 {
10597 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
10598 return must_pass_in_stack_var_size (mode, type);
10599 else
10600 return must_pass_in_stack_var_size_or_pad (mode, type);
10601 }
10602
10603 static inline bool
10604 is_complex_IBM_long_double (machine_mode mode)
10605 {
10606 return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
10607 }
10608
10609 /* Whether ABI_V4 passes MODE args to a function in floating point
10610 registers. */
10611
10612 static bool
10613 abi_v4_pass_in_fpr (machine_mode mode, bool named)
10614 {
10615 if (!TARGET_HARD_FLOAT)
10616 return false;
10617 if (mode == DFmode)
10618 return true;
10619 if (mode == SFmode && named)
10620 return true;
10621 /* ABI_V4 passes complex IBM long double in 8 gprs.
10622 Stupid, but we can't change the ABI now. */
10623 if (is_complex_IBM_long_double (mode))
10624 return false;
10625 if (FLOAT128_2REG_P (mode))
10626 return true;
10627 if (DECIMAL_FLOAT_MODE_P (mode))
10628 return true;
10629 return false;
10630 }
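
/* Worked examples (editorial sketch) of the ABI_V4 rules above,
   assuming hard float:

       double d;                  /- DFmode                 -> FPR      -/
       float f;   (named)         /- SFmode, named          -> FPR      -/
       float f;   (variadic)      /- SFmode, unnamed        -> GPR/stack -/
       long double ld;  (IBM)     /- FLOAT128_2REG_P        -> FPR pair -/
       _Decimal64 dd;             /- DECIMAL_FLOAT_MODE_P   -> FPR      -/
       _Complex long double cld;  /- ICmode/TCmode, IBM     -> 8 GPRs   -/  */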
10631
10632 /* Implement TARGET_FUNCTION_ARG_PADDING.
10633
10634 For the AIX ABI structs are always stored left shifted in their
10635 argument slot. */
10636
10637 static pad_direction
10638 rs6000_function_arg_padding (machine_mode mode, const_tree type)
10639 {
10640 #ifndef AGGREGATE_PADDING_FIXED
10641 #define AGGREGATE_PADDING_FIXED 0
10642 #endif
10643 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
10644 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
10645 #endif
10646
10647 if (!AGGREGATE_PADDING_FIXED)
10648 {
10649 /* GCC used to pass structures of the same size as integer types as
10650 if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING;
10651 i.e., structures of size 1 or 2 (or 4 when TARGET_64BIT) were
10652 passed padded downward, except that -mstrict-align further
10653 muddied the water in that multi-component structures of 2 and 4
10654 bytes in size were passed padded upward.
10655
10656 The following arranges for best compatibility with previous
10657 versions of gcc, but removes the -mstrict-align dependency. */
10658 if (BYTES_BIG_ENDIAN)
10659 {
10660 HOST_WIDE_INT size = 0;
10661
10662 if (mode == BLKmode)
10663 {
10664 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
10665 size = int_size_in_bytes (type);
10666 }
10667 else
10668 size = GET_MODE_SIZE (mode);
10669
10670 if (size == 1 || size == 2 || size == 4)
10671 return PAD_DOWNWARD;
10672 }
10673 return PAD_UPWARD;
10674 }
10675
10676 if (AGGREGATES_PAD_UPWARD_ALWAYS)
10677 {
10678 if (type != 0 && AGGREGATE_TYPE_P (type))
10679 return PAD_UPWARD;
10680 }
10681
10682 /* Fall back to the default. */
10683 return default_function_arg_padding (mode, type);
10684 }
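
/* Editorial examples of the big-endian compatibility rule above:

       struct { char c; };        /- size 1 -> PAD_DOWNWARD (like char)  -/
       struct { short s; };       /- size 2 -> PAD_DOWNWARD (like short) -/
       struct { int i; };         /- size 4 -> PAD_DOWNWARD (like int)   -/
       struct { char a, b, c; };  /- size 3 -> PAD_UPWARD                -/
       struct { char a[5]; };     /- size 5 -> PAD_UPWARD                -/

   Only aggregates of size 1, 2 or 4 keep the old pass-as-integer
   placement; everything else sits at the start of its slot.  */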
10685
10686 /* If defined, a C expression that gives the alignment boundary, in bits,
10687 of an argument with the specified mode and type. If it is not defined,
10688 PARM_BOUNDARY is used for all arguments.
10689
10690 V.4 wants long longs and doubles to be double word aligned. Just
10691 testing the mode size is a boneheaded way to do this as it means
10692 that other types such as complex int are also double word aligned.
10693 However, we're stuck with this because changing the ABI might break
10694 existing library interfaces.
10695
10696 Quadword align Altivec/VSX vectors.
10697 Quadword align large synthetic vector types. */
10698
10699 static unsigned int
10700 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
10701 {
10702 machine_mode elt_mode;
10703 int n_elts;
10704
10705 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10706
10707 if (DEFAULT_ABI == ABI_V4
10708 && (GET_MODE_SIZE (mode) == 8
10709 || (TARGET_HARD_FLOAT
10710 && !is_complex_IBM_long_double (mode)
10711 && FLOAT128_2REG_P (mode))))
10712 return 64;
10713 else if (FLOAT128_VECTOR_P (mode))
10714 return 128;
10715 else if (type && TREE_CODE (type) == VECTOR_TYPE
10716 && int_size_in_bytes (type) >= 8
10717 && int_size_in_bytes (type) < 16)
10718 return 64;
10719 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
10720 || (type && TREE_CODE (type) == VECTOR_TYPE
10721 && int_size_in_bytes (type) >= 16))
10722 return 128;
10723
10724 /* Aggregate types that need > 8 byte alignment are quadword-aligned
10725 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
10726 -mcompat-align-parm is used. */
10727 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
10728 || DEFAULT_ABI == ABI_ELFv2)
10729 && type && TYPE_ALIGN (type) > 64)
10730 {
10731 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
10732 or homogeneous float/vector aggregates here. We already handled
10733 vector aggregates above, but still need to check for float here. */
10734 bool aggregate_p = (AGGREGATE_TYPE_P (type)
10735 && !SCALAR_FLOAT_MODE_P (elt_mode));
10736
10737 /* We used to check for BLKmode instead of the above aggregate type
10738 check. Warn when this results in any difference to the ABI. */
10739 if (aggregate_p != (mode == BLKmode))
10740 {
10741 static bool warned;
10742 if (!warned && warn_psabi)
10743 {
10744 warned = true;
10745 inform (input_location,
10746 "the ABI of passing aggregates with %d-byte alignment"
10747 " has changed in GCC 5",
10748 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
10749 }
10750 }
10751
10752 if (aggregate_p)
10753 return 128;
10754 }
10755
10756 /* Similar for the Darwin64 ABI. Note that for historical reasons we
10757 implement the "aggregate type" check as a BLKmode check here; this
10758 means certain aggregate types are in fact not aligned. */
10759 if (TARGET_MACHO && rs6000_darwin64_abi
10760 && mode == BLKmode
10761 && type && TYPE_ALIGN (type) > 64)
10762 return 128;
10763
10764 return PARM_BOUNDARY;
10765 }
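
/* Editorial examples of the boundary rules above on a 64-bit ELFv2
   target (a sketch of common cases, not an exhaustive table):

       long long ll;        /- 64: one doubleword slot                -/
       vector int v;        /- 128: Altivec/VSX vector mode           -/
       __float128 q;        /- 128: FLOAT128_VECTOR_P                 -/
       /- a struct whose TYPE_ALIGN is 256 bits -> 128: the
          quadword-aligned aggregate case handled just above.  -/  */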
10766
10767 /* The offset in words to the start of the parameter save area. */
10768
10769 static unsigned int
10770 rs6000_parm_offset (void)
10771 {
10772 return (DEFAULT_ABI == ABI_V4 ? 2
10773 : DEFAULT_ABI == ABI_ELFv2 ? 4
10774 : 6);
10775 }
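
/* Editorial note: these constants are the linkage-area sizes in words.
   ABI_V4 reserves 2 words (back chain and LR save); ELFv2 reserves 4
   doublewords (back chain, CR save, LR save, TOC save); AIX/ELFv1
   reserves 6 (those four plus two reserved words).  */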
10776
10777 /* For a function parm of MODE and TYPE, return the starting word in
10778 the parameter area. NWORDS of the parameter area are already used. */
10779
10780 static unsigned int
10781 rs6000_parm_start (machine_mode mode, const_tree type,
10782 unsigned int nwords)
10783 {
10784 unsigned int align;
10785
10786 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
10787 return nwords + (-(rs6000_parm_offset () + nwords) & align);
10788 }
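
/* Worked example (editorial): 64-bit ELFv2, a 16-byte-aligned vector
   arriving after three doublewords of arguments.  ALIGN is
   128 / 64 - 1 = 1, so

       start = 3 + (-(4 + 3) & 1) = 3 + 1 = 4

   i.e. one doubleword of padding, placing the vector at offset
   (4 + 4) * 8 = 64 bytes from the 16-byte-aligned stack pointer.  */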
10789
10790 /* Compute the size (in words) of a function argument. */
10791
10792 static unsigned long
10793 rs6000_arg_size (machine_mode mode, const_tree type)
10794 {
10795 unsigned long size;
10796
10797 if (mode != BLKmode)
10798 size = GET_MODE_SIZE (mode);
10799 else
10800 size = int_size_in_bytes (type);
10801
10802 if (TARGET_32BIT)
10803 return (size + 3) >> 2;
10804 else
10805 return (size + 7) >> 3;
10806 }
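
/* Worked example (editorial): a 12-byte BLKmode struct occupies
   (12 + 7) >> 3 = 2 doublewords on 64-bit targets, but
   (12 + 3) >> 2 = 3 words on 32-bit targets.  */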
10807 \f
10808 /* Use this to flush pending int fields. */
10809
10810 static void
10811 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
10812 HOST_WIDE_INT bitpos, int final)
10813 {
10814 unsigned int startbit, endbit;
10815 int intregs, intoffset;
10816
10817 /* Handle the situations where a float is taking up the first half
10818 of the GPR, and the other half is empty (typically due to
10819 alignment restrictions). We can detect this by an 8-byte-aligned
10820 int field, or by seeing that this is the final flush for this
10821 argument. Count the word and continue on. */
10822 if (cum->floats_in_gpr == 1
10823 && (cum->intoffset % 64 == 0
10824 || (cum->intoffset == -1 && final)))
10825 {
10826 cum->words++;
10827 cum->floats_in_gpr = 0;
10828 }
10829
10830 if (cum->intoffset == -1)
10831 return;
10832
10833 intoffset = cum->intoffset;
10834 cum->intoffset = -1;
10835 cum->floats_in_gpr = 0;
10836
10837 if (intoffset % BITS_PER_WORD != 0)
10838 {
10839 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
10840 if (!int_mode_for_size (bits, 0).exists ())
10841 {
10842 /* We couldn't find an appropriate mode, which happens,
10843 e.g., in packed structs when there are 3 bytes to load.
10844 Move intoffset back to the beginning of the word in this
10845 case. */
10846 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
10847 }
10848 }
10849
10850 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
10851 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
10852 intregs = (endbit - startbit) / BITS_PER_WORD;
10853 cum->words += intregs;
10854 /* words should be unsigned. */
10855 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
10856 {
10857 int pad = (endbit/BITS_PER_WORD) - cum->words;
10858 cum->words += pad;
10859 }
10860 }
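
/* Editorial sketch of the darwin64 packing this flush supports:

       struct { float f; int i; };  /- f and i share one 8-byte GPR -/
       struct { float f; };         /- f occupies the word alone    -/

   In the first case the int-field flush accounts for the word; in
   the second, the floats_in_gpr bookkeeping above counts it.  */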
10861
10862 /* The darwin64 ABI calls for us to recurse down through structs,
10863 looking for elements passed in registers. Unfortunately, we have
10864 to track int register count here also because of misalignments
10865 in powerpc alignment mode. */
10866
10867 static void
10868 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
10869 const_tree type,
10870 HOST_WIDE_INT startbitpos)
10871 {
10872 tree f;
10873
10874 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
10875 if (TREE_CODE (f) == FIELD_DECL)
10876 {
10877 HOST_WIDE_INT bitpos = startbitpos;
10878 tree ftype = TREE_TYPE (f);
10879 machine_mode mode;
10880 if (ftype == error_mark_node)
10881 continue;
10882 mode = TYPE_MODE (ftype);
10883
10884 if (DECL_SIZE (f) != 0
10885 && tree_fits_uhwi_p (bit_position (f)))
10886 bitpos += int_bit_position (f);
10887
10888 /* ??? FIXME: else assume zero offset. */
10889
10890 if (TREE_CODE (ftype) == RECORD_TYPE)
10891 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
10892 else if (USE_FP_FOR_ARG_P (cum, mode))
10893 {
10894 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
10895 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10896 cum->fregno += n_fpregs;
10897 /* Single-precision floats present a special problem for
10898 us, because they are smaller than an 8-byte GPR, and so
10899 the structure-packing rules combined with the standard
10900 varargs behavior mean that we want to pack float/float
10901 and float/int combinations into a single register's
10902 space. This is complicated by the arg advance flushing,
10903 which works on arbitrarily large groups of int-type
10904 fields. */
10905 if (mode == SFmode)
10906 {
10907 if (cum->floats_in_gpr == 1)
10908 {
10909 /* Two floats in a word; count the word and reset
10910 the float count. */
10911 cum->words++;
10912 cum->floats_in_gpr = 0;
10913 }
10914 else if (bitpos % 64 == 0)
10915 {
10916 /* A float at the beginning of an 8-byte word;
10917 count it and put off adjusting cum->words until
10918 we see if an arg advance flush is going to do it
10919 for us. */
10920 cum->floats_in_gpr++;
10921 }
10922 else
10923 {
10924 /* The float is at the end of a word, preceded
10925 by integer fields, so the arg advance flush
10926 just above has already set cum->words and
10927 everything is taken care of. */
10928 }
10929 }
10930 else
10931 cum->words += n_fpregs;
10932 }
10933 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
10934 {
10935 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
10936 cum->vregno++;
10937 cum->words += 2;
10938 }
10939 else if (cum->intoffset == -1)
10940 cum->intoffset = bitpos;
10941 }
10942 }
10943
10944 /* Check for an item that needs to be considered specially under the darwin 64
10945 bit ABI. These are record types where the mode is BLK or the structure is
10946 8 bytes in size. */
10947 static int
10948 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
10949 {
10950 return rs6000_darwin64_abi
10951 && ((mode == BLKmode
10952 && TREE_CODE (type) == RECORD_TYPE
10953 && int_size_in_bytes (type) > 0)
10954 || (type && TREE_CODE (type) == RECORD_TYPE
10955 && int_size_in_bytes (type) == 8)) ? 1 : 0;
10956 }
10957
10958 /* Update the data in CUM to advance over an argument
10959 of mode MODE and data type TYPE.
10960 (TYPE is null for libcalls where that information may not be available.)
10961
10962 Note that for args passed by reference, function_arg will be called
10963 with MODE and TYPE set to that of the pointer to the arg, not the arg
10964 itself. */
10965
10966 static void
10967 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
10968 const_tree type, bool named, int depth)
10969 {
10970 machine_mode elt_mode;
10971 int n_elts;
10972
10973 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10974
10975 /* Only tick off an argument if we're not recursing. */
10976 if (depth == 0)
10977 cum->nargs_prototype--;
10978
10979 #ifdef HAVE_AS_GNU_ATTRIBUTE
10980 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
10981 && cum->escapes)
10982 {
10983 if (SCALAR_FLOAT_MODE_P (mode))
10984 {
10985 rs6000_passes_float = true;
10986 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
10987 && (FLOAT128_IBM_P (mode)
10988 || FLOAT128_IEEE_P (mode)
10989 || (type != NULL
10990 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
10991 rs6000_passes_long_double = true;
10992
10993 /* Note if we pass or return an IEEE 128-bit type. We changed the
10994 mangling for these types, and we may need to make an alias with
10995 the old mangling. */
10996 if (FLOAT128_IEEE_P (mode))
10997 rs6000_passes_ieee128 = true;
10998 }
10999 if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11000 rs6000_passes_vector = true;
11001 }
11002 #endif
11003
11004 if (TARGET_ALTIVEC_ABI
11005 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11006 || (type && TREE_CODE (type) == VECTOR_TYPE
11007 && int_size_in_bytes (type) == 16)))
11008 {
11009 bool stack = false;
11010
11011 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11012 {
11013 cum->vregno += n_elts;
11014
11015 if (!TARGET_ALTIVEC)
11016 error ("cannot pass argument in vector register because"
11017 " altivec instructions are disabled, use %qs"
11018 " to enable them", "-maltivec");
11019
11020 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11021 even if it is going to be passed in a vector register.
11022 Darwin does the same for variable-argument functions. */
11023 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11024 && TARGET_64BIT)
11025 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11026 stack = true;
11027 }
11028 else
11029 stack = true;
11030
11031 if (stack)
11032 {
11033 int align;
11034
11035 /* Vector parameters must be 16-byte aligned. In 32-bit
11036 mode this means we need to take into account the offset
11037 to the parameter save area. In 64-bit mode, they just
11038 have to start on an even word, since the parameter save
11039 area is 16-byte aligned. */
11040 if (TARGET_32BIT)
11041 align = -(rs6000_parm_offset () + cum->words) & 3;
11042 else
11043 align = cum->words & 1;
11044 cum->words += align + rs6000_arg_size (mode, type);
11045
11046 if (TARGET_DEBUG_ARG)
11047 {
11048 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11049 cum->words, align);
11050 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11051 cum->nargs_prototype, cum->prototype,
11052 GET_MODE_NAME (mode));
11053 }
11054 }
11055 }
11056 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11057 {
11058 int size = int_size_in_bytes (type);
11059 /* Variable sized types have size == -1 and are
11060 treated as if consisting entirely of ints.
11061 Pad to 16 byte boundary if needed. */
11062 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11063 && (cum->words % 2) != 0)
11064 cum->words++;
11065 /* For varargs, we can just go up by the size of the struct. */
11066 if (!named)
11067 cum->words += (size + 7) / 8;
11068 else
11069 {
11070 /* It is tempting to say int register count just goes up by
11071 sizeof(type)/8, but this is wrong in a case such as
11072 { int; double; int; } [powerpc alignment]. We have to
11073 grovel through the fields for these too. */
11074 cum->intoffset = 0;
11075 cum->floats_in_gpr = 0;
11076 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11077 rs6000_darwin64_record_arg_advance_flush (cum,
11078 size * BITS_PER_UNIT, 1);
11079 }
11080 if (TARGET_DEBUG_ARG)
11081 {
11082 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d, ",
11083 cum->words, TYPE_ALIGN (type), size);
11084 fprintf (stderr,
11085 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
11086 cum->nargs_prototype, cum->prototype,
11087 GET_MODE_NAME (mode));
11088 }
11089 }
11090 else if (DEFAULT_ABI == ABI_V4)
11091 {
11092 if (abi_v4_pass_in_fpr (mode, named))
11093 {
11094 /* _Decimal128 must use an even/odd register pair. This assumes
11095 that the register number is odd when fregno is odd. */
11096 if (mode == TDmode && (cum->fregno % 2) == 1)
11097 cum->fregno++;
11098
11099 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11100 <= FP_ARG_V4_MAX_REG)
11101 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
11102 else
11103 {
11104 cum->fregno = FP_ARG_V4_MAX_REG + 1;
11105 if (mode == DFmode || FLOAT128_IBM_P (mode)
11106 || mode == DDmode || mode == TDmode)
11107 cum->words += cum->words & 1;
11108 cum->words += rs6000_arg_size (mode, type);
11109 }
11110 }
11111 else
11112 {
11113 int n_words = rs6000_arg_size (mode, type);
11114 int gregno = cum->sysv_gregno;
11115
11116 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11117 As does any other 2 word item such as complex int due to a
11118 historical mistake. */
11119 if (n_words == 2)
11120 gregno += (1 - gregno) & 1;
11121
11122 /* Multi-reg args are not split between registers and stack. */
11123 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11124 {
11125 /* Long long is aligned on the stack. So are other 2 word
11126 items such as complex int due to a historical mistake. */
11127 if (n_words == 2)
11128 cum->words += cum->words & 1;
11129 cum->words += n_words;
11130 }
11131
11132 /* Note: we keep accumulating gregno even after we've started
11133 spilling to the stack; this is how expand_builtin_saveregs
11134 sees that spilling has begun. */
11135 cum->sysv_gregno = gregno + n_words;
11136 }
11137
11138 if (TARGET_DEBUG_ARG)
11139 {
11140 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11141 cum->words, cum->fregno);
11142 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
11143 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
11144 fprintf (stderr, "mode = %4s, named = %d\n",
11145 GET_MODE_NAME (mode), named);
11146 }
11147 }
11148 else
11149 {
11150 int n_words = rs6000_arg_size (mode, type);
11151 int start_words = cum->words;
11152 int align_words = rs6000_parm_start (mode, type, start_words);
11153
11154 cum->words = align_words + n_words;
11155
11156 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
11157 {
11158 /* _Decimal128 must be passed in an even/odd float register pair.
11159 This assumes that the register number is odd when fregno is
11160 odd. */
11161 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11162 cum->fregno++;
11163 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
11164 }
11165
11166 if (TARGET_DEBUG_ARG)
11167 {
11168 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
11169 cum->words, cum->fregno);
11170 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
11171 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
11172 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
11173 named, align_words - start_words, depth);
11174 }
11175 }
11176 }
11177
11178 static void
11179 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
11180 const_tree type, bool named)
11181 {
11182 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
11183 0);
11184 }
11185
11186 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
11187 structure between cum->intoffset and bitpos to integer registers. */
11188
11189 static void
11190 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
11191 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
11192 {
11193 machine_mode mode;
11194 unsigned int regno;
11195 unsigned int startbit, endbit;
11196 int this_regno, intregs, intoffset;
11197 rtx reg;
11198
11199 if (cum->intoffset == -1)
11200 return;
11201
11202 intoffset = cum->intoffset;
11203 cum->intoffset = -1;
11204
11205 /* If this is the trailing part of a word, try to only load that
11206 much into the register. Otherwise load the whole register. Note
11207 that in the latter case we may pick up unwanted bits. It's not a
11208 problem at the moment, but we may wish to revisit this. */
11209
11210 if (intoffset % BITS_PER_WORD != 0)
11211 {
11212 unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
11213 if (!int_mode_for_size (bits, 0).exists (&mode))
11214 {
11215 /* We couldn't find an appropriate mode, which happens,
11216 e.g., in packed structs when there are 3 bytes to load.
11217 Move intoffset back to the beginning of the word in this
11218 case. */
11219 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11220 mode = word_mode;
11221 }
11222 }
11223 else
11224 mode = word_mode;
11225
11226 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11227 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11228 intregs = (endbit - startbit) / BITS_PER_WORD;
11229 this_regno = cum->words + intoffset / BITS_PER_WORD;
11230
11231 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
11232 cum->use_stack = 1;
11233
11234 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
11235 if (intregs <= 0)
11236 return;
11237
11238 intoffset /= BITS_PER_UNIT;
11239 do
11240 {
11241 regno = GP_ARG_MIN_REG + this_regno;
11242 reg = gen_rtx_REG (mode, regno);
11243 rvec[(*k)++] =
11244 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
11245
11246 this_regno += 1;
11247 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
11248 mode = word_mode;
11249 intregs -= 1;
11250 }
11251 while (intregs > 0);
11252 }
11253
11254 /* Recursive workhorse for the following. */
11255
11256 static void
11257 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
11258 HOST_WIDE_INT startbitpos, rtx rvec[],
11259 int *k)
11260 {
11261 tree f;
11262
11263 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11264 if (TREE_CODE (f) == FIELD_DECL)
11265 {
11266 HOST_WIDE_INT bitpos = startbitpos;
11267 tree ftype = TREE_TYPE (f);
11268 machine_mode mode;
11269 if (ftype == error_mark_node)
11270 continue;
11271 mode = TYPE_MODE (ftype);
11272
11273 if (DECL_SIZE (f) != 0
11274 && tree_fits_uhwi_p (bit_position (f)))
11275 bitpos += int_bit_position (f);
11276
11277 /* ??? FIXME: else assume zero offset. */
11278
11279 if (TREE_CODE (ftype) == RECORD_TYPE)
11280 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
11281 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
11282 {
11283 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
11284 #if 0
11285 switch (mode)
11286 {
11287 case E_SCmode: mode = SFmode; break;
11288 case E_DCmode: mode = DFmode; break;
11289 case E_TCmode: mode = TFmode; break;
11290 default: break;
11291 }
11292 #endif
11293 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11294 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
11295 {
11296 gcc_assert (cum->fregno == FP_ARG_MAX_REG
11297 && (mode == TFmode || mode == TDmode));
11298 /* Long double or _Decimal128 split over regs and memory. */
11299 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
11300 cum->use_stack = 1;
11301 }
11302 rvec[(*k)++]
11303 = gen_rtx_EXPR_LIST (VOIDmode,
11304 gen_rtx_REG (mode, cum->fregno++),
11305 GEN_INT (bitpos / BITS_PER_UNIT));
11306 if (FLOAT128_2REG_P (mode))
11307 cum->fregno++;
11308 }
11309 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11310 {
11311 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
11312 rvec[(*k)++]
11313 = gen_rtx_EXPR_LIST (VOIDmode,
11314 gen_rtx_REG (mode, cum->vregno++),
11315 GEN_INT (bitpos / BITS_PER_UNIT));
11316 }
11317 else if (cum->intoffset == -1)
11318 cum->intoffset = bitpos;
11319 }
11320 }
11321
11322 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
11323 the register(s) to be used for each field and subfield of a struct
11324 being passed by value, along with the offset of where the
11325 register's value may be found in the block. FP fields go in FP
11326 register, vector fields go in vector registers, and everything
11327 else goes in int registers, packed as in memory.
11328
11329 This code is also used for function return values. RETVAL indicates
11330 whether this is the case.
11331
11332 Much of this is taken from the SPARC V9 port, which has a similar
11333 calling convention. */
11334
11335 static rtx
11336 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
11337 bool named, bool retval)
11338 {
11339 rtx rvec[FIRST_PSEUDO_REGISTER];
11340 int k = 1, kbase = 1;
11341 HOST_WIDE_INT typesize = int_size_in_bytes (type);
11342 /* This is a copy; modifications are not visible to our caller. */
11343 CUMULATIVE_ARGS copy_cum = *orig_cum;
11344 CUMULATIVE_ARGS *cum = &copy_cum;
11345
11346 /* Pad to 16 byte boundary if needed. */
11347 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11348 && (cum->words % 2) != 0)
11349 cum->words++;
11350
11351 cum->intoffset = 0;
11352 cum->use_stack = 0;
11353 cum->named = named;
11354
11355 /* Put entries into rvec[] for individual FP and vector fields, and
11356 for the chunks of memory that go in int regs. Note we start at
11357 element 1; 0 is reserved for an indication of using memory, and
11358 may or may not be filled in below. */
11359 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
11360 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
11361
11362 /* If any part of the struct went on the stack put all of it there.
11363 This hack is because the generic code for
11364 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
11365 parts of the struct are not at the beginning. */
11366 if (cum->use_stack)
11367 {
11368 if (retval)
11369 return NULL_RTX; /* doesn't go in registers at all */
11370 kbase = 0;
11371 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11372 }
11373 if (k > 1 || cum->use_stack)
11374 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
11375 else
11376 return NULL_RTX;
11377 }
11378
11379 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
11380
11381 static rtx
11382 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
11383 int align_words)
11384 {
11385 int n_units;
11386 int i, k;
11387 rtx rvec[GP_ARG_NUM_REG + 1];
11388
11389 if (align_words >= GP_ARG_NUM_REG)
11390 return NULL_RTX;
11391
11392 n_units = rs6000_arg_size (mode, type);
11393
11394 /* Optimize the simple case where the arg fits in one gpr, except in
11395 the case of BLKmode due to assign_parms assuming that registers are
11396 BITS_PER_WORD wide. */
11397 if (n_units == 0
11398 || (n_units == 1 && mode != BLKmode))
11399 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11400
11401 k = 0;
11402 if (align_words + n_units > GP_ARG_NUM_REG)
11403 /* Not all of the arg fits in gprs. Say that it goes in memory too,
11404 using a magic NULL_RTX component.
11405 This is not strictly correct. Only some of the arg belongs in
11406 memory, not all of it. However, the normal scheme using
11407 function_arg_partial_nregs can result in unusual subregs, eg.
11408 (subreg:SI (reg:DF) 4), which are not handled well. The code to
11409 store the whole arg to memory is often more efficient than code
11410 to store pieces, and we know that space is available in the right
11411 place for the whole arg. */
11412 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11413
11414 i = 0;
11415 do
11416 {
11417 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
11418 rtx off = GEN_INT (i++ * 4);
11419 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11420 }
11421 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
11422
11423 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11424 }
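
/* Worked example (editorial): -m32 -mpowerpc64, a DFmode argument
   with align_words == 7, so only r10 remains.  n_units is 2, and
   7 + 2 > GP_ARG_NUM_REG, so the result is

       (parallel:DF [(expr_list (nil) (const_int 0))
                     (expr_list (reg:SI 10) (const_int 0))])

   i.e. the first 4 bytes of the argument go in r10 and the rest
   spills to memory, signalled by the NULL_RTX element.  */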
11425
11426 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
11427 but must also be copied into the parameter save area starting at
11428 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
11429 to the GPRs and/or memory. Return the number of elements used. */
11430
11431 static int
11432 rs6000_psave_function_arg (machine_mode mode, const_tree type,
11433 int align_words, rtx *rvec)
11434 {
11435 int k = 0;
11436
11437 if (align_words < GP_ARG_NUM_REG)
11438 {
11439 int n_words = rs6000_arg_size (mode, type);
11440
11441 if (align_words + n_words > GP_ARG_NUM_REG
11442 || mode == BLKmode
11443 || (TARGET_32BIT && TARGET_POWERPC64))
11444 {
11445 /* If this is partially on the stack, then we only
11446 include the portion actually in registers here. */
11447 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11448 int i = 0;
11449
11450 if (align_words + n_words > GP_ARG_NUM_REG)
11451 {
11452 /* Not all of the arg fits in gprs. Say that it goes in memory
11453 too, using a magic NULL_RTX component. Also see comment in
11454 rs6000_mixed_function_arg for why the normal
11455 function_arg_partial_nregs scheme doesn't work in this case. */
11456 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11457 }
11458
11459 do
11460 {
11461 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11462 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
11463 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11464 }
11465 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11466 }
11467 else
11468 {
11469 /* The whole arg fits in gprs. */
11470 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11471 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
11472 }
11473 }
11474 else
11475 {
11476 /* It's entirely in memory. */
11477 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
11478 }
11479
11480 return k;
11481 }
11482
11483 /* RVEC is a vector of K components of an argument of mode MODE.
11484 Construct the final function_arg return value from it. */
11485
11486 static rtx
11487 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
11488 {
11489 gcc_assert (k >= 1);
11490
11491 /* Avoid returning a PARALLEL in the trivial cases. */
11492 if (k == 1)
11493 {
11494 if (XEXP (rvec[0], 0) == NULL_RTX)
11495 return NULL_RTX;
11496
11497 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
11498 return XEXP (rvec[0], 0);
11499 }
11500
11501 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
11502 }
11503
11504 /* Determine where to put an argument to a function.
11505 Value is zero to push the argument on the stack,
11506 or a hard register in which to store the argument.
11507
11508 MODE is the argument's machine mode.
11509 TYPE is the data type of the argument (as a tree).
11510 This is null for libcalls where that information may
11511 not be available.
11512 CUM is a variable of type CUMULATIVE_ARGS which gives info about
11513 the preceding args and about the function being called. It is
11514 not modified in this routine.
11515 NAMED is nonzero if this argument is a named parameter
11516 (otherwise it is an extra parameter matching an ellipsis).
11517
11518 On RS/6000 the first eight words of non-FP are normally in registers
11519 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
11520 Under V.4, the first 8 FP args are in registers.
11521
11522 If this is floating-point and no prototype is specified, we use
11523 both an FP and integer register (or possibly FP reg and stack). Library
11524 functions (when CALL_LIBCALL is set) always have the proper types for args,
11525 so we can pass the FP value just in one register. emit_library_call
11526 doesn't support PARALLEL anyway.
11527
11528 Note that for args passed by reference, function_arg will be called
11529 with MODE and TYPE set to that of the pointer to the arg, not the arg
11530 itself. */
11531
11532 static rtx
11533 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
11534 const_tree type, bool named)
11535 {
11536 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11537 enum rs6000_abi abi = DEFAULT_ABI;
11538 machine_mode elt_mode;
11539 int n_elts;
11540
11541 /* Return a marker to indicate whether we need to set or clear the
11542 CR1 bit that V.4 uses to say fp args were passed in registers.
11543 Assume that we don't need the marker for software floating point,
11544 or compiler generated library calls. */
11545 if (mode == VOIDmode)
11546 {
11547 if (abi == ABI_V4
11548 && (cum->call_cookie & CALL_LIBCALL) == 0
11549 && (cum->stdarg
11550 || (cum->nargs_prototype < 0
11551 && (cum->prototype || TARGET_NO_PROTOTYPE)))
11552 && TARGET_HARD_FLOAT)
11553 return GEN_INT (cum->call_cookie
11554 | ((cum->fregno == FP_ARG_MIN_REG)
11555 ? CALL_V4_SET_FP_ARGS
11556 : CALL_V4_CLEAR_FP_ARGS));
11557
11558 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
11559 }
11560
11561 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11562
11563 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11564 {
11565 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
11566 if (rslt != NULL_RTX)
11567 return rslt;
11568 /* Else fall through to usual handling. */
11569 }
11570
11571 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11572 {
11573 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11574 rtx r, off;
11575 int i, k = 0;
11576
11577 /* Do we also need to pass this argument in the parameter save area?
11578 Library support functions for IEEE 128-bit are assumed to not need the
11579 value passed both in GPRs and in vector registers. */
11580 if (TARGET_64BIT && !cum->prototype
11581 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11582 {
11583 int align_words = ROUND_UP (cum->words, 2);
11584 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11585 }
11586
11587 /* Describe where this argument goes in the vector registers. */
11588 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
11589 {
11590 r = gen_rtx_REG (elt_mode, cum->vregno + i);
11591 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11592 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11593 }
11594
11595 return rs6000_finish_function_arg (mode, rvec, k);
11596 }
11597 else if (TARGET_ALTIVEC_ABI
11598 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
11599 || (type && TREE_CODE (type) == VECTOR_TYPE
11600 && int_size_in_bytes (type) == 16)))
11601 {
11602 if (named || abi == ABI_V4)
11603 return NULL_RTX;
11604 else
11605 {
11606 /* Vector parameters to varargs functions under AIX or Darwin
11607 get passed in memory and possibly also in GPRs. */
11608 int align, align_words, n_words;
11609 machine_mode part_mode;
11610
11611 /* Vector parameters must be 16-byte aligned. In 32-bit
11612 mode this means we need to take into account the offset
11613 to the parameter save area. In 64-bit mode, they just
11614 have to start on an even word, since the parameter save
11615 area is 16-byte aligned. */
11616 if (TARGET_32BIT)
11617 align = -(rs6000_parm_offset () + cum->words) & 3;
11618 else
11619 align = cum->words & 1;
11620 align_words = cum->words + align;
11621
11622 /* Out of registers? Memory, then. */
11623 if (align_words >= GP_ARG_NUM_REG)
11624 return NULL_RTX;
11625
11626 if (TARGET_32BIT && TARGET_POWERPC64)
11627 return rs6000_mixed_function_arg (mode, type, align_words);
11628
11629 /* The vector value goes in GPRs. Only the part of the
11630 value in GPRs is reported here. */
11631 part_mode = mode;
11632 n_words = rs6000_arg_size (mode, type);
11633 if (align_words + n_words > GP_ARG_NUM_REG)
11634 /* Fortunately, there are only two possibilities, the value
11635 is either wholly in GPRs or half in GPRs and half not. */
11636 part_mode = DImode;
11637
11638 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
11639 }
11640 }
11641
11642 else if (abi == ABI_V4)
11643 {
11644 if (abi_v4_pass_in_fpr (mode, named))
11645 {
11646 /* _Decimal128 must use an even/odd register pair. This assumes
11647 that the register number is odd when fregno is odd. */
11648 if (mode == TDmode && (cum->fregno % 2) == 1)
11649 cum->fregno++;
11650
11651 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
11652 <= FP_ARG_V4_MAX_REG)
11653 return gen_rtx_REG (mode, cum->fregno);
11654 else
11655 return NULL_RTX;
11656 }
11657 else
11658 {
11659 int n_words = rs6000_arg_size (mode, type);
11660 int gregno = cum->sysv_gregno;
11661
11662 /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
11663 As does any other 2 word item such as complex int due to a
11664 historical mistake. */
11665 if (n_words == 2)
11666 gregno += (1 - gregno) & 1;
11667
11668 /* Multi-reg args are not split between registers and stack. */
11669 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
11670 return NULL_RTX;
11671
11672 if (TARGET_32BIT && TARGET_POWERPC64)
11673 return rs6000_mixed_function_arg (mode, type,
11674 gregno - GP_ARG_MIN_REG);
11675 return gen_rtx_REG (mode, gregno);
11676 }
11677 }
11678 else
11679 {
11680 int align_words = rs6000_parm_start (mode, type, cum->words);
11681
11682 /* _Decimal128 must be passed in an even/odd float register pair.
11683 This assumes that the register number is odd when fregno is odd. */
11684 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
11685 cum->fregno++;
11686
11687 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11688 && !(TARGET_AIX && !TARGET_ELF
11689 && type != NULL && AGGREGATE_TYPE_P (type)))
11690 {
11691 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
11692 rtx r, off;
11693 int i, k = 0;
11694 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11695 int fpr_words;
11696
11697 /* Do we also need to pass this argument in the parameter
11698 save area? */
11699 if (type && (cum->nargs_prototype <= 0
11700 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11701 && TARGET_XL_COMPAT
11702 && align_words >= GP_ARG_NUM_REG)))
11703 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
11704
11705 /* Describe where this argument goes in the fprs. */
11706 for (i = 0; i < n_elts
11707 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
11708 {
11709 /* Check if the argument is split over registers and memory.
11710 This can only ever happen for long double or _Decimal128;
11711 complex types are handled via split_complex_arg. */
11712 machine_mode fmode = elt_mode;
11713 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
11714 {
11715 gcc_assert (FLOAT128_2REG_P (fmode));
11716 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
11717 }
11718
11719 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
11720 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
11721 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11722 }
11723
11724 /* If there were not enough FPRs to hold the argument, the rest
11725 usually goes into memory. However, if the current position
11726 is still within the register parameter area, a portion may
11727 actually have to go into GPRs.
11728
11729 Note that it may happen that the portion of the argument
11730 passed in the first "half" of the first GPR was already
11731 passed in the last FPR as well.
11732
11733 For unnamed arguments, we already set up GPRs to cover the
11734 whole argument in rs6000_psave_function_arg, so there is
11735 nothing further to do at this point. */
11736 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
11737 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
11738 && cum->nargs_prototype > 0)
11739 {
11740 static bool warned;
11741
11742 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
11743 int n_words = rs6000_arg_size (mode, type);
11744
11745 align_words += fpr_words;
11746 n_words -= fpr_words;
11747
11748 do
11749 {
11750 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
11751 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
11752 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
11753 }
11754 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
11755
11756 if (!warned && warn_psabi)
11757 {
11758 warned = true;
11759 inform (input_location,
11760 "the ABI of passing homogeneous float aggregates"
11761 " has changed in GCC 5");
11762 }
11763 }
11764
11765 return rs6000_finish_function_arg (mode, rvec, k);
11766 }
11767 else if (align_words < GP_ARG_NUM_REG)
11768 {
11769 if (TARGET_32BIT && TARGET_POWERPC64)
11770 return rs6000_mixed_function_arg (mode, type, align_words);
11771
11772 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
11773 }
11774 else
11775 return NULL_RTX;
11776 }
11777 }
11778 \f
11779 /* For an arg passed partly in registers and partly in memory, this is
11780 the number of bytes passed in registers. For args passed entirely in
11781 registers or entirely in memory, zero. When an arg is described by a
11782 PARALLEL, perhaps using more than one register type, this function
11783 returns the number of bytes used by the first element of the PARALLEL. */
11784
11785 static int
11786 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
11787 tree type, bool named)
11788 {
11789 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
11790 bool passed_in_gprs = true;
11791 int ret = 0;
11792 int align_words;
11793 machine_mode elt_mode;
11794 int n_elts;
11795
11796 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11797
11798 if (DEFAULT_ABI == ABI_V4)
11799 return 0;
11800
11801 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11802 {
11803 /* If we are passing this arg in the fixed parameter save area (gprs or
11804 memory) as well as VRs, we do not use the partial bytes mechanism;
11805 instead, rs6000_function_arg will return a PARALLEL including a memory
11806 element as necessary. Library support functions for IEEE 128-bit are
11807 assumed to not need the value passed both in GPRs and in vector
11808 registers. */
11809 if (TARGET_64BIT && !cum->prototype
11810 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
11811 return 0;
11812
11813 /* Otherwise, we pass in VRs only. Check for partial copies. */
11814 passed_in_gprs = false;
11815 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
11816 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
11817 }
11818
11819 /* In this complicated case we just disable the partial_nregs code. */
11820 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11821 return 0;
11822
11823 align_words = rs6000_parm_start (mode, type, cum->words);
11824
11825 if (USE_FP_FOR_ARG_P (cum, elt_mode)
11826 && !(TARGET_AIX && !TARGET_ELF
11827 && type != NULL && AGGREGATE_TYPE_P (type)))
11828 {
11829 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
11830
11831 /* If we are passing this arg in the fixed parameter save area
11832 (gprs or memory) as well as FPRs, we do not use the partial
11833 bytes mechanism; instead, rs6000_function_arg will return a
11834 PARALLEL including a memory element as necessary. */
11835 if (type
11836 && (cum->nargs_prototype <= 0
11837 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11838 && TARGET_XL_COMPAT
11839 && align_words >= GP_ARG_NUM_REG)))
11840 return 0;
11841
11842 /* Otherwise, we pass in FPRs only. Check for partial copies. */
11843 passed_in_gprs = false;
11844 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
11845 {
11846 /* Compute number of bytes / words passed in FPRs. If there
11847 is still space available in the register parameter area
11848 *after* that amount, a part of the argument will be passed
11849 in GPRs. In that case, the total amount passed in any
11850 registers is equal to the amount that would have been passed
11851 in GPRs if everything were passed there, so we fall back to
11852 the GPR code below to compute the appropriate value. */
11853 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
11854 * MIN (8, GET_MODE_SIZE (elt_mode)));
11855 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
11856
11857 if (align_words + fpr_words < GP_ARG_NUM_REG)
11858 passed_in_gprs = true;
11859 else
11860 ret = fpr;
11861 }
11862 }
11863
11864 if (passed_in_gprs
11865 && align_words < GP_ARG_NUM_REG
11866 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
11867 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
11868
11869 if (ret != 0 && TARGET_DEBUG_ARG)
11870 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
11871
11872 return ret;
11873 }
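
/* Worked example (editorial): 64-bit AIX/ELFv2, a 32-byte BLKmode
   struct starting at align_words == 6.  Only r9 and r10 remain, so

       ret = (GP_ARG_NUM_REG - 6) * 8 = 16

   bytes are passed in registers and the remaining 16 bytes go in
   the parameter save area.  */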
11874 \f
11875 /* A C expression that indicates when an argument must be passed by
11876 reference. If nonzero for an argument, a copy of that argument is
11877 made in memory and a pointer to the argument is passed instead of
11878 the argument itself. The pointer is passed in whatever way is
11879 appropriate for passing a pointer to that type.
11880
11881 Under V.4, aggregates and long double are passed by reference.
11882
11883 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
11884 reference unless the AltiVec vector extension ABI is in force.
11885
11886 As an extension to all ABIs, variable sized types are passed by
11887 reference. */
11888
11889 static bool
11890 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
11891 machine_mode mode, const_tree type,
11892 bool named ATTRIBUTE_UNUSED)
11893 {
11894 if (!type)
11895 return 0;
11896
11897 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11898 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11899 {
11900 if (TARGET_DEBUG_ARG)
11901 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
11902 return 1;
11903 }
11904
11905 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
11906 {
11907 if (TARGET_DEBUG_ARG)
11908 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
11909 return 1;
11910 }
11911
11912 if (int_size_in_bytes (type) < 0)
11913 {
11914 if (TARGET_DEBUG_ARG)
11915 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
11916 return 1;
11917 }
11918
11919 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11920 modes only exist for GCC vector types if -maltivec. */
11921 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
11922 {
11923 if (TARGET_DEBUG_ARG)
11924 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
11925 return 1;
11926 }
11927
11928 /* Pass synthetic vectors in memory. */
11929 if (TREE_CODE (type) == VECTOR_TYPE
11930 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11931 {
11932 static bool warned_for_pass_big_vectors = false;
11933 if (TARGET_DEBUG_ARG)
11934 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
11935 if (!warned_for_pass_big_vectors)
11936 {
11937 warning (OPT_Wpsabi, "GCC vector passed by reference: "
11938 "non-standard ABI extension with no compatibility "
11939 "guarantee");
11940 warned_for_pass_big_vectors = true;
11941 }
11942 return 1;
11943 }
11944
11945 return 0;
11946 }
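
/* Editorial examples of the pass-by-reference rules above:

       /- ABI_V4 with -mabi=ieeelongdouble: IEEE 128-bit long double -/
       /- ABI_V4: struct { int a[4]; } or any other aggregate        -/
       /- any ABI: a variably sized type (int_size_in_bytes < 0)     -/
       /- 32-bit without -mabi=altivec: a vector int argument        -/
       /- any ABI: a 32-byte GCC synthetic vector (the -Wpsabi case) -/

   Each of these is passed as a pointer to a memory copy.  */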
11947
11948 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
11949 already processed. Return true if the parameter must be passed
11950 (fully or partially) on the stack. */
11951
11952 static bool
11953 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
11954 {
11955 machine_mode mode;
11956 int unsignedp;
11957 rtx entry_parm;
11958
11959 /* Catch errors. */
11960 if (type == NULL || type == error_mark_node)
11961 return true;
11962
11963 /* Handle types with no storage requirement. */
11964 if (TYPE_MODE (type) == VOIDmode)
11965 return false;
11966
11967 /* Handle complex types: check (and advance past) each of the two components. */
11968 if (TREE_CODE (type) == COMPLEX_TYPE)
11969 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
11970 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
11971
11972 /* Handle transparent aggregates. */
11973 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
11974 && TYPE_TRANSPARENT_AGGR (type))
11975 type = TREE_TYPE (first_field (type));
11976
11977 /* See if this arg was passed by invisible reference. */
11978 if (pass_by_reference (get_cumulative_args (args_so_far),
11979 TYPE_MODE (type), type, true))
11980 type = build_pointer_type (type);
11981
11982 /* Find mode as it is passed by the ABI. */
11983 unsignedp = TYPE_UNSIGNED (type);
11984 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
11985
11986 /* If we must pass in stack, we need a stack. */
11987 if (rs6000_must_pass_in_stack (mode, type))
11988 return true;
11989
11990 /* If there is no incoming register, we need a stack. */
11991 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
11992 if (entry_parm == NULL)
11993 return true;
11994
11995 /* Likewise if we need to pass both in registers and on the stack. */
11996 if (GET_CODE (entry_parm) == PARALLEL
11997 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
11998 return true;
11999
12000 /* Also true if we're partially in registers and partially not. */
12001 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
12002 return true;
12003
12004 /* Update info on where next arg arrives in registers. */
12005 rs6000_function_arg_advance (args_so_far, mode, type, true);
12006 return false;
12007 }
12008
12009 /* Return true if FUN has no prototype, has a variable argument
12010 list, or passes any parameter in memory. */
12011
12012 static bool
12013 rs6000_function_parms_need_stack (tree fun, bool incoming)
12014 {
12015 tree fntype, result;
12016 CUMULATIVE_ARGS args_so_far_v;
12017 cumulative_args_t args_so_far;
12018
12019 if (!fun)
12020 /* Must be a libcall, all of which only use reg parms. */
12021 return false;
12022
12023 fntype = fun;
12024 if (!TYPE_P (fun))
12025 fntype = TREE_TYPE (fun);
12026
12027 /* Varargs functions need the parameter save area. */
12028 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
12029 return true;
12030
12031 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
12032 args_so_far = pack_cumulative_args (&args_so_far_v);
12033
12034 /* When incoming, we will have been passed the function decl.
12035 It is necessary to use the decl to handle K&R style functions,
12036 where TYPE_ARG_TYPES may not be available. */
12037 if (incoming)
12038 {
12039 gcc_assert (DECL_P (fun));
12040 result = DECL_RESULT (fun);
12041 }
12042 else
12043 result = TREE_TYPE (fntype);
12044
12045 if (result && aggregate_value_p (result, fntype))
12046 {
12047 if (!TYPE_P (result))
12048 result = TREE_TYPE (result);
12049 result = build_pointer_type (result);
12050 rs6000_parm_needs_stack (args_so_far, result);
12051 }
12052
12053 if (incoming)
12054 {
12055 tree parm;
12056
12057 for (parm = DECL_ARGUMENTS (fun);
12058 parm && parm != void_list_node;
12059 parm = TREE_CHAIN (parm))
12060 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
12061 return true;
12062 }
12063 else
12064 {
12065 function_args_iterator args_iter;
12066 tree arg_type;
12067
12068 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
12069 if (rs6000_parm_needs_stack (args_so_far, arg_type))
12070 return true;
12071 }
12072
12073 return false;
12074 }
12075
12076 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
12077 usually a constant depending on the ABI. However, in the ELFv2 ABI
12078 the register parameter area is optional when calling a function that
12079 has a prototype in scope, has no variable argument list, and passes
12080 all parameters in registers. */
12081
12082 int
12083 rs6000_reg_parm_stack_space (tree fun, bool incoming)
12084 {
12085 int reg_parm_stack_space;
12086
12087 switch (DEFAULT_ABI)
12088 {
12089 default:
12090 reg_parm_stack_space = 0;
12091 break;
12092
12093 case ABI_AIX:
12094 case ABI_DARWIN:
12095 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12096 break;
12097
12098 case ABI_ELFv2:
12099 /* ??? Recomputing this every time is a bit expensive. Is there
12100 a place to cache this information? */
12101 if (rs6000_function_parms_need_stack (fun, incoming))
12102 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
12103 else
12104 reg_parm_stack_space = 0;
12105 break;
12106 }
12107
12108 return reg_parm_stack_space;
12109 }
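/* Worked example (ELFv2, 64-bit; hedged): for a call to a prototyped,
   non-varargs function whose arguments all fit in registers, such as

       int add (int a, int b);

   rs6000_function_parms_need_stack returns false and this function
   returns 0, so the caller need not allocate the 64-byte register
   parameter save area; every other ABI in the switch gets its fixed
   size unconditionally.  */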
12110
12111 static void
12112 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
12113 {
12114 int i;
12115 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
12116
12117 if (nregs == 0)
12118 return;
12119
12120 for (i = 0; i < nregs; i++)
12121 {
12122 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
12123 if (reload_completed)
12124 {
12125 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
12126 tem = NULL_RTX;
12127 else
12128 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
12129 i * GET_MODE_SIZE (reg_mode));
12130 }
12131 else
12132 tem = replace_equiv_address (tem, XEXP (tem, 0));
12133
12134 gcc_assert (tem);
12135
12136 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
12137 }
12138 }
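/* Sketch of the emitted moves: with regno == 3 and nregs == 2 on a 64-bit
   target this stores GPRs into consecutive doubleword slots of X, roughly

       std r3, 0(x)
       std r4, 8(x)

   (assembly shown only as an approximation of the RTL moves).  */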
12139 \f
12140 /* Perform any actions needed for a function that is receiving a
12141 variable number of arguments.
12142
12143 CUM is as above.
12144
12145 MODE and TYPE are the mode and type of the current parameter.
12146
12147 PRETEND_SIZE is a variable that should be set to the amount of stack
12148 that must be pushed by the prolog to pretend that our caller pushed
12149 it.
12150
12151 Normally, this macro will push all remaining incoming registers on the
12152 stack and set PRETEND_SIZE to the length of the registers pushed. */
12153
12154 static void
12155 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
12156 tree type, int *pretend_size ATTRIBUTE_UNUSED,
12157 int no_rtl)
12158 {
12159 CUMULATIVE_ARGS next_cum;
12160 int reg_size = TARGET_32BIT ? 4 : 8;
12161 rtx save_area = NULL_RTX, mem;
12162 int first_reg_offset;
12163 alias_set_type set;
12164
12165 /* Skip the last named argument. */
12166 next_cum = *get_cumulative_args (cum);
12167 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
12168
12169 if (DEFAULT_ABI == ABI_V4)
12170 {
12171 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
12172
12173 if (! no_rtl)
12174 {
12175 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
12176 HOST_WIDE_INT offset = 0;
12177
12178 /* Try to optimize the size of the varargs save area.
12179 The ABI requires that ap.reg_save_area is doubleword
12180 aligned, but we don't need to allocate space for all
12181 the bytes, only those in which we will actually save
12182 anything. */
12183 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
12184 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
12185 if (TARGET_HARD_FLOAT
12186 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12187 && cfun->va_list_fpr_size)
12188 {
12189 if (gpr_reg_num)
12190 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
12191 * UNITS_PER_FP_WORD;
12192 if (cfun->va_list_fpr_size
12193 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12194 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
12195 else
12196 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
12197 * UNITS_PER_FP_WORD;
12198 }
12199 if (gpr_reg_num)
12200 {
12201 offset = -((first_reg_offset * reg_size) & ~7);
12202 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
12203 {
12204 gpr_reg_num = cfun->va_list_gpr_size;
12205 if (reg_size == 4 && (first_reg_offset & 1))
12206 gpr_reg_num++;
12207 }
12208 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
12209 }
12210 else if (fpr_size)
12211 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
12212 * UNITS_PER_FP_WORD
12213 - (int) (GP_ARG_NUM_REG * reg_size);
12214
12215 if (gpr_size + fpr_size)
12216 {
12217 rtx reg_save_area
12218 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
12219 gcc_assert (MEM_P (reg_save_area));
12220 reg_save_area = XEXP (reg_save_area, 0);
12221 if (GET_CODE (reg_save_area) == PLUS)
12222 {
12223 gcc_assert (XEXP (reg_save_area, 0)
12224 == virtual_stack_vars_rtx);
12225 gcc_assert (CONST_INT_P (XEXP (reg_save_area, 1)));
12226 offset += INTVAL (XEXP (reg_save_area, 1));
12227 }
12228 else
12229 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
12230 }
12231
12232 cfun->machine->varargs_save_offset = offset;
12233 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
12234 }
12235 }
12236 else
12237 {
12238 first_reg_offset = next_cum.words;
12239 save_area = crtl->args.internal_arg_pointer;
12240
12241 if (targetm.calls.must_pass_in_stack (mode, type))
12242 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
12243 }
12244
12245 set = get_varargs_alias_set ();
12246 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
12247 && cfun->va_list_gpr_size)
12248 {
12249 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
12250
12251 if (va_list_gpr_counter_field)
12252 /* V4 va_list_gpr_size counts number of registers needed. */
12253 n_gpr = cfun->va_list_gpr_size;
12254 else
12255 /* char * va_list instead counts number of bytes needed. */
12256 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
12257
12258 if (nregs > n_gpr)
12259 nregs = n_gpr;
12260
12261 mem = gen_rtx_MEM (BLKmode,
12262 plus_constant (Pmode, save_area,
12263 first_reg_offset * reg_size));
12264 MEM_NOTRAP_P (mem) = 1;
12265 set_mem_alias_set (mem, set);
12266 set_mem_align (mem, BITS_PER_WORD);
12267
12268 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
12269 nregs);
12270 }
12271
12272 /* Save FP registers if needed. */
12273 if (DEFAULT_ABI == ABI_V4
12274 && TARGET_HARD_FLOAT
12275 && ! no_rtl
12276 && next_cum.fregno <= FP_ARG_V4_MAX_REG
12277 && cfun->va_list_fpr_size)
12278 {
12279 int fregno = next_cum.fregno, nregs;
12280 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
12281 rtx lab = gen_label_rtx ();
12282 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
12283 * UNITS_PER_FP_WORD);
12284
12285 emit_jump_insn
12286 (gen_rtx_SET (pc_rtx,
12287 gen_rtx_IF_THEN_ELSE (VOIDmode,
12288 gen_rtx_NE (VOIDmode, cr1,
12289 const0_rtx),
12290 gen_rtx_LABEL_REF (VOIDmode, lab),
12291 pc_rtx)));
12292
12293 for (nregs = 0;
12294 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
12295 fregno++, off += UNITS_PER_FP_WORD, nregs++)
12296 {
12297 mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
12298 plus_constant (Pmode, save_area, off));
12299 MEM_NOTRAP_P (mem) = 1;
12300 set_mem_alias_set (mem, set);
12301 set_mem_align (mem, GET_MODE_ALIGNMENT (
12302 TARGET_HARD_FLOAT ? DFmode : SFmode));
12303 emit_move_insn (mem, gen_rtx_REG (
12304 TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
12305 }
12306
12307 emit_label (lab);
12308 }
12309 }
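/* Worked example (V4, 32-bit, hard float; hypothetical signature):

       int f (int a, ...);

   The named 'a' consumes r3, so the code above dumps r4..r10 into the
   GPR part of the save area and f1..f8 into the FPR part; the FPR
   stores sit behind the CR1 test emitted above, which under the SVR4
   convention the caller uses to signal that FP values were passed in
   registers.  */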
12310
12311 /* Create the va_list data type. */
12312
12313 static tree
12314 rs6000_build_builtin_va_list (void)
12315 {
12316 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
12317
12318 /* For AIX, prefer 'char *' because that's what the system
12319 header files like. */
12320 if (DEFAULT_ABI != ABI_V4)
12321 return build_pointer_type (char_type_node);
12322
12323 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
12324 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
12325 get_identifier ("__va_list_tag"), record);
12326
12327 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
12328 unsigned_char_type_node);
12329 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
12330 unsigned_char_type_node);
12331 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
12332 every user file. */
12333 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12334 get_identifier ("reserved"), short_unsigned_type_node);
12335 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12336 get_identifier ("overflow_arg_area"),
12337 ptr_type_node);
12338 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
12339 get_identifier ("reg_save_area"),
12340 ptr_type_node);
12341
12342 va_list_gpr_counter_field = f_gpr;
12343 va_list_fpr_counter_field = f_fpr;
12344
12345 DECL_FIELD_CONTEXT (f_gpr) = record;
12346 DECL_FIELD_CONTEXT (f_fpr) = record;
12347 DECL_FIELD_CONTEXT (f_res) = record;
12348 DECL_FIELD_CONTEXT (f_ovf) = record;
12349 DECL_FIELD_CONTEXT (f_sav) = record;
12350
12351 TYPE_STUB_DECL (record) = type_decl;
12352 TYPE_NAME (record) = type_decl;
12353 TYPE_FIELDS (record) = f_gpr;
12354 DECL_CHAIN (f_gpr) = f_fpr;
12355 DECL_CHAIN (f_fpr) = f_res;
12356 DECL_CHAIN (f_res) = f_ovf;
12357 DECL_CHAIN (f_ovf) = f_sav;
12358
12359 layout_type (record);
12360
12361 /* The correct type is an array type of one element. */
12362 return build_array_type (record, build_index_type (size_zero_node));
12363 }
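/* For reference, the record built above corresponds roughly to this C
   declaration (field comments are interpretive):

       typedef struct __va_list_tag {
	 unsigned char gpr;	     // index of next free GPR slot
	 unsigned char fpr;	     // index of next free FPR slot
	 unsigned short reserved;    // explicit padding, silences -Wpadded
	 void *overflow_arg_area;    // arguments passed on the stack
	 void *reg_save_area;	     // where the prologue dumped arg regs
       } __va_list_tag;
       typedef __va_list_tag va_list[1];  */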
12364
12365 /* Implement va_start. */
12366
12367 static void
12368 rs6000_va_start (tree valist, rtx nextarg)
12369 {
12370 HOST_WIDE_INT words, n_gpr, n_fpr;
12371 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12372 tree gpr, fpr, ovf, sav, t;
12373
12374 /* Only SVR4 needs something special. */
12375 if (DEFAULT_ABI != ABI_V4)
12376 {
12377 std_expand_builtin_va_start (valist, nextarg);
12378 return;
12379 }
12380
12381 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12382 f_fpr = DECL_CHAIN (f_gpr);
12383 f_res = DECL_CHAIN (f_fpr);
12384 f_ovf = DECL_CHAIN (f_res);
12385 f_sav = DECL_CHAIN (f_ovf);
12386
12387 valist = build_simple_mem_ref (valist);
12388 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12389 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12390 f_fpr, NULL_TREE);
12391 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12392 f_ovf, NULL_TREE);
12393 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12394 f_sav, NULL_TREE);
12395
12396 /* Count number of gp and fp argument registers used. */
12397 words = crtl->args.info.words;
12398 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
12399 GP_ARG_NUM_REG);
12400 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
12401 FP_ARG_NUM_REG);
12402
12403 if (TARGET_DEBUG_ARG)
12404 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
12405 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
12406 words, n_gpr, n_fpr);
12407
12408 if (cfun->va_list_gpr_size)
12409 {
12410 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
12411 build_int_cst (NULL_TREE, n_gpr));
12412 TREE_SIDE_EFFECTS (t) = 1;
12413 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12414 }
12415
12416 if (cfun->va_list_fpr_size)
12417 {
12418 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
12419 build_int_cst (NULL_TREE, n_fpr));
12420 TREE_SIDE_EFFECTS (t) = 1;
12421 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12422
12423 #ifdef HAVE_AS_GNU_ATTRIBUTE
12424 if (call_ABI_of_interest (cfun->decl))
12425 rs6000_passes_float = true;
12426 #endif
12427 }
12428
12429 /* Find the overflow area. */
12430 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
12431 if (words != 0)
12432 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
12433 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
12434 TREE_SIDE_EFFECTS (t) = 1;
12435 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12436
12437 /* If there were no va_arg invocations, don't set up the register
12438 save area. */
12439 if (!cfun->va_list_gpr_size
12440 && !cfun->va_list_fpr_size
12441 && n_gpr < GP_ARG_NUM_REG
12442 && n_fpr < FP_ARG_V4_MAX_REG)
12443 return;
12444
12445 /* Find the register save area. */
12446 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
12447 if (cfun->machine->varargs_save_offset)
12448 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
12449 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
12450 TREE_SIDE_EFFECTS (t) = 1;
12451 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
12452 }
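/* Example (V4, 32-bit, hard float; hypothetical function):

       void f (int a, double b, ...) { va_list ap; va_start (ap, b); }

   The named args use one GPR (r3) and one FPR (f1), so va_start sets
   gpr = 1 and fpr = 1, points overflow_arg_area at the incoming stack
   arguments, and points reg_save_area at the block saved by
   setup_incoming_varargs.  */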
12453
12454 /* Implement va_arg. */
12455
12456 static tree
12457 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
12458 gimple_seq *post_p)
12459 {
12460 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
12461 tree gpr, fpr, ovf, sav, reg, t, u;
12462 int size, rsize, n_reg, sav_ofs, sav_scale;
12463 tree lab_false, lab_over, addr;
12464 int align;
12465 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
12466 int regalign = 0;
12467 gimple *stmt;
12468
12469 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
12470 {
12471 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
12472 return build_va_arg_indirect_ref (t);
12473 }
12474
12475 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
12476 earlier version of gcc, with the property that it always applied alignment
12477 adjustments to the va-args (even for zero-sized types). The cheapest way
12478 to deal with this is to replicate the effect of the part of
12479 std_gimplify_va_arg_expr that carries out the align adjust, for the case
12480 of relevance.
12481 We don't need to check for pass-by-reference because of the test above.
12482 We can return a simplified answer, since we know there's no offset to add.  */
12483
12484 if (((TARGET_MACHO
12485 && rs6000_darwin64_abi)
12486 || DEFAULT_ABI == ABI_ELFv2
12487 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
12488 && integer_zerop (TYPE_SIZE (type)))
12489 {
12490 unsigned HOST_WIDE_INT align, boundary;
12491 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
12492 align = PARM_BOUNDARY / BITS_PER_UNIT;
12493 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
12494 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
12495 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
12496 boundary /= BITS_PER_UNIT;
12497 if (boundary > align)
12498 {
12499 tree t;
12500 /* This updates arg ptr by the amount that would be necessary
12501 to align the zero-sized (but not zero-alignment) item. */
12502 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12503 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
12504 gimplify_and_add (t, pre_p);
12505
12506 t = fold_convert (sizetype, valist_tmp);
12507 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
12508 fold_convert (TREE_TYPE (valist),
12509 fold_build2 (BIT_AND_EXPR, sizetype, t,
12510 size_int (-boundary))));
12511 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
12512 gimplify_and_add (t, pre_p);
12513 }
12514 /* Since it is zero-sized there's no increment for the item itself. */
12515 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
12516 return build_va_arg_indirect_ref (valist_tmp);
12517 }
12518
12519 if (DEFAULT_ABI != ABI_V4)
12520 {
12521 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
12522 {
12523 tree elem_type = TREE_TYPE (type);
12524 machine_mode elem_mode = TYPE_MODE (elem_type);
12525 int elem_size = GET_MODE_SIZE (elem_mode);
12526
12527 if (elem_size < UNITS_PER_WORD)
12528 {
12529 tree real_part, imag_part;
12530 gimple_seq post = NULL;
12531
12532 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12533 &post);
12534 /* Copy the value into a temporary, lest the formal temporary
12535 be reused out from under us. */
12536 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
12537 gimple_seq_add_seq (pre_p, post);
12538
12539 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
12540 post_p);
12541
12542 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
12543 }
12544 }
12545
12546 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
12547 }
12548
12549 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
12550 f_fpr = DECL_CHAIN (f_gpr);
12551 f_res = DECL_CHAIN (f_fpr);
12552 f_ovf = DECL_CHAIN (f_res);
12553 f_sav = DECL_CHAIN (f_ovf);
12554
12555 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
12556 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
12557 f_fpr, NULL_TREE);
12558 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
12559 f_ovf, NULL_TREE);
12560 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
12561 f_sav, NULL_TREE);
12562
12563 size = int_size_in_bytes (type);
12564 rsize = (size + 3) / 4;
12565 int pad = 4 * rsize - size;
12566 align = 1;
12567
12568 machine_mode mode = TYPE_MODE (type);
12569 if (abi_v4_pass_in_fpr (mode, false))
12570 {
12571 /* FP args go in FP registers, if present. */
12572 reg = fpr;
12573 n_reg = (size + 7) / 8;
12574 sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
12575 sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
12576 if (mode != SFmode && mode != SDmode)
12577 align = 8;
12578 }
12579 else
12580 {
12581 /* Otherwise into GP registers. */
12582 reg = gpr;
12583 n_reg = rsize;
12584 sav_ofs = 0;
12585 sav_scale = 4;
12586 if (n_reg == 2)
12587 align = 8;
12588 }
12589
12590 /* Pull the value out of the saved registers.... */
12591
12592 lab_over = NULL;
12593 addr = create_tmp_var (ptr_type_node, "addr");
12594
12595 /* AltiVec vectors never go in registers when -mabi=altivec. */
12596 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12597 align = 16;
12598 else
12599 {
12600 lab_false = create_artificial_label (input_location);
12601 lab_over = create_artificial_label (input_location);
12602
12603 /* Long long is aligned in the registers, as is any other 2-gpr
12604 item such as complex int, due to a historical mistake.  */
12605 u = reg;
12606 if (n_reg == 2 && reg == gpr)
12607 {
12608 regalign = 1;
12609 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12610 build_int_cst (TREE_TYPE (reg), n_reg - 1));
12611 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
12612 unshare_expr (reg), u);
12613 }
12614 /* _Decimal128 is passed in even/odd fpr pairs; the stored
12615 reg number is 0 for f1, so we want to make it odd. */
12616 else if (reg == fpr && mode == TDmode)
12617 {
12618 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12619 build_int_cst (TREE_TYPE (reg), 1));
12620 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
12621 }
12622
12623 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
12624 t = build2 (GE_EXPR, boolean_type_node, u, t);
12625 u = build1 (GOTO_EXPR, void_type_node, lab_false);
12626 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
12627 gimplify_and_add (t, pre_p);
12628
12629 t = sav;
12630 if (sav_ofs)
12631 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
12632
12633 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
12634 build_int_cst (TREE_TYPE (reg), n_reg));
12635 u = fold_convert (sizetype, u);
12636 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
12637 t = fold_build_pointer_plus (t, u);
12638
12639 /* _Decimal32 varargs are located in the second word of the 64-bit
12640 FP register for 32-bit binaries. */
12641 if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
12642 t = fold_build_pointer_plus_hwi (t, size);
12643
12644 /* Args are passed right-aligned. */
12645 if (BYTES_BIG_ENDIAN)
12646 t = fold_build_pointer_plus_hwi (t, pad);
12647
12648 gimplify_assign (addr, t, pre_p);
12649
12650 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
12651
12652 stmt = gimple_build_label (lab_false);
12653 gimple_seq_add_stmt (pre_p, stmt);
12654
12655 if ((n_reg == 2 && !regalign) || n_reg > 2)
12656 {
12657 /* Ensure that we don't find any more args in regs.
12658 Alignment has taken care of the special cases.  */
12659 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
12660 }
12661 }
12662
12663 /* ... otherwise out of the overflow area. */
12664
12665 /* Care for on-stack alignment if needed. */
12666 t = ovf;
12667 if (align != 1)
12668 {
12669 t = fold_build_pointer_plus_hwi (t, align - 1);
12670 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
12671 build_int_cst (TREE_TYPE (t), -align));
12672 }
12673
12674 /* Args are passed right-aligned. */
12675 if (BYTES_BIG_ENDIAN)
12676 t = fold_build_pointer_plus_hwi (t, pad);
12677
12678 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
12679
12680 gimplify_assign (unshare_expr (addr), t, pre_p);
12681
12682 t = fold_build_pointer_plus_hwi (t, size);
12683 gimplify_assign (unshare_expr (ovf), t, pre_p);
12684
12685 if (lab_over)
12686 {
12687 stmt = gimple_build_label (lab_over);
12688 gimple_seq_add_stmt (pre_p, stmt);
12689 }
12690
12691 if (STRICT_ALIGNMENT
12692 && (TYPE_ALIGN (type)
12693 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
12694 {
12695 /* The value (of type complex double, for example) may not be
12696 aligned in memory in the saved registers, so copy via a
12697 temporary. (This is the same code as used for SPARC.) */
12698 tree tmp = create_tmp_var (type, "va_arg_tmp");
12699 tree dest_addr = build_fold_addr_expr (tmp);
12700
12701 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
12702 3, dest_addr, addr, size_int (rsize * 4));
12703 TREE_ADDRESSABLE (tmp) = 1;
12704
12705 gimplify_and_add (copy, pre_p);
12706 addr = dest_addr;
12707 }
12708
12709 addr = fold_convert (ptrtype, addr);
12710 return build_va_arg_indirect_ref (addr);
12711 }
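/* Pseudo-C sketch of the V4 code sequence gimplified above (names
   illustrative, details approximated):

       if (reg > 8 - n_reg)			 // no full register slot left
	 goto lab_false;
       addr = sav + sav_ofs + reg * sav_scale;	 // from the reg save area
       reg += n_reg;
       goto lab_over;
     lab_false:
       addr = align (ovf, align);		 // from the overflow area
       ovf = addr + size;
     lab_over:
       return *(type *) addr;  */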
12712
12713 /* Builtins. */
12714
12715 static void
12716 def_builtin (const char *name, tree type, enum rs6000_builtins code)
12717 {
12718 tree t;
12719 unsigned classify = rs6000_builtin_info[(int)code].attr;
12720 const char *attr_string = "";
12721
12722 gcc_assert (name != NULL);
12723 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
12724
12725 if (rs6000_builtin_decls[(int)code])
12726 fatal_error (input_location,
12727 "internal error: builtin function %qs already processed",
12728 name);
12729
12730 rs6000_builtin_decls[(int)code] = t =
12731 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
12732
12733 /* Set any special attributes. */
12734 if ((classify & RS6000_BTC_CONST) != 0)
12735 {
12736 /* const function: the result depends only on the inputs. */
12737 TREE_READONLY (t) = 1;
12738 TREE_NOTHROW (t) = 1;
12739 attr_string = ", const";
12740 }
12741 else if ((classify & RS6000_BTC_PURE) != 0)
12742 {
12743 /* pure function: it can read global memory, but does not set any
12744 external state. */
12745 DECL_PURE_P (t) = 1;
12746 TREE_NOTHROW (t) = 1;
12747 attr_string = ", pure";
12748 }
12749 else if ((classify & RS6000_BTC_FP) != 0)
12750 {
12751 /* Function is a math function.  If -frounding-math is in effect, treat the
12752 function as not reading global memory, but it can have arbitrary side
12753 effects. If it is off, then assume the function is a const function.
12754 This mimics the ATTR_MATHFN_FPROUNDING attribute in
12755 builtin-attribute.def that is used for the math functions. */
12756 TREE_NOTHROW (t) = 1;
12757 if (flag_rounding_math)
12758 {
12759 DECL_PURE_P (t) = 1;
12760 DECL_IS_NOVOPS (t) = 1;
12761 attr_string = ", fp, pure";
12762 }
12763 else
12764 {
12765 TREE_READONLY (t) = 1;
12766 attr_string = ", fp, const";
12767 }
12768 }
12769 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
12770 gcc_unreachable ();
12771
12772 if (TARGET_DEBUG_BUILTIN)
12773 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
12774 (int)code, name, attr_string);
12775 }
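/* Typical use during builtin initialization (sketch; the type variable
   name is illustrative):

       def_builtin ("__builtin_altivec_vspltisb", v16qi_ftype_int,
		    ALTIVEC_BUILTIN_VSPLTISB);

   which registers the decl with the middle end, stores it in
   rs6000_builtin_decls, and applies const/pure/fp attributes from the
   builtin's RS6000_BTC_* classification.  */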
12776
12777 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
12778
12779 #undef RS6000_BUILTIN_0
12780 #undef RS6000_BUILTIN_1
12781 #undef RS6000_BUILTIN_2
12782 #undef RS6000_BUILTIN_3
12783 #undef RS6000_BUILTIN_A
12784 #undef RS6000_BUILTIN_D
12785 #undef RS6000_BUILTIN_H
12786 #undef RS6000_BUILTIN_P
12787 #undef RS6000_BUILTIN_X
12788
12789 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12790 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12791 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12792 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
12793 { MASK, ICODE, NAME, ENUM },
12794
12795 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12796 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12797 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12798 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12799 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12800
12801 static const struct builtin_description bdesc_3arg[] =
12802 {
12803 #include "rs6000-builtin.def"
12804 };
12805
12806 /* DST operations: void foo (void *, const int, const char). */
12807
12808 #undef RS6000_BUILTIN_0
12809 #undef RS6000_BUILTIN_1
12810 #undef RS6000_BUILTIN_2
12811 #undef RS6000_BUILTIN_3
12812 #undef RS6000_BUILTIN_A
12813 #undef RS6000_BUILTIN_D
12814 #undef RS6000_BUILTIN_H
12815 #undef RS6000_BUILTIN_P
12816 #undef RS6000_BUILTIN_X
12817
12818 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12819 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12820 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12821 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12822 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12823 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
12824 { MASK, ICODE, NAME, ENUM },
12825
12826 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12827 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12828 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12829
12830 static const struct builtin_description bdesc_dst[] =
12831 {
12832 #include "rs6000-builtin.def"
12833 };
12834
12835 /* Simple binary operations: VECc = foo (VECa, VECb). */
12836
12837 #undef RS6000_BUILTIN_0
12838 #undef RS6000_BUILTIN_1
12839 #undef RS6000_BUILTIN_2
12840 #undef RS6000_BUILTIN_3
12841 #undef RS6000_BUILTIN_A
12842 #undef RS6000_BUILTIN_D
12843 #undef RS6000_BUILTIN_H
12844 #undef RS6000_BUILTIN_P
12845 #undef RS6000_BUILTIN_X
12846
12847 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12848 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12849 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
12850 { MASK, ICODE, NAME, ENUM },
12851
12852 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12853 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12854 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12855 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12856 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12857 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12858
12859 static const struct builtin_description bdesc_2arg[] =
12860 {
12861 #include "rs6000-builtin.def"
12862 };
12863
12864 #undef RS6000_BUILTIN_0
12865 #undef RS6000_BUILTIN_1
12866 #undef RS6000_BUILTIN_2
12867 #undef RS6000_BUILTIN_3
12868 #undef RS6000_BUILTIN_A
12869 #undef RS6000_BUILTIN_D
12870 #undef RS6000_BUILTIN_H
12871 #undef RS6000_BUILTIN_P
12872 #undef RS6000_BUILTIN_X
12873
12874 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12875 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12876 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12877 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12878 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12879 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12880 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12881 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
12882 { MASK, ICODE, NAME, ENUM },
12883
12884 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12885
12886 /* AltiVec predicates. */
12887
12888 static const struct builtin_description bdesc_altivec_preds[] =
12889 {
12890 #include "rs6000-builtin.def"
12891 };
12892
12893 /* ABS* operations. */
12894
12895 #undef RS6000_BUILTIN_0
12896 #undef RS6000_BUILTIN_1
12897 #undef RS6000_BUILTIN_2
12898 #undef RS6000_BUILTIN_3
12899 #undef RS6000_BUILTIN_A
12900 #undef RS6000_BUILTIN_D
12901 #undef RS6000_BUILTIN_H
12902 #undef RS6000_BUILTIN_P
12903 #undef RS6000_BUILTIN_X
12904
12905 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12906 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12907 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12908 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12909 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
12910 { MASK, ICODE, NAME, ENUM },
12911
12912 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12913 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12914 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12915 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12916
12917 static const struct builtin_description bdesc_abs[] =
12918 {
12919 #include "rs6000-builtin.def"
12920 };
12921
12922 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
12923 foo (VECa). */
12924
12925 #undef RS6000_BUILTIN_0
12926 #undef RS6000_BUILTIN_1
12927 #undef RS6000_BUILTIN_2
12928 #undef RS6000_BUILTIN_3
12929 #undef RS6000_BUILTIN_A
12930 #undef RS6000_BUILTIN_D
12931 #undef RS6000_BUILTIN_H
12932 #undef RS6000_BUILTIN_P
12933 #undef RS6000_BUILTIN_X
12934
12935 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12936 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
12937 { MASK, ICODE, NAME, ENUM },
12938
12939 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12940 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12941 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12942 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12943 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12944 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12945 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12946
12947 static const struct builtin_description bdesc_1arg[] =
12948 {
12949 #include "rs6000-builtin.def"
12950 };
12951
12952 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
12953
12954 #undef RS6000_BUILTIN_0
12955 #undef RS6000_BUILTIN_1
12956 #undef RS6000_BUILTIN_2
12957 #undef RS6000_BUILTIN_3
12958 #undef RS6000_BUILTIN_A
12959 #undef RS6000_BUILTIN_D
12960 #undef RS6000_BUILTIN_H
12961 #undef RS6000_BUILTIN_P
12962 #undef RS6000_BUILTIN_X
12963
12964 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
12965 { MASK, ICODE, NAME, ENUM },
12966
12967 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12968 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12969 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12970 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12971 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12972 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
12973 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
12974 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
12975
12976 static const struct builtin_description bdesc_0arg[] =
12977 {
12978 #include "rs6000-builtin.def"
12979 };
12980
12981 /* HTM builtins. */
12982 #undef RS6000_BUILTIN_0
12983 #undef RS6000_BUILTIN_1
12984 #undef RS6000_BUILTIN_2
12985 #undef RS6000_BUILTIN_3
12986 #undef RS6000_BUILTIN_A
12987 #undef RS6000_BUILTIN_D
12988 #undef RS6000_BUILTIN_H
12989 #undef RS6000_BUILTIN_P
12990 #undef RS6000_BUILTIN_X
12991
12992 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
12993 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
12994 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
12995 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
12996 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
12997 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
12998 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
12999 { MASK, ICODE, NAME, ENUM },
13000
13001 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13002 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13003
13004 static const struct builtin_description bdesc_htm[] =
13005 {
13006 #include "rs6000-builtin.def"
13007 };
13008
13009 #undef RS6000_BUILTIN_0
13010 #undef RS6000_BUILTIN_1
13011 #undef RS6000_BUILTIN_2
13012 #undef RS6000_BUILTIN_3
13013 #undef RS6000_BUILTIN_A
13014 #undef RS6000_BUILTIN_D
13015 #undef RS6000_BUILTIN_H
13016 #undef RS6000_BUILTIN_P
13017
13018 /* Return true if a builtin function is overloaded. */
13019 bool
13020 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
13021 {
13022 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
13023 }
13024
13025 const char *
13026 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
13027 {
13028 return rs6000_builtin_info[(int)fncode].name;
13029 }
13030
13031 /* Expand an expression EXP that calls a builtin without arguments. */
13032 static rtx
13033 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
13034 {
13035 rtx pat;
13036 machine_mode tmode = insn_data[icode].operand[0].mode;
13037
13038 if (icode == CODE_FOR_nothing)
13039 /* Builtin not supported on this processor. */
13040 return 0;
13041
13042 if (icode == CODE_FOR_rs6000_mffsl
13043 && rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13044 {
13045 error ("%<__builtin_mffsl%> not supported with %<-msoft-float%>");
13046 return const0_rtx;
13047 }
13048
13049 if (target == 0
13050 || GET_MODE (target) != tmode
13051 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13052 target = gen_reg_rtx (tmode);
13053
13054 pat = GEN_FCN (icode) (target);
13055 if (! pat)
13056 return 0;
13057 emit_insn (pat);
13058
13059 return target;
13060 }
13061
13062
13063 static rtx
13064 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
13065 {
13066 rtx pat;
13067 tree arg0 = CALL_EXPR_ARG (exp, 0);
13068 tree arg1 = CALL_EXPR_ARG (exp, 1);
13069 rtx op0 = expand_normal (arg0);
13070 rtx op1 = expand_normal (arg1);
13071 machine_mode mode0 = insn_data[icode].operand[0].mode;
13072 machine_mode mode1 = insn_data[icode].operand[1].mode;
13073
13074 if (icode == CODE_FOR_nothing)
13075 /* Builtin not supported on this processor. */
13076 return 0;
13077
13078 /* If we got invalid arguments bail out before generating bad rtl. */
13079 if (arg0 == error_mark_node || arg1 == error_mark_node)
13080 return const0_rtx;
13081
13082 if (!CONST_INT_P (op0)
13083 || INTVAL (op0) > 255
13084 || INTVAL (op0) < 0)
13085 {
13086 error ("argument 1 must be an 8-bit field value");
13087 return const0_rtx;
13088 }
13089
13090 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13091 op0 = copy_to_mode_reg (mode0, op0);
13092
13093 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13094 op1 = copy_to_mode_reg (mode1, op1);
13095
13096 pat = GEN_FCN (icode) (op0, op1);
13097 if (!pat)
13098 return const0_rtx;
13099 emit_insn (pat);
13100
13101 return NULL_RTX;
13102 }
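/* Source-level view of what is being expanded (hedged):

       __builtin_mtfsf (0xff, d);   // arg 1: 8-bit FPSCR field mask

   Argument 1 must be a compile-time constant in [0, 255], as checked
   above; argument 2 supplies the value moved into the selected FPSCR
   fields.  */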
13103
13104 static rtx
13105 rs6000_expand_mtfsb_builtin (enum insn_code icode, tree exp)
13106 {
13107 rtx pat;
13108 tree arg0 = CALL_EXPR_ARG (exp, 0);
13109 rtx op0 = expand_normal (arg0);
13110
13111 if (icode == CODE_FOR_nothing)
13112 /* Builtin not supported on this processor. */
13113 return 0;
13114
13115 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13116 {
13117 error ("%<__builtin_mtfsb0%> and %<__builtin_mtfsb1%> not supported with "
13118 "%<-msoft-float%>");
13119 return const0_rtx;
13120 }
13121
13122 /* If we got invalid arguments bail out before generating bad rtl. */
13123 if (arg0 == error_mark_node)
13124 return const0_rtx;
13125
13126 /* Only allow bit numbers 0 to 31. */
13127 if (!u5bit_cint_operand (op0, VOIDmode))
13128 {
13129 error ("Argument must be a constant between 0 and 31.");
13130 return const0_rtx;
13131 }
13132
13133 pat = GEN_FCN (icode) (op0);
13134 if (!pat)
13135 return const0_rtx;
13136 emit_insn (pat);
13137
13138 return NULL_RTX;
13139 }
13140
13141 static rtx
13142 rs6000_expand_set_fpscr_rn_builtin (enum insn_code icode, tree exp)
13143 {
13144 rtx pat;
13145 tree arg0 = CALL_EXPR_ARG (exp, 0);
13146 rtx op0 = expand_normal (arg0);
13147 machine_mode mode0 = insn_data[icode].operand[0].mode;
13148
13149 if (icode == CODE_FOR_nothing)
13150 /* Builtin not supported on this processor. */
13151 return 0;
13152
13153 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13154 {
13155 error ("%<__builtin_set_fpscr_rn%> not supported with %<-msoft-float%>");
13156 return const0_rtx;
13157 }
13158
13159 /* If we got invalid arguments bail out before generating bad rtl. */
13160 if (arg0 == error_mark_node)
13161 return const0_rtx;
13162
13163 /* If the argument is a constant, check the range. Argument can only be a
13164 2-bit value.  Unfortunately, we can't check the range of the value at
13165 compile time if the argument is a variable. The least significant two
13166 bits of the argument, regardless of type, are used to set the rounding
13167 mode. All other bits are ignored. */
13168 if (CONST_INT_P (op0) && !const_0_to_3_operand (op0, VOIDmode))
13169 {
13170 error ("Argument must be a value between 0 and 3.");
13171 return const0_rtx;
13172 }
13173
13174 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13175 op0 = copy_to_mode_reg (mode0, op0);
13176
13177 pat = GEN_FCN (icode) (op0);
13178 if (!pat)
13179 return const0_rtx;
13180 emit_insn (pat);
13181
13182 return NULL_RTX;
13183 }
13184 static rtx
13185 rs6000_expand_set_fpscr_drn_builtin (enum insn_code icode, tree exp)
13186 {
13187 rtx pat;
13188 tree arg0 = CALL_EXPR_ARG (exp, 0);
13189 rtx op0 = expand_normal (arg0);
13190 machine_mode mode0 = insn_data[icode].operand[0].mode;
13191
13192 if (TARGET_32BIT)
13193 /* Builtin not supported in 32-bit mode. */
13194 fatal_error (input_location,
13195 "%<__builtin_set_fpscr_drn%> is not supported "
13196 "in 32-bit mode.");
13197
13198 if (rs6000_isa_flags & OPTION_MASK_SOFT_FLOAT)
13199 {
13200 error ("%<__builtin_set_fpscr_drn%> not supported with %<-msoft-float%>");
13201 return const0_rtx;
13202 }
13203
13204 if (icode == CODE_FOR_nothing)
13205 /* Builtin not supported on this processor. */
13206 return 0;
13207
13208 /* If we got invalid arguments bail out before generating bad rtl. */
13209 if (arg0 == error_mark_node)
13210 return const0_rtx;
13211
13212 /* If the argument is a constant, check the range.  Argument can only be a
13213 3-bit value.  Unfortunately, we can't check the range of the value at
13214 compile time if the argument is a variable.  The least significant three
13215 bits of the argument, regardless of type, are used to set the rounding
13216 mode. All other bits are ignored. */
13217 if (CONST_INT_P (op0) && !const_0_to_7_operand (op0, VOIDmode))
13218 {
13219 error ("Argument must be a value between 0 and 7.");
13220 return const0_rtx;
13221 }
13222
13223 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13224 op0 = copy_to_mode_reg (mode0, op0);
13225
13226 pat = GEN_FCN (icode) (op0);
13227 if (! pat)
13228 return const0_rtx;
13229 emit_insn (pat);
13230
13231 return NULL_RTX;
13232 }
13233
13234 static rtx
13235 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
13236 {
13237 rtx pat;
13238 tree arg0 = CALL_EXPR_ARG (exp, 0);
13239 rtx op0 = expand_normal (arg0);
13240 machine_mode tmode = insn_data[icode].operand[0].mode;
13241 machine_mode mode0 = insn_data[icode].operand[1].mode;
13242
13243 if (icode == CODE_FOR_nothing)
13244 /* Builtin not supported on this processor. */
13245 return 0;
13246
13247 /* If we got invalid arguments bail out before generating bad rtl. */
13248 if (arg0 == error_mark_node)
13249 return const0_rtx;
13250
13251 if (icode == CODE_FOR_altivec_vspltisb
13252 || icode == CODE_FOR_altivec_vspltish
13253 || icode == CODE_FOR_altivec_vspltisw)
13254 {
13255 /* Only allow 5-bit *signed* literals. */
13256 if (!CONST_INT_P (op0)
13257 || INTVAL (op0) > 15
13258 || INTVAL (op0) < -16)
13259 {
13260 error ("argument 1 must be a 5-bit signed literal");
13261 return CONST0_RTX (tmode);
13262 }
13263 }
13264
13265 if (target == 0
13266 || GET_MODE (target) != tmode
13267 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13268 target = gen_reg_rtx (tmode);
13269
13270 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13271 op0 = copy_to_mode_reg (mode0, op0);
13272
13273 pat = GEN_FCN (icode) (target, op0);
13274 if (! pat)
13275 return 0;
13276 emit_insn (pat);
13277
13278 return target;
13279 }
13280
13281 static rtx
13282 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
13283 {
13284 rtx pat, scratch1, scratch2;
13285 tree arg0 = CALL_EXPR_ARG (exp, 0);
13286 rtx op0 = expand_normal (arg0);
13287 machine_mode tmode = insn_data[icode].operand[0].mode;
13288 machine_mode mode0 = insn_data[icode].operand[1].mode;
13289
13290 /* If we have invalid arguments, bail out before generating bad rtl. */
13291 if (arg0 == error_mark_node)
13292 return const0_rtx;
13293
13294 if (target == 0
13295 || GET_MODE (target) != tmode
13296 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13297 target = gen_reg_rtx (tmode);
13298
13299 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13300 op0 = copy_to_mode_reg (mode0, op0);
13301
13302 scratch1 = gen_reg_rtx (mode0);
13303 scratch2 = gen_reg_rtx (mode0);
13304
13305 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
13306 if (! pat)
13307 return 0;
13308 emit_insn (pat);
13309
13310 return target;
13311 }
13312
13313 static rtx
13314 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
13315 {
13316 rtx pat;
13317 tree arg0 = CALL_EXPR_ARG (exp, 0);
13318 tree arg1 = CALL_EXPR_ARG (exp, 1);
13319 rtx op0 = expand_normal (arg0);
13320 rtx op1 = expand_normal (arg1);
13321 machine_mode tmode = insn_data[icode].operand[0].mode;
13322 machine_mode mode0 = insn_data[icode].operand[1].mode;
13323 machine_mode mode1 = insn_data[icode].operand[2].mode;
13324
13325 if (icode == CODE_FOR_nothing)
13326 /* Builtin not supported on this processor. */
13327 return 0;
13328
13329 /* If we got invalid arguments bail out before generating bad rtl. */
13330 if (arg0 == error_mark_node || arg1 == error_mark_node)
13331 return const0_rtx;
13332
13333 if (icode == CODE_FOR_unpackv1ti
13334 || icode == CODE_FOR_unpackkf
13335 || icode == CODE_FOR_unpacktf
13336 || icode == CODE_FOR_unpackif
13337 || icode == CODE_FOR_unpacktd)
13338 {
13339 /* Only allow 1-bit unsigned literals. */
13340 STRIP_NOPS (arg1);
13341 if (TREE_CODE (arg1) != INTEGER_CST
13342 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
13343 {
13344 error ("argument 2 must be a 1-bit unsigned literal");
13345 return CONST0_RTX (tmode);
13346 }
13347 }
13348 else if (icode == CODE_FOR_altivec_vspltw)
13349 {
13350 /* Only allow 2-bit unsigned literals. */
13351 STRIP_NOPS (arg1);
13352 if (TREE_CODE (arg1) != INTEGER_CST
13353 || TREE_INT_CST_LOW (arg1) & ~3)
13354 {
13355 error ("argument 2 must be a 2-bit unsigned literal");
13356 return CONST0_RTX (tmode);
13357 }
13358 }
13359 else if (icode == CODE_FOR_altivec_vsplth)
13360 {
13361 /* Only allow 3-bit unsigned literals. */
13362 STRIP_NOPS (arg1);
13363 if (TREE_CODE (arg1) != INTEGER_CST
13364 || TREE_INT_CST_LOW (arg1) & ~7)
13365 {
13366 error ("argument 2 must be a 3-bit unsigned literal");
13367 return CONST0_RTX (tmode);
13368 }
13369 }
13370 else if (icode == CODE_FOR_altivec_vspltb)
13371 {
13372 /* Only allow 4-bit unsigned literals. */
13373 STRIP_NOPS (arg1);
13374 if (TREE_CODE (arg1) != INTEGER_CST
13375 || TREE_INT_CST_LOW (arg1) & ~15)
13376 {
13377 error ("argument 2 must be a 4-bit unsigned literal");
13378 return CONST0_RTX (tmode);
13379 }
13380 }
13381 else if (icode == CODE_FOR_altivec_vcfux
13382 || icode == CODE_FOR_altivec_vcfsx
13383 || icode == CODE_FOR_altivec_vctsxs
13384 || icode == CODE_FOR_altivec_vctuxs)
13385 {
13386 /* Only allow 5-bit unsigned literals. */
13387 STRIP_NOPS (arg1);
13388 if (TREE_CODE (arg1) != INTEGER_CST
13389 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13390 {
13391 error ("argument 2 must be a 5-bit unsigned literal");
13392 return CONST0_RTX (tmode);
13393 }
13394 }
13395 else if (icode == CODE_FOR_dfptstsfi_eq_dd
13396 || icode == CODE_FOR_dfptstsfi_lt_dd
13397 || icode == CODE_FOR_dfptstsfi_gt_dd
13398 || icode == CODE_FOR_dfptstsfi_unordered_dd
13399 || icode == CODE_FOR_dfptstsfi_eq_td
13400 || icode == CODE_FOR_dfptstsfi_lt_td
13401 || icode == CODE_FOR_dfptstsfi_gt_td
13402 || icode == CODE_FOR_dfptstsfi_unordered_td)
13403 {
13404 /* Only allow 6-bit unsigned literals. */
13405 STRIP_NOPS (arg0);
13406 if (TREE_CODE (arg0) != INTEGER_CST
13407 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
13408 {
13409 error ("argument 1 must be a 6-bit unsigned literal");
13410 return CONST0_RTX (tmode);
13411 }
13412 }
13413 else if (icode == CODE_FOR_xststdcqp_kf
13414 || icode == CODE_FOR_xststdcqp_tf
13415 || icode == CODE_FOR_xststdcdp
13416 || icode == CODE_FOR_xststdcsp
13417 || icode == CODE_FOR_xvtstdcdp
13418 || icode == CODE_FOR_xvtstdcsp)
13419 {
13420 /* Only allow 7-bit unsigned literals. */
13421 STRIP_NOPS (arg1);
13422 if (TREE_CODE (arg1) != INTEGER_CST
13423 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
13424 {
13425 error ("argument 2 must be a 7-bit unsigned literal");
13426 return CONST0_RTX (tmode);
13427 }
13428 }
13429
13430 if (target == 0
13431 || GET_MODE (target) != tmode
13432 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13433 target = gen_reg_rtx (tmode);
13434
13435 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13436 op0 = copy_to_mode_reg (mode0, op0);
13437 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13438 op1 = copy_to_mode_reg (mode1, op1);
13439
13440 pat = GEN_FCN (icode) (target, op0, op1);
13441 if (! pat)
13442 return 0;
13443 emit_insn (pat);
13444
13445 return target;
13446 }
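/* Example of the literal checks above (illustrative):

       vector int s = __builtin_altivec_vspltw (v, 2);   // ok: fits 2 bits
       vector int t = __builtin_altivec_vspltw (v, 7);   // rejected above

   The immediate is encoded directly in the instruction, so it must be
   a literal that fits the field.  */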
13447
13448 static rtx
13449 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13450 {
13451 rtx pat, scratch;
13452 tree cr6_form = CALL_EXPR_ARG (exp, 0);
13453 tree arg0 = CALL_EXPR_ARG (exp, 1);
13454 tree arg1 = CALL_EXPR_ARG (exp, 2);
13455 rtx op0 = expand_normal (arg0);
13456 rtx op1 = expand_normal (arg1);
13457 machine_mode tmode = SImode;
13458 machine_mode mode0 = insn_data[icode].operand[1].mode;
13459 machine_mode mode1 = insn_data[icode].operand[2].mode;
13460 int cr6_form_int;
13461
13462 if (TREE_CODE (cr6_form) != INTEGER_CST)
13463 {
13464 error ("argument 1 of %qs must be a constant",
13465 "__builtin_altivec_predicate");
13466 return const0_rtx;
13467 }
13468 else
13469 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
13470
13471 gcc_assert (mode0 == mode1);
13472
13473 /* If we have invalid arguments, bail out before generating bad rtl. */
13474 if (arg0 == error_mark_node || arg1 == error_mark_node)
13475 return const0_rtx;
13476
13477 if (target == 0
13478 || GET_MODE (target) != tmode
13479 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13480 target = gen_reg_rtx (tmode);
13481
13482 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13483 op0 = copy_to_mode_reg (mode0, op0);
13484 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13485 op1 = copy_to_mode_reg (mode1, op1);
13486
13487 /* Note that for many of the relevant operations (e.g. cmpne or
13488 cmpeq) with float or double operands, it makes more sense for the
13489 mode of the allocated scratch register to select a vector of
13490 integer. But the choice to copy the mode of operand 0 was made
13491 long ago and there are no plans to change it. */
13492 scratch = gen_reg_rtx (mode0);
13493
13494 pat = GEN_FCN (icode) (scratch, op0, op1);
13495 if (! pat)
13496 return 0;
13497 emit_insn (pat);
13498
13499 /* The vec_any* and vec_all* predicates use the same opcodes for two
13500 different operations, but the bits in CR6 will be different
13501 depending on what information we want. So we have to play tricks
13502 with CR6 to get the right bits out.
13503
13504 If you think this is disgusting, look at the specs for the
13505 AltiVec predicates. */
13506
13507 switch (cr6_form_int)
13508 {
13509 case 0:
13510 emit_insn (gen_cr6_test_for_zero (target));
13511 break;
13512 case 1:
13513 emit_insn (gen_cr6_test_for_zero_reverse (target));
13514 break;
13515 case 2:
13516 emit_insn (gen_cr6_test_for_lt (target));
13517 break;
13518 case 3:
13519 emit_insn (gen_cr6_test_for_lt_reverse (target));
13520 break;
13521 default:
13522 error ("argument 1 of %qs is out of range",
13523 "__builtin_altivec_predicate");
13524 break;
13525 }
13526
13527 return target;
13528 }
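/* Sketch of how the CR6 forms are used (mapping interpretive): the "all"
   and "any" AltiVec predicates expand to the same vector compare; only
   the trailing CR6 extraction differs, chosen by the constant first
   argument 0..3 handled in the switch above, e.g.

       __builtin_altivec_predicate (0, a, b)   // one of the four CR6 tests

   with the scratch register receiving the otherwise unused vector
   result.  */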
13529
13530 rtx
13531 swap_endian_selector_for_mode (machine_mode mode)
13532 {
13533 unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
13534 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
13535 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
13536 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
13537
13538 unsigned int *swaparray, i;
13539 rtx perm[16];
13540
13541 switch (mode)
13542 {
13543 case E_V1TImode:
13544 swaparray = swap1;
13545 break;
13546 case E_V2DFmode:
13547 case E_V2DImode:
13548 swaparray = swap2;
13549 break;
13550 case E_V4SFmode:
13551 case E_V4SImode:
13552 swaparray = swap4;
13553 break;
13554 case E_V8HImode:
13555 swaparray = swap8;
13556 break;
13557 default:
13558 gcc_unreachable ();
13559 }
13560
13561 for (i = 0; i < 16; ++i)
13562 perm[i] = GEN_INT (swaparray[i]);
13563
13564 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
13565 gen_rtvec_v (16, perm)));
13566 }
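/* Example: for E_V4SImode this builds the vperm selector

       {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}

   which reverses the bytes within each 32-bit element, i.e. swaps the
   endianness of every element of the vector.  */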
13567
13568 static rtx
13569 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
13570 {
13571 rtx pat, addr;
13572 tree arg0 = CALL_EXPR_ARG (exp, 0);
13573 tree arg1 = CALL_EXPR_ARG (exp, 1);
13574 machine_mode tmode = insn_data[icode].operand[0].mode;
13575 machine_mode mode0 = Pmode;
13576 machine_mode mode1 = Pmode;
13577 rtx op0 = expand_normal (arg0);
13578 rtx op1 = expand_normal (arg1);
13579
13580 if (icode == CODE_FOR_nothing)
13581 /* Builtin not supported on this processor. */
13582 return 0;
13583
13584 /* If we got invalid arguments bail out before generating bad rtl. */
13585 if (arg0 == error_mark_node || arg1 == error_mark_node)
13586 return const0_rtx;
13587
13588 if (target == 0
13589 || GET_MODE (target) != tmode
13590 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13591 target = gen_reg_rtx (tmode);
13592
13593 op1 = copy_to_mode_reg (mode1, op1);
13594
13595 /* For LVX, express the RTL accurately by ANDing the address with -16.
13596 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
13597 so the raw address is fine. */
13598 if (icode == CODE_FOR_altivec_lvx_v1ti
13599 || icode == CODE_FOR_altivec_lvx_v2df
13600 || icode == CODE_FOR_altivec_lvx_v2di
13601 || icode == CODE_FOR_altivec_lvx_v4sf
13602 || icode == CODE_FOR_altivec_lvx_v4si
13603 || icode == CODE_FOR_altivec_lvx_v8hi
13604 || icode == CODE_FOR_altivec_lvx_v16qi)
13605 {
13606 rtx rawaddr;
13607 if (op0 == const0_rtx)
13608 rawaddr = op1;
13609 else
13610 {
13611 op0 = copy_to_mode_reg (mode0, op0);
13612 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
13613 }
13614 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13615 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
13616
13617 emit_insn (gen_rtx_SET (target, addr));
13618 }
13619 else
13620 {
13621 if (op0 == const0_rtx)
13622 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
13623 else
13624 {
13625 op0 = copy_to_mode_reg (mode0, op0);
13626 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
13627 gen_rtx_PLUS (Pmode, op1, op0));
13628 }
13629
13630 pat = GEN_FCN (icode) (target, addr);
13631 if (! pat)
13632 return 0;
13633 emit_insn (pat);
13634 }
13635
13636 return target;
13637 }
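/* Illustrative effect of the LVX path above: the hardware ignores the
   low four bits of the effective address, so the RTL makes that
   explicit, roughly

       target = *(vector T *) ((op1 + op0) & -16);

   instead of hiding the 16-byte truncation inside an UNSPEC.  */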
13638
13639 static rtx
13640 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
13641 {
13642 rtx pat;
13643 tree arg0 = CALL_EXPR_ARG (exp, 0);
13644 tree arg1 = CALL_EXPR_ARG (exp, 1);
13645 tree arg2 = CALL_EXPR_ARG (exp, 2);
13646 rtx op0 = expand_normal (arg0);
13647 rtx op1 = expand_normal (arg1);
13648 rtx op2 = expand_normal (arg2);
13649 machine_mode mode0 = insn_data[icode].operand[0].mode;
13650 machine_mode mode1 = insn_data[icode].operand[1].mode;
13651 machine_mode mode2 = insn_data[icode].operand[2].mode;
13652
13653 if (icode == CODE_FOR_nothing)
13654 /* Builtin not supported on this processor. */
13655 return NULL_RTX;
13656
13657 /* If we got invalid arguments bail out before generating bad rtl. */
13658 if (arg0 == error_mark_node
13659 || arg1 == error_mark_node
13660 || arg2 == error_mark_node)
13661 return NULL_RTX;
13662
13663 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13664 op0 = copy_to_mode_reg (mode0, op0);
13665 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13666 op1 = copy_to_mode_reg (mode1, op1);
13667 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13668 op2 = copy_to_mode_reg (mode2, op2);
13669
13670 pat = GEN_FCN (icode) (op0, op1, op2);
13671 if (pat)
13672 emit_insn (pat);
13673
13674 return NULL_RTX;
13675 }
13676
13677 static rtx
13678 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
13679 {
13680 tree arg0 = CALL_EXPR_ARG (exp, 0);
13681 tree arg1 = CALL_EXPR_ARG (exp, 1);
13682 tree arg2 = CALL_EXPR_ARG (exp, 2);
13683 rtx op0 = expand_normal (arg0);
13684 rtx op1 = expand_normal (arg1);
13685 rtx op2 = expand_normal (arg2);
13686 rtx pat, addr, rawaddr;
13687 machine_mode tmode = insn_data[icode].operand[0].mode;
13688 machine_mode smode = insn_data[icode].operand[1].mode;
13689 machine_mode mode1 = Pmode;
13690 machine_mode mode2 = Pmode;
13691
13692 /* Invalid arguments.  Bail out before doing anything stupid!  */
13693 if (arg0 == error_mark_node
13694 || arg1 == error_mark_node
13695 || arg2 == error_mark_node)
13696 return const0_rtx;
13697
13698 op2 = copy_to_mode_reg (mode2, op2);
13699
13700 /* For STVX, express the RTL accurately by ANDing the address with -16.
13701 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
13702 so the raw address is fine. */
13703 if (icode == CODE_FOR_altivec_stvx_v2df
13704 || icode == CODE_FOR_altivec_stvx_v2di
13705 || icode == CODE_FOR_altivec_stvx_v4sf
13706 || icode == CODE_FOR_altivec_stvx_v4si
13707 || icode == CODE_FOR_altivec_stvx_v8hi
13708 || icode == CODE_FOR_altivec_stvx_v16qi)
13709 {
13710 if (op1 == const0_rtx)
13711 rawaddr = op2;
13712 else
13713 {
13714 op1 = copy_to_mode_reg (mode1, op1);
13715 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
13716 }
13717
13718 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
13719 addr = gen_rtx_MEM (tmode, addr);
13720
13721 op0 = copy_to_mode_reg (tmode, op0);
13722
13723 emit_insn (gen_rtx_SET (addr, op0));
13724 }
13725 else
13726 {
13727 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
13728 op0 = copy_to_mode_reg (smode, op0);
13729
13730 if (op1 == const0_rtx)
13731 addr = gen_rtx_MEM (tmode, op2);
13732 else
13733 {
13734 op1 = copy_to_mode_reg (mode1, op1);
13735 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
13736 }
13737
13738 pat = GEN_FCN (icode) (addr, op0);
13739 if (pat)
13740 emit_insn (pat);
13741 }
13742
13743 return NULL_RTX;
13744 }
13745
13746 /* Return the appropriate SPR number associated with the given builtin. */
13747 static inline HOST_WIDE_INT
13748 htm_spr_num (enum rs6000_builtins code)
13749 {
13750 if (code == HTM_BUILTIN_GET_TFHAR
13751 || code == HTM_BUILTIN_SET_TFHAR)
13752 return TFHAR_SPR;
13753 else if (code == HTM_BUILTIN_GET_TFIAR
13754 || code == HTM_BUILTIN_SET_TFIAR)
13755 return TFIAR_SPR;
13756 else if (code == HTM_BUILTIN_GET_TEXASR
13757 || code == HTM_BUILTIN_SET_TEXASR)
13758 return TEXASR_SPR;
13759 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
13760 || code == HTM_BUILTIN_SET_TEXASRU);
13761 return TEXASRU_SPR;
13762 }
13763
13764 /* Return the correct ICODE value depending on whether we are
13765 setting or reading the HTM SPRs. */
13766 static inline enum insn_code
13767 rs6000_htm_spr_icode (bool nonvoid)
13768 {
13769 if (nonvoid)
13770 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
13771 else
13772 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
13773 }
13774
13775 /* Expand the HTM builtin in EXP and store the result in TARGET.
13776 Store true in *EXPANDEDP if we found a builtin to expand. */
13777 static rtx
13778 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
13779 {
13780 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13781 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
13782 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13783 const struct builtin_description *d;
13784 size_t i;
13785
13786 *expandedp = true;
13787
13788 if (!TARGET_POWERPC64
13789 && (fcode == HTM_BUILTIN_TABORTDC
13790 || fcode == HTM_BUILTIN_TABORTDCI))
13791 {
13792 size_t uns_fcode = (size_t)fcode;
13793 const char *name = rs6000_builtin_info[uns_fcode].name;
13794 error ("builtin %qs is only valid in 64-bit mode", name);
13795 return const0_rtx;
13796 }
13797
13798 /* Expand the HTM builtins. */
13799 d = bdesc_htm;
13800 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
13801 if (d->code == fcode)
13802 {
13803 rtx op[MAX_HTM_OPERANDS], pat;
13804 int nopnds = 0;
13805 tree arg;
13806 call_expr_arg_iterator iter;
13807 unsigned attr = rs6000_builtin_info[fcode].attr;
13808 enum insn_code icode = d->icode;
13809 const struct insn_operand_data *insn_op;
13810 bool uses_spr = (attr & RS6000_BTC_SPR);
13811 rtx cr = NULL_RTX;
13812
13813 if (uses_spr)
13814 icode = rs6000_htm_spr_icode (nonvoid);
13815 insn_op = &insn_data[icode].operand[0];
13816
13817 if (nonvoid)
13818 {
13819 machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
13820 if (!target
13821 || GET_MODE (target) != tmode
13822 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
13823 target = gen_reg_rtx (tmode);
13824 if (uses_spr)
13825 op[nopnds++] = target;
13826 }
13827
13828 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
13829 {
13830 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
13831 return const0_rtx;
13832
13833 insn_op = &insn_data[icode].operand[nopnds];
13834
13835 op[nopnds] = expand_normal (arg);
13836
13837 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
13838 {
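		  /* An "n" constraint requires a compile-time integer
		     constant, so a failed predicate here cannot be
		     fixed up by copying the operand into a register;
		     diagnose it instead.  */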
13839 if (!strcmp (insn_op->constraint, "n"))
13840 {
13841 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
13842 if (!CONST_INT_P (op[nopnds]))
13843 error ("argument %d must be an unsigned literal", arg_num);
13844 else
13845 error ("argument %d is an unsigned literal that is "
13846 "out of range", arg_num);
13847 return const0_rtx;
13848 }
13849 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
13850 }
13851
13852 nopnds++;
13853 }
13854
13855 /* Handle the builtins for extended mnemonics. These accept
13856 no arguments, but map to builtins that take arguments. */
13857 switch (fcode)
13858 {
13859 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
13860 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
13861 op[nopnds++] = GEN_INT (1);
13862 if (flag_checking)
13863 attr |= RS6000_BTC_UNARY;
13864 break;
13865 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
13866 op[nopnds++] = GEN_INT (0);
13867 if (flag_checking)
13868 attr |= RS6000_BTC_UNARY;
13869 break;
13870 default:
13871 break;
13872 }
13873
	/* If this builtin accesses SPRs, then pass in the appropriate
	   SPR number as the last operand.  */
13876 if (uses_spr)
13877 {
13878 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
13879 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
13880 }
13881 /* If this builtin accesses a CR, then pass in a scratch
13882 CR as the last operand. */
13883 else if (attr & RS6000_BTC_CR)
	  {
	    cr = gen_reg_rtx (CCmode);
	    op[nopnds++] = cr;
	  }
13887
13888 if (flag_checking)
13889 {
13890 int expected_nopnds = 0;
13891 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
13892 expected_nopnds = 1;
13893 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
13894 expected_nopnds = 2;
13895 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
13896 expected_nopnds = 3;
13897 if (!(attr & RS6000_BTC_VOID))
13898 expected_nopnds += 1;
13899 if (uses_spr)
13900 expected_nopnds += 1;
13901
13902 gcc_assert (nopnds == expected_nopnds
13903 && nopnds <= MAX_HTM_OPERANDS);
13904 }
13905
13906 switch (nopnds)
13907 {
13908 case 1:
13909 pat = GEN_FCN (icode) (op[0]);
13910 break;
13911 case 2:
13912 pat = GEN_FCN (icode) (op[0], op[1]);
13913 break;
13914 case 3:
13915 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
13916 break;
13917 case 4:
13918 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
13919 break;
13920 default:
13921 gcc_unreachable ();
13922 }
13923 if (!pat)
13924 return NULL_RTX;
13925 emit_insn (pat);
13926
13927 if (attr & RS6000_BTC_CR)
13928 {
13929 if (fcode == HTM_BUILTIN_TBEGIN)
13930 {
		/* Emit code to set TARGET to true or false depending on
		   whether the tbegin. instruction succeeded or failed
		   to start a transaction.  We do this by placing the 1's
		   complement of CR's EQ bit into TARGET.  */
13935 rtx scratch = gen_reg_rtx (SImode);
13936 emit_insn (gen_rtx_SET (scratch,
13937 gen_rtx_EQ (SImode, cr,
13938 const0_rtx)));
13939 emit_insn (gen_rtx_SET (target,
13940 gen_rtx_XOR (SImode, scratch,
13941 GEN_INT (1))));
13942 }
13943 else
13944 {
13945 /* Emit code to copy the 4-bit condition register field
13946 CR into the least significant end of register TARGET. */
13947 rtx scratch1 = gen_reg_rtx (SImode);
13948 rtx scratch2 = gen_reg_rtx (SImode);
13949 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
13950 emit_insn (gen_movcc (subreg, cr));
13951 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
13952 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
13953 }
13954 }
13955
13956 if (nonvoid)
13957 return target;
13958 return const0_rtx;
13959 }
13960
13961 *expandedp = false;
13962 return NULL_RTX;
13963 }
13964
13965 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
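/* For example, user code such as

     if (__builtin_cpu_is ("power9"))
       ...
     else if (__builtin_cpu_supports ("arch_3_00"))
       ...

   expands here into direct reads of the cpu id and HWCAP words that
   glibc (2.23 and newer) caches in the TCB.  */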
13966
13967 static rtx
13968 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
13969 rtx target)
13970 {
13971 /* __builtin_cpu_init () is a nop, so expand to nothing. */
13972 if (fcode == RS6000_BUILTIN_CPU_INIT)
13973 return const0_rtx;
13974
13975 if (target == 0 || GET_MODE (target) != SImode)
13976 target = gen_reg_rtx (SImode);
13977
13978 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
13979 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
13980 /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
13981 to a STRING_CST. */
13982 if (TREE_CODE (arg) == ARRAY_REF
13983 && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
13984 && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
13985 && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
13986 arg = TREE_OPERAND (arg, 0);
13987
13988 if (TREE_CODE (arg) != STRING_CST)
13989 {
13990 error ("builtin %qs only accepts a string argument",
13991 rs6000_builtin_info[(size_t) fcode].name);
13992 return const0_rtx;
13993 }
13994
13995 if (fcode == RS6000_BUILTIN_CPU_IS)
13996 {
13997 const char *cpu = TREE_STRING_POINTER (arg);
13998 rtx cpuid = NULL_RTX;
13999 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
14000 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
14001 {
14002 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
14003 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
14004 break;
14005 }
14006 if (cpuid == NULL_RTX)
14007 {
14008 /* Invalid CPU argument. */
14009 error ("cpu %qs is an invalid argument to builtin %qs",
14010 cpu, rs6000_builtin_info[(size_t) fcode].name);
14011 return const0_rtx;
14012 }
14013
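      /* Load the cpu id that glibc stored in the TCB at a fixed offset
	 from the thread pointer, and compare it against the requested
	 id; TARGET becomes 1 on a match and 0 otherwise.  */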
14014 rtx platform = gen_reg_rtx (SImode);
14015 rtx tcbmem = gen_const_mem (SImode,
14016 gen_rtx_PLUS (Pmode,
14017 gen_rtx_REG (Pmode, TLS_REGNUM),
14018 GEN_INT (TCB_PLATFORM_OFFSET)));
14019 emit_move_insn (platform, tcbmem);
14020 emit_insn (gen_eqsi3 (target, platform, cpuid));
14021 }
14022 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
14023 {
14024 const char *hwcap = TREE_STRING_POINTER (arg);
14025 rtx mask = NULL_RTX;
14026 int hwcap_offset;
14027 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
14028 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
14029 {
14030 mask = GEN_INT (cpu_supports_info[i].mask);
14031 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
14032 break;
14033 }
14034 if (mask == NULL_RTX)
14035 {
14036 /* Invalid HWCAP argument. */
14037 error ("%s %qs is an invalid argument to builtin %qs",
14038 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
14039 return const0_rtx;
14040 }
14041
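      /* Fetch the HWCAP word cached in the TCB and test the requested
	 bit: TARGET = ((hwcap & mask) != 0), computed by comparing the
	 masked value against zero and inverting the result.  */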
14042 rtx tcb_hwcap = gen_reg_rtx (SImode);
14043 rtx tcbmem = gen_const_mem (SImode,
14044 gen_rtx_PLUS (Pmode,
14045 gen_rtx_REG (Pmode, TLS_REGNUM),
14046 GEN_INT (hwcap_offset)));
14047 emit_move_insn (tcb_hwcap, tcbmem);
14048 rtx scratch1 = gen_reg_rtx (SImode);
14049 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
14050 rtx scratch2 = gen_reg_rtx (SImode);
14051 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
14052 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
14053 }
14054 else
14055 gcc_unreachable ();
14056
14057 /* Record that we have expanded a CPU builtin, so that we can later
14058 emit a reference to the special symbol exported by LIBC to ensure we
14059 do not link against an old LIBC that doesn't support this feature. */
14060 cpu_builtin_p = true;
14061
14062 #else
14063 warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
14064 "capability bits", rs6000_builtin_info[(size_t) fcode].name);
14065
14066 /* For old LIBCs, always return FALSE. */
14067 emit_move_insn (target, GEN_INT (0));
14068 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
14069
14070 return target;
14071 }
14072
14073 static rtx
14074 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
14075 {
14076 rtx pat;
14077 tree arg0 = CALL_EXPR_ARG (exp, 0);
14078 tree arg1 = CALL_EXPR_ARG (exp, 1);
14079 tree arg2 = CALL_EXPR_ARG (exp, 2);
14080 rtx op0 = expand_normal (arg0);
14081 rtx op1 = expand_normal (arg1);
14082 rtx op2 = expand_normal (arg2);
14083 machine_mode tmode = insn_data[icode].operand[0].mode;
14084 machine_mode mode0 = insn_data[icode].operand[1].mode;
14085 machine_mode mode1 = insn_data[icode].operand[2].mode;
14086 machine_mode mode2 = insn_data[icode].operand[3].mode;
14087
14088 if (icode == CODE_FOR_nothing)
14089 /* Builtin not supported on this processor. */
14090 return 0;
14091
14092 /* If we got invalid arguments bail out before generating bad rtl. */
14093 if (arg0 == error_mark_node
14094 || arg1 == error_mark_node
14095 || arg2 == error_mark_node)
14096 return const0_rtx;
14097
  /* Check and prepare the argument depending on the instruction code.

     Note that a switch statement instead of this sequence of tests
     would be incorrect, as many of the CODE_FOR values could be
     CODE_FOR_nothing, and that would yield multiple case labels with
     identical values.  We never reach here at runtime in that case.  */
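  /* E.g. the third argument of vec_sld must be a 4-bit literal because
     it becomes the immediate shift count of the vsldoi instruction.  */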
14105 if (icode == CODE_FOR_altivec_vsldoi_v4sf
14106 || icode == CODE_FOR_altivec_vsldoi_v2df
14107 || icode == CODE_FOR_altivec_vsldoi_v4si
14108 || icode == CODE_FOR_altivec_vsldoi_v8hi
14109 || icode == CODE_FOR_altivec_vsldoi_v16qi)
14110 {
14111 /* Only allow 4-bit unsigned literals. */
14112 STRIP_NOPS (arg2);
14113 if (TREE_CODE (arg2) != INTEGER_CST
14114 || TREE_INT_CST_LOW (arg2) & ~0xf)
14115 {
14116 error ("argument 3 must be a 4-bit unsigned literal");
14117 return CONST0_RTX (tmode);
14118 }
14119 }
14120 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
14121 || icode == CODE_FOR_vsx_xxpermdi_v2di
14122 || icode == CODE_FOR_vsx_xxpermdi_v2df_be
14123 || icode == CODE_FOR_vsx_xxpermdi_v2di_be
14124 || icode == CODE_FOR_vsx_xxpermdi_v1ti
14125 || icode == CODE_FOR_vsx_xxpermdi_v4sf
14126 || icode == CODE_FOR_vsx_xxpermdi_v4si
14127 || icode == CODE_FOR_vsx_xxpermdi_v8hi
14128 || icode == CODE_FOR_vsx_xxpermdi_v16qi
14129 || icode == CODE_FOR_vsx_xxsldwi_v16qi
14130 || icode == CODE_FOR_vsx_xxsldwi_v8hi
14131 || icode == CODE_FOR_vsx_xxsldwi_v4si
14132 || icode == CODE_FOR_vsx_xxsldwi_v4sf
14133 || icode == CODE_FOR_vsx_xxsldwi_v2di
14134 || icode == CODE_FOR_vsx_xxsldwi_v2df)
14135 {
14136 /* Only allow 2-bit unsigned literals. */
14137 STRIP_NOPS (arg2);
14138 if (TREE_CODE (arg2) != INTEGER_CST
14139 || TREE_INT_CST_LOW (arg2) & ~0x3)
14140 {
14141 error ("argument 3 must be a 2-bit unsigned literal");
14142 return CONST0_RTX (tmode);
14143 }
14144 }
14145 else if (icode == CODE_FOR_vsx_set_v2df
14146 || icode == CODE_FOR_vsx_set_v2di
14147 || icode == CODE_FOR_bcdadd
14148 || icode == CODE_FOR_bcdadd_lt
14149 || icode == CODE_FOR_bcdadd_eq
14150 || icode == CODE_FOR_bcdadd_gt
14151 || icode == CODE_FOR_bcdsub
14152 || icode == CODE_FOR_bcdsub_lt
14153 || icode == CODE_FOR_bcdsub_eq
14154 || icode == CODE_FOR_bcdsub_gt)
14155 {
14156 /* Only allow 1-bit unsigned literals. */
14157 STRIP_NOPS (arg2);
14158 if (TREE_CODE (arg2) != INTEGER_CST
14159 || TREE_INT_CST_LOW (arg2) & ~0x1)
14160 {
14161 error ("argument 3 must be a 1-bit unsigned literal");
14162 return CONST0_RTX (tmode);
14163 }
14164 }
14165 else if (icode == CODE_FOR_dfp_ddedpd_dd
14166 || icode == CODE_FOR_dfp_ddedpd_td)
14167 {
      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
14172 {
14173 error ("argument 1 must be 0 or 2");
14174 return CONST0_RTX (tmode);
14175 }
14176 }
14177 else if (icode == CODE_FOR_dfp_denbcd_dd
14178 || icode == CODE_FOR_dfp_denbcd_td)
14179 {
14180 /* Only allow 1-bit unsigned literals. */
14181 STRIP_NOPS (arg0);
14182 if (TREE_CODE (arg0) != INTEGER_CST
14183 || TREE_INT_CST_LOW (arg0) & ~0x1)
14184 {
14185 error ("argument 1 must be a 1-bit unsigned literal");
14186 return CONST0_RTX (tmode);
14187 }
14188 }
14189 else if (icode == CODE_FOR_dfp_dscli_dd
14190 || icode == CODE_FOR_dfp_dscli_td
14191 || icode == CODE_FOR_dfp_dscri_dd
14192 || icode == CODE_FOR_dfp_dscri_td)
14193 {
14194 /* Only allow 6-bit unsigned literals. */
14195 STRIP_NOPS (arg1);
14196 if (TREE_CODE (arg1) != INTEGER_CST
14197 || TREE_INT_CST_LOW (arg1) & ~0x3f)
14198 {
14199 error ("argument 2 must be a 6-bit unsigned literal");
14200 return CONST0_RTX (tmode);
14201 }
14202 }
14203 else if (icode == CODE_FOR_crypto_vshasigmaw
14204 || icode == CODE_FOR_crypto_vshasigmad)
14205 {
      /* Check that the 2nd and 3rd arguments are integer constants in
	 range, and prepare the arguments.  */
14208 STRIP_NOPS (arg1);
14209 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
14210 {
14211 error ("argument 2 must be 0 or 1");
14212 return CONST0_RTX (tmode);
14213 }
14214
14215 STRIP_NOPS (arg2);
14216 if (TREE_CODE (arg2) != INTEGER_CST
14217 || wi::geu_p (wi::to_wide (arg2), 16))
14218 {
14219 error ("argument 3 must be in the range 0..15");
14220 return CONST0_RTX (tmode);
14221 }
14222 }
14223
14224 if (target == 0
14225 || GET_MODE (target) != tmode
14226 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14227 target = gen_reg_rtx (tmode);
14228
14229 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14230 op0 = copy_to_mode_reg (mode0, op0);
14231 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14232 op1 = copy_to_mode_reg (mode1, op1);
14233 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14234 op2 = copy_to_mode_reg (mode2, op2);
14235
14236 pat = GEN_FCN (icode) (target, op0, op1, op2);
14237 if (! pat)
14238 return 0;
14239 emit_insn (pat);
14240
14241 return target;
14242 }
14243
14244
14245 /* Expand the dst builtins. */
14246 static rtx
14247 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
14248 bool *expandedp)
14249 {
14250 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14251 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14252 tree arg0, arg1, arg2;
14253 machine_mode mode0, mode1;
14254 rtx pat, op0, op1, op2;
14255 const struct builtin_description *d;
14256 size_t i;
14257
14258 *expandedp = false;
14259
14260 /* Handle DST variants. */
14261 d = bdesc_dst;
14262 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14263 if (d->code == fcode)
14264 {
14265 arg0 = CALL_EXPR_ARG (exp, 0);
14266 arg1 = CALL_EXPR_ARG (exp, 1);
14267 arg2 = CALL_EXPR_ARG (exp, 2);
14268 op0 = expand_normal (arg0);
14269 op1 = expand_normal (arg1);
14270 op2 = expand_normal (arg2);
14271 mode0 = insn_data[d->icode].operand[0].mode;
14272 mode1 = insn_data[d->icode].operand[1].mode;
14273
14274 /* Invalid arguments, bail out before generating bad rtl. */
14275 if (arg0 == error_mark_node
14276 || arg1 == error_mark_node
14277 || arg2 == error_mark_node)
14278 return const0_rtx;
14279
14280 *expandedp = true;
14281 STRIP_NOPS (arg2);
14282 if (TREE_CODE (arg2) != INTEGER_CST
14283 || TREE_INT_CST_LOW (arg2) & ~0x3)
14284 {
14285 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
14286 return const0_rtx;
14287 }
14288
14289 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
14290 op0 = copy_to_mode_reg (Pmode, op0);
14291 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
14292 op1 = copy_to_mode_reg (mode1, op1);
14293
14294 pat = GEN_FCN (d->icode) (op0, op1, op2);
14295 if (pat != 0)
14296 emit_insn (pat);
14297
14298 return NULL_RTX;
14299 }
14300
14301 return NULL_RTX;
14302 }
14303
14304 /* Expand vec_init builtin. */
14305 static rtx
14306 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
14307 {
14308 machine_mode tmode = TYPE_MODE (type);
14309 machine_mode inner_mode = GET_MODE_INNER (tmode);
14310 int i, n_elt = GET_MODE_NUNITS (tmode);
14311
14312 gcc_assert (VECTOR_MODE_P (tmode));
14313 gcc_assert (n_elt == call_expr_nargs (exp));
14314
14315 if (!target || !register_operand (target, tmode))
14316 target = gen_reg_rtx (tmode);
14317
  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
14320 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
14321 {
14322 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
14323 emit_move_insn (target, gen_lowpart (tmode, x));
14324 }
14325 else
14326 {
14327 rtvec v = rtvec_alloc (n_elt);
14328
14329 for (i = 0; i < n_elt; ++i)
14330 {
14331 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
14332 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
14333 }
14334
14335 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
14336 }
14337
14338 return target;
14339 }
14340
14341 /* Return the integer constant in ARG. Constrain it to be in the range
14342 of the subparts of VEC_TYPE; issue an error if not. */
14343
14344 static int
14345 get_element_number (tree vec_type, tree arg)
14346 {
14347 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
14348
14349 if (!tree_fits_uhwi_p (arg)
14350 || (elt = tree_to_uhwi (arg), elt > max))
14351 {
14352 error ("selector must be an integer constant in the range 0..%wi", max);
14353 return 0;
14354 }
14355
14356 return elt;
14357 }
14358
14359 /* Expand vec_set builtin. */
14360 static rtx
14361 altivec_expand_vec_set_builtin (tree exp)
14362 {
14363 machine_mode tmode, mode1;
14364 tree arg0, arg1, arg2;
14365 int elt;
14366 rtx op0, op1;
14367
14368 arg0 = CALL_EXPR_ARG (exp, 0);
14369 arg1 = CALL_EXPR_ARG (exp, 1);
14370 arg2 = CALL_EXPR_ARG (exp, 2);
14371
14372 tmode = TYPE_MODE (TREE_TYPE (arg0));
14373 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14374 gcc_assert (VECTOR_MODE_P (tmode));
14375
14376 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
14377 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
14378 elt = get_element_number (TREE_TYPE (arg0), arg2);
14379
14380 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
14381 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
14382
14383 op0 = force_reg (tmode, op0);
14384 op1 = force_reg (mode1, op1);
14385
14386 rs6000_expand_vector_set (op0, op1, elt);
14387
14388 return op0;
14389 }
14390
14391 /* Expand vec_ext builtin. */
14392 static rtx
14393 altivec_expand_vec_ext_builtin (tree exp, rtx target)
14394 {
14395 machine_mode tmode, mode0;
14396 tree arg0, arg1;
14397 rtx op0;
14398 rtx op1;
14399
14400 arg0 = CALL_EXPR_ARG (exp, 0);
14401 arg1 = CALL_EXPR_ARG (exp, 1);
14402
14403 op0 = expand_normal (arg0);
14404 op1 = expand_normal (arg1);
14405
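  /* For a constant selector, e.g. vec_extract (v, 5) on a V4SI value,
     reduce the selector modulo the number of elements (5 % 4 == 1)
     before expanding.  */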
14406 if (TREE_CODE (arg1) == INTEGER_CST)
14407 {
14408 unsigned HOST_WIDE_INT elt;
14409 unsigned HOST_WIDE_INT size = TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0));
14410 unsigned int truncated_selector;
      /* Even if !tree_fits_uhwi_p (arg1), TREE_INT_CST_LOW (arg1)
	 returns the low-order bits of the INTEGER_CST for modulo
	 indexing.  */
14413 elt = TREE_INT_CST_LOW (arg1);
14414 truncated_selector = elt % size;
14415 op1 = GEN_INT (truncated_selector);
14416 }
14417
14418 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
14419 mode0 = TYPE_MODE (TREE_TYPE (arg0));
14420 gcc_assert (VECTOR_MODE_P (mode0));
14421
14422 op0 = force_reg (mode0, op0);
14423
14424 if (optimize || !target || !register_operand (target, tmode))
14425 target = gen_reg_rtx (tmode);
14426
14427 rs6000_expand_vector_extract (target, op0, op1);
14428
14429 return target;
14430 }
14431
14432 /* Expand the builtin in EXP and store the result in TARGET. Store
14433 true in *EXPANDEDP if we found a builtin to expand. */
14434 static rtx
14435 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
14436 {
14437 const struct builtin_description *d;
14438 size_t i;
14439 enum insn_code icode;
14440 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14441 tree arg0, arg1, arg2;
14442 rtx op0, pat;
14443 machine_mode tmode, mode0;
14444 enum rs6000_builtins fcode
14445 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14446
14447 if (rs6000_overloaded_builtin_p (fcode))
14448 {
14449 *expandedp = true;
14450 error ("unresolved overload for Altivec builtin %qF", fndecl);
14451
14452 /* Given it is invalid, just generate a normal call. */
14453 return expand_call (exp, target, false);
14454 }
14455
14456 target = altivec_expand_dst_builtin (exp, target, expandedp);
14457 if (*expandedp)
14458 return target;
14459
14460 *expandedp = true;
14461
14462 switch (fcode)
14463 {
14464 case ALTIVEC_BUILTIN_STVX_V2DF:
14465 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
14466 case ALTIVEC_BUILTIN_STVX_V2DI:
14467 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
14468 case ALTIVEC_BUILTIN_STVX_V4SF:
14469 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
14470 case ALTIVEC_BUILTIN_STVX:
14471 case ALTIVEC_BUILTIN_STVX_V4SI:
14472 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
14473 case ALTIVEC_BUILTIN_STVX_V8HI:
14474 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
14475 case ALTIVEC_BUILTIN_STVX_V16QI:
14476 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
14477 case ALTIVEC_BUILTIN_STVEBX:
14478 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
14479 case ALTIVEC_BUILTIN_STVEHX:
14480 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
14481 case ALTIVEC_BUILTIN_STVEWX:
14482 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
14483 case ALTIVEC_BUILTIN_STVXL_V2DF:
14484 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
14485 case ALTIVEC_BUILTIN_STVXL_V2DI:
14486 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
14487 case ALTIVEC_BUILTIN_STVXL_V4SF:
14488 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
14489 case ALTIVEC_BUILTIN_STVXL:
14490 case ALTIVEC_BUILTIN_STVXL_V4SI:
14491 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
14492 case ALTIVEC_BUILTIN_STVXL_V8HI:
14493 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
14494 case ALTIVEC_BUILTIN_STVXL_V16QI:
14495 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
14496
14497 case ALTIVEC_BUILTIN_STVLX:
14498 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
14499 case ALTIVEC_BUILTIN_STVLXL:
14500 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
14501 case ALTIVEC_BUILTIN_STVRX:
14502 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
14503 case ALTIVEC_BUILTIN_STVRXL:
14504 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
14505
14506 case P9V_BUILTIN_STXVL:
14507 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
14508
14509 case P9V_BUILTIN_XST_LEN_R:
14510 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r, exp);
14511
14512 case VSX_BUILTIN_STXVD2X_V1TI:
14513 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
14514 case VSX_BUILTIN_STXVD2X_V2DF:
14515 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
14516 case VSX_BUILTIN_STXVD2X_V2DI:
14517 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
14518 case VSX_BUILTIN_STXVW4X_V4SF:
14519 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
14520 case VSX_BUILTIN_STXVW4X_V4SI:
14521 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
14522 case VSX_BUILTIN_STXVW4X_V8HI:
14523 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
14524 case VSX_BUILTIN_STXVW4X_V16QI:
14525 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
14526
    /* For the following on big-endian, it's ok to use any appropriate
       unaligned-supporting store, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
14531 case VSX_BUILTIN_ST_ELEMREV_V1TI:
14532 {
14533 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v1ti
14534 : CODE_FOR_vsx_st_elemrev_v1ti);
14535 return altivec_expand_stv_builtin (code, exp);
14536 }
14537 case VSX_BUILTIN_ST_ELEMREV_V2DF:
14538 {
14539 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
14540 : CODE_FOR_vsx_st_elemrev_v2df);
14541 return altivec_expand_stv_builtin (code, exp);
14542 }
14543 case VSX_BUILTIN_ST_ELEMREV_V2DI:
14544 {
14545 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
14546 : CODE_FOR_vsx_st_elemrev_v2di);
14547 return altivec_expand_stv_builtin (code, exp);
14548 }
14549 case VSX_BUILTIN_ST_ELEMREV_V4SF:
14550 {
14551 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
14552 : CODE_FOR_vsx_st_elemrev_v4sf);
14553 return altivec_expand_stv_builtin (code, exp);
14554 }
14555 case VSX_BUILTIN_ST_ELEMREV_V4SI:
14556 {
14557 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
14558 : CODE_FOR_vsx_st_elemrev_v4si);
14559 return altivec_expand_stv_builtin (code, exp);
14560 }
14561 case VSX_BUILTIN_ST_ELEMREV_V8HI:
14562 {
14563 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
14564 : CODE_FOR_vsx_st_elemrev_v8hi);
14565 return altivec_expand_stv_builtin (code, exp);
14566 }
14567 case VSX_BUILTIN_ST_ELEMREV_V16QI:
14568 {
14569 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
14570 : CODE_FOR_vsx_st_elemrev_v16qi);
14571 return altivec_expand_stv_builtin (code, exp);
14572 }
14573
14574 case ALTIVEC_BUILTIN_MFVSCR:
14575 icode = CODE_FOR_altivec_mfvscr;
14576 tmode = insn_data[icode].operand[0].mode;
14577
14578 if (target == 0
14579 || GET_MODE (target) != tmode
14580 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14581 target = gen_reg_rtx (tmode);
14582
14583 pat = GEN_FCN (icode) (target);
14584 if (! pat)
14585 return 0;
14586 emit_insn (pat);
14587 return target;
14588
14589 case ALTIVEC_BUILTIN_MTVSCR:
14590 icode = CODE_FOR_altivec_mtvscr;
14591 arg0 = CALL_EXPR_ARG (exp, 0);
14592 op0 = expand_normal (arg0);
14593 mode0 = insn_data[icode].operand[0].mode;
14594
14595 /* If we got invalid arguments bail out before generating bad rtl. */
14596 if (arg0 == error_mark_node)
14597 return const0_rtx;
14598
14599 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14600 op0 = copy_to_mode_reg (mode0, op0);
14601
14602 pat = GEN_FCN (icode) (op0);
14603 if (pat)
14604 emit_insn (pat);
14605 return NULL_RTX;
14606
14607 case ALTIVEC_BUILTIN_DSSALL:
14608 emit_insn (gen_altivec_dssall ());
14609 return NULL_RTX;
14610
14611 case ALTIVEC_BUILTIN_DSS:
14612 icode = CODE_FOR_altivec_dss;
14613 arg0 = CALL_EXPR_ARG (exp, 0);
14614 STRIP_NOPS (arg0);
14615 op0 = expand_normal (arg0);
14616 mode0 = insn_data[icode].operand[0].mode;
14617
14618 /* If we got invalid arguments bail out before generating bad rtl. */
14619 if (arg0 == error_mark_node)
14620 return const0_rtx;
14621
14622 if (TREE_CODE (arg0) != INTEGER_CST
14623 || TREE_INT_CST_LOW (arg0) & ~0x3)
14624 {
14625 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
14626 return const0_rtx;
14627 }
14628
14629 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14630 op0 = copy_to_mode_reg (mode0, op0);
14631
14632 emit_insn (gen_altivec_dss (op0));
14633 return NULL_RTX;
14634
14635 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
14636 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
14637 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
14638 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
14639 case VSX_BUILTIN_VEC_INIT_V2DF:
14640 case VSX_BUILTIN_VEC_INIT_V2DI:
14641 case VSX_BUILTIN_VEC_INIT_V1TI:
14642 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
14643
14644 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
14645 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
14646 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
14647 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
14648 case VSX_BUILTIN_VEC_SET_V2DF:
14649 case VSX_BUILTIN_VEC_SET_V2DI:
14650 case VSX_BUILTIN_VEC_SET_V1TI:
14651 return altivec_expand_vec_set_builtin (exp);
14652
14653 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
14654 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
14655 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
14656 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
14657 case VSX_BUILTIN_VEC_EXT_V2DF:
14658 case VSX_BUILTIN_VEC_EXT_V2DI:
14659 case VSX_BUILTIN_VEC_EXT_V1TI:
14660 return altivec_expand_vec_ext_builtin (exp, target);
14661
14662 case P9V_BUILTIN_VEC_EXTRACT4B:
14663 arg1 = CALL_EXPR_ARG (exp, 1);
14664 STRIP_NOPS (arg1);
14665
14666 /* Generate a normal call if it is invalid. */
14667 if (arg1 == error_mark_node)
14668 return expand_call (exp, target, false);
14669
14670 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
14671 {
14672 error ("second argument to %qs must be 0..12", "vec_vextract4b");
14673 return expand_call (exp, target, false);
14674 }
14675 break;
14676
14677 case P9V_BUILTIN_VEC_INSERT4B:
14678 arg2 = CALL_EXPR_ARG (exp, 2);
14679 STRIP_NOPS (arg2);
14680
14681 /* Generate a normal call if it is invalid. */
14682 if (arg2 == error_mark_node)
14683 return expand_call (exp, target, false);
14684
14685 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
14686 {
14687 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
14688 return expand_call (exp, target, false);
14689 }
14690 break;
14691
    default:
      break;
14695 }
14696
14697 /* Expand abs* operations. */
14698 d = bdesc_abs;
14699 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14700 if (d->code == fcode)
14701 return altivec_expand_abs_builtin (d->icode, exp, target);
14702
14703 /* Expand the AltiVec predicates. */
14704 d = bdesc_altivec_preds;
14705 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14706 if (d->code == fcode)
14707 return altivec_expand_predicate_builtin (d->icode, exp, target);
14708
14709 /* LV* are funky. We initialized them differently. */
14710 switch (fcode)
14711 {
14712 case ALTIVEC_BUILTIN_LVSL:
14713 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
14714 exp, target, false);
14715 case ALTIVEC_BUILTIN_LVSR:
14716 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
14717 exp, target, false);
14718 case ALTIVEC_BUILTIN_LVEBX:
14719 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
14720 exp, target, false);
14721 case ALTIVEC_BUILTIN_LVEHX:
14722 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
14723 exp, target, false);
14724 case ALTIVEC_BUILTIN_LVEWX:
14725 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
14726 exp, target, false);
14727 case ALTIVEC_BUILTIN_LVXL_V2DF:
14728 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
14729 exp, target, false);
14730 case ALTIVEC_BUILTIN_LVXL_V2DI:
14731 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
14732 exp, target, false);
14733 case ALTIVEC_BUILTIN_LVXL_V4SF:
14734 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
14735 exp, target, false);
14736 case ALTIVEC_BUILTIN_LVXL:
14737 case ALTIVEC_BUILTIN_LVXL_V4SI:
14738 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
14739 exp, target, false);
14740 case ALTIVEC_BUILTIN_LVXL_V8HI:
14741 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
14742 exp, target, false);
14743 case ALTIVEC_BUILTIN_LVXL_V16QI:
14744 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
14745 exp, target, false);
14746 case ALTIVEC_BUILTIN_LVX_V1TI:
14747 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
14748 exp, target, false);
14749 case ALTIVEC_BUILTIN_LVX_V2DF:
14750 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
14751 exp, target, false);
14752 case ALTIVEC_BUILTIN_LVX_V2DI:
14753 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
14754 exp, target, false);
14755 case ALTIVEC_BUILTIN_LVX_V4SF:
14756 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
14757 exp, target, false);
14758 case ALTIVEC_BUILTIN_LVX:
14759 case ALTIVEC_BUILTIN_LVX_V4SI:
14760 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
14761 exp, target, false);
14762 case ALTIVEC_BUILTIN_LVX_V8HI:
14763 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
14764 exp, target, false);
14765 case ALTIVEC_BUILTIN_LVX_V16QI:
14766 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
14767 exp, target, false);
14768 case ALTIVEC_BUILTIN_LVLX:
14769 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
14770 exp, target, true);
14771 case ALTIVEC_BUILTIN_LVLXL:
14772 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
14773 exp, target, true);
14774 case ALTIVEC_BUILTIN_LVRX:
14775 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
14776 exp, target, true);
14777 case ALTIVEC_BUILTIN_LVRXL:
14778 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
14779 exp, target, true);
14780 case VSX_BUILTIN_LXVD2X_V1TI:
14781 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
14782 exp, target, false);
14783 case VSX_BUILTIN_LXVD2X_V2DF:
14784 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
14785 exp, target, false);
14786 case VSX_BUILTIN_LXVD2X_V2DI:
14787 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
14788 exp, target, false);
14789 case VSX_BUILTIN_LXVW4X_V4SF:
14790 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
14791 exp, target, false);
14792 case VSX_BUILTIN_LXVW4X_V4SI:
14793 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
14794 exp, target, false);
14795 case VSX_BUILTIN_LXVW4X_V8HI:
14796 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
14797 exp, target, false);
14798 case VSX_BUILTIN_LXVW4X_V16QI:
14799 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
14800 exp, target, false);
    /* For the following on big-endian, it's ok to use any appropriate
       unaligned-supporting load, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
14805 case VSX_BUILTIN_LD_ELEMREV_V2DF:
14806 {
14807 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
14808 : CODE_FOR_vsx_ld_elemrev_v2df);
14809 return altivec_expand_lv_builtin (code, exp, target, false);
14810 }
14811 case VSX_BUILTIN_LD_ELEMREV_V1TI:
14812 {
14813 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
14814 : CODE_FOR_vsx_ld_elemrev_v1ti);
14815 return altivec_expand_lv_builtin (code, exp, target, false);
14816 }
14817 case VSX_BUILTIN_LD_ELEMREV_V2DI:
14818 {
14819 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
14820 : CODE_FOR_vsx_ld_elemrev_v2di);
14821 return altivec_expand_lv_builtin (code, exp, target, false);
14822 }
14823 case VSX_BUILTIN_LD_ELEMREV_V4SF:
14824 {
14825 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
14826 : CODE_FOR_vsx_ld_elemrev_v4sf);
14827 return altivec_expand_lv_builtin (code, exp, target, false);
14828 }
14829 case VSX_BUILTIN_LD_ELEMREV_V4SI:
14830 {
14831 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
14832 : CODE_FOR_vsx_ld_elemrev_v4si);
14833 return altivec_expand_lv_builtin (code, exp, target, false);
14834 }
14835 case VSX_BUILTIN_LD_ELEMREV_V8HI:
14836 {
14837 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
14838 : CODE_FOR_vsx_ld_elemrev_v8hi);
14839 return altivec_expand_lv_builtin (code, exp, target, false);
14840 }
14841 case VSX_BUILTIN_LD_ELEMREV_V16QI:
14842 {
14843 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
14844 : CODE_FOR_vsx_ld_elemrev_v16qi);
14845 return altivec_expand_lv_builtin (code, exp, target, false);
14846 }
    default:
      break;
14851 }
14852
14853 *expandedp = false;
14854 return NULL_RTX;
14855 }
14856
14857 /* Check whether a builtin function is supported in this target
14858 configuration. */
14859 bool
14860 rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
14861 {
  HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
  return (fnmask & rs6000_builtin_mask) == fnmask;
14867 }
14868
14869 /* Raise an error message for a builtin function that is called without the
14870 appropriate target options being set. */
14871
14872 static void
14873 rs6000_invalid_builtin (enum rs6000_builtins fncode)
14874 {
14875 size_t uns_fncode = (size_t) fncode;
14876 const char *name = rs6000_builtin_info[uns_fncode].name;
14877 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
14878
14879 gcc_assert (name != NULL);
14880 if ((fnmask & RS6000_BTM_CELL) != 0)
14881 error ("builtin function %qs is only valid for the cell processor", name);
14882 else if ((fnmask & RS6000_BTM_VSX) != 0)
14883 error ("builtin function %qs requires the %qs option", name, "-mvsx");
14884 else if ((fnmask & RS6000_BTM_HTM) != 0)
14885 error ("builtin function %qs requires the %qs option", name, "-mhtm");
14886 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
14887 error ("builtin function %qs requires the %qs option", name, "-maltivec");
14888 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14889 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
14890 error ("builtin function %qs requires the %qs and %qs options",
14891 name, "-mhard-dfp", "-mpower8-vector");
14892 else if ((fnmask & RS6000_BTM_DFP) != 0)
14893 error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
14894 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
14895 error ("builtin function %qs requires the %qs option", name,
14896 "-mpower8-vector");
14897 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14898 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
14899 error ("builtin function %qs requires the %qs and %qs options",
14900 name, "-mcpu=power9", "-m64");
14901 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
14902 error ("builtin function %qs requires the %qs option", name,
14903 "-mcpu=power9");
14904 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14905 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
14906 error ("builtin function %qs requires the %qs and %qs options",
14907 name, "-mcpu=power9", "-m64");
14908 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
14909 error ("builtin function %qs requires the %qs option", name,
14910 "-mcpu=power9");
14911 else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
14912 {
14913 if (!TARGET_HARD_FLOAT)
14914 error ("builtin function %qs requires the %qs option", name,
14915 "-mhard-float");
14916 else
14917 error ("builtin function %qs requires the %qs option", name,
14918 TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
14919 }
14920 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
14921 error ("builtin function %qs requires the %qs option", name,
14922 "-mhard-float");
14923 else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
14924 error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
14925 name);
14926 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
14927 error ("builtin function %qs requires the %qs option", name,
14928 "%<-mfloat128%>");
14929 else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
14930 == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
    error ("builtin function %qs requires the %qs (or newer) and "
	   "the %qs or %qs options",
	   name, "-mcpu=power7", "-m64", "-mpowerpc64");
14934 else
14935 error ("builtin function %qs is not supported with the current options",
14936 name);
14937 }
14938
14939 /* Target hook for early folding of built-ins, shamelessly stolen
14940 from ia64.c. */
14941
14942 static tree
14943 rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
14944 int n_args ATTRIBUTE_UNUSED,
14945 tree *args ATTRIBUTE_UNUSED,
14946 bool ignore ATTRIBUTE_UNUSED)
14947 {
14948 #ifdef SUBTARGET_FOLD_BUILTIN
14949 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
14950 #else
14951 return NULL_TREE;
14952 #endif
14953 }
14954
14955 /* Helper function to sort out which built-ins may be valid without having
14956 a LHS. */
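/* These are all stores; they exist for their effect on memory, so the
   absence of a LHS must not prevent them from being folded.  */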
14957 static bool
14958 rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
14959 {
14960 switch (fn_code)
14961 {
14962 case ALTIVEC_BUILTIN_STVX_V16QI:
14963 case ALTIVEC_BUILTIN_STVX_V8HI:
14964 case ALTIVEC_BUILTIN_STVX_V4SI:
14965 case ALTIVEC_BUILTIN_STVX_V4SF:
14966 case ALTIVEC_BUILTIN_STVX_V2DI:
14967 case ALTIVEC_BUILTIN_STVX_V2DF:
14968 case VSX_BUILTIN_STXVW4X_V16QI:
14969 case VSX_BUILTIN_STXVW4X_V8HI:
14970 case VSX_BUILTIN_STXVW4X_V4SF:
14971 case VSX_BUILTIN_STXVW4X_V4SI:
14972 case VSX_BUILTIN_STXVD2X_V2DF:
14973 case VSX_BUILTIN_STXVD2X_V2DI:
14974 return true;
14975 default:
14976 return false;
14977 }
14978 }
14979
14980 /* Helper function to handle the gimple folding of a vector compare
14981 operation. This sets up true/false vectors, and uses the
14982 VEC_COND_EXPR operation.
14983 CODE indicates which comparison is to be made. (EQ, GT, ...).
14984 TYPE indicates the type of the result. */
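/* E.g. for two V4SI operands and code EQ this produces the equivalent
   of
     cmp = arg0 == arg1;
     result = cmp ? {-1,-1,-1,-1} : {0,0,0,0};
   expressed as a VEC_COND_EXPR.  */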
14985 static tree
14986 fold_build_vec_cmp (tree_code code, tree type,
14987 tree arg0, tree arg1)
14988 {
14989 tree cmp_type = build_same_sized_truth_vector_type (type);
14990 tree zero_vec = build_zero_cst (type);
14991 tree minus_one_vec = build_minus_one_cst (type);
14992 tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
14993 return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
14994 }
14995
14996 /* Helper function to handle the in-between steps for the
14997 vector compare built-ins. */
14998 static void
14999 fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
15000 {
15001 tree arg0 = gimple_call_arg (stmt, 0);
15002 tree arg1 = gimple_call_arg (stmt, 1);
15003 tree lhs = gimple_call_lhs (stmt);
15004 tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
15005 gimple *g = gimple_build_assign (lhs, cmp);
15006 gimple_set_location (g, gimple_location (stmt));
15007 gsi_replace (gsi, g, true);
15008 }
15009
15010 /* Helper function to map V2DF and V4SF types to their
15011 integral equivalents (V2DI and V4SI). */
tree
map_to_integral_tree_type (tree input_tree_type)
15013 {
15014 if (INTEGRAL_TYPE_P (TREE_TYPE (input_tree_type)))
15015 return input_tree_type;
15016 else
15017 {
15018 if (types_compatible_p (TREE_TYPE (input_tree_type),
15019 TREE_TYPE (V2DF_type_node)))
15020 return V2DI_type_node;
15021 else if (types_compatible_p (TREE_TYPE (input_tree_type),
15022 TREE_TYPE (V4SF_type_node)))
15023 return V4SI_type_node;
15024 else
15025 gcc_unreachable ();
15026 }
15027 }
15028
/* Helper function to handle the vector merge[hl] built-ins.  The
   implementation difference between the h and l versions is in the
   values used when building the permute vector for the high-word
   versus low-word merge.  The variance is keyed off the use_high
   parameter.  */
15033 static void
15034 fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
15035 {
15036 tree arg0 = gimple_call_arg (stmt, 0);
15037 tree arg1 = gimple_call_arg (stmt, 1);
15038 tree lhs = gimple_call_lhs (stmt);
15039 tree lhs_type = TREE_TYPE (lhs);
15040 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15041 int midpoint = n_elts / 2;
15042 int offset = 0;
15043
15044 if (use_high == 1)
15045 offset = midpoint;
15046
15047 /* The permute_type will match the lhs for integral types. For double and
15048 float types, the permute type needs to map to the V2 or V4 type that
15049 matches size. */
15050 tree permute_type;
15051 permute_type = map_to_integral_tree_type (lhs_type);
15052 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15053
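  /* Build the permute selector vector: indexes 0 .. n_elts-1 select
     elements of arg0 and indexes n_elts .. 2*n_elts-1 select elements
     of arg1, so each iteration below interleaves one element from each
     input.  */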
15054 for (int i = 0; i < midpoint; i++)
15055 {
15056 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15057 offset + i));
15058 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15059 offset + n_elts + i));
15060 }
15061
15062 tree permute = elts.build ();
15063
15064 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15065 gimple_set_location (g, gimple_location (stmt));
15066 gsi_replace (gsi, g, true);
15067 }
15068
15069 /* Helper function to handle the vector merge[eo] built-ins. */
15070 static void
15071 fold_mergeeo_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_odd)
15072 {
15073 tree arg0 = gimple_call_arg (stmt, 0);
15074 tree arg1 = gimple_call_arg (stmt, 1);
15075 tree lhs = gimple_call_lhs (stmt);
15076 tree lhs_type = TREE_TYPE (lhs);
15077 int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
15078
15079 /* The permute_type will match the lhs for integral types. For double and
15080 float types, the permute type needs to map to the V2 or V4 type that
15081 matches size. */
15082 tree permute_type;
15083 permute_type = map_to_integral_tree_type (lhs_type);
15084
15085 tree_vector_builder elts (permute_type, VECTOR_CST_NELTS (arg0), 1);
15086
15087 /* Build the permute vector. */
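  /* E.g. for V4SI with use_odd == 0 this builds {0, 4, 2, 6}: the even
     elements of arg0 interleaved with the even elements of arg1.  */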
15088 for (int i = 0; i < n_elts / 2; i++)
15089 {
15090 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15091 2*i + use_odd));
15092 elts.safe_push (build_int_cst (TREE_TYPE (permute_type),
15093 2*i + use_odd + n_elts));
15094 }
15095
15096 tree permute = elts.build ();
15097
15098 gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
15099 gimple_set_location (g, gimple_location (stmt));
15100 gsi_replace (gsi, g, true);
15101 }
15102
15103 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15104 a constant, use rs6000_fold_builtin.) */
15105
15106 bool
15107 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
15108 {
15109 gimple *stmt = gsi_stmt (*gsi);
15110 tree fndecl = gimple_call_fndecl (stmt);
15111 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
15112 enum rs6000_builtins fn_code
15113 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15114 tree arg0, arg1, lhs, temp;
15115 enum tree_code bcode;
15116 gimple *g;
15117
15118 size_t uns_fncode = (size_t) fn_code;
15119 enum insn_code icode = rs6000_builtin_info[uns_fncode].icode;
15120 const char *fn_name1 = rs6000_builtin_info[uns_fncode].name;
15121 const char *fn_name2 = (icode != CODE_FOR_nothing)
15122 ? get_insn_name ((int) icode)
15123 : "nothing";
15124
15125 if (TARGET_DEBUG_BUILTIN)
15126 fprintf (stderr, "rs6000_gimple_fold_builtin %d %s %s\n",
15127 fn_code, fn_name1, fn_name2);
15128
15129 if (!rs6000_fold_gimple)
15130 return false;
15131
15132 /* Prevent gimple folding for code that does not have a LHS, unless it is
15133 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15134 if (!gimple_call_lhs (stmt) && !rs6000_builtin_valid_without_lhs (fn_code))
15135 return false;
15136
15137 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15138 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fncode].mask;
15139 bool func_valid_p = (rs6000_builtin_mask & mask) == mask;
15140 if (!func_valid_p)
15141 return false;
15142
15143 switch (fn_code)
15144 {
15145 /* Flavors of vec_add. We deliberately don't expand
15146 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15147 TImode, resulting in much poorer code generation. */
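    /* E.g. vec_add (a, b) on vector signed int folds to a PLUS_EXPR
       carried out in the corresponding unsigned type, so the folded
       form keeps the wrapping semantics of the vadduwm instruction.  */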
15148 case ALTIVEC_BUILTIN_VADDUBM:
15149 case ALTIVEC_BUILTIN_VADDUHM:
15150 case ALTIVEC_BUILTIN_VADDUWM:
15151 case P8V_BUILTIN_VADDUDM:
15152 case ALTIVEC_BUILTIN_VADDFP:
15153 case VSX_BUILTIN_XVADDDP:
15154 bcode = PLUS_EXPR;
15155 do_binary:
15156 arg0 = gimple_call_arg (stmt, 0);
15157 arg1 = gimple_call_arg (stmt, 1);
15158 lhs = gimple_call_lhs (stmt);
15159 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (lhs)))
15160 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (lhs))))
15161 {
	    /* Ensure the binary operation is performed in a type
	       that wraps if it is an integral type.  */
15164 gimple_seq stmts = NULL;
15165 tree type = unsigned_type_for (TREE_TYPE (lhs));
15166 tree uarg0 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15167 type, arg0);
15168 tree uarg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15169 type, arg1);
15170 tree res = gimple_build (&stmts, gimple_location (stmt), bcode,
15171 type, uarg0, uarg1);
15172 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15173 g = gimple_build_assign (lhs, VIEW_CONVERT_EXPR,
15174 build1 (VIEW_CONVERT_EXPR,
15175 TREE_TYPE (lhs), res));
15176 gsi_replace (gsi, g, true);
15177 return true;
15178 }
15179 g = gimple_build_assign (lhs, bcode, arg0, arg1);
15180 gimple_set_location (g, gimple_location (stmt));
15181 gsi_replace (gsi, g, true);
15182 return true;
15183 /* Flavors of vec_sub. We deliberately don't expand
15184 P8V_BUILTIN_VSUBUQM. */
15185 case ALTIVEC_BUILTIN_VSUBUBM:
15186 case ALTIVEC_BUILTIN_VSUBUHM:
15187 case ALTIVEC_BUILTIN_VSUBUWM:
15188 case P8V_BUILTIN_VSUBUDM:
15189 case ALTIVEC_BUILTIN_VSUBFP:
15190 case VSX_BUILTIN_XVSUBDP:
15191 bcode = MINUS_EXPR;
15192 goto do_binary;
15193 case VSX_BUILTIN_XVMULSP:
15194 case VSX_BUILTIN_XVMULDP:
15195 arg0 = gimple_call_arg (stmt, 0);
15196 arg1 = gimple_call_arg (stmt, 1);
15197 lhs = gimple_call_lhs (stmt);
15198 g = gimple_build_assign (lhs, MULT_EXPR, arg0, arg1);
15199 gimple_set_location (g, gimple_location (stmt));
15200 gsi_replace (gsi, g, true);
15201 return true;
15202 /* Even element flavors of vec_mul (signed). */
15203 case ALTIVEC_BUILTIN_VMULESB:
15204 case ALTIVEC_BUILTIN_VMULESH:
15205 case P8V_BUILTIN_VMULESW:
15206 /* Even element flavors of vec_mul (unsigned). */
15207 case ALTIVEC_BUILTIN_VMULEUB:
15208 case ALTIVEC_BUILTIN_VMULEUH:
15209 case P8V_BUILTIN_VMULEUW:
15210 arg0 = gimple_call_arg (stmt, 0);
15211 arg1 = gimple_call_arg (stmt, 1);
15212 lhs = gimple_call_lhs (stmt);
15213 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
15214 gimple_set_location (g, gimple_location (stmt));
15215 gsi_replace (gsi, g, true);
15216 return true;
15217 /* Odd element flavors of vec_mul (signed). */
15218 case ALTIVEC_BUILTIN_VMULOSB:
15219 case ALTIVEC_BUILTIN_VMULOSH:
15220 case P8V_BUILTIN_VMULOSW:
15221 /* Odd element flavors of vec_mul (unsigned). */
15222 case ALTIVEC_BUILTIN_VMULOUB:
15223 case ALTIVEC_BUILTIN_VMULOUH:
15224 case P8V_BUILTIN_VMULOUW:
15225 arg0 = gimple_call_arg (stmt, 0);
15226 arg1 = gimple_call_arg (stmt, 1);
15227 lhs = gimple_call_lhs (stmt);
15228 g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
15229 gimple_set_location (g, gimple_location (stmt));
15230 gsi_replace (gsi, g, true);
15231 return true;
15232 /* Flavors of vec_div (Integer). */
15233 case VSX_BUILTIN_DIV_V2DI:
15234 case VSX_BUILTIN_UDIV_V2DI:
15235 arg0 = gimple_call_arg (stmt, 0);
15236 arg1 = gimple_call_arg (stmt, 1);
15237 lhs = gimple_call_lhs (stmt);
15238 g = gimple_build_assign (lhs, TRUNC_DIV_EXPR, arg0, arg1);
15239 gimple_set_location (g, gimple_location (stmt));
15240 gsi_replace (gsi, g, true);
15241 return true;
15242 /* Flavors of vec_div (Float). */
15243 case VSX_BUILTIN_XVDIVSP:
15244 case VSX_BUILTIN_XVDIVDP:
15245 arg0 = gimple_call_arg (stmt, 0);
15246 arg1 = gimple_call_arg (stmt, 1);
15247 lhs = gimple_call_lhs (stmt);
15248 g = gimple_build_assign (lhs, RDIV_EXPR, arg0, arg1);
15249 gimple_set_location (g, gimple_location (stmt));
15250 gsi_replace (gsi, g, true);
15251 return true;
15252 /* Flavors of vec_and. */
15253 case ALTIVEC_BUILTIN_VAND:
15254 arg0 = gimple_call_arg (stmt, 0);
15255 arg1 = gimple_call_arg (stmt, 1);
15256 lhs = gimple_call_lhs (stmt);
15257 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
15258 gimple_set_location (g, gimple_location (stmt));
15259 gsi_replace (gsi, g, true);
15260 return true;
15261 /* Flavors of vec_andc. */
15262 case ALTIVEC_BUILTIN_VANDC:
15263 arg0 = gimple_call_arg (stmt, 0);
15264 arg1 = gimple_call_arg (stmt, 1);
15265 lhs = gimple_call_lhs (stmt);
15266 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15267 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15268 gimple_set_location (g, gimple_location (stmt));
15269 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15270 g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
15271 gimple_set_location (g, gimple_location (stmt));
15272 gsi_replace (gsi, g, true);
15273 return true;
15274 /* Flavors of vec_nand. */
15275 case P8V_BUILTIN_VEC_NAND:
15276 case P8V_BUILTIN_NAND_V16QI:
15277 case P8V_BUILTIN_NAND_V8HI:
15278 case P8V_BUILTIN_NAND_V4SI:
15279 case P8V_BUILTIN_NAND_V4SF:
15280 case P8V_BUILTIN_NAND_V2DF:
15281 case P8V_BUILTIN_NAND_V2DI:
15282 arg0 = gimple_call_arg (stmt, 0);
15283 arg1 = gimple_call_arg (stmt, 1);
15284 lhs = gimple_call_lhs (stmt);
15285 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15286 g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
15287 gimple_set_location (g, gimple_location (stmt));
15288 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15289 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15290 gimple_set_location (g, gimple_location (stmt));
15291 gsi_replace (gsi, g, true);
15292 return true;
15293 /* Flavors of vec_or. */
15294 case ALTIVEC_BUILTIN_VOR:
15295 arg0 = gimple_call_arg (stmt, 0);
15296 arg1 = gimple_call_arg (stmt, 1);
15297 lhs = gimple_call_lhs (stmt);
15298 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
15299 gimple_set_location (g, gimple_location (stmt));
15300 gsi_replace (gsi, g, true);
15301 return true;
15302 /* flavors of vec_orc. */
15303 case P8V_BUILTIN_ORC_V16QI:
15304 case P8V_BUILTIN_ORC_V8HI:
15305 case P8V_BUILTIN_ORC_V4SI:
15306 case P8V_BUILTIN_ORC_V4SF:
15307 case P8V_BUILTIN_ORC_V2DF:
15308 case P8V_BUILTIN_ORC_V2DI:
15309 arg0 = gimple_call_arg (stmt, 0);
15310 arg1 = gimple_call_arg (stmt, 1);
15311 lhs = gimple_call_lhs (stmt);
15312 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15313 g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
15314 gimple_set_location (g, gimple_location (stmt));
15315 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15316 g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
15317 gimple_set_location (g, gimple_location (stmt));
15318 gsi_replace (gsi, g, true);
15319 return true;
15320 /* Flavors of vec_xor. */
15321 case ALTIVEC_BUILTIN_VXOR:
15322 arg0 = gimple_call_arg (stmt, 0);
15323 arg1 = gimple_call_arg (stmt, 1);
15324 lhs = gimple_call_lhs (stmt);
15325 g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
15326 gimple_set_location (g, gimple_location (stmt));
15327 gsi_replace (gsi, g, true);
15328 return true;
15329 /* Flavors of vec_nor. */
15330 case ALTIVEC_BUILTIN_VNOR:
15331 arg0 = gimple_call_arg (stmt, 0);
15332 arg1 = gimple_call_arg (stmt, 1);
15333 lhs = gimple_call_lhs (stmt);
15334 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15335 g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
15336 gimple_set_location (g, gimple_location (stmt));
15337 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15338 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15339 gimple_set_location (g, gimple_location (stmt));
15340 gsi_replace (gsi, g, true);
15341 return true;
15342 /* flavors of vec_abs. */
15343 case ALTIVEC_BUILTIN_ABS_V16QI:
15344 case ALTIVEC_BUILTIN_ABS_V8HI:
15345 case ALTIVEC_BUILTIN_ABS_V4SI:
15346 case ALTIVEC_BUILTIN_ABS_V4SF:
15347 case P8V_BUILTIN_ABS_V2DI:
15348 case VSX_BUILTIN_XVABSDP:
15349 arg0 = gimple_call_arg (stmt, 0);
15350 if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
15351 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
15352 return false;
15353 lhs = gimple_call_lhs (stmt);
15354 g = gimple_build_assign (lhs, ABS_EXPR, arg0);
15355 gimple_set_location (g, gimple_location (stmt));
15356 gsi_replace (gsi, g, true);
15357 return true;
15358 /* flavors of vec_min. */
15359 case VSX_BUILTIN_XVMINDP:
15360 case P8V_BUILTIN_VMINSD:
15361 case P8V_BUILTIN_VMINUD:
15362 case ALTIVEC_BUILTIN_VMINSB:
15363 case ALTIVEC_BUILTIN_VMINSH:
15364 case ALTIVEC_BUILTIN_VMINSW:
15365 case ALTIVEC_BUILTIN_VMINUB:
15366 case ALTIVEC_BUILTIN_VMINUH:
15367 case ALTIVEC_BUILTIN_VMINUW:
15368 case ALTIVEC_BUILTIN_VMINFP:
15369 arg0 = gimple_call_arg (stmt, 0);
15370 arg1 = gimple_call_arg (stmt, 1);
15371 lhs = gimple_call_lhs (stmt);
15372 g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
15373 gimple_set_location (g, gimple_location (stmt));
15374 gsi_replace (gsi, g, true);
15375 return true;
15376 /* Flavors of vec_max. */
15377 case VSX_BUILTIN_XVMAXDP:
15378 case P8V_BUILTIN_VMAXSD:
15379 case P8V_BUILTIN_VMAXUD:
15380 case ALTIVEC_BUILTIN_VMAXSB:
15381 case ALTIVEC_BUILTIN_VMAXSH:
15382 case ALTIVEC_BUILTIN_VMAXSW:
15383 case ALTIVEC_BUILTIN_VMAXUB:
15384 case ALTIVEC_BUILTIN_VMAXUH:
15385 case ALTIVEC_BUILTIN_VMAXUW:
15386 case ALTIVEC_BUILTIN_VMAXFP:
15387 arg0 = gimple_call_arg (stmt, 0);
15388 arg1 = gimple_call_arg (stmt, 1);
15389 lhs = gimple_call_lhs (stmt);
15390 g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
15391 gimple_set_location (g, gimple_location (stmt));
15392 gsi_replace (gsi, g, true);
15393 return true;
15394 /* Flavors of vec_eqv. */
15395 case P8V_BUILTIN_EQV_V16QI:
15396 case P8V_BUILTIN_EQV_V8HI:
15397 case P8V_BUILTIN_EQV_V4SI:
15398 case P8V_BUILTIN_EQV_V4SF:
15399 case P8V_BUILTIN_EQV_V2DF:
15400 case P8V_BUILTIN_EQV_V2DI:
15401 arg0 = gimple_call_arg (stmt, 0);
15402 arg1 = gimple_call_arg (stmt, 1);
15403 lhs = gimple_call_lhs (stmt);
15404 temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
15405 g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
15406 gimple_set_location (g, gimple_location (stmt));
15407 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15408 g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
15409 gimple_set_location (g, gimple_location (stmt));
15410 gsi_replace (gsi, g, true);
15411 return true;
15412 /* Flavors of vec_rotate_left. */
15413 case ALTIVEC_BUILTIN_VRLB:
15414 case ALTIVEC_BUILTIN_VRLH:
15415 case ALTIVEC_BUILTIN_VRLW:
15416 case P8V_BUILTIN_VRLD:
15417 arg0 = gimple_call_arg (stmt, 0);
15418 arg1 = gimple_call_arg (stmt, 1);
15419 lhs = gimple_call_lhs (stmt);
15420 g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
15421 gimple_set_location (g, gimple_location (stmt));
15422 gsi_replace (gsi, g, true);
15423 return true;
15424 /* Flavors of vector shift right algebraic.
15425 vec_sra{b,h,w} -> vsra{b,h,w}. */
15426 case ALTIVEC_BUILTIN_VSRAB:
15427 case ALTIVEC_BUILTIN_VSRAH:
15428 case ALTIVEC_BUILTIN_VSRAW:
15429 case P8V_BUILTIN_VSRAD:
15430 {
15431 arg0 = gimple_call_arg (stmt, 0);
15432 arg1 = gimple_call_arg (stmt, 1);
15433 lhs = gimple_call_lhs (stmt);
15434 tree arg1_type = TREE_TYPE (arg1);
15435 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15436 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15437 location_t loc = gimple_location (stmt);
15438 /* Force arg1 into the valid shift-count range for the arg0 type. */
15439 /* Build a vector consisting of the max valid bit-size values. */
15440 int n_elts = VECTOR_CST_NELTS (arg1);
15441 tree element_size = build_int_cst (unsigned_element_type,
15442 128 / n_elts);
15443 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15444 for (int i = 0; i < n_elts; i++)
15445 elts.safe_push (element_size);
15446 tree modulo_tree = elts.build ();
15447 /* Modulo the provided shift value against that vector. */
15448 gimple_seq stmts = NULL;
15449 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15450 unsigned_arg1_type, arg1);
15451 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15452 unsigned_arg1_type, unsigned_arg1,
15453 modulo_tree);
15454 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15455 /* And finally, do the shift. */
15456 g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, new_arg1);
15457 gimple_set_location (g, loc);
15458 gsi_replace (gsi, g, true);
15459 return true;
15460 }
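	  /* Editor's illustration (not in the original sources): for
	     lhs = __builtin_altivec_vsraw (v, s) on V4SI, the statements
	     built above amount to
	       us = VIEW_CONVERT_EXPR<vector unsigned int>(s);
	       t = us % { 32, 32, 32, 32 };    (element size = 128 / 4 bits)
	       lhs = v >> t;                    (arithmetic shift, signed type)
	     The modulo keeps each count in 0..31, matching vsraw, which uses
	     only the low 5 bits of each shift-vector element.  */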
15461 /* Flavors of vector shift left.
15462 builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}. */
15463 case ALTIVEC_BUILTIN_VSLB:
15464 case ALTIVEC_BUILTIN_VSLH:
15465 case ALTIVEC_BUILTIN_VSLW:
15466 case P8V_BUILTIN_VSLD:
15467 {
15468 location_t loc;
15469 gimple_seq stmts = NULL;
15470 arg0 = gimple_call_arg (stmt, 0);
15471 tree arg0_type = TREE_TYPE (arg0);
15472 if (INTEGRAL_TYPE_P (TREE_TYPE (arg0_type))
15473 && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (arg0_type)))
15474 return false;
15475 arg1 = gimple_call_arg (stmt, 1);
15476 tree arg1_type = TREE_TYPE (arg1);
15477 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15478 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15479 loc = gimple_location (stmt);
15480 lhs = gimple_call_lhs (stmt);
15481 /* Force arg1 into the valid shift-count range for the arg0 type. */
15482 /* Build a vector consisting of the max valid bit-size values. */
15483 int n_elts = VECTOR_CST_NELTS (arg1);
15484 int tree_size_in_bits = TREE_INT_CST_LOW (size_in_bytes (arg1_type))
15485 * BITS_PER_UNIT;
15486 tree element_size = build_int_cst (unsigned_element_type,
15487 tree_size_in_bits / n_elts);
15488 tree_vector_builder elts (unsigned_type_for (arg1_type), n_elts, 1);
15489 for (int i = 0; i < n_elts; i++)
15490 elts.safe_push (element_size);
15491 tree modulo_tree = elts.build ();
15492 /* Modulo the provided shift value against that vector. */
15493 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15494 unsigned_arg1_type, arg1);
15495 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15496 unsigned_arg1_type, unsigned_arg1,
15497 modulo_tree);
15498 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15499 /* And finally, do the shift. */
15500 g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, new_arg1);
15501 gimple_set_location (g, gimple_location (stmt));
15502 gsi_replace (gsi, g, true);
15503 return true;
15504 }
15505 /* Flavors of vector shift right. */
15506 case ALTIVEC_BUILTIN_VSRB:
15507 case ALTIVEC_BUILTIN_VSRH:
15508 case ALTIVEC_BUILTIN_VSRW:
15509 case P8V_BUILTIN_VSRD:
15510 {
15511 arg0 = gimple_call_arg (stmt, 0);
15512 arg1 = gimple_call_arg (stmt, 1);
15513 lhs = gimple_call_lhs (stmt);
15514 tree arg1_type = TREE_TYPE (arg1);
15515 tree unsigned_arg1_type = unsigned_type_for (TREE_TYPE (arg1));
15516 tree unsigned_element_type = unsigned_type_for (TREE_TYPE (arg1_type));
15517 location_t loc = gimple_location (stmt);
15518 gimple_seq stmts = NULL;
15519 /* Convert arg0 to unsigned. */
15520 tree arg0_unsigned
15521 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15522 unsigned_type_for (TREE_TYPE (arg0)), arg0);
15523 /* Force arg1 into the valid shift-count range for the arg0 type. */
15524 /* Build a vector consisting of the max valid bit-size values. */
15525 int n_elts = VECTOR_CST_NELTS (arg1);
15526 tree element_size = build_int_cst (unsigned_element_type,
15527 128 / n_elts);
15528 tree_vector_builder elts (unsigned_arg1_type, n_elts, 1);
15529 for (int i = 0; i < n_elts; i++)
15530 elts.safe_push (element_size);
15531 tree modulo_tree = elts.build ();
15532 /* Modulo the provided shift value against that vector. */
15533 tree unsigned_arg1 = gimple_build (&stmts, VIEW_CONVERT_EXPR,
15534 unsigned_arg1_type, arg1);
15535 tree new_arg1 = gimple_build (&stmts, loc, TRUNC_MOD_EXPR,
15536 unsigned_arg1_type, unsigned_arg1,
15537 modulo_tree);
15538 /* Do the shift. */
15539 tree res
15540 = gimple_build (&stmts, RSHIFT_EXPR,
15541 TREE_TYPE (arg0_unsigned), arg0_unsigned, new_arg1);
15542 /* Convert result back to the lhs type. */
15543 res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
15544 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15545 update_call_from_tree (gsi, res);
15546 return true;
15547 }
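	  /* Editor's illustration (not in the original sources): vec_sr
	     differs from the vec_sra case only in that arg0 is first
	     view-converted to an unsigned vector, so the RSHIFT_EXPR above
	     zero-extends:
	       uv = VIEW_CONVERT_EXPR<vector unsigned int>(v);
	       t = (unsigned shift vector) % { 32, 32, 32, 32 };
	       res = uv >> t;
	       lhs = VIEW_CONVERT_EXPR<type of lhs>(res);  */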
15548 /* Vector loads. */
15549 case ALTIVEC_BUILTIN_LVX_V16QI:
15550 case ALTIVEC_BUILTIN_LVX_V8HI:
15551 case ALTIVEC_BUILTIN_LVX_V4SI:
15552 case ALTIVEC_BUILTIN_LVX_V4SF:
15553 case ALTIVEC_BUILTIN_LVX_V2DI:
15554 case ALTIVEC_BUILTIN_LVX_V2DF:
15555 case ALTIVEC_BUILTIN_LVX_V1TI:
15556 {
15557 arg0 = gimple_call_arg (stmt, 0); /* Offset. */
15558 arg1 = gimple_call_arg (stmt, 1); /* Address. */
15559 lhs = gimple_call_lhs (stmt);
15560 location_t loc = gimple_location (stmt);
15561 /* Since arg1 may be cast to a different type, just use ptr_type_node
15562 here instead of trying to enforce TBAA on pointer types. */
15563 tree arg1_type = ptr_type_node;
15564 tree lhs_type = TREE_TYPE (lhs);
15565 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15566 the tree using the value from arg0. The resulting type will match
15567 the type of arg1. */
15568 gimple_seq stmts = NULL;
15569 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15570 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15571 arg1_type, arg1, temp_offset);
15572 /* Mask off any lower bits from the address. */
15573 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15574 arg1_type, temp_addr,
15575 build_int_cst (arg1_type, -16));
15576 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15577 if (!is_gimple_mem_ref_addr (aligned_addr))
15578 {
15579 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15580 gimple *g = gimple_build_assign (t, aligned_addr);
15581 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15582 aligned_addr = t;
15583 }
15584 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15585 take an offset, but since we've already incorporated the offset
15586 above, here we just pass in a zero. */
15587 gimple *g
15588 = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
15589 build_int_cst (arg1_type, 0)));
15590 gimple_set_location (g, loc);
15591 gsi_replace (gsi, g, true);
15592 return true;
15593 }
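	  /* Editor's illustration (not in the original sources): the lvx
	     instruction ignores the low four bits of its effective address,
	     and the gimple built above mirrors that:
	       addr = base + offset;
	       lhs = *(vector type *)(addr & -16);
	     The BIT_AND_EXPR with -16 reproduces that 16-byte truncation
	     in the IR.  */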
15594 /* Vector stores. */
15595 case ALTIVEC_BUILTIN_STVX_V16QI:
15596 case ALTIVEC_BUILTIN_STVX_V8HI:
15597 case ALTIVEC_BUILTIN_STVX_V4SI:
15598 case ALTIVEC_BUILTIN_STVX_V4SF:
15599 case ALTIVEC_BUILTIN_STVX_V2DI:
15600 case ALTIVEC_BUILTIN_STVX_V2DF:
15601 {
15602 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15603 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15604 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15605 location_t loc = gimple_location (stmt);
15606 tree arg0_type = TREE_TYPE (arg0);
15607 /* Use ptr_type_node (no TBAA) for the arg2_type.
15608 FIXME: (Richard) "A proper fix would be to transition this type as
15609 seen from the frontend to GIMPLE, for example in a similar way we
15610 do for MEM_REFs by piggy-backing that on an extra argument, a
15611 constant zero pointer of the alias pointer type to use (which would
15612 also serve as a type indicator of the store itself). I'd use a
15613 target specific internal function for this (not sure if we can have
15614 those target specific, but I guess if it's folded away then that's
15615 fine) and get away with the overload set." */
15616 tree arg2_type = ptr_type_node;
15617 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15618 the tree using the value from arg1. The resulting type will match
15619 the type of arg2. */
15620 gimple_seq stmts = NULL;
15621 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15622 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15623 arg2_type, arg2, temp_offset);
15624 /* Mask off any lower bits from the address. */
15625 tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
15626 arg2_type, temp_addr,
15627 build_int_cst (arg2_type, -16));
15628 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15629 if (!is_gimple_mem_ref_addr (aligned_addr))
15630 {
15631 tree t = make_ssa_name (TREE_TYPE (aligned_addr));
15632 gimple *g = gimple_build_assign (t, aligned_addr);
15633 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15634 aligned_addr = t;
15635 }
15636 /* The desired gimple result should be similar to:
15637 MEM[(__vector floatD.1407 *)_1] = vf1D.2697; */
15638 gimple *g
15639 = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
15640 build_int_cst (arg2_type, 0)), arg0);
15641 gimple_set_location (g, loc);
15642 gsi_replace (gsi, g, true);
15643 return true;
15644 }
15645
15646 /* Unaligned vector loads. */
15647 case VSX_BUILTIN_LXVW4X_V16QI:
15648 case VSX_BUILTIN_LXVW4X_V8HI:
15649 case VSX_BUILTIN_LXVW4X_V4SF:
15650 case VSX_BUILTIN_LXVW4X_V4SI:
15651 case VSX_BUILTIN_LXVD2X_V2DF:
15652 case VSX_BUILTIN_LXVD2X_V2DI:
15653 {
15654 arg0 = gimple_call_arg (stmt, 0); /* Offset. */
15655 arg1 = gimple_call_arg (stmt, 1); /* Address. */
15656 lhs = gimple_call_lhs (stmt);
15657 location_t loc = gimple_location (stmt);
15658 /* Since arg1 may be cast to a different type, just use ptr_type_node
15659 here instead of trying to enforce TBAA on pointer types. */
15660 tree arg1_type = ptr_type_node;
15661 tree lhs_type = TREE_TYPE (lhs);
15662 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15663 required alignment (on Power) is 4 bytes regardless of data type. */
15664 tree align_ltype = build_aligned_type (lhs_type, 4);
15665 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15666 the tree using the value from arg0. The resulting type will match
15667 the type of arg1. */
15668 gimple_seq stmts = NULL;
15669 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
15670 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15671 arg1_type, arg1, temp_offset);
15672 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15673 if (!is_gimple_mem_ref_addr (temp_addr))
15674 {
15675 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15676 gimple *g = gimple_build_assign (t, temp_addr);
15677 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15678 temp_addr = t;
15679 }
15680 /* Use the build2 helper to set up the mem_ref. The MEM_REF could also
15681 take an offset, but since we've already incorporated the offset
15682 above, here we just pass in a zero. */
15683 gimple *g;
15684 g = gimple_build_assign (lhs, build2 (MEM_REF, align_ltype, temp_addr,
15685 build_int_cst (arg1_type, 0)));
15686 gimple_set_location (g, loc);
15687 gsi_replace (gsi, g, true);
15688 return true;
15689 }
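	  /* Editor's illustration (not in the original sources): unlike the
	     lvx case there is no address masking here; instead the MEM_REF
	     is given a 4-byte-aligned variant of the lhs type, so a dump
	     looks roughly like
	       _1 = base + offset;
	       lhs = MEM[(vector double *)_1];   (type carries align 4)
	     letting later passes know the access may be only word aligned.  */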
15690
15691 /* Unaligned vector stores. */
15692 case VSX_BUILTIN_STXVW4X_V16QI:
15693 case VSX_BUILTIN_STXVW4X_V8HI:
15694 case VSX_BUILTIN_STXVW4X_V4SF:
15695 case VSX_BUILTIN_STXVW4X_V4SI:
15696 case VSX_BUILTIN_STXVD2X_V2DF:
15697 case VSX_BUILTIN_STXVD2X_V2DI:
15698 {
15699 arg0 = gimple_call_arg (stmt, 0); /* Value to be stored. */
15700 arg1 = gimple_call_arg (stmt, 1); /* Offset. */
15701 tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address. */
15702 location_t loc = gimple_location (stmt);
15703 tree arg0_type = TREE_TYPE (arg0);
15704 /* Use ptr_type_node (no TBAA) for the arg2_type. */
15705 tree arg2_type = ptr_type_node;
15706 /* In GIMPLE the type of the MEM_REF specifies the alignment. The
15707 required alignment (on Power) is 4 bytes regardless of data type. */
15708 tree align_stype = build_aligned_type (arg0_type, 4);
15709 /* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'. Create
15710 the tree using the value from arg1. */
15711 gimple_seq stmts = NULL;
15712 tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
15713 tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
15714 arg2_type, arg2, temp_offset);
15715 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15716 if (!is_gimple_mem_ref_addr (temp_addr))
15717 {
15718 tree t = make_ssa_name (TREE_TYPE (temp_addr));
15719 gimple *g = gimple_build_assign (t, temp_addr);
15720 gsi_insert_before (gsi, g, GSI_SAME_STMT);
15721 temp_addr = t;
15722 }
15723 gimple *g;
15724 g = gimple_build_assign (build2 (MEM_REF, align_stype, temp_addr,
15725 build_int_cst (arg2_type, 0)), arg0);
15726 gimple_set_location (g, loc);
15727 gsi_replace (gsi, g, true);
15728 return true;
15729 }
15730
15731 /* Vector Fused multiply-add (fma). */
15732 case ALTIVEC_BUILTIN_VMADDFP:
15733 case VSX_BUILTIN_XVMADDDP:
15734 case ALTIVEC_BUILTIN_VMLADDUHM:
15735 {
15736 arg0 = gimple_call_arg (stmt, 0);
15737 arg1 = gimple_call_arg (stmt, 1);
15738 tree arg2 = gimple_call_arg (stmt, 2);
15739 lhs = gimple_call_lhs (stmt);
15740 gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
15741 gimple_call_set_lhs (g, lhs);
15742 gimple_call_set_nothrow (g, true);
15743 gimple_set_location (g, gimple_location (stmt));
15744 gsi_replace (gsi, g, true);
15745 return true;
15746 }
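	  /* Editor's note (not in the original sources): these three-operand
	     multiply-add builtins are folded to the internal function
	     IFN_FMA, which shows up in gimple dumps as
	       lhs = .FMA (a, b, c);
	     For VMLADDUHM this is an integer multiply-low-and-add; the same
	     internal function covers both the float and integer forms.  */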
15747
15748 /* Vector compares; EQ, NE, GE, GT, LE. */
15749 case ALTIVEC_BUILTIN_VCMPEQUB:
15750 case ALTIVEC_BUILTIN_VCMPEQUH:
15751 case ALTIVEC_BUILTIN_VCMPEQUW:
15752 case P8V_BUILTIN_VCMPEQUD:
15753 fold_compare_helper (gsi, EQ_EXPR, stmt);
15754 return true;
15755
15756 case P9V_BUILTIN_CMPNEB:
15757 case P9V_BUILTIN_CMPNEH:
15758 case P9V_BUILTIN_CMPNEW:
15759 fold_compare_helper (gsi, NE_EXPR, stmt);
15760 return true;
15761
15762 case VSX_BUILTIN_CMPGE_16QI:
15763 case VSX_BUILTIN_CMPGE_U16QI:
15764 case VSX_BUILTIN_CMPGE_8HI:
15765 case VSX_BUILTIN_CMPGE_U8HI:
15766 case VSX_BUILTIN_CMPGE_4SI:
15767 case VSX_BUILTIN_CMPGE_U4SI:
15768 case VSX_BUILTIN_CMPGE_2DI:
15769 case VSX_BUILTIN_CMPGE_U2DI:
15770 fold_compare_helper (gsi, GE_EXPR, stmt);
15771 return true;
15772
15773 case ALTIVEC_BUILTIN_VCMPGTSB:
15774 case ALTIVEC_BUILTIN_VCMPGTUB:
15775 case ALTIVEC_BUILTIN_VCMPGTSH:
15776 case ALTIVEC_BUILTIN_VCMPGTUH:
15777 case ALTIVEC_BUILTIN_VCMPGTSW:
15778 case ALTIVEC_BUILTIN_VCMPGTUW:
15779 case P8V_BUILTIN_VCMPGTUD:
15780 case P8V_BUILTIN_VCMPGTSD:
15781 fold_compare_helper (gsi, GT_EXPR, stmt);
15782 return true;
15783
15784 case VSX_BUILTIN_CMPLE_16QI:
15785 case VSX_BUILTIN_CMPLE_U16QI:
15786 case VSX_BUILTIN_CMPLE_8HI:
15787 case VSX_BUILTIN_CMPLE_U8HI:
15788 case VSX_BUILTIN_CMPLE_4SI:
15789 case VSX_BUILTIN_CMPLE_U4SI:
15790 case VSX_BUILTIN_CMPLE_2DI:
15791 case VSX_BUILTIN_CMPLE_U2DI:
15792 fold_compare_helper (gsi, LE_EXPR, stmt);
15793 return true;
15794
15795 /* Flavors of vec_splat_[us]{8,16,32}. */
15796 case ALTIVEC_BUILTIN_VSPLTISB:
15797 case ALTIVEC_BUILTIN_VSPLTISH:
15798 case ALTIVEC_BUILTIN_VSPLTISW:
15799 {
15800 arg0 = gimple_call_arg (stmt, 0);
15801 lhs = gimple_call_lhs (stmt);
15802
15803 /* Only fold the vec_splat_*() if arg0 is an integer constant in the
15804 5-bit signed range -16 to +15. */
15805 if (TREE_CODE (arg0) != INTEGER_CST
15806 || !IN_RANGE (TREE_INT_CST_LOW (arg0), -16, 15))
15807 return false;
15808 gimple_seq stmts = NULL;
15809 location_t loc = gimple_location (stmt);
15810 tree splat_value = gimple_convert (&stmts, loc,
15811 TREE_TYPE (TREE_TYPE (lhs)), arg0);
15812 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15813 tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
15814 g = gimple_build_assign (lhs, splat_tree);
15815 gimple_set_location (g, gimple_location (stmt));
15816 gsi_replace (gsi, g, true);
15817 return true;
15818 }
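	  /* Editor's illustration (not in the original sources): a call such
	     as vec_splat_s32 (7), i.e. __builtin_altivec_vspltisw (7), folds
	     to the vector constant { 7, 7, 7, 7 }; an argument outside the
	     signed 5-bit range is left to the normal expansion path via the
	     return false above.  */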
15819
15820 /* Flavors of vec_splat. */
15821 /* a = vec_splat (b, 0x3) becomes a = { b[3],b[3],b[3],...}; */
15822 case ALTIVEC_BUILTIN_VSPLTB:
15823 case ALTIVEC_BUILTIN_VSPLTH:
15824 case ALTIVEC_BUILTIN_VSPLTW:
15825 case VSX_BUILTIN_XXSPLTD_V2DI:
15826 case VSX_BUILTIN_XXSPLTD_V2DF:
15827 {
15828 arg0 = gimple_call_arg (stmt, 0); /* input vector. */
15829 arg1 = gimple_call_arg (stmt, 1); /* index into arg0. */
15830 /* Only fold the vec_splat () if arg1 is both a constant value and
15831 a valid index into the arg0 vector. */
15832 unsigned int n_elts = VECTOR_CST_NELTS (arg0);
15833 if (TREE_CODE (arg1) != INTEGER_CST
15834 || TREE_INT_CST_LOW (arg1) > (n_elts - 1))
15835 return false;
15836 lhs = gimple_call_lhs (stmt);
15837 tree lhs_type = TREE_TYPE (lhs);
15838 tree arg0_type = TREE_TYPE (arg0);
15839 tree splat;
15840 if (TREE_CODE (arg0) == VECTOR_CST)
15841 splat = VECTOR_CST_ELT (arg0, TREE_INT_CST_LOW (arg1));
15842 else
15843 {
15844 /* Determine (in bits) the length and start location of the
15845 splat value for a call to the tree_vec_extract helper. */
15846 int splat_elem_size = TREE_INT_CST_LOW (size_in_bytes (arg0_type))
15847 * BITS_PER_UNIT / n_elts;
15848 int splat_start_bit = TREE_INT_CST_LOW (arg1) * splat_elem_size;
15849 tree len = build_int_cst (bitsizetype, splat_elem_size);
15850 tree start = build_int_cst (bitsizetype, splat_start_bit);
15851 splat = tree_vec_extract (gsi, TREE_TYPE (lhs_type), arg0,
15852 len, start);
15853 }
15854 /* And finally, build the new vector. */
15855 tree splat_tree = build_vector_from_val (lhs_type, splat);
15856 g = gimple_build_assign (lhs, splat_tree);
15857 gimple_set_location (g, gimple_location (stmt));
15858 gsi_replace (gsi, g, true);
15859 return true;
15860 }
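	  /* Editor's worked example (not in the original sources): for
	     vec_splat (v, 3) on V4SI, splat_elem_size is 16 * 8 / 4 = 32
	     bits and splat_start_bit is 3 * 32 = 96, so tree_vec_extract
	     pulls the 32-bit element at bit offset 96 out of v and
	     build_vector_from_val replicates it, giving
	     lhs = { v[3], v[3], v[3], v[3] } (when v is itself a VECTOR_CST
	     the element is taken directly instead).  */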
15861
15862 /* vec_mergel (integrals). */
15863 case ALTIVEC_BUILTIN_VMRGLH:
15864 case ALTIVEC_BUILTIN_VMRGLW:
15865 case VSX_BUILTIN_XXMRGLW_4SI:
15866 case ALTIVEC_BUILTIN_VMRGLB:
15867 case VSX_BUILTIN_VEC_MERGEL_V2DI:
15868 case VSX_BUILTIN_XXMRGLW_4SF:
15869 case VSX_BUILTIN_VEC_MERGEL_V2DF:
15870 fold_mergehl_helper (gsi, stmt, 1);
15871 return true;
15872 /* vec_mergeh (integrals). */
15873 case ALTIVEC_BUILTIN_VMRGHH:
15874 case ALTIVEC_BUILTIN_VMRGHW:
15875 case VSX_BUILTIN_XXMRGHW_4SI:
15876 case ALTIVEC_BUILTIN_VMRGHB:
15877 case VSX_BUILTIN_VEC_MERGEH_V2DI:
15878 case VSX_BUILTIN_XXMRGHW_4SF:
15879 case VSX_BUILTIN_VEC_MERGEH_V2DF:
15880 fold_mergehl_helper (gsi, stmt, 0);
15881 return true;
15882
15883 /* Flavors of vec_mergee. */
15884 case P8V_BUILTIN_VMRGEW_V4SI:
15885 case P8V_BUILTIN_VMRGEW_V2DI:
15886 case P8V_BUILTIN_VMRGEW_V4SF:
15887 case P8V_BUILTIN_VMRGEW_V2DF:
15888 fold_mergeeo_helper (gsi, stmt, 0);
15889 return true;
15890 /* Flavors of vec_mergeo. */
15891 case P8V_BUILTIN_VMRGOW_V4SI:
15892 case P8V_BUILTIN_VMRGOW_V2DI:
15893 case P8V_BUILTIN_VMRGOW_V4SF:
15894 case P8V_BUILTIN_VMRGOW_V2DF:
15895 fold_mergeeo_helper (gsi, stmt, 1);
15896 return true;
15897
15898 /* d = vec_pack (a, b) */
15899 case P8V_BUILTIN_VPKUDUM:
15900 case ALTIVEC_BUILTIN_VPKUHUM:
15901 case ALTIVEC_BUILTIN_VPKUWUM:
15902 {
15903 arg0 = gimple_call_arg (stmt, 0);
15904 arg1 = gimple_call_arg (stmt, 1);
15905 lhs = gimple_call_lhs (stmt);
15906 gimple *g = gimple_build_assign (lhs, VEC_PACK_TRUNC_EXPR, arg0, arg1);
15907 gimple_set_location (g, gimple_location (stmt));
15908 gsi_replace (gsi, g, true);
15909 return true;
15910 }
15911
15912 /* d = vec_unpackh (a) */
15913 /* Note that the UNPACK_{HI,LO}_EXPR used in the gimple_build_assign call
15914 in this code is sensitive to endianness, and needs to be inverted to
15915 handle both LE and BE targets. */
15916 case ALTIVEC_BUILTIN_VUPKHSB:
15917 case ALTIVEC_BUILTIN_VUPKHSH:
15918 case P8V_BUILTIN_VUPKHSW:
15919 {
15920 arg0 = gimple_call_arg (stmt, 0);
15921 lhs = gimple_call_lhs (stmt);
15922 if (BYTES_BIG_ENDIAN)
15923 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15924 else
15925 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15926 gimple_set_location (g, gimple_location (stmt));
15927 gsi_replace (gsi, g, true);
15928 return true;
15929 }
15930 /* d = vec_unpackl (a) */
15931 case ALTIVEC_BUILTIN_VUPKLSB:
15932 case ALTIVEC_BUILTIN_VUPKLSH:
15933 case P8V_BUILTIN_VUPKLSW:
15934 {
15935 arg0 = gimple_call_arg (stmt, 0);
15936 lhs = gimple_call_lhs (stmt);
15937 if (BYTES_BIG_ENDIAN)
15938 g = gimple_build_assign (lhs, VEC_UNPACK_LO_EXPR, arg0);
15939 else
15940 g = gimple_build_assign (lhs, VEC_UNPACK_HI_EXPR, arg0);
15941 gimple_set_location (g, gimple_location (stmt));
15942 gsi_replace (gsi, g, true);
15943 return true;
15944 }
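	  /* Editor's note (not in the original sources): loosely, vec_unpackh
	     widens the elements that come first in AltiVec (big-endian)
	     element order.  On a BE target those are the half selected by
	     VEC_UNPACK_HI_EXPR; on an LE target the same elements land in
	     the "low" half, hence the swapped tree codes in the two cases
	     above.  */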
15945 /* There is no gimple type corresponding to pixel, so just return. */
15946 case ALTIVEC_BUILTIN_VUPKHPX:
15947 case ALTIVEC_BUILTIN_VUPKLPX:
15948 return false;
15949
15950 /* vec_perm. */
15951 case ALTIVEC_BUILTIN_VPERM_16QI:
15952 case ALTIVEC_BUILTIN_VPERM_8HI:
15953 case ALTIVEC_BUILTIN_VPERM_4SI:
15954 case ALTIVEC_BUILTIN_VPERM_2DI:
15955 case ALTIVEC_BUILTIN_VPERM_4SF:
15956 case ALTIVEC_BUILTIN_VPERM_2DF:
15957 {
15958 arg0 = gimple_call_arg (stmt, 0);
15959 arg1 = gimple_call_arg (stmt, 1);
15960 tree permute = gimple_call_arg (stmt, 2);
15961 lhs = gimple_call_lhs (stmt);
15962 location_t loc = gimple_location (stmt);
15963 gimple_seq stmts = NULL;
15964 /* Convert arg0 and arg1 to match the type of the permute
15965 for the VEC_PERM_EXPR operation. */
15966 tree permute_type = (TREE_TYPE (permute));
15967 tree arg0_ptype = gimple_convert (&stmts, loc, permute_type, arg0);
15968 tree arg1_ptype = gimple_convert (&stmts, loc, permute_type, arg1);
15969 tree lhs_ptype = gimple_build (&stmts, loc, VEC_PERM_EXPR,
15970 permute_type, arg0_ptype, arg1_ptype,
15971 permute);
15972 /* Convert the result back to the desired lhs type upon completion. */
15973 tree temp = gimple_convert (&stmts, loc, TREE_TYPE (lhs), lhs_ptype);
15974 gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
15975 g = gimple_build_assign (lhs, temp);
15976 gimple_set_location (g, loc);
15977 gsi_replace (gsi, g, true);
15978 return true;
15979 }
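	  /* Editor's illustration (not in the original sources): for
	     vec_perm (a, b, p) with V4SF inputs and a V16QI permute vector,
	     the conversions above are in effect VIEW_CONVERT_EXPRs
	     (gimple_convert of same-size vector types), so the fold is
	     roughly
	       a1 = VIEW_CONVERT_EXPR<v16qi>(a);
	       b1 = VIEW_CONVERT_EXPR<v16qi>(b);
	       r = VEC_PERM_EXPR <a1, b1, p>;
	       lhs = VIEW_CONVERT_EXPR<v4sf>(r);  */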
15980
15981 default:
15982 if (TARGET_DEBUG_BUILTIN)
15983 fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
15984 fn_code, fn_name1, fn_name2);
15985 break;
15986 }
15987
15988 return false;
15989 }
15990
15991 /* Expand an expression EXP that calls a built-in function,
15992 with result going to TARGET if that's convenient
15993 (and in mode MODE if that's convenient).
15994 SUBTARGET may be used as the target for computing one of EXP's operands.
15995 IGNORE is nonzero if the value is to be ignored. */
15996
15997 static rtx
15998 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
15999 machine_mode mode ATTRIBUTE_UNUSED,
16000 int ignore ATTRIBUTE_UNUSED)
16001 {
16002 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16003 enum rs6000_builtins fcode
16004 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16005 size_t uns_fcode = (size_t)fcode;
16006 const struct builtin_description *d;
16007 size_t i;
16008 rtx ret;
16009 bool success;
16010 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16011 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16012 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16013
16014 /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
16015 floating point type, depending on whether long double is the IBM extended
16016 double (KFmode) or long double is IEEE 128-bit (TFmode). It is simpler if
16017 we only define one variant of the built-in function, and switch the code
16018 when defining it, rather than defining two built-ins and using the
16019 overload table in rs6000-c.c to switch between the two. If we don't have
16020 the proper assembler, don't do this switch because CODE_FOR_*kf* and
16021 CODE_FOR_*tf* will be CODE_FOR_nothing. */
16022 if (FLOAT128_IEEE_P (TFmode))
16023 switch (icode)
16024 {
16025 default:
16026 break;
16027
16028 case CODE_FOR_sqrtkf2_odd: icode = CODE_FOR_sqrttf2_odd; break;
16029 case CODE_FOR_trunckfdf2_odd: icode = CODE_FOR_trunctfdf2_odd; break;
16030 case CODE_FOR_addkf3_odd: icode = CODE_FOR_addtf3_odd; break;
16031 case CODE_FOR_subkf3_odd: icode = CODE_FOR_subtf3_odd; break;
16032 case CODE_FOR_mulkf3_odd: icode = CODE_FOR_multf3_odd; break;
16033 case CODE_FOR_divkf3_odd: icode = CODE_FOR_divtf3_odd; break;
16034 case CODE_FOR_fmakf4_odd: icode = CODE_FOR_fmatf4_odd; break;
16035 case CODE_FOR_xsxexpqp_kf: icode = CODE_FOR_xsxexpqp_tf; break;
16036 case CODE_FOR_xsxsigqp_kf: icode = CODE_FOR_xsxsigqp_tf; break;
16037 case CODE_FOR_xststdcnegqp_kf: icode = CODE_FOR_xststdcnegqp_tf; break;
16038 case CODE_FOR_xsiexpqp_kf: icode = CODE_FOR_xsiexpqp_tf; break;
16039 case CODE_FOR_xsiexpqpf_kf: icode = CODE_FOR_xsiexpqpf_tf; break;
16040 case CODE_FOR_xststdcqp_kf: icode = CODE_FOR_xststdcqp_tf; break;
16041 }
16042
16043 if (TARGET_DEBUG_BUILTIN)
16044 {
16045 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16046 const char *name2 = (icode != CODE_FOR_nothing)
16047 ? get_insn_name ((int) icode)
16048 : "nothing";
16049 const char *name3;
16050
16051 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16052 {
16053 default: name3 = "unknown"; break;
16054 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16055 case RS6000_BTC_UNARY: name3 = "unary"; break;
16056 case RS6000_BTC_BINARY: name3 = "binary"; break;
16057 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16058 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16059 case RS6000_BTC_ABS: name3 = "abs"; break;
16060 case RS6000_BTC_DST: name3 = "dst"; break;
16061 }
16062
16063
16064 fprintf (stderr,
16065 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16066 (name1) ? name1 : "---", fcode,
16067 (name2) ? name2 : "---", (int) icode,
16068 name3,
16069 func_valid_p ? "" : ", not valid");
16070 }
16071
16072 if (!func_valid_p)
16073 {
16074 rs6000_invalid_builtin (fcode);
16075
16076 /* Given it is invalid, just generate a normal call. */
16077 return expand_call (exp, target, ignore);
16078 }
16079
16080 switch (fcode)
16081 {
16082 case RS6000_BUILTIN_RECIP:
16083 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16084
16085 case RS6000_BUILTIN_RECIPF:
16086 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16087
16088 case RS6000_BUILTIN_RSQRTF:
16089 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16090
16091 case RS6000_BUILTIN_RSQRT:
16092 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16093
16094 case POWER7_BUILTIN_BPERMD:
16095 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16096 ? CODE_FOR_bpermd_di
16097 : CODE_FOR_bpermd_si), exp, target);
16098
16099 case RS6000_BUILTIN_GET_TB:
16100 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16101 target);
16102
16103 case RS6000_BUILTIN_MFTB:
16104 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16105 ? CODE_FOR_rs6000_mftb_di
16106 : CODE_FOR_rs6000_mftb_si),
16107 target);
16108
16109 case RS6000_BUILTIN_MFFS:
16110 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16111
16112 case RS6000_BUILTIN_MTFSB0:
16113 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb0, exp);
16114
16115 case RS6000_BUILTIN_MTFSB1:
16116 return rs6000_expand_mtfsb_builtin (CODE_FOR_rs6000_mtfsb1, exp);
16117
16118 case RS6000_BUILTIN_SET_FPSCR_RN:
16119 return rs6000_expand_set_fpscr_rn_builtin (CODE_FOR_rs6000_set_fpscr_rn,
16120 exp);
16121
16122 case RS6000_BUILTIN_SET_FPSCR_DRN:
16123 return
16124 rs6000_expand_set_fpscr_drn_builtin (CODE_FOR_rs6000_set_fpscr_drn,
16125 exp);
16126
16127 case RS6000_BUILTIN_MFFSL:
16128 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffsl, target);
16129
16130 case RS6000_BUILTIN_MTFSF:
16131 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16132
16133 case RS6000_BUILTIN_CPU_INIT:
16134 case RS6000_BUILTIN_CPU_IS:
16135 case RS6000_BUILTIN_CPU_SUPPORTS:
16136 return cpu_expand_builtin (fcode, exp, target);
16137
16138 case MISC_BUILTIN_SPEC_BARRIER:
16139 {
16140 emit_insn (gen_speculation_barrier ());
16141 return NULL_RTX;
16142 }
16143
16144 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16145 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16146 {
16147 int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16148 : (int) CODE_FOR_altivec_lvsl_direct);
16149 machine_mode tmode = insn_data[icode2].operand[0].mode;
16150 machine_mode mode = insn_data[icode2].operand[1].mode;
16151 tree arg;
16152 rtx op, addr, pat;
16153
16154 gcc_assert (TARGET_ALTIVEC);
16155
16156 arg = CALL_EXPR_ARG (exp, 0);
16157 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16158 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16159 addr = memory_address (mode, op);
16160 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16161 op = addr;
16162 else
16163 {
16164 /* For the load case we need to negate the address. */
16165 op = gen_reg_rtx (GET_MODE (addr));
16166 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16167 }
16168 op = gen_rtx_MEM (mode, op);
16169
16170 if (target == 0
16171 || GET_MODE (target) != tmode
16172 || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
16173 target = gen_reg_rtx (tmode);
16174
16175 pat = GEN_FCN (icode2) (target, op);
16176 if (!pat)
16177 return 0;
16178 emit_insn (pat);
16179
16180 return target;
16181 }
16182
16183 case ALTIVEC_BUILTIN_VCFUX:
16184 case ALTIVEC_BUILTIN_VCFSX:
16185 case ALTIVEC_BUILTIN_VCTUXS:
16186 case ALTIVEC_BUILTIN_VCTSXS:
16187 /* FIXME: There's got to be a nicer way to handle this case than
16188 constructing a new CALL_EXPR. */
16189 if (call_expr_nargs (exp) == 1)
16190 {
16191 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16192 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16193 }
16194 break;
16195
16196 /* For the pack and unpack int128 routines, fix up the builtin so it
16197 uses the correct IBM128 type. */
16198 case MISC_BUILTIN_PACK_IF:
16199 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16200 {
16201 icode = CODE_FOR_packtf;
16202 fcode = MISC_BUILTIN_PACK_TF;
16203 uns_fcode = (size_t)fcode;
16204 }
16205 break;
16206
16207 case MISC_BUILTIN_UNPACK_IF:
16208 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
16209 {
16210 icode = CODE_FOR_unpacktf;
16211 fcode = MISC_BUILTIN_UNPACK_TF;
16212 uns_fcode = (size_t)fcode;
16213 }
16214 break;
16215
16216 default:
16217 break;
16218 }
16219
16220 if (TARGET_ALTIVEC)
16221 {
16222 ret = altivec_expand_builtin (exp, target, &success);
16223
16224 if (success)
16225 return ret;
16226 }
16227 if (TARGET_HTM)
16228 {
16229 ret = htm_expand_builtin (exp, target, &success);
16230
16231 if (success)
16232 return ret;
16233 }
16234
16235 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16236 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16237 gcc_assert (attr == RS6000_BTC_UNARY
16238 || attr == RS6000_BTC_BINARY
16239 || attr == RS6000_BTC_TERNARY
16240 || attr == RS6000_BTC_SPECIAL);
16241
16242 /* Handle simple unary operations. */
16243 d = bdesc_1arg;
16244 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16245 if (d->code == fcode)
16246 return rs6000_expand_unop_builtin (icode, exp, target);
16247
16248 /* Handle simple binary operations. */
16249 d = bdesc_2arg;
16250 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16251 if (d->code == fcode)
16252 return rs6000_expand_binop_builtin (icode, exp, target);
16253
16254 /* Handle simple ternary operations. */
16255 d = bdesc_3arg;
16256 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16257 if (d->code == fcode)
16258 return rs6000_expand_ternop_builtin (icode, exp, target);
16259
16260 /* Handle simple no-argument operations. */
16261 d = bdesc_0arg;
16262 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16263 if (d->code == fcode)
16264 return rs6000_expand_zeroop_builtin (icode, target);
16265
16266 gcc_unreachable ();
16267 }
16268
16269 /* Create a builtin vector type with a name, taking care not to give
16270 the canonical type a name. */
16271
16272 static tree
16273 rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
16274 {
16275 tree result = build_vector_type (elt_type, num_elts);
16276
16277 /* Copy so we don't give the canonical type a name. */
16278 result = build_variant_type_copy (result);
16279
16280 add_builtin_type (name, result);
16281
16282 return result;
16283 }
16284
16285 static void
16286 rs6000_init_builtins (void)
16287 {
16288 tree tdecl;
16289 tree ftype;
16290 machine_mode mode;
16291
16292 if (TARGET_DEBUG_BUILTIN)
16293 fprintf (stderr, "rs6000_init_builtins%s%s\n",
16294 (TARGET_ALTIVEC) ? ", altivec" : "",
16295 (TARGET_VSX) ? ", vsx" : "");
16296
16297 V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
16298 : "__vector long long",
16299 intDI_type_node, 2);
16300 V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
16301 V4SI_type_node = rs6000_vector_type ("__vector signed int",
16302 intSI_type_node, 4);
16303 V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
16304 V8HI_type_node = rs6000_vector_type ("__vector signed short",
16305 intHI_type_node, 8);
16306 V16QI_type_node = rs6000_vector_type ("__vector signed char",
16307 intQI_type_node, 16);
16308
16309 unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
16310 unsigned_intQI_type_node, 16);
16311 unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
16312 unsigned_intHI_type_node, 8);
16313 unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
16314 unsigned_intSI_type_node, 4);
16315 unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16316 ? "__vector unsigned long"
16317 : "__vector unsigned long long",
16318 unsigned_intDI_type_node, 2);
16319
16320 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
16321
16322 const_str_type_node
16323 = build_pointer_type (build_qualified_type (char_type_node,
16324 TYPE_QUAL_CONST));
16325
16326 /* We use V1TI mode as a special container to hold __int128_t items that
16327 must live in VSX registers. */
16328 if (intTI_type_node)
16329 {
16330 V1TI_type_node = rs6000_vector_type ("__vector __int128",
16331 intTI_type_node, 1);
16332 unsigned_V1TI_type_node
16333 = rs6000_vector_type ("__vector unsigned __int128",
16334 unsigned_intTI_type_node, 1);
16335 }
16336
16337 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
16338 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
16339 'vector unsigned short'. */
16340
16341 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
16342 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16343 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
16344 bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
16345 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
16346
16347 long_integer_type_internal_node = long_integer_type_node;
16348 long_unsigned_type_internal_node = long_unsigned_type_node;
16349 long_long_integer_type_internal_node = long_long_integer_type_node;
16350 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
16351 intQI_type_internal_node = intQI_type_node;
16352 uintQI_type_internal_node = unsigned_intQI_type_node;
16353 intHI_type_internal_node = intHI_type_node;
16354 uintHI_type_internal_node = unsigned_intHI_type_node;
16355 intSI_type_internal_node = intSI_type_node;
16356 uintSI_type_internal_node = unsigned_intSI_type_node;
16357 intDI_type_internal_node = intDI_type_node;
16358 uintDI_type_internal_node = unsigned_intDI_type_node;
16359 intTI_type_internal_node = intTI_type_node;
16360 uintTI_type_internal_node = unsigned_intTI_type_node;
16361 float_type_internal_node = float_type_node;
16362 double_type_internal_node = double_type_node;
16363 long_double_type_internal_node = long_double_type_node;
16364 dfloat64_type_internal_node = dfloat64_type_node;
16365 dfloat128_type_internal_node = dfloat128_type_node;
16366 void_type_internal_node = void_type_node;
16367
16368 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
16369 IFmode is the IBM extended 128-bit format that is a pair of doubles.
16370 TFmode will be either IEEE 128-bit floating point or the IBM double-double
16371 format that uses a pair of doubles, depending on the switches and
16372 defaults.
16373
16374 If we don't support either 128-bit IBM double double or IEEE 128-bit
16375 floating point, we need to make sure the type is non-zero or else the
16376 self-test fails during bootstrap.
16377
16378 Always create __ibm128 as a separate type, even if the current long double
16379 format is IBM extended double.
16380
16381 For IEEE 128-bit floating point, always create the type __ieee128. If the
16382 user used -mfloat128, rs6000-c.c will create a define from __float128 to
16383 __ieee128. */
16384 if (TARGET_FLOAT128_TYPE)
16385 {
16386 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16387 ibm128_float_type_node = long_double_type_node;
16388 else
16389 {
16390 ibm128_float_type_node = make_node (REAL_TYPE);
16391 TYPE_PRECISION (ibm128_float_type_node) = 128;
16392 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
16393 layout_type (ibm128_float_type_node);
16394 }
16395
16396 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
16397 "__ibm128");
16398
16399 if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
16400 ieee128_float_type_node = long_double_type_node;
16401 else
16402 ieee128_float_type_node = float128_type_node;
16403
16404 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
16405 "__ieee128");
16406 }
16407
16408 else
16409 ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
16410
16411 /* Initialize the modes for builtin_function_type, mapping a machine mode to
16412 the corresponding tree type node. */
16413 builtin_mode_to_type[QImode][0] = integer_type_node;
16414 builtin_mode_to_type[HImode][0] = integer_type_node;
16415 builtin_mode_to_type[SImode][0] = intSI_type_node;
16416 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
16417 builtin_mode_to_type[DImode][0] = intDI_type_node;
16418 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
16419 builtin_mode_to_type[TImode][0] = intTI_type_node;
16420 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
16421 builtin_mode_to_type[SFmode][0] = float_type_node;
16422 builtin_mode_to_type[DFmode][0] = double_type_node;
16423 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
16424 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
16425 builtin_mode_to_type[TFmode][0] = long_double_type_node;
16426 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
16427 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
16428 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
16429 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
16430 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
16431 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
16432 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
16433 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
16434 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
16435 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
16436 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
16437 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
16438 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
16439 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
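 /* Editor's note (not in the original sources): builtin_function_type
 consults this table to turn a (machine mode, unsignedness) pair into
 the tree type used in a builtin's signature; for example,
 builtin_mode_to_type[V4SImode][1] yields unsigned_V4SI_type_node,
 the type registered above as "__vector unsigned int". */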
16440
16441 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
16442 TYPE_NAME (bool_char_type_node) = tdecl;
16443
16444 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
16445 TYPE_NAME (bool_short_type_node) = tdecl;
16446
16447 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
16448 TYPE_NAME (bool_int_type_node) = tdecl;
16449
16450 tdecl = add_builtin_type ("__pixel", pixel_type_node);
16451 TYPE_NAME (pixel_type_node) = tdecl;
16452
16453 bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
16454 bool_char_type_node, 16);
16455 bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
16456 bool_short_type_node, 8);
16457 bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
16458 bool_int_type_node, 4);
16459 bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
16460 ? "__vector __bool long"
16461 : "__vector __bool long long",
16462 bool_long_long_type_node, 2);
16463 pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
16464 pixel_type_node, 8);
16465
16466 /* Create Altivec and VSX builtins on machines with at least the
16467 general purpose extensions (970 and newer) to allow the use of
16468 the target attribute. */
16469 if (TARGET_EXTRA_BUILTINS)
16470 altivec_init_builtins ();
16471 if (TARGET_HTM)
16472 htm_init_builtins ();
16473
16474 if (TARGET_EXTRA_BUILTINS)
16475 rs6000_common_init_builtins ();
16476
16477 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
16478 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
16479 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
16480
16481 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
16482 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
16483 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
16484
16485 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
16486 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
16487 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
16488
16489 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
16490 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
16491 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
16492
16493 mode = (TARGET_64BIT) ? DImode : SImode;
16494 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
16495 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
16496 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
16497
16498 ftype = build_function_type_list (unsigned_intDI_type_node,
16499 NULL_TREE);
16500 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
16501
16502 if (TARGET_64BIT)
16503 ftype = build_function_type_list (unsigned_intDI_type_node,
16504 NULL_TREE);
16505 else
16506 ftype = build_function_type_list (unsigned_intSI_type_node,
16507 NULL_TREE);
16508 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
16509
16510 ftype = build_function_type_list (double_type_node, NULL_TREE);
16511 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
16512
16513 ftype = build_function_type_list (double_type_node, NULL_TREE);
16514 def_builtin ("__builtin_mffsl", ftype, RS6000_BUILTIN_MFFSL);
16515
16516 ftype = build_function_type_list (void_type_node,
16517 intSI_type_node,
16518 NULL_TREE);
16519 def_builtin ("__builtin_mtfsb0", ftype, RS6000_BUILTIN_MTFSB0);
16520
16521 ftype = build_function_type_list (void_type_node,
16522 intSI_type_node,
16523 NULL_TREE);
16524 def_builtin ("__builtin_mtfsb1", ftype, RS6000_BUILTIN_MTFSB1);
16525
16526 ftype = build_function_type_list (void_type_node,
16527 intDI_type_node,
16528 NULL_TREE);
16529 def_builtin ("__builtin_set_fpscr_rn", ftype, RS6000_BUILTIN_SET_FPSCR_RN);
16530
16531 ftype = build_function_type_list (void_type_node,
16532 intDI_type_node,
16533 NULL_TREE);
16534 def_builtin ("__builtin_set_fpscr_drn", ftype, RS6000_BUILTIN_SET_FPSCR_DRN);
16535
16536 ftype = build_function_type_list (void_type_node,
16537 intSI_type_node, double_type_node,
16538 NULL_TREE);
16539 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
16540
16541 ftype = build_function_type_list (void_type_node, NULL_TREE);
16542 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
16543 def_builtin ("__builtin_ppc_speculation_barrier", ftype,
16544 MISC_BUILTIN_SPEC_BARRIER);
16545
16546 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
16547 NULL_TREE);
16548 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
16549 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
16550
16551 /* AIX libm provides clog as __clog. */
16552 if (TARGET_XCOFF &&
16553 (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
16554 set_user_assembler_name (tdecl, "__clog");
16555
16556 #ifdef SUBTARGET_INIT_BUILTINS
16557 SUBTARGET_INIT_BUILTINS;
16558 #endif
16559 }
16560
16561 /* Returns the rs6000 builtin decl for CODE. */
16562
16563 static tree
16564 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
16565 {
16566 HOST_WIDE_INT fnmask;
16567
16568 if (code >= RS6000_BUILTIN_COUNT)
16569 return error_mark_node;
16570
16571 fnmask = rs6000_builtin_info[code].mask;
16572 if ((fnmask & rs6000_builtin_mask) != fnmask)
16573 {
16574 rs6000_invalid_builtin ((enum rs6000_builtins)code);
16575 return error_mark_node;
16576 }
16577
16578 return rs6000_builtin_decls[code];
16579 }
16580
16581 static void
16582 altivec_init_builtins (void)
16583 {
16584 const struct builtin_description *d;
16585 size_t i;
16586 tree ftype;
16587 tree decl;
16588 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
16589
16590 tree pvoid_type_node = build_pointer_type (void_type_node);
16591
16592 tree pcvoid_type_node
16593 = build_pointer_type (build_qualified_type (void_type_node,
16594 TYPE_QUAL_CONST));
16595
16596 tree int_ftype_opaque
16597 = build_function_type_list (integer_type_node,
16598 opaque_V4SI_type_node, NULL_TREE);
16599 tree opaque_ftype_opaque
16600 = build_function_type_list (integer_type_node, NULL_TREE);
16601 tree opaque_ftype_opaque_int
16602 = build_function_type_list (opaque_V4SI_type_node,
16603 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
16604 tree opaque_ftype_opaque_opaque_int
16605 = build_function_type_list (opaque_V4SI_type_node,
16606 opaque_V4SI_type_node, opaque_V4SI_type_node,
16607 integer_type_node, NULL_TREE);
16608 tree opaque_ftype_opaque_opaque_opaque
16609 = build_function_type_list (opaque_V4SI_type_node,
16610 opaque_V4SI_type_node, opaque_V4SI_type_node,
16611 opaque_V4SI_type_node, NULL_TREE);
16612 tree opaque_ftype_opaque_opaque
16613 = build_function_type_list (opaque_V4SI_type_node,
16614 opaque_V4SI_type_node, opaque_V4SI_type_node,
16615 NULL_TREE);
16616 tree int_ftype_int_opaque_opaque
16617 = build_function_type_list (integer_type_node,
16618 integer_type_node, opaque_V4SI_type_node,
16619 opaque_V4SI_type_node, NULL_TREE);
16620 tree int_ftype_int_v4si_v4si
16621 = build_function_type_list (integer_type_node,
16622 integer_type_node, V4SI_type_node,
16623 V4SI_type_node, NULL_TREE);
16624 tree int_ftype_int_v2di_v2di
16625 = build_function_type_list (integer_type_node,
16626 integer_type_node, V2DI_type_node,
16627 V2DI_type_node, NULL_TREE);
16628 tree void_ftype_v4si
16629 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
16630 tree v8hi_ftype_void
16631 = build_function_type_list (V8HI_type_node, NULL_TREE);
16632 tree void_ftype_void
16633 = build_function_type_list (void_type_node, NULL_TREE);
16634 tree void_ftype_int
16635 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
16636
16637 tree opaque_ftype_long_pcvoid
16638 = build_function_type_list (opaque_V4SI_type_node,
16639 long_integer_type_node, pcvoid_type_node,
16640 NULL_TREE);
16641 tree v16qi_ftype_long_pcvoid
16642 = build_function_type_list (V16QI_type_node,
16643 long_integer_type_node, pcvoid_type_node,
16644 NULL_TREE);
16645 tree v8hi_ftype_long_pcvoid
16646 = build_function_type_list (V8HI_type_node,
16647 long_integer_type_node, pcvoid_type_node,
16648 NULL_TREE);
16649 tree v4si_ftype_long_pcvoid
16650 = build_function_type_list (V4SI_type_node,
16651 long_integer_type_node, pcvoid_type_node,
16652 NULL_TREE);
16653 tree v4sf_ftype_long_pcvoid
16654 = build_function_type_list (V4SF_type_node,
16655 long_integer_type_node, pcvoid_type_node,
16656 NULL_TREE);
16657 tree v2df_ftype_long_pcvoid
16658 = build_function_type_list (V2DF_type_node,
16659 long_integer_type_node, pcvoid_type_node,
16660 NULL_TREE);
16661 tree v2di_ftype_long_pcvoid
16662 = build_function_type_list (V2DI_type_node,
16663 long_integer_type_node, pcvoid_type_node,
16664 NULL_TREE);
16665 tree v1ti_ftype_long_pcvoid
16666 = build_function_type_list (V1TI_type_node,
16667 long_integer_type_node, pcvoid_type_node,
16668 NULL_TREE);
16669
16670 tree void_ftype_opaque_long_pvoid
16671 = build_function_type_list (void_type_node,
16672 opaque_V4SI_type_node, long_integer_type_node,
16673 pvoid_type_node, NULL_TREE);
16674 tree void_ftype_v4si_long_pvoid
16675 = build_function_type_list (void_type_node,
16676 V4SI_type_node, long_integer_type_node,
16677 pvoid_type_node, NULL_TREE);
16678 tree void_ftype_v16qi_long_pvoid
16679 = build_function_type_list (void_type_node,
16680 V16QI_type_node, long_integer_type_node,
16681 pvoid_type_node, NULL_TREE);
16682
16683 tree void_ftype_v16qi_pvoid_long
16684 = build_function_type_list (void_type_node,
16685 V16QI_type_node, pvoid_type_node,
16686 long_integer_type_node, NULL_TREE);
16687
16688 tree void_ftype_v8hi_long_pvoid
16689 = build_function_type_list (void_type_node,
16690 V8HI_type_node, long_integer_type_node,
16691 pvoid_type_node, NULL_TREE);
16692 tree void_ftype_v4sf_long_pvoid
16693 = build_function_type_list (void_type_node,
16694 V4SF_type_node, long_integer_type_node,
16695 pvoid_type_node, NULL_TREE);
16696 tree void_ftype_v2df_long_pvoid
16697 = build_function_type_list (void_type_node,
16698 V2DF_type_node, long_integer_type_node,
16699 pvoid_type_node, NULL_TREE);
16700 tree void_ftype_v1ti_long_pvoid
16701 = build_function_type_list (void_type_node,
16702 V1TI_type_node, long_integer_type_node,
16703 pvoid_type_node, NULL_TREE);
16704 tree void_ftype_v2di_long_pvoid
16705 = build_function_type_list (void_type_node,
16706 V2DI_type_node, long_integer_type_node,
16707 pvoid_type_node, NULL_TREE);
16708 tree int_ftype_int_v8hi_v8hi
16709 = build_function_type_list (integer_type_node,
16710 integer_type_node, V8HI_type_node,
16711 V8HI_type_node, NULL_TREE);
16712 tree int_ftype_int_v16qi_v16qi
16713 = build_function_type_list (integer_type_node,
16714 integer_type_node, V16QI_type_node,
16715 V16QI_type_node, NULL_TREE);
16716 tree int_ftype_int_v4sf_v4sf
16717 = build_function_type_list (integer_type_node,
16718 integer_type_node, V4SF_type_node,
16719 V4SF_type_node, NULL_TREE);
16720 tree int_ftype_int_v2df_v2df
16721 = build_function_type_list (integer_type_node,
16722 integer_type_node, V2DF_type_node,
16723 V2DF_type_node, NULL_TREE);
16724 tree v2di_ftype_v2di
16725 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
16726 tree v4si_ftype_v4si
16727 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
16728 tree v8hi_ftype_v8hi
16729 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
16730 tree v16qi_ftype_v16qi
16731 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
16732 tree v4sf_ftype_v4sf
16733 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
16734 tree v2df_ftype_v2df
16735 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
16736 tree void_ftype_pcvoid_int_int
16737 = build_function_type_list (void_type_node,
16738 pcvoid_type_node, integer_type_node,
16739 integer_type_node, NULL_TREE);
16740
16741 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
16742 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
16743 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
16744 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
16745 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
16746 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
16747 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
16748 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
16749 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
16750 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
16751 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
16752 ALTIVEC_BUILTIN_LVXL_V2DF);
16753 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
16754 ALTIVEC_BUILTIN_LVXL_V2DI);
16755 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
16756 ALTIVEC_BUILTIN_LVXL_V4SF);
16757 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
16758 ALTIVEC_BUILTIN_LVXL_V4SI);
16759 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
16760 ALTIVEC_BUILTIN_LVXL_V8HI);
16761 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
16762 ALTIVEC_BUILTIN_LVXL_V16QI);
16763 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
16764 def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
16765 ALTIVEC_BUILTIN_LVX_V1TI);
16766 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
16767 ALTIVEC_BUILTIN_LVX_V2DF);
16768 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
16769 ALTIVEC_BUILTIN_LVX_V2DI);
16770 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
16771 ALTIVEC_BUILTIN_LVX_V4SF);
16772 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
16773 ALTIVEC_BUILTIN_LVX_V4SI);
16774 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
16775 ALTIVEC_BUILTIN_LVX_V8HI);
16776 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
16777 ALTIVEC_BUILTIN_LVX_V16QI);
16778 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
16779 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
16780 ALTIVEC_BUILTIN_STVX_V2DF);
16781 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
16782 ALTIVEC_BUILTIN_STVX_V2DI);
16783 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
16784 ALTIVEC_BUILTIN_STVX_V4SF);
16785 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
16786 ALTIVEC_BUILTIN_STVX_V4SI);
16787 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
16788 ALTIVEC_BUILTIN_STVX_V8HI);
16789 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
16790 ALTIVEC_BUILTIN_STVX_V16QI);
16791 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
16792 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
16793 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
16794 ALTIVEC_BUILTIN_STVXL_V2DF);
16795 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
16796 ALTIVEC_BUILTIN_STVXL_V2DI);
16797 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
16798 ALTIVEC_BUILTIN_STVXL_V4SF);
16799 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
16800 ALTIVEC_BUILTIN_STVXL_V4SI);
16801 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
16802 ALTIVEC_BUILTIN_STVXL_V8HI);
16803 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
16804 ALTIVEC_BUILTIN_STVXL_V16QI);
16805 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
16806 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
16807 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
16808 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
16809 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
16810 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
16811 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
16812 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
16813 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
16814 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
16815 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
16816 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
16817 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
16818 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
16819 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
16820 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
16821
16822 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
16823 VSX_BUILTIN_LXVD2X_V2DF);
16824 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
16825 VSX_BUILTIN_LXVD2X_V2DI);
16826 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
16827 VSX_BUILTIN_LXVW4X_V4SF);
16828 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
16829 VSX_BUILTIN_LXVW4X_V4SI);
16830 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
16831 VSX_BUILTIN_LXVW4X_V8HI);
16832 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
16833 VSX_BUILTIN_LXVW4X_V16QI);
16834 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
16835 VSX_BUILTIN_STXVD2X_V2DF);
16836 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
16837 VSX_BUILTIN_STXVD2X_V2DI);
16838 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
16839 VSX_BUILTIN_STXVW4X_V4SF);
16840 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
16841 VSX_BUILTIN_STXVW4X_V4SI);
16842 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
16843 VSX_BUILTIN_STXVW4X_V8HI);
16844 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
16845 VSX_BUILTIN_STXVW4X_V16QI);
16846
16847 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
16848 VSX_BUILTIN_LD_ELEMREV_V2DF);
16849 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
16850 VSX_BUILTIN_LD_ELEMREV_V2DI);
16851 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
16852 VSX_BUILTIN_LD_ELEMREV_V4SF);
16853 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
16854 VSX_BUILTIN_LD_ELEMREV_V4SI);
16855 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
16856 VSX_BUILTIN_LD_ELEMREV_V8HI);
16857 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
16858 VSX_BUILTIN_LD_ELEMREV_V16QI);
16859 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
16860 VSX_BUILTIN_ST_ELEMREV_V2DF);
16861 def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
16862 VSX_BUILTIN_ST_ELEMREV_V1TI);
16863 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
16864 VSX_BUILTIN_ST_ELEMREV_V2DI);
16865 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
16866 VSX_BUILTIN_ST_ELEMREV_V4SF);
16867 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
16868 VSX_BUILTIN_ST_ELEMREV_V4SI);
16869 def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
16870 VSX_BUILTIN_ST_ELEMREV_V8HI);
16871 def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
16872 VSX_BUILTIN_ST_ELEMREV_V16QI);
16873
16874 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
16875 VSX_BUILTIN_VEC_LD);
16876 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
16877 VSX_BUILTIN_VEC_ST);
16878 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
16879 VSX_BUILTIN_VEC_XL);
16880 def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
16881 VSX_BUILTIN_VEC_XL_BE);
16882 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
16883 VSX_BUILTIN_VEC_XST);
16884 def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
16885 VSX_BUILTIN_VEC_XST_BE);
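
/* Usage sketch (illustrative only; the real mapping lives in altivec.h
   and rs6000-c.c): the overloaded vec_xl/vec_xst interfaces resolve to
   the builtins defined above, so code along the lines of

     vector int v = __builtin_vec_xl (0, (int *) src);
     __builtin_vec_xst (v, 0, (int *) dst);

   performs an unaligned vector load and store, with SRC and DST assumed
   to be valid int pointers.  */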
16886
16887 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
16888 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
16889 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
16890
16891 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
16892 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
16893 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
16894 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
16895 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
16896 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
16897 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
16898 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
16899 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
16900 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
16901 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
16902 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
16903
16904 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
16905 ALTIVEC_BUILTIN_VEC_ADDE);
16906 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
16907 ALTIVEC_BUILTIN_VEC_ADDEC);
16908 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
16909 ALTIVEC_BUILTIN_VEC_CMPNE);
16910 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
16911 ALTIVEC_BUILTIN_VEC_MUL);
16912 def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
16913 ALTIVEC_BUILTIN_VEC_SUBE);
16914 def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
16915 ALTIVEC_BUILTIN_VEC_SUBEC);
16916
16917 /* Cell builtins. */
16918 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
16919 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
16920 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
16921 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
16922
16923 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
16924 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
16925 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
16926 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
16927
16928 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
16929 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
16930 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
16931 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
16932
16933 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
16934 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
16935 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
16936 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
16937
16938 if (TARGET_P9_VECTOR)
16939 {
16940 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
16941 P9V_BUILTIN_STXVL);
16942 def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
16943 P9V_BUILTIN_XST_LEN_R);
16944 }
16945
16946 /* Add the DST variants. */
16947 d = bdesc_dst;
16948 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
16949 {
16950 HOST_WIDE_INT mask = d->mask;
16951
16952 /* It is expected that these dst built-in functions may have
16953 d->icode equal to CODE_FOR_nothing. */
16954 if ((mask & builtin_mask) != mask)
16955 {
16956 if (TARGET_DEBUG_BUILTIN)
16957 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
16958 d->name);
16959 continue;
16960 }
16961 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
16962 }
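
/* Usage sketch (illustrative): every dst builtin defined by the loop
   above shares the void_ftype_pcvoid_int_int signature, so a data
   stream touch looks like

     __builtin_altivec_dst (ptr, control, 0);

   where PTR is the address to prefetch, CONTROL packs the block size,
   count and stride, and the last operand selects the stream tag.  */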
16963
16964 /* Initialize the predicates. */
16965 d = bdesc_altivec_preds;
16966 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16967 {
16968 machine_mode mode1;
16969 tree type;
16970 HOST_WIDE_INT mask = d->mask;
16971
16972 if ((mask & builtin_mask) != mask)
16973 {
16974 if (TARGET_DEBUG_BUILTIN)
16975 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
16976 d->name);
16977 continue;
16978 }
16979
16980 if (rs6000_overloaded_builtin_p (d->code))
16981 mode1 = VOIDmode;
16982 else
16983 {
16984 /* Cannot define builtin if the instruction is disabled. */
16985 gcc_assert (d->icode != CODE_FOR_nothing);
16986 mode1 = insn_data[d->icode].operand[1].mode;
16987 }
16988
16989 switch (mode1)
16990 {
16991 case E_VOIDmode:
16992 type = int_ftype_int_opaque_opaque;
16993 break;
16994 case E_V2DImode:
16995 type = int_ftype_int_v2di_v2di;
16996 break;
16997 case E_V4SImode:
16998 type = int_ftype_int_v4si_v4si;
16999 break;
17000 case E_V8HImode:
17001 type = int_ftype_int_v8hi_v8hi;
17002 break;
17003 case E_V16QImode:
17004 type = int_ftype_int_v16qi_v16qi;
17005 break;
17006 case E_V4SFmode:
17007 type = int_ftype_int_v4sf_v4sf;
17008 break;
17009 case E_V2DFmode:
17010 type = int_ftype_int_v2df_v2df;
17011 break;
17012 default:
17013 gcc_unreachable ();
17014 }
17015
17016 def_builtin (d->name, type, d->code);
17017 }
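
/* Usage sketch (hedged; the __CR6_* selectors are defined in altivec.h,
   not here): each predicate takes a CR6 selector ahead of its two
   vector operands, matching the int_ftype_int_* types chosen above, e.g.

     int all_eq = __builtin_altivec_vcmpequw_p (__CR6_LT, a, b);

   with A and B of type vector int.  */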
17018
17019 /* Initialize the abs* operators. */
17020 d = bdesc_abs;
17021 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
17022 {
17023 machine_mode mode0;
17024 tree type;
17025 HOST_WIDE_INT mask = d->mask;
17026
17027 if ((mask & builtin_mask) != mask)
17028 {
17029 if (TARGET_DEBUG_BUILTIN)
17030 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
17031 d->name);
17032 continue;
17033 }
17034
17035 /* Cannot define builtin if the instruction is disabled. */
17036 gcc_assert (d->icode != CODE_FOR_nothing);
17037 mode0 = insn_data[d->icode].operand[0].mode;
17038
17039 switch (mode0)
17040 {
17041 case E_V2DImode:
17042 type = v2di_ftype_v2di;
17043 break;
17044 case E_V4SImode:
17045 type = v4si_ftype_v4si;
17046 break;
17047 case E_V8HImode:
17048 type = v8hi_ftype_v8hi;
17049 break;
17050 case E_V16QImode:
17051 type = v16qi_ftype_v16qi;
17052 break;
17053 case E_V4SFmode:
17054 type = v4sf_ftype_v4sf;
17055 break;
17056 case E_V2DFmode:
17057 type = v2df_ftype_v2df;
17058 break;
17059 default:
17060 gcc_unreachable ();
17061 }
17062
17063 def_builtin (d->name, type, d->code);
17064 }
17065
17066 /* Initialize target builtin that implements
17067 targetm.vectorize.builtin_mask_for_load. */
17068
17069 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
17070 v16qi_ftype_long_pcvoid,
17071 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
17072 BUILT_IN_MD, NULL, NULL_TREE);
17073 TREE_READONLY (decl) = 1;
17074 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
17075 altivec_builtin_mask_for_load = decl;
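
/* Context sketch (describing the consumer, which lives outside this
   function): the vectorizer retrieves this decl through
   targetm.vectorize.builtin_mask_for_load and uses the resulting
   lvsl-style permute control vector to realign misaligned loads.  */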
17076
17077 /* Access to the vec_init patterns. */
17078 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
17079 integer_type_node, integer_type_node,
17080 integer_type_node, NULL_TREE);
17081 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
17082
17083 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
17084 short_integer_type_node,
17085 short_integer_type_node,
17086 short_integer_type_node,
17087 short_integer_type_node,
17088 short_integer_type_node,
17089 short_integer_type_node,
17090 short_integer_type_node, NULL_TREE);
17091 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
17092
17093 ftype = build_function_type_list (V16QI_type_node, char_type_node,
17094 char_type_node, char_type_node,
17095 char_type_node, char_type_node,
17096 char_type_node, char_type_node,
17097 char_type_node, char_type_node,
17098 char_type_node, char_type_node,
17099 char_type_node, char_type_node,
17100 char_type_node, char_type_node,
17101 char_type_node, NULL_TREE);
17102 def_builtin ("__builtin_vec_init_v16qi", ftype,
17103 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
17104
17105 ftype = build_function_type_list (V4SF_type_node, float_type_node,
17106 float_type_node, float_type_node,
17107 float_type_node, NULL_TREE);
17108 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
17109
17110 /* VSX builtins. */
17111 ftype = build_function_type_list (V2DF_type_node, double_type_node,
17112 double_type_node, NULL_TREE);
17113 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
17114
17115 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
17116 intDI_type_node, NULL_TREE);
17117 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
17118
17119 /* Access to the vec_set patterns. */
17120 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
17121 intSI_type_node,
17122 integer_type_node, NULL_TREE);
17123 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
17124
17125 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
17126 intHI_type_node,
17127 integer_type_node, NULL_TREE);
17128 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
17129
17130 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
17131 intQI_type_node,
17132 integer_type_node, NULL_TREE);
17133 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
17134
17135 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
17136 float_type_node,
17137 integer_type_node, NULL_TREE);
17138 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
17139
17140 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
17141 double_type_node,
17142 integer_type_node, NULL_TREE);
17143 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
17144
17145 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
17146 intDI_type_node,
17147 integer_type_node, NULL_TREE);
17148 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
17149
17150 /* Access to the vec_extract patterns. */
17151 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
17152 integer_type_node, NULL_TREE);
17153 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
17154
17155 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
17156 integer_type_node, NULL_TREE);
17157 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
17158
17159 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
17160 integer_type_node, NULL_TREE);
17161 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
17162
17163 ftype = build_function_type_list (float_type_node, V4SF_type_node,
17164 integer_type_node, NULL_TREE);
17165 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
17166
17167 ftype = build_function_type_list (double_type_node, V2DF_type_node,
17168 integer_type_node, NULL_TREE);
17169 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
17170
17171 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
17172 integer_type_node, NULL_TREE);
17173 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
17174
17175
17176 if (V1TI_type_node)
17177 {
17178 tree v1ti_ftype_long_pcvoid
17179 = build_function_type_list (V1TI_type_node,
17180 long_integer_type_node, pcvoid_type_node,
17181 NULL_TREE);
17182 tree void_ftype_v1ti_long_pvoid
17183 = build_function_type_list (void_type_node,
17184 V1TI_type_node, long_integer_type_node,
17185 pvoid_type_node, NULL_TREE);
17186 def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
17187 VSX_BUILTIN_LD_ELEMREV_V1TI);
17188 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
17189 VSX_BUILTIN_LXVD2X_V1TI);
17190 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
17191 VSX_BUILTIN_STXVD2X_V1TI);
17192 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
17193 NULL_TREE);
17194 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
17195 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
17196 intTI_type_node,
17197 integer_type_node, NULL_TREE);
17198 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
17199 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
17200 integer_type_node, NULL_TREE);
17201 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
17202 }
17203
17204 }
17205
17206 static void
17207 htm_init_builtins (void)
17208 {
17209 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17210 const struct builtin_description *d;
17211 size_t i;
17212
17213 d = bdesc_htm;
17214 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
17215 {
17216 tree op[MAX_HTM_OPERANDS], type;
17217 HOST_WIDE_INT mask = d->mask;
17218 unsigned attr = rs6000_builtin_info[d->code].attr;
17219 bool void_func = (attr & RS6000_BTC_VOID);
17220 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
17221 int nopnds = 0;
17222 tree gpr_type_node;
17223 tree rettype;
17224 tree argtype;
17225
17226 /* It is expected that these htm built-in functions may have
17227 d->icode equal to CODE_FOR_nothing. */
17228
17229 if (TARGET_32BIT && TARGET_POWERPC64)
17230 gpr_type_node = long_long_unsigned_type_node;
17231 else
17232 gpr_type_node = long_unsigned_type_node;
17233
17234 if (attr & RS6000_BTC_SPR)
17235 {
17236 rettype = gpr_type_node;
17237 argtype = gpr_type_node;
17238 }
17239 else if (d->code == HTM_BUILTIN_TABORTDC
17240 || d->code == HTM_BUILTIN_TABORTDCI)
17241 {
17242 rettype = unsigned_type_node;
17243 argtype = gpr_type_node;
17244 }
17245 else
17246 {
17247 rettype = unsigned_type_node;
17248 argtype = unsigned_type_node;
17249 }
17250
17251 if ((mask & builtin_mask) != mask)
17252 {
17253 if (TARGET_DEBUG_BUILTIN)
17254 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
17255 continue;
17256 }
17257
17258 if (d->name == 0)
17259 {
17260 if (TARGET_DEBUG_BUILTIN)
17261 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
17262 (long unsigned) i);
17263 continue;
17264 }
17265
17266 op[nopnds++] = (void_func) ? void_type_node : rettype;
17267
17268 if (attr_args == RS6000_BTC_UNARY)
17269 op[nopnds++] = argtype;
17270 else if (attr_args == RS6000_BTC_BINARY)
17271 {
17272 op[nopnds++] = argtype;
17273 op[nopnds++] = argtype;
17274 }
17275 else if (attr_args == RS6000_BTC_TERNARY)
17276 {
17277 op[nopnds++] = argtype;
17278 op[nopnds++] = argtype;
17279 op[nopnds++] = argtype;
17280 }
17281
17282 switch (nopnds)
17283 {
17284 case 1:
17285 type = build_function_type_list (op[0], NULL_TREE);
17286 break;
17287 case 2:
17288 type = build_function_type_list (op[0], op[1], NULL_TREE);
17289 break;
17290 case 3:
17291 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
17292 break;
17293 case 4:
17294 type = build_function_type_list (op[0], op[1], op[2], op[3],
17295 NULL_TREE);
17296 break;
17297 default:
17298 gcc_unreachable ();
17299 }
17300
17301 def_builtin (d->name, type, d->code);
17302 }
17303 }
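
/* Worked example (hedged; the exact prototypes depend on how bdesc_htm
   registers each entry): HTM_BUILTIN_TABORTDC takes gpr-sized arguments
   but returns unsigned int, so on a 64-bit target the loop above builds

     unsigned int __builtin_tabortdc (unsigned long, unsigned long,
                                      unsigned long);

   while a plain entry such as __builtin_tbegin gets unsigned int for
   both its argument and its return value.  */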
17304
17305 /* Hash function for builtin functions with up to 3 arguments and a return
17306 type. */
17307 hashval_t
17308 builtin_hasher::hash (builtin_hash_struct *bh)
17309 {
17310 unsigned ret = 0;
17311 int i;
17312
17313 for (i = 0; i < 4; i++)
17314 {
17315 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
17316 ret = (ret * 2) + bh->uns_p[i];
17317 }
17318
17319 return ret;
17320 }
17321
17322 /* Compare builtin hash entries H1 and H2 for equivalence. */
17323 bool
17324 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
17325 {
17326 return ((p1->mode[0] == p2->mode[0])
17327 && (p1->mode[1] == p2->mode[1])
17328 && (p1->mode[2] == p2->mode[2])
17329 && (p1->mode[3] == p2->mode[3])
17330 && (p1->uns_p[0] == p2->uns_p[0])
17331 && (p1->uns_p[1] == p2->uns_p[1])
17332 && (p1->uns_p[2] == p2->uns_p[2])
17333 && (p1->uns_p[3] == p2->uns_p[3]));
17334 }
17335
17336 /* Map types for builtin functions with an explicit return type and up to 3
17337 arguments. Functions with fewer than 3 arguments use VOIDmode for the
17338 unused argument modes. */
17339 static tree
17340 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
17341 machine_mode mode_arg1, machine_mode mode_arg2,
17342 enum rs6000_builtins builtin, const char *name)
17343 {
17344 struct builtin_hash_struct h;
17345 struct builtin_hash_struct *h2;
17346 int num_args = 3;
17347 int i;
17348 tree ret_type = NULL_TREE;
17349 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
17350
17351 /* Create builtin_hash_table. */
17352 if (builtin_hash_table == NULL)
17353 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
17354
17355 h.type = NULL_TREE;
17356 h.mode[0] = mode_ret;
17357 h.mode[1] = mode_arg0;
17358 h.mode[2] = mode_arg1;
17359 h.mode[3] = mode_arg2;
17360 h.uns_p[0] = 0;
17361 h.uns_p[1] = 0;
17362 h.uns_p[2] = 0;
17363 h.uns_p[3] = 0;
17364
17365 /* If the builtin is a function that produces unsigned results or takes
17366 unsigned arguments, and its decl is handed to the vectorizer (such as
17367 the widening multiplies or permutes), make sure the arguments and return
17368 value have the correct signedness. */
17369 switch (builtin)
17370 {
17371 /* unsigned 1 argument functions. */
17372 case CRYPTO_BUILTIN_VSBOX:
17373 case CRYPTO_BUILTIN_VSBOX_BE:
17374 case P8V_BUILTIN_VGBBD:
17375 case MISC_BUILTIN_CDTBCD:
17376 case MISC_BUILTIN_CBCDTD:
17377 h.uns_p[0] = 1;
17378 h.uns_p[1] = 1;
17379 break;
17380
17381 /* unsigned 2 argument functions. */
17382 case ALTIVEC_BUILTIN_VMULEUB:
17383 case ALTIVEC_BUILTIN_VMULEUH:
17384 case P8V_BUILTIN_VMULEUW:
17385 case ALTIVEC_BUILTIN_VMULOUB:
17386 case ALTIVEC_BUILTIN_VMULOUH:
17387 case P8V_BUILTIN_VMULOUW:
17388 case CRYPTO_BUILTIN_VCIPHER:
17389 case CRYPTO_BUILTIN_VCIPHER_BE:
17390 case CRYPTO_BUILTIN_VCIPHERLAST:
17391 case CRYPTO_BUILTIN_VCIPHERLAST_BE:
17392 case CRYPTO_BUILTIN_VNCIPHER:
17393 case CRYPTO_BUILTIN_VNCIPHER_BE:
17394 case CRYPTO_BUILTIN_VNCIPHERLAST:
17395 case CRYPTO_BUILTIN_VNCIPHERLAST_BE:
17396 case CRYPTO_BUILTIN_VPMSUMB:
17397 case CRYPTO_BUILTIN_VPMSUMH:
17398 case CRYPTO_BUILTIN_VPMSUMW:
17399 case CRYPTO_BUILTIN_VPMSUMD:
17400 case CRYPTO_BUILTIN_VPMSUM:
17401 case MISC_BUILTIN_ADDG6S:
17402 case MISC_BUILTIN_DIVWEU:
17403 case MISC_BUILTIN_DIVDEU:
17404 case VSX_BUILTIN_UDIV_V2DI:
17405 case ALTIVEC_BUILTIN_VMAXUB:
17406 case ALTIVEC_BUILTIN_VMINUB:
17407 case ALTIVEC_BUILTIN_VMAXUH:
17408 case ALTIVEC_BUILTIN_VMINUH:
17409 case ALTIVEC_BUILTIN_VMAXUW:
17410 case ALTIVEC_BUILTIN_VMINUW:
17411 case P8V_BUILTIN_VMAXUD:
17412 case P8V_BUILTIN_VMINUD:
17413 h.uns_p[0] = 1;
17414 h.uns_p[1] = 1;
17415 h.uns_p[2] = 1;
17416 break;
17417
17418 /* unsigned 3 argument functions. */
17419 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
17420 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
17421 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
17422 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
17423 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
17424 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
17425 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
17426 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
17427 case VSX_BUILTIN_VPERM_16QI_UNS:
17428 case VSX_BUILTIN_VPERM_8HI_UNS:
17429 case VSX_BUILTIN_VPERM_4SI_UNS:
17430 case VSX_BUILTIN_VPERM_2DI_UNS:
17431 case VSX_BUILTIN_XXSEL_16QI_UNS:
17432 case VSX_BUILTIN_XXSEL_8HI_UNS:
17433 case VSX_BUILTIN_XXSEL_4SI_UNS:
17434 case VSX_BUILTIN_XXSEL_2DI_UNS:
17435 case CRYPTO_BUILTIN_VPERMXOR:
17436 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
17437 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
17438 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
17439 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
17440 case CRYPTO_BUILTIN_VSHASIGMAW:
17441 case CRYPTO_BUILTIN_VSHASIGMAD:
17442 case CRYPTO_BUILTIN_VSHASIGMA:
17443 h.uns_p[0] = 1;
17444 h.uns_p[1] = 1;
17445 h.uns_p[2] = 1;
17446 h.uns_p[3] = 1;
17447 break;
17448
17449 /* signed permute functions with unsigned char mask. */
17450 case ALTIVEC_BUILTIN_VPERM_16QI:
17451 case ALTIVEC_BUILTIN_VPERM_8HI:
17452 case ALTIVEC_BUILTIN_VPERM_4SI:
17453 case ALTIVEC_BUILTIN_VPERM_4SF:
17454 case ALTIVEC_BUILTIN_VPERM_2DI:
17455 case ALTIVEC_BUILTIN_VPERM_2DF:
17456 case VSX_BUILTIN_VPERM_16QI:
17457 case VSX_BUILTIN_VPERM_8HI:
17458 case VSX_BUILTIN_VPERM_4SI:
17459 case VSX_BUILTIN_VPERM_4SF:
17460 case VSX_BUILTIN_VPERM_2DI:
17461 case VSX_BUILTIN_VPERM_2DF:
17462 h.uns_p[3] = 1;
17463 break;
17464
17465 /* unsigned args, signed return. */
17466 case VSX_BUILTIN_XVCVUXDSP:
17467 case VSX_BUILTIN_XVCVUXDDP_UNS:
17468 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
17469 h.uns_p[1] = 1;
17470 break;
17471
17472 /* signed args, unsigned return. */
17473 case VSX_BUILTIN_XVCVDPUXDS_UNS:
17474 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
17475 case MISC_BUILTIN_UNPACK_TD:
17476 case MISC_BUILTIN_UNPACK_V1TI:
17477 h.uns_p[0] = 1;
17478 break;
17479
17480 /* unsigned arguments, bool return (compares). */
17481 case ALTIVEC_BUILTIN_VCMPEQUB:
17482 case ALTIVEC_BUILTIN_VCMPEQUH:
17483 case ALTIVEC_BUILTIN_VCMPEQUW:
17484 case P8V_BUILTIN_VCMPEQUD:
17485 case VSX_BUILTIN_CMPGE_U16QI:
17486 case VSX_BUILTIN_CMPGE_U8HI:
17487 case VSX_BUILTIN_CMPGE_U4SI:
17488 case VSX_BUILTIN_CMPGE_U2DI:
17489 case ALTIVEC_BUILTIN_VCMPGTUB:
17490 case ALTIVEC_BUILTIN_VCMPGTUH:
17491 case ALTIVEC_BUILTIN_VCMPGTUW:
17492 case P8V_BUILTIN_VCMPGTUD:
17493 h.uns_p[1] = 1;
17494 h.uns_p[2] = 1;
17495 break;
17496
17497 /* unsigned arguments for 128-bit pack instructions. */
17498 case MISC_BUILTIN_PACK_TD:
17499 case MISC_BUILTIN_PACK_V1TI:
17500 h.uns_p[1] = 1;
17501 h.uns_p[2] = 1;
17502 break;
17503
17504 /* unsigned second arguments (vector shift right). */
17505 case ALTIVEC_BUILTIN_VSRB:
17506 case ALTIVEC_BUILTIN_VSRH:
17507 case ALTIVEC_BUILTIN_VSRW:
17508 case P8V_BUILTIN_VSRD:
17509 h.uns_p[2] = 1;
17510 break;
17511
17512 default:
17513 break;
17514 }
17515
17516 /* Figure out how many args are present. */
17517 while (num_args > 0 && h.mode[num_args] == VOIDmode)
17518 num_args--;
17519
17520 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
17521 if (!ret_type && h.uns_p[0])
17522 ret_type = builtin_mode_to_type[h.mode[0]][0];
17523
17524 if (!ret_type)
17525 fatal_error (input_location,
17526 "internal error: builtin function %qs had an unexpected "
17527 "return type %qs", name, GET_MODE_NAME (h.mode[0]));
17528
17529 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
17530 arg_type[i] = NULL_TREE;
17531
17532 for (i = 0; i < num_args; i++)
17533 {
17534 int m = (int) h.mode[i+1];
17535 int uns_p = h.uns_p[i+1];
17536
17537 arg_type[i] = builtin_mode_to_type[m][uns_p];
17538 if (!arg_type[i] && uns_p)
17539 arg_type[i] = builtin_mode_to_type[m][0];
17540
17541 if (!arg_type[i])
17542 fatal_error (input_location,
17543 "internal error: builtin function %qs, argument %d "
17544 "had unexpected argument type %qs", name, i,
17545 GET_MODE_NAME (m));
17546 }
17547
17548 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
17549 if (*found == NULL)
17550 {
17551 h2 = ggc_alloc<builtin_hash_struct> ();
17552 *h2 = h;
17553 *found = h2;
17554
17555 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
17556 arg_type[2], NULL_TREE);
17557 }
17558
17559 return (*found)->type;
17560 }
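
/* Worked example (derived from the switch above): for
   ALTIVEC_BUILTIN_VMULEUB the insn modes are V8HImode (V16QImode,
   V16QImode) and all of uns_p[0..2] are set, so the cached type is

     vector unsigned short (vector unsigned char, vector unsigned char)

   giving the multiply-even-unsigned-bytes builtin a fully unsigned
   prototype even though insn patterns only carry modes.  */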
17561
17562 static void
17563 rs6000_common_init_builtins (void)
17564 {
17565 const struct builtin_description *d;
17566 size_t i;
17567
17568 tree opaque_ftype_opaque = NULL_TREE;
17569 tree opaque_ftype_opaque_opaque = NULL_TREE;
17570 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
17571 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17572
17573 /* Create Altivec and VSX builtins on machines with at least the
17574 general purpose extensions (970 and newer) to allow the use of
17575 the target attribute. */
17576
17577 if (TARGET_EXTRA_BUILTINS)
17578 builtin_mask |= RS6000_BTM_COMMON;
17579
17580 /* Add the ternary operators. */
17581 d = bdesc_3arg;
17582 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
17583 {
17584 tree type;
17585 HOST_WIDE_INT mask = d->mask;
17586
17587 if ((mask & builtin_mask) != mask)
17588 {
17589 if (TARGET_DEBUG_BUILTIN)
17590 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
17591 continue;
17592 }
17593
17594 if (rs6000_overloaded_builtin_p (d->code))
17595 {
17596 if (! (type = opaque_ftype_opaque_opaque_opaque))
17597 type = opaque_ftype_opaque_opaque_opaque
17598 = build_function_type_list (opaque_V4SI_type_node,
17599 opaque_V4SI_type_node,
17600 opaque_V4SI_type_node,
17601 opaque_V4SI_type_node,
17602 NULL_TREE);
17603 }
17604 else
17605 {
17606 enum insn_code icode = d->icode;
17607 if (d->name == 0)
17608 {
17609 if (TARGET_DEBUG_BUILTIN)
17610 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
17611 (long unsigned)i);
17612
17613 continue;
17614 }
17615
17616 if (icode == CODE_FOR_nothing)
17617 {
17618 if (TARGET_DEBUG_BUILTIN)
17619 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
17620 d->name);
17621
17622 continue;
17623 }
17624
17625 type = builtin_function_type (insn_data[icode].operand[0].mode,
17626 insn_data[icode].operand[1].mode,
17627 insn_data[icode].operand[2].mode,
17628 insn_data[icode].operand[3].mode,
17629 d->code, d->name);
17630 }
17631
17632 def_builtin (d->name, type, d->code);
17633 }
17634
17635 /* Add the binary operators. */
17636 d = bdesc_2arg;
17637 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
17638 {
17639 machine_mode mode0, mode1, mode2;
17640 tree type;
17641 HOST_WIDE_INT mask = d->mask;
17642
17643 if ((mask & builtin_mask) != mask)
17644 {
17645 if (TARGET_DEBUG_BUILTIN)
17646 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
17647 continue;
17648 }
17649
17650 if (rs6000_overloaded_builtin_p (d->code))
17651 {
17652 if (! (type = opaque_ftype_opaque_opaque))
17653 type = opaque_ftype_opaque_opaque
17654 = build_function_type_list (opaque_V4SI_type_node,
17655 opaque_V4SI_type_node,
17656 opaque_V4SI_type_node,
17657 NULL_TREE);
17658 }
17659 else
17660 {
17661 enum insn_code icode = d->icode;
17662 if (d->name == 0)
17663 {
17664 if (TARGET_DEBUG_BUILTIN)
17665 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
17666 (long unsigned)i);
17667
17668 continue;
17669 }
17670
17671 if (icode == CODE_FOR_nothing)
17672 {
17673 if (TARGET_DEBUG_BUILTIN)
17674 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
17675 d->name);
17676
17677 continue;
17678 }
17679
17680 mode0 = insn_data[icode].operand[0].mode;
17681 mode1 = insn_data[icode].operand[1].mode;
17682 mode2 = insn_data[icode].operand[2].mode;
17683
17684 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
17685 d->code, d->name);
17686 }
17687
17688 def_builtin (d->name, type, d->code);
17689 }
17690
17691 /* Add the simple unary operators. */
17692 d = bdesc_1arg;
17693 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
17694 {
17695 machine_mode mode0, mode1;
17696 tree type;
17697 HOST_WIDE_INT mask = d->mask;
17698
17699 if ((mask & builtin_mask) != mask)
17700 {
17701 if (TARGET_DEBUG_BUILTIN)
17702 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
17703 continue;
17704 }
17705
17706 if (rs6000_overloaded_builtin_p (d->code))
17707 {
17708 if (! (type = opaque_ftype_opaque))
17709 type = opaque_ftype_opaque
17710 = build_function_type_list (opaque_V4SI_type_node,
17711 opaque_V4SI_type_node,
17712 NULL_TREE);
17713 }
17714 else
17715 {
17716 enum insn_code icode = d->icode;
17717 if (d->name == 0)
17718 {
17719 if (TARGET_DEBUG_BUILTIN)
17720 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
17721 (long unsigned)i);
17722
17723 continue;
17724 }
17725
17726 if (icode == CODE_FOR_nothing)
17727 {
17728 if (TARGET_DEBUG_BUILTIN)
17729 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
17730 d->name);
17731
17732 continue;
17733 }
17734
17735 mode0 = insn_data[icode].operand[0].mode;
17736 mode1 = insn_data[icode].operand[1].mode;
17737
17738 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
17739 d->code, d->name);
17740 }
17741
17742 def_builtin (d->name, type, d->code);
17743 }
17744
17745 /* Add the simple no-argument operators. */
17746 d = bdesc_0arg;
17747 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
17748 {
17749 machine_mode mode0;
17750 tree type;
17751 HOST_WIDE_INT mask = d->mask;
17752
17753 if ((mask & builtin_mask) != mask)
17754 {
17755 if (TARGET_DEBUG_BUILTIN)
17756 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
17757 continue;
17758 }
17759 if (rs6000_overloaded_builtin_p (d->code))
17760 {
17761 if (!opaque_ftype_opaque)
17762 opaque_ftype_opaque
17763 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
17764 type = opaque_ftype_opaque;
17765 }
17766 else
17767 {
17768 enum insn_code icode = d->icode;
17769 if (d->name == 0)
17770 {
17771 if (TARGET_DEBUG_BUILTIN)
17772 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
17773 (long unsigned) i);
17774 continue;
17775 }
17776 if (icode == CODE_FOR_nothing)
17777 {
17778 if (TARGET_DEBUG_BUILTIN)
17779 fprintf (stderr,
17780 "rs6000_builtin, skip no-argument %s (no code)\n",
17781 d->name);
17782 continue;
17783 }
17784 mode0 = insn_data[icode].operand[0].mode;
17785 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
17786 d->code, d->name);
17787 }
17788 def_builtin (d->name, type, d->code);
17789 }
17790 }
17791
17792 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
17793 static void
17794 init_float128_ibm (machine_mode mode)
17795 {
17796 if (!TARGET_XL_COMPAT)
17797 {
17798 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
17799 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
17800 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
17801 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
17802
17803 if (!TARGET_HARD_FLOAT)
17804 {
17805 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
17806 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
17807 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
17808 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
17809 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
17810 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
17811 set_optab_libfunc (le_optab, mode, "__gcc_qle");
17812 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
17813
17814 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
17815 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
17816 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
17817 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
17818 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
17819 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
17820 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
17821 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
17822 }
17823 }
17824 else
17825 {
17826 set_optab_libfunc (add_optab, mode, "_xlqadd");
17827 set_optab_libfunc (sub_optab, mode, "_xlqsub");
17828 set_optab_libfunc (smul_optab, mode, "_xlqmul");
17829 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
17830 }
17831
17832 /* Add various conversions for IFmode to use the traditional TFmode
17833 names. */
17834 if (mode == IFmode)
17835 {
17836 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf");
17837 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf");
17838 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdtf");
17839 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd");
17840 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd");
17841 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtftd");
17842
17843 if (TARGET_POWERPC64)
17844 {
17845 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
17846 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
17847 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
17848 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
17849 }
17850 }
17851 }
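
/* Effect sketch (illustrative, assuming the usual option defaults): after
   the libfunc registrations above, an IBM extended double multiply such as

     long double f (long double a, long double b) { return a * b; }

   is emitted as a call to __gcc_qmul, or to _xlqmul under -mxl-compat,
   rather than the generic soft-float libfunc name.  */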
17852
17853 /* Create a decl for either complex long double multiply or complex long
17854 double divide when long double is IEEE 128-bit floating point. We can't use
17855 __multc3 and __divtc3 because those names are already taken by the original
17856 long double format (IBM extended double). The complex multiply/divide
17857 functions are encoded as builtin functions with a complex result and 4 scalar inputs. */
17858
17859 static void
17860 create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
17861 {
17862 tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
17863 name, NULL_TREE);
17864
17865 set_builtin_decl (fncode, fndecl, true);
17866
17867 if (TARGET_DEBUG_BUILTIN)
17868 fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);
17869
17870 return;
17871 }
17872
17873 /* Set up IEEE 128-bit floating point routines. Use different names if the
17874 arguments can be passed in a vector register. The historical PowerPC
17875 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
17876 continue to use that if we aren't using vector registers to pass IEEE
17877 128-bit floating point. */
17878
17879 static void
17880 init_float128_ieee (machine_mode mode)
17881 {
17882 if (FLOAT128_VECTOR_P (mode))
17883 {
17884 static bool complex_muldiv_init_p = false;
17885
17886 /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble. If
17887 we have clone or target attributes, this will be called a second
17888 time. We want to create the built-in function only once. */
17889 if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
17890 {
17891 complex_muldiv_init_p = true;
17892 built_in_function fncode_mul =
17893 (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
17894 - MIN_MODE_COMPLEX_FLOAT);
17895 built_in_function fncode_div =
17896 (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
17897 - MIN_MODE_COMPLEX_FLOAT);
17898
17899 tree fntype = build_function_type_list (complex_long_double_type_node,
17900 long_double_type_node,
17901 long_double_type_node,
17902 long_double_type_node,
17903 long_double_type_node,
17904 NULL_TREE);
17905
17906 create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
17907 create_complex_muldiv ("__divkc3", fncode_div, fntype);
17908 }
17909
17910 set_optab_libfunc (add_optab, mode, "__addkf3");
17911 set_optab_libfunc (sub_optab, mode, "__subkf3");
17912 set_optab_libfunc (neg_optab, mode, "__negkf2");
17913 set_optab_libfunc (smul_optab, mode, "__mulkf3");
17914 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
17915 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
17916 set_optab_libfunc (abs_optab, mode, "__abskf2");
17917 set_optab_libfunc (powi_optab, mode, "__powikf2");
17918
17919 set_optab_libfunc (eq_optab, mode, "__eqkf2");
17920 set_optab_libfunc (ne_optab, mode, "__nekf2");
17921 set_optab_libfunc (gt_optab, mode, "__gtkf2");
17922 set_optab_libfunc (ge_optab, mode, "__gekf2");
17923 set_optab_libfunc (lt_optab, mode, "__ltkf2");
17924 set_optab_libfunc (le_optab, mode, "__lekf2");
17925 set_optab_libfunc (unord_optab, mode, "__unordkf2");
17926
17927 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
17928 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
17929 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
17930 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
17931
17932 set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
17933 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17934 set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");
17935
17936 set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
17937 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
17938 set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");
17939
17940 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf");
17941 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf");
17942 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctdkf");
17943 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd");
17944 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd");
17945 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendkftd");
17946
17947 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
17948 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
17949 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
17950 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
17951
17952 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
17953 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
17954 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
17955 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
17956
17957 if (TARGET_POWERPC64)
17958 {
17959 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
17960 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
17961 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
17962 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
17963 }
17964 }
17965
17966 else
17967 {
17968 set_optab_libfunc (add_optab, mode, "_q_add");
17969 set_optab_libfunc (sub_optab, mode, "_q_sub");
17970 set_optab_libfunc (neg_optab, mode, "_q_neg");
17971 set_optab_libfunc (smul_optab, mode, "_q_mul");
17972 set_optab_libfunc (sdiv_optab, mode, "_q_div");
17973 if (TARGET_PPC_GPOPT)
17974 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
17975
17976 set_optab_libfunc (eq_optab, mode, "_q_feq");
17977 set_optab_libfunc (ne_optab, mode, "_q_fne");
17978 set_optab_libfunc (gt_optab, mode, "_q_fgt");
17979 set_optab_libfunc (ge_optab, mode, "_q_fge");
17980 set_optab_libfunc (lt_optab, mode, "_q_flt");
17981 set_optab_libfunc (le_optab, mode, "_q_fle");
17982
17983 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
17984 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
17985 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
17986 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
17987 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
17988 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
17989 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
17990 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
17991 }
17992 }
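
/* Effect sketch (illustrative): with the KFmode registrations above,

     __float128 g (__float128 x, double y) { return x + y; }

   converts Y with a call to __extenddfkf2 and adds with __addkf3; the
   kf-suffixed names come from libgcc's IEEE 128-bit support.  */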
17993
17994 static void
17995 rs6000_init_libfuncs (void)
17996 {
17997 /* __float128 support. */
17998 if (TARGET_FLOAT128_TYPE)
17999 {
18000 init_float128_ibm (IFmode);
18001 init_float128_ieee (KFmode);
18002 }
18003
18004 /* AIX/Darwin/64-bit Linux quad floating point routines. */
18005 if (TARGET_LONG_DOUBLE_128)
18006 {
18007 if (!TARGET_IEEEQUAD)
18008 init_float128_ibm (TFmode);
18009
18010 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
18011 else
18012 init_float128_ieee (TFmode);
18013 }
18014 }
18015
18016 /* Emit a potentially record-form instruction, setting DST from SRC.
18017 If DOT is 0, that is all; otherwise, set CCREG to the result of the
18018 signed comparison of DST with zero. If DOT is 1, the generated RTL
18019 doesn't care about the DST result; if DOT is 2, it does. If CCREG
18020 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
18021 a separate COMPARE. */
18022
18023 void
18024 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
18025 {
18026 if (dot == 0)
18027 {
18028 emit_move_insn (dst, src);
18029 return;
18030 }
18031
18032 if (cc_reg_not_cr0_operand (ccreg, CCmode))
18033 {
18034 emit_move_insn (dst, src);
18035 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
18036 return;
18037 }
18038
18039 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
18040 if (dot == 1)
18041 {
18042 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
18043 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
18044 }
18045 else
18046 {
18047 rtx set = gen_rtx_SET (dst, src);
18048 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
18049 }
18050 }
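
/* RTL shape sketch (restating the code above, not new behavior): with
   DOT == 1 and CCREG being CR0 the insn emitted is

     (parallel [(set CCREG (compare:CC SRC (const_int 0)))
                (clobber DST)])

   and with DOT == 2 the clobber becomes (set DST SRC), which is the
   shape the record-form ("dot") patterns expect.  */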
18051
18052 \f
18053 /* A validation routine: say whether CODE, a condition code, and MODE
18054 match. The other alternatives either don't make sense or should
18055 never be generated. */
18056
18057 void
18058 validate_condition_mode (enum rtx_code code, machine_mode mode)
18059 {
18060 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
18061 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
18062 && GET_MODE_CLASS (mode) == MODE_CC);
18063
18064 /* These don't make sense. */
18065 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
18066 || mode != CCUNSmode);
18067
18068 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
18069 || mode == CCUNSmode);
18070
18071 gcc_assert (mode == CCFPmode
18072 || (code != ORDERED && code != UNORDERED
18073 && code != UNEQ && code != LTGT
18074 && code != UNGT && code != UNLT
18075 && code != UNGE && code != UNLE));
18076
18077 /* These should never be generated except for
18078 flag_finite_math_only. */
18079 gcc_assert (mode != CCFPmode
18080 || flag_finite_math_only
18081 || (code != LE && code != GE
18082 && code != UNEQ && code != LTGT
18083 && code != UNGT && code != UNLT));
18084
18085 /* These are invalid; the information is not there. */
18086 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
18087 }
18088
18089 \f
18090 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
18091 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
18092 non-null, store there the bit offset (counted from the right) where the
18093 single stretch of 1 bits begins; and similarly for B, the bit offset
18094 where it ends. */
18095
18096 bool
18097 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
18098 {
18099 unsigned HOST_WIDE_INT val = INTVAL (mask);
18100 unsigned HOST_WIDE_INT bit;
18101 int nb, ne;
18102 int n = GET_MODE_PRECISION (mode);
18103
18104 if (mode != DImode && mode != SImode)
18105 return false;
18106
18107 if (INTVAL (mask) >= 0)
18108 {
18109 bit = val & -val;
18110 ne = exact_log2 (bit);
18111 nb = exact_log2 (val + bit);
18112 }
18113 else if (val + 1 == 0)
18114 {
18115 nb = n;
18116 ne = 0;
18117 }
18118 else if (val & 1)
18119 {
18120 val = ~val;
18121 bit = val & -val;
18122 nb = exact_log2 (bit);
18123 ne = exact_log2 (val + bit);
18124 }
18125 else
18126 {
18127 bit = val & -val;
18128 ne = exact_log2 (bit);
18129 if (val + bit == 0)
18130 nb = n;
18131 else
18132 nb = 0;
18133 }
18134
18135 nb--;
18136
18137 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
18138 return false;
18139
18140 if (b)
18141 *b = nb;
18142 if (e)
18143 *e = ne;
18144
18145 return true;
18146 }
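
/* Worked example (tracing the code above): MASK == 0xff0 in SImode has
   its single stretch of ones at bits 4..11, so the function returns
   true with *E == 4 (where the stretch begins, counting from the right)
   and *B == 11 (where it ends).  */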
18147
18148 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
18149 or rldicr instruction, to implement an AND with it in mode MODE. */
18150
18151 bool
18152 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
18153 {
18154 int nb, ne;
18155
18156 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18157 return false;
18158
18159 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
18160 does not wrap. */
18161 if (mode == DImode)
18162 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
18163
18164 /* For SImode, rlwinm can do everything. */
18165 if (mode == SImode)
18166 return (nb < 32 && ne < 32);
18167
18168 return false;
18169 }
18170
18171 /* Return the instruction template for an AND with mask in mode MODE, with
18172 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18173
18174 const char *
18175 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
18176 {
18177 int nb, ne;
18178
18179 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
18180 gcc_unreachable ();
18181
18182 if (mode == DImode && ne == 0)
18183 {
18184 operands[3] = GEN_INT (63 - nb);
18185 if (dot)
18186 return "rldicl. %0,%1,0,%3";
18187 return "rldicl %0,%1,0,%3";
18188 }
18189
18190 if (mode == DImode && nb == 63)
18191 {
18192 operands[3] = GEN_INT (63 - ne);
18193 if (dot)
18194 return "rldicr. %0,%1,0,%3";
18195 return "rldicr %0,%1,0,%3";
18196 }
18197
18198 if (nb < 32 && ne < 32)
18199 {
18200 operands[3] = GEN_INT (31 - nb);
18201 operands[4] = GEN_INT (31 - ne);
18202 if (dot)
18203 return "rlwinm. %0,%1,0,%3,%4";
18204 return "rlwinm %0,%1,0,%3,%4";
18205 }
18206
18207 gcc_unreachable ();
18208 }
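
/* Worked example (an instance of the first case above): an AND with
   mask 0xff in DImode has NE == 0 and NB == 7, so operands[3] becomes
   GEN_INT (56) and the template used is

     rldicl %0,%1,0,56

   which keeps only the low eight bits.  */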
18209
18210 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
18211 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
18212 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
18213
18214 bool
18215 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
18216 {
18217 int nb, ne;
18218
18219 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18220 return false;
18221
18222 int n = GET_MODE_PRECISION (mode);
18223 int sh = -1;
18224
18225 if (CONST_INT_P (XEXP (shift, 1)))
18226 {
18227 sh = INTVAL (XEXP (shift, 1));
18228 if (sh < 0 || sh >= n)
18229 return false;
18230 }
18231
18232 rtx_code code = GET_CODE (shift);
18233
18234 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18235 if (sh == 0)
18236 code = ROTATE;
18237
18238 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18239 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18240 code = ASHIFT;
18241 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18242 {
18243 code = LSHIFTRT;
18244 sh = n - sh;
18245 }
18246
18247 /* DImode rotates need rld*. */
18248 if (mode == DImode && code == ROTATE)
18249 return (nb == 63 || ne == 0 || ne == sh);
18250
18251 /* SImode rotates need rlw*. */
18252 if (mode == SImode && code == ROTATE)
18253 return (nb < 32 && ne < 32 && sh < 32);
18254
18255 /* Wrap-around masks are only okay for rotates. */
18256 if (ne > nb)
18257 return false;
18258
18259 /* Variable shifts are only okay for rotates. */
18260 if (sh < 0)
18261 return false;
18262
18263 /* Don't allow ASHIFT if the mask is wrong for that. */
18264 if (code == ASHIFT && ne < sh)
18265 return false;
18266
18267 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
18268 if the mask is wrong for that. */
18269 if (nb < 32 && ne < 32 && sh < 32
18270 && !(code == LSHIFTRT && nb >= 32 - sh))
18271 return true;
18272
18273 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
18274 if the mask is wrong for that. */
18275 if (code == LSHIFTRT)
18276 sh = 64 - sh;
18277 if (nb == 63 || ne == 0 || ne == sh)
18278 return !(code == LSHIFTRT && nb >= sh);
18279
18280 return false;
18281 }
18282
18283 /* Return the instruction template for a shift with mask in mode MODE, with
18284 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18285
18286 const char *
18287 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
18288 {
18289 int nb, ne;
18290
18291 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18292 gcc_unreachable ();
18293
18294 if (mode == DImode && ne == 0)
18295 {
18296 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18297 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
18298 operands[3] = GEN_INT (63 - nb);
18299 if (dot)
18300 return "rld%I2cl. %0,%1,%2,%3";
18301 return "rld%I2cl %0,%1,%2,%3";
18302 }
18303
18304 if (mode == DImode && nb == 63)
18305 {
18306 operands[3] = GEN_INT (63 - ne);
18307 if (dot)
18308 return "rld%I2cr. %0,%1,%2,%3";
18309 return "rld%I2cr %0,%1,%2,%3";
18310 }
18311
18312 if (mode == DImode
18313 && GET_CODE (operands[4]) != LSHIFTRT
18314 && CONST_INT_P (operands[2])
18315 && ne == INTVAL (operands[2]))
18316 {
18317 operands[3] = GEN_INT (63 - nb);
18318 if (dot)
18319 return "rld%I2c. %0,%1,%2,%3";
18320 return "rld%I2c %0,%1,%2,%3";
18321 }
18322
18323 if (nb < 32 && ne < 32)
18324 {
18325 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18326 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18327 operands[3] = GEN_INT (31 - nb);
18328 operands[4] = GEN_INT (31 - ne);
18329 /* This insn can also be a 64-bit rotate with a mask that really makes
18330 it just a shift right (with mask); the %h output modifiers below adjust
18331 for that situation (the shift count is >= 32 in that case). */
18332 if (dot)
18333 return "rlw%I2nm. %0,%1,%h2,%3,%4";
18334 return "rlw%I2nm %0,%1,%h2,%3,%4";
18335 }
18336
18337 gcc_unreachable ();
18338 }
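
/* Worked example (tracing the rlwinm case above): implementing
   (x >> 24) & 0xff in SImode gives NB == 7 and NE == 0; the LSHIFTRT
   count 24 is rewritten as the rotate count 32 - 24 == 8, producing

     rlwinm %0,%1,8,24,31

   i.e. a rotate-left by 8 under mask bits 24..31 (IBM bit numbering).  */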
18339
18340 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
18341 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
18342 ASHIFT, or LSHIFTRT) in mode MODE. */
18343
18344 bool
18345 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
18346 {
18347 int nb, ne;
18348
18349 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
18350 return false;
18351
18352 int n = GET_MODE_PRECISION (mode);
18353
18354 int sh = INTVAL (XEXP (shift, 1));
18355 if (sh < 0 || sh >= n)
18356 return false;
18357
18358 rtx_code code = GET_CODE (shift);
18359
18360 /* Convert any shift by 0 to a rotate, to simplify the code below. */
18361 if (sh == 0)
18362 code = ROTATE;
18363
18364 /* Convert rotate to simple shift if we can, to make analysis simpler. */
18365 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
18366 code = ASHIFT;
18367 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
18368 {
18369 code = LSHIFTRT;
18370 sh = n - sh;
18371 }
18372
18373 /* DImode rotates need rldimi. */
18374 if (mode == DImode && code == ROTATE)
18375 return (ne == sh);
18376
18377 /* SImode rotates need rlwimi. */
18378 if (mode == SImode && code == ROTATE)
18379 return (nb < 32 && ne < 32 && sh < 32);
18380
18381 /* Wrap-around masks are only okay for rotates. */
18382 if (ne > nb)
18383 return false;
18384
18385 /* Don't allow ASHIFT if the mask is wrong for that. */
18386 if (code == ASHIFT && ne < sh)
18387 return false;
18388
18389 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
18390 if the mask is wrong for that. */
18391 if (nb < 32 && ne < 32 && sh < 32
18392 && !(code == LSHIFTRT && nb >= 32 - sh))
18393 return true;
18394
18395 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
18396 if the mask is wrong for that. */
18397 if (code == LSHIFTRT)
18398 sh = 64 - sh;
18399 if (ne == sh)
18400 return !(code == LSHIFTRT && nb >= sh);
18401
18402 return false;
18403 }
18404
18405 /* Return the instruction template for an insert with mask in mode MODE, with
18406 operands OPERANDS. If DOT is true, make it a record-form instruction. */
18407
18408 const char *
18409 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
18410 {
18411 int nb, ne;
18412
18413 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
18414 gcc_unreachable ();
18415
18416 /* Prefer rldimi because rlwimi is cracked. */
18417 if (TARGET_POWERPC64
18418 && (!dot || mode == DImode)
18419 && GET_CODE (operands[4]) != LSHIFTRT
18420 && ne == INTVAL (operands[2]))
18421 {
18422 operands[3] = GEN_INT (63 - nb);
18423 if (dot)
18424 return "rldimi. %0,%1,%2,%3";
18425 return "rldimi %0,%1,%2,%3";
18426 }
18427
18428 if (nb < 32 && ne < 32)
18429 {
18430 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
18431 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
18432 operands[3] = GEN_INT (31 - nb);
18433 operands[4] = GEN_INT (31 - ne);
18434 if (dot)
18435 return "rlwimi. %0,%1,%2,%3,%4";
18436 return "rlwimi %0,%1,%2,%3,%4";
18437 }
18438
18439 gcc_unreachable ();
18440 }
18441
18442 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
18443 using two machine instructions. */
18444
18445 bool
18446 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
18447 {
18448 /* There are two kinds of AND we can handle with two insns:
18449 1) those we can do with two rl* insns;
18450 2) ori[s];xori[s].
18451
18452 We do not handle that last case yet. */
18453
18454 /* If there is just one stretch of ones, we can do it. */
18455 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
18456 return true;
18457
18458 /* Otherwise, fill in the lowest "hole"; if we can do the result with
18459 one insn, we can do the whole thing with two. */
18460 unsigned HOST_WIDE_INT val = INTVAL (c);
18461 unsigned HOST_WIDE_INT bit1 = val & -val;
18462 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18463 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18464 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
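/* Worked example (illustrative): for val = 0xf0f0 we get bit1 = 0x10
   (lowest set bit), bit2 = 0x100 (lowest bit of the lowest hole),
   val1 = 0xf000 (the runs above that hole) and bit3 = 0x1000, so the
   candidate below is 0xf0f0 + 0x1000 - 0x100 = 0xfff0, i.e. val with
   its hole filled in, which a single rl* insn can handle.  */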
18465 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
18466 }
18467
18468 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
18469 If EXPAND is true, split rotate-and-mask instructions we generate to
18470 their constituent parts as well (this is used during expand); if DOT
18471 is 1, make the last insn a record-form instruction clobbering the
18472 destination GPR and setting the CC reg (from operands[3]); if 2, set
18473 that GPR as well as the CC reg. */
18474
18475 void
18476 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
18477 {
18478 gcc_assert (!(expand && dot));
18479
18480 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
18481
18482 /* If it is one stretch of ones, it is DImode; shift left, mask, then
18483 shift right. This generates better code than doing the masks without
18484 shifts, or shifting first right and then left. */
18485 int nb, ne;
18486 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
18487 {
18488 gcc_assert (mode == DImode);
18489
18490 int shift = 63 - nb;
18491 if (expand)
18492 {
18493 rtx tmp1 = gen_reg_rtx (DImode);
18494 rtx tmp2 = gen_reg_rtx (DImode);
18495 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
18496 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
18497 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
18498 }
18499 else
18500 {
18501 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
18502 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
18503 emit_move_insn (operands[0], tmp);
18504 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
18505 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18506 }
18507 return;
18508 }
18509
18510 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
18511 that does the rest. */
18512 unsigned HOST_WIDE_INT bit1 = val & -val;
18513 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
18514 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
18515 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
18516
18517 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
18518 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
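/* Worked example (illustrative): for val = 0xf0f0, mask2 = 0xfff0 is
   val with the hole at bits 8-11 filled in, mask1 = 0xfffffffffffff0ff
   clears just that hole, and mask1 & mask2 == val.  Since the low 32
   bits of mask1 form a valid rlwinm wrap-around mask, this val is
   handled by one of the two rlwinm cases below.  */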
18519
18520 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
18521
18522 /* Two "no-rotate"-and-mask instructions, for SImode. */
18523 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
18524 {
18525 gcc_assert (mode == SImode);
18526
18527 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18528 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
18529 emit_move_insn (reg, tmp);
18530 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18531 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18532 return;
18533 }
18534
18535 gcc_assert (mode == DImode);
18536
18537 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
18538 insns; we have to do the first in SImode, because it wraps. */
18539 if (mask2 <= 0xffffffff
18540 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
18541 {
18542 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
18543 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
18544 GEN_INT (mask1));
18545 rtx reg_low = gen_lowpart (SImode, reg);
18546 emit_move_insn (reg_low, tmp);
18547 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
18548 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18549 return;
18550 }
18551
18552 /* Two rld* insns: rotate, clear the hole in the middle (which now is
18553 at the top end), rotate back and clear the other hole. */
18554 int right = exact_log2 (bit3);
18555 int left = 64 - right;
18556
18557 /* Rotate the mask too. */
18558 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
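/* Worked example (illustrative): for val = 0x0000f0f000000000 the hole
   sits at bits 40-43, so right = 44 and left = 20; rotating mask1 right
   by 44 moves the hole to the top, giving the contiguous mask
   0x0fffffffffffffff that a single rld*-style AND can apply.  */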
18559
18560 if (expand)
18561 {
18562 rtx tmp1 = gen_reg_rtx (DImode);
18563 rtx tmp2 = gen_reg_rtx (DImode);
18564 rtx tmp3 = gen_reg_rtx (DImode);
18565 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
18566 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
18567 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
18568 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
18569 }
18570 else
18571 {
18572 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
18573 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
18574 emit_move_insn (operands[0], tmp);
18575 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
18576 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
18577 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
18578 }
18579 }
18580 \f
18581 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1, making them candidates
18582 for lfq and stfq insns iff the registers are hard registers. */
18583
18584 int
18585 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
18586 {
18587 /* We might have been passed a SUBREG. */
18588 if (!REG_P (reg1) || !REG_P (reg2))
18589 return 0;
18590
18591 /* We might have been passed non-floating-point registers. */
18592 if (!FP_REGNO_P (REGNO (reg1))
18593 || !FP_REGNO_P (REGNO (reg2)))
18594 return 0;
18595
18596 return (REGNO (reg1) == REGNO (reg2) - 1);
18597 }
18598
18599 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
18600 addr1 and addr2 must be in consecutive memory locations
18601 (addr2 == addr1 + 8). */
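/* For instance (illustrative), (mem:DF (reg 3)) and
   (mem:DF (plus (reg 3) (const_int 8))) qualify, letting a peephole fuse
   the two accesses into a single lfq or stfq.  */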
18602
18603 int
18604 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
18605 {
18606 rtx addr1, addr2;
18607 unsigned int reg1, reg2;
18608 int offset1, offset2;
18609
18610 /* The mems cannot be volatile. */
18611 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
18612 return 0;
18613
18614 addr1 = XEXP (mem1, 0);
18615 addr2 = XEXP (mem2, 0);
18616
18617 /* Extract an offset (if used) from the first addr. */
18618 if (GET_CODE (addr1) == PLUS)
18619 {
18620 /* If not a REG, return zero. */
18621 if (!REG_P (XEXP (addr1, 0)))
18622 return 0;
18623 else
18624 {
18625 reg1 = REGNO (XEXP (addr1, 0));
18626 /* The offset must be constant! */
18627 if (!CONST_INT_P (XEXP (addr1, 1)))
18628 return 0;
18629 offset1 = INTVAL (XEXP (addr1, 1));
18630 }
18631 }
18632 else if (!REG_P (addr1))
18633 return 0;
18634 else
18635 {
18636 reg1 = REGNO (addr1);
18637 /* This was a simple (mem (reg)) expression. Offset is 0. */
18638 offset1 = 0;
18639 }
18640
18641 /* And now for the second addr. */
18642 if (GET_CODE (addr2) == PLUS)
18643 {
18644 /* If not a REG, return zero. */
18645 if (!REG_P (XEXP (addr2, 0)))
18646 return 0;
18647 else
18648 {
18649 reg2 = REGNO (XEXP (addr2, 0));
18650 /* The offset must be constant. */
18651 if (!CONST_INT_P (XEXP (addr2, 1)))
18652 return 0;
18653 offset2 = INTVAL (XEXP (addr2, 1));
18654 }
18655 }
18656 else if (!REG_P (addr2))
18657 return 0;
18658 else
18659 {
18660 reg2 = REGNO (addr2);
18661 /* This was a simple (mem (reg)) expression. Offset is 0. */
18662 offset2 = 0;
18663 }
18664
18665 /* Both of these must have the same base register. */
18666 if (reg1 != reg2)
18667 return 0;
18668
18669 /* The offset for the second addr must be 8 more than the first addr. */
18670 if (offset2 != offset1 + 8)
18671 return 0;
18672
18673 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
18674 instructions. */
18675 return 1;
18676 }
18677 \f
18678 /* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. For SDmode values we
18679 need to use DDmode; in all other cases we can use the same mode. */
18680 static machine_mode
18681 rs6000_secondary_memory_needed_mode (machine_mode mode)
18682 {
18683 if (lra_in_progress && mode == SDmode)
18684 return DDmode;
18685 return mode;
18686 }
18687
18688 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
18689 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
18690 only work on the traditional altivec registers, note if an altivec register
18691 was chosen. */
18692
18693 static enum rs6000_reg_type
18694 register_to_reg_type (rtx reg, bool *is_altivec)
18695 {
18696 HOST_WIDE_INT regno;
18697 enum reg_class rclass;
18698
18699 if (SUBREG_P (reg))
18700 reg = SUBREG_REG (reg);
18701
18702 if (!REG_P (reg))
18703 return NO_REG_TYPE;
18704
18705 regno = REGNO (reg);
18706 if (!HARD_REGISTER_NUM_P (regno))
18707 {
18708 if (!lra_in_progress && !reload_completed)
18709 return PSEUDO_REG_TYPE;
18710
18711 regno = true_regnum (reg);
18712 if (regno < 0 || !HARD_REGISTER_NUM_P (regno))
18713 return PSEUDO_REG_TYPE;
18714 }
18715
18716 gcc_assert (regno >= 0);
18717
18718 if (is_altivec && ALTIVEC_REGNO_P (regno))
18719 *is_altivec = true;
18720
18721 rclass = rs6000_regno_regclass[regno];
18722 return reg_class_to_reg_type[(int)rclass];
18723 }
18724
18725 /* Helper function to return the cost of adding a TOC entry address. */
18726
18727 static inline int
18728 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
18729 {
18730 int ret;
18731
18732 if (TARGET_CMODEL != CMODEL_SMALL)
18733 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
18734
18735 else
18736 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
18737
18738 return ret;
18739 }
18740
18741 /* Helper function for rs6000_secondary_reload to determine whether the memory
18742 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
18743 needs reloading. Return negative if the memory is not handled by the memory
18744 helper functions, so that a different reload method should be tried; 0 if no
18745 additional instructions are needed; and positive to give the extra cost of the
18746 memory access. */
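/* For example (illustrative): on a target without Altivec reg+offset
   addressing, reloading (mem:V4SI (plus (reg sp) (const_int 32))) into
   an Altivec register returns a positive cost, and the reg+offset
   address is first materialized in a base register.  */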
18747
18748 static int
18749 rs6000_secondary_reload_memory (rtx addr,
18750 enum reg_class rclass,
18751 machine_mode mode)
18752 {
18753 int extra_cost = 0;
18754 rtx reg, and_arg, plus_arg0, plus_arg1;
18755 addr_mask_type addr_mask;
18756 const char *type = NULL;
18757 const char *fail_msg = NULL;
18758
18759 if (GPR_REG_CLASS_P (rclass))
18760 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
18761
18762 else if (rclass == FLOAT_REGS)
18763 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
18764
18765 else if (rclass == ALTIVEC_REGS)
18766 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
18767
18768 /* For the combined VSX_REGS, turn off Altivec AND -16. */
18769 else if (rclass == VSX_REGS)
18770 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
18771 & ~RELOAD_REG_AND_M16);
18772
18773 /* If the register allocator hasn't made up its mind yet on the register
18774 class to use, settle on defaults to use. */
18775 else if (rclass == NO_REGS)
18776 {
18777 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
18778 & ~RELOAD_REG_AND_M16);
18779
18780 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
18781 addr_mask &= ~(RELOAD_REG_INDEXED
18782 | RELOAD_REG_PRE_INCDEC
18783 | RELOAD_REG_PRE_MODIFY);
18784 }
18785
18786 else
18787 addr_mask = 0;
18788
18789 /* If the mode isn't valid in this register class, just return now. */
18790 if ((addr_mask & RELOAD_REG_VALID) == 0)
18791 {
18792 if (TARGET_DEBUG_ADDR)
18793 {
18794 fprintf (stderr,
18795 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
18796 "not valid in class\n",
18797 GET_MODE_NAME (mode), reg_class_names[rclass]);
18798 debug_rtx (addr);
18799 }
18800
18801 return -1;
18802 }
18803
18804 switch (GET_CODE (addr))
18805 {
18806 /* Does the register class support auto update forms for this mode? We
18807 don't need a scratch register, since PowerPC only supports
18808 PRE_INC, PRE_DEC, and PRE_MODIFY. */
18809 case PRE_INC:
18810 case PRE_DEC:
18811 reg = XEXP (addr, 0);
18812 if (!base_reg_operand (reg, GET_MODE (reg)))
18813 {
18814 fail_msg = "no base register #1";
18815 extra_cost = -1;
18816 }
18817
18818 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
18819 {
18820 extra_cost = 1;
18821 type = "update";
18822 }
18823 break;
18824
18825 case PRE_MODIFY:
18826 reg = XEXP (addr, 0);
18827 plus_arg1 = XEXP (addr, 1);
18828 if (!base_reg_operand (reg, GET_MODE (reg))
18829 || GET_CODE (plus_arg1) != PLUS
18830 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
18831 {
18832 fail_msg = "bad PRE_MODIFY";
18833 extra_cost = -1;
18834 }
18835
18836 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
18837 {
18838 extra_cost = 1;
18839 type = "update";
18840 }
18841 break;
18842
18843 /* Do we need to simulate AND -16 to clear the bottom address bits used
18844 in VMX load/stores? Only allow the AND for vector sizes. */
18845 case AND:
18846 and_arg = XEXP (addr, 0);
18847 if (GET_MODE_SIZE (mode) != 16
18848 || !CONST_INT_P (XEXP (addr, 1))
18849 || INTVAL (XEXP (addr, 1)) != -16)
18850 {
18851 fail_msg = "bad Altivec AND #1";
18852 extra_cost = -1;
18853 }
18854
18855 if (rclass != ALTIVEC_REGS)
18856 {
18857 if (legitimate_indirect_address_p (and_arg, false))
18858 extra_cost = 1;
18859
18860 else if (legitimate_indexed_address_p (and_arg, false))
18861 extra_cost = 2;
18862
18863 else
18864 {
18865 fail_msg = "bad Altivec AND #2";
18866 extra_cost = -1;
18867 }
18868
18869 type = "and";
18870 }
18871 break;
18872
18873 /* If this is an indirect address, make sure it is a base register. */
18874 case REG:
18875 case SUBREG:
18876 if (!legitimate_indirect_address_p (addr, false))
18877 {
18878 extra_cost = 1;
18879 type = "move";
18880 }
18881 break;
18882
18883 /* If this is an indexed address, make sure the register class can handle
18884 indexed addresses for this mode. */
18885 case PLUS:
18886 plus_arg0 = XEXP (addr, 0);
18887 plus_arg1 = XEXP (addr, 1);
18888
18889 /* (plus (plus (reg) (constant)) (constant)) is generated during
18890 push_reload processing, so handle it now. */
18891 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
18892 {
18893 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18894 {
18895 extra_cost = 1;
18896 type = "offset";
18897 }
18898 }
18899
18900 /* (plus (plus (reg) (constant)) (reg)) is also generated during
18901 push_reload processing, so handle it now. */
18902 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
18903 {
18904 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
18905 {
18906 extra_cost = 1;
18907 type = "indexed #2";
18908 }
18909 }
18910
18911 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
18912 {
18913 fail_msg = "no base register #2";
18914 extra_cost = -1;
18915 }
18916
18917 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
18918 {
18919 if ((addr_mask & RELOAD_REG_INDEXED) == 0
18920 || !legitimate_indexed_address_p (addr, false))
18921 {
18922 extra_cost = 1;
18923 type = "indexed";
18924 }
18925 }
18926
18927 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
18928 && CONST_INT_P (plus_arg1))
18929 {
18930 if (!quad_address_offset_p (INTVAL (plus_arg1)))
18931 {
18932 extra_cost = 1;
18933 type = "vector d-form offset";
18934 }
18935 }
18936
18937 /* Make sure the register class can handle offset addresses. */
18938 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
18939 {
18940 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18941 {
18942 extra_cost = 1;
18943 type = "offset #2";
18944 }
18945 }
18946
18947 else
18948 {
18949 fail_msg = "bad PLUS";
18950 extra_cost = -1;
18951 }
18952
18953 break;
18954
18955 case LO_SUM:
18956 /* Quad offsets are restricted and can't handle normal addresses. */
18957 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18958 {
18959 extra_cost = -1;
18960 type = "vector d-form lo_sum";
18961 }
18962
18963 else if (!legitimate_lo_sum_address_p (mode, addr, false))
18964 {
18965 fail_msg = "bad LO_SUM";
18966 extra_cost = -1;
18967 }
18968
18969 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
18970 {
18971 extra_cost = 1;
18972 type = "lo_sum";
18973 }
18974 break;
18975
18976 /* Static addresses need to create a TOC entry. */
18977 case CONST:
18978 case SYMBOL_REF:
18979 case LABEL_REF:
18980 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
18981 {
18982 extra_cost = -1;
18983 type = "vector d-form lo_sum #2";
18984 }
18985
18986 else
18987 {
18988 type = "address";
18989 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
18990 }
18991 break;
18992
18993 /* TOC references look like offsettable memory. */
18994 case UNSPEC:
18995 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
18996 {
18997 fail_msg = "bad UNSPEC";
18998 extra_cost = -1;
18999 }
19000
19001 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
19002 {
19003 extra_cost = -1;
19004 type = "vector d-form lo_sum #3";
19005 }
19006
19007 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19008 {
19009 extra_cost = 1;
19010 type = "toc reference";
19011 }
19012 break;
19013
19014 default:
19015 {
19016 fail_msg = "bad address";
19017 extra_cost = -1;
19018 }
19019 }
19020
19021 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
19022 {
19023 if (extra_cost < 0)
19024 fprintf (stderr,
19025 "rs6000_secondary_reload_memory error: mode = %s, "
19026 "class = %s, addr_mask = '%s', %s\n",
19027 GET_MODE_NAME (mode),
19028 reg_class_names[rclass],
19029 rs6000_debug_addr_mask (addr_mask, false),
19030 (fail_msg != NULL) ? fail_msg : "<bad address>");
19031
19032 else
19033 fprintf (stderr,
19034 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
19035 "addr_mask = '%s', extra cost = %d, %s\n",
19036 GET_MODE_NAME (mode),
19037 reg_class_names[rclass],
19038 rs6000_debug_addr_mask (addr_mask, false),
19039 extra_cost,
19040 (type) ? type : "<none>");
19041
19042 debug_rtx (addr);
19043 }
19044
19045 return extra_cost;
19046 }
19047
19048 /* Helper function for rs6000_secondary_reload to return true if a move to a
19049 different register class is really a simple move. */
19050
19051 static bool
19052 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
19053 enum rs6000_reg_type from_type,
19054 machine_mode mode)
19055 {
19056 int size = GET_MODE_SIZE (mode);
19057
19058 /* Add support for the various direct moves available. In this function, we
19059 only look at cases where we don't need any extra registers, and one or more
19060 simple move insns are issued. Originally, small integers were not allowed
19061 in FPR/VSX registers. Single precision binary floating point is not a
19062 simple move because we need to convert to the single precision memory
19063 layout. The 4-byte SDmode can be moved. TDmode values are disallowed since
19064 they need special direct move handling, which we do not support yet. */
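/* For instance (illustrative), a DImode move between a GPR and a VSX
   register on a 64-bit ISA 2.07 target is "simple": one mtvsrd or
   mfvsrd, with no scratch register required.  */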
19065 if (TARGET_DIRECT_MOVE
19066 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19067 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
19068 {
19069 if (TARGET_POWERPC64)
19070 {
19071 /* ISA 2.07: MTVSRD or MFVSRD. */
19072 if (size == 8)
19073 return true;
19074
19075 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
19076 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
19077 return true;
19078 }
19079
19080 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19081 if (TARGET_P8_VECTOR)
19082 {
19083 if (mode == SImode)
19084 return true;
19085
19086 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
19087 return true;
19088 }
19089
19090 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
19091 if (mode == SDmode)
19092 return true;
19093 }
19094
19095 /* Power6+: MFTGPR or MFFGPR. */
19096 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
19097 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
19098 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19099 return true;
19100
19101 /* Move to/from SPR. */
19102 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
19103 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
19104 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
19105 return true;
19106
19107 return false;
19108 }
19109
19110 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
19111 special direct moves that involve allocating an extra register. Return
19112 true if such a move is possible, filling in SRI with the insn code of the
19113 helper function and the extra cost if there is one. */
19114
19115 static bool
19116 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
19117 enum rs6000_reg_type from_type,
19118 machine_mode mode,
19119 secondary_reload_info *sri,
19120 bool altivec_p)
19121 {
19122 bool ret = false;
19123 enum insn_code icode = CODE_FOR_nothing;
19124 int cost = 0;
19125 int size = GET_MODE_SIZE (mode);
19126
19127 if (TARGET_POWERPC64 && size == 16)
19128 {
19129 /* Handle moving 128-bit values from GPRs to VSX registers on
19130 ISA 2.07 (power8, power9) when running in 64-bit mode using
19131 XXPERMDI to glue the two 64-bit values back together. */
19132 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19133 {
19134 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
19135 icode = reg_addr[mode].reload_vsx_gpr;
19136 }
19137
19138 /* Handle moving 128-bit values from VSX registers to GPRs on
19139 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
19140 bottom 64-bit value. */
19141 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19142 {
19143 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
19144 icode = reg_addr[mode].reload_gpr_vsx;
19145 }
19146 }
19147
19148 else if (TARGET_POWERPC64 && mode == SFmode)
19149 {
19150 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
19151 {
19152 cost = 3; /* xscvdpspn, mfvsrd, and. */
19153 icode = reg_addr[mode].reload_gpr_vsx;
19154 }
19155
19156 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
19157 {
19158 cost = 2; /* mtvsrwz, xscvspdpn. */
19159 icode = reg_addr[mode].reload_vsx_gpr;
19160 }
19161 }
19162
19163 else if (!TARGET_POWERPC64 && size == 8)
19164 {
19165 /* Handle moving 64-bit values from GPRs to floating point registers on
19166 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
19167 32-bit values back together. Altivec register classes must be handled
19168 specially since a different instruction is used, and the secondary
19169 reload support requires a single instruction class in the scratch
19170 register constraint. However, right now TFmode is not allowed in
19171 Altivec registers, so the pattern will never match. */
19172 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
19173 {
19174 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
19175 icode = reg_addr[mode].reload_fpr_gpr;
19176 }
19177 }
19178
19179 if (icode != CODE_FOR_nothing)
19180 {
19181 ret = true;
19182 if (sri)
19183 {
19184 sri->icode = icode;
19185 sri->extra_cost = cost;
19186 }
19187 }
19188
19189 return ret;
19190 }
19191
19192 /* Return whether a move between two register classes can be done either
19193 directly (simple move) or via a pattern that uses a single extra temporary
19194 (using ISA 2.07's direct move in this case). */
19195
19196 static bool
19197 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
19198 enum rs6000_reg_type from_type,
19199 machine_mode mode,
19200 secondary_reload_info *sri,
19201 bool altivec_p)
19202 {
19203 /* Fall back to load/store reloads if either type is not a register. */
19204 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
19205 return false;
19206
19207 /* If we haven't allocated registers yet, assume the move can be done for the
19208 standard register types. */
19209 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
19210 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
19211 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
19212 return true;
19213
19214 /* A move within the same set of registers is a simple move for
19215 non-specialized registers. */
19216 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
19217 return true;
19218
19219 /* Check whether a simple move can be done directly. */
19220 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
19221 {
19222 if (sri)
19223 {
19224 sri->icode = CODE_FOR_nothing;
19225 sri->extra_cost = 0;
19226 }
19227 return true;
19228 }
19229
19230 /* Now check if we can do it in a few steps. */
19231 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
19232 altivec_p);
19233 }
19234
19235 /* Inform reload about cases where moving X with a mode MODE to a register in
19236 RCLASS requires an extra scratch or immediate register. Return the class
19237 needed for the immediate register.
19238
19239 For VSX and Altivec, we may need a register to convert sp+offset into
19240 reg+sp.
19241
19242 For misaligned 64-bit gpr loads and stores we need a register to
19243 convert an offset address to indirect. */
19244
19245 static reg_class_t
19246 rs6000_secondary_reload (bool in_p,
19247 rtx x,
19248 reg_class_t rclass_i,
19249 machine_mode mode,
19250 secondary_reload_info *sri)
19251 {
19252 enum reg_class rclass = (enum reg_class) rclass_i;
19253 reg_class_t ret = ALL_REGS;
19254 enum insn_code icode;
19255 bool default_p = false;
19256 bool done_p = false;
19257
19258 /* Allow subreg of memory before/during reload. */
19259 bool memory_p = (MEM_P (x)
19260 || (!reload_completed && SUBREG_P (x)
19261 && MEM_P (SUBREG_REG (x))));
19262
19263 sri->icode = CODE_FOR_nothing;
19264 sri->t_icode = CODE_FOR_nothing;
19265 sri->extra_cost = 0;
19266 icode = ((in_p)
19267 ? reg_addr[mode].reload_load
19268 : reg_addr[mode].reload_store);
19269
19270 if (REG_P (x) || register_operand (x, mode))
19271 {
19272 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
19273 bool altivec_p = (rclass == ALTIVEC_REGS);
19274 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
19275
19276 if (!in_p)
19277 std::swap (to_type, from_type);
19278
19279 /* Can we do a direct move of some sort? */
19280 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
19281 altivec_p))
19282 {
19283 icode = (enum insn_code)sri->icode;
19284 default_p = false;
19285 done_p = true;
19286 ret = NO_REGS;
19287 }
19288 }
19289
19290 /* Make sure 0.0 is not reloaded or forced into memory. */
19291 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
19292 {
19293 ret = NO_REGS;
19294 default_p = false;
19295 done_p = true;
19296 }
19297
19298 /* If this is a scalar floating point value and we want to load it into the
19299 traditional Altivec registers, do it by moving through a traditional
19300 floating point register, unless we have D-form addressing. Also make sure
19301 that non-zero constants use a FPR. */
19302 if (!done_p && reg_addr[mode].scalar_in_vmx_p
19303 && !mode_supports_vmx_dform (mode)
19304 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
19305 && (memory_p || CONST_DOUBLE_P (x)))
19306 {
19307 ret = FLOAT_REGS;
19308 default_p = false;
19309 done_p = true;
19310 }
19311
19312 /* Handle reload of load/stores if we have reload helper functions. */
19313 if (!done_p && icode != CODE_FOR_nothing && memory_p)
19314 {
19315 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
19316 mode);
19317
19318 if (extra_cost >= 0)
19319 {
19320 done_p = true;
19321 ret = NO_REGS;
19322 if (extra_cost > 0)
19323 {
19324 sri->extra_cost = extra_cost;
19325 sri->icode = icode;
19326 }
19327 }
19328 }
19329
19330 /* Handle unaligned loads and stores of integer registers. */
19331 if (!done_p && TARGET_POWERPC64
19332 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19333 && memory_p
19334 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
19335 {
19336 rtx addr = XEXP (x, 0);
19337 rtx off = address_offset (addr);
19338
19339 if (off != NULL_RTX)
19340 {
19341 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19342 unsigned HOST_WIDE_INT offset = INTVAL (off);
19343
19344 /* We need a secondary reload when our legitimate_address_p
19345 says the address is good (as otherwise the entire address
19346 will be reloaded), and the offset is not a multiple of
19347 four or we have an address wrap. Address wrap will only
19348 occur for LO_SUMs since legitimate_offset_address_p
19349 rejects addresses for 16-byte mems that will wrap. */
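/* Example (illustrative): a DImode access at (plus (reg) (const_int 6))
   passes legitimate_address_p, but ld/std are DS-form and need an
   offset that is a multiple of 4, so the scratch register is used.  */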
19350 if (GET_CODE (addr) == LO_SUM
19351 ? (1 /* legitimate_address_p allows any offset for lo_sum */
19352 && ((offset & 3) != 0
19353 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
19354 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
19355 && (offset & 3) != 0))
19356 {
19357 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
19358 if (in_p)
19359 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
19360 : CODE_FOR_reload_di_load);
19361 else
19362 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
19363 : CODE_FOR_reload_di_store);
19364 sri->extra_cost = 2;
19365 ret = NO_REGS;
19366 done_p = true;
19367 }
19368 else
19369 default_p = true;
19370 }
19371 else
19372 default_p = true;
19373 }
19374
19375 if (!done_p && !TARGET_POWERPC64
19376 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
19377 && memory_p
19378 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
19379 {
19380 rtx addr = XEXP (x, 0);
19381 rtx off = address_offset (addr);
19382
19383 if (off != NULL_RTX)
19384 {
19385 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
19386 unsigned HOST_WIDE_INT offset = INTVAL (off);
19387
19388 /* We need a secondary reload when our legitimate_address_p
19389 says the address is good (as otherwise the entire address
19390 will be reloaded), and we have a wrap.
19391
19392 legitimate_lo_sum_address_p allows LO_SUM addresses to
19393 have any offset so test for wrap in the low 16 bits.
19394
19395 legitimate_offset_address_p checks for the range
19396 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
19397 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
19398 [0x7ff4,0x7fff] respectively, so test for the
19399 intersection of these ranges, [0x7ffc,0x7fff] and
19400 [0x7ff4,0x7ff7] respectively.
19401
19402 Note that the address we see here may have been
19403 manipulated by legitimize_reload_address. */
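/* Example (illustrative): with 32-bit words, a 16-byte access at
   offset 0x7ff4 needs words at 0x7ff4 .. 0x8000, and the last one is
   outside the 16-bit displacement range, hence the reload below.  */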
19404 if (GET_CODE (addr) == LO_SUM
19405 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
19406 : offset - (0x8000 - extra) < UNITS_PER_WORD)
19407 {
19408 if (in_p)
19409 sri->icode = CODE_FOR_reload_si_load;
19410 else
19411 sri->icode = CODE_FOR_reload_si_store;
19412 sri->extra_cost = 2;
19413 ret = NO_REGS;
19414 done_p = true;
19415 }
19416 else
19417 default_p = true;
19418 }
19419 else
19420 default_p = true;
19421 }
19422
19423 if (!done_p)
19424 default_p = true;
19425
19426 if (default_p)
19427 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
19428
19429 gcc_assert (ret != ALL_REGS);
19430
19431 if (TARGET_DEBUG_ADDR)
19432 {
19433 fprintf (stderr,
19434 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
19435 "mode = %s",
19436 reg_class_names[ret],
19437 in_p ? "true" : "false",
19438 reg_class_names[rclass],
19439 GET_MODE_NAME (mode));
19440
19441 if (reload_completed)
19442 fputs (", after reload", stderr);
19443
19444 if (!done_p)
19445 fputs (", done_p not set", stderr);
19446
19447 if (default_p)
19448 fputs (", default secondary reload", stderr);
19449
19450 if (sri->icode != CODE_FOR_nothing)
19451 fprintf (stderr, ", reload func = %s, extra cost = %d",
19452 insn_data[sri->icode].name, sri->extra_cost);
19453
19454 else if (sri->extra_cost > 0)
19455 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
19456
19457 fputs ("\n", stderr);
19458 debug_rtx (x);
19459 }
19460
19461 return ret;
19462 }
19463
19464 /* Better tracing for rs6000_secondary_reload_inner. */
19465
19466 static void
19467 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
19468 bool store_p)
19469 {
19470 rtx set, clobber;
19471
19472 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
19473
19474 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
19475 store_p ? "store" : "load");
19476
19477 if (store_p)
19478 set = gen_rtx_SET (mem, reg);
19479 else
19480 set = gen_rtx_SET (reg, mem);
19481
19482 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
19483 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
19484 }
19485
19486 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
19487 ATTRIBUTE_NORETURN;
19488
19489 static void
19490 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
19491 bool store_p)
19492 {
19493 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
19494 gcc_unreachable ();
19495 }
19496
19497 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
19498 reload helper functions. These were identified in
19499 rs6000_secondary_reload_memory, and if reload decided to use the secondary
19500 reload, it calls the insns:
19501 reload_<RELOAD:mode>_<P:mptrsize>_store
19502 reload_<RELOAD:mode>_<P:mptrsize>_load
19503
19504 which in turn calls this function, to do whatever is necessary to create
19505 valid addresses. */
19506
19507 void
19508 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
19509 {
19510 int regno = true_regnum (reg);
19511 machine_mode mode = GET_MODE (reg);
19512 addr_mask_type addr_mask;
19513 rtx addr;
19514 rtx new_addr;
19515 rtx op_reg, op0, op1;
19516 rtx and_op;
19517 rtx cc_clobber;
19518 rtvec rv;
19519
19520 if (regno < 0 || !HARD_REGISTER_NUM_P (regno) || !MEM_P (mem)
19521 || !base_reg_operand (scratch, GET_MODE (scratch)))
19522 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19523
19524 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
19525 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
19526
19527 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
19528 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
19529
19530 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
19531 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
19532
19533 else
19534 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19535
19536 /* Make sure the mode is valid in this register class. */
19537 if ((addr_mask & RELOAD_REG_VALID) == 0)
19538 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19539
19540 if (TARGET_DEBUG_ADDR)
19541 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
19542
19543 new_addr = addr = XEXP (mem, 0);
19544 switch (GET_CODE (addr))
19545 {
19546 /* Does the register class support auto update forms for this mode? If
19547 not, do the update now. We don't need a scratch register, since
19548 PowerPC only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
19549 case PRE_INC:
19550 case PRE_DEC:
19551 op_reg = XEXP (addr, 0);
19552 if (!base_reg_operand (op_reg, Pmode))
19553 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19554
19555 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
19556 {
19557 int delta = GET_MODE_SIZE (mode);
19558 if (GET_CODE (addr) == PRE_DEC)
19559 delta = -delta;
19560 emit_insn (gen_add2_insn (op_reg, GEN_INT (delta)));
19561 new_addr = op_reg;
19562 }
19563 break;
19564
19565 case PRE_MODIFY:
19566 op0 = XEXP (addr, 0);
19567 op1 = XEXP (addr, 1);
19568 if (!base_reg_operand (op0, Pmode)
19569 || GET_CODE (op1) != PLUS
19570 || !rtx_equal_p (op0, XEXP (op1, 0)))
19571 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19572
19573 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
19574 {
19575 emit_insn (gen_rtx_SET (op0, op1));
19576 new_addr = op0;
19577 }
19578 break;
19579
19580 /* Do we need to simulate AND -16 to clear the bottom address bits used
19581 in VMX load/stores? */
19582 case AND:
19583 op0 = XEXP (addr, 0);
19584 op1 = XEXP (addr, 1);
19585 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
19586 {
19587 if (REG_P (op0) || SUBREG_P (op0))
19588 op_reg = op0;
19589
19590 else if (GET_CODE (op1) == PLUS)
19591 {
19592 emit_insn (gen_rtx_SET (scratch, op1));
19593 op_reg = scratch;
19594 }
19595
19596 else
19597 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19598
19599 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
19600 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
19601 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
19602 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
19603 new_addr = scratch;
19604 }
19605 break;
19606
19607 /* If this is an indirect address, make sure it is a base register. */
19608 case REG:
19609 case SUBREG:
19610 if (!base_reg_operand (addr, GET_MODE (addr)))
19611 {
19612 emit_insn (gen_rtx_SET (scratch, addr));
19613 new_addr = scratch;
19614 }
19615 break;
19616
19617 /* If this is an indexed address, make sure the register class can handle
19618 indexed addresses for this mode. */
19619 case PLUS:
19620 op0 = XEXP (addr, 0);
19621 op1 = XEXP (addr, 1);
19622 if (!base_reg_operand (op0, Pmode))
19623 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19624
19625 else if (int_reg_operand (op1, Pmode))
19626 {
19627 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19628 {
19629 emit_insn (gen_rtx_SET (scratch, addr));
19630 new_addr = scratch;
19631 }
19632 }
19633
19634 else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
19635 {
19636 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
19637 || !quad_address_p (addr, mode, false))
19638 {
19639 emit_insn (gen_rtx_SET (scratch, addr));
19640 new_addr = scratch;
19641 }
19642 }
19643
19644 /* Make sure the register class can handle offset addresses. */
19645 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
19646 {
19647 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19648 {
19649 emit_insn (gen_rtx_SET (scratch, addr));
19650 new_addr = scratch;
19651 }
19652 }
19653
19654 else
19655 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19656
19657 break;
19658
19659 case LO_SUM:
19660 op0 = XEXP (addr, 0);
19661 op1 = XEXP (addr, 1);
19662 if (!base_reg_operand (op0, Pmode))
19663 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19664
19665 else if (int_reg_operand (op1, Pmode))
19666 {
19667 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
19668 {
19669 emit_insn (gen_rtx_SET (scratch, addr));
19670 new_addr = scratch;
19671 }
19672 }
19673
19674 /* Quad offsets are restricted and can't handle normal addresses. */
19675 else if (mode_supports_dq_form (mode))
19676 {
19677 emit_insn (gen_rtx_SET (scratch, addr));
19678 new_addr = scratch;
19679 }
19680
19681 /* Make sure the register class can handle offset addresses. */
19682 else if (legitimate_lo_sum_address_p (mode, addr, false))
19683 {
19684 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
19685 {
19686 emit_insn (gen_rtx_SET (scratch, addr));
19687 new_addr = scratch;
19688 }
19689 }
19690
19691 else
19692 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19693
19694 break;
19695
19696 case SYMBOL_REF:
19697 case CONST:
19698 case LABEL_REF:
19699 rs6000_emit_move (scratch, addr, Pmode);
19700 new_addr = scratch;
19701 break;
19702
19703 default:
19704 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
19705 }
19706
19707 /* Adjust the address if it changed. */
19708 if (addr != new_addr)
19709 {
19710 mem = replace_equiv_address_nv (mem, new_addr);
19711 if (TARGET_DEBUG_ADDR)
19712 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
19713 }
19714
19715 /* Now create the move. */
19716 if (store_p)
19717 emit_insn (gen_rtx_SET (mem, reg));
19718 else
19719 emit_insn (gen_rtx_SET (reg, mem));
19720
19721 return;
19722 }
19723
19724 /* Convert reloads involving 64-bit gprs and misaligned offset
19725 addressing, or multiple 32-bit gprs and offsets that are too large,
19726 to use indirect addressing. */
19727
19728 void
19729 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
19730 {
19731 int regno = true_regnum (reg);
19732 enum reg_class rclass;
19733 rtx addr;
19734 rtx scratch_or_premodify = scratch;
19735
19736 if (TARGET_DEBUG_ADDR)
19737 {
19738 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
19739 store_p ? "store" : "load");
19740 fprintf (stderr, "reg:\n");
19741 debug_rtx (reg);
19742 fprintf (stderr, "mem:\n");
19743 debug_rtx (mem);
19744 fprintf (stderr, "scratch:\n");
19745 debug_rtx (scratch);
19746 }
19747
19748 gcc_assert (regno >= 0 && HARD_REGISTER_NUM_P (regno));
19749 gcc_assert (MEM_P (mem));
19750 rclass = REGNO_REG_CLASS (regno);
19751 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
19752 addr = XEXP (mem, 0);
19753
19754 if (GET_CODE (addr) == PRE_MODIFY)
19755 {
19756 gcc_assert (REG_P (XEXP (addr, 0))
19757 && GET_CODE (XEXP (addr, 1)) == PLUS
19758 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
19759 scratch_or_premodify = XEXP (addr, 0);
19760 addr = XEXP (addr, 1);
19761 }
19762 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
19763
19764 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
19765
19766 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
19767
19768 /* Now create the move. */
19769 if (store_p)
19770 emit_insn (gen_rtx_SET (mem, reg));
19771 else
19772 emit_insn (gen_rtx_SET (reg, mem));
19773
19774 return;
19775 }
19776
19777 /* Given an rtx X being reloaded into a reg required to be
19778 in class CLASS, return the class of reg to actually use.
19779 In general this is just CLASS; but on some machines
19780 in some cases it is preferable to use a more restrictive class.
19781
19782 On the RS/6000, we have to return NO_REGS when we want to reload a
19783 floating-point CONST_DOUBLE to force it to be copied to memory.
19784
19785 We also don't want to reload integer values into floating-point
19786 registers if we can at all help it. In fact, this can
19787 cause reload to die, if it tries to generate a reload of CTR
19788 into an FP register and discovers it doesn't have the memory location
19789 required.
19790
19791 ??? Would it be a good idea to have reload do the converse, that is
19792 try to reload floating modes into FP registers if possible?
19793 */
19794
19795 static enum reg_class
19796 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
19797 {
19798 machine_mode mode = GET_MODE (x);
19799 bool is_constant = CONSTANT_P (x);
19800
19801 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
19802 reload class for it. */
19803 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19804 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
19805 return NO_REGS;
19806
19807 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
19808 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
19809 return NO_REGS;
19810
19811 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
19812 the reloading of address expressions using PLUS into floating point
19813 registers. */
19814 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
19815 {
19816 if (is_constant)
19817 {
19818 /* Zero is always allowed in all VSX registers. */
19819 if (x == CONST0_RTX (mode))
19820 return rclass;
19821
19822 /* If this is a vector constant that can be formed with a few Altivec
19823 instructions, we want altivec registers. */
19824 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
19825 return ALTIVEC_REGS;
19826
19827 /* If this is an integer constant that can easily be loaded into
19828 vector registers, allow it. */
19829 if (CONST_INT_P (x))
19830 {
19831 HOST_WIDE_INT value = INTVAL (x);
19832
19833 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
19834 2.06 can generate it in the Altivec registers with
19835 VSPLTI<x>. */
19836 if (value == -1)
19837 {
19838 if (TARGET_P8_VECTOR)
19839 return rclass;
19840 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
19841 return ALTIVEC_REGS;
19842 else
19843 return NO_REGS;
19844 }
19845
19846 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
19847 a sign extend in the Altivec registers. */
19848 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
19849 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
19850 return ALTIVEC_REGS;
19851 }
19852
19853 /* Force constant to memory. */
19854 return NO_REGS;
19855 }
19856
19857 /* D-form addressing can easily reload the value. */
19858 if (mode_supports_vmx_dform (mode)
19859 || mode_supports_dq_form (mode))
19860 return rclass;
19861
19862 /* If this is a scalar floating point value and we don't have D-form
19863 addressing, prefer the traditional floating point registers so that we
19864 can use D-form (register+offset) addressing. */
19865 if (rclass == VSX_REGS
19866 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
19867 return FLOAT_REGS;
19868
19869 /* Prefer the Altivec registers if Altivec is handling the vector
19870 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
19871 loads. */
19872 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
19873 || mode == V1TImode)
19874 return ALTIVEC_REGS;
19875
19876 return rclass;
19877 }
19878
19879 if (is_constant || GET_CODE (x) == PLUS)
19880 {
19881 if (reg_class_subset_p (GENERAL_REGS, rclass))
19882 return GENERAL_REGS;
19883 if (reg_class_subset_p (BASE_REGS, rclass))
19884 return BASE_REGS;
19885 return NO_REGS;
19886 }
19887
19888 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == GEN_OR_FLOAT_REGS)
19889 return GENERAL_REGS;
19890
19891 return rclass;
19892 }
19893
19894 /* Debug version of rs6000_preferred_reload_class. */
19895 static enum reg_class
19896 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
19897 {
19898 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
19899
19900 fprintf (stderr,
19901 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
19902 "mode = %s, x:\n",
19903 reg_class_names[ret], reg_class_names[rclass],
19904 GET_MODE_NAME (GET_MODE (x)));
19905 debug_rtx (x);
19906
19907 return ret;
19908 }
19909
19910 /* If we are copying between FP or AltiVec registers and anything else, we need
19911 a memory location. The exception is when we are targeting ppc64 and the
19912 fpr/gpr move instructions are available. Also, under VSX, you
19913 can copy vector registers from the FP register set to the Altivec register
19914 set and vice versa. */
19915
19916 static bool
19917 rs6000_secondary_memory_needed (machine_mode mode,
19918 reg_class_t from_class,
19919 reg_class_t to_class)
19920 {
19921 enum rs6000_reg_type from_type, to_type;
19922 bool altivec_p = ((from_class == ALTIVEC_REGS)
19923 || (to_class == ALTIVEC_REGS));
19924
19925 /* If a simple/direct move is available, we don't need secondary memory. */
19926 from_type = reg_class_to_reg_type[(int)from_class];
19927 to_type = reg_class_to_reg_type[(int)to_class];
19928
19929 if (rs6000_secondary_reload_move (to_type, from_type, mode,
19930 (secondary_reload_info *)0, altivec_p))
19931 return false;
19932
19933 /* If we have a floating point or vector register class, we need to use
19934 memory to transfer the data. */
19935 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
19936 return true;
19937
19938 return false;
19939 }
19940
19941 /* Debug version of rs6000_secondary_memory_needed. */
19942 static bool
19943 rs6000_debug_secondary_memory_needed (machine_mode mode,
19944 reg_class_t from_class,
19945 reg_class_t to_class)
19946 {
19947 bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);
19948
19949 fprintf (stderr,
19950 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
19951 "to_class = %s, mode = %s\n",
19952 ret ? "true" : "false",
19953 reg_class_names[from_class],
19954 reg_class_names[to_class],
19955 GET_MODE_NAME (mode));
19956
19957 return ret;
19958 }
19959
19960 /* Return the register class of a scratch register needed to copy IN into
19961 or out of a register in RCLASS in MODE. If it can be done directly,
19962 NO_REGS is returned. */
19963
19964 static enum reg_class
19965 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
19966 rtx in)
19967 {
19968 int regno;
19969
19970 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
19971 #if TARGET_MACHO
19972 && MACHOPIC_INDIRECT
19973 #endif
19974 ))
19975 {
19976 /* We cannot copy a symbolic operand directly into anything
19977 other than BASE_REGS for TARGET_ELF. So indicate that a
19978 register from BASE_REGS is needed as an intermediate
19979 register.
19980
19981 On Darwin, pic addresses require a load from memory, which
19982 needs a base register. */
19983 if (rclass != BASE_REGS
19984 && (SYMBOL_REF_P (in)
19985 || GET_CODE (in) == HIGH
19986 || GET_CODE (in) == LABEL_REF
19987 || GET_CODE (in) == CONST))
19988 return BASE_REGS;
19989 }
19990
19991 if (REG_P (in))
19992 {
19993 regno = REGNO (in);
19994 if (!HARD_REGISTER_NUM_P (regno))
19995 {
19996 regno = true_regnum (in);
19997 if (!HARD_REGISTER_NUM_P (regno))
19998 regno = -1;
19999 }
20000 }
20001 else if (SUBREG_P (in))
20002 {
20003 regno = true_regnum (in);
20004 if (!HARD_REGISTER_NUM_P (regno))
20005 regno = -1;
20006 }
20007 else
20008 regno = -1;
20009
20010 /* If we have VSX register moves, prefer moving scalar values between
20011 Altivec registers and GPRs by going via an FPR (and then via memory)
20012 instead of reloading the secondary memory address for Altivec moves. */
20013 if (TARGET_VSX
20014 && GET_MODE_SIZE (mode) < 16
20015 && !mode_supports_vmx_dform (mode)
20016 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
20017 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
20018 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
20019 && (regno >= 0 && INT_REGNO_P (regno)))))
20020 return FLOAT_REGS;
20021
20022 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
20023 into anything. */
20024 if (rclass == GENERAL_REGS || rclass == BASE_REGS
20025 || (regno >= 0 && INT_REGNO_P (regno)))
20026 return NO_REGS;
20027
20028 /* Constants, memory, and VSX registers can go into VSX registers (both the
20029 traditional floating point and the altivec registers). */
20030 if (rclass == VSX_REGS
20031 && (regno == -1 || VSX_REGNO_P (regno)))
20032 return NO_REGS;
20033
20034 /* Constants, memory, and FP registers can go into FP registers. */
20035 if ((regno == -1 || FP_REGNO_P (regno))
20036 && (rclass == FLOAT_REGS || rclass == GEN_OR_FLOAT_REGS))
20037 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
20038
20039 /* Memory and AltiVec registers can go into AltiVec registers. */
20040 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
20041 && rclass == ALTIVEC_REGS)
20042 return NO_REGS;
20043
20044 /* We can copy among the CR registers. */
20045 if ((rclass == CR_REGS || rclass == CR0_REGS)
20046 && regno >= 0 && CR_REGNO_P (regno))
20047 return NO_REGS;
20048
20049 /* Otherwise, we need GENERAL_REGS. */
20050 return GENERAL_REGS;
20051 }
20052
20053 /* Debug version of rs6000_secondary_reload_class. */
20054 static enum reg_class
20055 rs6000_debug_secondary_reload_class (enum reg_class rclass,
20056 machine_mode mode, rtx in)
20057 {
20058 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
20059 fprintf (stderr,
20060 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
20061 "mode = %s, input rtx:\n",
20062 reg_class_names[ret], reg_class_names[rclass],
20063 GET_MODE_NAME (mode));
20064 debug_rtx (in);
20065
20066 return ret;
20067 }
20068
20069 /* Implement TARGET_CAN_CHANGE_MODE_CLASS. */
20070
20071 static bool
20072 rs6000_can_change_mode_class (machine_mode from,
20073 machine_mode to,
20074 reg_class_t rclass)
20075 {
20076 unsigned from_size = GET_MODE_SIZE (from);
20077 unsigned to_size = GET_MODE_SIZE (to);
20078
20079 if (from_size != to_size)
20080 {
20081 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
20082
20083 if (reg_classes_intersect_p (xclass, rclass))
20084 {
20085 unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
20086 unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
20087 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
20088 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
20089
20090 /* Don't allow 64-bit types to overlap with 128-bit types that take a
20091 single register under VSX because the scalar part of the register
20092 is in the upper 64 bits, and not the lower 64 bits. Types like
20093 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
20094 IEEE floating point can't overlap, and neither can small
20095 values. */
20096
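/* E.g. (illustrative) a DFmode subreg of a V2DFmode value is rejected
   here: the scalar DF lives in the upper 64 bits of the VSX register,
   which subreg numbering does not describe.  */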
20097 if (to_float128_vector_p && from_float128_vector_p)
20098 return true;
20099
20100 else if (to_float128_vector_p || from_float128_vector_p)
20101 return false;
20102
20103 /* TDmode in floating-point registers must always go into a register
20104 pair with the most significant word in the even-numbered register
20105 to match ISA requirements. In little-endian mode, this does not
20106 match subreg numbering, so we cannot allow subregs. */
20107 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
20108 return false;
20109
20110 if (from_size < 8 || to_size < 8)
20111 return false;
20112
20113 if (from_size == 8 && (8 * to_nregs) != to_size)
20114 return false;
20115
20116 if (to_size == 8 && (8 * from_nregs) != from_size)
20117 return false;
20118
20119 return true;
20120 }
20121 else
20122 return true;
20123 }
20124
20125 /* Since the VSX register set includes traditional floating point registers
20126 and Altivec registers, just check for the size being different instead of
20127 trying to check whether the modes are vector modes. Otherwise it won't
20128 allow, say, DF and DI to change classes. For types like TFmode and TDmode
20129 that take 2 64-bit registers, rather than a single 128-bit register, don't
20130 allow subregs of those types to other 128-bit types. */
20131 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
20132 {
20133 unsigned num_regs = (from_size + 15) / 16;
20134 if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
20135 || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
20136 return false;
20137
20138 return (from_size == 8 || from_size == 16);
20139 }
20140
20141 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
20142 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
20143 return false;
20144
20145 return true;
20146 }
20147
20148 /* Debug version of rs6000_can_change_mode_class. */
20149 static bool
20150 rs6000_debug_can_change_mode_class (machine_mode from,
20151 machine_mode to,
20152 reg_class_t rclass)
20153 {
20154 bool ret = rs6000_can_change_mode_class (from, to, rclass);
20155
20156 fprintf (stderr,
20157 "rs6000_can_change_mode_class, return %s, from = %s, "
20158 "to = %s, rclass = %s\n",
20159 ret ? "true" : "false",
20160 GET_MODE_NAME (from), GET_MODE_NAME (to),
20161 reg_class_names[rclass]);
20162
20163 return ret;
20164 }
20165 \f
20166 /* Return a string to do a move operation of 128 bits of data. */
20167
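/* For example, a VSX register to VSX register copy comes back as
   "xxlor %x0,%x1,%x1", while a GPR to GPR move returns "#" so that a
   splitter can break it into word-sized moves after reload.  */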
20168 const char *
20169 rs6000_output_move_128bit (rtx operands[])
20170 {
20171 rtx dest = operands[0];
20172 rtx src = operands[1];
20173 machine_mode mode = GET_MODE (dest);
20174 int dest_regno;
20175 int src_regno;
20176 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
20177 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
20178
20179 if (REG_P (dest))
20180 {
20181 dest_regno = REGNO (dest);
20182 dest_gpr_p = INT_REGNO_P (dest_regno);
20183 dest_fp_p = FP_REGNO_P (dest_regno);
20184 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
20185 dest_vsx_p = dest_fp_p | dest_vmx_p;
20186 }
20187 else
20188 {
20189 dest_regno = -1;
20190 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
20191 }
20192
20193 if (REG_P (src))
20194 {
20195 src_regno = REGNO (src);
20196 src_gpr_p = INT_REGNO_P (src_regno);
20197 src_fp_p = FP_REGNO_P (src_regno);
20198 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
20199 src_vsx_p = src_fp_p | src_vmx_p;
20200 }
20201 else
20202 {
20203 src_regno = -1;
20204 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
20205 }
20206
20207 /* Register moves. */
20208 if (dest_regno >= 0 && src_regno >= 0)
20209 {
20210 if (dest_gpr_p)
20211 {
20212 if (src_gpr_p)
20213 return "#";
20214
20215 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
20216 return (WORDS_BIG_ENDIAN
20217 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
20218 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
20219
20220 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
20221 return "#";
20222 }
20223
20224 else if (TARGET_VSX && dest_vsx_p)
20225 {
20226 if (src_vsx_p)
20227 return "xxlor %x0,%x1,%x1";
20228
20229 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
20230 return (WORDS_BIG_ENDIAN
20231 ? "mtvsrdd %x0,%1,%L1"
20232 : "mtvsrdd %x0,%L1,%1");
20233
20234 else if (TARGET_DIRECT_MOVE && src_gpr_p)
20235 return "#";
20236 }
20237
20238 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
20239 return "vor %0,%1,%1";
20240
20241 else if (dest_fp_p && src_fp_p)
20242 return "#";
20243 }
20244
20245 /* Loads. */
20246 else if (dest_regno >= 0 && MEM_P (src))
20247 {
20248 if (dest_gpr_p)
20249 {
20250 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20251 return "lq %0,%1";
20252 else
20253 return "#";
20254 }
20255
20256 else if (TARGET_ALTIVEC && dest_vmx_p
20257 && altivec_indexed_or_indirect_operand (src, mode))
20258 return "lvx %0,%y1";
20259
20260 else if (TARGET_VSX && dest_vsx_p)
20261 {
20262 if (mode_supports_dq_form (mode)
20263 && quad_address_p (XEXP (src, 0), mode, true))
20264 return "lxv %x0,%1";
20265
20266 else if (TARGET_P9_VECTOR)
20267 return "lxvx %x0,%y1";
20268
20269 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20270 return "lxvw4x %x0,%y1";
20271
20272 else
20273 return "lxvd2x %x0,%y1";
20274 }
20275
20276 else if (TARGET_ALTIVEC && dest_vmx_p)
20277 return "lvx %0,%y1";
20278
20279 else if (dest_fp_p)
20280 return "#";
20281 }
20282
20283 /* Stores. */
20284 else if (src_regno >= 0 && MEM_P (dest))
20285 {
20286 if (src_gpr_p)
20287 {
20288 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
20289 return "stq %1,%0";
20290 else
20291 return "#";
20292 }
20293
20294 else if (TARGET_ALTIVEC && src_vmx_p
20295 && altivec_indexed_or_indirect_operand (dest, mode))
20296 return "stvx %1,%y0";
20297
20298 else if (TARGET_VSX && src_vsx_p)
20299 {
20300 if (mode_supports_dq_form (mode)
20301 && quad_address_p (XEXP (dest, 0), mode, true))
20302 return "stxv %x1,%0";
20303
20304 else if (TARGET_P9_VECTOR)
20305 return "stxvx %x1,%y0";
20306
20307 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
20308 return "stxvw4x %x1,%y0";
20309
20310 else
20311 return "stxvd2x %x1,%y0";
20312 }
20313
20314 else if (TARGET_ALTIVEC && src_vmx_p)
20315 return "stvx %1,%y0";
20316
20317 else if (src_fp_p)
20318 return "#";
20319 }
20320
20321 /* Constants. */
20322 else if (dest_regno >= 0
20323 && (CONST_INT_P (src)
20324 || CONST_WIDE_INT_P (src)
20325 || CONST_DOUBLE_P (src)
20326 || GET_CODE (src) == CONST_VECTOR))
20327 {
20328 if (dest_gpr_p)
20329 return "#";
20330
20331 else if ((dest_vmx_p && TARGET_ALTIVEC)
20332 || (dest_vsx_p && TARGET_VSX))
20333 return output_vec_const_move (operands);
20334 }
20335
20336 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
20337 }
20338
20339 /* Validate a 128-bit move. */
20340 bool
20341 rs6000_move_128bit_ok_p (rtx operands[])
20342 {
20343 machine_mode mode = GET_MODE (operands[0]);
20344 return (gpc_reg_operand (operands[0], mode)
20345 || gpc_reg_operand (operands[1], mode));
20346 }
20347
20348 /* Return true if a 128-bit move needs to be split. */
20349 bool
20350 rs6000_split_128bit_ok_p (rtx operands[])
20351 {
20352 if (!reload_completed)
20353 return false;
20354
20355 if (!gpr_or_gpr_p (operands[0], operands[1]))
20356 return false;
20357
20358 if (quad_load_store_p (operands[0], operands[1]))
20359 return false;
20360
20361 return true;
20362 }
20363
20364 \f
20365 /* Given a comparison operation, return the bit number in CCR to test. We
20366 know this is a valid comparison.
20367
20368 SCC_P is 1 if this is for an scc. That means that %D will have been
20369 used instead of %C, so the bits will be in different places.
20370
20371 Return -1 if OP isn't a valid comparison for some reason. */
20372
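/* For example, a GT test against CR4 gives base_bit = 4 * 4 = 16 and
   returns bit 16 + 1 = 17; with SCC_P set, GE instead selects the
   unordered bit, 16 + 3 = 19, since a cror will have copied the result
   there (see the GE/GEU case below).  */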
20373 int
20374 ccr_bit (rtx op, int scc_p)
20375 {
20376 enum rtx_code code = GET_CODE (op);
20377 machine_mode cc_mode;
20378 int cc_regnum;
20379 int base_bit;
20380 rtx reg;
20381
20382 if (!COMPARISON_P (op))
20383 return -1;
20384
20385 reg = XEXP (op, 0);
20386
20387 if (!REG_P (reg) || !CR_REGNO_P (REGNO (reg)))
20388 return -1;
20389
20390 cc_mode = GET_MODE (reg);
20391 cc_regnum = REGNO (reg);
20392 base_bit = 4 * (cc_regnum - CR0_REGNO);
20393
20394 validate_condition_mode (code, cc_mode);
20395
20396 /* When generating a sCOND operation, only positive conditions are
20397 allowed. */
20398 if (scc_p)
20399 switch (code)
20400 {
20401 case EQ:
20402 case GT:
20403 case LT:
20404 case UNORDERED:
20405 case GTU:
20406 case LTU:
20407 break;
20408 default:
20409 return -1;
20410 }
20411
20412 switch (code)
20413 {
20414 case NE:
20415 return scc_p ? base_bit + 3 : base_bit + 2;
20416 case EQ:
20417 return base_bit + 2;
20418 case GT: case GTU: case UNLE:
20419 return base_bit + 1;
20420 case LT: case LTU: case UNGE:
20421 return base_bit;
20422 case ORDERED: case UNORDERED:
20423 return base_bit + 3;
20424
20425 case GE: case GEU:
20426 /* If scc, we will have done a cror to put the bit in the
20427 unordered position. So test that bit. For integer, this is ! LT
20428 unless this is an scc insn. */
20429 return scc_p ? base_bit + 3 : base_bit;
20430
20431 case LE: case LEU:
20432 return scc_p ? base_bit + 3 : base_bit + 1;
20433
20434 default:
20435 return -1;
20436 }
20437 }
20438 \f
20439 /* Return the GOT register. */
20440
20441 rtx
20442 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
20443 {
20444 /* The second flow pass currently (June 1999) can't update
20445 regs_ever_live without disturbing other parts of the compiler, so
20446 update it here to make the prolog/epilogue code happy. */
20447 if (!can_create_pseudo_p ()
20448 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20449 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
20450
20451 crtl->uses_pic_offset_table = 1;
20452
20453 return pic_offset_table_rtx;
20454 }
20455 \f
20456 static rs6000_stack_t stack_info;
20457
20458 /* Function to init struct machine_function.
20459 This will be called, via a pointer variable,
20460 from push_function_context. */
20461
20462 static struct machine_function *
20463 rs6000_init_machine_status (void)
20464 {
20465 stack_info.reload_completed = 0;
20466 return ggc_cleared_alloc<machine_function> ();
20467 }
20468 \f
20469 #define INT_P(X) (CONST_INT_P (X) && GET_MODE (X) == VOIDmode)
20470
20471 /* Write out a function code label. */
20472
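/* For example, under an AIX-style ABI with dot symbols the code label
   for "foo" is emitted as ".foo"; without dot symbols the internal
   label prefix "L." is used instead.  */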
20473 void
20474 rs6000_output_function_entry (FILE *file, const char *fname)
20475 {
20476 if (fname[0] != '.')
20477 {
20478 switch (DEFAULT_ABI)
20479 {
20480 default:
20481 gcc_unreachable ();
20482
20483 case ABI_AIX:
20484 if (DOT_SYMBOLS)
20485 putc ('.', file);
20486 else
20487 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
20488 break;
20489
20490 case ABI_ELFv2:
20491 case ABI_V4:
20492 case ABI_DARWIN:
20493 break;
20494 }
20495 }
20496
20497 RS6000_OUTPUT_BASENAME (file, fname);
20498 }
20499
20500 /* Print an operand. Recognize special options, documented below. */
20501
20502 #if TARGET_ELF
20503 /* Access to .sdata2 through r2 (see -msdata=eabi in invoke.texi) is
20504 only introduced by the linker, when applying the sda21
20505 relocation. */
20506 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
20507 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
20508 #else
20509 #define SMALL_DATA_RELOC "sda21"
20510 #define SMALL_DATA_REG 0
20511 #endif
20512
20513 void
20514 print_operand (FILE *file, rtx x, int code)
20515 {
20516 int i;
20517 unsigned HOST_WIDE_INT uval;
20518
20519 switch (code)
20520 {
20521 /* %a is output_address. */
20522
20523 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
20524 output_operand. */
20525
20526 case 'D':
20527 /* Like 'J' but get to the GT bit only. */
20528 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20529 {
20530 output_operand_lossage ("invalid %%D value");
20531 return;
20532 }
20533
20534 /* Bit 1 is GT bit. */
20535 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
20536
20537 /* Add one for shift count in rlinm for scc. */
20538 fprintf (file, "%d", i + 1);
20539 return;
20540
20541 case 'e':
20542 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
20543 if (! INT_P (x))
20544 {
20545 output_operand_lossage ("invalid %%e value");
20546 return;
20547 }
20548
20549 uval = INTVAL (x);
20550 if ((uval & 0xffff) == 0 && uval != 0)
20551 putc ('s', file);
20552 return;
20553
20554 case 'E':
20555 /* X is a CR register. Print the number of the EQ bit of the CR */
20556 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20557 output_operand_lossage ("invalid %%E value");
20558 else
20559 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
20560 return;
20561
20562 case 'f':
20563 /* X is a CR register. Print the shift count needed to move it
20564 to the high-order four bits. */
20565 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20566 output_operand_lossage ("invalid %%f value");
20567 else
20568 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
20569 return;
20570
20571 case 'F':
20572 /* Similar, but print the count for the rotate in the opposite
20573 direction. */
20574 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20575 output_operand_lossage ("invalid %%F value");
20576 else
20577 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
20578 return;
20579
20580 case 'G':
20581 /* X is a constant integer. If it is negative, print "m",
20582 otherwise print "z". This is to make an aze or ame insn. */
20583 if (!CONST_INT_P (x))
20584 output_operand_lossage ("invalid %%G value");
20585 else if (INTVAL (x) >= 0)
20586 putc ('z', file);
20587 else
20588 putc ('m', file);
20589 return;
20590
20591 case 'h':
20592 /* If constant, output low-order five bits. Otherwise, write
20593 normally. */
20594 if (INT_P (x))
20595 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
20596 else
20597 print_operand (file, x, 0);
20598 return;
20599
20600 case 'H':
20601 /* If constant, output low-order six bits. Otherwise, write
20602 normally. */
20603 if (INT_P (x))
20604 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
20605 else
20606 print_operand (file, x, 0);
20607 return;
20608
20609 case 'I':
20610 /* Print `i' if this is a constant, else nothing. */
20611 if (INT_P (x))
20612 putc ('i', file);
20613 return;
20614
20615 case 'j':
20616 /* Write the bit number in CCR for jump. */
20617 i = ccr_bit (x, 0);
20618 if (i == -1)
20619 output_operand_lossage ("invalid %%j code");
20620 else
20621 fprintf (file, "%d", i);
20622 return;
20623
20624 case 'J':
20625 /* Similar, but add one for shift count in rlinm for scc and pass
20626 scc flag to `ccr_bit'. */
20627 i = ccr_bit (x, 1);
20628 if (i == -1)
20629 output_operand_lossage ("invalid %%J code");
20630 else
20631 /* If we want bit 31, write a shift count of zero, not 32. */
20632 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20633 return;
20634
20635 case 'k':
20636 /* X must be a constant. Write the 1's complement of the
20637 constant. */
20638 if (! INT_P (x))
20639 output_operand_lossage ("invalid %%k value");
20640 else
20641 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
20642 return;
20643
20644 case 'K':
20645 /* X must be a symbolic constant on ELF. Write an
20646 expression suitable for an 'addi' that adds in the low 16
20647 bits of the MEM. */
20648 if (GET_CODE (x) == CONST)
20649 {
20650 if (GET_CODE (XEXP (x, 0)) != PLUS
20651 || (!SYMBOL_REF_P (XEXP (XEXP (x, 0), 0))
20652 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
20653 || !CONST_INT_P (XEXP (XEXP (x, 0), 1)))
20654 output_operand_lossage ("invalid %%K value");
20655 }
20656 print_operand_address (file, x);
20657 fputs ("@l", file);
20658 return;
20659
20660 /* %l is output_asm_label. */
20661
20662 case 'L':
20663 /* Write second word of DImode or DFmode reference. Works on register
20664 or non-indexed memory only. */
20665 if (REG_P (x))
20666 fputs (reg_names[REGNO (x) + 1], file);
20667 else if (MEM_P (x))
20668 {
20669 machine_mode mode = GET_MODE (x);
20670 /* Handle possible auto-increment. Since it is pre-increment and
20671 we have already done it, we can just use an offset of one word. */
20672 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20673 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20674 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20675 UNITS_PER_WORD));
20676 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20677 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
20678 UNITS_PER_WORD));
20679 else
20680 output_address (mode, XEXP (adjust_address_nv (x, SImode,
20681 UNITS_PER_WORD),
20682 0));
20683
20684 if (small_data_operand (x, GET_MODE (x)))
20685 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20686 reg_names[SMALL_DATA_REG]);
20687 }
20688 return;
20689
20690 case 'N': /* Unused */
20691 /* Write the number of elements in the vector times 4. */
20692 if (GET_CODE (x) != PARALLEL)
20693 output_operand_lossage ("invalid %%N value");
20694 else
20695 fprintf (file, "%d", XVECLEN (x, 0) * 4);
20696 return;
20697
20698 case 'O': /* Unused */
20699 /* Similar, but subtract 1 first. */
20700 if (GET_CODE (x) != PARALLEL)
20701 output_operand_lossage ("invalid %%O value");
20702 else
20703 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
20704 return;
20705
20706 case 'p':
20707 /* X is a CONST_INT that is a power of two. Output the logarithm. */
20708 if (! INT_P (x)
20709 || INTVAL (x) < 0
20710 || (i = exact_log2 (INTVAL (x))) < 0)
20711 output_operand_lossage ("invalid %%p value");
20712 else
20713 fprintf (file, "%d", i);
20714 return;
20715
20716 case 'P':
20717 /* The operand must be an indirect memory reference. The result
20718 is the register name. */
20719 if (!MEM_P (x) || !REG_P (XEXP (x, 0))
20720 || REGNO (XEXP (x, 0)) >= 32)
20721 output_operand_lossage ("invalid %%P value");
20722 else
20723 fputs (reg_names[REGNO (XEXP (x, 0))], file);
20724 return;
20725
20726 case 'q':
20727 /* This outputs the logical code corresponding to a boolean
20728 expression. The expression may have one or both operands
20729 negated (if one, only the first one). For condition register
20730 logical operations, it will also treat the negated
20731 CR codes as NOTs, but not handle NOTs of them. */
20732 {
20733 const char *const *t = 0;
20734 const char *s;
20735 enum rtx_code code = GET_CODE (x);
20736 static const char * const tbl[3][3] = {
20737 { "and", "andc", "nor" },
20738 { "or", "orc", "nand" },
20739 { "xor", "eqv", "xor" } };
20740
20741 if (code == AND)
20742 t = tbl[0];
20743 else if (code == IOR)
20744 t = tbl[1];
20745 else if (code == XOR)
20746 t = tbl[2];
20747 else
20748 output_operand_lossage ("invalid %%q value");
20749
20750 if (GET_CODE (XEXP (x, 0)) != NOT)
20751 s = t[0];
20752 else
20753 {
20754 if (GET_CODE (XEXP (x, 1)) == NOT)
20755 s = t[2];
20756 else
20757 s = t[1];
20758 }
20759
20760 fputs (s, file);
20761 }
20762 return;
20763
20764 case 'Q':
20765 if (! TARGET_MFCRF)
20766 return;
20767 fputc (',', file);
20768 /* FALLTHRU */
20769
20770 case 'R':
20771 /* X is a CR register. Print the mask for `mtcrf'. */
20772 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20773 output_operand_lossage ("invalid %%R value");
20774 else
20775 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
20776 return;
20777
20778 case 's':
20779 /* Low 5 bits of 32 - value */
20780 if (! INT_P (x))
20781 output_operand_lossage ("invalid %%s value");
20782 else
20783 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
20784 return;
20785
20786 case 't':
20787 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
20788 if (!REG_P (x) || !CR_REGNO_P (REGNO (x)))
20789 {
20790 output_operand_lossage ("invalid %%t value");
20791 return;
20792 }
20793
20794 /* Bit 3 is OV bit. */
20795 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
20796
20797 /* If we want bit 31, write a shift count of zero, not 32. */
20798 fprintf (file, "%d", i == 31 ? 0 : i + 1);
20799 return;
20800
20801 case 'T':
20802 /* Print the symbolic name of a branch target register. */
20803 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20804 x = XVECEXP (x, 0, 0);
20805 if (!REG_P (x) || (REGNO (x) != LR_REGNO
20806 && REGNO (x) != CTR_REGNO))
20807 output_operand_lossage ("invalid %%T value");
20808 else if (REGNO (x) == LR_REGNO)
20809 fputs ("lr", file);
20810 else
20811 fputs ("ctr", file);
20812 return;
20813
20814 case 'u':
20815 /* High-order or low-order 16 bits of constant, whichever is non-zero,
20816 for use in unsigned operand. */
20817 if (! INT_P (x))
20818 {
20819 output_operand_lossage ("invalid %%u value");
20820 return;
20821 }
20822
20823 uval = INTVAL (x);
20824 if ((uval & 0xffff) == 0)
20825 uval >>= 16;
20826
20827 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
20828 return;
20829
20830 case 'v':
20831 /* High-order 16 bits of constant for use in signed operand. */
20832 if (! INT_P (x))
20833 output_operand_lossage ("invalid %%v value");
20834 else
20835 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
20836 (INTVAL (x) >> 16) & 0xffff);
20837 return;
20838
20839 case 'U':
20840 /* Print `u' if this has an auto-increment or auto-decrement. */
20841 if (MEM_P (x)
20842 && (GET_CODE (XEXP (x, 0)) == PRE_INC
20843 || GET_CODE (XEXP (x, 0)) == PRE_DEC
20844 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
20845 putc ('u', file);
20846 return;
20847
20848 case 'V':
20849 /* Print the trap code for this operand. */
20850 switch (GET_CODE (x))
20851 {
20852 case EQ:
20853 fputs ("eq", file); /* 4 */
20854 break;
20855 case NE:
20856 fputs ("ne", file); /* 24 */
20857 break;
20858 case LT:
20859 fputs ("lt", file); /* 16 */
20860 break;
20861 case LE:
20862 fputs ("le", file); /* 20 */
20863 break;
20864 case GT:
20865 fputs ("gt", file); /* 8 */
20866 break;
20867 case GE:
20868 fputs ("ge", file); /* 12 */
20869 break;
20870 case LTU:
20871 fputs ("llt", file); /* 2 */
20872 break;
20873 case LEU:
20874 fputs ("lle", file); /* 6 */
20875 break;
20876 case GTU:
20877 fputs ("lgt", file); /* 1 */
20878 break;
20879 case GEU:
20880 fputs ("lge", file); /* 5 */
20881 break;
20882 default:
20883 output_operand_lossage ("invalid %%V value");
20884 }
20885 break;
20886
20887 case 'w':
20888 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
20889 normally. */
20890 if (INT_P (x))
20891 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
20892 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
20893 else
20894 print_operand (file, x, 0);
20895 return;
20896
20897 case 'x':
20898 /* X is a FPR or Altivec register used in a VSX context. */
20899 if (!REG_P (x) || !VSX_REGNO_P (REGNO (x)))
20900 output_operand_lossage ("invalid %%x value");
20901 else
20902 {
20903 int reg = REGNO (x);
20904 int vsx_reg = (FP_REGNO_P (reg)
20905 ? reg - 32
20906 : reg - FIRST_ALTIVEC_REGNO + 32);
20907
20908 #ifdef TARGET_REGNAMES
20909 if (TARGET_REGNAMES)
20910 fprintf (file, "%%vs%d", vsx_reg);
20911 else
20912 #endif
20913 fprintf (file, "%d", vsx_reg);
20914 }
20915 return;
20916
20917 case 'X':
20918 if (MEM_P (x)
20919 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
20920 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
20921 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
20922 putc ('x', file);
20923 return;
20924
20925 case 'Y':
20926 /* Like 'L', for third word of TImode/PTImode */
20927 if (REG_P (x))
20928 fputs (reg_names[REGNO (x) + 2], file);
20929 else if (MEM_P (x))
20930 {
20931 machine_mode mode = GET_MODE (x);
20932 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20933 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20934 output_address (mode, plus_constant (Pmode,
20935 XEXP (XEXP (x, 0), 0), 8));
20936 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20937 output_address (mode, plus_constant (Pmode,
20938 XEXP (XEXP (x, 0), 0), 8));
20939 else
20940 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
20941 if (small_data_operand (x, GET_MODE (x)))
20942 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20943 reg_names[SMALL_DATA_REG]);
20944 }
20945 return;
20946
20947 case 'z':
20948 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
20949 x = XVECEXP (x, 0, 1);
20950 /* X is a SYMBOL_REF. Write out the name preceded by a
20951 period and without any trailing data in brackets. Used for function
20952 names. If we are configured for System V (or the embedded ABI) on
20953 the PowerPC, do not emit the period, since those systems do not use
20954 TOCs and the like. */
20955 if (!SYMBOL_REF_P (x))
20956 {
20957 output_operand_lossage ("invalid %%z value");
20958 return;
20959 }
20960
20961 /* For macho, check to see if we need a stub. */
20962 if (TARGET_MACHO)
20963 {
20964 const char *name = XSTR (x, 0);
20965 #if TARGET_MACHO
20966 if (darwin_emit_branch_islands
20967 && MACHOPIC_INDIRECT
20968 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
20969 name = machopic_indirection_name (x, /*stub_p=*/true);
20970 #endif
20971 assemble_name (file, name);
20972 }
20973 else if (!DOT_SYMBOLS)
20974 assemble_name (file, XSTR (x, 0));
20975 else
20976 rs6000_output_function_entry (file, XSTR (x, 0));
20977 return;
20978
20979 case 'Z':
20980 /* Like 'L', for last word of TImode/PTImode. */
20981 if (REG_P (x))
20982 fputs (reg_names[REGNO (x) + 3], file);
20983 else if (MEM_P (x))
20984 {
20985 machine_mode mode = GET_MODE (x);
20986 if (GET_CODE (XEXP (x, 0)) == PRE_INC
20987 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
20988 output_address (mode, plus_constant (Pmode,
20989 XEXP (XEXP (x, 0), 0), 12));
20990 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
20991 output_address (mode, plus_constant (Pmode,
20992 XEXP (XEXP (x, 0), 0), 12));
20993 else
20994 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
20995 if (small_data_operand (x, GET_MODE (x)))
20996 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
20997 reg_names[SMALL_DATA_REG]);
20998 }
20999 return;
21000
21001 /* Print AltiVec memory operand. */
21002 case 'y':
21003 {
21004 rtx tmp;
21005
21006 gcc_assert (MEM_P (x));
21007
21008 tmp = XEXP (x, 0);
21009
21010 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
21011 && GET_CODE (tmp) == AND
21012 && CONST_INT_P (XEXP (tmp, 1))
21013 && INTVAL (XEXP (tmp, 1)) == -16)
21014 tmp = XEXP (tmp, 0);
21015 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
21016 && GET_CODE (tmp) == PRE_MODIFY)
21017 tmp = XEXP (tmp, 1);
21018 if (REG_P (tmp))
21019 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
21020 else
21021 {
21022 if (GET_CODE (tmp) != PLUS
21023 || !REG_P (XEXP (tmp, 0))
21024 || !REG_P (XEXP (tmp, 1)))
21025 {
21026 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
21027 break;
21028 }
21029
21030 if (REGNO (XEXP (tmp, 0)) == 0)
21031 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
21032 reg_names[ REGNO (XEXP (tmp, 0)) ]);
21033 else
21034 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
21035 reg_names[ REGNO (XEXP (tmp, 1)) ]);
21036 }
21037 break;
21038 }
21039
21040 case 0:
21041 if (REG_P (x))
21042 fprintf (file, "%s", reg_names[REGNO (x)]);
21043 else if (MEM_P (x))
21044 {
21045 /* We need to handle PRE_INC and PRE_DEC here, since we need to
21046 know the width from the mode. */
21047 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
21048 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
21049 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21050 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
21051 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
21052 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
21053 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
21054 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
21055 else
21056 output_address (GET_MODE (x), XEXP (x, 0));
21057 }
21058 else if (toc_relative_expr_p (x, false,
21059 &tocrel_base_oac, &tocrel_offset_oac))
21060 /* This hack along with a corresponding hack in
21061 rs6000_output_addr_const_extra arranges to output addends
21062 where the assembler expects to find them. eg.
21063 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
21064 without this hack would be output as "x@toc+4". We
21065 want "x+4@toc". */
21066 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21067 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD)
21068 output_addr_const (file, XVECEXP (x, 0, 0));
21069 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PLTSEQ)
21070 output_addr_const (file, XVECEXP (x, 0, 1));
21071 else
21072 output_addr_const (file, x);
21073 return;
21074
21075 case '&':
21076 if (const char *name = get_some_local_dynamic_name ())
21077 assemble_name (file, name);
21078 else
21079 output_operand_lossage ("'%%&' used without any "
21080 "local dynamic TLS references");
21081 return;
21082
21083 default:
21084 output_operand_lossage ("invalid %%xn code");
21085 }
21086 }
21087 \f
21088 /* Print the address of an operand. */
21089
21090 void
21091 print_operand_address (FILE *file, rtx x)
21092 {
21093 if (REG_P (x))
21094 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
21095 else if (SYMBOL_REF_P (x) || GET_CODE (x) == CONST
21096 || GET_CODE (x) == LABEL_REF)
21097 {
21098 output_addr_const (file, x);
21099 if (small_data_operand (x, GET_MODE (x)))
21100 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
21101 reg_names[SMALL_DATA_REG]);
21102 else
21103 gcc_assert (!TARGET_TOC);
21104 }
21105 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21106 && REG_P (XEXP (x, 1)))
21107 {
21108 if (REGNO (XEXP (x, 0)) == 0)
21109 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
21110 reg_names[ REGNO (XEXP (x, 0)) ]);
21111 else
21112 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
21113 reg_names[ REGNO (XEXP (x, 1)) ]);
21114 }
21115 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
21116 && CONST_INT_P (XEXP (x, 1)))
21117 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
21118 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
21119 #if TARGET_MACHO
21120 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21121 && CONSTANT_P (XEXP (x, 1)))
21122 {
21123 fprintf (file, "lo16(");
21124 output_addr_const (file, XEXP (x, 1));
21125 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21126 }
21127 #endif
21128 #if TARGET_ELF
21129 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
21130 && CONSTANT_P (XEXP (x, 1)))
21131 {
21132 output_addr_const (file, XEXP (x, 1));
21133 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
21134 }
21135 #endif
21136 else if (toc_relative_expr_p (x, false, &tocrel_base_oac, &tocrel_offset_oac))
21137 {
21138 /* This hack along with a corresponding hack in
21139 rs6000_output_addr_const_extra arranges to output addends
21140 where the assembler expects to find them. eg.
21141 (lo_sum (reg 9)
21142 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
21143 without this hack would be output as "x@toc+8@l(9)". We
21144 want "x+8@toc@l(9)". */
21145 output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
21146 if (GET_CODE (x) == LO_SUM)
21147 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
21148 else
21149 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
21150 }
21151 else
21152 output_addr_const (file, x);
21153 }
21154 \f
21155 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
21156
21157 static bool
21158 rs6000_output_addr_const_extra (FILE *file, rtx x)
21159 {
21160 if (GET_CODE (x) == UNSPEC)
21161 switch (XINT (x, 1))
21162 {
21163 case UNSPEC_TOCREL:
21164 gcc_checking_assert (SYMBOL_REF_P (XVECEXP (x, 0, 0))
21165 && REG_P (XVECEXP (x, 0, 1))
21166 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
21167 output_addr_const (file, XVECEXP (x, 0, 0));
21168 if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
21169 {
21170 if (INTVAL (tocrel_offset_oac) >= 0)
21171 fprintf (file, "+");
21172 output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
21173 }
21174 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
21175 {
21176 putc ('-', file);
21177 assemble_name (file, toc_label_name);
21178 need_toc_init = 1;
21179 }
21180 else if (TARGET_ELF)
21181 fputs ("@toc", file);
21182 return true;
21183
21184 #if TARGET_MACHO
21185 case UNSPEC_MACHOPIC_OFFSET:
21186 output_addr_const (file, XVECEXP (x, 0, 0));
21187 putc ('-', file);
21188 machopic_output_function_base_name (file);
21189 return true;
21190 #endif
21191 }
21192 return false;
21193 }
21194 \f
21195 /* Target hook for assembling integer objects. The PowerPC version has
21196 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
21197 is defined. It also needs to handle DI-mode objects on 64-bit
21198 targets. */
21199
21200 static bool
21201 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
21202 {
21203 #ifdef RELOCATABLE_NEEDS_FIXUP
21204 /* Special handling for SI values. */
21205 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
21206 {
21207 static int recurse = 0;
21208
21209 /* For -mrelocatable, we mark all addresses that need to be fixed up in
21210 the .fixup section. Since the TOC section is already relocated, we
21211 don't need to mark it here. We used to skip the text section, but it
21212 should never be valid for relocated addresses to be placed in the text
21213 section. */
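/* The output for an address constant SYM looks roughly like:
       .LCPn:
               .long (SYM)@fixup
               .section ".fixup","aw"
               .align 2
               .long .LCPn
               .previous  */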
21214 if (DEFAULT_ABI == ABI_V4
21215 && (TARGET_RELOCATABLE || flag_pic > 1)
21216 && in_section != toc_section
21217 && !recurse
21218 && !CONST_SCALAR_INT_P (x)
21219 && CONSTANT_P (x))
21220 {
21221 char buf[256];
21222
21223 recurse = 1;
21224 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
21225 fixuplabelno++;
21226 ASM_OUTPUT_LABEL (asm_out_file, buf);
21227 fprintf (asm_out_file, "\t.long\t(");
21228 output_addr_const (asm_out_file, x);
21229 fprintf (asm_out_file, ")@fixup\n");
21230 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
21231 ASM_OUTPUT_ALIGN (asm_out_file, 2);
21232 fprintf (asm_out_file, "\t.long\t");
21233 assemble_name (asm_out_file, buf);
21234 fprintf (asm_out_file, "\n\t.previous\n");
21235 recurse = 0;
21236 return true;
21237 }
21238 /* Remove initial .'s to turn a -mcall-aixdesc function
21239 address into the address of the descriptor, not the function
21240 itself. */
21241 else if (SYMBOL_REF_P (x)
21242 && XSTR (x, 0)[0] == '.'
21243 && DEFAULT_ABI == ABI_AIX)
21244 {
21245 const char *name = XSTR (x, 0);
21246 while (*name == '.')
21247 name++;
21248
21249 fprintf (asm_out_file, "\t.long\t%s\n", name);
21250 return true;
21251 }
21252 }
21253 #endif /* RELOCATABLE_NEEDS_FIXUP */
21254 return default_assemble_integer (x, size, aligned_p);
21255 }
21256
21257 /* Return a template string for assembly to emit when making an
21258 external call. FUNOP is the call mem argument operand number. */
21259
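/* For example, with FUNOP == 1 an ELFv2 call is rendered as
   "bl %z1\n\tnop", where the nop fills the linker's TOC-restore slot,
   and an ABI_V4 PIC call as "bl %z1@plt".  */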
21260 static const char *
21261 rs6000_call_template_1 (rtx *operands, unsigned int funop, bool sibcall)
21262 {
21263 /* -Wformat-overflow workaround, without which gcc thinks that %u
21264 might produce 10 digits. */
21265 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21266
21267 char arg[12];
21268 arg[0] = 0;
21269 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21270 {
21271 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21272 sprintf (arg, "(%%%u@tlsgd)", funop + 1);
21273 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21274 sprintf (arg, "(%%&@tlsld)");
21275 else
21276 gcc_unreachable ();
21277 }
21278
21279 /* The magic 32768 offset here corresponds to the offset of
21280 r30 in .got2, as given by LCTOC1. See sysv4.h:toc_section. */
21281 char z[11];
21282 sprintf (z, "%%z%u%s", funop,
21283 (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic == 2
21284 ? "+32768" : ""));
21285
21286 static char str[32]; /* 2 spare */
21287 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21288 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21289 sibcall ? "" : "\n\tnop");
21290 else if (DEFAULT_ABI == ABI_V4)
21291 sprintf (str, "b%s %s%s%s", sibcall ? "" : "l", z, arg,
21292 flag_pic ? "@plt" : "");
21293 #if TARGET_MACHO
21294 /* If/when we remove the mlongcall opt, we can share the AIX/ELFv2 case. */
21295 else if (DEFAULT_ABI == ABI_DARWIN)
21296 {
21297 /* The cookie is in operand funop + 2. */
21298 gcc_checking_assert (GET_CODE (operands[funop + 2]) == CONST_INT);
21299 int cookie = INTVAL (operands[funop + 2]);
21300 if (cookie & CALL_LONG)
21301 {
21302 tree funname = get_identifier (XSTR (operands[funop], 0));
21303 tree labelname = get_prev_label (funname);
21304 gcc_checking_assert (labelname && !sibcall);
21305
21306 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
21307 instruction will reach 'foo', otherwise link as 'bl L42'".
21308 "L42" should be a 'branch island', that will do a far jump to
21309 'foo'. Branch islands are generated in
21310 macho_branch_islands(). */
21311 sprintf (str, "jbsr %%z%u,%.10s", funop,
21312 IDENTIFIER_POINTER (labelname));
21313 }
21314 else
21315 /* Same as AIX or ELFv2, except to keep backwards compat, no nop
21316 after the call. */
21317 sprintf (str, "b%s %s%s", sibcall ? "" : "l", z, arg);
21318 }
21319 #endif
21320 else
21321 gcc_unreachable ();
21322 return str;
21323 }
21324
21325 const char *
21326 rs6000_call_template (rtx *operands, unsigned int funop)
21327 {
21328 return rs6000_call_template_1 (operands, funop, false);
21329 }
21330
21331 const char *
21332 rs6000_sibcall_template (rtx *operands, unsigned int funop)
21333 {
21334 return rs6000_call_template_1 (operands, funop, true);
21335 }
21336
21337 /* As above, for indirect calls. */
21338
21339 static const char *
21340 rs6000_indirect_call_template_1 (rtx *operands, unsigned int funop,
21341 bool sibcall)
21342 {
21343 /* -Wformat-overflow workaround, without which gcc thinks that %u
21344 might produce 10 digits. Note that -Wformat-overflow will not
21345 currently warn here for str[], so do not rely on a warning to
21346 ensure str[] is correctly sized. */
21347 gcc_assert (funop <= MAX_RECOG_OPERANDS);
21348
21349 /* Currently, funop is either 0 or 1. The maximum string is always
21350 a !speculate 64-bit __tls_get_addr call.
21351
21352 ABI_AIX:
21353 . 9 ld 2,%3\n\t
21354 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21355 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21356 . 9 crset 2\n\t
21357 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21358 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21359 . 10 beq%T1l-\n\t
21360 . 10 ld 2,%4(1)
21361 .---
21362 .151
21363
21364 ABI_ELFv2:
21365 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21366 . 29 .reloc .,R_PPC64_PLTSEQ,%z1\n\t
21367 . 9 crset 2\n\t
21368 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21369 . 30 .reloc .,R_PPC64_PLTCALL,%z1\n\t
21370 . 10 beq%T1l-\n\t
21371 . 10 ld 2,%3(1)
21372 .---
21373 .142
21374
21375 ABI_V4:
21376 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21377 . 35 .reloc .,R_PPC64_PLTSEQ,%z1+32768\n\t
21378 . 9 crset 2\n\t
21379 . 27 .reloc .,R_PPC64_TLSGD,%2\n\t
21380 . 36 .reloc .,R_PPC64_PLTCALL,%z1+32768\n\t
21381 . 8 beq%T1l-
21382 .---
21383 .141 */
21384 static char str[160]; /* 8 spare */
21385 char *s = str;
21386 const char *ptrload = TARGET_64BIT ? "d" : "wz";
21387
21388 if (DEFAULT_ABI == ABI_AIX)
21389 s += sprintf (s,
21390 "l%s 2,%%%u\n\t",
21391 ptrload, funop + 2);
21392
21393 /* We don't need the extra code to stop indirect call speculation if
21394 calling via LR. */
21395 bool speculate = (TARGET_MACHO
21396 || rs6000_speculate_indirect_jumps
21397 || (REG_P (operands[funop])
21398 && REGNO (operands[funop]) == LR_REGNO));
21399
21400 if (TARGET_PLTSEQ && GET_CODE (operands[funop]) == UNSPEC)
21401 {
21402 const char *rel64 = TARGET_64BIT ? "64" : "";
21403 char tls[29];
21404 tls[0] = 0;
21405 if (TARGET_TLS_MARKERS && GET_CODE (operands[funop + 1]) == UNSPEC)
21406 {
21407 if (XINT (operands[funop + 1], 1) == UNSPEC_TLSGD)
21408 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%%u\n\t",
21409 rel64, funop + 1);
21410 else if (XINT (operands[funop + 1], 1) == UNSPEC_TLSLD)
21411 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21412 rel64);
21413 else
21414 gcc_unreachable ();
21415 }
21416
21417 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21418 && flag_pic == 2 ? "+32768" : "");
21419 if (!speculate)
21420 {
21421 s += sprintf (s,
21422 "%s.reloc .,R_PPC%s_PLTSEQ,%%z%u%s\n\t",
21423 tls, rel64, funop, addend);
21424 s += sprintf (s, "crset 2\n\t");
21425 }
21426 s += sprintf (s,
21427 "%s.reloc .,R_PPC%s_PLTCALL,%%z%u%s\n\t",
21428 tls, rel64, funop, addend);
21429 }
21430 else if (!speculate)
21431 s += sprintf (s, "crset 2\n\t");
21432
21433 if (DEFAULT_ABI == ABI_AIX)
21434 {
21435 if (speculate)
21436 sprintf (s,
21437 "b%%T%ul\n\t"
21438 "l%s 2,%%%u(1)",
21439 funop, ptrload, funop + 3);
21440 else
21441 sprintf (s,
21442 "beq%%T%ul-\n\t"
21443 "l%s 2,%%%u(1)",
21444 funop, ptrload, funop + 3);
21445 }
21446 else if (DEFAULT_ABI == ABI_ELFv2)
21447 {
21448 if (speculate)
21449 sprintf (s,
21450 "b%%T%ul\n\t"
21451 "l%s 2,%%%u(1)",
21452 funop, ptrload, funop + 2);
21453 else
21454 sprintf (s,
21455 "beq%%T%ul-\n\t"
21456 "l%s 2,%%%u(1)",
21457 funop, ptrload, funop + 2);
21458 }
21459 else
21460 {
21461 if (speculate)
21462 sprintf (s,
21463 "b%%T%u%s",
21464 funop, sibcall ? "" : "l");
21465 else
21466 sprintf (s,
21467 "beq%%T%u%s-%s",
21468 funop, sibcall ? "" : "l", sibcall ? "\n\tb $" : "");
21469 }
21470 return str;
21471 }
21472
21473 const char *
21474 rs6000_indirect_call_template (rtx *operands, unsigned int funop)
21475 {
21476 return rs6000_indirect_call_template_1 (operands, funop, false);
21477 }
21478
21479 const char *
21480 rs6000_indirect_sibcall_template (rtx *operands, unsigned int funop)
21481 {
21482 return rs6000_indirect_call_template_1 (operands, funop, true);
21483 }
21484
21485 #if HAVE_AS_PLTSEQ
21486 /* Output indirect call insns.
21487 WHICH is 0 for tocsave, 1 for plt16_ha, 2 for plt16_lo, 3 for mtctr. */
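/* E.g. WHICH == 3 on a 64-bit ELFv2 target emits
     .reloc .,R_PPC64_PLTSEQ,%z2
     mtctr %1
   marking the mtctr as part of an inline PLT call sequence.  */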
21488 const char *
21489 rs6000_pltseq_template (rtx *operands, int which)
21490 {
21491 const char *rel64 = TARGET_64BIT ? "64" : "";
21492 char tls[28];
21493 tls[0] = 0;
21494 if (TARGET_TLS_MARKERS && GET_CODE (operands[3]) == UNSPEC)
21495 {
21496 if (XINT (operands[3], 1) == UNSPEC_TLSGD)
21497 sprintf (tls, ".reloc .,R_PPC%s_TLSGD,%%3\n\t",
21498 rel64);
21499 else if (XINT (operands[3], 1) == UNSPEC_TLSLD)
21500 sprintf (tls, ".reloc .,R_PPC%s_TLSLD,%%&\n\t",
21501 rel64);
21502 else
21503 gcc_unreachable ();
21504 }
21505
21506 gcc_assert (DEFAULT_ABI == ABI_ELFv2 || DEFAULT_ABI == ABI_V4);
21507 static char str[96]; /* 15 spare */
21508 const char *off = WORDS_BIG_ENDIAN ? "+2" : "";
21509 const char *addend = (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT
21510 && flag_pic == 2 ? "+32768" : "");
21511 switch (which)
21512 {
21513 case 0:
21514 sprintf (str,
21515 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2\n\t"
21516 "st%s",
21517 tls, rel64, TARGET_64BIT ? "d 2,24(1)" : "w 2,12(1)");
21518 break;
21519 case 1:
21520 if (DEFAULT_ABI == ABI_V4 && !flag_pic)
21521 sprintf (str,
21522 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2\n\t"
21523 "lis %%0,0",
21524 tls, off, rel64);
21525 else
21526 sprintf (str,
21527 "%s.reloc .%s,R_PPC%s_PLT16_HA,%%z2%s\n\t"
21528 "addis %%0,%%1,0",
21529 tls, off, rel64, addend);
21530 break;
21531 case 2:
21532 sprintf (str,
21533 "%s.reloc .%s,R_PPC%s_PLT16_LO%s,%%z2%s\n\t"
21534 "l%s %%0,0(%%1)",
21535 tls, off, rel64, TARGET_64BIT ? "_DS" : "", addend,
21536 TARGET_64BIT ? "d" : "wz");
21537 break;
21538 case 3:
21539 sprintf (str,
21540 "%s.reloc .,R_PPC%s_PLTSEQ,%%z2%s\n\t"
21541 "mtctr %%1",
21542 tls, rel64, addend);
21543 break;
21544 default:
21545 gcc_unreachable ();
21546 }
21547 return str;
21548 }
21549 #endif
21550
21551 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
21552 /* Emit an assembler directive to set symbol visibility for DECL to
21553 VISIBILITY_TYPE. */
21554
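/* For example, a hidden function "foo" under the AIX-style ELFv1 ABI
   with dot symbols gets two directives: ".hidden foo" for the
   descriptor and ".hidden .foo" for the code entry point.  */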
21555 static void
21556 rs6000_assemble_visibility (tree decl, int vis)
21557 {
21558 if (TARGET_XCOFF)
21559 return;
21560
21561 /* Functions need to have their entry point symbol visibility set as
21562 well as their descriptor symbol visibility. */
21563 if (DEFAULT_ABI == ABI_AIX
21564 && DOT_SYMBOLS
21565 && TREE_CODE (decl) == FUNCTION_DECL)
21566 {
21567 static const char * const visibility_types[] = {
21568 NULL, "protected", "hidden", "internal"
21569 };
21570
21571 const char *name, *type;
21572
21573 name = ((* targetm.strip_name_encoding)
21574 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
21575 type = visibility_types[vis];
21576
21577 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
21578 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
21579 }
21580 else
21581 default_assemble_visibility (decl, vis);
21582 }
21583 #endif
21584 \f
21585 enum rtx_code
21586 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
21587 {
21588 /* Reversal of FP compares takes care -- an ordered compare
21589 becomes an unordered compare and vice versa. */
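/* E.g. reverse_condition_maybe_unordered turns GE into UNLT rather
   than LT, so a NaN operand still takes the reversed branch.  */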
21590 if (mode == CCFPmode
21591 && (!flag_finite_math_only
21592 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
21593 || code == UNEQ || code == LTGT))
21594 return reverse_condition_maybe_unordered (code);
21595 else
21596 return reverse_condition (code);
21597 }
21598
21599 /* Generate a compare for CODE. Return a brand-new rtx that
21600 represents the result of the compare. */
21601
21602 static rtx
21603 rs6000_generate_compare (rtx cmp, machine_mode mode)
21604 {
21605 machine_mode comp_mode;
21606 rtx compare_result;
21607 enum rtx_code code = GET_CODE (cmp);
21608 rtx op0 = XEXP (cmp, 0);
21609 rtx op1 = XEXP (cmp, 1);
21610
21611 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21612 comp_mode = CCmode;
21613 else if (FLOAT_MODE_P (mode))
21614 comp_mode = CCFPmode;
21615 else if (code == GTU || code == LTU
21616 || code == GEU || code == LEU)
21617 comp_mode = CCUNSmode;
21618 else if ((code == EQ || code == NE)
21619 && unsigned_reg_p (op0)
21620 && (unsigned_reg_p (op1)
21621 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
21622 /* These are unsigned values, perhaps there will be a later
21623 ordering compare that can be shared with this one. */
21624 comp_mode = CCUNSmode;
21625 else
21626 comp_mode = CCmode;
21627
21628 /* If we have an unsigned compare, make sure we don't have a signed value as
21629 an immediate. */
21630 if (comp_mode == CCUNSmode && CONST_INT_P (op1)
21631 && INTVAL (op1) < 0)
21632 {
21633 op0 = copy_rtx_if_shared (op0);
21634 op1 = force_reg (GET_MODE (op0), op1);
21635 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
21636 }
21637
21638 /* First, the compare. */
21639 compare_result = gen_reg_rtx (comp_mode);
21640
21641 /* IEEE 128-bit support in VSX registers when we do not have hardware
21642 support. */
21643 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
21644 {
21645 rtx libfunc = NULL_RTX;
21646 bool check_nan = false;
21647 rtx dest;
21648
21649 switch (code)
21650 {
21651 case EQ:
21652 case NE:
21653 libfunc = optab_libfunc (eq_optab, mode);
21654 break;
21655
21656 case GT:
21657 case GE:
21658 libfunc = optab_libfunc (ge_optab, mode);
21659 break;
21660
21661 case LT:
21662 case LE:
21663 libfunc = optab_libfunc (le_optab, mode);
21664 break;
21665
21666 case UNORDERED:
21667 case ORDERED:
21668 libfunc = optab_libfunc (unord_optab, mode);
21669 code = (code == UNORDERED) ? NE : EQ;
21670 break;
21671
21672 case UNGE:
21673 case UNGT:
21674 check_nan = true;
21675 libfunc = optab_libfunc (ge_optab, mode);
21676 code = (code == UNGE) ? GE : GT;
21677 break;
21678
21679 case UNLE:
21680 case UNLT:
21681 check_nan = true;
21682 libfunc = optab_libfunc (le_optab, mode);
21683 code = (code == UNLE) ? LE : LT;
21684 break;
21685
21686 case UNEQ:
21687 case LTGT:
21688 check_nan = true;
21689 libfunc = optab_libfunc (eq_optab, mode);
21690 code = (code == UNEQ) ? EQ : NE;
21691 break;
21692
21693 default:
21694 gcc_unreachable ();
21695 }
21696
21697 gcc_assert (libfunc);
21698
21699 if (!check_nan)
21700 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21701 SImode, op0, mode, op1, mode);
21702
21703 /* The library signals an exception for signalling NaNs, so we need to
21704 handle isgreater, etc. by first checking isordered. */
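/* E.g. isgreaterequal (UNGE) is computed here as
   unordered (a, b) ? 1 : ge_libcall (a, b) >= 0.  */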
21705 else
21706 {
21707 rtx ne_rtx, normal_dest, unord_dest;
21708 rtx unord_func = optab_libfunc (unord_optab, mode);
21709 rtx join_label = gen_label_rtx ();
21710 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
21711 rtx unord_cmp = gen_reg_rtx (comp_mode);
21712
21713
21714 /* Test for either value being a NaN. */
21715 gcc_assert (unord_func);
21716 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
21717 SImode, op0, mode, op1, mode);
21718
21719 /* Set value (1) if either value is a NaN, and jump to the join
21720 label. */
21721 dest = gen_reg_rtx (SImode);
21722 emit_move_insn (dest, const1_rtx);
21723 emit_insn (gen_rtx_SET (unord_cmp,
21724 gen_rtx_COMPARE (comp_mode, unord_dest,
21725 const0_rtx)));
21726
21727 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
21728 emit_jump_insn (gen_rtx_SET (pc_rtx,
21729 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
21730 join_ref,
21731 pc_rtx)));
21732
21733 /* Do the normal comparison, knowing that the values are not
21734 NaNs. */
21735 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
21736 SImode, op0, mode, op1, mode);
21737
21738 emit_insn (gen_cstoresi4 (dest,
21739 gen_rtx_fmt_ee (code, SImode, normal_dest,
21740 const0_rtx),
21741 normal_dest, const0_rtx));
21742
21743 /* Join NaN and non-NaN paths. Compare dest against 0. */
21744 emit_label (join_label);
21745 code = NE;
21746 }
21747
21748 emit_insn (gen_rtx_SET (compare_result,
21749 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
21750 }
21751
21752 else
21753 {
21754 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
21755 CLOBBERs to match cmptf_internal2 pattern. */
21756 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
21757 && FLOAT128_IBM_P (GET_MODE (op0))
21758 && TARGET_HARD_FLOAT)
21759 emit_insn (gen_rtx_PARALLEL (VOIDmode,
21760 gen_rtvec (10,
21761 gen_rtx_SET (compare_result,
21762 gen_rtx_COMPARE (comp_mode, op0, op1)),
21763 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21764 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21765 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21766 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21767 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21768 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21769 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21770 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
21771 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
21772 else if (GET_CODE (op1) == UNSPEC
21773 && XINT (op1, 1) == UNSPEC_SP_TEST)
21774 {
21775 rtx op1b = XVECEXP (op1, 0, 0);
21776 comp_mode = CCEQmode;
21777 compare_result = gen_reg_rtx (CCEQmode);
21778 if (TARGET_64BIT)
21779 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
21780 else
21781 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
21782 }
21783 else
21784 emit_insn (gen_rtx_SET (compare_result,
21785 gen_rtx_COMPARE (comp_mode, op0, op1)));
21786 }
21787
21788 /* Some kinds of FP comparisons need an OR operation;
21789 under flag_finite_math_only we don't bother. */
21790 if (FLOAT_MODE_P (mode)
21791 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
21792 && !flag_finite_math_only
21793 && (code == LE || code == GE
21794 || code == UNEQ || code == LTGT
21795 || code == UNGT || code == UNLT))
21796 {
21797 enum rtx_code or1, or2;
21798 rtx or1_rtx, or2_rtx, compare2_rtx;
21799 rtx or_result = gen_reg_rtx (CCEQmode);
21800
21801 switch (code)
21802 {
21803 case LE: or1 = LT; or2 = EQ; break;
21804 case GE: or1 = GT; or2 = EQ; break;
21805 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
21806 case LTGT: or1 = LT; or2 = GT; break;
21807 case UNGT: or1 = UNORDERED; or2 = GT; break;
21808 case UNLT: or1 = UNORDERED; or2 = LT; break;
21809 default: gcc_unreachable ();
21810 }
21811 validate_condition_mode (or1, comp_mode);
21812 validate_condition_mode (or2, comp_mode);
21813 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
21814 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
21815 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
21816 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
21817 const_true_rtx);
21818 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
21819
21820 compare_result = or_result;
21821 code = EQ;
21822 }
21823
21824 validate_condition_mode (code, GET_MODE (compare_result));
21825
21826 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
21827 }
21828
21829 \f
21830 /* Return the diagnostic message string if the binary operation OP is
21831 not permitted on TYPE1 and TYPE2, NULL otherwise. */
21832
21833 static const char*
21834 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
21835 const_tree type1,
21836 const_tree type2)
21837 {
21838 machine_mode mode1 = TYPE_MODE (type1);
21839 machine_mode mode2 = TYPE_MODE (type2);
21840
21841 /* For complex modes, use the inner type. */
21842 if (COMPLEX_MODE_P (mode1))
21843 mode1 = GET_MODE_INNER (mode1);
21844
21845 if (COMPLEX_MODE_P (mode2))
21846 mode2 = GET_MODE_INNER (mode2);
21847
21848 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
21849 double to intermix unless -mfloat128-convert. */
21850 if (mode1 == mode2)
21851 return NULL;
21852
21853 if (!TARGET_FLOAT128_CVT)
21854 {
21855 if ((mode1 == KFmode && mode2 == IFmode)
21856 || (mode1 == IFmode && mode2 == KFmode))
21857 return N_("__float128 and __ibm128 cannot be used in the same "
21858 "expression");
21859
21860 if (TARGET_IEEEQUAD
21861 && ((mode1 == IFmode && mode2 == TFmode)
21862 || (mode1 == TFmode && mode2 == IFmode)))
21863 return N_("__ibm128 and long double cannot be used in the same "
21864 "expression");
21865
21866 if (!TARGET_IEEEQUAD
21867 && ((mode1 == KFmode && mode2 == TFmode)
21868 || (mode1 == TFmode && mode2 == KFmode)))
21869 return N_("__float128 and long double cannot be used in the same "
21870 "expression");
21871 }
21872
21873 return NULL;
21874 }
21875
21876 \f
21877 /* Expand floating point conversion to/from __float128 and __ibm128. */
21878
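/* For example, a signed DImode to KFmode conversion uses
   gen_float_kfdi2_hw when IEEE 128-bit hardware support is enabled,
   and otherwise falls back to the sfloat_optab libcall.  */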
21879 void
21880 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
21881 {
21882 machine_mode dest_mode = GET_MODE (dest);
21883 machine_mode src_mode = GET_MODE (src);
21884 convert_optab cvt = unknown_optab;
21885 bool do_move = false;
21886 rtx libfunc = NULL_RTX;
21887 rtx dest2;
21888 typedef rtx (*rtx_2func_t) (rtx, rtx);
21889 rtx_2func_t hw_convert = (rtx_2func_t)0;
21890 size_t kf_or_tf;
21891
21892 struct hw_conv_t {
21893 rtx_2func_t from_df;
21894 rtx_2func_t from_sf;
21895 rtx_2func_t from_si_sign;
21896 rtx_2func_t from_si_uns;
21897 rtx_2func_t from_di_sign;
21898 rtx_2func_t from_di_uns;
21899 rtx_2func_t to_df;
21900 rtx_2func_t to_sf;
21901 rtx_2func_t to_si_sign;
21902 rtx_2func_t to_si_uns;
21903 rtx_2func_t to_di_sign;
21904 rtx_2func_t to_di_uns;
21905 } hw_conversions[2] = {
21906 /* conversions to/from KFmode */
21907 {
21908 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
21909 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
21910 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
21911 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
21912 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
21913 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
21914 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
21915 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
21916 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
21917 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
21918 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
21919 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
21920 },
21921
21922 /* conversions to/from TFmode */
21923 {
21924 gen_extenddftf2_hw, /* TFmode <- DFmode. */
21925 gen_extendsftf2_hw, /* TFmode <- SFmode. */
21926 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
21927 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
21928 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
21929 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
21930 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
21931 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
21932 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
21933 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
21934 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
21935 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
21936 },
21937 };
21938
21939 if (dest_mode == src_mode)
21940 gcc_unreachable ();
21941
21942 /* Eliminate memory operations. */
21943 if (MEM_P (src))
21944 src = force_reg (src_mode, src);
21945
21946 if (MEM_P (dest))
21947 {
21948 rtx tmp = gen_reg_rtx (dest_mode);
21949 rs6000_expand_float128_convert (tmp, src, unsigned_p);
21950 rs6000_emit_move (dest, tmp, dest_mode);
21951 return;
21952 }
21953
21954 /* Convert to IEEE 128-bit floating point. */
21955 if (FLOAT128_IEEE_P (dest_mode))
21956 {
21957 if (dest_mode == KFmode)
21958 kf_or_tf = 0;
21959 else if (dest_mode == TFmode)
21960 kf_or_tf = 1;
21961 else
21962 gcc_unreachable ();
21963
21964 switch (src_mode)
21965 {
21966 case E_DFmode:
21967 cvt = sext_optab;
21968 hw_convert = hw_conversions[kf_or_tf].from_df;
21969 break;
21970
21971 case E_SFmode:
21972 cvt = sext_optab;
21973 hw_convert = hw_conversions[kf_or_tf].from_sf;
21974 break;
21975
21976 case E_KFmode:
21977 case E_IFmode:
21978 case E_TFmode:
21979 if (FLOAT128_IBM_P (src_mode))
21980 cvt = sext_optab;
21981 else
21982 do_move = true;
21983 break;
21984
21985 case E_SImode:
21986 if (unsigned_p)
21987 {
21988 cvt = ufloat_optab;
21989 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
21990 }
21991 else
21992 {
21993 cvt = sfloat_optab;
21994 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
21995 }
21996 break;
21997
21998 case E_DImode:
21999 if (unsigned_p)
22000 {
22001 cvt = ufloat_optab;
22002 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
22003 }
22004 else
22005 {
22006 cvt = sfloat_optab;
22007 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
22008 }
22009 break;
22010
22011 default:
22012 gcc_unreachable ();
22013 }
22014 }
22015
22016 /* Convert from IEEE 128-bit floating point. */
22017 else if (FLOAT128_IEEE_P (src_mode))
22018 {
22019 if (src_mode == KFmode)
22020 kf_or_tf = 0;
22021 else if (src_mode == TFmode)
22022 kf_or_tf = 1;
22023 else
22024 gcc_unreachable ();
22025
22026 switch (dest_mode)
22027 {
22028 case E_DFmode:
22029 cvt = trunc_optab;
22030 hw_convert = hw_conversions[kf_or_tf].to_df;
22031 break;
22032
22033 case E_SFmode:
22034 cvt = trunc_optab;
22035 hw_convert = hw_conversions[kf_or_tf].to_sf;
22036 break;
22037
22038 case E_KFmode:
22039 case E_IFmode:
22040 case E_TFmode:
22041 if (FLOAT128_IBM_P (dest_mode))
22042 cvt = trunc_optab;
22043 else
22044 do_move = true;
22045 break;
22046
22047 case E_SImode:
22048 if (unsigned_p)
22049 {
22050 cvt = ufix_optab;
22051 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
22052 }
22053 else
22054 {
22055 cvt = sfix_optab;
22056 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
22057 }
22058 break;
22059
22060 case E_DImode:
22061 if (unsigned_p)
22062 {
22063 cvt = ufix_optab;
22064 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
22065 }
22066 else
22067 {
22068 cvt = sfix_optab;
22069 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
22070 }
22071 break;
22072
22073 default:
22074 gcc_unreachable ();
22075 }
22076 }
22077
22078 /* Both IBM format. */
22079 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
22080 do_move = true;
22081
22082 else
22083 gcc_unreachable ();
22084
22085 /* Handle conversion between TFmode/KFmode/IFmode. */
22086 if (do_move)
22087 emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));
22088
22089 /* Handle conversion if we have hardware support. */
22090 else if (TARGET_FLOAT128_HW && hw_convert)
22091 emit_insn ((hw_convert) (dest, src));
22092
22093 /* Call an external function to do the conversion. */
22094 else if (cvt != unknown_optab)
22095 {
22096 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
22097 gcc_assert (libfunc != NULL_RTX);
22098
22099 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
22100 src, src_mode);
22101
22102 gcc_assert (dest2 != NULL_RTX);
22103 if (!rtx_equal_p (dest, dest2))
22104 emit_move_insn (dest, dest2);
22105 }
22106
22107 else
22108 gcc_unreachable ();
22109
22110 return;
22111 }
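
/* A hedged illustration of the dispatch above: for a TFmode <- DFmode
   conversion with TARGET_FLOAT128_HW set, hw_convert resolves to
   gen_extenddftf2_hw from the table and a single hardware convert insn
   is emitted; without hardware support, cvt = sext_optab and
   convert_optab_libfunc supplies the libgcc routine to call instead.  */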
22112
22113 \f
22114 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
22115 can be used as that dest register. Return the dest register. */
22116
22117 rtx
22118 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
22119 {
22120 if (op2 == const0_rtx)
22121 return op1;
22122
22123 if (GET_CODE (scratch) == SCRATCH)
22124 scratch = gen_reg_rtx (mode);
22125
22126 if (logical_operand (op2, mode))
22127 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
22128 else
22129 emit_insn (gen_rtx_SET (scratch,
22130 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
22131
22132 return scratch;
22133 }
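
/* For example (illustrative register numbers): with op1 in r3 and
   op2 = 0x1234, which is a logical_operand, the code above emits a
   single xori, so the scratch register is zero exactly when
   r3 == 0x1234; a constant that xori/xoris cannot encode is handled
   by adding its negation instead.  */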
22134
22135 void
22136 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
22137 {
22138 rtx condition_rtx;
22139 machine_mode op_mode;
22140 enum rtx_code cond_code;
22141 rtx result = operands[0];
22142
22143 condition_rtx = rs6000_generate_compare (operands[1], mode);
22144 cond_code = GET_CODE (condition_rtx);
22145
22146 if (cond_code == NE
22147 || cond_code == GE || cond_code == LE
22148 || cond_code == GEU || cond_code == LEU
22149 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
22150 {
22151 rtx not_result = gen_reg_rtx (CCEQmode);
22152 rtx not_op, rev_cond_rtx;
22153 machine_mode cc_mode;
22154
22155 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
22156
22157 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
22158 SImode, XEXP (condition_rtx, 0), const0_rtx);
22159 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
22160 emit_insn (gen_rtx_SET (not_result, not_op));
22161 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
22162 }
22163
22164 op_mode = GET_MODE (XEXP (operands[1], 0));
22165 if (op_mode == VOIDmode)
22166 op_mode = GET_MODE (XEXP (operands[1], 1));
22167
22168 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
22169 {
22170 PUT_MODE (condition_rtx, DImode);
22171 convert_move (result, condition_rtx, 0);
22172 }
22173 else
22174 {
22175 PUT_MODE (condition_rtx, SImode);
22176 emit_insn (gen_rtx_SET (result, condition_rtx));
22177 }
22178 }
22179
22180 /* Emit a conditional branch to the label in operands[3], testing the comparison in operands[0]. */
22181
22182 void
22183 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
22184 {
22185 rtx condition_rtx, loc_ref;
22186
22187 condition_rtx = rs6000_generate_compare (operands[0], mode);
22188 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
22189 emit_jump_insn (gen_rtx_SET (pc_rtx,
22190 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
22191 loc_ref, pc_rtx)));
22192 }
22193
22194 /* Return the string to output a conditional branch to LABEL, which is
22195 the operand template of the label, or NULL if the branch is really a
22196 conditional return.
22197
22198 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
22199 condition code register and its mode specifies what kind of
22200 comparison we made.
22201
22202 REVERSED is nonzero if we should reverse the sense of the comparison.
22203
22204 INSN is the insn. */
22205
22206 char *
22207 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
22208 {
22209 static char string[64];
22210 enum rtx_code code = GET_CODE (op);
22211 rtx cc_reg = XEXP (op, 0);
22212 machine_mode mode = GET_MODE (cc_reg);
22213 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
22214 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
22215 int really_reversed = reversed ^ need_longbranch;
22216 char *s = string;
22217 const char *ccode;
22218 const char *pred;
22219 rtx note;
22220
22221 validate_condition_mode (code, mode);
22222
22223 /* Work out which way this really branches. We could use
22224 reverse_condition_maybe_unordered here always but this
22225 makes the resulting assembler clearer. */
22226 if (really_reversed)
22227 {
22228 /* Reversal of FP compares needs care -- an ordered compare
22229 becomes an unordered compare and vice versa. */
22230 if (mode == CCFPmode)
22231 code = reverse_condition_maybe_unordered (code);
22232 else
22233 code = reverse_condition (code);
22234 }
22235
22236 switch (code)
22237 {
22238 /* Not all of these are actually distinct opcodes, but
22239 we distinguish them for clarity of the resulting assembler. */
22240 case NE: case LTGT:
22241 ccode = "ne"; break;
22242 case EQ: case UNEQ:
22243 ccode = "eq"; break;
22244 case GE: case GEU:
22245 ccode = "ge"; break;
22246 case GT: case GTU: case UNGT:
22247 ccode = "gt"; break;
22248 case LE: case LEU:
22249 ccode = "le"; break;
22250 case LT: case LTU: case UNLT:
22251 ccode = "lt"; break;
22252 case UNORDERED: ccode = "un"; break;
22253 case ORDERED: ccode = "nu"; break;
22254 case UNGE: ccode = "nl"; break;
22255 case UNLE: ccode = "ng"; break;
22256 default:
22257 gcc_unreachable ();
22258 }
22259
22260 /* Maybe we have a guess as to how likely the branch is. */
22261 pred = "";
22262 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
22263 if (note != NULL_RTX)
22264 {
22265 /* PROB is the difference from 50%. */
22266 int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
22267 .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;
22268
22269 /* Only hint for highly probable/improbable branches on newer cpus when
22270 we have real profile data, as static prediction overrides processor
22271 dynamic prediction. For older cpus we may as well always hint, but
22272 assume not taken for branches that are very close to 50% as a
22273 mispredicted taken branch is more expensive than a
22274 mispredicted not-taken branch. */
22275 if (rs6000_always_hint
22276 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
22277 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
22278 && br_prob_note_reliable_p (note)))
22279 {
22280 if (abs (prob) > REG_BR_PROB_BASE / 20
22281 && ((prob > 0) ^ need_longbranch))
22282 pred = "+";
22283 else
22284 pred = "-";
22285 }
22286 }
22287
22288 if (label == NULL)
22289 s += sprintf (s, "b%slr%s ", ccode, pred);
22290 else
22291 s += sprintf (s, "b%s%s ", ccode, pred);
22292
22293 /* We need to escape any '%' characters in the reg_names string.
22294 Assume they'd only be the first character.... */
22295 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
22296 *s++ = '%';
22297 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
22298
22299 if (label != NULL)
22300 {
22301 /* If the branch distance was too far, we may have to use an
22302 unconditional branch to go the distance. */
22303 if (need_longbranch)
22304 s += sprintf (s, ",$+8\n\tb %s", label);
22305 else
22306 s += sprintf (s, ",%s", label);
22307 }
22308
22309 return string;
22310 }
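
/* Illustrative output (hypothetical operands): a likely-taken EQ
   branch on cr7 within range yields "beq+ 7,.L2"; when the target is
   out of range the sense is inverted and an unconditional branch
   covers the distance, "bne 7,$+8" followed by "b .L2".  */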
22311
22312 /* Return insn for VSX or Altivec comparisons. */
22313
22314 static rtx
22315 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
22316 {
22317 rtx mask;
22318 machine_mode mode = GET_MODE (op0);
22319
22320 switch (code)
22321 {
22322 default:
22323 break;
22324
22325 case GE:
22326 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22327 return NULL_RTX;
22328 /* FALLTHRU */
22329
22330 case EQ:
22331 case GT:
22332 case GTU:
22333 case ORDERED:
22334 case UNORDERED:
22335 case UNEQ:
22336 case LTGT:
22337 mask = gen_reg_rtx (mode);
22338 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
22339 return mask;
22340 }
22341
22342 return NULL_RTX;
22343 }
22344
22345 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
22346 DMODE is the expected destination mode. This is a recursive function. */
22347
22348 static rtx
22349 rs6000_emit_vector_compare (enum rtx_code rcode,
22350 rtx op0, rtx op1,
22351 machine_mode dmode)
22352 {
22353 rtx mask;
22354 bool swap_operands = false;
22355 bool try_again = false;
22356
22357 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
22358 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
22359
22360 /* See if the comparison works as is. */
22361 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22362 if (mask)
22363 return mask;
22364
22365 switch (rcode)
22366 {
22367 case LT:
22368 rcode = GT;
22369 swap_operands = true;
22370 try_again = true;
22371 break;
22372 case LTU:
22373 rcode = GTU;
22374 swap_operands = true;
22375 try_again = true;
22376 break;
22377 case NE:
22378 case UNLE:
22379 case UNLT:
22380 case UNGE:
22381 case UNGT:
22382 /* Invert condition and try again.
22383 e.g., A != B becomes ~(A==B). */
22384 {
22385 enum rtx_code rev_code;
22386 enum insn_code nor_code;
22387 rtx mask2;
22388
22389 rev_code = reverse_condition_maybe_unordered (rcode);
22390 if (rev_code == UNKNOWN)
22391 return NULL_RTX;
22392
22393 nor_code = optab_handler (one_cmpl_optab, dmode);
22394 if (nor_code == CODE_FOR_nothing)
22395 return NULL_RTX;
22396
22397 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
22398 if (!mask2)
22399 return NULL_RTX;
22400
22401 mask = gen_reg_rtx (dmode);
22402 emit_insn (GEN_FCN (nor_code) (mask, mask2));
22403 return mask;
22404 }
22405 break;
22406 case GE:
22407 case GEU:
22408 case LE:
22409 case LEU:
22410 /* Try GT/GTU/LT/LTU OR EQ */
22411 {
22412 rtx c_rtx, eq_rtx;
22413 enum insn_code ior_code;
22414 enum rtx_code new_code;
22415
22416 switch (rcode)
22417 {
22418 case GE:
22419 new_code = GT;
22420 break;
22421
22422 case GEU:
22423 new_code = GTU;
22424 break;
22425
22426 case LE:
22427 new_code = LT;
22428 break;
22429
22430 case LEU:
22431 new_code = LTU;
22432 break;
22433
22434 default:
22435 gcc_unreachable ();
22436 }
22437
22438 ior_code = optab_handler (ior_optab, dmode);
22439 if (ior_code == CODE_FOR_nothing)
22440 return NULL_RTX;
22441
22442 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
22443 if (!c_rtx)
22444 return NULL_RTX;
22445
22446 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
22447 if (!eq_rtx)
22448 return NULL_RTX;
22449
22450 mask = gen_reg_rtx (dmode);
22451 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
22452 return mask;
22453 }
22454 break;
22455 default:
22456 return NULL_RTX;
22457 }
22458
22459 if (try_again)
22460 {
22461 if (swap_operands)
22462 std::swap (op0, op1);
22463
22464 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
22465 if (mask)
22466 return mask;
22467 }
22468
22469 /* You only get two chances. */
22470 return NULL_RTX;
22471 }
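
/* Two of the rewrites performed above, since the vector units only
   compare EQ/GT/GTU directly: LT is retried as GT with the operands
   swapped, and GE on integer vectors recurses to build (GT | EQ) via
   ior_optab; NE becomes the one's complement of EQ.  */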
22472
22473 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
22474 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
22475 operands for the relation operation COND. */
22476
22477 int
22478 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
22479 rtx cond, rtx cc_op0, rtx cc_op1)
22480 {
22481 machine_mode dest_mode = GET_MODE (dest);
22482 machine_mode mask_mode = GET_MODE (cc_op0);
22483 enum rtx_code rcode = GET_CODE (cond);
22484 machine_mode cc_mode = CCmode;
22485 rtx mask;
22486 rtx cond2;
22487 bool invert_move = false;
22488
22489 if (VECTOR_UNIT_NONE_P (dest_mode))
22490 return 0;
22491
22492 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
22493 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
22494
22495 switch (rcode)
22496 {
22497 /* Swap operands if we can, and fall back to doing the operation as
22498 specified, and doing a NOR to invert the test. */
22499 case NE:
22500 case UNLE:
22501 case UNLT:
22502 case UNGE:
22503 case UNGT:
22504 /* Invert condition and try again.
22505 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
22506 invert_move = true;
22507 rcode = reverse_condition_maybe_unordered (rcode);
22508 if (rcode == UNKNOWN)
22509 return 0;
22510 break;
22511
22512 case GE:
22513 case LE:
22514 if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
22515 {
22516 /* Invert condition to avoid compound test. */
22517 invert_move = true;
22518 rcode = reverse_condition (rcode);
22519 }
22520 break;
22521
22522 case GTU:
22523 case GEU:
22524 case LTU:
22525 case LEU:
22526 /* Mark unsigned tests with CCUNSmode. */
22527 cc_mode = CCUNSmode;
22528
22529 /* Invert condition to avoid compound test if necessary. */
22530 if (rcode == GEU || rcode == LEU)
22531 {
22532 invert_move = true;
22533 rcode = reverse_condition (rcode);
22534 }
22535 break;
22536
22537 default:
22538 break;
22539 }
22540
22541 /* Get the vector mask for the given relational operations. */
22542 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
22543
22544 if (!mask)
22545 return 0;
22546
22547 if (invert_move)
22548 std::swap (op_true, op_false);
22549
22550 /* The compare mask is -1/0 in each element; use it directly when the arms are the constant vectors -1 and 0. */
22551 if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
22552 && (GET_CODE (op_true) == CONST_VECTOR
22553 || GET_CODE (op_false) == CONST_VECTOR))
22554 {
22555 rtx constant_0 = CONST0_RTX (dest_mode);
22556 rtx constant_m1 = CONSTM1_RTX (dest_mode);
22557
22558 if (op_true == constant_m1 && op_false == constant_0)
22559 {
22560 emit_move_insn (dest, mask);
22561 return 1;
22562 }
22563
22564 else if (op_true == constant_0 && op_false == constant_m1)
22565 {
22566 emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
22567 return 1;
22568 }
22569
22570 /* If we can't use the vector comparison directly, perhaps we can use
22571 the mask for the true or false fields, instead of loading up a
22572 constant. */
22573 if (op_true == constant_m1)
22574 op_true = mask;
22575
22576 if (op_false == constant_0)
22577 op_false = mask;
22578 }
22579
22580 if (!REG_P (op_true) && !SUBREG_P (op_true))
22581 op_true = force_reg (dest_mode, op_true);
22582
22583 if (!REG_P (op_false) && !SUBREG_P (op_false))
22584 op_false = force_reg (dest_mode, op_false);
22585
22586 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
22587 CONST0_RTX (dest_mode));
22588 emit_insn (gen_rtx_SET (dest,
22589 gen_rtx_IF_THEN_ELSE (dest_mode,
22590 cond2,
22591 op_true,
22592 op_false)));
22593 return 1;
22594 }
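
/* A sketch of the common case: for a V4SI destination with rcode EQ,
   the mask comes from a vcmpequw-style compare and the final
   IF_THEN_ELSE matches a vsel/xxsel pattern, roughly
   dest = (op_true & mask) | (op_false & ~mask) per element.  */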
22595
22596 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
22597 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
22598 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
22599 hardware has no such operation. */
22600
22601 static int
22602 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22603 {
22604 enum rtx_code code = GET_CODE (op);
22605 rtx op0 = XEXP (op, 0);
22606 rtx op1 = XEXP (op, 1);
22607 machine_mode compare_mode = GET_MODE (op0);
22608 machine_mode result_mode = GET_MODE (dest);
22609 bool max_p = false;
22610
22611 if (result_mode != compare_mode)
22612 return 0;
22613
22614 if (code == GE || code == GT)
22615 max_p = true;
22616 else if (code == LE || code == LT)
22617 max_p = false;
22618 else
22619 return 0;
22620
22621 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
22622 ;
22623
22624 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
22625 max_p = !max_p;
22626
22627 else
22628 return 0;
22629
22630 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
22631 return 1;
22632 }
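
/* Illustrative mapping (power9, DFmode): (a >= b ? a : b) passes the
   rtx_equal_p tests with code GE, so max_p is true and the SMAX
   emitted by rs6000_emit_minmax matches xsmaxcdp; (a >= b ? b : a)
   flips max_p and yields xsmincdp.  */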
22633
22634 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
22635 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
22636 operands of the last comparison is nonzero/true, FALSE_COND if it is
22637 zero/false. Return 0 if the hardware has no such operation. */
22638
22639 static int
22640 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22641 {
22642 enum rtx_code code = GET_CODE (op);
22643 rtx op0 = XEXP (op, 0);
22644 rtx op1 = XEXP (op, 1);
22645 machine_mode result_mode = GET_MODE (dest);
22646 rtx compare_rtx;
22647 rtx cmove_rtx;
22648 rtx clobber_rtx;
22649
22650 if (!can_create_pseudo_p ())
22651 return 0;
22652
22653 switch (code)
22654 {
22655 case EQ:
22656 case GE:
22657 case GT:
22658 break;
22659
22660 case NE:
22661 case LT:
22662 case LE:
22663 code = swap_condition (code);
22664 std::swap (op0, op1);
22665 break;
22666
22667 default:
22668 return 0;
22669 }
22670
22671 /* Generate: [(parallel [(set (dest)
22672 (if_then_else (op (cmp1) (cmp2))
22673 (true)
22674 (false)))
22675 (clobber (scratch))])]. */
22676
22677 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
22678 cmove_rtx = gen_rtx_SET (dest,
22679 gen_rtx_IF_THEN_ELSE (result_mode,
22680 compare_rtx,
22681 true_cond,
22682 false_cond));
22683
22684 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
22685 emit_insn (gen_rtx_PARALLEL (VOIDmode,
22686 gen_rtvec (2, cmove_rtx, clobber_rtx)));
22687
22688 return 1;
22689 }
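
/* For example (a sketch; modes as in the header comment): a DFmode
   (a > b ? c : d) arrives with code GT, and the parallel emitted above
   can later be split into an XSCMPGTDP that builds an all-ones/zeros
   mask in the V2DImode scratch and an XXSEL that picks c or d.  */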
22690
22691 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
22692 operands of the last comparison is nonzero/true, FALSE_COND if it
22693 is zero/false. Return 0 if the hardware has no such operation. */
22694
22695 int
22696 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22697 {
22698 enum rtx_code code = GET_CODE (op);
22699 rtx op0 = XEXP (op, 0);
22700 rtx op1 = XEXP (op, 1);
22701 machine_mode compare_mode = GET_MODE (op0);
22702 machine_mode result_mode = GET_MODE (dest);
22703 rtx temp;
22704 bool is_against_zero;
22705
22706 /* These modes should always match. */
22707 if (GET_MODE (op1) != compare_mode
22708 /* In the isel case however, we can use a compare immediate, so
22709 op1 may be a small constant. */
22710 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
22711 return 0;
22712 if (GET_MODE (true_cond) != result_mode)
22713 return 0;
22714 if (GET_MODE (false_cond) != result_mode)
22715 return 0;
22716
22717 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
22718 if (TARGET_P9_MINMAX
22719 && (compare_mode == SFmode || compare_mode == DFmode)
22720 && (result_mode == SFmode || result_mode == DFmode))
22721 {
22722 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
22723 return 1;
22724
22725 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
22726 return 1;
22727 }
22728
22729 /* Don't allow using floating point comparisons for integer results for
22730 now. */
22731 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
22732 return 0;
22733
22734 /* First, work out if the hardware can do this at all, or
22735 if it's too slow.... */
22736 if (!FLOAT_MODE_P (compare_mode))
22737 {
22738 if (TARGET_ISEL)
22739 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
22740 return 0;
22741 }
22742
22743 is_against_zero = op1 == CONST0_RTX (compare_mode);
22744
22745 /* A floating-point subtract might overflow, underflow, or produce
22746 an inexact result, thus changing the floating-point flags, so it
22747 can't be generated if we care about that. It's safe if one side
22748 of the construct is zero, since then no subtract will be
22749 generated. */
22750 if (SCALAR_FLOAT_MODE_P (compare_mode)
22751 && flag_trapping_math && ! is_against_zero)
22752 return 0;
22753
22754 /* Eliminate half of the comparisons by switching operands, this
22755 makes the remaining code simpler. */
22756 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
22757 || code == LTGT || code == LT || code == UNLE)
22758 {
22759 code = reverse_condition_maybe_unordered (code);
22760 std::swap (true_cond, false_cond);
22763 }
22764
22765 /* UNEQ and LTGT take four instructions for a comparison with zero,
22766 it'll probably be faster to use a branch here too. */
22767 if (code == UNEQ && HONOR_NANS (compare_mode))
22768 return 0;
22769
22770 /* We're going to try to implement comparisons by performing
22771 a subtract, then comparing against zero. Unfortunately,
22772 Inf - Inf is NaN which is not zero, and so if we don't
22773 know that the operand is finite and the comparison
22774 would treat EQ different to UNORDERED, we can't do it. */
22775 if (HONOR_INFINITIES (compare_mode)
22776 && code != GT && code != UNGE
22777 && (!CONST_DOUBLE_P (op1)
22778 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
22779 /* Constructs of the form (a OP b ? a : b) are safe. */
22780 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
22781 || (! rtx_equal_p (op0, true_cond)
22782 && ! rtx_equal_p (op1, true_cond))))
22783 return 0;
22784
22785 /* At this point we know we can use fsel. */
22786
22787 /* Reduce the comparison to a comparison against zero. */
22788 if (! is_against_zero)
22789 {
22790 temp = gen_reg_rtx (compare_mode);
22791 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
22792 op0 = temp;
22793 op1 = CONST0_RTX (compare_mode);
22794 }
22795
22796 /* If we don't care about NaNs we can reduce some of the comparisons
22797 down to faster ones. */
22798 if (! HONOR_NANS (compare_mode))
22799 switch (code)
22800 {
22801 case GT:
22802 code = LE;
22803 std::swap (true_cond, false_cond);
22806 break;
22807 case UNGE:
22808 code = GE;
22809 break;
22810 case UNEQ:
22811 code = EQ;
22812 break;
22813 default:
22814 break;
22815 }
22816
22817 /* Now, reduce everything down to a GE. */
22818 switch (code)
22819 {
22820 case GE:
22821 break;
22822
22823 case LE:
22824 temp = gen_reg_rtx (compare_mode);
22825 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22826 op0 = temp;
22827 break;
22828
22829 case ORDERED:
22830 temp = gen_reg_rtx (compare_mode);
22831 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
22832 op0 = temp;
22833 break;
22834
22835 case EQ:
22836 temp = gen_reg_rtx (compare_mode);
22837 emit_insn (gen_rtx_SET (temp,
22838 gen_rtx_NEG (compare_mode,
22839 gen_rtx_ABS (compare_mode, op0))));
22840 op0 = temp;
22841 break;
22842
22843 case UNGE:
22844 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
22845 temp = gen_reg_rtx (result_mode);
22846 emit_insn (gen_rtx_SET (temp,
22847 gen_rtx_IF_THEN_ELSE (result_mode,
22848 gen_rtx_GE (VOIDmode,
22849 op0, op1),
22850 true_cond, false_cond)));
22851 false_cond = true_cond;
22852 true_cond = temp;
22853
22854 temp = gen_reg_rtx (compare_mode);
22855 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22856 op0 = temp;
22857 break;
22858
22859 case GT:
22860 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
22861 temp = gen_reg_rtx (result_mode);
22862 emit_insn (gen_rtx_SET (temp,
22863 gen_rtx_IF_THEN_ELSE (result_mode,
22864 gen_rtx_GE (VOIDmode,
22865 op0, op1),
22866 true_cond, false_cond)));
22867 true_cond = false_cond;
22868 false_cond = temp;
22869
22870 temp = gen_reg_rtx (compare_mode);
22871 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
22872 op0 = temp;
22873 break;
22874
22875 default:
22876 gcc_unreachable ();
22877 }
22878
22879 emit_insn (gen_rtx_SET (dest,
22880 gen_rtx_IF_THEN_ELSE (result_mode,
22881 gen_rtx_GE (VOIDmode,
22882 op0, op1),
22883 true_cond, false_cond)));
22884 return 1;
22885 }
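
/* A worked example of the GE reduction (illustrative operands): when
   the trapping-math check above permits the subtract, (a <= b ? x : y)
   becomes ((b - a) >= 0 ? x : y), and the final IF_THEN_ELSE matches
   the fsel pattern -- roughly "fsel fD,fT,fX,fY", which selects fX
   when fT (here b - a) is >= 0 and fY otherwise.  */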
22886
22887 /* Same as above, but for ints (isel). */
22888
22889 int
22890 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
22891 {
22892 rtx condition_rtx, cr;
22893 machine_mode mode = GET_MODE (dest);
22894 enum rtx_code cond_code;
22895 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
22896 bool signedp;
22897
22898 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
22899 return 0;
22900
22901 /* We still have to do the compare, because isel doesn't do a
22902 compare, it just looks at the CRx bits set by a previous compare
22903 instruction. */
22904 condition_rtx = rs6000_generate_compare (op, mode);
22905 cond_code = GET_CODE (condition_rtx);
22906 cr = XEXP (condition_rtx, 0);
22907 signedp = GET_MODE (cr) == CCmode;
22908
22909 isel_func = (mode == SImode
22910 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
22911 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
22912
22913 switch (cond_code)
22914 {
22915 case LT: case GT: case LTU: case GTU: case EQ:
22916 /* isel handles these directly. */
22917 break;
22918
22919 default:
22920 /* We need to swap the sense of the comparison. */
22921 {
22922 std::swap (false_cond, true_cond);
22923 PUT_CODE (condition_rtx, reverse_condition (cond_code));
22924 }
22925 break;
22926 }
22927
22928 false_cond = force_reg (mode, false_cond);
22929 if (true_cond != const0_rtx)
22930 true_cond = force_reg (mode, true_cond);
22931
22932 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
22933
22934 return 1;
22935 }
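
/* An illustrative expansion (register and CR-bit numbers
   hypothetical): SImode (a < b ? x : y) on an isel target becomes
       cmpw 7,3,4
       isel 9,5,6,28
   i.e. one compare setting cr7 and one isel reading cr7's LT bit
   (4 * 7 + 0 = 28), with no branch emitted.  */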
22936
22937 void
22938 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
22939 {
22940 machine_mode mode = GET_MODE (op0);
22941 enum rtx_code c;
22942 rtx target;
22943
22944 /* VSX/altivec have direct min/max insns. */
22945 if ((code == SMAX || code == SMIN)
22946 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
22947 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
22948 {
22949 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
22950 return;
22951 }
22952
22953 if (code == SMAX || code == SMIN)
22954 c = GE;
22955 else
22956 c = GEU;
22957
22958 if (code == SMAX || code == UMAX)
22959 target = emit_conditional_move (dest, c, op0, op1, mode,
22960 op0, op1, mode, 0);
22961 else
22962 target = emit_conditional_move (dest, c, op0, op1, mode,
22963 op1, op0, mode, 0);
22964 gcc_assert (target);
22965 if (target != dest)
22966 emit_move_insn (dest, target);
22967 }
22968
22969 /* A subroutine of the atomic operation splitters. Jump to LABEL if
22970 COND is true. Mark the jump as unlikely to be taken. */
22971
22972 static void
22973 emit_unlikely_jump (rtx cond, rtx label)
22974 {
22975 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
22976 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
22977 add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
22978 }
22979
22980 /* A subroutine of the atomic operation splitters. Emit a load-locked
22981 instruction in MODE. For QI/HImode, possibly use a pattern that includes
22982 the zero_extend operation. */
22983
22984 static void
22985 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
22986 {
22987 rtx (*fn) (rtx, rtx) = NULL;
22988
22989 switch (mode)
22990 {
22991 case E_QImode:
22992 fn = gen_load_lockedqi;
22993 break;
22994 case E_HImode:
22995 fn = gen_load_lockedhi;
22996 break;
22997 case E_SImode:
22998 if (GET_MODE (mem) == QImode)
22999 fn = gen_load_lockedqi_si;
23000 else if (GET_MODE (mem) == HImode)
23001 fn = gen_load_lockedhi_si;
23002 else
23003 fn = gen_load_lockedsi;
23004 break;
23005 case E_DImode:
23006 fn = gen_load_lockeddi;
23007 break;
23008 case E_TImode:
23009 fn = gen_load_lockedti;
23010 break;
23011 default:
23012 gcc_unreachable ();
23013 }
23014 emit_insn (fn (reg, mem));
23015 }
23016
23017 /* A subroutine of the atomic operation splitters. Emit a store-conditional
23018 instruction in MODE. */
23019
23020 static void
23021 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
23022 {
23023 rtx (*fn) (rtx, rtx, rtx) = NULL;
23024
23025 switch (mode)
23026 {
23027 case E_QImode:
23028 fn = gen_store_conditionalqi;
23029 break;
23030 case E_HImode:
23031 fn = gen_store_conditionalhi;
23032 break;
23033 case E_SImode:
23034 fn = gen_store_conditionalsi;
23035 break;
23036 case E_DImode:
23037 fn = gen_store_conditionaldi;
23038 break;
23039 case E_TImode:
23040 fn = gen_store_conditionalti;
23041 break;
23042 default:
23043 gcc_unreachable ();
23044 }
23045
23046 /* Emit sync before stwcx. to address PPC405 erratum 77. */
23047 if (PPC405_ERRATUM77)
23048 emit_insn (gen_hwsync ());
23049
23050 emit_insn (fn (res, mem, val));
23051 }
23052
23053 /* Expand barriers before and after a load_locked/store_cond sequence. */
23054
23055 static rtx
23056 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
23057 {
23058 rtx addr = XEXP (mem, 0);
23059
23060 if (!legitimate_indirect_address_p (addr, reload_completed)
23061 && !legitimate_indexed_address_p (addr, reload_completed))
23062 {
23063 addr = force_reg (Pmode, addr);
23064 mem = replace_equiv_address_nv (mem, addr);
23065 }
23066
23067 switch (model)
23068 {
23069 case MEMMODEL_RELAXED:
23070 case MEMMODEL_CONSUME:
23071 case MEMMODEL_ACQUIRE:
23072 break;
23073 case MEMMODEL_RELEASE:
23074 case MEMMODEL_ACQ_REL:
23075 emit_insn (gen_lwsync ());
23076 break;
23077 case MEMMODEL_SEQ_CST:
23078 emit_insn (gen_hwsync ());
23079 break;
23080 default:
23081 gcc_unreachable ();
23082 }
23083 return mem;
23084 }
23085
23086 static void
23087 rs6000_post_atomic_barrier (enum memmodel model)
23088 {
23089 switch (model)
23090 {
23091 case MEMMODEL_RELAXED:
23092 case MEMMODEL_CONSUME:
23093 case MEMMODEL_RELEASE:
23094 break;
23095 case MEMMODEL_ACQUIRE:
23096 case MEMMODEL_ACQ_REL:
23097 case MEMMODEL_SEQ_CST:
23098 emit_insn (gen_isync ());
23099 break;
23100 default:
23101 gcc_unreachable ();
23102 }
23103 }
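
/* Summary of the fences the two helpers above place around a
   load-locked/store-conditional region:
     relaxed/consume: no fences.
     acquire:         trailing isync only.
     release:         leading lwsync only.
     acq_rel:         leading lwsync and trailing isync.
     seq_cst:         leading hwsync (sync) and trailing isync.  */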
23104
23105 /* A subroutine of the various atomic expanders. For sub-word operations,
23106 we must adjust things to operate on SImode. Given the original MEM,
23107 return a new aligned memory. Also build and return the quantities by
23108 which to shift and mask. */
23109
23110 static rtx
23111 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
23112 {
23113 rtx addr, align, shift, mask, mem;
23114 HOST_WIDE_INT shift_mask;
23115 machine_mode mode = GET_MODE (orig_mem);
23116
23117 /* For smaller modes, we have to implement this via SImode. */
23118 shift_mask = (mode == QImode ? 0x18 : 0x10);
23119
23120 addr = XEXP (orig_mem, 0);
23121 addr = force_reg (GET_MODE (addr), addr);
23122
23123 /* Aligned memory containing subword. Generate a new memory. We
23124 do not want any of the existing MEM_ATTR data, as we're now
23125 accessing memory outside the original object. */
23126 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
23127 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23128 mem = gen_rtx_MEM (SImode, align);
23129 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
23130 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
23131 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
23132
23133 /* Shift amount for subword relative to aligned word. */
23134 shift = gen_reg_rtx (SImode);
23135 addr = gen_lowpart (SImode, addr);
23136 rtx tmp = gen_reg_rtx (SImode);
23137 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
23138 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
23139 if (BYTES_BIG_ENDIAN)
23140 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
23141 shift, 1, OPTAB_LIB_WIDEN);
23142 *pshift = shift;
23143
23144 /* Mask for insertion. */
23145 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
23146 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
23147 *pmask = mask;
23148
23149 return mem;
23150 }
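
/* Worked example (little-endian, QImode at a hypothetical address A):
     align = A & -4             the containing aligned word
     shift = (A << 3) & 0x18    bit offset of the byte: 0, 8, 16 or 24
     mask  = 0xff << shift      insertion mask for that byte
   On big-endian the shift is XORed with 0x18, so the lowest-addressed
   byte is the most significant byte of the word.  */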
23151
23152 /* A subroutine of the various atomic expanders. For sub-word operands,
23153 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
23154
23155 static rtx
23156 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
23157 {
23158 rtx x;
23159
23160 x = gen_reg_rtx (SImode);
23161 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
23162 gen_rtx_NOT (SImode, mask),
23163 oldval)));
23164
23165 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
23166
23167 return x;
23168 }
23169
23170 /* A subroutine of the various atomic expanders. For sub-word operands,
23171 extract WIDE to NARROW via SHIFT. */
23172
23173 static void
23174 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
23175 {
23176 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
23177 wide, 1, OPTAB_LIB_WIDEN);
23178 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
23179 }
23180
23181 /* Expand an atomic compare and swap operation. */
23182
23183 void
23184 rs6000_expand_atomic_compare_and_swap (rtx operands[])
23185 {
23186 rtx boolval, retval, mem, oldval, newval, cond;
23187 rtx label1, label2, x, mask, shift;
23188 machine_mode mode, orig_mode;
23189 enum memmodel mod_s, mod_f;
23190 bool is_weak;
23191
23192 boolval = operands[0];
23193 retval = operands[1];
23194 mem = operands[2];
23195 oldval = operands[3];
23196 newval = operands[4];
23197 is_weak = (INTVAL (operands[5]) != 0);
23198 mod_s = memmodel_base (INTVAL (operands[6]));
23199 mod_f = memmodel_base (INTVAL (operands[7]));
23200 orig_mode = mode = GET_MODE (mem);
23201
23202 mask = shift = NULL_RTX;
23203 if (mode == QImode || mode == HImode)
23204 {
23205 /* Before power8, we didn't have access to lbarx/lharx, so generate a
23206 lwarx together with shift/mask operations. With power8, we need to do the
23207 comparison in SImode, but the store is still done in QI/HImode. */
23208 oldval = convert_modes (SImode, mode, oldval, 1);
23209
23210 if (!TARGET_SYNC_HI_QI)
23211 {
23212 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23213
23214 /* Shift and mask OLDVAL into position within the word. */
23215 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
23216 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23217
23218 /* Shift and mask NEWVAL into position within the word. */
23219 newval = convert_modes (SImode, mode, newval, 1);
23220 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
23221 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23222 }
23223
23224 /* Prepare to adjust the return value. */
23225 retval = gen_reg_rtx (SImode);
23226 mode = SImode;
23227 }
23228 else if (reg_overlap_mentioned_p (retval, oldval))
23229 oldval = copy_to_reg (oldval);
23230
23231 if (mode != TImode && !reg_or_short_operand (oldval, mode))
23232 oldval = copy_to_mode_reg (mode, oldval);
23233
23234 if (reg_overlap_mentioned_p (retval, newval))
23235 newval = copy_to_reg (newval);
23236
23237 mem = rs6000_pre_atomic_barrier (mem, mod_s);
23238
23239 label1 = NULL_RTX;
23240 if (!is_weak)
23241 {
23242 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23243 emit_label (XEXP (label1, 0));
23244 }
23245 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23246
23247 emit_load_locked (mode, retval, mem);
23248
23249 x = retval;
23250 if (mask)
23251 x = expand_simple_binop (SImode, AND, retval, mask,
23252 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23253
23254 cond = gen_reg_rtx (CCmode);
23255 /* If we have TImode, synthesize a comparison. */
23256 if (mode != TImode)
23257 x = gen_rtx_COMPARE (CCmode, x, oldval);
23258 else
23259 {
23260 rtx xor1_result = gen_reg_rtx (DImode);
23261 rtx xor2_result = gen_reg_rtx (DImode);
23262 rtx or_result = gen_reg_rtx (DImode);
23263 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
23264 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
23265 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
23266 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
23267
23268 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
23269 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
23270 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
23271 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
23272 }
23273
23274 emit_insn (gen_rtx_SET (cond, x));
23275
23276 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23277 emit_unlikely_jump (x, label2);
23278
23279 x = newval;
23280 if (mask)
23281 x = rs6000_mask_atomic_subword (retval, newval, mask);
23282
23283 emit_store_conditional (orig_mode, cond, mem, x);
23284
23285 if (!is_weak)
23286 {
23287 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23288 emit_unlikely_jump (x, label1);
23289 }
23290
23291 if (!is_mm_relaxed (mod_f))
23292 emit_label (XEXP (label2, 0));
23293
23294 rs6000_post_atomic_barrier (mod_s);
23295
23296 if (is_mm_relaxed (mod_f))
23297 emit_label (XEXP (label2, 0));
23298
23299 if (shift)
23300 rs6000_finish_atomic_subword (operands[1], retval, shift);
23301 else if (mode != GET_MODE (operands[1]))
23302 convert_move (operands[1], retval, 1);
23303
23304 /* In all cases, CR0 contains EQ on success, and NE on failure. */
23305 x = gen_rtx_EQ (SImode, cond, const0_rtx);
23306 emit_insn (gen_rtx_SET (boolval, x));
23307 }
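
/* Sketch of the strong full-word sequence emitted above (hypothetical
   labels, seq_cst success model, non-relaxed failure model):
       hwsync
   1:  lwarx   10,0,3
       cmpw    0,10,4
       bne-    0,2f
       stwcx.  5,0,3
       bne-    0,1b
   2:  isync
   and finally CR0's EQ bit is copied into BOOLVAL.  */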
23308
23309 /* Expand an atomic exchange operation. */
23310
23311 void
23312 rs6000_expand_atomic_exchange (rtx operands[])
23313 {
23314 rtx retval, mem, val, cond;
23315 machine_mode mode;
23316 enum memmodel model;
23317 rtx label, x, mask, shift;
23318
23319 retval = operands[0];
23320 mem = operands[1];
23321 val = operands[2];
23322 model = memmodel_base (INTVAL (operands[3]));
23323 mode = GET_MODE (mem);
23324
23325 mask = shift = NULL_RTX;
23326 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
23327 {
23328 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23329
23330 /* Shift and mask VAL into position within the word. */
23331 val = convert_modes (SImode, mode, val, 1);
23332 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23333 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23334
23335 /* Prepare to adjust the return value. */
23336 retval = gen_reg_rtx (SImode);
23337 mode = SImode;
23338 }
23339
23340 mem = rs6000_pre_atomic_barrier (mem, model);
23341
23342 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
23343 emit_label (XEXP (label, 0));
23344
23345 emit_load_locked (mode, retval, mem);
23346
23347 x = val;
23348 if (mask)
23349 x = rs6000_mask_atomic_subword (retval, val, mask);
23350
23351 cond = gen_reg_rtx (CCmode);
23352 emit_store_conditional (mode, cond, mem, x);
23353
23354 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23355 emit_unlikely_jump (x, label);
23356
23357 rs6000_post_atomic_barrier (model);
23358
23359 if (shift)
23360 rs6000_finish_atomic_subword (operands[0], retval, shift);
23361 }
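
/* Illustrative loop for a full-word exchange under the acquire model
   (hypothetical registers): no leading fence,
   1:  lwarx   9,0,3
       stwcx.  4,0,3
       bne-    0,1b
       isync
   with RETVAL receiving the value loaded by lwarx.  */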
23362
23363 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
23364 to perform. MEM is the memory on which to operate. VAL is the second
23365 operand of the binary operator. BEFORE and AFTER are optional locations to
23366 return the value of MEM either before or after the operation. MODEL_RTX
23367 is a CONST_INT containing the memory model to use. */
23368
23369 void
23370 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
23371 rtx orig_before, rtx orig_after, rtx model_rtx)
23372 {
23373 enum memmodel model = memmodel_base (INTVAL (model_rtx));
23374 machine_mode mode = GET_MODE (mem);
23375 machine_mode store_mode = mode;
23376 rtx label, x, cond, mask, shift;
23377 rtx before = orig_before, after = orig_after;
23378
23379 mask = shift = NULL_RTX;
23380 /* On power8, we want to use SImode for the operation. On previous systems,
23381 do the operation on the containing SImode word and shift/mask to get the
23382 proper byte or halfword. */
23383 if (mode == QImode || mode == HImode)
23384 {
23385 if (TARGET_SYNC_HI_QI)
23386 {
23387 val = convert_modes (SImode, mode, val, 1);
23388
23389 /* Prepare to adjust the return value. */
23390 before = gen_reg_rtx (SImode);
23391 if (after)
23392 after = gen_reg_rtx (SImode);
23393 mode = SImode;
23394 }
23395 else
23396 {
23397 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
23398
23399 /* Shift and mask VAL into position within the word. */
23400 val = convert_modes (SImode, mode, val, 1);
23401 val = expand_simple_binop (SImode, ASHIFT, val, shift,
23402 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23403
23404 switch (code)
23405 {
23406 case IOR:
23407 case XOR:
23408 /* We've already zero-extended VAL. That is sufficient to
23409 make certain that it does not affect other bits. */
23410 mask = NULL;
23411 break;
23412
23413 case AND:
23414 /* If we make certain that all of the other bits in VAL are
23415 set, that will be sufficient to not affect other bits. */
23416 x = gen_rtx_NOT (SImode, mask);
23417 x = gen_rtx_IOR (SImode, x, val);
23418 emit_insn (gen_rtx_SET (val, x));
23419 mask = NULL;
23420 break;
23421
23422 case NOT:
23423 case PLUS:
23424 case MINUS:
23425 /* These will all affect bits outside the field and need
23426 adjustment via MASK within the loop. */
23427 break;
23428
23429 default:
23430 gcc_unreachable ();
23431 }
23432
23433 /* Prepare to adjust the return value. */
23434 before = gen_reg_rtx (SImode);
23435 if (after)
23436 after = gen_reg_rtx (SImode);
23437 store_mode = mode = SImode;
23438 }
23439 }
23440
23441 mem = rs6000_pre_atomic_barrier (mem, model);
23442
23443 label = gen_label_rtx ();
23444 emit_label (label);
23445 label = gen_rtx_LABEL_REF (VOIDmode, label);
23446
23447 if (before == NULL_RTX)
23448 before = gen_reg_rtx (mode);
23449
23450 emit_load_locked (mode, before, mem);
23451
23452 if (code == NOT)
23453 {
23454 x = expand_simple_binop (mode, AND, before, val,
23455 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23456 after = expand_simple_unop (mode, NOT, x, after, 1);
23457 }
23458 else
23459 {
23460 after = expand_simple_binop (mode, code, before, val,
23461 after, 1, OPTAB_LIB_WIDEN);
23462 }
23463
23464 x = after;
23465 if (mask)
23466 {
23467 x = expand_simple_binop (SImode, AND, after, mask,
23468 NULL_RTX, 1, OPTAB_LIB_WIDEN);
23469 x = rs6000_mask_atomic_subword (before, x, mask);
23470 }
23471 else if (store_mode != mode)
23472 x = convert_modes (store_mode, mode, x, 1);
23473
23474 cond = gen_reg_rtx (CCmode);
23475 emit_store_conditional (store_mode, cond, mem, x);
23476
23477 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
23478 emit_unlikely_jump (x, label);
23479
23480 rs6000_post_atomic_barrier (model);
23481
23482 if (shift)
23483 {
23484 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
23485 then do the calculations in a SImode register. */
23486 if (orig_before)
23487 rs6000_finish_atomic_subword (orig_before, before, shift);
23488 if (orig_after)
23489 rs6000_finish_atomic_subword (orig_after, after, shift);
23490 }
23491 else if (store_mode != mode)
23492 {
23493 /* QImode/HImode on machines with lbarx/lharx where we do the native
23494 operation and then do the calculations in a SImode register. */
23495 if (orig_before)
23496 convert_move (orig_before, before, 1);
23497 if (orig_after)
23498 convert_move (orig_after, after, 1);
23499 }
23500 else if (orig_after && after != orig_after)
23501 emit_move_insn (orig_after, after);
23502 }
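
/* Sketch for a full-word __atomic_fetch_add under the relaxed model
   (hypothetical registers, no fences):
   1:  lwarx   9,0,3         BEFORE
       add     10,9,4        AFTER = BEFORE + VAL
       stwcx.  10,0,3
       bne-    0,1b
   The NOT case instead stores ~(BEFORE & VAL), i.e. a NAND.  */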
23503
23504 /* Emit instructions to move SRC to DST. Called by splitters for
23505 multi-register moves. It will emit at most one instruction for
23506 each register that is accessed; that is, it won't emit li/lis pairs
23507 (or equivalent for 64-bit code). One of SRC or DST must be a hard
23508 register. */
23509
23510 void
23511 rs6000_split_multireg_move (rtx dst, rtx src)
23512 {
23513 /* The register number of the first register being moved. */
23514 int reg;
23515 /* The mode that is to be moved. */
23516 machine_mode mode;
23517 /* The mode that the move is being done in, and its size. */
23518 machine_mode reg_mode;
23519 int reg_mode_size;
23520 /* The number of registers that will be moved. */
23521 int nregs;
23522
23523 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
23524 mode = GET_MODE (dst);
23525 nregs = hard_regno_nregs (reg, mode);
23526 if (FP_REGNO_P (reg))
23527 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
23528 (TARGET_HARD_FLOAT ? DFmode : SFmode);
23529 else if (ALTIVEC_REGNO_P (reg))
23530 reg_mode = V16QImode;
23531 else
23532 reg_mode = word_mode;
23533 reg_mode_size = GET_MODE_SIZE (reg_mode);
23534
23535 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
23536
23537 /* TDmode residing in FP registers is special, since the ISA requires that
23538 the lower-numbered word of a register pair is always the most significant
23539 word, even in little-endian mode. This does not match the usual subreg
23540 semantics, so we cannot use simplify_gen_subreg in those cases. Access
23541 the appropriate constituent registers "by hand" in little-endian mode.
23542
23543 Note we do not need to check for destructive overlap here since TDmode
23544 can only reside in even/odd register pairs. */
23545 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
23546 {
23547 rtx p_src, p_dst;
23548 int i;
23549
23550 for (i = 0; i < nregs; i++)
23551 {
23552 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
23553 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
23554 else
23555 p_src = simplify_gen_subreg (reg_mode, src, mode,
23556 i * reg_mode_size);
23557
23558 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
23559 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
23560 else
23561 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
23562 i * reg_mode_size);
23563
23564 emit_insn (gen_rtx_SET (p_dst, p_src));
23565 }
23566
23567 return;
23568 }
23569
23570 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
23571 {
23572 /* Move register range backwards, if we might have destructive
23573 overlap. */
23574 int i;
23575 for (i = nregs - 1; i >= 0; i--)
23576 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23577 i * reg_mode_size),
23578 simplify_gen_subreg (reg_mode, src, mode,
23579 i * reg_mode_size)));
23580 }
23581 else
23582 {
23583 int i;
23584 int j = -1;
23585 bool used_update = false;
23586 rtx restore_basereg = NULL_RTX;
23587
23588 if (MEM_P (src) && INT_REGNO_P (reg))
23589 {
23590 rtx breg;
23591
23592 if (GET_CODE (XEXP (src, 0)) == PRE_INC
23593 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
23594 {
23595 rtx delta_rtx;
23596 breg = XEXP (XEXP (src, 0), 0);
23597 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
23598 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
23599 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
23600 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23601 src = replace_equiv_address (src, breg);
23602 }
23603 else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
23604 {
23605 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
23606 {
23607 rtx basereg = XEXP (XEXP (src, 0), 0);
23608 if (TARGET_UPDATE)
23609 {
23610 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
23611 emit_insn (gen_rtx_SET (ndst,
23612 gen_rtx_MEM (reg_mode,
23613 XEXP (src, 0))));
23614 used_update = true;
23615 }
23616 else
23617 emit_insn (gen_rtx_SET (basereg,
23618 XEXP (XEXP (src, 0), 1)));
23619 src = replace_equiv_address (src, basereg);
23620 }
23621 else
23622 {
23623 rtx basereg = gen_rtx_REG (Pmode, reg);
23624 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
23625 src = replace_equiv_address (src, basereg);
23626 }
23627 }
23628
23629 breg = XEXP (src, 0);
23630 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
23631 breg = XEXP (breg, 0);
23632
23633 /* If the base register we are using to address memory is
23634 also a destination reg, then change that register last. */
23635 if (REG_P (breg)
23636 && REGNO (breg) >= REGNO (dst)
23637 && REGNO (breg) < REGNO (dst) + nregs)
23638 j = REGNO (breg) - REGNO (dst);
23639 }
23640 else if (MEM_P (dst) && INT_REGNO_P (reg))
23641 {
23642 rtx breg;
23643
23644 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
23645 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
23646 {
23647 rtx delta_rtx;
23648 breg = XEXP (XEXP (dst, 0), 0);
23649 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
23650 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
23651 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
23652
23653 /* We have to update the breg before doing the store.
23654 Use store with update, if available. */
23655
23656 if (TARGET_UPDATE)
23657 {
23658 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23659 emit_insn (TARGET_32BIT
23660 ? (TARGET_POWERPC64
23661 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
23662 : gen_movsi_si_update (breg, breg, delta_rtx, nsrc))
23663 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
23664 used_update = true;
23665 }
23666 else
23667 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
23668 dst = replace_equiv_address (dst, breg);
23669 }
23670 else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
23671 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
23672 {
23673 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
23674 {
23675 rtx basereg = XEXP (XEXP (dst, 0), 0);
23676 if (TARGET_UPDATE)
23677 {
23678 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
23679 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
23680 XEXP (dst, 0)),
23681 nsrc));
23682 used_update = true;
23683 }
23684 else
23685 emit_insn (gen_rtx_SET (basereg,
23686 XEXP (XEXP (dst, 0), 1)));
23687 dst = replace_equiv_address (dst, basereg);
23688 }
23689 else
23690 {
23691 rtx basereg = XEXP (XEXP (dst, 0), 0);
23692 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
23693 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
23694 && REG_P (basereg)
23695 && REG_P (offsetreg)
23696 && REGNO (basereg) != REGNO (offsetreg));
23697 if (REGNO (basereg) == 0)
23698 std::swap (basereg, offsetreg);
23703 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
23704 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
23705 dst = replace_equiv_address (dst, basereg);
23706 }
23707 }
23708 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
23709 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
23710 }
23711
23712 for (i = 0; i < nregs; i++)
23713 {
23714 /* Calculate index to next subword. */
23715 ++j;
23716 if (j == nregs)
23717 j = 0;
23718
23719 /* If compiler already emitted move of first word by
23720 store with update, no need to do anything. */
23721 if (j == 0 && used_update)
23722 continue;
23723
23724 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
23725 j * reg_mode_size),
23726 simplify_gen_subreg (reg_mode, src, mode,
23727 j * reg_mode_size)));
23728 }
23729 if (restore_basereg != NULL_RTX)
23730 emit_insn (restore_basereg);
23731 }
23732 }
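
/* Example of the ordering logic above (hypothetical registers, 64-bit):
   splitting a TImode move from r3:r4 to r4:r5 takes the backwards loop
   because REGNO (src) < REGNO (dst), copying r5 <- r4 before r4 <- r3;
   the forward order would overwrite r4 before it is read.  */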
23733
23734 \f
23735 /* This page contains routines that are used to determine what the
23736 function prologue and epilogue code will do and write them out. */
23737
23738 /* Determine whether REG needs to be saved by the prologue. */
23739
23740 static bool
23741 save_reg_p (int reg)
23742 {
23743 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
23744 {
23745 /* When calling eh_return, we must return true for all the cases
23746 where conditional_register_usage marks the PIC offset reg
23747 call used or fixed. */
23748 if (crtl->calls_eh_return
23749 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23750 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23751 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23752 return true;
23753
23754 /* We need to mark the PIC offset register live for the same
23755 conditions as it is set up in rs6000_emit_prologue, or
23756 otherwise it won't be saved before we clobber it. */
23757 if (TARGET_TOC && TARGET_MINIMAL_TOC
23758 && !constant_pool_empty_p ())
23759 return true;
23760
23761 if (DEFAULT_ABI == ABI_V4
23762 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23763 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
23764 return true;
23765
23766 if (DEFAULT_ABI == ABI_DARWIN
23767 && flag_pic && crtl->uses_pic_offset_table)
23768 return true;
23769 }
23770
23771 return !call_used_regs[reg] && df_regs_ever_live_p (reg);
23772 }
23773
23774 /* Return the first fixed-point register that is required to be
23775 saved. 32 if none. */
23776
23777 int
23778 first_reg_to_save (void)
23779 {
23780 int first_reg;
23781
23782 /* Find lowest numbered live register. */
23783 for (first_reg = 13; first_reg <= 31; first_reg++)
23784 if (save_reg_p (first_reg))
23785 break;
23786
23787 return first_reg;
23788 }
23789
23790 /* Similar, for FP regs. */
23791
23792 int
23793 first_fp_reg_to_save (void)
23794 {
23795 int first_reg;
23796
23797 /* Find lowest numbered live register. */
23798 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
23799 if (save_reg_p (first_reg))
23800 break;
23801
23802 return first_reg;
23803 }
23804
23805 /* Similar, for AltiVec regs. */
23806
23807 static int
23808 first_altivec_reg_to_save (void)
23809 {
23810 int i;
23811
23812 /* Stack frame remains as is unless we are in AltiVec ABI. */
23813 if (! TARGET_ALTIVEC_ABI)
23814 return LAST_ALTIVEC_REGNO + 1;
23815
23816 /* On Darwin, the unwind routines are compiled without
23817 TARGET_ALTIVEC, and use save_world to save/restore the
23818 altivec registers when necessary. */
23819 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23820 && ! TARGET_ALTIVEC)
23821 return FIRST_ALTIVEC_REGNO + 20;
23822
23823 /* Find lowest numbered live register. */
23824 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
23825 if (save_reg_p (i))
23826 break;
23827
23828 return i;
23829 }
23830
23831 /* Return a 32-bit mask of the AltiVec registers we need to set in
23832 VRSAVE. Bit n of the return value is 1 if Vn is live, where bits
23833 are numbered from the most significant end, i.e. V0 is the MSB. */
23834
23835 static unsigned int
23836 compute_vrsave_mask (void)
23837 {
23838 unsigned int i, mask = 0;
23839
23840 /* On Darwin, the unwind routines are compiled without
23841 TARGET_ALTIVEC, and use save_world to save/restore the
23842 call-saved altivec registers when necessary. */
23843 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
23844 && ! TARGET_ALTIVEC)
23845 mask |= 0xFFF;
23846
23847 /* First, find out if we use _any_ altivec registers. */
23848 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
23849 if (df_regs_ever_live_p (i))
23850 mask |= ALTIVEC_REG_BIT (i);
23851
23852 if (mask == 0)
23853 return mask;
23854
23855 /* Next, remove the argument registers from the set. These must
23856 be in the VRSAVE mask set by the caller, so we don't need to add
23857 them in again. More importantly, the mask we compute here is
23858 used to generate CLOBBERs in the set_vrsave insn, and we do not
23859 wish the argument registers to die. */
23860 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
23861 mask &= ~ALTIVEC_REG_BIT (i);
23862
23863 /* Similarly, remove the return value from the set. */
23864 {
23865 bool yes = false;
23866 diddle_return_value (is_altivec_return_reg, &yes);
23867 if (yes)
23868 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
23869 }
23870
23871 return mask;
23872 }
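
/* Bit-layout example: ALTIVEC_REG_BIT gives V0 the most significant
   bit, so a function whose only live AltiVec register is
   (hypothetically) V20 produces a mask of 0x80000000 >> 20 = 0x800;
   likewise the Darwin 0xFFF above covers V20..V31.  */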
23873
23874 /* For a very restricted set of circumstances, we can cut down the
23875 size of prologues/epilogues by calling our own save/restore-the-world
23876 routines. */
23877
23878 static void
23879 compute_save_world_info (rs6000_stack_t *info)
23880 {
23881 info->world_save_p = 1;
23882 info->world_save_p
23883 = (WORLD_SAVE_P (info)
23884 && DEFAULT_ABI == ABI_DARWIN
23885 && !cfun->has_nonlocal_label
23886 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
23887 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
23888 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
23889 && info->cr_save_p);
23890
23891 /* This will not work in conjunction with sibcalls. Make sure there
23892 are none. (This check is expensive, but seldom executed.) */
23893 if (WORLD_SAVE_P (info))
23894 {
23895 rtx_insn *insn;
23896 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
23897 if (CALL_P (insn) && SIBLING_CALL_P (insn))
23898 {
23899 info->world_save_p = 0;
23900 break;
23901 }
23902 }
23903
23904 if (WORLD_SAVE_P (info))
23905 {
23906 /* Even if we're not touching VRsave, make sure there's room on the
23907 stack for it, if it looks like we're calling SAVE_WORLD, which
23908 will attempt to save it. */
23909 info->vrsave_size = 4;
23910
23911 /* If we are going to save the world, we need to save the link register too. */
23912 info->lr_save_p = 1;
23913
23914 /* "Save" the VRsave register too if we're saving the world. */
23915 if (info->vrsave_mask == 0)
23916 info->vrsave_mask = compute_vrsave_mask ();
23917
23918 /* Because the Darwin register save/restore routines only handle
23919 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
23920 check. */
23921 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
23922 && (info->first_altivec_reg_save
23923 >= FIRST_SAVED_ALTIVEC_REGNO));
23924 }
23925
23926 return;
23927 }
23928
23929
23930 static void
23931 is_altivec_return_reg (rtx reg, void *xyes)
23932 {
23933 bool *yes = (bool *) xyes;
23934 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
23935 *yes = true;
23936 }
23937
23938 \f
/* Return whether REG is a global user reg or has been specified by
23940 -ffixed-REG. We should not restore these, and so cannot use
23941 lmw or out-of-line restore functions if there are any. We also
23942 can't save them (well, emit frame notes for them), because frame
23943 unwinding during exception handling will restore saved registers. */
23944
23945 static bool
23946 fixed_reg_p (int reg)
23947 {
23948 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
23949 backend sets it, overriding anything the user might have given. */
23950 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
23951 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
23952 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
23953 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
23954 return false;
23955
23956 return fixed_regs[reg];
23957 }
23958
/* Determine the strategy for saving/restoring registers.  */
23960
23961 enum {
23962 SAVE_MULTIPLE = 0x1,
23963 SAVE_INLINE_GPRS = 0x2,
23964 SAVE_INLINE_FPRS = 0x4,
23965 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
23966 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
23967 SAVE_INLINE_VRS = 0x20,
23968 REST_MULTIPLE = 0x100,
23969 REST_INLINE_GPRS = 0x200,
23970 REST_INLINE_FPRS = 0x400,
23971 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
23972 REST_INLINE_VRS = 0x1000
23973 };
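/* For instance (illustrative), a function that must save and restore
   everything inline, e.g. one that calls __builtin_eh_return, ends up
   with SAVE_INLINE_FPRS | SAVE_INLINE_GPRS | SAVE_INLINE_VRS
   | REST_INLINE_FPRS | REST_INLINE_GPRS | REST_INLINE_VRS == 0x1626.  */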
23974
23975 static int
23976 rs6000_savres_strategy (rs6000_stack_t *info,
23977 bool using_static_chain_p)
23978 {
23979 int strategy = 0;
23980
23981 /* Select between in-line and out-of-line save and restore of regs.
23982 First, all the obvious cases where we don't use out-of-line. */
23983 if (crtl->calls_eh_return
23984 || cfun->machine->ra_need_lr)
23985 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
23986 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
23987 | SAVE_INLINE_VRS | REST_INLINE_VRS);
23988
23989 if (info->first_gp_reg_save == 32)
23990 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
23991
23992 if (info->first_fp_reg_save == 64)
23993 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
23994
23995 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
23996 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
23997
23998 /* Define cutoff for using out-of-line functions to save registers. */
23999 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
24000 {
24001 if (!optimize_size)
24002 {
24003 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24004 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24005 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24006 }
24007 else
24008 {
24009 /* Prefer out-of-line restore if it will exit. */
24010 if (info->first_fp_reg_save > 61)
24011 strategy |= SAVE_INLINE_FPRS;
24012 if (info->first_gp_reg_save > 29)
24013 {
24014 if (info->first_fp_reg_save == 64)
24015 strategy |= SAVE_INLINE_GPRS;
24016 else
24017 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24018 }
24019 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
24020 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24021 }
24022 }
24023 else if (DEFAULT_ABI == ABI_DARWIN)
24024 {
24025 if (info->first_fp_reg_save > 60)
24026 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24027 if (info->first_gp_reg_save > 29)
24028 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24029 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24030 }
24031 else
24032 {
24033 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24034 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
24035 || info->first_fp_reg_save > 61)
24036 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
24037 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24038 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
24039 }
24040
24041 /* Don't bother to try to save things out-of-line if r11 is occupied
24042 by the static chain. It would require too much fiddling and the
static chain is rarely used anyway.  FPRs are saved w.r.t. the stack
24044 pointer on Darwin, and AIX uses r1 or r12. */
24045 if (using_static_chain_p
24046 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
24047 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
24048 | SAVE_INLINE_GPRS
24049 | SAVE_INLINE_VRS);
24050
24051 /* Don't ever restore fixed regs. That means we can't use the
24052 out-of-line register restore functions if a fixed reg is in the
24053 range of regs restored. */
24054 if (!(strategy & REST_INLINE_FPRS))
24055 for (int i = info->first_fp_reg_save; i < 64; i++)
24056 if (fixed_regs[i])
24057 {
24058 strategy |= REST_INLINE_FPRS;
24059 break;
24060 }
24061
24062 /* We can only use the out-of-line routines to restore fprs if we've
24063 saved all the registers from first_fp_reg_save in the prologue.
24064 Otherwise, we risk loading garbage. Of course, if we have saved
24065 out-of-line then we know we haven't skipped any fprs. */
24066 if ((strategy & SAVE_INLINE_FPRS)
24067 && !(strategy & REST_INLINE_FPRS))
24068 for (int i = info->first_fp_reg_save; i < 64; i++)
24069 if (!save_reg_p (i))
24070 {
24071 strategy |= REST_INLINE_FPRS;
24072 break;
24073 }
24074
24075 /* Similarly, for altivec regs. */
24076 if (!(strategy & REST_INLINE_VRS))
24077 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24078 if (fixed_regs[i])
24079 {
24080 strategy |= REST_INLINE_VRS;
24081 break;
24082 }
24083
24084 if ((strategy & SAVE_INLINE_VRS)
24085 && !(strategy & REST_INLINE_VRS))
24086 for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
24087 if (!save_reg_p (i))
24088 {
24089 strategy |= REST_INLINE_VRS;
24090 break;
24091 }
24092
24093 /* info->lr_save_p isn't yet set if the only reason lr needs to be
24094 saved is an out-of-line save or restore. Set up the value for
24095 the next test (excluding out-of-line gprs). */
24096 bool lr_save_p = (info->lr_save_p
24097 || !(strategy & SAVE_INLINE_FPRS)
24098 || !(strategy & SAVE_INLINE_VRS)
24099 || !(strategy & REST_INLINE_FPRS)
24100 || !(strategy & REST_INLINE_VRS));
24101
24102 if (TARGET_MULTIPLE
24103 && !TARGET_POWERPC64
24104 && info->first_gp_reg_save < 31
24105 && !(flag_shrink_wrap
24106 && flag_shrink_wrap_separate
24107 && optimize_function_for_speed_p (cfun)))
24108 {
24109 int count = 0;
24110 for (int i = info->first_gp_reg_save; i < 32; i++)
24111 if (save_reg_p (i))
24112 count++;
24113
24114 if (count <= 1)
24115 /* Don't use store multiple if only one reg needs to be
24116 saved. This can occur for example when the ABI_V4 pic reg
24117 (r30) needs to be saved to make calls, but r31 is not
24118 used. */
24119 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24120 else
24121 {
24122 /* Prefer store multiple for saves over out-of-line
24123 routines, since the store-multiple instruction will
24124 always be smaller. */
24125 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
24126
/* The situation is more complicated with load multiple.
   We'd prefer to use the out-of-line routines for restores,
   since the "exit" out-of-line routines can handle the
   restore of LR and the frame teardown.  However, it doesn't
   make sense to use the out-of-line routine if that is the
   only reason we'd need to save LR, and we can't use the
   "exit" out-of-line gpr restore if we have saved some
   fprs; in those cases it is advantageous to use load
   multiple when available.  */
24136 if (info->first_fp_reg_save != 64 || !lr_save_p)
24137 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
24138 }
24139 }
24140
/* Using the "exit" out-of-line routine does not improve code size
   if it would require lr to be saved and only one or two gprs are
   being saved.  */
24144 else if (!lr_save_p && info->first_gp_reg_save > 29)
24145 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
24146
24147 /* Don't ever restore fixed regs. */
24148 if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24149 for (int i = info->first_gp_reg_save; i < 32; i++)
24150 if (fixed_reg_p (i))
24151 {
24152 strategy |= REST_INLINE_GPRS;
24153 strategy &= ~REST_MULTIPLE;
24154 break;
24155 }
24156
24157 /* We can only use load multiple or the out-of-line routines to
24158 restore gprs if we've saved all the registers from
24159 first_gp_reg_save. Otherwise, we risk loading garbage.
24160 Of course, if we have saved out-of-line or used stmw then we know
24161 we haven't skipped any gprs. */
24162 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
24163 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
24164 for (int i = info->first_gp_reg_save; i < 32; i++)
24165 if (!save_reg_p (i))
24166 {
24167 strategy |= REST_INLINE_GPRS;
24168 strategy &= ~REST_MULTIPLE;
24169 break;
24170 }
24171
24172 if (TARGET_ELF && TARGET_64BIT)
24173 {
24174 if (!(strategy & SAVE_INLINE_FPRS))
24175 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24176 else if (!(strategy & SAVE_INLINE_GPRS)
24177 && info->first_fp_reg_save == 64)
24178 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
24179 }
24180 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
24181 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
24182
24183 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
24184 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
24185
24186 return strategy;
24187 }
24188
24189 /* Calculate the stack information for the current function. This is
24190 complicated by having two separate calling sequences, the AIX calling
24191 sequence and the V.4 calling sequence.
24192
24193 AIX (and Darwin/Mac OS X) stack frames look like:
24194 32-bit 64-bit
24195 SP----> +---------------------------------------+
24196 | back chain to caller | 0 0
24197 +---------------------------------------+
24198 | saved CR | 4 8 (8-11)
24199 +---------------------------------------+
24200 | saved LR | 8 16
24201 +---------------------------------------+
24202 | reserved for compilers | 12 24
24203 +---------------------------------------+
24204 | reserved for binders | 16 32
24205 +---------------------------------------+
24206 | saved TOC pointer | 20 40
24207 +---------------------------------------+
24208 | Parameter save area (+padding*) (P) | 24 48
24209 +---------------------------------------+
24210 | Alloca space (A) | 24+P etc.
24211 +---------------------------------------+
24212 | Local variable space (L) | 24+P+A
24213 +---------------------------------------+
24214 | Float/int conversion temporary (X) | 24+P+A+L
24215 +---------------------------------------+
24216 | Save area for AltiVec registers (W) | 24+P+A+L+X
24217 +---------------------------------------+
24218 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
24219 +---------------------------------------+
24220 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
24221 +---------------------------------------+
| Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
+---------------------------------------+
| Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
24225 +---------------------------------------+
24226 old SP->| back chain to caller's caller |
24227 +---------------------------------------+
24228
24229 * If the alloca area is present, the parameter save area is
24230 padded so that the former starts 16-byte aligned.
24231
24232 The required alignment for AIX configurations is two words (i.e., 8
24233 or 16 bytes).
24234
24235 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
24236
24237 SP----> +---------------------------------------+
24238 | Back chain to caller | 0
24239 +---------------------------------------+
24240 | Save area for CR | 8
24241 +---------------------------------------+
24242 | Saved LR | 16
24243 +---------------------------------------+
24244 | Saved TOC pointer | 24
24245 +---------------------------------------+
24246 | Parameter save area (+padding*) (P) | 32
24247 +---------------------------------------+
24248 | Alloca space (A) | 32+P
24249 +---------------------------------------+
24250 | Local variable space (L) | 32+P+A
24251 +---------------------------------------+
24252 | Save area for AltiVec registers (W) | 32+P+A+L
24253 +---------------------------------------+
24254 | AltiVec alignment padding (Y) | 32+P+A+L+W
24255 +---------------------------------------+
24256 | Save area for GP registers (G) | 32+P+A+L+W+Y
24257 +---------------------------------------+
24258 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
24259 +---------------------------------------+
24260 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
24261 +---------------------------------------+
24262
24263 * If the alloca area is present, the parameter save area is
24264 padded so that the former starts 16-byte aligned.
24265
24266 V.4 stack frames look like:
24267
24268 SP----> +---------------------------------------+
24269 | back chain to caller | 0
24270 +---------------------------------------+
24271 | caller's saved LR | 4
24272 +---------------------------------------+
24273 | Parameter save area (+padding*) (P) | 8
24274 +---------------------------------------+
24275 | Alloca space (A) | 8+P
24276 +---------------------------------------+
24277 | Varargs save area (V) | 8+P+A
24278 +---------------------------------------+
24279 | Local variable space (L) | 8+P+A+V
24280 +---------------------------------------+
24281 | Float/int conversion temporary (X) | 8+P+A+V+L
24282 +---------------------------------------+
24283 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
24284 +---------------------------------------+
24285 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
24286 +---------------------------------------+
24287 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
24288 +---------------------------------------+
24289 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
24290 +---------------------------------------+
24291 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
24292 +---------------------------------------+
24293 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
24294 +---------------------------------------+
24295 old SP->| back chain to caller's caller |
24296 +---------------------------------------+
24297
24298 * If the alloca area is present and the required alignment is
24299 16 bytes, the parameter save area is padded so that the
24300 alloca area starts 16-byte aligned.
24301
24302 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
24303 given. (But note below and in sysv4.h that we require only 8 and
24304 may round up the size of our stack frame anyways. The historical
24305 reason is early versions of powerpc-linux which didn't properly
24306 align the stack at program startup. A happy side-effect is that
24307 -mno-eabi libraries can be used with -meabi programs.)
24308
24309 The EABI configuration defaults to the V.4 layout. However,
24310 the stack alignment requirements may differ. If -mno-eabi is not
24311 given, the required stack alignment is 8 bytes; if -mno-eabi is
24312 given, the required alignment is 16 bytes. (But see V.4 comment
24313 above.) */
24314
24315 #ifndef ABI_STACK_BOUNDARY
24316 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
24317 #endif
24318
24319 static rs6000_stack_t *
24320 rs6000_stack_info (void)
24321 {
24322 /* We should never be called for thunks, we are not set up for that. */
24323 gcc_assert (!cfun->is_thunk);
24324
24325 rs6000_stack_t *info = &stack_info;
24326 int reg_size = TARGET_32BIT ? 4 : 8;
24327 int ehrd_size;
24328 int ehcr_size;
24329 int save_align;
24330 int first_gp;
24331 HOST_WIDE_INT non_fixed_size;
24332 bool using_static_chain_p;
24333
24334 if (reload_completed && info->reload_completed)
24335 return info;
24336
24337 memset (info, 0, sizeof (*info));
24338 info->reload_completed = reload_completed;
24339
24340 /* Select which calling sequence. */
24341 info->abi = DEFAULT_ABI;
24342
24343 /* Calculate which registers need to be saved & save area size. */
24344 info->first_gp_reg_save = first_reg_to_save ();
24345 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
24346 even if it currently looks like we won't. Reload may need it to
24347 get at a constant; if so, it will have already created a constant
24348 pool entry for it. */
24349 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
24350 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
24351 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
24352 && crtl->uses_const_pool
24353 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
24354 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
24355 else
24356 first_gp = info->first_gp_reg_save;
24357
24358 info->gp_size = reg_size * (32 - first_gp);
24359
24360 info->first_fp_reg_save = first_fp_reg_to_save ();
24361 info->fp_size = 8 * (64 - info->first_fp_reg_save);
24362
24363 info->first_altivec_reg_save = first_altivec_reg_to_save ();
24364 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
24365 - info->first_altivec_reg_save);
24366
24367 /* Does this function call anything? */
24368 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
24369
24370 /* Determine if we need to save the condition code registers. */
24371 if (save_reg_p (CR2_REGNO)
24372 || save_reg_p (CR3_REGNO)
24373 || save_reg_p (CR4_REGNO))
24374 {
24375 info->cr_save_p = 1;
24376 if (DEFAULT_ABI == ABI_V4)
24377 info->cr_size = reg_size;
24378 }
24379
24380 /* If the current function calls __builtin_eh_return, then we need
24381 to allocate stack space for registers that will hold data for
24382 the exception handler. */
24383 if (crtl->calls_eh_return)
24384 {
24385 unsigned int i;
24386 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
24387 continue;
24388
24389 ehrd_size = i * UNITS_PER_WORD;
24390 }
24391 else
24392 ehrd_size = 0;
24393
24394 /* In the ELFv2 ABI, we also need to allocate space for separate
24395 CR field save areas if the function calls __builtin_eh_return. */
24396 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24397 {
24398 /* This hard-codes that we have three call-saved CR fields. */
24399 ehcr_size = 3 * reg_size;
24400 /* We do *not* use the regular CR save mechanism. */
24401 info->cr_save_p = 0;
24402 }
24403 else
24404 ehcr_size = 0;
24405
24406 /* Determine various sizes. */
24407 info->reg_size = reg_size;
24408 info->fixed_size = RS6000_SAVE_AREA;
24409 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
24410 if (cfun->calls_alloca)
24411 info->parm_size =
24412 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
24413 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
24414 else
24415 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
24416 TARGET_ALTIVEC ? 16 : 8);
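/* Pad vars_size so the frame allocated so far ends on an ABI
   boundary.  E.g. (illustrative) fixed+vars+parm == 52 with a
   16-byte boundary pads vars_size by 12, for a 64-byte total.  */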
24417 if (FRAME_GROWS_DOWNWARD)
24418 info->vars_size
24419 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
24420 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
24421 - (info->fixed_size + info->vars_size + info->parm_size);
24422
24423 if (TARGET_ALTIVEC_ABI)
24424 info->vrsave_mask = compute_vrsave_mask ();
24425
24426 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
24427 info->vrsave_size = 4;
24428
24429 compute_save_world_info (info);
24430
24431 /* Calculate the offsets. */
24432 switch (DEFAULT_ABI)
24433 {
24434 case ABI_NONE:
24435 default:
24436 gcc_unreachable ();
24437
24438 case ABI_AIX:
24439 case ABI_ELFv2:
24440 case ABI_DARWIN:
24441 info->fp_save_offset = -info->fp_size;
24442 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24443
24444 if (TARGET_ALTIVEC_ABI)
24445 {
24446 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
24447
24448 /* Align stack so vector save area is on a quadword boundary.
24449 The padding goes above the vectors. */
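/* E.g. (illustrative): a vrsave_save_offset of -20 yields
   (-20 & 0xF) == 12 bytes of padding here.  */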
24450 if (info->altivec_size != 0)
24451 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
24452
24453 info->altivec_save_offset = info->vrsave_save_offset
24454 - info->altivec_padding_size
24455 - info->altivec_size;
24456 gcc_assert (info->altivec_size == 0
24457 || info->altivec_save_offset % 16 == 0);
24458
24459 /* Adjust for AltiVec case. */
24460 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
24461 }
24462 else
24463 info->ehrd_offset = info->gp_save_offset - ehrd_size;
24464
24465 info->ehcr_offset = info->ehrd_offset - ehcr_size;
24466 info->cr_save_offset = reg_size; /* first word when 64-bit. */
24467 info->lr_save_offset = 2*reg_size;
24468 break;
24469
24470 case ABI_V4:
24471 info->fp_save_offset = -info->fp_size;
24472 info->gp_save_offset = info->fp_save_offset - info->gp_size;
24473 info->cr_save_offset = info->gp_save_offset - info->cr_size;
24474
24475 if (TARGET_ALTIVEC_ABI)
24476 {
24477 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
24478
24479 /* Align stack so vector save area is on a quadword boundary. */
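/* E.g. (illustrative): a vrsave_save_offset of -20 yields
   16 - (20 % 16) == 12 bytes of padding; note that an offset already
   on a 16-byte boundary yields 16 bytes of padding rather than 0.  */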
24480 if (info->altivec_size != 0)
24481 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
24482
24483 info->altivec_save_offset = info->vrsave_save_offset
24484 - info->altivec_padding_size
24485 - info->altivec_size;
24486
24487 /* Adjust for AltiVec case. */
24488 info->ehrd_offset = info->altivec_save_offset;
24489 }
24490 else
24491 info->ehrd_offset = info->cr_save_offset;
24492
24493 info->ehrd_offset -= ehrd_size;
24494 info->lr_save_offset = reg_size;
24495 }
24496
24497 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
24498 info->save_size = RS6000_ALIGN (info->fp_size
24499 + info->gp_size
24500 + info->altivec_size
24501 + info->altivec_padding_size
24502 + ehrd_size
24503 + ehcr_size
24504 + info->cr_size
24505 + info->vrsave_size,
24506 save_align);
24507
24508 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
24509
24510 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
24511 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
24512
24513 /* Determine if we need to save the link register. */
24514 if (info->calls_p
24515 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24516 && crtl->profile
24517 && !TARGET_PROFILE_KERNEL)
24518 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
24519 #ifdef TARGET_RELOCATABLE
24520 || (DEFAULT_ABI == ABI_V4
24521 && (TARGET_RELOCATABLE || flag_pic > 1)
24522 && !constant_pool_empty_p ())
24523 #endif
24524 || rs6000_ra_ever_killed ())
24525 info->lr_save_p = 1;
24526
24527 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
24528 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
24529 && call_used_regs[STATIC_CHAIN_REGNUM]);
24530 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
24531
24532 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
24533 || !(info->savres_strategy & SAVE_INLINE_FPRS)
24534 || !(info->savres_strategy & SAVE_INLINE_VRS)
24535 || !(info->savres_strategy & REST_INLINE_GPRS)
24536 || !(info->savres_strategy & REST_INLINE_FPRS)
24537 || !(info->savres_strategy & REST_INLINE_VRS))
24538 info->lr_save_p = 1;
24539
24540 if (info->lr_save_p)
24541 df_set_regs_ever_live (LR_REGNO, true);
24542
24543 /* Determine if we need to allocate any stack frame:
24544
24545 For AIX we need to push the stack if a frame pointer is needed
24546 (because the stack might be dynamically adjusted), if we are
24547 debugging, if we make calls, or if the sum of fp_save, gp_save,
and local variables is more than the space needed to save all
24549 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
24550 + 18*8 = 288 (GPR13 reserved).
24551
24552 For V.4 we don't have the stack cushion that AIX uses, but assume
24553 that the debugger can handle stackless frames. */
24554
24555 if (info->calls_p)
24556 info->push_p = 1;
24557
24558 else if (DEFAULT_ABI == ABI_V4)
24559 info->push_p = non_fixed_size != 0;
24560
24561 else if (frame_pointer_needed)
24562 info->push_p = 1;
24563
24564 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
24565 info->push_p = 1;
24566
24567 else
24568 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
24569
24570 return info;
24571 }
24572
24573 static void
24574 debug_stack_info (rs6000_stack_t *info)
24575 {
24576 const char *abi_string;
24577
24578 if (! info)
24579 info = rs6000_stack_info ();
24580
24581 fprintf (stderr, "\nStack information for function %s:\n",
24582 ((current_function_decl && DECL_NAME (current_function_decl))
24583 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
24584 : "<unknown>"));
24585
24586 switch (info->abi)
24587 {
24588 default: abi_string = "Unknown"; break;
24589 case ABI_NONE: abi_string = "NONE"; break;
24590 case ABI_AIX: abi_string = "AIX"; break;
24591 case ABI_ELFv2: abi_string = "ELFv2"; break;
24592 case ABI_DARWIN: abi_string = "Darwin"; break;
24593 case ABI_V4: abi_string = "V.4"; break;
24594 }
24595
24596 fprintf (stderr, "\tABI = %5s\n", abi_string);
24597
24598 if (TARGET_ALTIVEC_ABI)
24599 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
24600
24601 if (info->first_gp_reg_save != 32)
24602 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
24603
24604 if (info->first_fp_reg_save != 64)
24605 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
24606
24607 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
24608 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
24609 info->first_altivec_reg_save);
24610
24611 if (info->lr_save_p)
24612 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
24613
24614 if (info->cr_save_p)
24615 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
24616
24617 if (info->vrsave_mask)
24618 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
24619
24620 if (info->push_p)
24621 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
24622
24623 if (info->calls_p)
24624 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
24625
24626 if (info->gp_size)
24627 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
24628
24629 if (info->fp_size)
24630 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
24631
24632 if (info->altivec_size)
24633 fprintf (stderr, "\taltivec_save_offset = %5d\n",
24634 info->altivec_save_offset);
24635
24636 if (info->vrsave_size)
24637 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
24638 info->vrsave_save_offset);
24639
24640 if (info->lr_save_p)
24641 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
24642
24643 if (info->cr_save_p)
24644 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
24645
24646 if (info->varargs_save_offset)
24647 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
24648
24649 if (info->total_size)
24650 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24651 info->total_size);
24652
24653 if (info->vars_size)
24654 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
24655 info->vars_size);
24656
24657 if (info->parm_size)
24658 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
24659
24660 if (info->fixed_size)
24661 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
24662
24663 if (info->gp_size)
24664 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
24665
24666 if (info->fp_size)
24667 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
24668
24669 if (info->altivec_size)
24670 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
24671
24672 if (info->vrsave_size)
24673 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
24674
24675 if (info->altivec_padding_size)
24676 fprintf (stderr, "\taltivec_padding_size= %5d\n",
24677 info->altivec_padding_size);
24678
24679 if (info->cr_size)
24680 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
24681
24682 if (info->save_size)
24683 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
24684
24685 if (info->reg_size != 4)
24686 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
24687
24688 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
24689
24690 if (info->abi == ABI_DARWIN)
24691 fprintf (stderr, "\tWORLD_SAVE_P = %5d\n", WORLD_SAVE_P(info));
24692
24693 fprintf (stderr, "\n");
24694 }
24695
24696 rtx
24697 rs6000_return_addr (int count, rtx frame)
24698 {
24699 /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
24700 is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin. */
24701 if (count != 0
24702 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
24703 {
24704 cfun->machine->ra_needs_full_frame = 1;
24705
24706 if (count == 0)
24707 /* FRAME is set to frame_pointer_rtx by the generic code, but that
24708 is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD. */
24709 frame = stack_pointer_rtx;
24710 rtx prev_frame_addr = memory_address (Pmode, frame);
24711 rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
24712 rtx lr_save_off = plus_constant (Pmode,
24713 prev_frame, RETURN_ADDRESS_OFFSET);
24714 rtx lr_save_addr = memory_address (Pmode, lr_save_off);
24715 return gen_rtx_MEM (Pmode, lr_save_addr);
24716 }
24717
24718 cfun->machine->ra_need_lr = 1;
24719 return get_hard_reg_initial_val (Pmode, LR_REGNO);
24720 }
24721
24722 /* Say whether a function is a candidate for sibcall handling or not. */
24723
24724 static bool
24725 rs6000_function_ok_for_sibcall (tree decl, tree exp)
24726 {
24727 tree fntype;
24728
24729 /* The sibcall epilogue may clobber the static chain register.
24730 ??? We could work harder and avoid that, but it's probably
24731 not worth the hassle in practice. */
24732 if (CALL_EXPR_STATIC_CHAIN (exp))
24733 return false;
24734
24735 if (decl)
24736 fntype = TREE_TYPE (decl);
24737 else
24738 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
24739
24740 /* We can't do it if the called function has more vector parameters
24741 than the current function; there's nowhere to put the VRsave code. */
24742 if (TARGET_ALTIVEC_ABI
24743 && TARGET_ALTIVEC_VRSAVE
24744 && !(decl && decl == current_function_decl))
24745 {
24746 function_args_iterator args_iter;
24747 tree type;
24748 int nvreg = 0;
24749
24750 /* Functions with vector parameters are required to have a
24751 prototype, so the argument type info must be available
24752 here. */
FOREACH_FUNCTION_ARGS (fntype, type, args_iter)
24754 if (TREE_CODE (type) == VECTOR_TYPE
24755 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24756 nvreg++;
24757
FOREACH_FUNCTION_ARGS (TREE_TYPE (current_function_decl), type, args_iter)
24759 if (TREE_CODE (type) == VECTOR_TYPE
24760 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
24761 nvreg--;
24762
24763 if (nvreg > 0)
24764 return false;
24765 }
24766
24767 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
functions, because the callee may have a different TOC pointer from
24769 the caller and there's no way to ensure we restore the TOC when
24770 we return. With the secure-plt SYSV ABI we can't make non-local
24771 calls when -fpic/PIC because the plt call stubs use r30. */
24772 if (DEFAULT_ABI == ABI_DARWIN
24773 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
24774 && decl
24775 && !DECL_EXTERNAL (decl)
24776 && !DECL_WEAK (decl)
24777 && (*targetm.binds_local_p) (decl))
24778 || (DEFAULT_ABI == ABI_V4
24779 && (!TARGET_SECURE_PLT
24780 || !flag_pic
24781 || (decl
24782 && (*targetm.binds_local_p) (decl)))))
24783 {
24784 tree attr_list = TYPE_ATTRIBUTES (fntype);
24785
24786 if (!lookup_attribute ("longcall", attr_list)
24787 || lookup_attribute ("shortcall", attr_list))
24788 return true;
24789 }
24790
24791 return false;
24792 }
24793
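/* Return nonzero if the link register can be clobbered somewhere other
   than by prologue/epilogue code, i.e. if LR really needs saving.  */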
24794 static int
24795 rs6000_ra_ever_killed (void)
24796 {
24797 rtx_insn *top;
24798 rtx reg;
24799 rtx_insn *insn;
24800
24801 if (cfun->is_thunk)
24802 return 0;
24803
24804 if (cfun->machine->lr_save_state)
24805 return cfun->machine->lr_save_state - 1;
24806
24807 /* regs_ever_live has LR marked as used if any sibcalls are present,
24808 but this should not force saving and restoring in the
24809 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
24810 clobbers LR, so that is inappropriate. */
24811
24812 /* Also, the prologue can generate a store into LR that
24813 doesn't really count, like this:
24814
24815 move LR->R0
24816 bcl to set PIC register
24817 move LR->R31
24818 move R0->LR
24819
24820 When we're called from the epilogue, we need to avoid counting
24821 this as a store. */
24822
24823 push_topmost_sequence ();
24824 top = get_insns ();
24825 pop_topmost_sequence ();
24826 reg = gen_rtx_REG (Pmode, LR_REGNO);
24827
24828 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
24829 {
24830 if (INSN_P (insn))
24831 {
24832 if (CALL_P (insn))
24833 {
24834 if (!SIBLING_CALL_P (insn))
24835 return 1;
24836 }
24837 else if (find_regno_note (insn, REG_INC, LR_REGNO))
24838 return 1;
24839 else if (set_of (reg, insn) != NULL_RTX
24840 && !prologue_epilogue_contains (insn))
24841 return 1;
24842 }
24843 }
24844 return 0;
24845 }
24846 \f
24847 /* Emit instructions needed to load the TOC register.
24848 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
24849 a constant pool; or for SVR4 -fpic. */
24850
24851 void
24852 rs6000_emit_load_toc_table (int fromprolog)
24853 {
24854 rtx dest;
24855 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
24856
24857 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
24858 {
24859 char buf[30];
24860 rtx lab, tmp1, tmp2, got;
24861
24862 lab = gen_label_rtx ();
24863 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
24864 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24865 if (flag_pic == 2)
24866 {
24867 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24868 need_toc_init = 1;
24869 }
24870 else
24871 got = rs6000_got_sym ();
24872 tmp1 = tmp2 = dest;
24873 if (!fromprolog)
24874 {
24875 tmp1 = gen_reg_rtx (Pmode);
24876 tmp2 = gen_reg_rtx (Pmode);
24877 }
24878 emit_insn (gen_load_toc_v4_PIC_1 (lab));
24879 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
24880 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
24881 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
24882 }
24883 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
24884 {
24885 emit_insn (gen_load_toc_v4_pic_si ());
24886 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24887 }
24888 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
24889 {
24890 char buf[30];
24891 rtx temp0 = (fromprolog
24892 ? gen_rtx_REG (Pmode, 0)
24893 : gen_reg_rtx (Pmode));
24894
24895 if (fromprolog)
24896 {
24897 rtx symF, symL;
24898
24899 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
24900 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24901
24902 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
24903 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
24904
24905 emit_insn (gen_load_toc_v4_PIC_1 (symF));
24906 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24907 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
24908 }
24909 else
24910 {
24911 rtx tocsym, lab;
24912
24913 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24914 need_toc_init = 1;
24915 lab = gen_label_rtx ();
24916 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
24917 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
24918 if (TARGET_LINK_STACK)
24919 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
24920 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
24921 }
24922 emit_insn (gen_addsi3 (dest, temp0, dest));
24923 }
24924 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
24925 {
24926 /* This is for AIX code running in non-PIC ELF32. */
24927 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
24928
24929 need_toc_init = 1;
24930 emit_insn (gen_elf_high (dest, realsym));
24931 emit_insn (gen_elf_low (dest, dest, realsym));
24932 }
24933 else
24934 {
24935 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24936
24937 if (TARGET_32BIT)
24938 emit_insn (gen_load_toc_aix_si (dest));
24939 else
24940 emit_insn (gen_load_toc_aix_di (dest));
24941 }
24942 }
24943
24944 /* Emit instructions to restore the link register after determining where
24945 its value has been stored. */
24946
24947 void
24948 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
24949 {
24950 rs6000_stack_t *info = rs6000_stack_info ();
24951 rtx operands[2];
24952
24953 operands[0] = source;
24954 operands[1] = scratch;
24955
24956 if (info->lr_save_p)
24957 {
24958 rtx frame_rtx = stack_pointer_rtx;
24959 HOST_WIDE_INT sp_offset = 0;
24960 rtx tmp;
24961
24962 if (frame_pointer_needed
24963 || cfun->calls_alloca
24964 || info->total_size > 32767)
24965 {
24966 tmp = gen_frame_mem (Pmode, frame_rtx);
24967 emit_move_insn (operands[1], tmp);
24968 frame_rtx = operands[1];
24969 }
24970 else if (info->push_p)
24971 sp_offset = info->total_size;
24972
24973 tmp = plus_constant (Pmode, frame_rtx,
24974 info->lr_save_offset + sp_offset);
24975 tmp = gen_frame_mem (Pmode, tmp);
24976 emit_move_insn (tmp, operands[0]);
24977 }
24978 else
24979 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
24980
24981 /* Freeze lr_save_p. We've just emitted rtl that depends on the
24982 state of lr_save_p so any change from here on would be a bug. In
24983 particular, stop rs6000_ra_ever_killed from considering the SET
24984 of lr we may have added just above. */
24985 cfun->machine->lr_save_state = info->lr_save_p + 1;
24986 }
24987
24988 static GTY(()) alias_set_type set = -1;
24989
24990 alias_set_type
24991 get_TOC_alias_set (void)
24992 {
24993 if (set == -1)
24994 set = new_alias_set ();
24995 return set;
24996 }
24997
24998 /* This returns nonzero if the current function uses the TOC. This is
24999 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
25000 is generated by the ABI_V4 load_toc_* patterns.
25001 Return 2 instead of 1 if the load_toc_* pattern is in the function
25002 partition that doesn't start the function. */
25003 #if TARGET_ELF
25004 static int
25005 uses_TOC (void)
25006 {
25007 rtx_insn *insn;
25008 int ret = 1;
25009
25010 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
25011 {
25012 if (INSN_P (insn))
25013 {
25014 rtx pat = PATTERN (insn);
25015 int i;
25016
25017 if (GET_CODE (pat) == PARALLEL)
25018 for (i = 0; i < XVECLEN (pat, 0); i++)
25019 {
25020 rtx sub = XVECEXP (pat, 0, i);
25021 if (GET_CODE (sub) == USE)
25022 {
25023 sub = XEXP (sub, 0);
25024 if (GET_CODE (sub) == UNSPEC
25025 && XINT (sub, 1) == UNSPEC_TOC)
25026 return ret;
25027 }
25028 }
25029 }
25030 else if (crtl->has_bb_partition
25031 && NOTE_P (insn)
25032 && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
25033 ret = 2;
25034 }
25035 return 0;
25036 }
25037 #endif
25038
25039 rtx
25040 create_TOC_reference (rtx symbol, rtx largetoc_reg)
25041 {
25042 rtx tocrel, tocreg, hi;
25043
25044 if (TARGET_DEBUG_ADDR)
25045 {
25046 if (SYMBOL_REF_P (symbol))
25047 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
25048 XSTR (symbol, 0));
25049 else
25050 {
25051 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
25052 GET_RTX_NAME (GET_CODE (symbol)));
25053 debug_rtx (symbol);
25054 }
25055 }
25056
25057 if (!can_create_pseudo_p ())
25058 df_set_regs_ever_live (TOC_REGISTER, true);
25059
25060 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
25061 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
25062 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
25063 return tocrel;
25064
25065 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
25066 if (largetoc_reg != NULL)
25067 {
25068 emit_move_insn (largetoc_reg, hi);
25069 hi = largetoc_reg;
25070 }
25071 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
25072 }
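/* For the medium/large code models after reload, the value returned
   above has roughly the shape (illustrative)
     (lo_sum (high (unspec [SYM, r2] UNSPEC_TOCREL))
	     (unspec [SYM, r2] UNSPEC_TOCREL))
   which is later emitted as an @toc@ha/@toc@l addis-and-offset pair.  */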
25073
25074 /* Issue assembly directives that create a reference to the given DWARF
25075 FRAME_TABLE_LABEL from the current function section. */
25076 void
25077 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
25078 {
25079 fprintf (asm_out_file, "\t.ref %s\n",
25080 (* targetm.strip_name_encoding) (frame_table_label));
25081 }
25082 \f
25083 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
25084 and the change to the stack pointer. */
25085
25086 static void
25087 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
25088 {
25089 rtvec p;
25090 int i;
25091 rtx regs[3];
25092
25093 i = 0;
25094 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25095 if (hard_frame_needed)
25096 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
25097 if (!(REGNO (fp) == STACK_POINTER_REGNUM
25098 || (hard_frame_needed
25099 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
25100 regs[i++] = fp;
25101
25102 p = rtvec_alloc (i);
25103 while (--i >= 0)
25104 {
25105 rtx mem = gen_frame_mem (BLKmode, regs[i]);
25106 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
25107 }
25108
25109 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
25110 }
25111
25112 /* Allocate SIZE_INT bytes on the stack using a store with update style insn
25113 and set the appropriate attributes for the generated insn. Return the
25114 first insn which adjusts the stack pointer or the last insn before
25115 the stack adjustment loop.
25116
25117 SIZE_INT is used to create the CFI note for the allocation.
25118
25119 SIZE_RTX is an rtx containing the size of the adjustment. Note that
since stacks grow to lower addresses, its runtime value is -SIZE_INT.
25121
25122 ORIG_SP contains the backchain value that must be stored at *sp. */
25123
25124 static rtx_insn *
25125 rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
25126 {
25127 rtx_insn *insn;
25128
25129 rtx size_rtx = GEN_INT (-size_int);
25130 if (size_int > 32767)
25131 {
25132 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25133 /* Need a note here so that try_split doesn't get confused. */
25134 if (get_last_insn () == NULL_RTX)
25135 emit_note (NOTE_INSN_DELETED);
25136 insn = emit_move_insn (tmp_reg, size_rtx);
25137 try_split (PATTERN (insn), insn, 0);
25138 size_rtx = tmp_reg;
25139 }
25140
25141 if (TARGET_32BIT)
25142 insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
25143 stack_pointer_rtx,
25144 size_rtx,
25145 orig_sp));
25146 else
25147 insn = emit_insn (gen_movdi_update_stack (stack_pointer_rtx,
25148 stack_pointer_rtx,
25149 size_rtx,
25150 orig_sp));
25151 rtx par = PATTERN (insn);
25152 gcc_assert (GET_CODE (par) == PARALLEL);
25153 rtx set = XVECEXP (par, 0, 0);
25154 gcc_assert (GET_CODE (set) == SET);
25155 rtx mem = SET_DEST (set);
25156 gcc_assert (MEM_P (mem));
25157 MEM_NOTRAP_P (mem) = 1;
25158 set_mem_alias_set (mem, get_frame_alias_set ());
25159
25160 RTX_FRAME_RELATED_P (insn) = 1;
25161 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25162 gen_rtx_SET (stack_pointer_rtx,
25163 gen_rtx_PLUS (Pmode,
25164 stack_pointer_rtx,
25165 GEN_INT (-size_int))));
25166
25167 /* Emit a blockage to ensure the allocation/probing insns are
25168 not optimized, combined, removed, etc. Add REG_STACK_CHECK
25169 note for similar reasons. */
25170 if (flag_stack_clash_protection)
25171 {
25172 add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
25173 emit_insn (gen_blockage ());
25174 }
25175
25176 return insn;
25177 }
25178
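/* Return the interval in bytes between stack-clash probes; with the
   default --param stack-clash-protection-probe-interval=12 this is
   1 << 12 == 4096.  */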
25179 static HOST_WIDE_INT
25180 get_stack_clash_protection_probe_interval (void)
25181 {
25182 return (HOST_WIDE_INT_1U
25183 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
25184 }
25185
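/* Return the size in bytes of the stack-clash guard area, derived in
   the same way from --param stack-clash-protection-guard-size.  */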
25186 static HOST_WIDE_INT
25187 get_stack_clash_protection_guard_size (void)
25188 {
25189 return (HOST_WIDE_INT_1U
25190 << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
25191 }
25192
25193 /* Allocate ORIG_SIZE bytes on the stack and probe the newly
25194 allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.
25195
25196 COPY_REG, if non-null, should contain a copy of the original
25197 stack pointer at exit from this function.
25198
This is subtly different from the Ada probing in that it tries hard to
25200 prevent attacks that jump the stack guard. Thus it is never allowed to
25201 allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
25202 space without a suitable probe. */
25203 static rtx_insn *
25204 rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
25205 rtx copy_reg)
25206 {
25207 rtx orig_sp = copy_reg;
25208
25209 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25210
25211 /* Round the size down to a multiple of PROBE_INTERVAL. */
25212 HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);
25213
/* If a copy was explicitly requested, or the rounded size is not the
   same as the original size, or the rounded size is greater than a
   page, then we will need a copy of the original stack pointer.  */
25218 if (rounded_size != orig_size
25219 || rounded_size > probe_interval
25220 || copy_reg)
25221 {
25222 /* If the caller did not request a copy of the incoming stack
25223 pointer, then we use r0 to hold the copy. */
25224 if (!copy_reg)
25225 orig_sp = gen_rtx_REG (Pmode, 0);
25226 emit_move_insn (orig_sp, stack_pointer_rtx);
25227 }
25228
/* There are three cases here.
25230
25231 One is a single probe which is the most common and most efficiently
25232 implemented as it does not have to have a copy of the original
25233 stack pointer if there are no residuals.
25234
25235 Second is unrolled allocation/probes which we use if there's just
25236 a few of them. It needs to save the original stack pointer into a
25237 temporary for use as a source register in the allocation/probe.
25238
25239 Last is a loop. This is the most uncommon case and least efficient. */
25240 rtx_insn *retval = NULL;
25241 if (rounded_size == probe_interval)
25242 {
25243 retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);
25244
25245 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25246 }
25247 else if (rounded_size <= 8 * probe_interval)
25248 {
25249 /* The ABI requires using the store with update insns to allocate
space and store the backchain into the stack.
25251
25252 So we save the current stack pointer into a temporary, then
25253 emit the store-with-update insns to store the saved stack pointer
25254 into the right location in each new page. */
25255 for (int i = 0; i < rounded_size; i += probe_interval)
25256 {
25257 rtx_insn *insn
25258 = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);
25259
25260 /* Save the first stack adjustment in RETVAL. */
25261 if (i == 0)
25262 retval = insn;
25263 }
25264
25265 dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
25266 }
25267 else
25268 {
25269 /* Compute the ending address. */
25270 rtx end_addr
25271 = copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
25272 rtx rs = GEN_INT (-rounded_size);
25273 rtx_insn *insn;
25274 if (add_operand (rs, Pmode))
25275 insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
25276 else
25277 {
25278 emit_move_insn (end_addr, GEN_INT (-rounded_size));
25279 insn = emit_insn (gen_add3_insn (end_addr, end_addr,
25280 stack_pointer_rtx));
25281 /* Describe the effect of INSN to the CFI engine. */
25282 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
25283 gen_rtx_SET (end_addr,
25284 gen_rtx_PLUS (Pmode, stack_pointer_rtx,
25285 rs)));
25286 }
25287 RTX_FRAME_RELATED_P (insn) = 1;
25288
25289 /* Emit the loop. */
25290 if (TARGET_64BIT)
25291 retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
25292 stack_pointer_rtx, orig_sp,
25293 end_addr));
25294 else
25295 retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
25296 stack_pointer_rtx, orig_sp,
25297 end_addr));
25298 RTX_FRAME_RELATED_P (retval) = 1;
25299 /* Describe the effect of INSN to the CFI engine. */
25300 add_reg_note (retval, REG_FRAME_RELATED_EXPR,
25301 gen_rtx_SET (stack_pointer_rtx, end_addr));
25302
25303 /* Emit a blockage to ensure the allocation/probing insns are
25304 not optimized, combined, removed, etc. Other cases handle this
25305 within their call to rs6000_emit_allocate_stack_1. */
25306 emit_insn (gen_blockage ());
25307
25308 dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
25309 }
25310
25311 if (orig_size != rounded_size)
25312 {
25313 /* Allocate (and implicitly probe) any residual space. */
25314 HOST_WIDE_INT residual = orig_size - rounded_size;
25315
25316 rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);
25317
25318 /* If the residual was the only allocation, then we can return the
25319 allocating insn. */
25320 if (!retval)
25321 retval = insn;
25322 }
25323
25324 return retval;
25325 }
25326
25327 /* Emit the correct code for allocating stack space, as insns.
25328 If COPY_REG, make sure a copy of the old frame is left there.
25329 The generated code may use hard register 0 as a temporary. */
25330
25331 static rtx_insn *
25332 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
25333 {
25334 rtx_insn *insn;
25335 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
25336 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
25337 rtx todec = gen_int_mode (-size, Pmode);
25338
25339 if (INTVAL (todec) != -size)
25340 {
25341 warning (0, "stack frame too large");
25342 emit_insn (gen_trap ());
25343 return 0;
25344 }
25345
25346 if (crtl->limit_stack)
25347 {
25348 if (REG_P (stack_limit_rtx)
25349 && REGNO (stack_limit_rtx) > 1
25350 && REGNO (stack_limit_rtx) <= 31)
25351 {
25352 rtx_insn *insn
25353 = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
25354 gcc_assert (insn);
25355 emit_insn (insn);
25356 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
25357 }
25358 else if (SYMBOL_REF_P (stack_limit_rtx)
25359 && TARGET_32BIT
25360 && DEFAULT_ABI == ABI_V4
25361 && !flag_pic)
25362 {
25363 rtx toload = gen_rtx_CONST (VOIDmode,
25364 gen_rtx_PLUS (Pmode,
25365 stack_limit_rtx,
25366 GEN_INT (size)));
25367
25368 emit_insn (gen_elf_high (tmp_reg, toload));
25369 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
25370 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
25371 const0_rtx));
25372 }
25373 else
25374 warning (0, "stack limit expression is not supported");
25375 }
25376
25377 if (flag_stack_clash_protection)
25378 {
25379 if (size < get_stack_clash_protection_guard_size ())
25380 dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
25381 else
25382 {
25383 rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
25384 copy_reg);
25385
/* If we asked for a copy with an offset, then we still need to add
   in the offset.  */
25388 if (copy_reg && copy_off)
25389 emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
25390 return insn;
25391 }
25392 }
25393
25394 if (copy_reg)
25395 {
25396 if (copy_off != 0)
25397 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
25398 else
25399 emit_move_insn (copy_reg, stack_reg);
25400 }
25401
25402 /* Since we didn't use gen_frame_mem to generate the MEM, grab
25403 it now and set the alias set/attributes. The above gen_*_update
25404 calls will generate a PARALLEL with the MEM set being the first
25405 operation. */
25406 insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
25407 return insn;
25408 }
25409
25410 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
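/* With the default STACK_CHECK_PROBE_INTERVAL_EXP of 12 this is a
   4096-byte (one page) probe interval.  */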
25411
25412 #if PROBE_INTERVAL > 32768
25413 #error Cannot use indexed addressing mode for stack probing
25414 #endif
25415
25416 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
25417 inclusive. These are offsets from the current stack pointer. */
25418
25419 static void
25420 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
25421 {
25422 /* See if we have a constant small number of probes to generate. If so,
25423 that's the easy case. */
25424 if (first + size <= 32768)
25425 {
25426 HOST_WIDE_INT i;
25427
25428 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
25429 it exceeds SIZE. If only one probe is needed, this will not
25430 generate any code. Then probe at FIRST + SIZE. */
25431 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
25432 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25433 -(first + i)));
25434
25435 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
25436 -(first + size)));
25437 }
25438
25439 /* Otherwise, do the same as above, but in a loop. Note that we must be
25440 extra careful with variables wrapping around because we might be at
25441 the very top (or the very bottom) of the address space and we have
25442 to be able to handle this case properly; in particular, we use an
25443 equality test for the loop condition. */
25444 else
25445 {
25446 HOST_WIDE_INT rounded_size;
25447 rtx r12 = gen_rtx_REG (Pmode, 12);
25448 rtx r0 = gen_rtx_REG (Pmode, 0);
25449
25450 /* Sanity check for the addressing mode we're going to use. */
25451 gcc_assert (first <= 32768);
25452
25453 /* Step 1: round SIZE to the previous multiple of the interval. */
25454
25455 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
25456
25457
25458 /* Step 2: compute initial and final value of the loop counter. */
25459
25460 /* TEST_ADDR = SP + FIRST. */
25461 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
25462 -first)));
25463
25464 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
25465 if (rounded_size > 32768)
25466 {
25467 emit_move_insn (r0, GEN_INT (-rounded_size));
25468 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
25469 }
25470 else
25471 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
25472 -rounded_size)));
25473
25474
25475 /* Step 3: the loop
25476
25477 do
25478 {
25479 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
25480 probe at TEST_ADDR
25481 }
25482 while (TEST_ADDR != LAST_ADDR)
25483
25484 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
25485 until it is equal to ROUNDED_SIZE. */
25486
25487 if (TARGET_64BIT)
25488 emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
25489 else
25490 emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));
25491
25492
25493 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
25494 that SIZE is equal to ROUNDED_SIZE. */
25495
25496 if (size != rounded_size)
25497 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
25498 }
25499 }
25500
25501 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
25502 addresses, not offsets. */
25503
25504 static const char *
25505 output_probe_stack_range_1 (rtx reg1, rtx reg2)
25506 {
25507 static int labelno = 0;
25508 char loop_lab[32];
25509 rtx xops[2];
25510
25511 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25512
25513 /* Loop. */
25514 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25515
25516 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
25517 xops[0] = reg1;
25518 xops[1] = GEN_INT (-PROBE_INTERVAL);
25519 output_asm_insn ("addi %0,%0,%1", xops);
25520
25521 /* Probe at TEST_ADDR. */
25522 xops[1] = gen_rtx_REG (Pmode, 0);
25523 output_asm_insn ("stw %1,0(%0)", xops);
25524
25525 /* Test if TEST_ADDR == LAST_ADDR. */
25526 xops[1] = reg2;
25527 if (TARGET_64BIT)
25528 output_asm_insn ("cmpd 0,%0,%1", xops);
25529 else
25530 output_asm_insn ("cmpw 0,%0,%1", xops);
25531
25532 /* Branch. */
25533 fputs ("\tbne 0,", asm_out_file);
25534 assemble_name_raw (asm_out_file, loop_lab);
25535 fputc ('\n', asm_out_file);
25536
25537 return "";
25538 }
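/* For reference, a sketch of the loop emitted above, assuming 32-bit
   code, REG1 in r12, REG2 in r0 and a 4 KiB probe interval
   (register numbers and label syntax are illustrative):

     .LPSRL0:
	addi 12,12,-4096
	stw 0,0(12)
	cmpw 0,12,0
	bne 0,.LPSRL0  */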
25539
25540 /* This function is called when rs6000_frame_related is processing
25541 SETs within a PARALLEL, and returns whether the REGNO save ought to
25542 be marked RTX_FRAME_RELATED_P. The PARALLELs involved are those
25543 for out-of-line register save functions, store multiple, and the
25544 Darwin world_save. They may contain registers that don't really
25545 need saving. */
25546
25547 static bool
25548 interesting_frame_related_regno (unsigned int regno)
25549 {
/* Apparent saves of r0 are actually saving LR.  It doesn't make
25551 sense to substitute the regno here to test save_reg_p (LR_REGNO).
25552 We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
25553 (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
25554 as frame related. */
25555 if (regno == 0)
25556 return true;
25557 /* If we see CR2 then we are here on a Darwin world save. Saves of
25558 CR2 signify the whole CR is being saved. This is a long-standing
25559 ABI wart fixed by ELFv2. As for r0/lr there is no need to check
25560 that CR needs to be saved. */
25561 if (regno == CR2_REGNO)
25562 return true;
25563 /* Omit frame info for any user-defined global regs. If frame info
25564 is supplied for them, frame unwinding will restore a user reg.
25565 Also omit frame info for any reg we don't need to save, as that
25566 bloats frame info and can cause problems with shrink wrapping.
25567 Since global regs won't be seen as needing to be saved, both of
25568 these conditions are covered by save_reg_p. */
25569 return save_reg_p (regno);
25570 }
25571
25572 /* Probe a range of stack addresses from REG1 to REG3 inclusive. These are
25573 addresses, not offsets.
25574
25575 REG2 contains the backchain that must be stored into *sp at each allocation.
25576
25577 This is subtly different from the Ada probing above in that it tries hard
25578 to prevent attacks that jump the stack guard. Thus, it is never allowed
25579 to allocate more than PROBE_INTERVAL bytes of stack space without a
25580 suitable probe. */
25581
25582 static const char *
25583 output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
25584 {
25585 static int labelno = 0;
25586 char loop_lab[32];
25587 rtx xops[3];
25588
25589 HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();
25590
25591 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
25592
25593 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
25594
25595 /* This allocates and probes. */
25596 xops[0] = reg1;
25597 xops[1] = reg2;
25598 xops[2] = GEN_INT (-probe_interval);
25599 if (TARGET_64BIT)
25600 output_asm_insn ("stdu %1,%2(%0)", xops);
25601 else
25602 output_asm_insn ("stwu %1,%2(%0)", xops);
25603
25604 /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR. */
25605 xops[0] = reg1;
25606 xops[1] = reg3;
25607 if (TARGET_64BIT)
25608 output_asm_insn ("cmpd 0,%0,%1", xops);
25609 else
25610 output_asm_insn ("cmpw 0,%0,%1", xops);
25611
25612 fputs ("\tbne 0,", asm_out_file);
25613 assemble_name_raw (asm_out_file, loop_lab);
25614 fputc ('\n', asm_out_file);
25615
25616 return "";
25617 }
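/* An illustrative sketch of the 64-bit loop above, assuming a 4 kB probe
   interval, with REG1 shown as the stack pointer r1, and with rB and rE
   standing in for REG2 (the backchain) and REG3 (the final sp value):

	.LPSRL1:
	stdu rB,-4096(1)	# allocate one interval; the update-form
				# store writes the backchain and probes
				# the new bottom of the stack
	cmpd 0,1,rE		# reached the final sp value?
	bne 0,.LPSRL1

   Pairing every allocation with a store is what stops code from jumping
   over the guard page.  */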
25618
25619 /* Wrapper around the output_probe_stack_range routines. */
25620 const char *
25621 output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
25622 {
25623 if (flag_stack_clash_protection)
25624 return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
25625 else
25626 return output_probe_stack_range_1 (reg1, reg3);
25627 }
25628
25629 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
25630 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
25631 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
25632 deduce these equivalences by itself so it wasn't necessary to hold
25633 its hand so much. Don't be tempted to always supply d2_f_d_e with
25634 the actual cfa register, ie. r31 when we are using a hard frame
25635 pointer. That fails when saving regs off r1, and sched moves the
25636 r31 setup past the reg saves. */
25637
25638 static rtx_insn *
25639 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
25640 rtx reg2, rtx repl2)
25641 {
25642 rtx repl;
25643
25644 if (REGNO (reg) == STACK_POINTER_REGNUM)
25645 {
25646 gcc_checking_assert (val == 0);
25647 repl = NULL_RTX;
25648 }
25649 else
25650 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
25651 GEN_INT (val));
25652
25653 rtx pat = PATTERN (insn);
25654 if (!repl && !reg2)
25655 {
25656 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
25657 if (GET_CODE (pat) == PARALLEL)
25658 for (int i = 0; i < XVECLEN (pat, 0); i++)
25659 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25660 {
25661 rtx set = XVECEXP (pat, 0, i);
25662
25663 if (!REG_P (SET_SRC (set))
25664 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25665 RTX_FRAME_RELATED_P (set) = 1;
25666 }
25667 RTX_FRAME_RELATED_P (insn) = 1;
25668 return insn;
25669 }
25670
25671 /* We expect that 'pat' is either a SET or a PARALLEL containing
25672 SETs (and possibly other stuff). In a PARALLEL, all the SETs
25673 are important so they all have to be marked RTX_FRAME_RELATED_P.
25674 Call simplify_replace_rtx on the SETs rather than the whole insn
25675 so as to leave the other stuff alone (for example USE of r12). */
25676
25677 set_used_flags (pat);
25678 if (GET_CODE (pat) == SET)
25679 {
25680 if (repl)
25681 pat = simplify_replace_rtx (pat, reg, repl);
25682 if (reg2)
25683 pat = simplify_replace_rtx (pat, reg2, repl2);
25684 }
25685 else if (GET_CODE (pat) == PARALLEL)
25686 {
25687 pat = shallow_copy_rtx (pat);
25688 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
25689
25690 for (int i = 0; i < XVECLEN (pat, 0); i++)
25691 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
25692 {
25693 rtx set = XVECEXP (pat, 0, i);
25694
25695 if (repl)
25696 set = simplify_replace_rtx (set, reg, repl);
25697 if (reg2)
25698 set = simplify_replace_rtx (set, reg2, repl2);
25699 XVECEXP (pat, 0, i) = set;
25700
25701 if (!REG_P (SET_SRC (set))
25702 || interesting_frame_related_regno (REGNO (SET_SRC (set))))
25703 RTX_FRAME_RELATED_P (set) = 1;
25704 }
25705 }
25706 else
25707 gcc_unreachable ();
25708
25709 RTX_FRAME_RELATED_P (insn) = 1;
25710 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
25711
25712 return insn;
25713 }
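/* A rough example of the substitution performed, with register numbers
   assumed for illustration: if INSN stores r30 through a frame register
   set up as r1 - 144,

     (set (mem:DI (plus:DI (reg 11) (const_int 8))) (reg:DI 30))

   and REG is r11 with VAL -144, the REG_FRAME_RELATED_EXPR note records
   the simplified sp-relative form

     (set (mem:DI (plus:DI (reg 1) (const_int -136))) (reg:DI 30))

   so the unwinder describes the save slot relative to r1 even though
   the insn addresses it through r11.  */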
25714
25715 /* Returns an insn that has a vrsave set operation with the
25716 appropriate CLOBBERs. */
25717
25718 static rtx
25719 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
25720 {
25721 int nclobs, i;
25722 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
25723 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
25724
25725 clobs[0]
25726 = gen_rtx_SET (vrsave,
25727 gen_rtx_UNSPEC_VOLATILE (SImode,
25728 gen_rtvec (2, reg, vrsave),
25729 UNSPECV_SET_VRSAVE));
25730
25731 nclobs = 1;
25732
25733 /* We need to clobber the registers in the mask so the scheduler
25734 does not move sets to VRSAVE before sets of AltiVec registers.
25735
25736 However, if the function receives nonlocal gotos, reload will set
25737 all call saved registers live. We will end up with:
25738
25739 (set (reg 999) (mem))
25740 (parallel [ (set (reg vrsave) (unspec blah))
25741 (clobber (reg 999))])
25742
25743 The clobber will cause the store into reg 999 to be dead, and
25744 flow will attempt to delete an epilogue insn. In this case, we
25745 need an unspec use/set of the register. */
25746
25747 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
25748 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
25749 {
25750 if (!epiloguep || call_used_regs [i])
25751 clobs[nclobs++] = gen_hard_reg_clobber (V4SImode, i);
25752 else
25753 {
25754 rtx reg = gen_rtx_REG (V4SImode, i);
25755
25756 clobs[nclobs++]
25757 = gen_rtx_SET (reg,
25758 gen_rtx_UNSPEC (V4SImode,
25759 gen_rtvec (1, reg), 27));
25760 }
25761 }
25762
25763 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
25764
25765 for (i = 0; i < nclobs; ++i)
25766 XVECEXP (insn, 0, i) = clobs[i];
25767
25768 return insn;
25769 }
25770
25771 static rtx
25772 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
25773 {
25774 rtx addr, mem;
25775
25776 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
25777 mem = gen_frame_mem (GET_MODE (reg), addr);
25778 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
25779 }
25780
25781 static rtx
25782 gen_frame_load (rtx reg, rtx frame_reg, int offset)
25783 {
25784 return gen_frame_set (reg, frame_reg, offset, false);
25785 }
25786
25787 static rtx
25788 gen_frame_store (rtx reg, rtx frame_reg, int offset)
25789 {
25790 return gen_frame_set (reg, frame_reg, offset, true);
25791 }
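/* For example (mode and registers chosen for illustration),
   gen_frame_store (gen_rtx_REG (DImode, 30), stack_pointer_rtx, -16)
   builds

     (set (mem:DI (plus:DI (reg 1) (const_int -16))) (reg:DI 30))

   and gen_frame_load builds the mirror image with the MEM as source.  */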
25792
25793 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
25794 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
25795
25796 static rtx_insn *
25797 emit_frame_save (rtx frame_reg, machine_mode mode,
25798 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
25799 {
25800 rtx reg;
25801
25802 /* Some cases that need register indexed addressing. */
25803 gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
25804 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));
25805
25806 reg = gen_rtx_REG (mode, regno);
25807 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
25808 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
25809 NULL_RTX, NULL_RTX);
25810 }
25811
25812 /* Emit an offset memory reference suitable for a frame store, while
25813 converting to a valid addressing mode. */
25814
25815 static rtx
25816 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
25817 {
25818 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
25819 }
25820
25821 #ifndef TARGET_FIX_AND_CONTINUE
25822 #define TARGET_FIX_AND_CONTINUE 0
25823 #endif
25824
25825 /* The first saved register is really GPR 13 or 14, FPR 14, or VR 20; we need the smallest of these. */
25826 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
25827 #define LAST_SAVRES_REGISTER 31
25828 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
25829
25830 enum {
25831 SAVRES_LR = 0x1,
25832 SAVRES_SAVE = 0x2,
25833 SAVRES_REG = 0x0c,
25834 SAVRES_GPR = 0,
25835 SAVRES_FPR = 4,
25836 SAVRES_VR = 8
25837 };
25838
25839 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
25840
25841 /* Temporary holding space for an out-of-line register save/restore
25842 routine name. */
25843 static char savres_routine_name[30];
25844
25845 /* Return the name for an out-of-line register save/restore routine.
25846 SEL encodes the register class and whether we save or restore; see the SAVRES_* flags above. */
25847
25848 static char *
25849 rs6000_savres_routine_name (int regno, int sel)
25850 {
25851 const char *prefix = "";
25852 const char *suffix = "";
25853
25854 /* Different targets are supposed to define
25855 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
25856 routine name could be defined with:
25857
25858 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
25859
25860 This is a nice idea in theory, but in reality, things are
25861 complicated in several ways:
25862
25863 - ELF targets have save/restore routines for GPRs.
25864
25865 - PPC64 ELF targets have routines for save/restore of GPRs that
25866 differ in what they do with the link register, so having a set
25867 prefix doesn't work. (We only use one of the save routines at
25868 the moment, though.)
25869
25870 - PPC32 elf targets have "exit" versions of the restore routines
25871 that restore the link register and can save some extra space.
25872 These require an extra suffix. (There are also "tail" versions
25873 of the restore routines and "GOT" versions of the save routines,
25874 but we don't generate those at present. Same problems apply,
25875 though.)
25876
25877 We deal with all this by synthesizing our own prefix/suffix and
25878 using that for the simple sprintf call shown above. */
25879 if (DEFAULT_ABI == ABI_V4)
25880 {
25881 if (TARGET_64BIT)
25882 goto aix_names;
25883
25884 if ((sel & SAVRES_REG) == SAVRES_GPR)
25885 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
25886 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25887 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
25888 else if ((sel & SAVRES_REG) == SAVRES_VR)
25889 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25890 else
25891 abort ();
25892
25893 if ((sel & SAVRES_LR))
25894 suffix = "_x";
25895 }
25896 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25897 {
25898 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
25899 /* No out-of-line save/restore routines for GPRs on AIX. */
25900 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
25901 #endif
25902
25903 aix_names:
25904 if ((sel & SAVRES_REG) == SAVRES_GPR)
25905 prefix = ((sel & SAVRES_SAVE)
25906 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
25907 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
25908 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25909 {
25910 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
25911 if ((sel & SAVRES_LR))
25912 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
25913 else
25914 #endif
25915 {
25916 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
25917 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
25918 }
25919 }
25920 else if ((sel & SAVRES_REG) == SAVRES_VR)
25921 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
25922 else
25923 abort ();
25924 }
25925
25926 if (DEFAULT_ABI == ABI_DARWIN)
25927 {
25928 /* The Darwin approach is (slightly) different, in order to be
25929 compatible with code generated by the system toolchain. There is a
25930 single symbol for the start of the save sequence, and the code here
25931 embeds an offset into that code on the basis of the first register
25932 to be saved. */
25933 prefix = (sel & SAVRES_SAVE) ? "save" : "rest";
25934 if ((sel & SAVRES_REG) == SAVRES_GPR)
25935 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
25936 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
25937 (regno - 13) * 4, prefix, regno);
25938 else if ((sel & SAVRES_REG) == SAVRES_FPR)
25939 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
25940 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
25941 else if ((sel & SAVRES_REG) == SAVRES_VR)
25942 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
25943 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
25944 else
25945 abort ();
25946 }
25947 else
25948 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
25949
25950 return savres_routine_name;
25951 }
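/* A few example results, as a sketch of the naming scheme rather than
   an exhaustive list: an ELFv2 GPR save from r14 with the LR variant
   yields "_savegpr0_14"; a 32-bit SVR4 "exit" restore of GPRs from r29
   yields "_restgpr_29_x"; a Darwin FPR save starting at f20 yields
   "*saveFP+24 ; save f20-f31".  */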
25952
25953 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
25954 SEL encodes the register class and whether we save or restore; see the SAVRES_* flags above. */
25955
25956 static rtx
25957 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
25958 {
25959 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
25960 ? info->first_gp_reg_save
25961 : (sel & SAVRES_REG) == SAVRES_FPR
25962 ? info->first_fp_reg_save - 32
25963 : (sel & SAVRES_REG) == SAVRES_VR
25964 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
25965 : -1);
25966 rtx sym;
25967 int select = sel;
25968
25969 /* Don't generate bogus routine names. */
25970 gcc_assert (FIRST_SAVRES_REGISTER <= regno
25971 && regno <= LAST_SAVRES_REGISTER
25972 && select >= 0 && select <= 12);
25973
25974 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
25975
25976 if (sym == NULL)
25977 {
25978 char *name;
25979
25980 name = rs6000_savres_routine_name (regno, sel);
25981
25982 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
25983 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
25984 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
25985 }
25986
25987 return sym;
25988 }
25989
25990 /* Emit a sequence of insns, including a stack tie if needed, for
25991 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
25992 reset the stack pointer, but move the base of the frame into
25993 reg UPDT_REGNO for use by out-of-line register restore routines. */
25994
25995 static rtx
25996 rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
25997 unsigned updt_regno)
25998 {
25999 /* If there is nothing to do, don't do anything. */
26000 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
26001 return NULL_RTX;
26002
26003 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
26004
26005 /* This blockage is needed so that sched doesn't decide to move
26006 the sp change before the register restores. */
26007 if (DEFAULT_ABI == ABI_V4)
26008 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
26009 GEN_INT (frame_off)));
26010
26011 /* If we are restoring registers out-of-line, we will be using the
26012 "exit" variants of the restore routines, which will reset the
26013 stack for us. But we do need to point updt_reg into the
26014 right place for those routines. */
26015 if (frame_off != 0)
26016 return emit_insn (gen_add3_insn (updt_reg_rtx,
26017 frame_reg_rtx, GEN_INT (frame_off)));
26018 else
26019 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
26022 }
26023
26024 /* Return the register number used as a pointer by out-of-line
26025 save/restore functions. */
26026
26027 static inline unsigned
26028 ptr_regno_for_savres (int sel)
26029 {
26030 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26031 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
26032 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
26033 }
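/* In other words: AIX and ELFv2 use r1 for the FPR routines and for the
   LR variants, and r12 otherwise; Darwin uses r1 for its FPR routines
   and r11 otherwise; 32-bit SVR4 always uses r11.  */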
26034
26035 /* Construct a parallel rtx describing the effect of a call to an
26036 out-of-line register save/restore routine, and emit the insn
26037 or jump_insn as appropriate. */
26038
26039 static rtx_insn *
26040 rs6000_emit_savres_rtx (rs6000_stack_t *info,
26041 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
26042 machine_mode reg_mode, int sel)
26043 {
26044 int i;
26045 int offset, start_reg, end_reg, n_regs, use_reg;
26046 int reg_size = GET_MODE_SIZE (reg_mode);
26047 rtx sym;
26048 rtvec p;
26049 rtx par;
26050 rtx_insn *insn;
26051
26052 offset = 0;
26053 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26054 ? info->first_gp_reg_save
26055 : (sel & SAVRES_REG) == SAVRES_FPR
26056 ? info->first_fp_reg_save
26057 : (sel & SAVRES_REG) == SAVRES_VR
26058 ? info->first_altivec_reg_save
26059 : -1);
26060 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
26061 ? 32
26062 : (sel & SAVRES_REG) == SAVRES_FPR
26063 ? 64
26064 : (sel & SAVRES_REG) == SAVRES_VR
26065 ? LAST_ALTIVEC_REGNO + 1
26066 : -1);
26067 n_regs = end_reg - start_reg;
26068 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
26069 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
26070 + n_regs);
26071
26072 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26073 RTVEC_ELT (p, offset++) = ret_rtx;
26074
26075 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
26076
26077 sym = rs6000_savres_routine_sym (info, sel);
26078 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
26079
26080 use_reg = ptr_regno_for_savres (sel);
26081 if ((sel & SAVRES_REG) == SAVRES_VR)
26082 {
26083 /* Vector regs are saved/restored using [reg+reg] addressing. */
26084 RTVEC_ELT (p, offset++) = gen_hard_reg_clobber (Pmode, use_reg);
26085 RTVEC_ELT (p, offset++)
26086 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
26087 }
26088 else
26089 RTVEC_ELT (p, offset++)
26090 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
26091
26092 for (i = 0; i < end_reg - start_reg; i++)
26093 RTVEC_ELT (p, i + offset)
26094 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
26095 frame_reg_rtx, save_area_offset + reg_size * i,
26096 (sel & SAVRES_SAVE) != 0);
26097
26098 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26099 RTVEC_ELT (p, i + offset)
26100 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
26101
26102 par = gen_rtx_PARALLEL (VOIDmode, p);
26103
26104 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
26105 {
26106 insn = emit_jump_insn (par);
26107 JUMP_LABEL (insn) = ret_rtx;
26108 }
26109 else
26110 insn = emit_insn (par);
26111 return insn;
26112 }
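/* A sketch of the PARALLEL built above for an ELFv2 GPR save through
   "_savegpr0_14" (register numbers and offsets are illustrative):

     (parallel [(clobber (reg LR_REGNO))
                (use (symbol_ref "_savegpr0_14"))
                (use (reg 1))
                (set (mem (plus (reg 1) (const_int -144))) (reg 14))
                ...
                (set (mem (plus (reg 1) (const_int -8))) (reg 31))])

   A restore with SAVRES_LR set starts with (return) instead and is
   emitted as a jump_insn.  */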
26113
26114 /* Emit prologue code to store CR fields that need to be saved into REG. This
26115 function should only be called when moving the non-volatile CRs to REG; it
26116 is not a general-purpose routine to move the entire set of CRs to REG.
26117 Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
26118 volatile CRs. */
26119
26120 static void
26121 rs6000_emit_prologue_move_from_cr (rtx reg)
26122 {
26123 /* Only the ELFv2 ABI allows storing only selected fields. */
26124 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
26125 {
26126 int i, cr_reg[8], count = 0;
26127
26128 /* Collect CR fields that must be saved. */
26129 for (i = 0; i < 8; i++)
26130 if (save_reg_p (CR0_REGNO + i))
26131 cr_reg[count++] = i;
26132
26133 /* If it's just a single one, use mfcrf. */
26134 if (count == 1)
26135 {
26136 rtvec p = rtvec_alloc (1);
26137 rtvec r = rtvec_alloc (2);
26138 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
26139 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
26140 RTVEC_ELT (p, 0)
26141 = gen_rtx_SET (reg,
26142 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
26143
26144 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26145 return;
26146 }
26147
26148 /* ??? It might be better to handle count == 2 / 3 cases here
26149 as well, using logical operations to combine the values. */
26150 }
26151
26152 emit_insn (gen_prologue_movesi_from_cr (reg));
26153 }
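/* A sketch of the single-field case: if only CR2 must be saved, the
   PARALLEL built above contains just

     (set (reg:SI REG)
          (unspec:SI [(reg:CC CR2_REGNO) (const_int 0x20)]
                     UNSPEC_MOVESI_FROM_CR))

   where the mask is 1 << (7 - 2) = 0x20 for field 2, allowing a single
   mfocrf (rather than a full mfcr) to be generated.  */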
26154
26155 /* Return whether the split-stack arg pointer (r12) is used. */
26156
26157 static bool
26158 split_stack_arg_pointer_used_p (void)
26159 {
26160 /* If the pseudo holding the arg pointer is no longer a pseudo,
26161 then the arg pointer is used. */
26162 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
26163 && (!REG_P (cfun->machine->split_stack_arg_pointer)
26164 || HARD_REGISTER_P (cfun->machine->split_stack_arg_pointer)))
26165 return true;
26166
26167 /* Unfortunately we also need to do some code scanning, since
26168 r12 may have been substituted for the pseudo. */
26169 rtx_insn *insn;
26170 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
26171 FOR_BB_INSNS (bb, insn)
26172 if (NONDEBUG_INSN_P (insn))
26173 {
26174 /* A call destroys r12. */
26175 if (CALL_P (insn))
26176 return false;
26177
26178 df_ref use;
26179 FOR_EACH_INSN_USE (use, insn)
26180 {
26181 rtx x = DF_REF_REG (use);
26182 if (REG_P (x) && REGNO (x) == 12)
26183 return true;
26184 }
26185 df_ref def;
26186 FOR_EACH_INSN_DEF (def, insn)
26187 {
26188 rtx x = DF_REF_REG (def);
26189 if (REG_P (x) && REGNO (x) == 12)
26190 return false;
26191 }
26192 }
26193 return bitmap_bit_p (DF_LR_OUT (bb), 12);
26194 }
26195
26196 /* Return whether we need to emit an ELFv2 global entry point prologue. */
26197
26198 static bool
26199 rs6000_global_entry_point_needed_p (void)
26200 {
26201 /* Only needed for the ELFv2 ABI. */
26202 if (DEFAULT_ABI != ABI_ELFv2)
26203 return false;
26204
26205 /* With -msingle-pic-base, we assume the whole program shares the same
26206 TOC, so no global entry point prologues are needed anywhere. */
26207 if (TARGET_SINGLE_PIC_BASE)
26208 return false;
26209
26210 /* Ensure we have a global entry point for thunks. ??? We could
26211 avoid that if the target routine doesn't need a global entry point,
26212 but we do not know whether this is the case at this point. */
26213 if (cfun->is_thunk)
26214 return true;
26215
26216 /* For regular functions, rs6000_emit_prologue sets this flag if the
26217 routine ever uses the TOC pointer. */
26218 return cfun->machine->r2_setup_needed;
26219 }
26220
26221 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
26222 static sbitmap
26223 rs6000_get_separate_components (void)
26224 {
26225 rs6000_stack_t *info = rs6000_stack_info ();
26226
26227 if (WORLD_SAVE_P (info))
26228 return NULL;
26229
26230 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
26231 && !(info->savres_strategy & REST_MULTIPLE));
26232
26233 /* Component 0 is the save/restore of LR (done via GPR0).
26234 Component 2 is the save of the TOC (GPR2).
26235 Components 13..31 are the save/restore of GPR13..GPR31.
26236 Components 46..63 are the save/restore of FPR14..FPR31. */
26237
26238 cfun->machine->n_components = 64;
26239
26240 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26241 bitmap_clear (components);
26242
26243 int reg_size = TARGET_32BIT ? 4 : 8;
26244 int fp_reg_size = 8;
26245
26246 /* The GPRs we need saved to the frame. */
26247 if ((info->savres_strategy & SAVE_INLINE_GPRS)
26248 && (info->savres_strategy & REST_INLINE_GPRS))
26249 {
26250 int offset = info->gp_save_offset;
26251 if (info->push_p)
26252 offset += info->total_size;
26253
26254 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26255 {
26256 if (IN_RANGE (offset, -0x8000, 0x7fff)
26257 && save_reg_p (regno))
26258 bitmap_set_bit (components, regno);
26259
26260 offset += reg_size;
26261 }
26262 }
26263
26264 /* Don't mess with the hard frame pointer. */
26265 if (frame_pointer_needed)
26266 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
26267
26268 /* Don't mess with the fixed TOC register. */
26269 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
26270 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26271 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26272 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
26273
26274 /* The FPRs we need saved to the frame. */
26275 if ((info->savres_strategy & SAVE_INLINE_FPRS)
26276 && (info->savres_strategy & REST_INLINE_FPRS))
26277 {
26278 int offset = info->fp_save_offset;
26279 if (info->push_p)
26280 offset += info->total_size;
26281
26282 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26283 {
26284 if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
26285 bitmap_set_bit (components, regno);
26286
26287 offset += fp_reg_size;
26288 }
26289 }
26290
26291 /* Optimize LR save and restore if we can. This is component 0. Any
26292 out-of-line register save/restore routines need LR. */
26293 if (info->lr_save_p
26294 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26295 && (info->savres_strategy & SAVE_INLINE_GPRS)
26296 && (info->savres_strategy & REST_INLINE_GPRS)
26297 && (info->savres_strategy & SAVE_INLINE_FPRS)
26298 && (info->savres_strategy & REST_INLINE_FPRS)
26299 && (info->savres_strategy & SAVE_INLINE_VRS)
26300 && (info->savres_strategy & REST_INLINE_VRS))
26301 {
26302 int offset = info->lr_save_offset;
26303 if (info->push_p)
26304 offset += info->total_size;
26305 if (IN_RANGE (offset, -0x8000, 0x7fff))
26306 bitmap_set_bit (components, 0);
26307 }
26308
26309 /* Optimize saving the TOC. This is component 2. */
26310 if (cfun->machine->save_toc_in_prologue)
26311 bitmap_set_bit (components, 2);
26312
26313 return components;
26314 }
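/* A worked example under an assumed frame layout: a function that saves
   only r30, r31 and LR, with all three save slots within a signed
   16-bit offset of the stack pointer, ends up with the component set
   {0, 30, 31}, and each of those saves/restores can then be placed
   independently by the shrink-wrapping infrastructure.  */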
26315
26316 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
26317 static sbitmap
26318 rs6000_components_for_bb (basic_block bb)
26319 {
26320 rs6000_stack_t *info = rs6000_stack_info ();
26321
26322 bitmap in = DF_LIVE_IN (bb);
26323 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
26324 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
26325
26326 sbitmap components = sbitmap_alloc (cfun->machine->n_components);
26327 bitmap_clear (components);
26328
26329 /* A register is used in a bb if it is in the IN, GEN, or KILL sets. */
26330
26331 /* GPRs. */
26332 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
26333 if (bitmap_bit_p (in, regno)
26334 || bitmap_bit_p (gen, regno)
26335 || bitmap_bit_p (kill, regno))
26336 bitmap_set_bit (components, regno);
26337
26338 /* FPRs. */
26339 for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
26340 if (bitmap_bit_p (in, regno)
26341 || bitmap_bit_p (gen, regno)
26342 || bitmap_bit_p (kill, regno))
26343 bitmap_set_bit (components, regno);
26344
26345 /* The link register. */
26346 if (bitmap_bit_p (in, LR_REGNO)
26347 || bitmap_bit_p (gen, LR_REGNO)
26348 || bitmap_bit_p (kill, LR_REGNO))
26349 bitmap_set_bit (components, 0);
26350
26351 /* The TOC save. */
26352 if (bitmap_bit_p (in, TOC_REGNUM)
26353 || bitmap_bit_p (gen, TOC_REGNUM)
26354 || bitmap_bit_p (kill, TOC_REGNUM))
26355 bitmap_set_bit (components, 2);
26356
26357 return components;
26358 }
26359
26360 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
26361 static void
26362 rs6000_disqualify_components (sbitmap components, edge e,
26363 sbitmap edge_components, bool /*is_prologue*/)
26364 {
26365 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
26366 live where we want to place that code. */
26367 if (bitmap_bit_p (edge_components, 0)
26368 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
26369 {
26370 if (dump_file)
26371 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
26372 "on entry to bb %d\n", e->dest->index);
26373 bitmap_clear_bit (components, 0);
26374 }
26375 }
26376
26377 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
26378 static void
26379 rs6000_emit_prologue_components (sbitmap components)
26380 {
26381 rs6000_stack_t *info = rs6000_stack_info ();
26382 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26383 ? HARD_FRAME_POINTER_REGNUM
26384 : STACK_POINTER_REGNUM);
26385
26386 machine_mode reg_mode = Pmode;
26387 int reg_size = TARGET_32BIT ? 4 : 8;
26388 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26389 int fp_reg_size = 8;
26390
26391 /* Prologue for LR. */
26392 if (bitmap_bit_p (components, 0))
26393 {
26394 rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
26395 rtx reg = gen_rtx_REG (reg_mode, 0);
26396 rtx_insn *insn = emit_move_insn (reg, lr);
26397 RTX_FRAME_RELATED_P (insn) = 1;
26398 add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));
26399
26400 int offset = info->lr_save_offset;
26401 if (info->push_p)
26402 offset += info->total_size;
26403
26404 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26405 RTX_FRAME_RELATED_P (insn) = 1;
26406 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
26407 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
26408 }
26409
26410 /* Prologue for TOC. */
26411 if (bitmap_bit_p (components, 2))
26412 {
26413 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
26414 rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26415 emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
26416 }
26417
26418 /* Prologue for the GPRs. */
26419 int offset = info->gp_save_offset;
26420 if (info->push_p)
26421 offset += info->total_size;
26422
26423 for (int i = info->first_gp_reg_save; i < 32; i++)
26424 {
26425 if (bitmap_bit_p (components, i))
26426 {
26427 rtx reg = gen_rtx_REG (reg_mode, i);
26428 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26429 RTX_FRAME_RELATED_P (insn) = 1;
26430 rtx set = copy_rtx (single_set (insn));
26431 add_reg_note (insn, REG_CFA_OFFSET, set);
26432 }
26433
26434 offset += reg_size;
26435 }
26436
26437 /* Prologue for the FPRs. */
26438 offset = info->fp_save_offset;
26439 if (info->push_p)
26440 offset += info->total_size;
26441
26442 for (int i = info->first_fp_reg_save; i < 64; i++)
26443 {
26444 if (bitmap_bit_p (components, i))
26445 {
26446 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26447 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
26448 RTX_FRAME_RELATED_P (insn) = 1;
26449 rtx set = copy_rtx (single_set (insn));
26450 add_reg_note (insn, REG_CFA_OFFSET, set);
26451 }
26452
26453 offset += fp_reg_size;
26454 }
26455 }
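/* For the LR component (component 0) the code above emits, roughly,
   with the ELFv2 save slot assumed for the example:

	mflr 0		# annotated with a REG_CFA_REGISTER note
	std 0,16(1)	# annotated with a REG_CFA_OFFSET note

   so the CFI stays accurate after each of the two steps.  */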
26456
26457 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
26458 static void
26459 rs6000_emit_epilogue_components (sbitmap components)
26460 {
26461 rs6000_stack_t *info = rs6000_stack_info ();
26462 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
26463 ? HARD_FRAME_POINTER_REGNUM
26464 : STACK_POINTER_REGNUM);
26465
26466 machine_mode reg_mode = Pmode;
26467 int reg_size = TARGET_32BIT ? 4 : 8;
26468
26469 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26470 int fp_reg_size = 8;
26471
26472 /* Epilogue for the FPRs. */
26473 int offset = info->fp_save_offset;
26474 if (info->push_p)
26475 offset += info->total_size;
26476
26477 for (int i = info->first_fp_reg_save; i < 64; i++)
26478 {
26479 if (bitmap_bit_p (components, i))
26480 {
26481 rtx reg = gen_rtx_REG (fp_reg_mode, i);
26482 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26483 RTX_FRAME_RELATED_P (insn) = 1;
26484 add_reg_note (insn, REG_CFA_RESTORE, reg);
26485 }
26486
26487 offset += fp_reg_size;
26488 }
26489
26490 /* Epilogue for the GPRs. */
26491 offset = info->gp_save_offset;
26492 if (info->push_p)
26493 offset += info->total_size;
26494
26495 for (int i = info->first_gp_reg_save; i < 32; i++)
26496 {
26497 if (bitmap_bit_p (components, i))
26498 {
26499 rtx reg = gen_rtx_REG (reg_mode, i);
26500 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26501 RTX_FRAME_RELATED_P (insn) = 1;
26502 add_reg_note (insn, REG_CFA_RESTORE, reg);
26503 }
26504
26505 offset += reg_size;
26506 }
26507
26508 /* Epilogue for LR. */
26509 if (bitmap_bit_p (components, 0))
26510 {
26511 int offset = info->lr_save_offset;
26512 if (info->push_p)
26513 offset += info->total_size;
26514
26515 rtx reg = gen_rtx_REG (reg_mode, 0);
26516 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
26517
26518 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
26519 insn = emit_move_insn (lr, reg);
26520 RTX_FRAME_RELATED_P (insn) = 1;
26521 add_reg_note (insn, REG_CFA_RESTORE, lr);
26522 }
26523 }
26524
26525 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
26526 static void
26527 rs6000_set_handled_components (sbitmap components)
26528 {
26529 rs6000_stack_t *info = rs6000_stack_info ();
26530
26531 for (int i = info->first_gp_reg_save; i < 32; i++)
26532 if (bitmap_bit_p (components, i))
26533 cfun->machine->gpr_is_wrapped_separately[i] = true;
26534
26535 for (int i = info->first_fp_reg_save; i < 64; i++)
26536 if (bitmap_bit_p (components, i))
26537 cfun->machine->fpr_is_wrapped_separately[i - 32] = true;
26538
26539 if (bitmap_bit_p (components, 0))
26540 cfun->machine->lr_is_wrapped_separately = true;
26541
26542 if (bitmap_bit_p (components, 2))
26543 cfun->machine->toc_is_wrapped_separately = true;
26544 }
26545
26546 /* VRSAVE is a bit vector representing which AltiVec registers
26547 are used. The OS uses this to determine which vector
26548 registers to save on a context switch. We need to save
26549 VRSAVE on the stack frame, add whatever AltiVec registers we
26550 used in this function, and do the corresponding magic in the
26551 epilogue. */
26552 static void
26553 emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
26554 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26555 {
26556 /* Get VRSAVE into a GPR. */
26557 rtx reg = gen_rtx_REG (SImode, save_regno);
26558 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
26559 if (TARGET_MACHO)
26560 emit_insn (gen_get_vrsave_internal (reg));
26561 else
26562 emit_insn (gen_rtx_SET (reg, vrsave));
26563
26564 /* Save VRSAVE. */
26565 int offset = info->vrsave_save_offset + frame_off;
26566 emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
26567
26568 /* Include the registers in the mask. */
26569 emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));
26570
26571 emit_insn (generate_set_vrsave (reg, info, 0));
26572 }
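/* Roughly, with an illustrative save register and an assumed mask, the
   sequence emitted above is:

	mfspr 11,256	# read VRSAVE (SPR 256)
	stw 11,<off>(1)	# save the caller's value in the frame
	oris 11,11,0xc000	# OR in this function's AltiVec regs
	mtspr 256,11	# install the updated mask

   where 0xc0000000 (v0 and v1 live) is only an example mask.  */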
26573
26574 /* Set up the arg pointer (r12) for -fsplit-stack code. If __morestack was
26575 called, it left the arg pointer (which points into the old stack) in
26576 r29.  Otherwise, the arg pointer is the top of the current frame. */
26577 static void
26578 emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
26579 HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
26580 {
26581 cfun->machine->split_stack_argp_used = true;
26582
26583 if (sp_adjust)
26584 {
26585 rtx r12 = gen_rtx_REG (Pmode, 12);
26586 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26587 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
26588 emit_insn_before (set_r12, sp_adjust);
26589 }
26590 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
26591 {
26592 rtx r12 = gen_rtx_REG (Pmode, 12);
26593 if (frame_off == 0)
26594 emit_move_insn (r12, frame_reg_rtx);
26595 else
26596 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
26597 }
26598
26599 if (info->push_p)
26600 {
26601 rtx r12 = gen_rtx_REG (Pmode, 12);
26602 rtx r29 = gen_rtx_REG (Pmode, 29);
26603 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
26604 rtx not_more = gen_label_rtx ();
26605 rtx jump;
26606
26607 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
26608 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
26609 gen_rtx_LABEL_REF (VOIDmode, not_more),
26610 pc_rtx);
26611 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
26612 JUMP_LABEL (jump) = not_more;
26613 LABEL_NUSES (not_more) += 1;
26614 emit_move_insn (r12, r29);
26615 emit_label (not_more);
26616 }
26617 }
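/* The conditional above amounts to the following sketch, where cr7 was
   set by the __morestack protocol before entry:

	bge 7,.Lnot_more	# unsigned "ge": __morestack not called
	mr 12,29		# it was called: use the old-stack argp
   .Lnot_more:

   so r12 holds the arg pointer on both paths.  */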
26618
26619 /* Emit function prologue as insns. */
26620
26621 void
26622 rs6000_emit_prologue (void)
26623 {
26624 rs6000_stack_t *info = rs6000_stack_info ();
26625 machine_mode reg_mode = Pmode;
26626 int reg_size = TARGET_32BIT ? 4 : 8;
26627 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
26628 int fp_reg_size = 8;
26629 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
26630 rtx frame_reg_rtx = sp_reg_rtx;
26631 unsigned int cr_save_regno;
26632 rtx cr_save_rtx = NULL_RTX;
26633 rtx_insn *insn;
26634 int strategy;
26635 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26636 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26637 && call_used_regs[STATIC_CHAIN_REGNUM]);
26638 int using_split_stack = (flag_split_stack
26639 && (lookup_attribute ("no_split_stack",
26640 DECL_ATTRIBUTES (cfun->decl))
26641 == NULL));
26642
26643 /* Offset to top of frame for frame_reg and sp respectively. */
26644 HOST_WIDE_INT frame_off = 0;
26645 HOST_WIDE_INT sp_off = 0;
26646 /* sp_adjust is the stack adjusting instruction, tracked so that the
26647 insn setting up the split-stack arg pointer can be emitted just
26648 prior to it, when r12 is not used here for other purposes. */
26649 rtx_insn *sp_adjust = 0;
26650
26651 #if CHECKING_P
26652 /* Track and check usage of r0, r11, r12. */
26653 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
26654 #define START_USE(R) do \
26655 { \
26656 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26657 reg_inuse |= 1 << (R); \
26658 } while (0)
26659 #define END_USE(R) do \
26660 { \
26661 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
26662 reg_inuse &= ~(1 << (R)); \
26663 } while (0)
26664 #define NOT_INUSE(R) do \
26665 { \
26666 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
26667 } while (0)
26668 #else
26669 #define START_USE(R) do {} while (0)
26670 #define END_USE(R) do {} while (0)
26671 #define NOT_INUSE(R) do {} while (0)
26672 #endif
26673
26674 if (DEFAULT_ABI == ABI_ELFv2
26675 && !TARGET_SINGLE_PIC_BASE)
26676 {
26677 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
26678
26679 /* With -mminimal-toc we may generate an extra use of r2 below. */
26680 if (TARGET_TOC && TARGET_MINIMAL_TOC
26681 && !constant_pool_empty_p ())
26682 cfun->machine->r2_setup_needed = true;
26683 }
26684
26685
26686 if (flag_stack_usage_info)
26687 current_function_static_stack_size = info->total_size;
26688
26689 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
26690 {
26691 HOST_WIDE_INT size = info->total_size;
26692
26693 if (crtl->is_leaf && !cfun->calls_alloca)
26694 {
26695 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
26696 rs6000_emit_probe_stack_range (get_stack_check_protect (),
26697 size - get_stack_check_protect ());
26698 }
26699 else if (size > 0)
26700 rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
26701 }
26702
26703 if (TARGET_FIX_AND_CONTINUE)
26704 {
26705 /* gdb on darwin arranges to forward a function from the old
26706 address by modifying the first 5 instructions of the function
26707 to branch to the overriding function. This is necessary to
26708 permit function pointers that point to the old function to
26709 actually forward to the new function. */
26710 emit_insn (gen_nop ());
26711 emit_insn (gen_nop ());
26712 emit_insn (gen_nop ());
26713 emit_insn (gen_nop ());
26714 emit_insn (gen_nop ());
26715 }
26716
26717 /* Handle world saves specially here. */
26718 if (WORLD_SAVE_P (info))
26719 {
26720 int i, j, sz;
26721 rtx treg;
26722 rtvec p;
26723 rtx reg0;
26724
26725 /* save_world expects LR in r0. */
26726 reg0 = gen_rtx_REG (Pmode, 0);
26727 if (info->lr_save_p)
26728 {
26729 insn = emit_move_insn (reg0,
26730 gen_rtx_REG (Pmode, LR_REGNO));
26731 RTX_FRAME_RELATED_P (insn) = 1;
26732 }
26733
26734 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
26735 assumptions about the offsets of various bits of the stack
26736 frame. */
26737 gcc_assert (info->gp_save_offset == -220
26738 && info->fp_save_offset == -144
26739 && info->lr_save_offset == 8
26740 && info->cr_save_offset == 4
26741 && info->push_p
26742 && info->lr_save_p
26743 && (!crtl->calls_eh_return
26744 || info->ehrd_offset == -432)
26745 && info->vrsave_save_offset == -224
26746 && info->altivec_save_offset == -416);
26747
26748 treg = gen_rtx_REG (SImode, 11);
26749 emit_move_insn (treg, GEN_INT (-info->total_size));
26750
26751 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
26752 in R11. It also clobbers R12, so beware! */
26753
26754 /* Preserve CR2 for save_world prologues. */
26755 sz = 5;
26756 sz += 32 - info->first_gp_reg_save;
26757 sz += 64 - info->first_fp_reg_save;
26758 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
26759 p = rtvec_alloc (sz);
26760 j = 0;
26761 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, LR_REGNO);
26762 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
26763 gen_rtx_SYMBOL_REF (Pmode,
26764 "*save_world"));
26765 /* We do floats first so that the instruction pattern matches
26766 properly. */
26767 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
26768 RTVEC_ELT (p, j++)
26769 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
26770 info->first_fp_reg_save + i),
26771 frame_reg_rtx,
26772 info->fp_save_offset + frame_off + 8 * i);
26773 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
26774 RTVEC_ELT (p, j++)
26775 = gen_frame_store (gen_rtx_REG (V4SImode,
26776 info->first_altivec_reg_save + i),
26777 frame_reg_rtx,
26778 info->altivec_save_offset + frame_off + 16 * i);
26779 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
26780 RTVEC_ELT (p, j++)
26781 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
26782 frame_reg_rtx,
26783 info->gp_save_offset + frame_off + reg_size * i);
26784
26785 /* CR register traditionally saved as CR2. */
26786 RTVEC_ELT (p, j++)
26787 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
26788 frame_reg_rtx, info->cr_save_offset + frame_off);
26789 /* Explain about use of R0. */
26790 if (info->lr_save_p)
26791 RTVEC_ELT (p, j++)
26792 = gen_frame_store (reg0,
26793 frame_reg_rtx, info->lr_save_offset + frame_off);
26794 /* Explain what happens to the stack pointer. */
26795 {
26796 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
26797 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
26798 }
26799
26800 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
26801 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26802 treg, GEN_INT (-info->total_size));
26803 sp_off = frame_off = info->total_size;
26804 }
26805
26806 strategy = info->savres_strategy;
26807
26808 /* For V.4, update stack before we do any saving and set back pointer. */
26809 if (! WORLD_SAVE_P (info)
26810 && info->push_p
26811 && (DEFAULT_ABI == ABI_V4
26812 || crtl->calls_eh_return))
26813 {
26814 bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
26815 || !(strategy & SAVE_INLINE_GPRS)
26816 || !(strategy & SAVE_INLINE_VRS));
26817 int ptr_regno = -1;
26818 rtx ptr_reg = NULL_RTX;
26819 int ptr_off = 0;
26820
26821 if (info->total_size < 32767)
26822 frame_off = info->total_size;
26823 else if (need_r11)
26824 ptr_regno = 11;
26825 else if (info->cr_save_p
26826 || info->lr_save_p
26827 || info->first_fp_reg_save < 64
26828 || info->first_gp_reg_save < 32
26829 || info->altivec_size != 0
26830 || info->vrsave_size != 0
26831 || crtl->calls_eh_return)
26832 ptr_regno = 12;
26833 else
26834 {
26835 /* The prologue won't be saving any regs so there is no need
26836 to set up a frame register to access any frame save area.
26837 We also won't be using frame_off anywhere below, but set
26838 the correct value anyway to protect against future
26839 changes to this function. */
26840 frame_off = info->total_size;
26841 }
26842 if (ptr_regno != -1)
26843 {
26844 /* Set up the frame offset to that needed by the first
26845 out-of-line save function. */
26846 START_USE (ptr_regno);
26847 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26848 frame_reg_rtx = ptr_reg;
26849 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
26850 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
26851 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
26852 ptr_off = info->gp_save_offset + info->gp_size;
26853 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
26854 ptr_off = info->altivec_save_offset + info->altivec_size;
26855 frame_off = -ptr_off;
26856 }
26857 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
26858 ptr_reg, ptr_off);
26859 if (REGNO (frame_reg_rtx) == 12)
26860 sp_adjust = 0;
26861 sp_off = info->total_size;
26862 if (frame_reg_rtx != sp_reg_rtx)
26863 rs6000_emit_stack_tie (frame_reg_rtx, false);
26864 }
26865
26866 /* If we use the link register, get it into r0. */
26867 if (!WORLD_SAVE_P (info) && info->lr_save_p
26868 && !cfun->machine->lr_is_wrapped_separately)
26869 {
26870 rtx addr, reg, mem;
26871
26872 reg = gen_rtx_REG (Pmode, 0);
26873 START_USE (0);
26874 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
26875 RTX_FRAME_RELATED_P (insn) = 1;
26876
26877 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
26878 | SAVE_NOINLINE_FPRS_SAVES_LR)))
26879 {
26880 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
26881 GEN_INT (info->lr_save_offset + frame_off));
26882 mem = gen_rtx_MEM (Pmode, addr);
26883 /* This should not be of rs6000_sr_alias_set, because of
26884 __builtin_return_address. */
26885
26886 insn = emit_move_insn (mem, reg);
26887 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
26888 NULL_RTX, NULL_RTX);
26889 END_USE (0);
26890 }
26891 }
26892
26893 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
26894 r12 will be needed by out-of-line gpr save. */
26895 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26896 && !(strategy & (SAVE_INLINE_GPRS
26897 | SAVE_NOINLINE_GPRS_SAVES_LR))
26898 ? 11 : 12);
26899 if (!WORLD_SAVE_P (info)
26900 && info->cr_save_p
26901 && REGNO (frame_reg_rtx) != cr_save_regno
26902 && !(using_static_chain_p && cr_save_regno == 11)
26903 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
26904 {
26905 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
26906 START_USE (cr_save_regno);
26907 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
26908 }
26909
26910 /* Do any required saving of FPRs.  If the strategy calls for inline
26911 saves, do them ourselves; otherwise call an out-of-line routine. */
26912 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
26913 {
26914 int offset = info->fp_save_offset + frame_off;
26915 for (int i = info->first_fp_reg_save; i < 64; i++)
26916 {
26917 if (save_reg_p (i)
26918 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
26919 emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
26920 sp_off - frame_off);
26921
26922 offset += fp_reg_size;
26923 }
26924 }
26925 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
26926 {
26927 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
26928 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
26929 unsigned ptr_regno = ptr_regno_for_savres (sel);
26930 rtx ptr_reg = frame_reg_rtx;
26931
26932 if (REGNO (frame_reg_rtx) == ptr_regno)
26933 gcc_checking_assert (frame_off == 0);
26934 else
26935 {
26936 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26937 NOT_INUSE (ptr_regno);
26938 emit_insn (gen_add3_insn (ptr_reg,
26939 frame_reg_rtx, GEN_INT (frame_off)));
26940 }
26941 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26942 info->fp_save_offset,
26943 info->lr_save_offset,
26944 DFmode, sel);
26945 rs6000_frame_related (insn, ptr_reg, sp_off,
26946 NULL_RTX, NULL_RTX);
26947 if (lr)
26948 END_USE (0);
26949 }
26950
26951 /* Save GPRs. This is done as a PARALLEL if we are using
26952 the store-multiple instructions. */
26953 if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
26954 {
26955 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
26956 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
26957 unsigned ptr_regno = ptr_regno_for_savres (sel);
26958 rtx ptr_reg = frame_reg_rtx;
26959 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
26960 int end_save = info->gp_save_offset + info->gp_size;
26961 int ptr_off;
26962
26963 if (ptr_regno == 12)
26964 sp_adjust = 0;
26965 if (!ptr_set_up)
26966 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
26967
26968 /* Need to adjust r11 (r12) if we saved any FPRs. */
26969 if (end_save + frame_off != 0)
26970 {
26971 rtx offset = GEN_INT (end_save + frame_off);
26972
26973 if (ptr_set_up)
26974 frame_off = -end_save;
26975 else
26976 NOT_INUSE (ptr_regno);
26977 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
26978 }
26979 else if (!ptr_set_up)
26980 {
26981 NOT_INUSE (ptr_regno);
26982 emit_move_insn (ptr_reg, frame_reg_rtx);
26983 }
26984 ptr_off = -end_save;
26985 insn = rs6000_emit_savres_rtx (info, ptr_reg,
26986 info->gp_save_offset + ptr_off,
26987 info->lr_save_offset + ptr_off,
26988 reg_mode, sel);
26989 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
26990 NULL_RTX, NULL_RTX);
26991 if (lr)
26992 END_USE (0);
26993 }
26994 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
26995 {
26996 rtvec p;
26997 int i;
26998 p = rtvec_alloc (32 - info->first_gp_reg_save);
26999 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27000 RTVEC_ELT (p, i)
27001 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
27002 frame_reg_rtx,
27003 info->gp_save_offset + frame_off + reg_size * i);
27004 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27005 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27006 NULL_RTX, NULL_RTX);
27007 }
27008 else if (!WORLD_SAVE_P (info))
27009 {
27010 int offset = info->gp_save_offset + frame_off;
27011 for (int i = info->first_gp_reg_save; i < 32; i++)
27012 {
27013 if (save_reg_p (i)
27014 && !cfun->machine->gpr_is_wrapped_separately[i])
27015 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
27016 sp_off - frame_off);
27017
27018 offset += reg_size;
27019 }
27020 }
27021
27022 if (crtl->calls_eh_return)
27023 {
27024 unsigned int i;
27025 rtvec p;
27026
27027 for (i = 0; ; ++i)
27028 {
27029 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27030 if (regno == INVALID_REGNUM)
27031 break;
27032 }
27033
27034 p = rtvec_alloc (i);
27035
27036 for (i = 0; ; ++i)
27037 {
27038 unsigned int regno = EH_RETURN_DATA_REGNO (i);
27039 if (regno == INVALID_REGNUM)
27040 break;
27041
27042 rtx set
27043 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
27044 sp_reg_rtx,
27045 info->ehrd_offset + sp_off + reg_size * (int) i);
27046 RTVEC_ELT (p, i) = set;
27047 RTX_FRAME_RELATED_P (set) = 1;
27048 }
27049
27050 insn = emit_insn (gen_blockage ());
27051 RTX_FRAME_RELATED_P (insn) = 1;
27052 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
27053 }
27054
27055 /* In AIX ABI we need to make sure r2 is really saved. */
27056 if (TARGET_AIX && crtl->calls_eh_return)
27057 {
27058 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
27059 rtx join_insn, note;
27060 rtx_insn *save_insn;
27061 long toc_restore_insn;
27062
27063 tmp_reg = gen_rtx_REG (Pmode, 11);
27064 tmp_reg_si = gen_rtx_REG (SImode, 11);
27065 if (using_static_chain_p)
27066 {
27067 START_USE (0);
27068 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
27069 }
27070 else
27071 START_USE (11);
27072 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
27073 /* Peek at instruction to which this function returns. If it's
27074 restoring r2, then we know we've already saved r2. We can't
27075 unconditionally save r2 because the value we have will already
27076 be updated if we arrived at this function via a plt call or
27077 toc adjusting stub. */
27078 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
27079 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
27080 + RS6000_TOC_SAVE_SLOT);
27081 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
27082 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
27083 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
27084 validate_condition_mode (EQ, CCUNSmode);
27085 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
27086 emit_insn (gen_rtx_SET (compare_result,
27087 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
27088 toc_save_done = gen_label_rtx ();
27089 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
27090 gen_rtx_EQ (VOIDmode, compare_result,
27091 const0_rtx),
27092 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
27093 pc_rtx);
27094 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
27095 JUMP_LABEL (jump) = toc_save_done;
27096 LABEL_NUSES (toc_save_done) += 1;
27097
27098 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
27099 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
27100 sp_off - frame_off);
27101
27102 emit_label (toc_save_done);
27103
27104 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
27105 have a CFG that has different saves along different paths.
27106 Move the note to a dummy blockage insn, which describes that
27107 R2 is unconditionally saved after the label. */
27108 /* ??? An alternate representation might be a special insn pattern
27109 containing both the branch and the store.  That might give the
27110 code that minimizes the number of DW_CFA_advance opcodes more
27111 freedom in placing the annotations. */
27112 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
27113 if (note)
27114 remove_note (save_insn, note);
27115 else
27116 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
27117 copy_rtx (PATTERN (save_insn)), NULL_RTX);
27118 RTX_FRAME_RELATED_P (save_insn) = 0;
27119
27120 join_insn = emit_insn (gen_blockage ());
27121 REG_NOTES (join_insn) = note;
27122 RTX_FRAME_RELATED_P (join_insn) = 1;
27123
27124 if (using_static_chain_p)
27125 {
27126 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
27127 END_USE (0);
27128 }
27129 else
27130 END_USE (11);
27131 }
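  /* A sketch of the sequence built above for 64-bit (N stands for the
     TOC save slot offset; the exact constants depend on the ABI):

	mflr 11			# address we will return to
	lwz 11,0(11)		# fetch the insn at that address
	xoris 11,11,0xe841	# cancel the expected "ld 2,N(1)" high half
	cmplwi 0,11,N		# does the low half match too?
	beq 0,.Ldone		# yes: caller restores r2, skip the save
	std 2,N(1)		# no: save r2 in the TOC slot ourselves
     .Ldone:
   */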
27132
27133 /* Save CR if we use any that must be preserved. */
27134 if (!WORLD_SAVE_P (info) && info->cr_save_p)
27135 {
27136 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
27137 GEN_INT (info->cr_save_offset + frame_off));
27138 rtx mem = gen_frame_mem (SImode, addr);
27139
27140 /* If we didn't copy cr before, do so now using r0. */
27141 if (cr_save_rtx == NULL_RTX)
27142 {
27143 START_USE (0);
27144 cr_save_rtx = gen_rtx_REG (SImode, 0);
27145 rs6000_emit_prologue_move_from_cr (cr_save_rtx);
27146 }
27147
27148 /* Saving CR requires a two-instruction sequence: one instruction
27149 to move the CR to a general-purpose register, and a second
27150 instruction that stores the GPR to memory.
27151
27152 We do not emit any DWARF CFI records for the first of these,
27153 because we cannot properly represent the fact that CR is saved in
27154 a register. One reason is that we cannot express that multiple
27155 CR fields are saved; another reason is that on 64-bit, the size
27156 of the CR register in DWARF (4 bytes) differs from the size of
27157 a general-purpose register.
27158
27159 This means if any intervening instruction were to clobber one of
27160 the call-saved CR fields, we'd have incorrect CFI. To prevent
27161 this from happening, we mark the store to memory as a use of
27162 those CR fields, which prevents any such instruction from being
27163 scheduled in between the two instructions. */
27164 rtx crsave_v[9];
27165 int n_crsave = 0;
27166 int i;
27167
27168 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
27169 for (i = 0; i < 8; i++)
27170 if (save_reg_p (CR0_REGNO + i))
27171 crsave_v[n_crsave++]
27172 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27173
27174 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
27175 gen_rtvec_v (n_crsave, crsave_v)));
27176 END_USE (REGNO (cr_save_rtx));
27177
27178 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
27179 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
27180 so we need to construct a frame expression manually. */
27181 RTX_FRAME_RELATED_P (insn) = 1;
27182
27183 /* Update address to be stack-pointer relative, like
27184 rs6000_frame_related would do. */
27185 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27186 GEN_INT (info->cr_save_offset + sp_off));
27187 mem = gen_frame_mem (SImode, addr);
27188
27189 if (DEFAULT_ABI == ABI_ELFv2)
27190 {
27191 /* In the ELFv2 ABI we generate separate CFI records for each
27192 CR field that was actually saved. They all point to the
27193 same 32-bit stack slot. */
27194 rtx crframe[8];
27195 int n_crframe = 0;
27196
27197 for (i = 0; i < 8; i++)
27198 if (save_reg_p (CR0_REGNO + i))
27199 {
27200 crframe[n_crframe]
27201 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
27202
27203 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
27204 n_crframe++;
27205 }
27206
27207 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27208 gen_rtx_PARALLEL (VOIDmode,
27209 gen_rtvec_v (n_crframe, crframe)));
27210 }
27211 else
27212 {
27213 /* In other ABIs, by convention, we use a single CR regnum to
27214 represent the fact that all call-saved CR fields are saved.
27215 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
27216 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
27217 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
27218 }
27219 }
27220
27221 /* In the ELFv2 ABI we need to save all call-saved CR fields into
27222 *separate* slots if the routine calls __builtin_eh_return, so
27223 that they can be independently restored by the unwinder. */
27224 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
27225 {
27226 int i, cr_off = info->ehcr_offset;
27227 rtx crsave;
27228
27229 /* ??? We might get better performance by using multiple mfocrf
27230 instructions. */
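      /* mfcr copies all eight CR fields into the GPR, while mfocrf
	 copies a single named field and can be cheaper on some
	 processors, at the cost of one instruction per field.  */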
27231 crsave = gen_rtx_REG (SImode, 0);
27232 emit_insn (gen_prologue_movesi_from_cr (crsave));
27233
27234 for (i = 0; i < 8; i++)
27235 if (!call_used_regs[CR0_REGNO + i])
27236 {
27237 rtvec p = rtvec_alloc (2);
27238 RTVEC_ELT (p, 0)
27239 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
27240 RTVEC_ELT (p, 1)
27241 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
27242
27243 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27244
27245 RTX_FRAME_RELATED_P (insn) = 1;
27246 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27247 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
27248 sp_reg_rtx, cr_off + sp_off));
27249
27250 cr_off += reg_size;
27251 }
27252 }
27253
27254 /* If we are emitting stack probes, but allocate no stack, then
27255 just note that in the dump file. */
27256 if (flag_stack_clash_protection
27257 && dump_file
27258 && !info->push_p)
27259 dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
27260
27261 /* Update stack and set back pointer unless this is V.4,
27262 for which it was done previously. */
27263 if (!WORLD_SAVE_P (info) && info->push_p
27264 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
27265 {
27266 rtx ptr_reg = NULL;
27267 int ptr_off = 0;
27268
27269 /* If saving altivec regs we need to be able to address all save
27270 locations using a 16-bit offset. */
27271 if ((strategy & SAVE_INLINE_VRS) == 0
27272 || (info->altivec_size != 0
27273 && (info->altivec_save_offset + info->altivec_size - 16
27274 + info->total_size - frame_off) > 32767)
27275 || (info->vrsave_size != 0
27276 && (info->vrsave_save_offset
27277 + info->total_size - frame_off) > 32767))
27278 {
27279 int sel = SAVRES_SAVE | SAVRES_VR;
27280 unsigned ptr_regno = ptr_regno_for_savres (sel);
27281
27282 if (using_static_chain_p
27283 && ptr_regno == STATIC_CHAIN_REGNUM)
27284 ptr_regno = 12;
27285 if (REGNO (frame_reg_rtx) != ptr_regno)
27286 START_USE (ptr_regno);
27287 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
27288 frame_reg_rtx = ptr_reg;
27289 ptr_off = info->altivec_save_offset + info->altivec_size;
27290 frame_off = -ptr_off;
27291 }
27292 else if (REGNO (frame_reg_rtx) == 1)
27293 frame_off = info->total_size;
27294 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
27295 ptr_reg, ptr_off);
27296 if (REGNO (frame_reg_rtx) == 12)
27297 sp_adjust = 0;
27298 sp_off = info->total_size;
27299 if (frame_reg_rtx != sp_reg_rtx)
27300 rs6000_emit_stack_tie (frame_reg_rtx, false);
27301 }
27302
27303 /* Set frame pointer, if needed. */
27304 if (frame_pointer_needed)
27305 {
27306 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
27307 sp_reg_rtx);
27308 RTX_FRAME_RELATED_P (insn) = 1;
27309 }
27310
27311 /* Save AltiVec registers if needed. Save here because the red zone does
27312 not always include AltiVec registers. */
27313 if (!WORLD_SAVE_P (info)
27314 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
27315 {
27316 int end_save = info->altivec_save_offset + info->altivec_size;
27317 int ptr_off;
27318 /* Oddly, the vector save/restore functions point r0 at the end
27319 of the save area, then use r11 or r12 to load offsets for
27320 [reg+reg] addressing. */
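      /* For instance (offsets illustrative), the out-of-line routine
	 that saves v20..v31 behaves roughly like
	    li 11,-192 ; stvx 20,11,0
	    li 11,-176 ; stvx 21,11,0
	    ...
	 which is why r0 must point at the end of the save area and
	 why r11 or r12 is clobbered.  */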
27321 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
27322 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
27323 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
27324
27325 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
27326 NOT_INUSE (0);
27327 if (scratch_regno == 12)
27328 sp_adjust = 0;
27329 if (end_save + frame_off != 0)
27330 {
27331 rtx offset = GEN_INT (end_save + frame_off);
27332
27333 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
27334 }
27335 else
27336 emit_move_insn (ptr_reg, frame_reg_rtx);
27337
27338 ptr_off = -end_save;
27339 insn = rs6000_emit_savres_rtx (info, scratch_reg,
27340 info->altivec_save_offset + ptr_off,
27341 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
27342 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
27343 NULL_RTX, NULL_RTX);
27344 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
27345 {
27346 /* The oddity mentioned above clobbered our frame reg. */
27347 emit_move_insn (frame_reg_rtx, ptr_reg);
27348 frame_off = ptr_off;
27349 }
27350 }
27351 else if (!WORLD_SAVE_P (info)
27352 && info->altivec_size != 0)
27353 {
27354 int i;
27355
27356 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
27357 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27358 {
27359 rtx areg, savereg, mem;
27360 HOST_WIDE_INT offset;
27361
27362 offset = (info->altivec_save_offset + frame_off
27363 + 16 * (i - info->first_altivec_reg_save));
27364
27365 savereg = gen_rtx_REG (V4SImode, i);
27366
27367 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
27368 {
27369 mem = gen_frame_mem (V4SImode,
27370 gen_rtx_PLUS (Pmode, frame_reg_rtx,
27371 GEN_INT (offset)));
27372 insn = emit_insn (gen_rtx_SET (mem, savereg));
27373 areg = NULL_RTX;
27374 }
27375 else
27376 {
27377 NOT_INUSE (0);
27378 areg = gen_rtx_REG (Pmode, 0);
27379 emit_move_insn (areg, GEN_INT (offset));
27380
27381 /* AltiVec addressing mode is [reg+reg]. */
27382 mem = gen_frame_mem (V4SImode,
27383 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
27384
27385 /* Rather than emitting a generic move, force use of the stvx
27386 instruction, which we always want on ISA 2.07 (power8) systems.
27387 In particular we don't want xxpermdi/stxvd2x for little
27388 endian. */
27389 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
27390 }
27391
27392 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
27393 areg, GEN_INT (offset));
27394 }
27395 }
27396
27397 /* VRSAVE is a bit vector representing which AltiVec registers
27398 are used. The OS uses this to determine which vector
27399 registers to save on a context switch. We need to save
27400 VRSAVE on the stack frame, add whatever AltiVec registers we
27401 used in this function, and do the corresponding magic in the
27402 epilogue. */
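/* As a sketch, a function that uses only v20 and v31 must keep the two
   corresponding VRSAVE bits set while it runs: read the old value with
   mfspr, OR in the mask for v20 and v31, write it back with mtspr, and
   undo this in the epilogue.  */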
27403
27404 if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
27405 {
27406 /* Get VRSAVE into a GPR. Note that ABI_V4 and ABI_DARWIN might
27407 be using r12 as frame_reg_rtx and r11 as the static chain
27408 pointer for nested functions. */
27409 int save_regno = 12;
27410 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27411 && !using_static_chain_p)
27412 save_regno = 11;
27413 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
27414 {
27415 save_regno = 11;
27416 if (using_static_chain_p)
27417 save_regno = 0;
27418 }
27419 NOT_INUSE (save_regno);
27420
27421 emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
27422 }
27423
27424 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
27425 if (!TARGET_SINGLE_PIC_BASE
27426 && ((TARGET_TOC && TARGET_MINIMAL_TOC
27427 && !constant_pool_empty_p ())
27428 || (DEFAULT_ABI == ABI_V4
27429 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
27430 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
27431 {
27432 /* If emit_load_toc_table will use the link register, we need to save
27433 it. We use R12 for this purpose because emit_load_toc_table
27434 can use register 0. This allows us to use a plain 'blr' to return
27435 from the procedure more often. */
27436 int save_LR_around_toc_setup = (TARGET_ELF
27437 && DEFAULT_ABI == ABI_V4
27438 && flag_pic
27439 && ! info->lr_save_p
27440 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
27441 if (save_LR_around_toc_setup)
27442 {
27443 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27444 rtx tmp = gen_rtx_REG (Pmode, 12);
27445
27446 sp_adjust = 0;
27447 insn = emit_move_insn (tmp, lr);
27448 RTX_FRAME_RELATED_P (insn) = 1;
27449
27450 rs6000_emit_load_toc_table (TRUE);
27451
27452 insn = emit_move_insn (lr, tmp);
27453 add_reg_note (insn, REG_CFA_RESTORE, lr);
27454 RTX_FRAME_RELATED_P (insn) = 1;
27455 }
27456 else
27457 rs6000_emit_load_toc_table (TRUE);
27458 }
27459
27460 #if TARGET_MACHO
27461 if (!TARGET_SINGLE_PIC_BASE
27462 && DEFAULT_ABI == ABI_DARWIN
27463 && flag_pic && crtl->uses_pic_offset_table)
27464 {
27465 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27466 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
27467
27468 /* Save and restore LR locally around this call (in R0). */
27469 if (!info->lr_save_p)
27470 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
27471
27472 emit_insn (gen_load_macho_picbase (src));
27473
27474 emit_move_insn (gen_rtx_REG (Pmode,
27475 RS6000_PIC_OFFSET_TABLE_REGNUM),
27476 lr);
27477
27478 if (!info->lr_save_p)
27479 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
27480 }
27481 #endif
27482
27483 /* If we need to, save the TOC register after doing the stack setup.
27484 Do not emit eh frame info for this save. The unwinder wants info,
27485 conceptually attached to instructions in this function, about
27486 register values in the caller of this function. This R2 may have
27487 already been changed from the value in the caller.
27488 We don't attempt to write accurate DWARF EH frame info for R2
27489 because code emitted by gcc for a (non-pointer) function call
27490 doesn't save and restore R2. Instead, R2 is managed out-of-line
27491 by a linker generated plt call stub when the function resides in
27492 a shared library. This behavior is costly to describe in DWARF,
27493 both in terms of the size of DWARF info and the time taken in the
27494 unwinder to interpret it. R2 changes, apart from the
27495 calls_eh_return case earlier in this function, are handled by
27496 linux-unwind.h frob_update_context. */
27497 if (rs6000_save_toc_in_prologue_p ()
27498 && !cfun->machine->toc_is_wrapped_separately)
27499 {
27500 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
27501 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
27502 }
27503
27504 /* Set up the arg pointer (r12) for -fsplit-stack code. */
27505 if (using_split_stack && split_stack_arg_pointer_used_p ())
27506 emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
27507 }
27508
27509 /* Output .extern statements for the save/restore routines we use. */
27510
27511 static void
27512 rs6000_output_savres_externs (FILE *file)
27513 {
27514 rs6000_stack_t *info = rs6000_stack_info ();
27515
27516 if (TARGET_DEBUG_STACK)
27517 debug_stack_info (info);
27518
27519 /* Write .extern for any function we will call to save and restore
27520 fp values. */
27521 if (info->first_fp_reg_save < 64
27522 && !TARGET_MACHO
27523 && !TARGET_ELF)
27524 {
27525 char *name;
27526 int regno = info->first_fp_reg_save - 32;
27527
27528 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
27529 {
27530 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
27531 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
27532 name = rs6000_savres_routine_name (regno, sel);
27533 fprintf (file, "\t.extern %s\n", name);
27534 }
27535 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
27536 {
27537 bool lr = (info->savres_strategy
27538 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
27539 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
27540 name = rs6000_savres_routine_name (regno, sel);
27541 fprintf (file, "\t.extern %s\n", name);
27542 }
27543 }
27544 }
27545
27546 /* Write function prologue. */
27547
27548 static void
27549 rs6000_output_function_prologue (FILE *file)
27550 {
27551 if (!cfun->is_thunk)
27552 rs6000_output_savres_externs (file);
27553
27554 /* ELFv2 ABI r2 setup code and local entry point. This must follow
27555 immediately after the global entry point label. */
27556 if (rs6000_global_entry_point_needed_p ())
27557 {
27558 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
27559
27560 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
27561
27562 if (TARGET_CMODEL != CMODEL_LARGE)
27563 {
27564 /* In the small and medium code models, we assume the TOC is less
27565 than 2 GB away from the text section, so it can be computed via the
27566 following two-instruction sequence. */
27567 char buf[256];
27568
27569 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27570 fprintf (file, "0:\taddis 2,12,.TOC.-");
27571 assemble_name (file, buf);
27572 fprintf (file, "@ha\n");
27573 fprintf (file, "\taddi 2,2,.TOC.-");
27574 assemble_name (file, buf);
27575 fprintf (file, "@l\n");
27576 }
27577 else
27578 {
27579 /* In the large code model, we allow arbitrary offsets between the
27580 TOC and the text section, so we have to load the offset from
27581 memory. The data field is emitted directly before the global
27582 entry point in rs6000_elf_declare_function_name. */
27583 char buf[256];
27584
27585 #ifdef HAVE_AS_ENTRY_MARKERS
27586 /* If supported by the linker, emit a marker relocation. If the
27587 total code size of the final executable or shared library
27588 happens to fit into 2 GB after all, the linker will replace
27589 this code sequence with the sequence for the small or medium
27590 code model. */
27591 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
27592 #endif
27593 fprintf (file, "\tld 2,");
27594 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27595 assemble_name (file, buf);
27596 fprintf (file, "-");
27597 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27598 assemble_name (file, buf);
27599 fprintf (file, "(12)\n");
27600 fprintf (file, "\tadd 2,2,12\n");
27601 }
27602
27603 fputs ("\t.localentry\t", file);
27604 assemble_name (file, name);
27605 fputs (",.-", file);
27606 assemble_name (file, name);
27607 fputs ("\n", file);
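      /* For a function "foo" in the medium code model, the output
	 emitted above looks roughly like (label names illustrative):
	    0:	addis 2,12,.TOC.-.LCF0@ha
		addi 2,2,.TOC.-.LCF0@l
		.localentry	foo,.-foo  */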
27608 }
27609
27610 /* Output -mprofile-kernel code. This needs to be done here instead of
27611 in output_function_profile since it must go after the ELFv2 ABI
27612 local entry point. */
27613 if (TARGET_PROFILE_KERNEL && crtl->profile)
27614 {
27615 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27616 gcc_assert (!TARGET_32BIT);
27617
27618 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
27619
27620 /* In the ELFv2 ABI we have no compiler stack word. It must be
27621 the responsibility of _mcount to preserve the static chain
27622 register if required. */
27623 if (DEFAULT_ABI != ABI_ELFv2
27624 && cfun->static_chain_decl != NULL)
27625 {
27626 asm_fprintf (file, "\tstd %s,24(%s)\n",
27627 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27628 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27629 asm_fprintf (file, "\tld %s,24(%s)\n",
27630 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
27631 }
27632 else
27633 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
27634 }
27635
27636 rs6000_pic_labelno++;
27637 }
27638
27639 /* -mprofile-kernel code calls mcount before the function prologue,
27640 so a profiled leaf function should stay a leaf function. */
27641 static bool
27642 rs6000_keep_leaf_when_profiled ()
27643 {
27644 return TARGET_PROFILE_KERNEL;
27645 }
27646
27647 /* Non-zero if vmx regs are restored before the frame pop, zero if
27648 we restore after the pop when possible. */
27649 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
27650
27651 /* Restoring cr is a two-step process: loading a reg from the frame
27652 save, then moving the reg to cr. For ABI_V4 we must let the
27653 unwinder know that the stack location is no longer valid at or
27654 before the stack deallocation, but we can't emit a cfa_restore for
27655 cr at the stack deallocation like we do for other registers.
27656 The trouble is that it is possible for the move to cr to be
27657 scheduled after the stack deallocation. So say exactly where cr
27658 is located on each of the two insns. */
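/* Concretely (register number illustrative), the two insns are
      lwz   12,8(1)	<- REG_CFA_REGISTER: cr now lives in r12
      mtcrf 0x38,12	<- the restore proper
   so the unwinder never reads a stack slot that may already have
   been deallocated.  */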
27659
27660 static rtx
27661 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
27662 {
27663 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
27664 rtx reg = gen_rtx_REG (SImode, regno);
27665 rtx_insn *insn = emit_move_insn (reg, mem);
27666
27667 if (!exit_func && DEFAULT_ABI == ABI_V4)
27668 {
27669 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27670 rtx set = gen_rtx_SET (reg, cr);
27671
27672 add_reg_note (insn, REG_CFA_REGISTER, set);
27673 RTX_FRAME_RELATED_P (insn) = 1;
27674 }
27675 return reg;
27676 }
27677
27678 /* Reload CR from REG. */
27679
27680 static void
27681 restore_saved_cr (rtx reg, bool using_mfcr_multiple, bool exit_func)
27682 {
27683 int count = 0;
27684 int i;
27685
27686 if (using_mfcr_multiple)
27687 {
27688 for (i = 0; i < 8; i++)
27689 if (save_reg_p (CR0_REGNO + i))
27690 count++;
27691 gcc_assert (count);
27692 }
27693
27694 if (using_mfcr_multiple && count > 1)
27695 {
27696 rtx_insn *insn;
27697 rtvec p;
27698 int ndx;
27699
27700 p = rtvec_alloc (count);
27701
27702 ndx = 0;
27703 for (i = 0; i < 8; i++)
27704 if (save_reg_p (CR0_REGNO + i))
27705 {
27706 rtvec r = rtvec_alloc (2);
27707 RTVEC_ELT (r, 0) = reg;
27708 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
27709 RTVEC_ELT (p, ndx) =
27710 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
27711 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
27712 ndx++;
27713 }
27714 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
27715 gcc_assert (ndx == count);
27716
27717 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27718 CR field separately. */
27719 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27720 {
27721 for (i = 0; i < 8; i++)
27722 if (save_reg_p (CR0_REGNO + i))
27723 add_reg_note (insn, REG_CFA_RESTORE,
27724 gen_rtx_REG (SImode, CR0_REGNO + i));
27725
27726 RTX_FRAME_RELATED_P (insn) = 1;
27727 }
27728 }
27729 else
27730 for (i = 0; i < 8; i++)
27731 if (save_reg_p (CR0_REGNO + i))
27732 {
27733 rtx insn = emit_insn (gen_movsi_to_cr_one
27734 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
27735
27736 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
27737 CR field separately, attached to the insn that in fact
27738 restores this particular CR field. */
27739 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
27740 {
27741 add_reg_note (insn, REG_CFA_RESTORE,
27742 gen_rtx_REG (SImode, CR0_REGNO + i));
27743
27744 RTX_FRAME_RELATED_P (insn) = 1;
27745 }
27746 }
27747
27748 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
27749 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
27750 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
27751 {
27752 rtx_insn *insn = get_last_insn ();
27753 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
27754
27755 add_reg_note (insn, REG_CFA_RESTORE, cr);
27756 RTX_FRAME_RELATED_P (insn) = 1;
27757 }
27758 }
27759
27760 /* Like cr, the move to lr instruction can be scheduled after the
27761 stack deallocation, but unlike cr, its stack frame save is still
27762 valid. So we only need to emit the cfa_restore on the correct
27763 instruction. */
27764
27765 static void
27766 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
27767 {
27768 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
27769 rtx reg = gen_rtx_REG (Pmode, regno);
27770
27771 emit_move_insn (reg, mem);
27772 }
27773
27774 static void
27775 restore_saved_lr (int regno, bool exit_func)
27776 {
27777 rtx reg = gen_rtx_REG (Pmode, regno);
27778 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
27779 rtx_insn *insn = emit_move_insn (lr, reg);
27780
27781 if (!exit_func && flag_shrink_wrap)
27782 {
27783 add_reg_note (insn, REG_CFA_RESTORE, lr);
27784 RTX_FRAME_RELATED_P (insn) = 1;
27785 }
27786 }
27787
27788 static rtx
27789 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
27790 {
27791 if (DEFAULT_ABI == ABI_ELFv2)
27792 {
27793 int i;
27794 for (i = 0; i < 8; i++)
27795 if (save_reg_p (CR0_REGNO + i))
27796 {
27797 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
27798 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
27799 cfa_restores);
27800 }
27801 }
27802 else if (info->cr_save_p)
27803 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27804 gen_rtx_REG (SImode, CR2_REGNO),
27805 cfa_restores);
27806
27807 if (info->lr_save_p)
27808 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27809 gen_rtx_REG (Pmode, LR_REGNO),
27810 cfa_restores);
27811 return cfa_restores;
27812 }
27813
27814 /* Return true if OFFSET from stack pointer can be clobbered by signals.
27815 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288 bytes
27816 below the stack pointer that are not clobbered by signals. */
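/* E.g. on a 64-bit AIX/ELF target, an offset of -288 is still inside
   the protected area (returns false), while -289 is below it and may
   be clobbered (returns true).  */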
27817
27818 static inline bool
27819 offset_below_red_zone_p (HOST_WIDE_INT offset)
27820 {
27821 return offset < (DEFAULT_ABI == ABI_V4
27822 ? 0
27823 : TARGET_32BIT ? -220 : -288);
27824 }
27825
27826 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
27827
27828 static void
27829 emit_cfa_restores (rtx cfa_restores)
27830 {
27831 rtx_insn *insn = get_last_insn ();
27832 rtx *loc = &REG_NOTES (insn);
27833
27834 while (*loc)
27835 loc = &XEXP (*loc, 1);
27836 *loc = cfa_restores;
27837 RTX_FRAME_RELATED_P (insn) = 1;
27838 }
27839
27840 /* Emit function epilogue as insns. */
27841
27842 void
27843 rs6000_emit_epilogue (enum epilogue_type epilogue_type)
27844 {
27845 HOST_WIDE_INT frame_off = 0;
27846 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
27847 rtx frame_reg_rtx = sp_reg_rtx;
27848 rtx cfa_restores = NULL_RTX;
27849 rtx insn;
27850 rtx cr_save_reg = NULL_RTX;
27851 machine_mode reg_mode = Pmode;
27852 int reg_size = TARGET_32BIT ? 4 : 8;
27853 machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
27854 int fp_reg_size = 8;
27855 int i;
27856 unsigned ptr_regno;
27857
27858 rs6000_stack_t *info = rs6000_stack_info ();
27859
27860 if (epilogue_type == EPILOGUE_TYPE_NORMAL && crtl->calls_eh_return)
27861 epilogue_type = EPILOGUE_TYPE_EH_RETURN;
27862
27863 int strategy = info->savres_strategy;
27864 bool using_load_multiple = !!(strategy & REST_MULTIPLE);
27865 bool restoring_GPRs_inline = !!(strategy & REST_INLINE_GPRS);
27866 bool restoring_FPRs_inline = !!(strategy & REST_INLINE_FPRS);
27867 if (epilogue_type == EPILOGUE_TYPE_SIBCALL)
27868 {
27869 restoring_GPRs_inline = true;
27870 restoring_FPRs_inline = true;
27871 }
27872
27873 bool using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
27874 || rs6000_tune == PROCESSOR_PPC603
27875 || rs6000_tune == PROCESSOR_PPC750
27876 || optimize_size);
27877
27878 /* Restore via the backchain when we have a large frame, since this
27879 is more efficient than an addis, addi pair. The second condition
27880 here will not trigger at the moment; we don't actually need a
27881 frame pointer for alloca, but the generic parts of the compiler
27882 give us one anyway. */
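  /* A sketch of the two alternatives for a large 64-bit frame
     (register numbers illustrative):
	ld 11,0(1)			<- one load via the backchain
     versus
	addis 11,1,hi ; addi 11,11,lo	<- addis/addi pair  */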
27883 bool use_backchain_to_restore_sp
27884 = (info->total_size + (info->lr_save_p ? info->lr_save_offset : 0) > 32767
27885 || (cfun->calls_alloca && !frame_pointer_needed));
27886
27887 bool restore_lr = (info->lr_save_p
27888 && (restoring_FPRs_inline
27889 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
27890 && (restoring_GPRs_inline
27891 || info->first_fp_reg_save < 64)
27892 && !cfun->machine->lr_is_wrapped_separately);
27893
27894
27895 if (WORLD_SAVE_P (info))
27896 {
27897 gcc_assert (epilogue_type != EPILOGUE_TYPE_SIBCALL);
27898
27899 /* eh_rest_world_r10 will return to the location saved in the LR
27900 stack slot (which is not likely to be our caller).
27901 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
27902 rest_world is similar, except any R10 parameter is ignored.
27903 The exception-handling stuff that was here in 2.95 is no
27904 longer necessary. */
27905
27906 rtvec p;
27907 p = rtvec_alloc (9
27908 + 32 - info->first_gp_reg_save
27909 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
27910 + 63 + 1 - info->first_fp_reg_save);
27911
27912 const char *rname;
27913 switch (epilogue_type)
27914 {
27915 case EPILOGUE_TYPE_NORMAL:
27916 rname = ggc_strdup ("*rest_world");
27917 break;
27918
27919 case EPILOGUE_TYPE_EH_RETURN:
27920 rname = ggc_strdup ("*eh_rest_world_r10");
27921 break;
27922
27923 default:
27924 gcc_unreachable ();
27925 }
27926
27927 int j = 0;
27928 RTVEC_ELT (p, j++) = ret_rtx;
27929 RTVEC_ELT (p, j++)
27930 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, rname));
27931 /* The instruction pattern requires a clobber here;
27932 it is shared with the restVEC helper. */
27933 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 11);
27934
27935 {
27936 /* CR register traditionally saved as CR2. */
27937 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
27938 RTVEC_ELT (p, j++)
27939 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
27940 if (flag_shrink_wrap)
27941 {
27942 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
27943 gen_rtx_REG (Pmode, LR_REGNO),
27944 cfa_restores);
27945 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27946 }
27947 }
27948
27949 int i;
27950 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
27951 {
27952 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
27953 RTVEC_ELT (p, j++)
27954 = gen_frame_load (reg,
27955 frame_reg_rtx, info->gp_save_offset + reg_size * i);
27956 if (flag_shrink_wrap
27957 && save_reg_p (info->first_gp_reg_save + i))
27958 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27959 }
27960 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
27961 {
27962 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
27963 RTVEC_ELT (p, j++)
27964 = gen_frame_load (reg,
27965 frame_reg_rtx, info->altivec_save_offset + 16 * i);
27966 if (flag_shrink_wrap
27967 && save_reg_p (info->first_altivec_reg_save + i))
27968 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27969 }
27970 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
27971 {
27972 rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
27973 info->first_fp_reg_save + i);
27974 RTVEC_ELT (p, j++)
27975 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
27976 if (flag_shrink_wrap
27977 && save_reg_p (info->first_fp_reg_save + i))
27978 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
27979 }
27980 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (Pmode, 0);
27981 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 12);
27982 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 7);
27983 RTVEC_ELT (p, j++) = gen_hard_reg_clobber (SImode, 8);
27984 RTVEC_ELT (p, j++)
27985 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
27986 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
27987
27988 if (flag_shrink_wrap)
27989 {
27990 REG_NOTES (insn) = cfa_restores;
27991 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
27992 RTX_FRAME_RELATED_P (insn) = 1;
27993 }
27994 return;
27995 }
27996
27997 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
27998 if (info->push_p)
27999 frame_off = info->total_size;
28000
28001 /* Restore AltiVec registers if we must do so before adjusting the
28002 stack. */
28003 if (info->altivec_size != 0
28004 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28005 || (DEFAULT_ABI != ABI_V4
28006 && offset_below_red_zone_p (info->altivec_save_offset))))
28007 {
28008 int i;
28009 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28010
28011 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
28012 if (use_backchain_to_restore_sp)
28013 {
28014 int frame_regno = 11;
28015
28016 if ((strategy & REST_INLINE_VRS) == 0)
28017 {
28018 /* Of r11 and r12, select the one not clobbered by an
28019 out-of-line restore function for the frame register. */
28020 frame_regno = 11 + 12 - scratch_regno;
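	      /* I.e. scratch_regno == 11 yields frame_regno == 12,
		 and vice versa.  */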
28021 }
28022 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
28023 emit_move_insn (frame_reg_rtx,
28024 gen_rtx_MEM (Pmode, sp_reg_rtx));
28025 frame_off = 0;
28026 }
28027 else if (frame_pointer_needed)
28028 frame_reg_rtx = hard_frame_pointer_rtx;
28029
28030 if ((strategy & REST_INLINE_VRS) == 0)
28031 {
28032 int end_save = info->altivec_save_offset + info->altivec_size;
28033 int ptr_off;
28034 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28035 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28036
28037 if (end_save + frame_off != 0)
28038 {
28039 rtx offset = GEN_INT (end_save + frame_off);
28040
28041 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28042 }
28043 else
28044 emit_move_insn (ptr_reg, frame_reg_rtx);
28045
28046 ptr_off = -end_save;
28047 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28048 info->altivec_save_offset + ptr_off,
28049 0, V4SImode, SAVRES_VR);
28050 }
28051 else
28052 {
28053 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28054 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28055 {
28056 rtx addr, areg, mem, insn;
28057 rtx reg = gen_rtx_REG (V4SImode, i);
28058 HOST_WIDE_INT offset
28059 = (info->altivec_save_offset + frame_off
28060 + 16 * (i - info->first_altivec_reg_save));
28061
28062 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28063 {
28064 mem = gen_frame_mem (V4SImode,
28065 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28066 GEN_INT (offset)));
28067 insn = gen_rtx_SET (reg, mem);
28068 }
28069 else
28070 {
28071 areg = gen_rtx_REG (Pmode, 0);
28072 emit_move_insn (areg, GEN_INT (offset));
28073
28074 /* AltiVec addressing mode is [reg+reg]. */
28075 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28076 mem = gen_frame_mem (V4SImode, addr);
28077
28078 /* Rather than emitting a generic move, force use of the
28079 lvx instruction, which we always want. In particular we
28080 don't want lxvd2x/xxpermdi for little endian. */
28081 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28082 }
28083
28084 (void) emit_insn (insn);
28085 }
28086 }
28087
28088 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28089 if (((strategy & REST_INLINE_VRS) == 0
28090 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28091 && (flag_shrink_wrap
28092 || (offset_below_red_zone_p
28093 (info->altivec_save_offset
28094 + 16 * (i - info->first_altivec_reg_save))))
28095 && save_reg_p (i))
28096 {
28097 rtx reg = gen_rtx_REG (V4SImode, i);
28098 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28099 }
28100 }
28101
28102 /* Restore VRSAVE if we must do so before adjusting the stack. */
28103 if (info->vrsave_size != 0
28104 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28105 || (DEFAULT_ABI != ABI_V4
28106 && offset_below_red_zone_p (info->vrsave_save_offset))))
28107 {
28108 rtx reg;
28109
28110 if (frame_reg_rtx == sp_reg_rtx)
28111 {
28112 if (use_backchain_to_restore_sp)
28113 {
28114 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28115 emit_move_insn (frame_reg_rtx,
28116 gen_rtx_MEM (Pmode, sp_reg_rtx));
28117 frame_off = 0;
28118 }
28119 else if (frame_pointer_needed)
28120 frame_reg_rtx = hard_frame_pointer_rtx;
28121 }
28122
28123 reg = gen_rtx_REG (SImode, 12);
28124 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28125 info->vrsave_save_offset + frame_off));
28126
28127 emit_insn (generate_set_vrsave (reg, info, 1));
28128 }
28129
28130 insn = NULL_RTX;
28131 /* If we have a large stack frame, restore the old stack pointer
28132 using the backchain. */
28133 if (use_backchain_to_restore_sp)
28134 {
28135 if (frame_reg_rtx == sp_reg_rtx)
28136 {
28137 /* Under V.4, don't reset the stack pointer until after we're done
28138 loading the saved registers. */
28139 if (DEFAULT_ABI == ABI_V4)
28140 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28141
28142 insn = emit_move_insn (frame_reg_rtx,
28143 gen_rtx_MEM (Pmode, sp_reg_rtx));
28144 frame_off = 0;
28145 }
28146 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28147 && DEFAULT_ABI == ABI_V4)
28148 /* frame_reg_rtx has been set up by the altivec restore. */
28149 ;
28150 else
28151 {
28152 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
28153 frame_reg_rtx = sp_reg_rtx;
28154 }
28155 }
28156 /* If we have a frame pointer, we can restore the old stack pointer
28157 from it. */
28158 else if (frame_pointer_needed)
28159 {
28160 frame_reg_rtx = sp_reg_rtx;
28161 if (DEFAULT_ABI == ABI_V4)
28162 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
28163 /* Prevent reordering memory accesses against stack pointer restore. */
28164 else if (cfun->calls_alloca
28165 || offset_below_red_zone_p (-info->total_size))
28166 rs6000_emit_stack_tie (frame_reg_rtx, true);
28167
28168 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
28169 GEN_INT (info->total_size)));
28170 frame_off = 0;
28171 }
28172 else if (info->push_p
28173 && DEFAULT_ABI != ABI_V4
28174 && epilogue_type != EPILOGUE_TYPE_EH_RETURN)
28175 {
28176 /* Prevent reordering memory accesses against stack pointer restore. */
28177 if (cfun->calls_alloca
28178 || offset_below_red_zone_p (-info->total_size))
28179 rs6000_emit_stack_tie (frame_reg_rtx, false);
28180 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
28181 GEN_INT (info->total_size)));
28182 frame_off = 0;
28183 }
28184 if (insn && frame_reg_rtx == sp_reg_rtx)
28185 {
28186 if (cfa_restores)
28187 {
28188 REG_NOTES (insn) = cfa_restores;
28189 cfa_restores = NULL_RTX;
28190 }
28191 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28192 RTX_FRAME_RELATED_P (insn) = 1;
28193 }
28194
28195 /* Restore AltiVec registers if we have not done so already. */
28196 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28197 && info->altivec_size != 0
28198 && (DEFAULT_ABI == ABI_V4
28199 || !offset_below_red_zone_p (info->altivec_save_offset)))
28200 {
28201 int i;
28202
28203 if ((strategy & REST_INLINE_VRS) == 0)
28204 {
28205 int end_save = info->altivec_save_offset + info->altivec_size;
28206 int ptr_off;
28207 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
28208 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
28209 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
28210
28211 if (end_save + frame_off != 0)
28212 {
28213 rtx offset = GEN_INT (end_save + frame_off);
28214
28215 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
28216 }
28217 else
28218 emit_move_insn (ptr_reg, frame_reg_rtx);
28219
28220 ptr_off = -end_save;
28221 insn = rs6000_emit_savres_rtx (info, scratch_reg,
28222 info->altivec_save_offset + ptr_off,
28223 0, V4SImode, SAVRES_VR);
28224 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
28225 {
28226 /* Frame reg was clobbered by out-of-line save. Restore it
28227 from ptr_reg, and if we are calling an out-of-line gpr or
28228 fpr restore, set up the correct pointer and offset.
28229 unsigned newptr_regno = 1;
28230 if (!restoring_GPRs_inline)
28231 {
28232 bool lr = info->gp_save_offset + info->gp_size == 0;
28233 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28234 newptr_regno = ptr_regno_for_savres (sel);
28235 end_save = info->gp_save_offset + info->gp_size;
28236 }
28237 else if (!restoring_FPRs_inline)
28238 {
28239 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
28240 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28241 newptr_regno = ptr_regno_for_savres (sel);
28242 end_save = info->fp_save_offset + info->fp_size;
28243 }
28244
28245 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
28246 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
28247
28248 if (end_save + ptr_off != 0)
28249 {
28250 rtx offset = GEN_INT (end_save + ptr_off);
28251
28252 frame_off = -end_save;
28253 if (TARGET_32BIT)
28254 emit_insn (gen_addsi3_carry (frame_reg_rtx,
28255 ptr_reg, offset));
28256 else
28257 emit_insn (gen_adddi3_carry (frame_reg_rtx,
28258 ptr_reg, offset));
28259 }
28260 else
28261 {
28262 frame_off = ptr_off;
28263 emit_move_insn (frame_reg_rtx, ptr_reg);
28264 }
28265 }
28266 }
28267 else
28268 {
28269 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28270 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
28271 {
28272 rtx addr, areg, mem, insn;
28273 rtx reg = gen_rtx_REG (V4SImode, i);
28274 HOST_WIDE_INT offset
28275 = (info->altivec_save_offset + frame_off
28276 + 16 * (i - info->first_altivec_reg_save));
28277
28278 if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
28279 {
28280 mem = gen_frame_mem (V4SImode,
28281 gen_rtx_PLUS (Pmode, frame_reg_rtx,
28282 GEN_INT (offset)));
28283 insn = gen_rtx_SET (reg, mem);
28284 }
28285 else
28286 {
28287 areg = gen_rtx_REG (Pmode, 0);
28288 emit_move_insn (areg, GEN_INT (offset));
28289
28290 /* AltiVec addressing mode is [reg+reg]. */
28291 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
28292 mem = gen_frame_mem (V4SImode, addr);
28293
28294 /* Rather than emitting a generic move, force use of the
28295 lvx instruction, which we always want. In particular we
28296 don't want lxvd2x/xxpermdi for little endian. */
28297 insn = gen_altivec_lvx_v4si_internal (reg, mem);
28298 }
28299
28300 (void) emit_insn (insn);
28301 }
28302 }
28303
28304 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
28305 if (((strategy & REST_INLINE_VRS) == 0
28306 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
28307 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28308 && save_reg_p (i))
28309 {
28310 rtx reg = gen_rtx_REG (V4SImode, i);
28311 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28312 }
28313 }
28314
28315 /* Restore VRSAVE if we have not done so already. */
28316 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
28317 && info->vrsave_size != 0
28318 && (DEFAULT_ABI == ABI_V4
28319 || !offset_below_red_zone_p (info->vrsave_save_offset)))
28320 {
28321 rtx reg;
28322
28323 reg = gen_rtx_REG (SImode, 12);
28324 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28325 info->vrsave_save_offset + frame_off));
28326
28327 emit_insn (generate_set_vrsave (reg, info, 1));
28328 }
28329
28330 /* If we exit by an out-of-line restore function on ABI_V4 then that
28331 function will deallocate the stack, so we don't need to worry
28332 about the unwinder restoring cr from an invalid stack frame
28333 location. */
28334 bool exit_func = (!restoring_FPRs_inline
28335 || (!restoring_GPRs_inline
28336 && info->first_fp_reg_save == 64));
28337
28338 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
28339 *separate* slots if the routine calls __builtin_eh_return, so
28340 that they can be independently restored by the unwinder. */
28341 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
28342 {
28343 int i, cr_off = info->ehcr_offset;
28344
28345 for (i = 0; i < 8; i++)
28346 if (!call_used_regs[CR0_REGNO + i])
28347 {
28348 rtx reg = gen_rtx_REG (SImode, 0);
28349 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28350 cr_off + frame_off));
28351
28352 insn = emit_insn (gen_movsi_to_cr_one
28353 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
28354
28355 if (!exit_func && flag_shrink_wrap)
28356 {
28357 add_reg_note (insn, REG_CFA_RESTORE,
28358 gen_rtx_REG (SImode, CR0_REGNO + i));
28359
28360 RTX_FRAME_RELATED_P (insn) = 1;
28361 }
28362
28363 cr_off += reg_size;
28364 }
28365 }
28366
28367 /* Get the old lr if we saved it. If we are restoring registers
28368 out-of-line, then the out-of-line routines can do this for us. */
28369 if (restore_lr && restoring_GPRs_inline)
28370 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28371
28372 /* Get the old cr if we saved it. */
28373 if (info->cr_save_p)
28374 {
28375 unsigned cr_save_regno = 12;
28376
28377 if (!restoring_GPRs_inline)
28378 {
28379 /* Ensure we don't use the register used by the out-of-line
28380 gpr register restore below. */
28381 bool lr = info->gp_save_offset + info->gp_size == 0;
28382 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
28383 int gpr_ptr_regno = ptr_regno_for_savres (sel);
28384
28385 if (gpr_ptr_regno == 12)
28386 cr_save_regno = 11;
28387 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
28388 }
28389 else if (REGNO (frame_reg_rtx) == 12)
28390 cr_save_regno = 11;
28391
28392 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
28393 info->cr_save_offset + frame_off,
28394 exit_func);
28395 }
28396
28397 /* Set LR here to try to overlap restores below. */
28398 if (restore_lr && restoring_GPRs_inline)
28399 restore_saved_lr (0, exit_func);
28400
28401 /* Load exception handler data registers, if needed. */
28402 if (epilogue_type == EPILOGUE_TYPE_EH_RETURN)
28403 {
28404 unsigned int i, regno;
28405
28406 if (TARGET_AIX)
28407 {
28408 rtx reg = gen_rtx_REG (reg_mode, 2);
28409 emit_insn (gen_frame_load (reg, frame_reg_rtx,
28410 frame_off + RS6000_TOC_SAVE_SLOT));
28411 }
28412
28413 for (i = 0; ; ++i)
28414 {
28415 rtx mem;
28416
28417 regno = EH_RETURN_DATA_REGNO (i);
28418 if (regno == INVALID_REGNUM)
28419 break;
28420
28421 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
28422 info->ehrd_offset + frame_off
28423 + reg_size * (int) i);
28424
28425 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
28426 }
28427 }
28428
28429 /* Restore GPRs. This is done as a PARALLEL if we are using
28430 the load-multiple instructions. */
28431 if (!restoring_GPRs_inline)
28432 {
28433 /* We are jumping to an out-of-line function. */
28434 rtx ptr_reg;
28435 int end_save = info->gp_save_offset + info->gp_size;
28436 bool can_use_exit = end_save == 0;
28437 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
28438 int ptr_off;
28439
28440 /* Emit stack reset code if we need it. */
28441 ptr_regno = ptr_regno_for_savres (sel);
28442 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28443 if (can_use_exit)
28444 rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28445 else if (end_save + frame_off != 0)
28446 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
28447 GEN_INT (end_save + frame_off)));
28448 else if (REGNO (frame_reg_rtx) != ptr_regno)
28449 emit_move_insn (ptr_reg, frame_reg_rtx);
28450 if (REGNO (frame_reg_rtx) == ptr_regno)
28451 frame_off = -end_save;
28452
28453 if (can_use_exit && info->cr_save_p)
28454 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
28455
28456 ptr_off = -end_save;
28457 rs6000_emit_savres_rtx (info, ptr_reg,
28458 info->gp_save_offset + ptr_off,
28459 info->lr_save_offset + ptr_off,
28460 reg_mode, sel);
28461 }
28462 else if (using_load_multiple)
28463 {
28464 rtvec p;
28465 p = rtvec_alloc (32 - info->first_gp_reg_save);
28466 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28467 RTVEC_ELT (p, i)
28468 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28469 frame_reg_rtx,
28470 info->gp_save_offset + frame_off + reg_size * i);
28471 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28472 }
28473 else
28474 {
28475 int offset = info->gp_save_offset + frame_off;
28476 for (i = info->first_gp_reg_save; i < 32; i++)
28477 {
28478 if (save_reg_p (i)
28479 && !cfun->machine->gpr_is_wrapped_separately[i])
28480 {
28481 rtx reg = gen_rtx_REG (reg_mode, i);
28482 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28483 }
28484
28485 offset += reg_size;
28486 }
28487 }
28488
28489 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28490 {
28491 /* If the frame pointer was used then we can't delay emitting
28492 a REG_CFA_DEF_CFA note. This must happen on the insn that
28493 restores the frame pointer, r31. We may have already emitted
28494 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
28495 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
28496 be harmless if emitted. */
28497 if (frame_pointer_needed)
28498 {
28499 insn = get_last_insn ();
28500 add_reg_note (insn, REG_CFA_DEF_CFA,
28501 plus_constant (Pmode, frame_reg_rtx, frame_off));
28502 RTX_FRAME_RELATED_P (insn) = 1;
28503 }
28504
28505 /* Set up cfa_restores. We always need these when
28506 shrink-wrapping. If not shrink-wrapping then we only need
28507 the cfa_restore when the stack location is no longer valid.
28508 The cfa_restores must be emitted on or before the insn that
28509 invalidates the stack, and of course must not be emitted
28510 before the insn that actually does the restore. The latter
28511 is why it is a bad idea to emit the cfa_restores as a group
28512 on the last instruction here that actually does a restore:
28513 That insn may be reordered with respect to others doing
28514 restores. */
28515 if (flag_shrink_wrap
28516 && !restoring_GPRs_inline
28517 && info->first_fp_reg_save == 64)
28518 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28519
28520 for (i = info->first_gp_reg_save; i < 32; i++)
28521 if (save_reg_p (i)
28522 && !cfun->machine->gpr_is_wrapped_separately[i])
28523 {
28524 rtx reg = gen_rtx_REG (reg_mode, i);
28525 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28526 }
28527 }
28528
28529 if (!restoring_GPRs_inline
28530 && info->first_fp_reg_save == 64)
28531 {
28532 /* We are jumping to an out-of-line function. */
28533 if (cfa_restores)
28534 emit_cfa_restores (cfa_restores);
28535 return;
28536 }
28537
28538 if (restore_lr && !restoring_GPRs_inline)
28539 {
28540 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
28541 restore_saved_lr (0, exit_func);
28542 }
28543
28544 /* Restore fpr's if we need to do it without calling a function. */
28545 if (restoring_FPRs_inline)
28546 {
28547 int offset = info->fp_save_offset + frame_off;
28548 for (i = info->first_fp_reg_save; i < 64; i++)
28549 {
28550 if (save_reg_p (i)
28551 && !cfun->machine->fpr_is_wrapped_separately[i - 32])
28552 {
28553 rtx reg = gen_rtx_REG (fp_reg_mode, i);
28554 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
28555 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
28556 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
28557 cfa_restores);
28558 }
28559
28560 offset += fp_reg_size;
28561 }
28562 }
28563
28564 /* If we saved cr, restore it here. Just those that were used. */
28565 if (info->cr_save_p)
28566 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
28567
28568 /* If this is V.4, unwind the stack pointer after all of the loads
28569 have been done, or set up r11 if we are restoring fp out of line. */
28570 ptr_regno = 1;
28571 if (!restoring_FPRs_inline)
28572 {
28573 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28574 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
28575 ptr_regno = ptr_regno_for_savres (sel);
28576 }
28577
28578 insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
28579 if (REGNO (frame_reg_rtx) == ptr_regno)
28580 frame_off = 0;
28581
28582 if (insn && restoring_FPRs_inline)
28583 {
28584 if (cfa_restores)
28585 {
28586 REG_NOTES (insn) = cfa_restores;
28587 cfa_restores = NULL_RTX;
28588 }
28589 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
28590 RTX_FRAME_RELATED_P (insn) = 1;
28591 }
28592
28593 if (epilogue_type == EPILOGUE_TYPE_EH_RETURN)
28594 {
28595 rtx sa = EH_RETURN_STACKADJ_RTX;
28596 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
28597 }
28598
28599 if (epilogue_type != EPILOGUE_TYPE_SIBCALL && restoring_FPRs_inline)
28600 {
28601 if (cfa_restores)
28602 {
28603 /* We can't hang the cfa_restores off a simple return,
28604 since the shrink-wrap code sometimes uses an existing
28605 return. This means there might be a path from
28606 pre-prologue code to this return, and dwarf2cfi code
28607 wants the eh_frame unwinder state to be the same on
28608 all paths to any point. So we need to emit the
28609 cfa_restores before the return. For -m64 we really
28610 don't need epilogue cfa_restores at all, except for
28611 this irritating dwarf2cfi requirement when shrink-wrapping;
28612 the stack red-zone means eh_frame info
28613 from the prologue telling the unwinder to restore
28614 from the stack is perfectly good right to the end of
28615 the function. */
28616 emit_insn (gen_blockage ());
28617 emit_cfa_restores (cfa_restores);
28618 cfa_restores = NULL_RTX;
28619 }
28620
28621 emit_jump_insn (targetm.gen_simple_return ());
28622 }
28623
28624 if (epilogue_type != EPILOGUE_TYPE_SIBCALL && !restoring_FPRs_inline)
28625 {
28626 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
28627 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
28628 int elt = 0;
28629 RTVEC_ELT (p, elt++) = ret_rtx;
28630 if (lr)
28631 RTVEC_ELT (p, elt++) = gen_hard_reg_clobber (Pmode, LR_REGNO);
28632
28633 /* We have to restore more than two FP registers, so branch to the
28634 restore function. It will return to our caller. */
28635 int i;
28636 int reg;
28637 rtx sym;
28638
28639 if (flag_shrink_wrap)
28640 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
28641
28642 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
28643 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
28644 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
28645 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
28646
28647 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28648 {
28649 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
28650
28651 RTVEC_ELT (p, elt++)
28652 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
28653 if (flag_shrink_wrap
28654 && save_reg_p (info->first_fp_reg_save + i))
28655 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
28656 }
28657
28658 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
28659 }
28660
28661 if (cfa_restores)
28662 {
28663 if (epilogue_type == EPILOGUE_TYPE_SIBCALL)
28664 /* Ensure the cfa_restores are hung off an insn that won't
28665 be reordered above other restores. */
28666 emit_insn (gen_blockage ());
28667
28668 emit_cfa_restores (cfa_restores);
28669 }
28670 }
28671
28672 /* Write function epilogue. */
28673
28674 static void
28675 rs6000_output_function_epilogue (FILE *file)
28676 {
28677 #if TARGET_MACHO
28678 macho_branch_islands ();
28679
28680 {
28681 rtx_insn *insn = get_last_insn ();
28682 rtx_insn *deleted_debug_label = NULL;
28683
28684 /* Mach-O doesn't support labels at the end of objects, so if
28685 it looks like we might want one, take special action.
28686
28687 First, collect any sequence of deleted debug labels. */
28688 while (insn
28689 && NOTE_P (insn)
28690 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
28691 {
28692 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
28693 notes; instead set their CODE_LABEL_NUMBER to -1,
28694 since otherwise there would be code generation differences
28695 between -g and -g0.
28696 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28697 deleted_debug_label = insn;
28698 insn = PREV_INSN (insn);
28699 }
28700
28701 /* Second, if we have:
28702 label:
28703 barrier
28704 then this needs to be detected, so skip past the barrier. */
28705
28706 if (insn && BARRIER_P (insn))
28707 insn = PREV_INSN (insn);
28708
28709 /* Up to now we've only seen notes or barriers. */
28710 if (insn)
28711 {
28712 if (LABEL_P (insn)
28713 || (NOTE_P (insn)
28714 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
28715 /* Trailing label: <barrier>. */
28716 fputs ("\tnop\n", file);
28717 else
28718 {
28719 /* Lastly, see if we have a completely empty function body. */
28720 while (insn && ! INSN_P (insn))
28721 insn = PREV_INSN (insn);
28722 /* If we don't find any insns, we've got an empty function body;
28723 i.e. completely empty, without a return or branch. This is
28724 taken as the case where a function body has been removed
28725 because it contains an inline __builtin_unreachable(). GCC
28726 states that reaching __builtin_unreachable() means UB so we're
28727 not obliged to do anything special; however, we want
28728 non-zero-sized function bodies. To meet this, and help the
28729 user out, let's trap the case. */
28730 if (insn == NULL)
28731 fputs ("\ttrap\n", file);
28732 }
28733 }
28734 else if (deleted_debug_label)
28735 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
28736 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
28737 CODE_LABEL_NUMBER (insn) = -1;
28738 }
28739 #endif
28740
28741 /* Output a traceback table here. See /usr/include/sys/debug.h for info
28742 on its format.
28743
28744 We don't output a traceback table if -finhibit-size-directive was
28745 used. The documentation for -finhibit-size-directive reads
28746 ``don't output a @code{.size} assembler directive, or anything
28747 else that would cause trouble if the function is split in the
28748 middle, and the two halves are placed at locations far apart in
28749 memory.'' The traceback table has this property, since it
28750 includes the offset from the start of the function to the
28751 traceback table itself.
28752
28753 System V.4 PowerPC (and the embedded ABI derived from it) uses a
28754 different traceback table. */
28755 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28756 && ! flag_inhibit_size_directive
28757 && rs6000_traceback != traceback_none && !cfun->is_thunk)
28758 {
28759 const char *fname = NULL;
28760 const char *language_string = lang_hooks.name;
28761 int fixed_parms = 0, float_parms = 0, parm_info = 0;
28762 int i;
28763 int optional_tbtab;
28764 rs6000_stack_t *info = rs6000_stack_info ();
28765
28766 if (rs6000_traceback == traceback_full)
28767 optional_tbtab = 1;
28768 else if (rs6000_traceback == traceback_part)
28769 optional_tbtab = 0;
28770 else
28771 optional_tbtab = !optimize_size && !TARGET_ELF;
28772
28773 if (optional_tbtab)
28774 {
28775 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
28776 while (*fname == '.') /* V.4 encodes . in the name */
28777 fname++;
28778
28779 /* Need label immediately before tbtab, so we can compute
28780 its offset from the function start. */
28781 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28782 ASM_OUTPUT_LABEL (file, fname);
28783 }
28784
28785 /* The .tbtab pseudo-op can only be used for the first eight
28786 expressions, since it can't handle the possibly variable
28787 length fields that follow. However, if you omit the optional
28788 fields, the assembler outputs zeros for all optional fields
28789 anyway, giving each variable length field its minimum length
28790 (as defined in sys/debug.h). Thus we cannot use the .tbtab
28791 pseudo-op at all. */
28792
28793 /* An all-zero word flags the start of the tbtab, for debuggers
28794 that have to find it by searching forward from the entry
28795 point or from the current pc. */
28796 fputs ("\t.long 0\n", file);
28797
28798 /* Tbtab format type. Use format type 0. */
28799 fputs ("\t.byte 0,", file);
28800
28801 /* Language type. Unfortunately, there does not seem to be any
28802 official way to discover the language being compiled, so we
28803 use language_string.
28804 C is 0. Fortran is 1. Ada is 3. C++ is 9.
28805 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
28806 a number, so for now use 9. LTO, Go, D, and JIT aren't assigned
28807 numbers either, so for now use 0. */
28808 if (lang_GNU_C ()
28809 || ! strcmp (language_string, "GNU GIMPLE")
28810 || ! strcmp (language_string, "GNU Go")
28811 || ! strcmp (language_string, "GNU D")
28812 || ! strcmp (language_string, "libgccjit"))
28813 i = 0;
28814 else if (! strcmp (language_string, "GNU F77")
28815 || lang_GNU_Fortran ())
28816 i = 1;
28817 else if (! strcmp (language_string, "GNU Ada"))
28818 i = 3;
28819 else if (lang_GNU_CXX ()
28820 || ! strcmp (language_string, "GNU Objective-C++"))
28821 i = 9;
28822 else if (! strcmp (language_string, "GNU Java"))
28823 i = 13;
28824 else if (! strcmp (language_string, "GNU Objective-C"))
28825 i = 14;
28826 else
28827 gcc_unreachable ();
28828 fprintf (file, "%d,", i);
28829
28830 /* 8 single bit fields: global linkage (not set for C extern linkage,
28831 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
28832 from start of procedure stored in tbtab, internal function, function
28833 has controlled storage, function has no toc, function uses fp,
28834 function logs/aborts fp operations. */
28835 /* Assume that fp operations are used if any fp reg must be saved. */
28836 fprintf (file, "%d,",
28837 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
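      /* E.g. with a full traceback table and at least one FPR saved,
	 this byte is (1 << 5) | (1 << 1) == 34.  */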
28838
28839 /* 6 bitfields: function is interrupt handler, name present in
28840 proc table, function calls alloca, on condition directives
28841 (controls stack walks, 3 bits), saves condition reg, saves
28842 link reg. */
28843 /* The `function calls alloca' bit seems to be set whenever reg 31 is
28844 set up as a frame pointer, even when there is no alloca call. */
28845 fprintf (file, "%d,",
28846 ((optional_tbtab << 6)
28847 | ((optional_tbtab & frame_pointer_needed) << 5)
28848 | (info->cr_save_p << 1)
28849 | (info->lr_save_p)));
28850
28851 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
28852 (6 bits). */
28853 fprintf (file, "%d,",
28854 (info->push_p << 7) | (64 - info->first_fp_reg_save));
28855
28856 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
28857 fprintf (file, "%d,", (32 - first_reg_to_save ()));
28858
28859 if (optional_tbtab)
28860 {
28861 /* Compute the parameter info from the function decl argument
28862 list. */
28863 tree decl;
28864 int next_parm_info_bit = 31;
28865
28866 for (decl = DECL_ARGUMENTS (current_function_decl);
28867 decl; decl = DECL_CHAIN (decl))
28868 {
28869 rtx parameter = DECL_INCOMING_RTL (decl);
28870 machine_mode mode = GET_MODE (parameter);
28871
28872 if (REG_P (parameter))
28873 {
28874 if (SCALAR_FLOAT_MODE_P (mode))
28875 {
28876 int bits;
28877
28878 float_parms++;
28879
28880 switch (mode)
28881 {
28882 case E_SFmode:
28883 case E_SDmode:
28884 bits = 0x2;
28885 break;
28886
28887 case E_DFmode:
28888 case E_DDmode:
28889 case E_TFmode:
28890 case E_TDmode:
28891 case E_IFmode:
28892 case E_KFmode:
28893 bits = 0x3;
28894 break;
28895
28896 default:
28897 gcc_unreachable ();
28898 }
28899
28900 /* If only one bit will fit, don't or in this entry. */
28901 if (next_parm_info_bit > 0)
28902 parm_info |= (bits << (next_parm_info_bit - 1));
28903 next_parm_info_bit -= 2;
28904 }
28905 else
28906 {
28907 fixed_parms += ((GET_MODE_SIZE (mode)
28908 + (UNITS_PER_WORD - 1))
28909 / UNITS_PER_WORD);
28910 next_parm_info_bit -= 1;
28911 }
28912 }
28913 }
28914 }
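      /* As an illustration of the encoding built above, a hypothetical
	 function taking (int, double, float) in registers produces:
	   int    -> one clear bit	(bit 31, next_parm_info_bit -> 30)
	   double -> bits 11		(bits 30-29)
	   float  -> bits 10		(bit 28 set, bit 27 clear)
	 so parm_info == (0x3 << 29) | (0x2 << 27) == 0x70000000.  */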
28915
28916 /* Number of fixed point parameters. */
28917 /* This is actually the number of words of fixed point parameters; thus
28918 an 8 byte struct counts as 2; and thus the maximum value is 8. */
28919 fprintf (file, "%d,", fixed_parms);
28920
28921 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28922 all on stack. */
28923 /* This is actually the number of fp registers that hold parameters;
28924 and thus the maximum value is 13. */
28925 /* Set parameters on stack bit if parameters are not in their original
28926 registers, regardless of whether they are on the stack? Xlc
28927 seems to set the bit when not optimizing. */
28928 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
28929
28930 if (optional_tbtab)
28931 {
28932 /* Optional fields follow. Some are variable length. */
28933
28934 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28935 float, 11 double float. */
28936 /* There is an entry for each parameter in a register, in the order
28937 that they occur in the parameter list. Any intervening arguments
28938 on the stack are ignored. If the list overflows a long (max
28939 possible length 34 bits) then completely leave off all elements
28940 that don't fit. */
28941 /* Only emit this long if there was at least one parameter. */
28942 if (fixed_parms || float_parms)
28943 fprintf (file, "\t.long %d\n", parm_info);
28944
28945 /* Offset from start of code to tb table. */
28946 fputs ("\t.long ", file);
28947 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
28948 RS6000_OUTPUT_BASENAME (file, fname);
28949 putc ('-', file);
28950 rs6000_output_function_entry (file, fname);
28951 putc ('\n', file);
28952
28953 /* Interrupt handler mask. */
28954 /* Omit this long, since we never set the interrupt handler bit
28955 above. */
28956
28957 /* Number of CTL (controlled storage) anchors. */
28958 /* Omit this long, since the has_ctl bit is never set above. */
28959
28960 /* Displacement into stack of each CTL anchor. */
28961 /* Omit this list of longs, because there are no CTL anchors. */
28962
28963 /* Length of function name. */
28964 if (*fname == '*')
28965 ++fname;
28966 fprintf (file, "\t.short %d\n", (int) strlen (fname));
28967
28968 /* Function name. */
28969 assemble_string (fname, strlen (fname));
28970
28971 /* Register for alloca automatic storage; this is always reg 31.
28972 Only emit this if the alloca bit was set above. */
28973 if (frame_pointer_needed)
28974 fputs ("\t.byte 31\n", file);
28975
28976 fputs ("\t.align 2\n", file);
28977 }
28978 }
28979
28980 /* Arrange to define .LCTOC1 label, if not already done. */
28981 if (need_toc_init)
28982 {
28983 need_toc_init = 0;
28984 if (!toc_initialized)
28985 {
28986 switch_to_section (toc_section);
28987 switch_to_section (current_function_section ());
28988 }
28989 }
28990 }
28991
28992 /* -fsplit-stack support. */
28993
28994 /* A SYMBOL_REF for __morestack. */
28995 static GTY(()) rtx morestack_ref;
28996
28997 static rtx
28998 gen_add3_const (rtx rt, rtx ra, long c)
28999 {
29000 if (TARGET_64BIT)
29001 return gen_adddi3 (rt, ra, GEN_INT (c));
29002 else
29003 return gen_addsi3 (rt, ra, GEN_INT (c));
29004 }
29005
29006 /* Emit -fsplit-stack prologue, which goes before the regular function
29007 prologue (at local entry point in the case of ELFv2). */
29008
29009 void
29010 rs6000_expand_split_stack_prologue (void)
29011 {
29012 rs6000_stack_t *info = rs6000_stack_info ();
29013 unsigned HOST_WIDE_INT allocate;
29014 long alloc_hi, alloc_lo;
29015 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
29016 rtx_insn *insn;
29017
29018 gcc_assert (flag_split_stack && reload_completed);
29019
29020 if (!info->push_p)
29021 return;
29022
29023 if (global_regs[29])
29024 {
29025 error ("%qs uses register r29", "%<-fsplit-stack%>");
29026 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
29027 "conflicts with %qD", global_regs_decl[29]);
29028 }
29029
29030 allocate = info->total_size;
29031 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
29032 {
29033 sorry ("Stack frame larger than 2G is not supported for "
29034 "%<-fsplit-stack%>");
29035 return;
29036 }
29037 if (morestack_ref == NULL_RTX)
29038 {
29039 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
29040 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
29041 | SYMBOL_FLAG_FUNCTION);
29042 }
29043
29044 r0 = gen_rtx_REG (Pmode, 0);
29045 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29046 r12 = gen_rtx_REG (Pmode, 12);
29047 emit_insn (gen_load_split_stack_limit (r0));
29048 /* Always emit two insns here to calculate the requested stack,
29049 so that the linker can edit them when adjusting size for calling
29050 non-split-stack code. */
29051 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
29052 alloc_lo = -allocate - alloc_hi;
29053 if (alloc_hi != 0)
29054 {
29055 emit_insn (gen_add3_const (r12, r1, alloc_hi));
29056 if (alloc_lo != 0)
29057 emit_insn (gen_add3_const (r12, r12, alloc_lo));
29058 else
29059 emit_insn (gen_nop ());
29060 }
29061 else
29062 {
29063 emit_insn (gen_add3_const (r12, r1, alloc_lo));
29064 emit_insn (gen_nop ());
29065 }
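  /* A worked example of the split above, for a hypothetical frame of
     0x9000 bytes: -allocate == -0x9000, so
	alloc_hi == (-0x9000 + 0x8000) & ~0xffff == -0x10000
	alloc_lo == -0x9000 - (-0x10000)         ==  0x7000
     and the two insns are effectively
	addis 12,1,-1		(r12 = r1 - 0x10000)
	addi  12,12,0x7000	(r12 = r1 - 0x9000).  */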
29066
29067 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29068 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
29069 ok_label = gen_label_rtx ();
29070 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29071 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
29072 gen_rtx_LABEL_REF (VOIDmode, ok_label),
29073 pc_rtx);
29074 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29075 JUMP_LABEL (insn) = ok_label;
29076 /* Mark the jump as very likely to be taken. */
29077 add_reg_br_prob_note (insn, profile_probability::very_likely ());
29078
29079 lr = gen_rtx_REG (Pmode, LR_REGNO);
29080 insn = emit_move_insn (r0, lr);
29081 RTX_FRAME_RELATED_P (insn) = 1;
29082 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
29083 RTX_FRAME_RELATED_P (insn) = 1;
29084
29085 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
29086 const0_rtx, const0_rtx));
29087 call_fusage = NULL_RTX;
29088 use_reg (&call_fusage, r12);
29089 /* Say the call uses r0, even though it doesn't, to stop regrename
29090 from twiddling with the insns saving lr, trashing args for cfun.
29091 The insns restoring lr are similarly protected by making
29092 split_stack_return use r0. */
29093 use_reg (&call_fusage, r0);
29094 add_function_usage_to (insn, call_fusage);
29095 /* Indicate that this function can't jump to non-local gotos. */
29096 make_reg_eh_region_note_nothrow_nononlocal (insn);
29097 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
29098 insn = emit_move_insn (lr, r0);
29099 add_reg_note (insn, REG_CFA_RESTORE, lr);
29100 RTX_FRAME_RELATED_P (insn) = 1;
29101 emit_insn (gen_split_stack_return ());
29102
29103 emit_label (ok_label);
29104 LABEL_NUSES (ok_label) = 1;
29105 }
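/* Putting the pieces above together, the emitted prologue looks roughly
   like this -- an illustrative sketch, not verified compiler output; the
   limit load depends on what gen_load_split_stack_limit expands to for
   the target:

	<load split-stack limit into r0>
	addis 12,1,-hi(frame)
	addi  12,12,-lo(frame)	# or a nop, so the linker sees two insns
	cmpld 7,12,0
	bge   7,.Lok		# enough stack: run the normal prologue
	mflr  0
	std   0,lr_save_offset(1)
	bl    __morestack
	ld    0,lr_save_offset(1)
	mtlr  0
	blr			# split_stack_return
   .Lok:  */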
29106
29107 /* Return the internal arg pointer used for function incoming
29108 arguments. When -fsplit-stack, the arg pointer is r12 so we need
29109 to copy it to a pseudo in order for it to be preserved over calls
29110 and suchlike. We'd really like to use a pseudo here for the
29111 internal arg pointer but data-flow analysis is not prepared to
29112 accept pseudos as live at the beginning of a function. */
29113
29114 static rtx
29115 rs6000_internal_arg_pointer (void)
29116 {
29117 if (flag_split_stack
29118 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
29119 == NULL))
29121     {
29122 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
29123 {
29124 rtx pat;
29125
29126 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
29127 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
29128
29129 /* Put the pseudo initialization right after the note at the
29130 beginning of the function. */
29131 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
29132 gen_rtx_REG (Pmode, 12));
29133 push_topmost_sequence ();
29134 emit_insn_after (pat, get_insns ());
29135 pop_topmost_sequence ();
29136 }
29137 rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
29138 FIRST_PARM_OFFSET (current_function_decl));
29139 return copy_to_reg (ret);
29140 }
29141 return virtual_incoming_args_rtx;
29142 }
29143
29144 /* We may have to tell the dataflow pass that the split stack prologue
29145 is initializing a register. */
29146
29147 static void
29148 rs6000_live_on_entry (bitmap regs)
29149 {
29150 if (flag_split_stack)
29151 bitmap_set_bit (regs, 12);
29152 }
29153
29154 /* Emit -fsplit-stack dynamic stack allocation space check. */
29155
29156 void
29157 rs6000_split_stack_space_check (rtx size, rtx label)
29158 {
29159 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
29160 rtx limit = gen_reg_rtx (Pmode);
29161 rtx requested = gen_reg_rtx (Pmode);
29162 rtx cmp = gen_reg_rtx (CCUNSmode);
29163 rtx jump;
29164
29165 emit_insn (gen_load_split_stack_limit (limit));
29166 if (CONST_INT_P (size))
29167 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
29168 else
29169 {
29170 size = force_reg (Pmode, size);
29171 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
29172 }
29173 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
29174 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29175 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
29176 gen_rtx_LABEL_REF (VOIDmode, label),
29177 pc_rtx);
29178 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29179 JUMP_LABEL (jump) = label;
29180 }
29181 \f
29182 /* A C compound statement that outputs the assembler code for a thunk
29183 function, used to implement C++ virtual function calls with
29184 multiple inheritance. The thunk acts as a wrapper around a virtual
29185 function, adjusting the implicit object parameter before handing
29186 control off to the real function.
29187
29188 First, emit code to add the integer DELTA to the location that
29189 contains the incoming first argument. Assume that this argument
29190 contains a pointer, and is the one used to pass the `this' pointer
29191 in C++. This is the incoming argument *before* the function
29192 prologue, e.g. `%o0' on a sparc. The addition must preserve the
29193 values of all other incoming arguments.
29194
29195 After the addition, emit code to jump to FUNCTION, which is a
29196 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
29197 not touch the return address. Hence returning from FUNCTION will
29198 return to whoever called the current `thunk'.
29199
29200 The effect must be as if FUNCTION had been called directly with the
29201 adjusted first argument. This macro is responsible for emitting
29202 all of the code for a thunk function; output_function_prologue()
29203 and output_function_epilogue() are not invoked.
29204
29205 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
29206 been extracted from it.) It might possibly be useful on some
29207 targets, but probably not.
29208
29209 If you do not define this macro, the target-independent code in the
29210 C++ frontend will generate a less efficient heavyweight thunk that
29211 calls FUNCTION instead of jumping to it. The generic approach does
29212 not support varargs. */
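/* As a concrete (hypothetical) example of the two adjustments handled
   below:

     struct A { virtual void f (); };
     struct B { virtual void g (); };
     struct C : A, B { void g (); };	// C::g overrides B::g

   Calling g through a B* that points into a C object must adjust `this'
   by the offset of the B subobject within C before jumping to C::g; that
   compile-time constant is DELTA.  With virtual bases the adjustment is
   not known at compile time and is instead loaded from the vtable slot
   at VCALL_OFFSET.  */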
29213
29214 static void
29215 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
29216 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
29217 tree function)
29218 {
29219 const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl));
29220 rtx this_rtx, funexp;
29221 rtx_insn *insn;
29222
29223 reload_completed = 1;
29224 epilogue_completed = 1;
29225
29226 /* Mark the end of the (empty) prologue. */
29227 emit_note (NOTE_INSN_PROLOGUE_END);
29228
29229 /* Find the "this" pointer. If the function returns a structure,
29230 the structure return pointer is in r3. */
29231 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
29232 this_rtx = gen_rtx_REG (Pmode, 4);
29233 else
29234 this_rtx = gen_rtx_REG (Pmode, 3);
29235
29236 /* Apply the constant offset, if required. */
29237 if (delta)
29238 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
29239
29240 /* Apply the offset from the vtable, if required. */
29241 if (vcall_offset)
29242 {
29243 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
29244 rtx tmp = gen_rtx_REG (Pmode, 12);
29245
29246 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
29247 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
29248 {
29249 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
29250 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
29251 }
29252 else
29253 {
29254 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
29255
29256 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
29257 }
29258 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
29259 }
29260
29261 /* Generate a tail call to the target function. */
29262 if (!TREE_USED (function))
29263 {
29264 assemble_external (function);
29265 TREE_USED (function) = 1;
29266 }
29267 funexp = XEXP (DECL_RTL (function), 0);
29268 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
29269
29270 #if TARGET_MACHO
29271 if (MACHOPIC_INDIRECT)
29272 funexp = machopic_indirect_call_target (funexp);
29273 #endif
29274
29275 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
29276 generate sibcall RTL explicitly. */
29277 insn = emit_call_insn (
29278 gen_rtx_PARALLEL (VOIDmode,
29279 gen_rtvec (3,
29280 gen_rtx_CALL (VOIDmode,
29281 funexp, const0_rtx),
29282 gen_rtx_USE (VOIDmode, const0_rtx),
29283 simple_return_rtx)));
29284 SIBLING_CALL_P (insn) = 1;
29285 emit_barrier ();
29286
29287 /* Run just enough of rest_of_compilation to get the insns emitted.
29288 There's not really enough bulk here to make other passes such as
29289      instruction scheduling worthwhile.  Note that use_thunk calls
29290 assemble_start_function and assemble_end_function. */
29291 insn = get_insns ();
29292 shorten_branches (insn);
29293 assemble_start_function (thunk_fndecl, fnname);
29294 final_start_function (insn, file, 1);
29295 final (insn, file, 1);
29296 final_end_function ();
29297 assemble_end_function (thunk_fndecl, fnname);
29298
29299 reload_completed = 0;
29300 epilogue_completed = 0;
29301 }
29302 \f
29303 /* A quick summary of the various types of 'constant-pool tables'
29304 under PowerPC:
29305
29306 Target Flags Name One table per
29307 AIX (none) AIX TOC object file
29308 AIX -mfull-toc AIX TOC object file
29309 AIX -mminimal-toc AIX minimal TOC translation unit
29310 SVR4/EABI (none) SVR4 SDATA object file
29311 SVR4/EABI -fpic SVR4 pic object file
29312 SVR4/EABI -fPIC SVR4 PIC translation unit
29313 SVR4/EABI -mrelocatable EABI TOC function
29314 SVR4/EABI -maix AIX TOC object file
29315 SVR4/EABI -maix -mminimal-toc
29316 AIX minimal TOC translation unit
29317
29318 Name Reg. Set by entries contains:
29319 made by addrs? fp? sum?
29320
29321 AIX TOC 2 crt0 as Y option option
29322 AIX minimal TOC 30 prolog gcc Y Y option
29323 SVR4 SDATA 13 crt0 gcc N Y N
29324 SVR4 pic 30 prolog ld Y not yet N
29325 SVR4 PIC 30 prolog gcc Y option option
29326 EABI TOC 30 prolog gcc Y option option
29327
29328 */
29329
29330 /* Hash functions for the hash table. */
29331
29332 static unsigned
29333 rs6000_hash_constant (rtx k)
29334 {
29335 enum rtx_code code = GET_CODE (k);
29336 machine_mode mode = GET_MODE (k);
29337 unsigned result = (code << 3) ^ mode;
29338 const char *format;
29339 int flen, fidx;
29340
29341 format = GET_RTX_FORMAT (code);
29342 flen = strlen (format);
29343 fidx = 0;
29344
29345 switch (code)
29346 {
29347 case LABEL_REF:
29348 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
29349
29350 case CONST_WIDE_INT:
29351 {
29352 int i;
29353 flen = CONST_WIDE_INT_NUNITS (k);
29354 for (i = 0; i < flen; i++)
29355 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
29356 return result;
29357 }
29358
29359 case CONST_DOUBLE:
29360 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
29361
29362 case CODE_LABEL:
29363 fidx = 3;
29364 break;
29365
29366 default:
29367 break;
29368 }
29369
29370 for (; fidx < flen; fidx++)
29371 switch (format[fidx])
29372 {
29373 case 's':
29374 {
29375 unsigned i, len;
29376 const char *str = XSTR (k, fidx);
29377 len = strlen (str);
29378 result = result * 613 + len;
29379 for (i = 0; i < len; i++)
29380 result = result * 613 + (unsigned) str[i];
29381 break;
29382 }
29383 case 'u':
29384 case 'e':
29385 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
29386 break;
29387 case 'i':
29388 case 'n':
29389 result = result * 613 + (unsigned) XINT (k, fidx);
29390 break;
29391 case 'w':
29392 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
29393 result = result * 613 + (unsigned) XWINT (k, fidx);
29394 else
29395 {
29396 size_t i;
29397 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
29398 result = result * 613 + (unsigned) (XWINT (k, fidx)
29399 >> CHAR_BIT * i);
29400 }
29401 break;
29402 case '0':
29403 break;
29404 default:
29405 gcc_unreachable ();
29406 }
29407
29408 return result;
29409 }
29410
29411 hashval_t
29412 toc_hasher::hash (toc_hash_struct *thc)
29413 {
29414 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
29415 }
29416
29417 /* Compare H1 and H2 for equivalence. */
29418
29419 bool
29420 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
29421 {
29422 rtx r1 = h1->key;
29423 rtx r2 = h2->key;
29424
29425 if (h1->key_mode != h2->key_mode)
29426 return 0;
29427
29428 return rtx_equal_p (r1, r2);
29429 }
29430
29431 /* These are the names given by the C++ front-end to vtables, and
29432 vtable-like objects. Ideally, this logic should not be here;
29433 instead, there should be some programmatic way of inquiring as
29434 to whether or not an object is a vtable. */
29435
29436 #define VTABLE_NAME_P(NAME) \
29437 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
29438 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
29439 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
29440 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
29441 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
29442
29443 #ifdef NO_DOLLAR_IN_LABEL
29444 /* Return a GGC-allocated character string translating dollar signs in
29445 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
29446
29447 const char *
29448 rs6000_xcoff_strip_dollar (const char *name)
29449 {
29450 char *strip, *p;
29451 const char *q;
29452 size_t len;
29453
29454 q = (const char *) strchr (name, '$');
29455
29456 if (q == 0 || q == name)
29457 return name;
29458
29459 len = strlen (name);
29460 strip = XALLOCAVEC (char, len + 1);
29461 strcpy (strip, name);
29462 p = strip + (q - name);
29463 while (p)
29464 {
29465 *p = '_';
29466 p = strchr (p + 1, '$');
29467 }
29468
29469 return ggc_alloc_string (strip, len);
29470 }
29471 #endif
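/* For example (when NO_DOLLAR_IN_LABEL is defined), the function above
   maps "foo$bar$baz" to "foo_bar_baz"; a name that starts with '$', or
   contains no '$' at all, is returned unchanged.  */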
29472
29473 void
29474 rs6000_output_symbol_ref (FILE *file, rtx x)
29475 {
29476 const char *name = XSTR (x, 0);
29477
29478 /* Currently C++ toc references to vtables can be emitted before it
29479 is decided whether the vtable is public or private. If this is
29480 the case, then the linker will eventually complain that there is
29481 a reference to an unknown section. Thus, for vtables only,
29482 we emit the TOC reference to reference the identifier and not the
29483 symbol. */
29484 if (VTABLE_NAME_P (name))
29485 {
29486 RS6000_OUTPUT_BASENAME (file, name);
29487 }
29488 else
29489 assemble_name (file, name);
29490 }
29491
29492 /* Output a TOC entry. We derive the entry name from what is being
29493 written. */
29494
29495 void
29496 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
29497 {
29498 char buf[256];
29499 const char *name = buf;
29500 rtx base = x;
29501 HOST_WIDE_INT offset = 0;
29502
29503 gcc_assert (!TARGET_NO_TOC);
29504
29505 /* When the linker won't eliminate them, don't output duplicate
29506 TOC entries (this happens on AIX if there is any kind of TOC,
29507 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
29508 CODE_LABELs. */
29509 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
29510 {
29511 struct toc_hash_struct *h;
29512
29513 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
29514 time because GGC is not initialized at that point. */
29515 if (toc_hash_table == NULL)
29516 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
29517
29518 h = ggc_alloc<toc_hash_struct> ();
29519 h->key = x;
29520 h->key_mode = mode;
29521 h->labelno = labelno;
29522
29523 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
29524 if (*found == NULL)
29525 *found = h;
29526 else /* This is indeed a duplicate.
29527 Set this label equal to that label. */
29528 {
29529 fputs ("\t.set ", file);
29530 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29531 fprintf (file, "%d,", labelno);
29532 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
29533 fprintf (file, "%d\n", ((*found)->labelno));
29534
29535 #ifdef HAVE_AS_TLS
29536 if (TARGET_XCOFF && SYMBOL_REF_P (x)
29537 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
29538 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
29539 {
29540 fputs ("\t.set ", file);
29541 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29542 fprintf (file, "%d,", labelno);
29543 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
29544 fprintf (file, "%d\n", ((*found)->labelno));
29545 }
29546 #endif
29547 return;
29548 }
29549 }
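  /* For example, on ELF targets, if the constant first appeared as TOC
     entry .LC2 and is requested again with labelno == 5, the code above
     emits just
	.set .LC5,.LC2
     instead of a second copy of the entry.  */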
29550
29551 /* If we're going to put a double constant in the TOC, make sure it's
29552 aligned properly when strict alignment is on. */
29553 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
29554 && STRICT_ALIGNMENT
29555 && GET_MODE_BITSIZE (mode) >= 64
29556 	      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
29557 	    ASM_OUTPUT_ALIGN (file, 3);
29559
29560 (*targetm.asm_out.internal_label) (file, "LC", labelno);
29561
29562 /* Handle FP constants specially. Note that if we have a minimal
29563 TOC, things we put here aren't actually in the TOC, so we can allow
29564 FP constants. */
29565 if (CONST_DOUBLE_P (x)
29566 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
29567 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
29568 {
29569 long k[4];
29570
29571 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29572 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
29573 else
29574 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29575
29576 if (TARGET_64BIT)
29577 {
29578 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29579 fputs (DOUBLE_INT_ASM_OP, file);
29580 else
29581 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29582 k[0] & 0xffffffff, k[1] & 0xffffffff,
29583 k[2] & 0xffffffff, k[3] & 0xffffffff);
29584 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
29585 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29586 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
29587 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
29588 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
29589 return;
29590 }
29591 else
29592 {
29593 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29594 fputs ("\t.long ", file);
29595 else
29596 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
29597 k[0] & 0xffffffff, k[1] & 0xffffffff,
29598 k[2] & 0xffffffff, k[3] & 0xffffffff);
29599 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
29600 k[0] & 0xffffffff, k[1] & 0xffffffff,
29601 k[2] & 0xffffffff, k[3] & 0xffffffff);
29602 return;
29603 }
29604 }
29605 else if (CONST_DOUBLE_P (x)
29606 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
29607 {
29608 long k[2];
29609
29610 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29611 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
29612 else
29613 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
29614
29615 if (TARGET_64BIT)
29616 {
29617 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29618 fputs (DOUBLE_INT_ASM_OP, file);
29619 else
29620 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29621 k[0] & 0xffffffff, k[1] & 0xffffffff);
29622 fprintf (file, "0x%lx%08lx\n",
29623 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
29624 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
29625 return;
29626 }
29627 else
29628 {
29629 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29630 fputs ("\t.long ", file);
29631 else
29632 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
29633 k[0] & 0xffffffff, k[1] & 0xffffffff);
29634 fprintf (file, "0x%lx,0x%lx\n",
29635 k[0] & 0xffffffff, k[1] & 0xffffffff);
29636 return;
29637 }
29638 }
29639 else if (CONST_DOUBLE_P (x)
29640 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
29641 {
29642 long l;
29643
29644 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
29645 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
29646 else
29647 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
29648
29649 if (TARGET_64BIT)
29650 {
29651 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29652 fputs (DOUBLE_INT_ASM_OP, file);
29653 else
29654 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29655 if (WORDS_BIG_ENDIAN)
29656 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
29657 else
29658 fprintf (file, "0x%lx\n", l & 0xffffffff);
29659 return;
29660 }
29661 else
29662 {
29663 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29664 fputs ("\t.long ", file);
29665 else
29666 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
29667 fprintf (file, "0x%lx\n", l & 0xffffffff);
29668 return;
29669 }
29670 }
29671 else if (GET_MODE (x) == VOIDmode && CONST_INT_P (x))
29672 {
29673 unsigned HOST_WIDE_INT low;
29674 HOST_WIDE_INT high;
29675
29676 low = INTVAL (x) & 0xffffffff;
29677 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
29678
29679 /* TOC entries are always Pmode-sized, so when big-endian
29680 smaller integer constants in the TOC need to be padded.
29681 (This is still a win over putting the constants in
29682 a separate constant pool, because then we'd have
29683 to have both a TOC entry _and_ the actual constant.)
29684
29685 For a 32-bit target, CONST_INT values are loaded and shifted
29686 entirely within `low' and can be stored in one TOC entry. */
29687
29688 /* It would be easy to make this work, but it doesn't now. */
29689 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
29690
29691 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
29692 {
29693 low |= high << 32;
29694 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
29695 high = (HOST_WIDE_INT) low >> 32;
29696 low &= 0xffffffff;
29697 }
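  /* A worked example of the padding above: an SImode constant 1 in a
     64-bit big-endian TOC entry starts as low == 1, high == 0; shifting
     left by POINTER_SIZE - 32 gives high == 1, low == 0, so the
     doubleword emitted below is 0x0000000100000000, leaving the 32-bit
     value in the most-significant word where a 32-bit load finds it.  */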
29698
29699 if (TARGET_64BIT)
29700 {
29701 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29702 fputs (DOUBLE_INT_ASM_OP, file);
29703 else
29704 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29705 (long) high & 0xffffffff, (long) low & 0xffffffff);
29706 fprintf (file, "0x%lx%08lx\n",
29707 (long) high & 0xffffffff, (long) low & 0xffffffff);
29708 return;
29709 }
29710 else
29711 {
29712 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
29713 {
29714 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29715 fputs ("\t.long ", file);
29716 else
29717 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
29718 (long) high & 0xffffffff, (long) low & 0xffffffff);
29719 fprintf (file, "0x%lx,0x%lx\n",
29720 (long) high & 0xffffffff, (long) low & 0xffffffff);
29721 }
29722 else
29723 {
29724 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29725 fputs ("\t.long ", file);
29726 else
29727 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
29728 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
29729 }
29730 return;
29731 }
29732 }
29733
29734 if (GET_CODE (x) == CONST)
29735 {
29736 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
29737 && CONST_INT_P (XEXP (XEXP (x, 0), 1)));
29738
29739 base = XEXP (XEXP (x, 0), 0);
29740 offset = INTVAL (XEXP (XEXP (x, 0), 1));
29741 }
29742
29743 switch (GET_CODE (base))
29744 {
29745 case SYMBOL_REF:
29746 name = XSTR (base, 0);
29747 break;
29748
29749 case LABEL_REF:
29750 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
29751 CODE_LABEL_NUMBER (XEXP (base, 0)));
29752 break;
29753
29754 case CODE_LABEL:
29755 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
29756 break;
29757
29758 default:
29759 gcc_unreachable ();
29760 }
29761
29762 if (TARGET_ELF || TARGET_MINIMAL_TOC)
29763 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
29764 else
29765 {
29766 fputs ("\t.tc ", file);
29767 RS6000_OUTPUT_BASENAME (file, name);
29768
29769 if (offset < 0)
29770 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
29771 else if (offset)
29772 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
29773
29774 /* Mark large TOC symbols on AIX with [TE] so they are mapped
29775 after other TOC symbols, reducing overflow of small TOC access
29776 to [TC] symbols. */
29777 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
29778 ? "[TE]," : "[TC],", file);
29779 }
29780
29781 /* Currently C++ toc references to vtables can be emitted before it
29782 is decided whether the vtable is public or private. If this is
29783 the case, then the linker will eventually complain that there is
29784 a TOC reference to an unknown section. Thus, for vtables only,
29785 we emit the TOC reference to reference the symbol and not the
29786 section. */
29787 if (VTABLE_NAME_P (name))
29788 {
29789 RS6000_OUTPUT_BASENAME (file, name);
29790 if (offset < 0)
29791 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
29792 else if (offset > 0)
29793 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
29794 }
29795 else
29796 output_addr_const (file, x);
29797
29798 #if HAVE_AS_TLS
29799 if (TARGET_XCOFF && SYMBOL_REF_P (base))
29800 {
29801 switch (SYMBOL_REF_TLS_MODEL (base))
29802 {
29803 case 0:
29804 break;
29805 case TLS_MODEL_LOCAL_EXEC:
29806 fputs ("@le", file);
29807 break;
29808 case TLS_MODEL_INITIAL_EXEC:
29809 fputs ("@ie", file);
29810 break;
29811 /* Use global-dynamic for local-dynamic. */
29812 case TLS_MODEL_GLOBAL_DYNAMIC:
29813 case TLS_MODEL_LOCAL_DYNAMIC:
29814 putc ('\n', file);
29815 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
29816 fputs ("\t.tc .", file);
29817 RS6000_OUTPUT_BASENAME (file, name);
29818 fputs ("[TC],", file);
29819 output_addr_const (file, x);
29820 fputs ("@m", file);
29821 break;
29822 default:
29823 gcc_unreachable ();
29824 }
29825 }
29826 #endif
29827
29828 putc ('\n', file);
29829 }
29830 \f
29831 /* Output an assembler pseudo-op to write an ASCII string of N characters
29832 starting at P to FILE.
29833
29834 On the RS/6000, we have to do this using the .byte operation and
29835 write out special characters outside the quoted string.
29836 Also, the assembler is broken; very long strings are truncated,
29837 so we must artificially break them up early. */
29838
29839 void
29840 output_ascii (FILE *file, const char *p, int n)
29841 {
29842 char c;
29843 int i, count_string;
29844 const char *for_string = "\t.byte \"";
29845 const char *for_decimal = "\t.byte ";
29846 const char *to_close = NULL;
29847
29848 count_string = 0;
29849 for (i = 0; i < n; i++)
29850 {
29851 c = *p++;
29852 if (c >= ' ' && c < 0177)
29853 {
29854 if (for_string)
29855 fputs (for_string, file);
29856 putc (c, file);
29857
29858 /* Write two quotes to get one. */
29859 if (c == '"')
29860 {
29861 putc (c, file);
29862 ++count_string;
29863 }
29864
29865 for_string = NULL;
29866 for_decimal = "\"\n\t.byte ";
29867 to_close = "\"\n";
29868 ++count_string;
29869
29870 if (count_string >= 512)
29871 {
29872 fputs (to_close, file);
29873
29874 for_string = "\t.byte \"";
29875 for_decimal = "\t.byte ";
29876 to_close = NULL;
29877 count_string = 0;
29878 }
29879 }
29880 else
29881 {
29882 if (for_decimal)
29883 fputs (for_decimal, file);
29884 fprintf (file, "%d", c);
29885
29886 for_string = "\n\t.byte \"";
29887 for_decimal = ", ";
29888 to_close = "\n";
29889 count_string = 0;
29890 }
29891 }
29892
29893 /* Now close the string if we have written one. Then end the line. */
29894 if (to_close)
29895 fputs (to_close, file);
29896 }
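/* For instance, given the five input bytes 'a' 'b' '"' 'c' '\n', the
   function above emits (illustrative):
	.byte "ab""c"
	.byte 10
   -- the quote is doubled inside the quoted string, and the unprintable
   newline is written out in decimal.  */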
29897 \f
29898 /* Generate a unique section name for FILENAME for a section type
29899 represented by SECTION_DESC. Output goes into BUF.
29900
29901 SECTION_DESC can be any string, as long as it is different for each
29902 possible section type.
29903
29904 We name the section in the same manner as xlc. The name begins with an
29905 underscore followed by the filename (after stripping any leading directory
29906 names) with the last period replaced by the string SECTION_DESC. If
29907 FILENAME does not contain a period, SECTION_DESC is appended to the end of
29908 the name. */
29909
29910 void
29911 rs6000_gen_section_name (char **buf, const char *filename,
29912 const char *section_desc)
29913 {
29914 const char *q, *after_last_slash, *last_period = 0;
29915 char *p;
29916 int len;
29917
29918 after_last_slash = filename;
29919 for (q = filename; *q; q++)
29920 {
29921 if (*q == '/')
29922 after_last_slash = q + 1;
29923 else if (*q == '.')
29924 last_period = q;
29925 }
29926
29927 len = strlen (after_last_slash) + strlen (section_desc) + 2;
29928 *buf = (char *) xmalloc (len);
29929
29930 p = *buf;
29931 *p++ = '_';
29932
29933 for (q = after_last_slash; *q; q++)
29934 {
29935 if (q == last_period)
29936 {
29937 strcpy (p, section_desc);
29938 p += strlen (section_desc);
29939 break;
29940 }
29941
29942 else if (ISALNUM (*q))
29943 *p++ = *q;
29944 }
29945
29946 if (last_period == 0)
29947 strcpy (p, section_desc);
29948 else
29949 *p = '\0';
29950 }
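/* For example, (filename "src/my-file.c", section_desc "rw_") yields
   "_myfilerw_": the directory prefix is stripped, non-alphanumeric
   characters are dropped, and the last period is replaced by
   SECTION_DESC.  */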
29951 \f
29952 /* Emit profile function. */
29953
29954 void
29955 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
29956 {
29957 /* Non-standard profiling for kernels, which just saves LR then calls
29958 _mcount without worrying about arg saves. The idea is to change
29959 the function prologue as little as possible as it isn't easy to
29960 account for arg save/restore code added just for _mcount. */
29961 if (TARGET_PROFILE_KERNEL)
29962 return;
29963
29964 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29965 {
29966 #ifndef NO_PROFILE_COUNTERS
29967 # define NO_PROFILE_COUNTERS 0
29968 #endif
29969 if (NO_PROFILE_COUNTERS)
29970 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29971 LCT_NORMAL, VOIDmode);
29972 else
29973 {
29974 char buf[30];
29975 const char *label_name;
29976 rtx fun;
29977
29978 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
29979 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
29980 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
29981
29982 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
29983 LCT_NORMAL, VOIDmode, fun, Pmode);
29984 }
29985 }
29986 else if (DEFAULT_ABI == ABI_DARWIN)
29987 {
29988 const char *mcount_name = RS6000_MCOUNT;
29989 int caller_addr_regno = LR_REGNO;
29990
29991 /* Be conservative and always set this, at least for now. */
29992 crtl->uses_pic_offset_table = 1;
29993
29994 #if TARGET_MACHO
29995 /* For PIC code, set up a stub and collect the caller's address
29996 from r0, which is where the prologue puts it. */
29997 if (MACHOPIC_INDIRECT
29998 && crtl->uses_pic_offset_table)
29999 caller_addr_regno = 0;
30000 #endif
30001 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
30002 LCT_NORMAL, VOIDmode,
30003 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
30004 }
30005 }
30006
30007 /* Write function profiler code. */
30008
30009 void
30010 output_function_profiler (FILE *file, int labelno)
30011 {
30012 char buf[100];
30013
30014 switch (DEFAULT_ABI)
30015 {
30016 default:
30017 gcc_unreachable ();
30018
30019 case ABI_V4:
30020 if (!TARGET_32BIT)
30021 {
30022 warning (0, "no profiling of 64-bit code for this ABI");
30023 return;
30024 }
30025 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
30026 fprintf (file, "\tmflr %s\n", reg_names[0]);
30027 if (NO_PROFILE_COUNTERS)
30028 {
30029 asm_fprintf (file, "\tstw %s,4(%s)\n",
30030 reg_names[0], reg_names[1]);
30031 }
30032 else if (TARGET_SECURE_PLT && flag_pic)
30033 {
30034 if (TARGET_LINK_STACK)
30035 {
30036 char name[32];
30037 get_ppc476_thunk_name (name);
30038 asm_fprintf (file, "\tbl %s\n", name);
30039 }
30040 else
30041 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
30042 asm_fprintf (file, "\tstw %s,4(%s)\n",
30043 reg_names[0], reg_names[1]);
30044 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30045 asm_fprintf (file, "\taddis %s,%s,",
30046 reg_names[12], reg_names[12]);
30047 assemble_name (file, buf);
30048 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
30049 assemble_name (file, buf);
30050 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
30051 }
30052 else if (flag_pic == 1)
30053 {
30054 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
30055 asm_fprintf (file, "\tstw %s,4(%s)\n",
30056 reg_names[0], reg_names[1]);
30057 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
30058 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
30059 assemble_name (file, buf);
30060 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
30061 }
30062 else if (flag_pic > 1)
30063 {
30064 asm_fprintf (file, "\tstw %s,4(%s)\n",
30065 reg_names[0], reg_names[1]);
30066 /* Now, we need to get the address of the label. */
30067 if (TARGET_LINK_STACK)
30068 {
30069 char name[32];
30070 get_ppc476_thunk_name (name);
30071 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
30072 assemble_name (file, buf);
30073 fputs ("-.\n1:", file);
30074 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30075 asm_fprintf (file, "\taddi %s,%s,4\n",
30076 reg_names[11], reg_names[11]);
30077 }
30078 else
30079 {
30080 fputs ("\tbcl 20,31,1f\n\t.long ", file);
30081 assemble_name (file, buf);
30082 fputs ("-.\n1:", file);
30083 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
30084 }
30085 asm_fprintf (file, "\tlwz %s,0(%s)\n",
30086 reg_names[0], reg_names[11]);
30087 asm_fprintf (file, "\tadd %s,%s,%s\n",
30088 reg_names[0], reg_names[0], reg_names[11]);
30089 }
30090 else
30091 {
30092 asm_fprintf (file, "\tlis %s,", reg_names[12]);
30093 assemble_name (file, buf);
30094 fputs ("@ha\n", file);
30095 asm_fprintf (file, "\tstw %s,4(%s)\n",
30096 reg_names[0], reg_names[1]);
30097 asm_fprintf (file, "\tla %s,", reg_names[0]);
30098 assemble_name (file, buf);
30099 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
30100 }
30101
30102 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
30103 fprintf (file, "\tbl %s%s\n",
30104 RS6000_MCOUNT, flag_pic ? "@plt" : "");
30105 break;
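      /* Roughly, for 32-bit SVR4 with -fpic (flag_pic == 1), counters
	 enabled and RS6000_MCOUNT assumed to be "_mcount", the case above
	 emits a sequence like:
		mflr 0
		bl _GLOBAL_OFFSET_TABLE_@local-4
		stw 0,4(1)
		mflr 12
		lwz 0,.LP<labelno>@got(12)
		bl _mcount@plt
	 -- an illustrative sketch, not verified assembler output.  */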
30106
30107 case ABI_AIX:
30108 case ABI_ELFv2:
30109 case ABI_DARWIN:
30110 /* Don't do anything, done in output_profile_hook (). */
30111 break;
30112 }
30113 }
30114
30115 \f
30116
30117 /* The following variable value is the last issued insn. */
30118
30119 static rtx_insn *last_scheduled_insn;
30120
30121 /* The following variable helps to balance issuing of load and
30122    store instructions.  */
30123
30124 static int load_store_pendulum;
30125
30126 /* The following variable helps pair divide insns during scheduling. */
30127 static int divide_cnt;
30128 /* The following variable helps pair and alternate vector and vector load
30129 insns during scheduling. */
30130 static int vec_pairing;
30131
30132
30133 /* Power4 load update and store update instructions are cracked into a
30134 load or store and an integer insn which are executed in the same cycle.
30135 Branches have their own dispatch slot which does not count against the
30136 GCC issue rate, but it changes the program flow so there are no other
30137 instructions to issue in this cycle. */
30138
30139 static int
30140 rs6000_variable_issue_1 (rtx_insn *insn, int more)
30141 {
30142 last_scheduled_insn = insn;
30143 if (GET_CODE (PATTERN (insn)) == USE
30144 || GET_CODE (PATTERN (insn)) == CLOBBER)
30145 {
30146 cached_can_issue_more = more;
30147 return cached_can_issue_more;
30148 }
30149
30150 if (insn_terminates_group_p (insn, current_group))
30151 {
30152 cached_can_issue_more = 0;
30153 return cached_can_issue_more;
30154 }
30155
30156   /* If INSN has no reservation but we reach here anyway, just return
	   the default.  */
30157 if (recog_memoized (insn) < 0)
30158 return more;
30159
30160 if (rs6000_sched_groups)
30161 {
30162 if (is_microcoded_insn (insn))
30163 cached_can_issue_more = 0;
30164 else if (is_cracked_insn (insn))
30165 cached_can_issue_more = more > 2 ? more - 2 : 0;
30166 else
30167 cached_can_issue_more = more - 1;
30168
30169 return cached_can_issue_more;
30170 }
30171
30172 if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
30173 return 0;
30174
30175 cached_can_issue_more = more - 1;
30176 return cached_can_issue_more;
30177 }
30178
30179 static int
30180 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
30181 {
30182 int r = rs6000_variable_issue_1 (insn, more);
30183 if (verbose)
30184 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
30185 return r;
30186 }
30187
30188 /* Adjust the cost of a scheduling dependency. Return the new cost of
30189 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
30190
30191 static int
30192 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
30193 unsigned int)
30194 {
30195 enum attr_type attr_type;
30196
30197 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
30198 return cost;
30199
30200 switch (dep_type)
30201 {
30202 case REG_DEP_TRUE:
30203 {
30204 /* Data dependency; DEP_INSN writes a register that INSN reads
30205 some cycles later. */
30206
30207 /* Separate a load from a narrower, dependent store. */
30208 if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
30209 && GET_CODE (PATTERN (insn)) == SET
30210 && GET_CODE (PATTERN (dep_insn)) == SET
30211 && MEM_P (XEXP (PATTERN (insn), 1))
30212 && MEM_P (XEXP (PATTERN (dep_insn), 0))
30213 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
30214 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
30215 return cost + 14;
30216
30217 attr_type = get_attr_type (insn);
30218
30219 switch (attr_type)
30220 {
30221 case TYPE_JMPREG:
30222 /* Tell the first scheduling pass about the latency between
30223 a mtctr and bctr (and mtlr and br/blr). The first
30224 scheduling pass will not know about this latency since
30225 the mtctr instruction, which has the latency associated
30226 to it, will be generated by reload. */
30227 return 4;
30228 case TYPE_BRANCH:
30229 /* Leave some extra cycles between a compare and its
30230 dependent branch, to inhibit expensive mispredicts. */
30231 if ((rs6000_tune == PROCESSOR_PPC603
30232 || rs6000_tune == PROCESSOR_PPC604
30233 || rs6000_tune == PROCESSOR_PPC604e
30234 || rs6000_tune == PROCESSOR_PPC620
30235 || rs6000_tune == PROCESSOR_PPC630
30236 || rs6000_tune == PROCESSOR_PPC750
30237 || rs6000_tune == PROCESSOR_PPC7400
30238 || rs6000_tune == PROCESSOR_PPC7450
30239 || rs6000_tune == PROCESSOR_PPCE5500
30240 || rs6000_tune == PROCESSOR_PPCE6500
30241 || rs6000_tune == PROCESSOR_POWER4
30242 || rs6000_tune == PROCESSOR_POWER5
30243 || rs6000_tune == PROCESSOR_POWER7
30244 || rs6000_tune == PROCESSOR_POWER8
30245 || rs6000_tune == PROCESSOR_POWER9
30246 || rs6000_tune == PROCESSOR_CELL)
30247 && recog_memoized (dep_insn)
30248 && (INSN_CODE (dep_insn) >= 0))
30249
30250 switch (get_attr_type (dep_insn))
30251 {
30252 case TYPE_CMP:
30253 case TYPE_FPCOMPARE:
30254 case TYPE_CR_LOGICAL:
30255 return cost + 2;
30256 case TYPE_EXTS:
30257 case TYPE_MUL:
30258 if (get_attr_dot (dep_insn) == DOT_YES)
30259 return cost + 2;
30260 else
30261 break;
30262 case TYPE_SHIFT:
30263 if (get_attr_dot (dep_insn) == DOT_YES
30264 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
30265 return cost + 2;
30266 else
30267 break;
30268 default:
30269 break;
30270 }
30271 break;
30272
30273 case TYPE_STORE:
30274 case TYPE_FPSTORE:
30275 if ((rs6000_tune == PROCESSOR_POWER6)
30276 && recog_memoized (dep_insn)
30277 && (INSN_CODE (dep_insn) >= 0))
30278 {
30279
30280 if (GET_CODE (PATTERN (insn)) != SET)
30281 /* If this happens, we have to extend this to schedule
30282 optimally. Return default for now. */
30283 return cost;
30284
30285 /* Adjust the cost for the case where the value written
30286 by a fixed point operation is used as the address
30287 gen value on a store. */
30288 switch (get_attr_type (dep_insn))
30289 {
30290 case TYPE_LOAD:
30291 case TYPE_CNTLZ:
30292 {
30293 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30294 return get_attr_sign_extend (dep_insn)
30295 == SIGN_EXTEND_YES ? 6 : 4;
30296 break;
30297 }
30298 case TYPE_SHIFT:
30299 {
30300 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30301 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30302 6 : 3;
30303 break;
30304 }
30305 case TYPE_INTEGER:
30306 case TYPE_ADD:
30307 case TYPE_LOGICAL:
30308 case TYPE_EXTS:
30309 case TYPE_INSERT:
30310 {
30311 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30312 return 3;
30313 break;
30314 }
30315 case TYPE_STORE:
30316 case TYPE_FPLOAD:
30317 case TYPE_FPSTORE:
30318 {
30319 if (get_attr_update (dep_insn) == UPDATE_YES
30320 && ! rs6000_store_data_bypass_p (dep_insn, insn))
30321 return 3;
30322 break;
30323 }
30324 case TYPE_MUL:
30325 {
30326 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30327 return 17;
30328 break;
30329 }
30330 case TYPE_DIV:
30331 {
30332 if (! rs6000_store_data_bypass_p (dep_insn, insn))
30333 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30334 break;
30335 }
30336 default:
30337 break;
30338 }
30339 }
30340 break;
30341
30342 case TYPE_LOAD:
30343 if ((rs6000_tune == PROCESSOR_POWER6)
30344 && recog_memoized (dep_insn)
30345 && (INSN_CODE (dep_insn) >= 0))
30346 {
30347
30348 /* Adjust the cost for the case where the value written
30349 by a fixed point instruction is used within the address
30350 gen portion of a subsequent load(u)(x) */
30351 switch (get_attr_type (dep_insn))
30352 {
30353 case TYPE_LOAD:
30354 case TYPE_CNTLZ:
30355 {
30356 if (set_to_load_agen (dep_insn, insn))
30357 return get_attr_sign_extend (dep_insn)
30358 == SIGN_EXTEND_YES ? 6 : 4;
30359 break;
30360 }
30361 case TYPE_SHIFT:
30362 {
30363 if (set_to_load_agen (dep_insn, insn))
30364 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
30365 6 : 3;
30366 break;
30367 }
30368 case TYPE_INTEGER:
30369 case TYPE_ADD:
30370 case TYPE_LOGICAL:
30371 case TYPE_EXTS:
30372 case TYPE_INSERT:
30373 {
30374 if (set_to_load_agen (dep_insn, insn))
30375 return 3;
30376 break;
30377 }
30378 case TYPE_STORE:
30379 case TYPE_FPLOAD:
30380 case TYPE_FPSTORE:
30381 {
30382 if (get_attr_update (dep_insn) == UPDATE_YES
30383 && set_to_load_agen (dep_insn, insn))
30384 return 3;
30385 break;
30386 }
30387 case TYPE_MUL:
30388 {
30389 if (set_to_load_agen (dep_insn, insn))
30390 return 17;
30391 break;
30392 }
30393 case TYPE_DIV:
30394 {
30395 if (set_to_load_agen (dep_insn, insn))
30396 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
30397 break;
30398 }
30399 default:
30400 break;
30401 }
30402 }
30403 break;
30404
30405 case TYPE_FPLOAD:
30406 if ((rs6000_tune == PROCESSOR_POWER6)
30407 && get_attr_update (insn) == UPDATE_NO
30408 && recog_memoized (dep_insn)
30409 && (INSN_CODE (dep_insn) >= 0)
30410 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
30411 return 2;
30412
30413 default:
30414 break;
30415 }
30416
30417 /* Fall out to return default cost. */
30418 }
30419 break;
30420
30421 case REG_DEP_OUTPUT:
30422 /* Output dependency; DEP_INSN writes a register that INSN writes some
30423 cycles later. */
30424 if ((rs6000_tune == PROCESSOR_POWER6)
30425 && recog_memoized (dep_insn)
30426 && (INSN_CODE (dep_insn) >= 0))
30427 {
30428 attr_type = get_attr_type (insn);
30429
30430 switch (attr_type)
30431 {
30432 case TYPE_FP:
30433 case TYPE_FPSIMPLE:
30434 if (get_attr_type (dep_insn) == TYPE_FP
30435 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
30436 return 1;
30437 break;
30438 case TYPE_FPLOAD:
30439 if (get_attr_update (insn) == UPDATE_NO
30440 && get_attr_type (dep_insn) == TYPE_MFFGPR)
30441 return 2;
30442 break;
30443 default:
30444 break;
30445 }
30446 }
30447 /* Fall through, no cost for output dependency. */
30448 /* FALLTHRU */
30449
30450 case REG_DEP_ANTI:
30451 /* Anti dependency; DEP_INSN reads a register that INSN writes some
30452 cycles later. */
30453 return 0;
30454
30455 default:
30456 gcc_unreachable ();
30457 }
30458
30459 return cost;
30460 }
30461
30462 /* Debug version of rs6000_adjust_cost. */
30463
30464 static int
30465 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
30466 int cost, unsigned int dw)
30467 {
30468 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
30469
30470 if (ret != cost)
30471 {
30472 const char *dep;
30473
30474 switch (dep_type)
30475 {
30476 	default: dep = "unknown dependency"; break;
30477 case REG_DEP_TRUE: dep = "data dependency"; break;
30478 case REG_DEP_OUTPUT: dep = "output dependency"; break;
30479 	case REG_DEP_ANTI: dep = "anti dependency"; break;
30480 }
30481
30482 fprintf (stderr,
30483 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
30484 "%s, insn:\n", ret, cost, dep);
30485
30486 debug_rtx (insn);
30487 }
30488
30489 return ret;
30490 }
30491
30492 /* Return true if INSN is microcoded by the processor,
30493    false otherwise.  */
30494
30495 static bool
30496 is_microcoded_insn (rtx_insn *insn)
30497 {
30498 if (!insn || !NONDEBUG_INSN_P (insn)
30499 || GET_CODE (PATTERN (insn)) == USE
30500 || GET_CODE (PATTERN (insn)) == CLOBBER)
30501 return false;
30502
30503 if (rs6000_tune == PROCESSOR_CELL)
30504 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
30505
30506 if (rs6000_sched_groups
30507 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30508 {
30509 enum attr_type type = get_attr_type (insn);
30510 if ((type == TYPE_LOAD
30511 && get_attr_update (insn) == UPDATE_YES
30512 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
30513 || ((type == TYPE_LOAD || type == TYPE_STORE)
30514 && get_attr_update (insn) == UPDATE_YES
30515 && get_attr_indexed (insn) == INDEXED_YES)
30516 || type == TYPE_MFCR)
30517 return true;
30518 }
30519
30520 return false;
30521 }
30522
30523 /* The function returns true if INSN is cracked into 2 instructions
30524 by the processor (and therefore occupies 2 issue slots). */
30525
30526 static bool
30527 is_cracked_insn (rtx_insn *insn)
30528 {
30529 if (!insn || !NONDEBUG_INSN_P (insn)
30530 || GET_CODE (PATTERN (insn)) == USE
30531 || GET_CODE (PATTERN (insn)) == CLOBBER)
30532 return false;
30533
30534 if (rs6000_sched_groups
30535 && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
30536 {
30537 enum attr_type type = get_attr_type (insn);
30538 if ((type == TYPE_LOAD
30539 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
30540 && get_attr_update (insn) == UPDATE_NO)
30541 || (type == TYPE_LOAD
30542 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
30543 && get_attr_update (insn) == UPDATE_YES
30544 && get_attr_indexed (insn) == INDEXED_NO)
30545 || (type == TYPE_STORE
30546 && get_attr_update (insn) == UPDATE_YES
30547 && get_attr_indexed (insn) == INDEXED_NO)
30548 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
30549 && get_attr_update (insn) == UPDATE_YES)
30550 || (type == TYPE_CR_LOGICAL
30551 && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
30552 || (type == TYPE_EXTS
30553 && get_attr_dot (insn) == DOT_YES)
30554 || (type == TYPE_SHIFT
30555 && get_attr_dot (insn) == DOT_YES
30556 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
30557 || (type == TYPE_MUL
30558 && get_attr_dot (insn) == DOT_YES)
30559 || type == TYPE_DIV
30560 || (type == TYPE_INSERT
30561 && get_attr_size (insn) == SIZE_32))
30562 return true;
30563 }
30564
30565 return false;
30566 }
30567
30568 /* The function returns true if INSN can be issued only from
30569 the branch slot. */
30570
30571 static bool
30572 is_branch_slot_insn (rtx_insn *insn)
30573 {
30574 if (!insn || !NONDEBUG_INSN_P (insn)
30575 || GET_CODE (PATTERN (insn)) == USE
30576 || GET_CODE (PATTERN (insn)) == CLOBBER)
30577 return false;
30578
30579 if (rs6000_sched_groups)
30580 {
30581 enum attr_type type = get_attr_type (insn);
30582 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
30583 return true;
30584 return false;
30585 }
30586
30587 return false;
30588 }
30589
30590 /* Return true if OUT_INSN sets a value that is used in the
30591    address generation computation of IN_INSN.  */
30592 static bool
30593 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
30594 {
30595 rtx out_set, in_set;
30596
30597 /* For performance reasons, only handle the simple case where
30598 both loads are a single_set. */
30599 out_set = single_set (out_insn);
30600 if (out_set)
30601 {
30602 in_set = single_set (in_insn);
30603 if (in_set)
30604 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
30605 }
30606
30607 return false;
30608 }
30609
30610 /* Try to determine base/offset/size parts of the given MEM.
30611    Return true if successful, false if not all of the
30612    values could be determined.
30613
30614 This function only looks for REG or REG+CONST address forms.
30615 REG+REG address form will return false. */
30616
30617 static bool
30618 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
30619 HOST_WIDE_INT *size)
30620 {
30621 rtx addr_rtx;
30622   if (MEM_SIZE_KNOWN_P (mem))
30623 *size = MEM_SIZE (mem);
30624 else
30625 return false;
30626
30627 addr_rtx = (XEXP (mem, 0));
30628 if (GET_CODE (addr_rtx) == PRE_MODIFY)
30629 addr_rtx = XEXP (addr_rtx, 1);
30630
30631 *offset = 0;
30632 while (GET_CODE (addr_rtx) == PLUS
30633 && CONST_INT_P (XEXP (addr_rtx, 1)))
30634 {
30635 *offset += INTVAL (XEXP (addr_rtx, 1));
30636 addr_rtx = XEXP (addr_rtx, 0);
30637 }
30638 if (!REG_P (addr_rtx))
30639 return false;
30640
30641 *base = addr_rtx;
30642 return true;
30643 }
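/* For example, for (mem:DI (plus:DI (reg:DI 9) (const_int 16)) [... S8 ...])
   the function above returns *base == r9, *offset == 16, *size == 8;
   an indexed (reg + reg) address makes it return false.  */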
30644
30645 /* Return true if the target storage location of MEM1 is
30646    adjacent to the target storage location of MEM2.  */
30648
30649 static bool
30650 adjacent_mem_locations (rtx mem1, rtx mem2)
30651 {
30652 rtx reg1, reg2;
30653 HOST_WIDE_INT off1, size1, off2, size2;
30654
30655 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30656 && get_memref_parts (mem2, &reg2, &off2, &size2))
30657 return ((REGNO (reg1) == REGNO (reg2))
30658 && ((off1 + size1 == off2)
30659 || (off2 + size2 == off1)));
30660
30661 return false;
30662 }
30663
30664 /* This function returns true if it can be determined that the two MEM
30665 locations overlap by at least 1 byte based on base reg/offset/size. */
30666
30667 static bool
30668 mem_locations_overlap (rtx mem1, rtx mem2)
30669 {
30670 rtx reg1, reg2;
30671 HOST_WIDE_INT off1, size1, off2, size2;
30672
30673 if (get_memref_parts (mem1, &reg1, &off1, &size1)
30674 && get_memref_parts (mem2, &reg2, &off2, &size2))
30675 return ((REGNO (reg1) == REGNO (reg2))
30676 && (((off1 <= off2) && (off1 + size1 > off2))
30677 || ((off2 <= off1) && (off2 + size2 > off1))));
30678
30679 return false;
30680 }
30681
30682 /* A C statement (sans semicolon) to update the integer scheduling
30683 priority INSN_PRIORITY (INSN). Increase the priority to execute the
30684 INSN earlier, reduce the priority to execute INSN later. Do not
30685 define this macro if you do not need to adjust the scheduling
30686 priorities of insns. */
30687
30688 static int
30689 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
30690 {
30691 rtx load_mem, str_mem;
30692 /* On machines (like the 750) which have asymmetric integer units,
30693    where one integer unit can do multiplies and divides and the other
30694    can't, reduce the priority of multiply/divide insns so they are
30695    scheduled before other integer operations.  */
30696
30697 #if 0
30698 if (! INSN_P (insn))
30699 return priority;
30700
30701 if (GET_CODE (PATTERN (insn)) == USE)
30702 return priority;
30703
30704 switch (rs6000_tune) {
30705 case PROCESSOR_PPC750:
30706 switch (get_attr_type (insn))
30707 {
30708 default:
30709 break;
30710
30711 case TYPE_MUL:
30712 case TYPE_DIV:
30713 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
30714 priority, priority);
30715 if (priority >= 0 && priority < 0x01000000)
30716 priority >>= 3;
30717 break;
30718 }
30719 }
30720 #endif
30721
30722 if (insn_must_be_first_in_group (insn)
30723 && reload_completed
30724 && current_sched_info->sched_max_insns_priority
30725 && rs6000_sched_restricted_insns_priority)
30726 {
30727
30728 /* Prioritize insns that can be dispatched only in the first
30729 dispatch slot. */
30730 if (rs6000_sched_restricted_insns_priority == 1)
30731 /* Attach highest priority to insn. This means that in
30732 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
30733 precede 'priority' (critical path) considerations. */
30734 return current_sched_info->sched_max_insns_priority;
30735 else if (rs6000_sched_restricted_insns_priority == 2)
30736 /* Increase priority of insn by a minimal amount. This means that in
30737 haifa-sched.c:ready_sort(), only 'priority' (critical path)
30738 considerations precede dispatch-slot restriction considerations. */
30739 return (priority + 1);
30740 }
30741
30742 if (rs6000_tune == PROCESSOR_POWER6
30743 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
30744 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
30745 /* Attach highest priority to insn if the scheduler has just issued two
30746 stores and this instruction is a load, or two loads and this instruction
30747 is a store. Power6 wants loads and stores scheduled alternately
30748      when possible.  */
30749 return current_sched_info->sched_max_insns_priority;
30750
30751 return priority;
30752 }
30753
30754 /* Return true if the instruction is nonpipelined on the Cell. */
30755 static bool
30756 is_nonpipeline_insn (rtx_insn *insn)
30757 {
30758 enum attr_type type;
30759 if (!insn || !NONDEBUG_INSN_P (insn)
30760 || GET_CODE (PATTERN (insn)) == USE
30761 || GET_CODE (PATTERN (insn)) == CLOBBER)
30762 return false;
30763
30764 type = get_attr_type (insn);
30765 if (type == TYPE_MUL
30766 || type == TYPE_DIV
30767 || type == TYPE_SDIV
30768 || type == TYPE_DDIV
30769 || type == TYPE_SSQRT
30770 || type == TYPE_DSQRT
30771 || type == TYPE_MFCR
30772 || type == TYPE_MFCRF
30773 || type == TYPE_MFJMPR)
30774 {
30775 return true;
30776 }
30777 return false;
30778 }
30779
30780
30781 /* Return how many instructions the machine can issue per cycle. */
30782
30783 static int
30784 rs6000_issue_rate (void)
30785 {
30786 /* Unless scheduling for register pressure, use issue rate of 1 for
30787 first scheduling pass to decrease degradation. */
30788 if (!reload_completed && !flag_sched_pressure)
30789 return 1;
30790
30791 switch (rs6000_tune) {
30792 case PROCESSOR_RS64A:
30793 case PROCESSOR_PPC601: /* ? */
30794 case PROCESSOR_PPC7450:
30795 return 3;
30796 case PROCESSOR_PPC440:
30797 case PROCESSOR_PPC603:
30798 case PROCESSOR_PPC750:
30799 case PROCESSOR_PPC7400:
30800 case PROCESSOR_PPC8540:
30801 case PROCESSOR_PPC8548:
30802 case PROCESSOR_CELL:
30803 case PROCESSOR_PPCE300C2:
30804 case PROCESSOR_PPCE300C3:
30805 case PROCESSOR_PPCE500MC:
30806 case PROCESSOR_PPCE500MC64:
30807 case PROCESSOR_PPCE5500:
30808 case PROCESSOR_PPCE6500:
30809 case PROCESSOR_TITAN:
30810 return 2;
30811 case PROCESSOR_PPC476:
30812 case PROCESSOR_PPC604:
30813 case PROCESSOR_PPC604e:
30814 case PROCESSOR_PPC620:
30815 case PROCESSOR_PPC630:
30816 return 4;
30817 case PROCESSOR_POWER4:
30818 case PROCESSOR_POWER5:
30819 case PROCESSOR_POWER6:
30820 case PROCESSOR_POWER7:
30821 return 5;
30822 case PROCESSOR_POWER8:
30823 return 7;
30824 case PROCESSOR_POWER9:
30825 return 6;
30826 default:
30827 return 1;
30828 }
30829 }
30830
30831 /* Return how many instructions to look ahead for better insn
30832 scheduling. */
30833
30834 static int
30835 rs6000_use_sched_lookahead (void)
30836 {
30837 switch (rs6000_tune)
30838 {
30839 case PROCESSOR_PPC8540:
30840 case PROCESSOR_PPC8548:
30841 return 4;
30842
30843 case PROCESSOR_CELL:
30844 return (reload_completed ? 8 : 0);
30845
30846 default:
30847 return 0;
30848 }
30849 }
30850
30851 /* We are choosing insn from the ready queue. Return zero if INSN can be
30852 chosen. */
30853 static int
30854 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
30855 {
30856 if (ready_index == 0)
30857 return 0;
30858
30859 if (rs6000_tune != PROCESSOR_CELL)
30860 return 0;
30861
30862 gcc_assert (insn != NULL_RTX && INSN_P (insn));
30863
30864 if (!reload_completed
30865 || is_nonpipeline_insn (insn)
30866 || is_microcoded_insn (insn))
30867 return 1;
30868
30869 return 0;
30870 }
30871
30872 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
30873 and return true. */
30874
30875 static bool
30876 find_mem_ref (rtx pat, rtx *mem_ref)
30877 {
30878 const char * fmt;
30879 int i, j;
30880
30881 /* stack_tie does not produce any real memory traffic. */
30882 if (tie_operand (pat, VOIDmode))
30883 return false;
30884
30885 if (MEM_P (pat))
30886 {
30887 *mem_ref = pat;
30888 return true;
30889 }
30890
30891 /* Recursively process the pattern. */
30892 fmt = GET_RTX_FORMAT (GET_CODE (pat));
30893
30894 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
30895 {
30896 if (fmt[i] == 'e')
30897 {
30898 if (find_mem_ref (XEXP (pat, i), mem_ref))
30899 return true;
30900 }
30901 else if (fmt[i] == 'E')
30902 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
30903 {
30904 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
30905 return true;
30906 }
30907 }
30908
30909 return false;
30910 }
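
/* For illustration: on a simple store pattern

     (set (mem:SI (reg:SI 3)) (reg:SI 4))

   calling find_mem_ref on the SET_DEST returns the MEM directly, while
   for a PARALLEL the 'E' branch above walks each vector element in
   turn until a MEM is found.  */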
30911
30912 /* Determine if PAT is a PATTERN of a load insn. */
30913
30914 static bool
30915 is_load_insn1 (rtx pat, rtx *load_mem)
30916 {
30917   if (!pat)
30918 return false;
30919
30920 if (GET_CODE (pat) == SET)
30921 return find_mem_ref (SET_SRC (pat), load_mem);
30922
30923 if (GET_CODE (pat) == PARALLEL)
30924 {
30925 int i;
30926
30927 for (i = 0; i < XVECLEN (pat, 0); i++)
30928 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
30929 return true;
30930 }
30931
30932 return false;
30933 }
30934
30935 /* Determine if INSN loads from memory. */
30936
30937 static bool
30938 is_load_insn (rtx insn, rtx *load_mem)
30939 {
30940 if (!insn || !INSN_P (insn))
30941 return false;
30942
30943 if (CALL_P (insn))
30944 return false;
30945
30946 return is_load_insn1 (PATTERN (insn), load_mem);
30947 }
30948
30949 /* Determine if PAT is a PATTERN of a store insn. */
30950
30951 static bool
30952 is_store_insn1 (rtx pat, rtx *str_mem)
30953 {
30954   if (!pat)
30955 return false;
30956
30957 if (GET_CODE (pat) == SET)
30958 return find_mem_ref (SET_DEST (pat), str_mem);
30959
30960 if (GET_CODE (pat) == PARALLEL)
30961 {
30962 int i;
30963
30964 for (i = 0; i < XVECLEN (pat, 0); i++)
30965 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
30966 return true;
30967 }
30968
30969 return false;
30970 }
30971
30972 /* Determine if INSN stores to memory. */
30973
30974 static bool
30975 is_store_insn (rtx insn, rtx *str_mem)
30976 {
30977 if (!insn || !INSN_P (insn))
30978 return false;
30979
30980 return is_store_insn1 (PATTERN (insn), str_mem);
30981 }
30982
30983 /* Return whether TYPE is a Power9 pairable vector instruction type. */
30984
30985 static bool
30986 is_power9_pairable_vec_type (enum attr_type type)
30987 {
30988 switch (type)
30989 {
30990 case TYPE_VECSIMPLE:
30991 case TYPE_VECCOMPLEX:
30992 case TYPE_VECDIV:
30993 case TYPE_VECCMP:
30994 case TYPE_VECPERM:
30995 case TYPE_VECFLOAT:
30996 case TYPE_VECFDIV:
30997 case TYPE_VECDOUBLE:
30998 return true;
30999 default:
31000 break;
31001 }
31002 return false;
31003 }
31004
31005 /* Returns whether the dependence between INSN and NEXT is considered
31006 costly by the given target. */
31007
31008 static bool
31009 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
31010 {
31011 rtx insn;
31012 rtx next;
31013 rtx load_mem, str_mem;
31014
31015   /* If the flag is not enabled, no dependence is considered costly;
31016 allow all dependent insns in the same group.
31017 This is the most aggressive option. */
31018 if (rs6000_sched_costly_dep == no_dep_costly)
31019 return false;
31020
31021   /* If the flag is set to 1, a dependence is always considered costly;
31022 do not allow dependent instructions in the same group.
31023 This is the most conservative option. */
31024 if (rs6000_sched_costly_dep == all_deps_costly)
31025 return true;
31026
31027 insn = DEP_PRO (dep);
31028 next = DEP_CON (dep);
31029
31030 if (rs6000_sched_costly_dep == store_to_load_dep_costly
31031 && is_load_insn (next, &load_mem)
31032 && is_store_insn (insn, &str_mem))
31033 /* Prevent load after store in the same group. */
31034 return true;
31035
31036 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
31037 && is_load_insn (next, &load_mem)
31038 && is_store_insn (insn, &str_mem)
31039 && DEP_TYPE (dep) == REG_DEP_TRUE
31040 && mem_locations_overlap(str_mem, load_mem))
31041 /* Prevent load after store in the same group if it is a true
31042 dependence. */
31043 return true;
31044
31045 /* The flag is set to X; dependences with latency >= X are considered costly,
31046 and will not be scheduled in the same group. */
31047 if (rs6000_sched_costly_dep <= max_dep_latency
31048 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
31049 return true;
31050
31051 return false;
31052 }
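
/* For illustration of the latency threshold above: with
   -msched-costly-dep=3, a dependence of cost 4 between insns scheduled
   0 groups apart gives cost - distance = 4 >= 3, so the dependence is
   judged costly and the consumer is kept out of the producer's dispatch
   group.  */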
31053
31054 /* Return the next insn after INSN that is found before TAIL is reached,
31055 skipping any "non-active" insns - insns that will not actually occupy
31056 an issue slot. Return NULL_RTX if such an insn is not found. */
31057
31058 static rtx_insn *
31059 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
31060 {
31061 if (insn == NULL_RTX || insn == tail)
31062 return NULL;
31063
31064 while (1)
31065 {
31066 insn = NEXT_INSN (insn);
31067 if (insn == NULL_RTX || insn == tail)
31068 return NULL;
31069
31070 if (CALL_P (insn)
31071 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
31072 || (NONJUMP_INSN_P (insn)
31073 && GET_CODE (PATTERN (insn)) != USE
31074 && GET_CODE (PATTERN (insn)) != CLOBBER
31075 && INSN_CODE (insn) != CODE_FOR_stack_tie))
31076 break;
31077 }
31078 return insn;
31079 }
31080
31081 /* Do Power9 specific sched_reorder2 reordering of ready list. */
31082
31083 static int
31084 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
31085 {
31086 int pos;
31087 int i;
31088 rtx_insn *tmp;
31089 enum attr_type type, type2;
31090
31091 type = get_attr_type (last_scheduled_insn);
31092
31093 /* Try to issue fixed point divides back-to-back in pairs so they will be
31094 routed to separate execution units and execute in parallel. */
31095 if (type == TYPE_DIV && divide_cnt == 0)
31096 {
31097 /* First divide has been scheduled. */
31098 divide_cnt = 1;
31099
31100 /* Scan the ready list looking for another divide, if found move it
31101 to the end of the list so it is chosen next. */
31102 pos = lastpos;
31103 while (pos >= 0)
31104 {
31105 if (recog_memoized (ready[pos]) >= 0
31106 && get_attr_type (ready[pos]) == TYPE_DIV)
31107 {
31108 tmp = ready[pos];
31109 for (i = pos; i < lastpos; i++)
31110 ready[i] = ready[i + 1];
31111 ready[lastpos] = tmp;
31112 break;
31113 }
31114 pos--;
31115 }
31116 }
31117 else
31118 {
31119       /* Last insn was the 2nd divide or not a divide; reset the counter.  */
31120 divide_cnt = 0;
31121
31122 /* The best dispatch throughput for vector and vector load insns can be
31123 achieved by interleaving a vector and vector load such that they'll
31124 dispatch to the same superslice. If this pairing cannot be achieved
31125 then it is best to pair vector insns together and vector load insns
31126 together.
31127
31128 To aid in this pairing, vec_pairing maintains the current state with
31129 the following values:
31130
31131 0 : Initial state, no vecload/vector pairing has been started.
31132
31133 1 : A vecload or vector insn has been issued and a candidate for
31134 pairing has been found and moved to the end of the ready
31135 list. */
31136 if (type == TYPE_VECLOAD)
31137 {
31138 /* Issued a vecload. */
31139 if (vec_pairing == 0)
31140 {
31141 int vecload_pos = -1;
31142 /* We issued a single vecload, look for a vector insn to pair it
31143 with. If one isn't found, try to pair another vecload. */
31144 pos = lastpos;
31145 while (pos >= 0)
31146 {
31147 if (recog_memoized (ready[pos]) >= 0)
31148 {
31149 type2 = get_attr_type (ready[pos]);
31150 if (is_power9_pairable_vec_type (type2))
31151 {
31152 /* Found a vector insn to pair with, move it to the
31153 end of the ready list so it is scheduled next. */
31154 tmp = ready[pos];
31155 for (i = pos; i < lastpos; i++)
31156 ready[i] = ready[i + 1];
31157 ready[lastpos] = tmp;
31158 vec_pairing = 1;
31159 return cached_can_issue_more;
31160 }
31161 else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
31162 /* Remember position of first vecload seen. */
31163 vecload_pos = pos;
31164 }
31165 pos--;
31166 }
31167 if (vecload_pos >= 0)
31168 {
31169 /* Didn't find a vector to pair with but did find a vecload,
31170 move it to the end of the ready list. */
31171 tmp = ready[vecload_pos];
31172 for (i = vecload_pos; i < lastpos; i++)
31173 ready[i] = ready[i + 1];
31174 ready[lastpos] = tmp;
31175 vec_pairing = 1;
31176 return cached_can_issue_more;
31177 }
31178 }
31179 }
31180 else if (is_power9_pairable_vec_type (type))
31181 {
31182 /* Issued a vector operation. */
31183 if (vec_pairing == 0)
31184 {
31185 int vec_pos = -1;
31186 /* We issued a single vector insn, look for a vecload to pair it
31187 with. If one isn't found, try to pair another vector. */
31188 pos = lastpos;
31189 while (pos >= 0)
31190 {
31191 if (recog_memoized (ready[pos]) >= 0)
31192 {
31193 type2 = get_attr_type (ready[pos]);
31194 if (type2 == TYPE_VECLOAD)
31195 {
31196 /* Found a vecload insn to pair with, move it to the
31197 end of the ready list so it is scheduled next. */
31198 tmp = ready[pos];
31199 for (i = pos; i < lastpos; i++)
31200 ready[i] = ready[i + 1];
31201 ready[lastpos] = tmp;
31202 vec_pairing = 1;
31203 return cached_can_issue_more;
31204 }
31205 else if (is_power9_pairable_vec_type (type2)
31206 && vec_pos == -1)
31207 /* Remember position of first vector insn seen. */
31208 vec_pos = pos;
31209 }
31210 pos--;
31211 }
31212 if (vec_pos >= 0)
31213 {
31214 /* Didn't find a vecload to pair with but did find a vector
31215 insn, move it to the end of the ready list. */
31216 tmp = ready[vec_pos];
31217 for (i = vec_pos; i < lastpos; i++)
31218 ready[i] = ready[i + 1];
31219 ready[lastpos] = tmp;
31220 vec_pairing = 1;
31221 return cached_can_issue_more;
31222 }
31223 }
31224 }
31225
31226 /* We've either finished a vec/vecload pair, couldn't find an insn to
31227          continue the current pair, or the last insn had nothing to do
31228          with pairing.  In any case, reset the state.  */
31229 vec_pairing = 0;
31230 }
31231
31232 return cached_can_issue_more;
31233 }
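
/* A possible helper capturing the move-to-end rotation repeated above
   (a sketch only; the code as written inlines it).  The ready list is
   issued from its last element, so { A, B, C, D } with pos = 1 becomes
   { A, C, D, B } and B is chosen next.  */
#if 0
static void
move_to_end_of_ready (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}
#endif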
31234
31235 /* We are about to begin issuing insns for this clock cycle. */
31236
31237 static int
31238 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
31239 rtx_insn **ready ATTRIBUTE_UNUSED,
31240 int *pn_ready ATTRIBUTE_UNUSED,
31241 int clock_var ATTRIBUTE_UNUSED)
31242 {
31243 int n_ready = *pn_ready;
31244
31245 if (sched_verbose)
31246 fprintf (dump, "// rs6000_sched_reorder :\n");
31247
31248 /* Reorder the ready list, if the second to last ready insn
31249      is a nonpipelined insn.  */
31250 if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
31251 {
31252 if (is_nonpipeline_insn (ready[n_ready - 1])
31253 && (recog_memoized (ready[n_ready - 2]) > 0))
31254 /* Simply swap first two insns. */
31255 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
31256 }
31257
31258 if (rs6000_tune == PROCESSOR_POWER6)
31259 load_store_pendulum = 0;
31260
31261 return rs6000_issue_rate ();
31262 }
31263
31264 /* Like rs6000_sched_reorder, but called after issuing each insn. */
31265
31266 static int
31267 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
31268 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
31269 {
31270 if (sched_verbose)
31271 fprintf (dump, "// rs6000_sched_reorder2 :\n");
31272
31273 /* For Power6, we need to handle some special cases to try and keep the
31274 store queue from overflowing and triggering expensive flushes.
31275
31276 This code monitors how load and store instructions are being issued
31277 and skews the ready list one way or the other to increase the likelihood
31278 that a desired instruction is issued at the proper time.
31279
31280 A couple of things are done. First, we maintain a "load_store_pendulum"
31281 to track the current state of load/store issue.
31282
31283 - If the pendulum is at zero, then no loads or stores have been
31284 issued in the current cycle so we do nothing.
31285
31286 - If the pendulum is 1, then a single load has been issued in this
31287 cycle and we attempt to locate another load in the ready list to
31288 issue with it.
31289
31290 - If the pendulum is -2, then two stores have already been
31291 issued in this cycle, so we increase the priority of the first load
31292      in the ready list to increase its likelihood of being chosen first
31293 in the next cycle.
31294
31295 - If the pendulum is -1, then a single store has been issued in this
31296 cycle and we attempt to locate another store in the ready list to
31297 issue with it, preferring a store to an adjacent memory location to
31298 facilitate store pairing in the store queue.
31299
31300 - If the pendulum is 2, then two loads have already been
31301 issued in this cycle, so we increase the priority of the first store
31302      in the ready list to increase its likelihood of being chosen first
31303 in the next cycle.
31304
31305 - If the pendulum < -2 or > 2, then do nothing.
31306
31307      Note: This code covers the most common scenarios.  There exist
31308      non-load/store instructions which make use of the LSU and which
31309 would need to be accounted for to strictly model the behavior
31310 of the machine. Those instructions are currently unaccounted
31311 for to help minimize compile time overhead of this code.
31312 */
31313 if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
31314 {
31315 int pos;
31316 int i;
31317 rtx_insn *tmp;
31318 rtx load_mem, str_mem;
31319
31320 if (is_store_insn (last_scheduled_insn, &str_mem))
31321 /* Issuing a store, swing the load_store_pendulum to the left */
31322 load_store_pendulum--;
31323 else if (is_load_insn (last_scheduled_insn, &load_mem))
31324 /* Issuing a load, swing the load_store_pendulum to the right */
31325 load_store_pendulum++;
31326 else
31327 return cached_can_issue_more;
31328
31329 /* If the pendulum is balanced, or there is only one instruction on
31330 the ready list, then all is well, so return. */
31331 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
31332 return cached_can_issue_more;
31333
31334 if (load_store_pendulum == 1)
31335 {
31336 /* A load has been issued in this cycle. Scan the ready list
31337 for another load to issue with it */
31338           pos = *pn_ready - 1;
31339
31340 while (pos >= 0)
31341 {
31342 if (is_load_insn (ready[pos], &load_mem))
31343 {
31344 /* Found a load. Move it to the head of the ready list,
31345                  and adjust its priority so that it is more likely to
31346                  stay there.  */
31347               tmp = ready[pos];
31348               for (i = pos; i < *pn_ready - 1; i++)
31349                 ready[i] = ready[i + 1];
31350               ready[*pn_ready - 1] = tmp;
31351
31352 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31353 INSN_PRIORITY (tmp)++;
31354 break;
31355 }
31356 pos--;
31357 }
31358 }
31359 else if (load_store_pendulum == -2)
31360 {
31361 /* Two stores have been issued in this cycle. Increase the
31362 priority of the first load in the ready list to favor it for
31363 issuing in the next cycle. */
31364           pos = *pn_ready - 1;
31365
31366 while (pos >= 0)
31367 {
31368 if (is_load_insn (ready[pos], &load_mem)
31369 && !sel_sched_p ()
31370 && INSN_PRIORITY_KNOWN (ready[pos]))
31371 {
31372 INSN_PRIORITY (ready[pos])++;
31373
31374 /* Adjust the pendulum to account for the fact that a load
31375 was found and increased in priority. This is to prevent
31376 increasing the priority of multiple loads */
31377 load_store_pendulum--;
31378
31379 break;
31380 }
31381 pos--;
31382 }
31383 }
31384 else if (load_store_pendulum == -1)
31385 {
31386 /* A store has been issued in this cycle. Scan the ready list for
31387 another store to issue with it, preferring a store to an adjacent
31388 memory location */
31389 int first_store_pos = -1;
31390
31391           pos = *pn_ready - 1;
31392
31393 while (pos >= 0)
31394 {
31395 if (is_store_insn (ready[pos], &str_mem))
31396 {
31397 rtx str_mem2;
31398 /* Maintain the index of the first store found on the
31399 list */
31400 if (first_store_pos == -1)
31401 first_store_pos = pos;
31402
31403 if (is_store_insn (last_scheduled_insn, &str_mem2)
31404 && adjacent_mem_locations (str_mem, str_mem2))
31405 {
31406 /* Found an adjacent store. Move it to the head of the
31407                      ready list, and adjust its priority so that it is
31408                      more likely to stay there.  */
31409                   tmp = ready[pos];
31410                   for (i = pos; i < *pn_ready - 1; i++)
31411                     ready[i] = ready[i + 1];
31412                   ready[*pn_ready - 1] = tmp;
31413
31414 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31415 INSN_PRIORITY (tmp)++;
31416
31417 first_store_pos = -1;
31418
31419 break;
31420                 }
31421 }
31422 pos--;
31423 }
31424
31425 if (first_store_pos >= 0)
31426 {
31427 /* An adjacent store wasn't found, but a non-adjacent store was,
31428 so move the non-adjacent store to the front of the ready
31429 list, and adjust its priority so that it is more likely to
31430 stay there. */
31431 tmp = ready[first_store_pos];
31432               for (i = first_store_pos; i < *pn_ready - 1; i++)
31433                 ready[i] = ready[i + 1];
31434               ready[*pn_ready - 1] = tmp;
31435 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
31436 INSN_PRIORITY (tmp)++;
31437 }
31438 }
31439 else if (load_store_pendulum == 2)
31440 {
31441 /* Two loads have been issued in this cycle. Increase the priority
31442 of the first store in the ready list to favor it for issuing in
31443 the next cycle. */
31444           pos = *pn_ready - 1;
31445
31446 while (pos >= 0)
31447 {
31448 if (is_store_insn (ready[pos], &str_mem)
31449 && !sel_sched_p ()
31450 && INSN_PRIORITY_KNOWN (ready[pos]))
31451 {
31452 INSN_PRIORITY (ready[pos])++;
31453
31454 /* Adjust the pendulum to account for the fact that a store
31455 was found and increased in priority. This is to prevent
31456 increasing the priority of multiple stores */
31457 load_store_pendulum++;
31458
31459 break;
31460 }
31461 pos--;
31462 }
31463 }
31464 }
31465
31466 /* Do Power9 dependent reordering if necessary. */
31467 if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
31468 && recog_memoized (last_scheduled_insn) >= 0)
31469 return power9_sched_reorder2 (ready, *pn_ready - 1);
31470
31471 return cached_can_issue_more;
31472 }
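
/* For illustration, a possible Power6 pendulum trace: the pendulum is
   reset to 0 at the start of each cycle.  After one store it is -1 and
   the loop above looks for a second (preferably adjacent) store to pair
   with it; after a second store it is -2, the first load on the ready
   list gets a priority bump, and the pendulum moves on to -3 so no
   further loads are bumped this cycle.  Loads mirror this on the
   positive side.  */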
31473
31474 /* Return whether the presence of INSN causes a dispatch group termination
31475 of group WHICH_GROUP.
31476
31477 If WHICH_GROUP == current_group, this function will return true if INSN
31478    causes the termination of the current group (i.e., the dispatch group to
31479 which INSN belongs). This means that INSN will be the last insn in the
31480 group it belongs to.
31481
31482 If WHICH_GROUP == previous_group, this function will return true if INSN
31483    causes the termination of the previous group (i.e., the dispatch group that
31484    precedes the group to which INSN belongs).  This means that INSN will be
31485    the first insn in the group it belongs to.  */
31486
31487 static bool
31488 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
31489 {
31490 bool first, last;
31491
31492 if (! insn)
31493 return false;
31494
31495 first = insn_must_be_first_in_group (insn);
31496 last = insn_must_be_last_in_group (insn);
31497
31498 if (first && last)
31499 return true;
31500
31501 if (which_group == current_group)
31502 return last;
31503 else if (which_group == previous_group)
31504 return first;
31505
31506 return false;
31507 }
31508
31509
31510 static bool
31511 insn_must_be_first_in_group (rtx_insn *insn)
31512 {
31513 enum attr_type type;
31514
31515 if (!insn
31516 || NOTE_P (insn)
31517 || DEBUG_INSN_P (insn)
31518 || GET_CODE (PATTERN (insn)) == USE
31519 || GET_CODE (PATTERN (insn)) == CLOBBER)
31520 return false;
31521
31522 switch (rs6000_tune)
31523 {
31524 case PROCESSOR_POWER5:
31525 if (is_cracked_insn (insn))
31526 return true;
31527 /* FALLTHRU */
31528 case PROCESSOR_POWER4:
31529 if (is_microcoded_insn (insn))
31530 return true;
31531
31532 if (!rs6000_sched_groups)
31533 return false;
31534
31535 type = get_attr_type (insn);
31536
31537 switch (type)
31538 {
31539 case TYPE_MFCR:
31540 case TYPE_MFCRF:
31541 case TYPE_MTCR:
31542 case TYPE_CR_LOGICAL:
31543 case TYPE_MTJMPR:
31544 case TYPE_MFJMPR:
31545 case TYPE_DIV:
31546 case TYPE_LOAD_L:
31547 case TYPE_STORE_C:
31548 case TYPE_ISYNC:
31549 case TYPE_SYNC:
31550 return true;
31551 default:
31552 break;
31553 }
31554 break;
31555 case PROCESSOR_POWER6:
31556 type = get_attr_type (insn);
31557
31558 switch (type)
31559 {
31560 case TYPE_EXTS:
31561 case TYPE_CNTLZ:
31562 case TYPE_TRAP:
31563 case TYPE_MUL:
31564 case TYPE_INSERT:
31565 case TYPE_FPCOMPARE:
31566 case TYPE_MFCR:
31567 case TYPE_MTCR:
31568 case TYPE_MFJMPR:
31569 case TYPE_MTJMPR:
31570 case TYPE_ISYNC:
31571 case TYPE_SYNC:
31572 case TYPE_LOAD_L:
31573 case TYPE_STORE_C:
31574 return true;
31575 case TYPE_SHIFT:
31576 if (get_attr_dot (insn) == DOT_NO
31577 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31578 return true;
31579 else
31580 break;
31581 case TYPE_DIV:
31582 if (get_attr_size (insn) == SIZE_32)
31583 return true;
31584 else
31585 break;
31586 case TYPE_LOAD:
31587 case TYPE_STORE:
31588 case TYPE_FPLOAD:
31589 case TYPE_FPSTORE:
31590 if (get_attr_update (insn) == UPDATE_YES)
31591 return true;
31592 else
31593 break;
31594 default:
31595 break;
31596 }
31597 break;
31598 case PROCESSOR_POWER7:
31599 type = get_attr_type (insn);
31600
31601 switch (type)
31602 {
31603 case TYPE_CR_LOGICAL:
31604 case TYPE_MFCR:
31605 case TYPE_MFCRF:
31606 case TYPE_MTCR:
31607 case TYPE_DIV:
31608 case TYPE_ISYNC:
31609 case TYPE_LOAD_L:
31610 case TYPE_STORE_C:
31611 case TYPE_MFJMPR:
31612 case TYPE_MTJMPR:
31613 return true;
31614 case TYPE_MUL:
31615 case TYPE_SHIFT:
31616 case TYPE_EXTS:
31617 if (get_attr_dot (insn) == DOT_YES)
31618 return true;
31619 else
31620 break;
31621 case TYPE_LOAD:
31622 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31623 || get_attr_update (insn) == UPDATE_YES)
31624 return true;
31625 else
31626 break;
31627 case TYPE_STORE:
31628 case TYPE_FPLOAD:
31629 case TYPE_FPSTORE:
31630 if (get_attr_update (insn) == UPDATE_YES)
31631 return true;
31632 else
31633 break;
31634 default:
31635 break;
31636 }
31637 break;
31638 case PROCESSOR_POWER8:
31639 type = get_attr_type (insn);
31640
31641 switch (type)
31642 {
31643 case TYPE_CR_LOGICAL:
31644 case TYPE_MFCR:
31645 case TYPE_MFCRF:
31646 case TYPE_MTCR:
31647 case TYPE_SYNC:
31648 case TYPE_ISYNC:
31649 case TYPE_LOAD_L:
31650 case TYPE_STORE_C:
31651 case TYPE_VECSTORE:
31652 case TYPE_MFJMPR:
31653 case TYPE_MTJMPR:
31654 return true;
31655 case TYPE_SHIFT:
31656 case TYPE_EXTS:
31657 case TYPE_MUL:
31658 if (get_attr_dot (insn) == DOT_YES)
31659 return true;
31660 else
31661 break;
31662 case TYPE_LOAD:
31663 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31664 || get_attr_update (insn) == UPDATE_YES)
31665 return true;
31666 else
31667 break;
31668 case TYPE_STORE:
31669 if (get_attr_update (insn) == UPDATE_YES
31670 && get_attr_indexed (insn) == INDEXED_YES)
31671 return true;
31672 else
31673 break;
31674 default:
31675 break;
31676 }
31677 break;
31678 default:
31679 break;
31680 }
31681
31682 return false;
31683 }
31684
31685 static bool
31686 insn_must_be_last_in_group (rtx_insn *insn)
31687 {
31688 enum attr_type type;
31689
31690 if (!insn
31691 || NOTE_P (insn)
31692 || DEBUG_INSN_P (insn)
31693 || GET_CODE (PATTERN (insn)) == USE
31694 || GET_CODE (PATTERN (insn)) == CLOBBER)
31695 return false;
31696
31697 switch (rs6000_tune) {
31698 case PROCESSOR_POWER4:
31699 case PROCESSOR_POWER5:
31700 if (is_microcoded_insn (insn))
31701 return true;
31702
31703 if (is_branch_slot_insn (insn))
31704 return true;
31705
31706 break;
31707 case PROCESSOR_POWER6:
31708 type = get_attr_type (insn);
31709
31710 switch (type)
31711 {
31712 case TYPE_EXTS:
31713 case TYPE_CNTLZ:
31714 case TYPE_TRAP:
31715 case TYPE_MUL:
31716 case TYPE_FPCOMPARE:
31717 case TYPE_MFCR:
31718 case TYPE_MTCR:
31719 case TYPE_MFJMPR:
31720 case TYPE_MTJMPR:
31721 case TYPE_ISYNC:
31722 case TYPE_SYNC:
31723 case TYPE_LOAD_L:
31724 case TYPE_STORE_C:
31725 return true;
31726 case TYPE_SHIFT:
31727 if (get_attr_dot (insn) == DOT_NO
31728 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
31729 return true;
31730 else
31731 break;
31732 case TYPE_DIV:
31733 if (get_attr_size (insn) == SIZE_32)
31734 return true;
31735 else
31736 break;
31737 default:
31738 break;
31739 }
31740 break;
31741 case PROCESSOR_POWER7:
31742 type = get_attr_type (insn);
31743
31744 switch (type)
31745 {
31746 case TYPE_ISYNC:
31747 case TYPE_SYNC:
31748 case TYPE_LOAD_L:
31749 case TYPE_STORE_C:
31750 return true;
31751 case TYPE_LOAD:
31752 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31753 && get_attr_update (insn) == UPDATE_YES)
31754 return true;
31755 else
31756 break;
31757 case TYPE_STORE:
31758 if (get_attr_update (insn) == UPDATE_YES
31759 && get_attr_indexed (insn) == INDEXED_YES)
31760 return true;
31761 else
31762 break;
31763 default:
31764 break;
31765 }
31766 break;
31767 case PROCESSOR_POWER8:
31768 type = get_attr_type (insn);
31769
31770 switch (type)
31771 {
31772 case TYPE_MFCR:
31773 case TYPE_MTCR:
31774 case TYPE_ISYNC:
31775 case TYPE_SYNC:
31776 case TYPE_LOAD_L:
31777 case TYPE_STORE_C:
31778 return true;
31779 case TYPE_LOAD:
31780 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
31781 && get_attr_update (insn) == UPDATE_YES)
31782 return true;
31783 else
31784 break;
31785 case TYPE_STORE:
31786 if (get_attr_update (insn) == UPDATE_YES
31787 && get_attr_indexed (insn) == INDEXED_YES)
31788 return true;
31789 else
31790 break;
31791 default:
31792 break;
31793 }
31794 break;
31795 default:
31796 break;
31797 }
31798
31799 return false;
31800 }
31801
31802 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
31803 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
31804
31805 static bool
31806 is_costly_group (rtx *group_insns, rtx next_insn)
31807 {
31808 int i;
31809 int issue_rate = rs6000_issue_rate ();
31810
31811 for (i = 0; i < issue_rate; i++)
31812 {
31813 sd_iterator_def sd_it;
31814 dep_t dep;
31815 rtx insn = group_insns[i];
31816
31817 if (!insn)
31818 continue;
31819
31820 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
31821 {
31822 rtx next = DEP_CON (dep);
31823
31824 if (next == next_insn
31825 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
31826 return true;
31827 }
31828 }
31829
31830 return false;
31831 }
31832
31833 /* Helper for redefine_groups.
31834 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31835 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31836    to keep it "far" (in a separate group) from GROUP_INSNS, using
31837    one of the following schemes, depending on the value of the flag
31838    -minsert-sched-nops=X:
31839 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31840 in order to force NEXT_INSN into a separate group.
31841 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31842 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31843 insertion (has a group just ended, how many vacant issue slots remain in the
31844 last group, and how many dispatch groups were encountered so far). */
31845
31846 static int
31847 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
31848 rtx_insn *next_insn, bool *group_end, int can_issue_more,
31849 int *group_count)
31850 {
31851 rtx nop;
31852 bool force;
31853 int issue_rate = rs6000_issue_rate ();
31854 bool end = *group_end;
31855 int i;
31856
31857 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
31858 return can_issue_more;
31859
31860 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
31861 return can_issue_more;
31862
31863 force = is_costly_group (group_insns, next_insn);
31864 if (!force)
31865 return can_issue_more;
31866
31867 if (sched_verbose > 6)
31868 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
31869 *group_count ,can_issue_more);
31870
31871 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
31872 {
31873 if (*group_end)
31874 can_issue_more = 0;
31875
31876 /* Since only a branch can be issued in the last issue_slot, it is
31877 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
31878 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
31879 in this case the last nop will start a new group and the branch
31880 will be forced to the new group. */
31881 if (can_issue_more && !is_branch_slot_insn (next_insn))
31882 can_issue_more--;
31883
31884 /* Do we have a special group ending nop? */
31885 if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
31886 || rs6000_tune == PROCESSOR_POWER8)
31887 {
31888 nop = gen_group_ending_nop ();
31889 emit_insn_before (nop, next_insn);
31890 can_issue_more = 0;
31891 }
31892 else
31893 while (can_issue_more > 0)
31894 {
31895 nop = gen_nop ();
31896 emit_insn_before (nop, next_insn);
31897 can_issue_more--;
31898 }
31899
31900 *group_end = true;
31901 return 0;
31902 }
31903
31904 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
31905 {
31906 int n_nops = rs6000_sched_insert_nops;
31907
31908 /* Nops can't be issued from the branch slot, so the effective
31909 issue_rate for nops is 'issue_rate - 1'. */
31910 if (can_issue_more == 0)
31911 can_issue_more = issue_rate;
31912 can_issue_more--;
31913 if (can_issue_more == 0)
31914 {
31915 can_issue_more = issue_rate - 1;
31916 (*group_count)++;
31917 end = true;
31918 for (i = 0; i < issue_rate; i++)
31919 {
31920 group_insns[i] = 0;
31921 }
31922 }
31923
31924 while (n_nops > 0)
31925 {
31926 nop = gen_nop ();
31927 emit_insn_before (nop, next_insn);
31928 if (can_issue_more == issue_rate - 1) /* new group begins */
31929 end = false;
31930 can_issue_more--;
31931 if (can_issue_more == 0)
31932 {
31933 can_issue_more = issue_rate - 1;
31934 (*group_count)++;
31935 end = true;
31936 for (i = 0; i < issue_rate; i++)
31937 {
31938 group_insns[i] = 0;
31939 }
31940 }
31941 n_nops--;
31942 }
31943
31944 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
31945 can_issue_more++;
31946
31947 /* Is next_insn going to start a new group? */
31948 *group_end
31949 = (end
31950 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
31951 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
31952 || (can_issue_more < issue_rate &&
31953 insn_terminates_group_p (next_insn, previous_group)));
31954 if (*group_end && end)
31955 (*group_count)--;
31956
31957 if (sched_verbose > 6)
31958 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
31959 *group_count, can_issue_more);
31960 return can_issue_more;
31961 }
31962
31963 return can_issue_more;
31964 }
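
/* Summary of the two schemes implemented above (illustrative): with
   -minsert-sched-nops=sched_finish_regroup_exact, every remaining issue
   slot is padded (or a single group-ending nop is emitted on
   Power6/7/8), so NEXT_INSN is guaranteed to open a fresh group; with
   -minsert-sched-nops=N for N < sched_finish_regroup_exact, exactly N
   nops are emitted and *GROUP_END / *GROUP_COUNT are recomputed to
   reflect whatever group boundaries those nops happened to create.  */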
31965
31966 /* This function tries to synch the dispatch groups that the compiler "sees"
31967 with the dispatch groups that the processor dispatcher is expected to
31968 form in practice. It tries to achieve this synchronization by forcing the
31969 estimated processor grouping on the compiler (as opposed to the function
31970    'pad_groups' which tries to force the scheduler's grouping on the processor).
31971
31972 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
31973 examines the (estimated) dispatch groups that will be formed by the processor
31974 dispatcher. It marks these group boundaries to reflect the estimated
31975 processor grouping, overriding the grouping that the scheduler had marked.
31976 Depending on the value of the flag '-minsert-sched-nops' this function can
31977 force certain insns into separate groups or force a certain distance between
31978 them by inserting nops, for example, if there exists a "costly dependence"
31979 between the insns.
31980
31981 The function estimates the group boundaries that the processor will form as
31982 follows: It keeps track of how many vacant issue slots are available after
31983 each insn. A subsequent insn will start a new group if one of the following
31984 4 cases applies:
31985 - no more vacant issue slots remain in the current dispatch group.
31986 - only the last issue slot, which is the branch slot, is vacant, but the next
31987 insn is not a branch.
31988    - only the last 2 or fewer issue slots, including the branch slot, are vacant,
31989 which means that a cracked insn (which occupies two issue slots) can't be
31990 issued in this group.
31991    - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
31992 start a new group. */
31993
31994 static int
31995 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
31996 rtx_insn *tail)
31997 {
31998 rtx_insn *insn, *next_insn;
31999 int issue_rate;
32000 int can_issue_more;
32001 int slot, i;
32002 bool group_end;
32003 int group_count = 0;
32004 rtx *group_insns;
32005
32006 /* Initialize. */
32007 issue_rate = rs6000_issue_rate ();
32008 group_insns = XALLOCAVEC (rtx, issue_rate);
32009 for (i = 0; i < issue_rate; i++)
32010 {
32011 group_insns[i] = 0;
32012 }
32013 can_issue_more = issue_rate;
32014 slot = 0;
32015 insn = get_next_active_insn (prev_head_insn, tail);
32016 group_end = false;
32017
32018 while (insn != NULL_RTX)
32019 {
32020 slot = (issue_rate - can_issue_more);
32021 group_insns[slot] = insn;
32022 can_issue_more =
32023 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32024 if (insn_terminates_group_p (insn, current_group))
32025 can_issue_more = 0;
32026
32027 next_insn = get_next_active_insn (insn, tail);
32028 if (next_insn == NULL_RTX)
32029 return group_count + 1;
32030
32031 /* Is next_insn going to start a new group? */
32032 group_end
32033 = (can_issue_more == 0
32034 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
32035 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
32036 || (can_issue_more < issue_rate &&
32037 insn_terminates_group_p (next_insn, previous_group)));
32038
32039 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
32040 next_insn, &group_end, can_issue_more,
32041 &group_count);
32042
32043 if (group_end)
32044 {
32045 group_count++;
32046 can_issue_more = 0;
32047 for (i = 0; i < issue_rate; i++)
32048 {
32049 group_insns[i] = 0;
32050 }
32051 }
32052
32053 if (GET_MODE (next_insn) == TImode && can_issue_more)
32054 PUT_MODE (next_insn, VOIDmode);
32055 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
32056 PUT_MODE (next_insn, TImode);
32057
32058 insn = next_insn;
32059 if (can_issue_more == 0)
32060 can_issue_more = issue_rate;
32061 } /* while */
32062
32063 return group_count;
32064 }
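
/* Note on the mode games above: an insn's mode is used here as a
   group-boundary marker; TImode on an insn means it begins a dispatch
   group (pad_groups below relies on the same convention).  The loop
   therefore clears TImode from an insn that no longer starts a group
   under the estimated processor grouping, and sets it on one that now
   does.  */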
32065
32066 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
32067 dispatch group boundaries that the scheduler had marked. Pad with nops
32068 any dispatch groups which have vacant issue slots, in order to force the
32069 scheduler's grouping on the processor dispatcher. The function
32070 returns the number of dispatch groups found. */
32071
32072 static int
32073 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
32074 rtx_insn *tail)
32075 {
32076 rtx_insn *insn, *next_insn;
32077 rtx nop;
32078 int issue_rate;
32079 int can_issue_more;
32080 int group_end;
32081 int group_count = 0;
32082
32083 /* Initialize issue_rate. */
32084 issue_rate = rs6000_issue_rate ();
32085 can_issue_more = issue_rate;
32086
32087 insn = get_next_active_insn (prev_head_insn, tail);
32088 next_insn = get_next_active_insn (insn, tail);
32089
32090 while (insn != NULL_RTX)
32091 {
32092 can_issue_more =
32093 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
32094
32095 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
32096
32097 if (next_insn == NULL_RTX)
32098 break;
32099
32100 if (group_end)
32101 {
32102 /* If the scheduler had marked group termination at this location
32103 (between insn and next_insn), and neither insn nor next_insn will
32104 force group termination, pad the group with nops to force group
32105 termination. */
32106 if (can_issue_more
32107 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
32108 && !insn_terminates_group_p (insn, current_group)
32109 && !insn_terminates_group_p (next_insn, previous_group))
32110 {
32111 if (!is_branch_slot_insn (next_insn))
32112 can_issue_more--;
32113
32114 while (can_issue_more)
32115 {
32116 nop = gen_nop ();
32117 emit_insn_before (nop, next_insn);
32118 can_issue_more--;
32119 }
32120 }
32121
32122 can_issue_more = issue_rate;
32123 group_count++;
32124 }
32125
32126 insn = next_insn;
32127 next_insn = get_next_active_insn (insn, tail);
32128 }
32129
32130 return group_count;
32131 }
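
/* For illustration: suppose the scheduler marked a group boundary while
   two issue slots were still vacant.  If the next insn is not a branch,
   a single nop suffices, because a non-branch can never occupy the
   final (branch-only) slot anyway; if the next insn is a branch, both
   slots are padded so the branch is pushed into the following group.  */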
32132
32133 /* We're beginning a new block. Initialize data structures as necessary. */
32134
32135 static void
32136 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
32137 int sched_verbose ATTRIBUTE_UNUSED,
32138 int max_ready ATTRIBUTE_UNUSED)
32139 {
32140 last_scheduled_insn = NULL;
32141 load_store_pendulum = 0;
32142 divide_cnt = 0;
32143 vec_pairing = 0;
32144 }
32145
32146 /* The following function is called at the end of scheduling BB.
32147    After reload, it inserts nops to enforce insn group bundling.  */
32148
32149 static void
32150 rs6000_sched_finish (FILE *dump, int sched_verbose)
32151 {
32152 int n_groups;
32153
32154 if (sched_verbose)
32155 fprintf (dump, "=== Finishing schedule.\n");
32156
32157 if (reload_completed && rs6000_sched_groups)
32158 {
32159 /* Do not run sched_finish hook when selective scheduling enabled. */
32160 if (sel_sched_p ())
32161 return;
32162
32163 if (rs6000_sched_insert_nops == sched_finish_none)
32164 return;
32165
32166 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
32167 n_groups = pad_groups (dump, sched_verbose,
32168 current_sched_info->prev_head,
32169 current_sched_info->next_tail);
32170 else
32171 n_groups = redefine_groups (dump, sched_verbose,
32172 current_sched_info->prev_head,
32173 current_sched_info->next_tail);
32174
32175 if (sched_verbose >= 6)
32176 {
32177 fprintf (dump, "ngroups = %d\n", n_groups);
32178 print_rtl (dump, current_sched_info->prev_head);
32179 fprintf (dump, "Done finish_sched\n");
32180 }
32181 }
32182 }
32183
32184 struct rs6000_sched_context
32185 {
32186 short cached_can_issue_more;
32187 rtx_insn *last_scheduled_insn;
32188 int load_store_pendulum;
32189 int divide_cnt;
32190 int vec_pairing;
32191 };
32192
32193 typedef struct rs6000_sched_context rs6000_sched_context_def;
32194 typedef rs6000_sched_context_def *rs6000_sched_context_t;
32195
32196 /* Allocate storage for a new scheduling context.  */
32197 static void *
32198 rs6000_alloc_sched_context (void)
32199 {
32200 return xmalloc (sizeof (rs6000_sched_context_def));
32201 }
32202
32203 /* If CLEAN_P is true, initialize _SC with clean data; otherwise,
32204    initialize it from the global context.  */
32205 static void
32206 rs6000_init_sched_context (void *_sc, bool clean_p)
32207 {
32208 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32209
32210 if (clean_p)
32211 {
32212 sc->cached_can_issue_more = 0;
32213 sc->last_scheduled_insn = NULL;
32214 sc->load_store_pendulum = 0;
32215 sc->divide_cnt = 0;
32216 sc->vec_pairing = 0;
32217 }
32218 else
32219 {
32220 sc->cached_can_issue_more = cached_can_issue_more;
32221 sc->last_scheduled_insn = last_scheduled_insn;
32222 sc->load_store_pendulum = load_store_pendulum;
32223 sc->divide_cnt = divide_cnt;
32224 sc->vec_pairing = vec_pairing;
32225 }
32226 }
32227
32228 /* Sets the global scheduling context to the one pointed to by _SC. */
32229 static void
32230 rs6000_set_sched_context (void *_sc)
32231 {
32232 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
32233
32234 gcc_assert (sc != NULL);
32235
32236 cached_can_issue_more = sc->cached_can_issue_more;
32237 last_scheduled_insn = sc->last_scheduled_insn;
32238 load_store_pendulum = sc->load_store_pendulum;
32239 divide_cnt = sc->divide_cnt;
32240 vec_pairing = sc->vec_pairing;
32241 }
32242
32243 /* Free _SC. */
32244 static void
32245 rs6000_free_sched_context (void *_sc)
32246 {
32247 gcc_assert (_sc != NULL);
32248
32249 free (_sc);
32250 }
32251
32252 static bool
32253 rs6000_sched_can_speculate_insn (rtx_insn *insn)
32254 {
32255 switch (get_attr_type (insn))
32256 {
32257 case TYPE_DIV:
32258 case TYPE_SDIV:
32259 case TYPE_DDIV:
32260 case TYPE_VECDIV:
32261 case TYPE_SSQRT:
32262 case TYPE_DSQRT:
32263 return false;
32264
32265 default:
32266 return true;
32267 }
32268 }
32269 \f
32270 /* Length in units of the trampoline for entering a nested function. */
32271
32272 int
32273 rs6000_trampoline_size (void)
32274 {
32275 int ret = 0;
32276
32277 switch (DEFAULT_ABI)
32278 {
32279 default:
32280 gcc_unreachable ();
32281
32282 case ABI_AIX:
32283 ret = (TARGET_32BIT) ? 12 : 24;
32284 break;
32285
32286 case ABI_ELFv2:
32287 gcc_assert (!TARGET_32BIT);
32288 ret = 32;
32289 break;
32290
32291 case ABI_DARWIN:
32292 case ABI_V4:
32293 ret = (TARGET_32BIT) ? 40 : 48;
32294 break;
32295 }
32296
32297 return ret;
32298 }
32299
32300 /* Emit RTL insns to initialize the variable parts of a trampoline.
32301    FNDECL is the nested function; the address of its pure code comes
32302    from its DECL_RTL.  CXT is an RTX for the static chain value.  */
32303
32304 static void
32305 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
32306 {
32307 int regsize = (TARGET_32BIT) ? 4 : 8;
32308 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
32309 rtx ctx_reg = force_reg (Pmode, cxt);
32310 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
32311
32312 switch (DEFAULT_ABI)
32313 {
32314 default:
32315 gcc_unreachable ();
32316
32317     /* Under AIX, just build the 3-word function descriptor.  */
32318 case ABI_AIX:
32319 {
32320 rtx fnmem, fn_reg, toc_reg;
32321
32322 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32323 error ("you cannot take the address of a nested function if you use "
32324 "the %qs option", "-mno-pointers-to-nested-functions");
32325
32326 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
32327 fn_reg = gen_reg_rtx (Pmode);
32328 toc_reg = gen_reg_rtx (Pmode);
32329
32330 /* Macro to shorten the code expansions below. */
32331 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
32332
32333 m_tramp = replace_equiv_address (m_tramp, addr);
32334
32335 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
32336 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
32337 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
32338 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
32339 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
32340
32341 # undef MEM_PLUS
32342 }
32343 break;
32344
32345 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
32346 case ABI_ELFv2:
32347 case ABI_DARWIN:
32348 case ABI_V4:
32349 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
32350 LCT_NORMAL, VOIDmode,
32351 addr, Pmode,
32352 GEN_INT (rs6000_trampoline_size ()), SImode,
32353 fnaddr, Pmode,
32354 ctx_reg, Pmode);
32355 break;
32356 }
32357 }
32358
32359 \f
32360 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
32361 identifier as an argument, so the front end shouldn't look it up. */
32362
32363 static bool
32364 rs6000_attribute_takes_identifier_p (const_tree attr_id)
32365 {
32366 return is_attribute_p ("altivec", attr_id);
32367 }
32368
32369 /* Handle the "altivec" attribute. The attribute may have
32370 arguments as follows:
32371
32372 __attribute__((altivec(vector__)))
32373 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
32374 __attribute__((altivec(bool__))) (always followed by 'unsigned')
32375
32376 and may appear more than once (e.g., 'vector bool char') in a
32377 given declaration. */
32378
32379 static tree
32380 rs6000_handle_altivec_attribute (tree *node,
32381 tree name ATTRIBUTE_UNUSED,
32382 tree args,
32383 int flags ATTRIBUTE_UNUSED,
32384 bool *no_add_attrs)
32385 {
32386 tree type = *node, result = NULL_TREE;
32387 machine_mode mode;
32388 int unsigned_p;
32389 char altivec_type
32390 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
32391 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
32392 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
32393 : '?');
32394
32395 while (POINTER_TYPE_P (type)
32396 || TREE_CODE (type) == FUNCTION_TYPE
32397 || TREE_CODE (type) == METHOD_TYPE
32398 || TREE_CODE (type) == ARRAY_TYPE)
32399 type = TREE_TYPE (type);
32400
32401 mode = TYPE_MODE (type);
32402
32403 /* Check for invalid AltiVec type qualifiers. */
32404 if (type == long_double_type_node)
32405 error ("use of %<long double%> in AltiVec types is invalid");
32406 else if (type == boolean_type_node)
32407 error ("use of boolean types in AltiVec types is invalid");
32408 else if (TREE_CODE (type) == COMPLEX_TYPE)
32409 error ("use of %<complex%> in AltiVec types is invalid");
32410 else if (DECIMAL_FLOAT_MODE_P (mode))
32411 error ("use of decimal floating point types in AltiVec types is invalid");
32412 else if (!TARGET_VSX)
32413 {
32414 if (type == long_unsigned_type_node || type == long_integer_type_node)
32415 {
32416 if (TARGET_64BIT)
32417 error ("use of %<long%> in AltiVec types is invalid for "
32418 "64-bit code without %qs", "-mvsx");
32419 else if (rs6000_warn_altivec_long)
32420 warning (0, "use of %<long%> in AltiVec types is deprecated; "
32421 "use %<int%>");
32422 }
32423 else if (type == long_long_unsigned_type_node
32424 || type == long_long_integer_type_node)
32425 error ("use of %<long long%> in AltiVec types is invalid without %qs",
32426 "-mvsx");
32427 else if (type == double_type_node)
32428 error ("use of %<double%> in AltiVec types is invalid without %qs",
32429 "-mvsx");
32430 }
32431
32432 switch (altivec_type)
32433 {
32434 case 'v':
32435 unsigned_p = TYPE_UNSIGNED (type);
32436 switch (mode)
32437 {
32438 case E_TImode:
32439 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
32440 break;
32441 case E_DImode:
32442 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
32443 break;
32444 case E_SImode:
32445 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
32446 break;
32447 case E_HImode:
32448 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
32449 break;
32450 case E_QImode:
32451 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
32452 break;
32453 case E_SFmode: result = V4SF_type_node; break;
32454 case E_DFmode: result = V2DF_type_node; break;
32455 /* If the user says 'vector int bool', we may be handed the 'bool'
32456 attribute _before_ the 'vector' attribute, and so select the
32457 proper type in the 'b' case below. */
32458 case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
32459 case E_V2DImode: case E_V2DFmode:
32460 result = type;
32461 default: break;
32462 }
32463 break;
32464 case 'b':
32465 switch (mode)
32466 {
32467 case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
32468 case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
32469 case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
32470 case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
32471 default: break;
32472 }
32473 break;
32474 case 'p':
32475 switch (mode)
32476 {
32477 case E_V8HImode: result = pixel_V8HI_type_node;
32478 default: break;
32479 }
32480 default: break;
32481 }
32482
32483 /* Propagate qualifiers attached to the element type
32484 onto the vector type. */
32485 if (result && result != type && TYPE_QUALS (type))
32486 result = build_qualified_type (result, TYPE_QUALS (type));
32487
32488 *no_add_attrs = true; /* No need to hang on to the attribute. */
32489
32490 if (result)
32491 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
32492
32493 return NULL_TREE;
32494 }
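
/* For illustration: the AltiVec keyword syntax is lowered onto this
   attribute by the front end, roughly

     vector unsigned int v;   ->  altivec(vector__) on unsigned int
     vector bool short b;     ->  altivec(bool__) on unsigned short

   so the first reaches the 'v' case with mode E_SImode and yields
   unsigned_V4SI_type_node, and the second reaches the 'b' case with
   mode E_HImode and yields bool_V8HI_type_node.  */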
32495
32496 /* AltiVec defines five built-in scalar types that serve as vector
32497 elements; we must teach the compiler how to mangle them. The 128-bit
32498 floating point mangling is target-specific as well. */
32499
32500 static const char *
32501 rs6000_mangle_type (const_tree type)
32502 {
32503 type = TYPE_MAIN_VARIANT (type);
32504
32505 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
32506 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
32507 return NULL;
32508
32509 if (type == bool_char_type_node) return "U6__boolc";
32510 if (type == bool_short_type_node) return "U6__bools";
32511 if (type == pixel_type_node) return "u7__pixel";
32512 if (type == bool_int_type_node) return "U6__booli";
32513 if (type == bool_long_long_type_node) return "U6__boolx";
32514
32515 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
32516 return "g";
32517 if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
32518 return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";
32519
32520 /* For all other types, use the default mangling. */
32521 return NULL;
32522 }
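
/* For illustration: a parameter of type __pixel mangles via the vendor
   extended type encoding "u7__pixel", so  void f (__pixel)  becomes
   _Z1fu7__pixel.  The "U6__bool..." strings instead apply the vendor
   extended qualifier __bool to the underlying integer type's code
   ('c', 's', 'i' or 'x'), per the Itanium C++ ABI.  */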
32523
32524 /* Handle a "longcall" or "shortcall" attribute; arguments as in
32525 struct attribute_spec.handler. */
32526
32527 static tree
32528 rs6000_handle_longcall_attribute (tree *node, tree name,
32529 tree args ATTRIBUTE_UNUSED,
32530 int flags ATTRIBUTE_UNUSED,
32531 bool *no_add_attrs)
32532 {
32533 if (TREE_CODE (*node) != FUNCTION_TYPE
32534 && TREE_CODE (*node) != FIELD_DECL
32535 && TREE_CODE (*node) != TYPE_DECL)
32536 {
32537 warning (OPT_Wattributes, "%qE attribute only applies to functions",
32538 name);
32539 *no_add_attrs = true;
32540 }
32541
32542 return NULL_TREE;
32543 }
32544
32545 /* Set longcall attributes on all functions declared when
32546 rs6000_default_long_calls is true. */
32547 static void
32548 rs6000_set_default_type_attributes (tree type)
32549 {
32550 if (rs6000_default_long_calls
32551 && (TREE_CODE (type) == FUNCTION_TYPE
32552 || TREE_CODE (type) == METHOD_TYPE))
32553 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
32554 NULL_TREE,
32555 TYPE_ATTRIBUTES (type));
32556
32557 #if TARGET_MACHO
32558 darwin_set_default_type_attributes (type);
32559 #endif
32560 }
32561
32562 /* Return a reference suitable for calling a function with the
32563 longcall attribute. */
32564
32565 static rtx
32566 rs6000_longcall_ref (rtx call_ref, rtx arg)
32567 {
32568 /* System V adds '.' to the internal name, so skip them. */
32569 const char *call_name = XSTR (call_ref, 0);
32570 if (*call_name == '.')
32571 {
32572 while (*call_name == '.')
32573 call_name++;
32574
32575 tree node = get_identifier (call_name);
32576 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
32577 }
32578
32579 if (TARGET_PLTSEQ)
32580 {
32581 rtx base = const0_rtx;
32582 int regno;
32583 if (DEFAULT_ABI == ABI_ELFv2)
32584 {
32585 base = gen_rtx_REG (Pmode, TOC_REGISTER);
32586 regno = 12;
32587 }
32588 else
32589 {
32590 if (flag_pic)
32591 base = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32592 regno = 11;
32593 }
32594 /* Reg must match that used by linker PLT stubs. For ELFv2, r12
32595 may be used by a function global entry point. For SysV4, r11
32596 is used by __glink_PLTresolve lazy resolver entry. */
32597 rtx reg = gen_rtx_REG (Pmode, regno);
32598 rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, base, call_ref, arg),
32599 UNSPEC_PLT16_HA);
32600 rtx lo = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, reg, call_ref, arg),
32601 UNSPEC_PLT16_LO);
32602 emit_insn (gen_rtx_SET (reg, hi));
32603 emit_insn (gen_rtx_SET (reg, lo));
32604 return reg;
32605 }
32606
32607 return force_reg (Pmode, call_ref);
32608 }
32609 \f
32610 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
32611 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
32612 #endif
32613
32614 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
32615 struct attribute_spec.handler. */
32616 static tree
32617 rs6000_handle_struct_attribute (tree *node, tree name,
32618 tree args ATTRIBUTE_UNUSED,
32619 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
32620 {
32621 tree *type = NULL;
32622 if (DECL_P (*node))
32623 {
32624 if (TREE_CODE (*node) == TYPE_DECL)
32625 type = &TREE_TYPE (*node);
32626 }
32627 else
32628 type = node;
32629
32630 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
32631 || TREE_CODE (*type) == UNION_TYPE)))
32632 {
32633 warning (OPT_Wattributes, "%qE attribute ignored", name);
32634 *no_add_attrs = true;
32635 }
32636
32637 else if ((is_attribute_p ("ms_struct", name)
32638 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
32639 || ((is_attribute_p ("gcc_struct", name)
32640 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
32641 {
32642 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
32643 name);
32644 *no_add_attrs = true;
32645 }
32646
32647 return NULL_TREE;
32648 }
32649
32650 static bool
32651 rs6000_ms_bitfield_layout_p (const_tree record_type)
32652 {
32653 return ((TARGET_USE_MS_BITFIELD_LAYOUT
32654 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
32655 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type)));
32656 }
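/* Illustrative sketch (an addition, not from the original source): on a
   target where TARGET_USE_MS_BITFIELD_LAYOUT is nonzero, the per-type
   attributes handled above override the global choice:

     struct __attribute__ ((ms_struct))  a { char c; int i : 8; };
     struct __attribute__ ((gcc_struct)) b { char c; int i : 8; };

   rs6000_ms_bitfield_layout_p returns true for A (ms_struct always
   wins) and false for B (gcc_struct defeats the target default).  */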
32657 \f
32658 #ifdef USING_ELFOS_H
32659
32660 /* A get_unnamed_section callback, used for switching to toc_section. */
32661
32662 static void
32663 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
32664 {
32665 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32666 && TARGET_MINIMAL_TOC)
32667 {
32668 if (!toc_initialized)
32669 {
32670 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32671 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32672 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
32673 fprintf (asm_out_file, "\t.tc ");
32674 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
32675 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32676 fprintf (asm_out_file, "\n");
32677
32678 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32679 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32680 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32681 fprintf (asm_out_file, " = .+32768\n");
32682 toc_initialized = 1;
32683 }
32684 else
32685 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32686 }
32687 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32688 {
32689 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
32690 if (!toc_initialized)
32691 {
32692 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32693 toc_initialized = 1;
32694 }
32695 }
32696 else
32697 {
32698 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
32699 if (!toc_initialized)
32700 {
32701 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
32702 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
32703 fprintf (asm_out_file, " = .+32768\n");
32704 toc_initialized = 1;
32705 }
32706 }
32707 }
32708
32709 /* Implement TARGET_ASM_INIT_SECTIONS. */
32710
32711 static void
32712 rs6000_elf_asm_init_sections (void)
32713 {
32714 toc_section
32715 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
32716
32717 sdata2_section
32718 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
32719 SDATA2_SECTION_ASM_OP);
32720 }
32721
32722 /* Implement TARGET_SELECT_RTX_SECTION. */
32723
32724 static section *
32725 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
32726 unsigned HOST_WIDE_INT align)
32727 {
32728 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
32729 return toc_section;
32730 else
32731 return default_elf_select_rtx_section (mode, x, align);
32732 }
32733 \f
32734 /* For a SYMBOL_REF, set generic flags and then perform some
32735 target-specific processing.
32736
32737 When the AIX ABI is requested on a non-AIX system, replace the
32738 function name with the real name (with a leading .) rather than the
32739 function descriptor name. This saves a lot of overriding code to
32740 read the prefixes. */
32741
32742 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
32743 static void
32744 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
32745 {
32746 default_encode_section_info (decl, rtl, first);
32747
32748 if (first
32749 && TREE_CODE (decl) == FUNCTION_DECL
32750 && !TARGET_AIX
32751 && DEFAULT_ABI == ABI_AIX)
32752 {
32753 rtx sym_ref = XEXP (rtl, 0);
32754 size_t len = strlen (XSTR (sym_ref, 0));
32755 char *str = XALLOCAVEC (char, len + 2);
32756 str[0] = '.';
32757 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
32758 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
32759 }
32760 }
32761
32762 static inline bool
32763 compare_section_name (const char *section, const char *templ)
32764 {
32765 int len;
32766
32767 len = strlen (templ);
32768 return (strncmp (section, templ, len) == 0
32769 && (section[len] == 0 || section[len] == '.'));
32770 }
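/* Worked examples (illustrative, not from the original source):

     compare_section_name (".sdata", ".sdata")      -> true  (exact match)
     compare_section_name (".sdata.foo", ".sdata")  -> true  ('.' after prefix)
     compare_section_name (".sdata2", ".sdata")     -> false ('2' after prefix)

   so the helper matches a section name or any '.'-separated subsection
   of it, which the checks below rely on.  */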
32771
32772 bool
32773 rs6000_elf_in_small_data_p (const_tree decl)
32774 {
32775 if (rs6000_sdata == SDATA_NONE)
32776 return false;
32777
32778 /* We want to merge strings, so we never consider them small data. */
32779 if (TREE_CODE (decl) == STRING_CST)
32780 return false;
32781
32782 /* Functions are never in the small data area. */
32783 if (TREE_CODE (decl) == FUNCTION_DECL)
32784 return false;
32785
32786 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
32787 {
32788 const char *section = DECL_SECTION_NAME (decl);
32789 if (compare_section_name (section, ".sdata")
32790 || compare_section_name (section, ".sdata2")
32791 || compare_section_name (section, ".gnu.linkonce.s")
32792 || compare_section_name (section, ".sbss")
32793 || compare_section_name (section, ".sbss2")
32794 || compare_section_name (section, ".gnu.linkonce.sb")
32795 || strcmp (section, ".PPC.EMB.sdata0") == 0
32796 || strcmp (section, ".PPC.EMB.sbss0") == 0)
32797 return true;
32798 }
32799 else
32800 {
32801 /* If we are told not to put readonly data in sdata, then don't. */
32802 if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
32803 && !rs6000_readonly_in_sdata)
32804 return false;
32805
32806 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
32807
32808 if (size > 0
32809 && size <= g_switch_value
32810 /* If it's not public, and we're not going to reference it via the
32811 small data area, there's no need to put it in the small data section. */
32812 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
32813 return true;
32814 }
32815
32816 return false;
32817 }
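/* Illustrative sketch (an addition, not from the original source): with
   -msdata=data and a threshold such as -G 8, a public 4-byte global

     int counter;

   satisfies the size test above and is placed in small data, and a
   variable given an explicit small-data section also qualifies:

     int flag __attribute__ ((section (".sdata")));  */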
32818
32819 #endif /* USING_ELFOS_H */
32820 \f
32821 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
32822
32823 static bool
32824 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
32825 {
32826 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
32827 }
32828
32829 /* Do not place thread-local symbols refs in the object blocks. */
32830
32831 static bool
32832 rs6000_use_blocks_for_decl_p (const_tree decl)
32833 {
32834 return !DECL_THREAD_LOCAL_P (decl);
32835 }
32836 \f
32837 /* Return a REG that occurs in ADDR with coefficient 1.
32838 ADDR can be effectively incremented by incrementing REG.
32839
32840 r0 is special and we must not select it as an address
32841 register by this routine since our caller will try to
32842 increment the returned register via an "la" instruction. */
32843
32844 rtx
32845 find_addr_reg (rtx addr)
32846 {
32847 while (GET_CODE (addr) == PLUS)
32848 {
32849 if (REG_P (XEXP (addr, 0))
32850 && REGNO (XEXP (addr, 0)) != 0)
32851 addr = XEXP (addr, 0);
32852 else if (REG_P (XEXP (addr, 1))
32853 && REGNO (XEXP (addr, 1)) != 0)
32854 addr = XEXP (addr, 1);
32855 else if (CONSTANT_P (XEXP (addr, 0)))
32856 addr = XEXP (addr, 1);
32857 else if (CONSTANT_P (XEXP (addr, 1)))
32858 addr = XEXP (addr, 0);
32859 else
32860 gcc_unreachable ();
32861 }
32862 gcc_assert (REG_P (addr) && REGNO (addr) != 0);
32863 return addr;
32864 }
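/* Worked example (illustrative): for ADDR of the form
   (plus (reg 9) (const_int 16)), the loop above selects the register
   arm and returns (reg 9), which the caller can then step with an
   "la" (addi) instruction.  */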
32865
32866 void
32867 rs6000_fatal_bad_address (rtx op)
32868 {
32869 fatal_insn ("bad address", op);
32870 }
32871
32872 #if TARGET_MACHO
32873
32874 typedef struct branch_island_d {
32875 tree function_name;
32876 tree label_name;
32877 int line_number;
32878 } branch_island;
32879
32880
32881 static vec<branch_island, va_gc> *branch_islands;
32882
32883 /* Remember to generate a branch island for far calls to the given
32884 function. */
32885
32886 static void
32887 add_compiler_branch_island (tree label_name, tree function_name,
32888 int line_number)
32889 {
32890 branch_island bi = {function_name, label_name, line_number};
32891 vec_safe_push (branch_islands, bi);
32892 }
32893
32894 /* Generate far-jump branch islands for everything recorded in
32895 branch_islands. Invoked immediately after the last instruction of
32896 the epilogue has been emitted; the branch islands must be appended
32897 to, and contiguous with, the function body. Mach-O stubs are
32898 generated in machopic_output_stub(). */
32899
32900 static void
32901 macho_branch_islands (void)
32902 {
32903 char tmp_buf[512];
32904
32905 while (!vec_safe_is_empty (branch_islands))
32906 {
32907 branch_island *bi = &branch_islands->last ();
32908 const char *label = IDENTIFIER_POINTER (bi->label_name);
32909 const char *name = IDENTIFIER_POINTER (bi->function_name);
32910 char name_buf[512];
32911 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
32912 if (name[0] == '*' || name[0] == '&')
32913 strcpy (name_buf, name+1);
32914 else
32915 {
32916 name_buf[0] = '_';
32917 strcpy (name_buf+1, name);
32918 }
32919 strcpy (tmp_buf, "\n");
32920 strcat (tmp_buf, label);
32921 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32922 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32923 dbxout_stabd (N_SLINE, bi->line_number);
32924 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32925 if (flag_pic)
32926 {
32927 if (TARGET_LINK_STACK)
32928 {
32929 char name[32];
32930 get_ppc476_thunk_name (name);
32931 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
32932 strcat (tmp_buf, name);
32933 strcat (tmp_buf, "\n");
32934 strcat (tmp_buf, label);
32935 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32936 }
32937 else
32938 {
32939 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
32940 strcat (tmp_buf, label);
32941 strcat (tmp_buf, "_pic\n");
32942 strcat (tmp_buf, label);
32943 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
32944 }
32945
32946 strcat (tmp_buf, "\taddis r11,r11,ha16(");
32947 strcat (tmp_buf, name_buf);
32948 strcat (tmp_buf, " - ");
32949 strcat (tmp_buf, label);
32950 strcat (tmp_buf, "_pic)\n");
32951
32952 strcat (tmp_buf, "\tmtlr r0\n");
32953
32954 strcat (tmp_buf, "\taddi r12,r11,lo16(");
32955 strcat (tmp_buf, name_buf);
32956 strcat (tmp_buf, " - ");
32957 strcat (tmp_buf, label);
32958 strcat (tmp_buf, "_pic)\n");
32959
32960 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
32961 }
32962 else
32963 {
32964 strcat (tmp_buf, ":\n\tlis r12,hi16(");
32965 strcat (tmp_buf, name_buf);
32966 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
32967 strcat (tmp_buf, name_buf);
32968 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
32969 }
32970 output_asm_insn (tmp_buf, 0);
32971 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
32972 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
32973 dbxout_stabd (N_SLINE, bi->line_number);
32974 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
32975 branch_islands->pop ();
32976 }
32977 }
32978
32979 /* NO_PREVIOUS_DEF checks whether FUNCTION_NAME has already been
32980 recorded in the branch_islands vector. */
32981
32982 static int
32983 no_previous_def (tree function_name)
32984 {
32985 branch_island *bi;
32986 unsigned ix;
32987
32988 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
32989 if (function_name == bi->function_name)
32990 return 0;
32991 return 1;
32992 }
32993
32994 /* GET_PREV_LABEL returns the label name recorded for a previous
32995 definition of FUNCTION_NAME, or NULL_TREE if there is none. */
32996
32997 static tree
32998 get_prev_label (tree function_name)
32999 {
33000 branch_island *bi;
33001 unsigned ix;
33002
33003 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
33004 if (function_name == bi->function_name)
33005 return bi->label_name;
33006 return NULL_TREE;
33007 }
33008
33009 /* Generate PIC and indirect symbol stubs. */
33010
33011 void
33012 machopic_output_stub (FILE *file, const char *symb, const char *stub)
33013 {
33014 unsigned int length;
33015 char *symbol_name, *lazy_ptr_name;
33016 char *local_label_0;
33017 static unsigned label = 0;
33018
33019 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
33020 symb = (*targetm.strip_name_encoding) (symb);
33021
33022
33023 length = strlen (symb);
33024 symbol_name = XALLOCAVEC (char, length + 32);
33025 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
33026
33027 lazy_ptr_name = XALLOCAVEC (char, length + 32);
33028 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
33029
33030 if (flag_pic == 2)
33031 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
33032 else
33033 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
33034
33035 if (flag_pic == 2)
33036 {
33037 fprintf (file, "\t.align 5\n");
33038
33039 fprintf (file, "%s:\n", stub);
33040 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33041
33042 label++;
33043 local_label_0 = XALLOCAVEC (char, 16);
33044 sprintf (local_label_0, "L%u$spb", label);
33045
33046 fprintf (file, "\tmflr r0\n");
33047 if (TARGET_LINK_STACK)
33048 {
33049 char name[32];
33050 get_ppc476_thunk_name (name);
33051 fprintf (file, "\tbl %s\n", name);
33052 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33053 }
33054 else
33055 {
33056 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
33057 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
33058 }
33059 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
33060 lazy_ptr_name, local_label_0);
33061 fprintf (file, "\tmtlr r0\n");
33062 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
33063 (TARGET_64BIT ? "ldu" : "lwzu"),
33064 lazy_ptr_name, local_label_0);
33065 fprintf (file, "\tmtctr r12\n");
33066 fprintf (file, "\tbctr\n");
33067 }
33068 else
33069 {
33070 fprintf (file, "\t.align 4\n");
33071
33072 fprintf (file, "%s:\n", stub);
33073 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33074
33075 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
33076 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
33077 (TARGET_64BIT ? "ldu" : "lwzu"),
33078 lazy_ptr_name);
33079 fprintf (file, "\tmtctr r12\n");
33080 fprintf (file, "\tbctr\n");
33081 }
33082
33083 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
33084 fprintf (file, "%s:\n", lazy_ptr_name);
33085 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
33086 fprintf (file, "%sdyld_stub_binding_helper\n",
33087 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
33088 }
33089
33090 /* Legitimize PIC addresses. If the address is already
33091 position-independent, we return ORIG. Newly generated
33092 position-independent addresses go into a reg. This is REG if
33093 nonzero, otherwise we allocate register(s) as necessary. */
33094
33095 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
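/* Illustrative note (an addition): adding 0x8000 maps the signed
   16-bit range [-0x8000, 0x7fff] onto [0, 0xffff], so SMALL_INT holds
   for GEN_INT (-32768) and GEN_INT (32767) but not GEN_INT (32768).  */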
33096
33097 rtx
33098 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
33099 rtx reg)
33100 {
33101 rtx base, offset;
33102
33103 if (reg == NULL && !reload_completed)
33104 reg = gen_reg_rtx (Pmode);
33105
33106 if (GET_CODE (orig) == CONST)
33107 {
33108 rtx reg_temp;
33109
33110 if (GET_CODE (XEXP (orig, 0)) == PLUS
33111 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
33112 return orig;
33113
33114 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
33115
33116 /* Use a different reg for the intermediate value, as
33117 it will be marked UNCHANGING. */
33118 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
33119 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
33120 Pmode, reg_temp);
33121 offset
33122 = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
33123 Pmode, reg);
33124
33125 if (CONST_INT_P (offset))
33126 {
33127 if (SMALL_INT (offset))
33128 return plus_constant (Pmode, base, INTVAL (offset));
33129 else if (!reload_completed)
33130 offset = force_reg (Pmode, offset);
33131 else
33132 {
33133 rtx mem = force_const_mem (Pmode, orig);
33134 return machopic_legitimize_pic_address (mem, Pmode, reg);
33135 }
33136 }
33137 return gen_rtx_PLUS (Pmode, base, offset);
33138 }
33139
33140 /* Fall back on generic machopic code. */
33141 return machopic_legitimize_pic_address (orig, mode, reg);
33142 }
33143
33144 /* Output a .machine directive for the Darwin assembler, and call
33145 the generic start_file routine. */
33146
33147 static void
33148 rs6000_darwin_file_start (void)
33149 {
33150 static const struct
33151 {
33152 const char *arg;
33153 const char *name;
33154 HOST_WIDE_INT if_set;
33155 } mapping[] = {
33156 { "ppc64", "ppc64", MASK_64BIT },
33157 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
33158 { "power4", "ppc970", 0 },
33159 { "G5", "ppc970", 0 },
33160 { "7450", "ppc7450", 0 },
33161 { "7400", "ppc7400", MASK_ALTIVEC },
33162 { "G4", "ppc7400", 0 },
33163 { "750", "ppc750", 0 },
33164 { "740", "ppc750", 0 },
33165 { "G3", "ppc750", 0 },
33166 { "604e", "ppc604e", 0 },
33167 { "604", "ppc604", 0 },
33168 { "603e", "ppc603", 0 },
33169 { "603", "ppc603", 0 },
33170 { "601", "ppc601", 0 },
33171 { NULL, "ppc", 0 } };
33172 const char *cpu_id = "";
33173 size_t i;
33174
33175 rs6000_file_start ();
33176 darwin_file_start ();
33177
33178 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
33179
33180 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
33181 cpu_id = rs6000_default_cpu;
33182
33183 if (global_options_set.x_rs6000_cpu_index)
33184 cpu_id = processor_target_table[rs6000_cpu_index].name;
33185
33186 /* Look through the mapping array. Pick the first name that either
33187 matches the argument, has a bit set in IF_SET that is also set
33188 in the target flags, or has a NULL name. */
33189
33190 i = 0;
33191 while (mapping[i].arg != NULL
33192 && strcmp (mapping[i].arg, cpu_id) != 0
33193 && (mapping[i].if_set & rs6000_isa_flags) == 0)
33194 i++;
33195
33196 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
33197 }
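/* Worked example (illustrative): compiling with -mcpu=G5 ends the scan
   at a "ppc970" entry (either via the 970 flag bits or the explicit G5
   alias) and emits ".machine ppc970"; with no recognized CPU the scan
   falls through to the final { NULL, "ppc", 0 } entry and emits
   ".machine ppc".  */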
33198
33199 #endif /* TARGET_MACHO */
33200
33201 #if TARGET_ELF
33202 static int
33203 rs6000_elf_reloc_rw_mask (void)
33204 {
33205 if (flag_pic)
33206 return 3;
33207 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
33208 return 2;
33209 else
33210 return 0;
33211 }
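/* Illustrative note (paraphrasing the hook's documented semantics):
   the result is a mask over relocation kinds used when categorizing
   sections - bit 0 set means data with relocations to local symbols
   must be writable, bit 1 the same for global symbols.  So PIC (3)
   forces all relocated data read-write, the AIX-style ABIs (2) only
   data referencing global symbols, and 0 lets everything stay
   read-only.  */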
33212
33213 /* Record an element in the table of global constructors. SYMBOL is
33214 a SYMBOL_REF of the function to be called; PRIORITY is a number
33215 between 0 and MAX_INIT_PRIORITY.
33216
33217 This differs from default_named_section_asm_out_constructor in
33218 that we have special handling for -mrelocatable. */
33219
33220 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
33221 static void
33222 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
33223 {
33224 const char *section = ".ctors";
33225 char buf[18];
33226
33227 if (priority != DEFAULT_INIT_PRIORITY)
33228 {
33229 sprintf (buf, ".ctors.%.5u",
33230 /* Invert the numbering so the linker puts us in the proper
33231 order; constructors are run from right to left, and the
33232 linker sorts in increasing order. */
33233 MAX_INIT_PRIORITY - priority);
33234 section = buf;
33235 }
33236
33237 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33238 assemble_align (POINTER_SIZE);
33239
33240 if (DEFAULT_ABI == ABI_V4
33241 && (TARGET_RELOCATABLE || flag_pic > 1))
33242 {
33243 fputs ("\t.long (", asm_out_file);
33244 output_addr_const (asm_out_file, symbol);
33245 fputs (")@fixup\n", asm_out_file);
33246 }
33247 else
33248 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33249 }
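/* Worked example (illustrative): a function marked
   __attribute__ ((constructor (100))) arrives here with priority 100,
   and since MAX_INIT_PRIORITY is 65535 it is placed in section
   ".ctors.65435"; the linker's increasing sort of these names then
   yields the intended right-to-left execution order.  */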
33250
33251 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
33252 static void
33253 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
33254 {
33255 const char *section = ".dtors";
33256 char buf[18];
33257
33258 if (priority != DEFAULT_INIT_PRIORITY)
33259 {
33260 sprintf (buf, ".dtors.%.5u",
33261 /* Invert the numbering so the linker puts us in the proper
33262 order; constructors are run from right to left, and the
33263 linker sorts in increasing order. */
33264 MAX_INIT_PRIORITY - priority);
33265 section = buf;
33266 }
33267
33268 switch_to_section (get_section (section, SECTION_WRITE, NULL));
33269 assemble_align (POINTER_SIZE);
33270
33271 if (DEFAULT_ABI == ABI_V4
33272 && (TARGET_RELOCATABLE || flag_pic > 1))
33273 {
33274 fputs ("\t.long (", asm_out_file);
33275 output_addr_const (asm_out_file, symbol);
33276 fputs (")@fixup\n", asm_out_file);
33277 }
33278 else
33279 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
33280 }
33281
33282 void
33283 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
33284 {
33285 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
33286 {
33287 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
33288 ASM_OUTPUT_LABEL (file, name);
33289 fputs (DOUBLE_INT_ASM_OP, file);
33290 rs6000_output_function_entry (file, name);
33291 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
33292 if (DOT_SYMBOLS)
33293 {
33294 fputs ("\t.size\t", file);
33295 assemble_name (file, name);
33296 fputs (",24\n\t.type\t.", file);
33297 assemble_name (file, name);
33298 fputs (",@function\n", file);
33299 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
33300 {
33301 fputs ("\t.globl\t.", file);
33302 assemble_name (file, name);
33303 putc ('\n', file);
33304 }
33305 }
33306 else
33307 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33308 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33309 rs6000_output_function_entry (file, name);
33310 fputs (":\n", file);
33311 return;
33312 }
33313
33314 int uses_toc;
33315 if (DEFAULT_ABI == ABI_V4
33316 && (TARGET_RELOCATABLE || flag_pic > 1)
33317 && !TARGET_SECURE_PLT
33318 && (!constant_pool_empty_p () || crtl->profile)
33319 && (uses_toc = uses_TOC ()))
33320 {
33321 char buf[256];
33322
33323 if (uses_toc == 2)
33324 switch_to_other_text_partition ();
33325 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33326
33327 fprintf (file, "\t.long ");
33328 assemble_name (file, toc_label_name);
33329 need_toc_init = 1;
33330 putc ('-', file);
33331 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33332 assemble_name (file, buf);
33333 putc ('\n', file);
33334 if (uses_toc == 2)
33335 switch_to_other_text_partition ();
33336 }
33337
33338 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
33339 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
33340
33341 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
33342 {
33343 char buf[256];
33344
33345 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
33346
33347 fprintf (file, "\t.quad .TOC.-");
33348 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
33349 assemble_name (file, buf);
33350 putc ('\n', file);
33351 }
33352
33353 if (DEFAULT_ABI == ABI_AIX)
33354 {
33355 const char *desc_name, *orig_name;
33356
33357 orig_name = (*targetm.strip_name_encoding) (name);
33358 desc_name = orig_name;
33359 while (*desc_name == '.')
33360 desc_name++;
33361
33362 if (TREE_PUBLIC (decl))
33363 fprintf (file, "\t.globl %s\n", desc_name);
33364
33365 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
33366 fprintf (file, "%s:\n", desc_name);
33367 fprintf (file, "\t.long %s\n", orig_name);
33368 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
33369 fputs ("\t.long 0\n", file);
33370 fprintf (file, "\t.previous\n");
33371 }
33372 ASM_OUTPUT_LABEL (file, name);
33373 }
33374
33375 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
33376 static void
33377 rs6000_elf_file_end (void)
33378 {
33379 #ifdef HAVE_AS_GNU_ATTRIBUTE
33380 /* ??? The value emitted depends on options active at file end.
33381 Assume anyone using #pragma or attributes that might change
33382 options knows what they are doing. */
33383 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
33384 && rs6000_passes_float)
33385 {
33386 int fp;
33387
33388 if (TARGET_HARD_FLOAT)
33389 fp = 1;
33390 else
33391 fp = 2;
33392 if (rs6000_passes_long_double)
33393 {
33394 if (!TARGET_LONG_DOUBLE_128)
33395 fp |= 2 * 4;
33396 else if (TARGET_IEEEQUAD)
33397 fp |= 3 * 4;
33398 else
33399 fp |= 1 * 4;
33400 }
33401 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
33402 }
33403 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
33404 {
33405 if (rs6000_passes_vector)
33406 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
33407 (TARGET_ALTIVEC_ABI ? 2 : 1));
33408 if (rs6000_returns_struct)
33409 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
33410 aix_struct_return ? 2 : 1);
33411 }
33412 #endif
33413 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
33414 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
33415 file_end_indicate_exec_stack ();
33416 #endif
33417
33418 if (flag_split_stack)
33419 file_end_indicate_split_stack ();
33420
33421 if (cpu_builtin_p)
33422 {
33423 /* We have expanded a CPU builtin, so we need to emit a reference to
33424 the special symbol that LIBC uses to declare it supports the
33425 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature. */
33426 switch_to_section (data_section);
33427 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
33428 fprintf (asm_out_file, "\t%s %s\n",
33429 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
33430 }
33431 }
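/* Worked example (illustrative) for the FP attribute above: with
   -mhard-float and 128-bit IBM long double actually passed somewhere
   in the translation unit, fp = 1 | 1*4 = 5, producing
   ".gnu_attribute 4, 5"; with IEEE 128-bit long double instead,
   fp = 1 | 3*4 = 13.  */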
33432 #endif
33433
33434 #if TARGET_XCOFF
33435
33436 #ifndef HAVE_XCOFF_DWARF_EXTRAS
33437 #define HAVE_XCOFF_DWARF_EXTRAS 0
33438 #endif
33439
33440 static enum unwind_info_type
33441 rs6000_xcoff_debug_unwind_info (void)
33442 {
33443 return UI_NONE;
33444 }
33445
33446 static void
33447 rs6000_xcoff_asm_output_anchor (rtx symbol)
33448 {
33449 char buffer[100];
33450
33451 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
33452 SYMBOL_REF_BLOCK_OFFSET (symbol));
33453 fprintf (asm_out_file, "%s", SET_ASM_OP);
33454 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
33455 fprintf (asm_out_file, ",");
33456 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
33457 fprintf (asm_out_file, "\n");
33458 }
33459
33460 static void
33461 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
33462 {
33463 fputs (GLOBAL_ASM_OP, stream);
33464 RS6000_OUTPUT_BASENAME (stream, name);
33465 putc ('\n', stream);
33466 }
33467
33468 /* A get_unnamed_section callback, used for read-only sections.
33469 DIRECTIVE points to the section string variable. */
33470
33471 static void
33472 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
33473 {
33474 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
33475 *(const char *const *) directive,
33476 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33477 }
33478
33479 /* Likewise for read-write sections. */
33480
33481 static void
33482 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
33483 {
33484 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
33485 *(const char *const *) directive,
33486 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33487 }
33488
33489 static void
33490 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
33491 {
33492 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
33493 *(const char *const *) directive,
33494 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
33495 }
33496
33497 /* A get_unnamed_section callback, used for switching to toc_section. */
33498
33499 static void
33500 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
33501 {
33502 if (TARGET_MINIMAL_TOC)
33503 {
33504 /* toc_section is always selected at least once from
33505 rs6000_xcoff_file_start, so this is guaranteed to be
33506 emitted exactly once in each file. */
33507 if (!toc_initialized)
33508 {
33509 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
33510 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
33511 toc_initialized = 1;
33512 }
33513 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
33514 (TARGET_32BIT ? "" : ",3"));
33515 }
33516 else
33517 fputs ("\t.toc\n", asm_out_file);
33518 }
33519
33520 /* Implement TARGET_ASM_INIT_SECTIONS. */
33521
33522 static void
33523 rs6000_xcoff_asm_init_sections (void)
33524 {
33525 read_only_data_section
33526 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33527 &xcoff_read_only_section_name);
33528
33529 private_data_section
33530 = get_unnamed_section (SECTION_WRITE,
33531 rs6000_xcoff_output_readwrite_section_asm_op,
33532 &xcoff_private_data_section_name);
33533
33534 read_only_private_data_section
33535 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
33536 &xcoff_private_rodata_section_name);
33537
33538 tls_data_section
33539 = get_unnamed_section (SECTION_TLS,
33540 rs6000_xcoff_output_tls_section_asm_op,
33541 &xcoff_tls_data_section_name);
33542
33543 tls_private_data_section
33544 = get_unnamed_section (SECTION_TLS,
33545 rs6000_xcoff_output_tls_section_asm_op,
33546 &xcoff_private_data_section_name);
33547
33548 toc_section
33549 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
33550
33551 readonly_data_section = read_only_data_section;
33552 }
33553
33554 static int
33555 rs6000_xcoff_reloc_rw_mask (void)
33556 {
33557 return 3;
33558 }
33559
33560 static void
33561 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
33562 tree decl ATTRIBUTE_UNUSED)
33563 {
33564 int smclass;
33565 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
33566
33567 if (flags & SECTION_EXCLUDE)
33568 smclass = 4;
33569 else if (flags & SECTION_DEBUG)
33570 {
33571 fprintf (asm_out_file, "\t.dwsect %s\n", name);
33572 return;
33573 }
33574 else if (flags & SECTION_CODE)
33575 smclass = 0;
33576 else if (flags & SECTION_TLS)
33577 smclass = 3;
33578 else if (flags & SECTION_WRITE)
33579 smclass = 2;
33580 else
33581 smclass = 1;
33582
33583 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
33584 (flags & SECTION_CODE) ? "." : "",
33585 name, suffix[smclass], flags & SECTION_ENTSIZE);
33586 }
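/* Worked example (illustrative): a writable 4-byte-aligned section
   named "mydata" (so the SECTION_ENTSIZE bits hold log2(4) = 2, per
   rs6000_xcoff_section_type_flags below) is emitted as

     .csect mydata[RW],2

   while a code section would get a leading dot and the PR class.  */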
33587
33588 #define IN_NAMED_SECTION(DECL) \
33589 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
33590 && DECL_SECTION_NAME (DECL) != NULL)
33591
33592 static section *
33593 rs6000_xcoff_select_section (tree decl, int reloc,
33594 unsigned HOST_WIDE_INT align)
33595 {
33596 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
33597 named section. */
33598 if (align > BIGGEST_ALIGNMENT)
33599 {
33600 resolve_unique_section (decl, reloc, true);
33601 if (IN_NAMED_SECTION (decl))
33602 return get_named_section (decl, NULL, reloc);
33603 }
33604
33605 if (decl_readonly_section (decl, reloc))
33606 {
33607 if (TREE_PUBLIC (decl))
33608 return read_only_data_section;
33609 else
33610 return read_only_private_data_section;
33611 }
33612 else
33613 {
33614 #if HAVE_AS_TLS
33615 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
33616 {
33617 if (TREE_PUBLIC (decl))
33618 return tls_data_section;
33619 else if (bss_initializer_p (decl))
33620 {
33621 /* Convert to COMMON to emit in BSS. */
33622 DECL_COMMON (decl) = 1;
33623 return tls_comm_section;
33624 }
33625 else
33626 return tls_private_data_section;
33627 }
33628 else
33629 #endif
33630 if (TREE_PUBLIC (decl))
33631 return data_section;
33632 else
33633 return private_data_section;
33634 }
33635 }
33636
33637 static void
33638 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
33639 {
33640 const char *name;
33641
33642 /* Use select_section for private data and uninitialized data with
33643 alignment <= BIGGEST_ALIGNMENT. */
33644 if (!TREE_PUBLIC (decl)
33645 || DECL_COMMON (decl)
33646 || (DECL_INITIAL (decl) == NULL_TREE
33647 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
33648 || DECL_INITIAL (decl) == error_mark_node
33649 || (flag_zero_initialized_in_bss
33650 && initializer_zerop (DECL_INITIAL (decl))))
33651 return;
33652
33653 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
33654 name = (*targetm.strip_name_encoding) (name);
33655 set_decl_section_name (decl, name);
33656 }
33657
33658 /* Select section for constant in constant pool.
33659
33660 On RS/6000, all constants are in the private read-only data area.
33661 However, if this is being placed in the TOC it must be output as a
33662 toc entry. */
33663
33664 static section *
33665 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
33666 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
33667 {
33668 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
33669 return toc_section;
33670 else
33671 return read_only_private_data_section;
33672 }
33673
33674 /* Remove any trailing [DS] or the like from the symbol name. */
33675
33676 static const char *
33677 rs6000_xcoff_strip_name_encoding (const char *name)
33678 {
33679 size_t len;
33680 if (*name == '*')
33681 name++;
33682 len = strlen (name);
33683 if (name[len - 1] == ']')
33684 return ggc_alloc_string (name, len - 4);
33685 else
33686 return name;
33687 }
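/* Worked example (illustrative): "*foo[DS]" first loses the '*', and
   since the remainder ends in ']' its last four characters are
   dropped, yielding "foo".  Note the code assumes the bracketed
   suffix is always two characters wide, e.g. [DS], [RW], [UA].  */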
33688
33689 /* Section attributes. AIX is always PIC. */
33690
33691 static unsigned int
33692 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
33693 {
33694 unsigned int align;
33695 unsigned int flags = default_section_type_flags (decl, name, reloc);
33696
33697 /* Align to at least the minimum word size (MIN_UNITS_PER_WORD). */
33698 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
33699 align = MIN_UNITS_PER_WORD;
33700 else
33701 /* Increase alignment of large objects if not already stricter. */
33702 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
33703 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
33704 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
33705
33706 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
33707 }
33708
33709 /* Output at beginning of assembler file.
33710
33711 Initialize the section names for the RS/6000 at this point.
33712
33713 Specify filename, including full path, to assembler.
33714
33715 We want to go into the TOC section so at least one .toc will be emitted.
33716 Also, in order to output proper .bs/.es pairs, we need at least one static
33717 [RW] section emitted.
33718
33719 Finally, declare mcount when profiling to make the assembler happy. */
33720
33721 static void
33722 rs6000_xcoff_file_start (void)
33723 {
33724 rs6000_gen_section_name (&xcoff_bss_section_name,
33725 main_input_filename, ".bss_");
33726 rs6000_gen_section_name (&xcoff_private_data_section_name,
33727 main_input_filename, ".rw_");
33728 rs6000_gen_section_name (&xcoff_private_rodata_section_name,
33729 main_input_filename, ".rop_");
33730 rs6000_gen_section_name (&xcoff_read_only_section_name,
33731 main_input_filename, ".ro_");
33732 rs6000_gen_section_name (&xcoff_tls_data_section_name,
33733 main_input_filename, ".tls_");
33734 rs6000_gen_section_name (&xcoff_tbss_section_name,
33735 main_input_filename, ".tbss_[UL]");
33736
33737 fputs ("\t.file\t", asm_out_file);
33738 output_quoted_string (asm_out_file, main_input_filename);
33739 fputc ('\n', asm_out_file);
33740 if (write_symbols != NO_DEBUG)
33741 switch_to_section (private_data_section);
33742 switch_to_section (toc_section);
33743 switch_to_section (text_section);
33744 if (profile_flag)
33745 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
33746 rs6000_file_start ();
33747 }
33748
33749 /* Output at end of assembler file.
33750 On the RS/6000, referencing data should automatically pull in text. */
33751
33752 static void
33753 rs6000_xcoff_file_end (void)
33754 {
33755 switch_to_section (text_section);
33756 fputs ("_section_.text:\n", asm_out_file);
33757 switch_to_section (data_section);
33758 fputs (TARGET_32BIT
33759 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
33760 asm_out_file);
33761 }
33762
33763 struct declare_alias_data
33764 {
33765 FILE *file;
33766 bool function_descriptor;
33767 };
33768
33769 /* Declare alias N. A callback for call_for_symbol_and_aliases. */
33770
33771 static bool
33772 rs6000_declare_alias (struct symtab_node *n, void *d)
33773 {
33774 struct declare_alias_data *data = (struct declare_alias_data *)d;
33775 /* The main symbol is output specially, because the varasm machinery does
33776 part of the job for us - we do not need to emit .globl/.lglobl and such. */
33777 if (!n->alias || n->weakref)
33778 return false;
33779
33780 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
33781 return false;
33782
33783 /* Prevent assemble_alias from trying to use .set pseudo operation
33784 that does not behave as expected by the middle-end. */
33785 TREE_ASM_WRITTEN (n->decl) = true;
33786
33787 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
33788 char *buffer = (char *) alloca (strlen (name) + 2);
33789 char *p;
33790 int dollar_inside = 0;
33791
33792 strcpy (buffer, name);
33793 p = strchr (buffer, '$');
33794 while (p) {
33795 *p = '_';
33796 dollar_inside++;
33797 p = strchr (p + 1, '$');
33798 }
33799 if (TREE_PUBLIC (n->decl))
33800 {
33801 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
33802 {
33803 if (dollar_inside) {
33804 if (data->function_descriptor)
33805 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33806 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33807 }
33808 if (data->function_descriptor)
33809 {
33810 fputs ("\t.globl .", data->file);
33811 RS6000_OUTPUT_BASENAME (data->file, buffer);
33812 putc ('\n', data->file);
33813 }
33814 fputs ("\t.globl ", data->file);
33815 RS6000_OUTPUT_BASENAME (data->file, buffer);
33816 putc ('\n', data->file);
33817 }
33818 #ifdef ASM_WEAKEN_DECL
33819 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
33820 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
33821 #endif
33822 }
33823 else
33824 {
33825 if (dollar_inside)
33826 {
33827 if (data->function_descriptor)
33828 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
33829 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
33830 }
33831 if (data->function_descriptor)
33832 {
33833 fputs ("\t.lglobl .", data->file);
33834 RS6000_OUTPUT_BASENAME (data->file, buffer);
33835 putc ('\n', data->file);
33836 }
33837 fputs ("\t.lglobl ", data->file);
33838 RS6000_OUTPUT_BASENAME (data->file, buffer);
33839 putc ('\n', data->file);
33840 }
33841 if (data->function_descriptor)
33842 fputs (".", data->file);
33843 RS6000_OUTPUT_BASENAME (data->file, buffer);
33844 fputs (":\n", data->file);
33845 return false;
33846 }
33847
33848
33849 #ifdef HAVE_GAS_HIDDEN
33850 /* Helper function to calculate visibility of a DECL
33851 and return the value as a const string. */
33852
33853 static const char *
33854 rs6000_xcoff_visibility (tree decl)
33855 {
33856 static const char * const visibility_types[] = {
33857 "", ",protected", ",hidden", ",internal"
33858 };
33859
33860 enum symbol_visibility vis = DECL_VISIBILITY (decl);
33861 return visibility_types[vis];
33862 }
33863 #endif
33864
33865
33866 /* This macro produces the initial definition of a function name.
33867 On the RS/6000, we need to place an extra '.' in the function name and
33868 output the function descriptor.
33869 Dollar signs are converted to underscores.
33870
33871 The csect for the function will have already been created when
33872 text_section was selected. We do have to go back to that csect, however.
33873
33874 The third and fourth parameters to the .function pseudo-op
33875 are placeholders which no longer have any use.
33876
33877 Because AIX assembler's .set command has unexpected semantics, we output
33878 all aliases as alternative labels in front of the definition. */
33879
33880 void
33881 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
33882 {
33883 char *buffer = (char *) alloca (strlen (name) + 1);
33884 char *p;
33885 int dollar_inside = 0;
33886 struct declare_alias_data data = {file, false};
33887
33888 strcpy (buffer, name);
33889 p = strchr (buffer, '$');
33890 while (p) {
33891 *p = '_';
33892 dollar_inside++;
33893 p = strchr (p + 1, '$');
33894 }
33895 if (TREE_PUBLIC (decl))
33896 {
33897 if (!RS6000_WEAK || !DECL_WEAK (decl))
33898 {
33899 if (dollar_inside) {
33900 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33901 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33902 }
33903 fputs ("\t.globl .", file);
33904 RS6000_OUTPUT_BASENAME (file, buffer);
33905 #ifdef HAVE_GAS_HIDDEN
33906 fputs (rs6000_xcoff_visibility (decl), file);
33907 #endif
33908 putc ('\n', file);
33909 }
33910 }
33911 else
33912 {
33913 if (dollar_inside) {
33914 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
33915 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
33916 }
33917 fputs ("\t.lglobl .", file);
33918 RS6000_OUTPUT_BASENAME (file, buffer);
33919 putc ('\n', file);
33920 }
33921 fputs ("\t.csect ", file);
33922 RS6000_OUTPUT_BASENAME (file, buffer);
33923 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
33924 RS6000_OUTPUT_BASENAME (file, buffer);
33925 fputs (":\n", file);
33926 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33927 &data, true);
33928 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
33929 RS6000_OUTPUT_BASENAME (file, buffer);
33930 fputs (", TOC[tc0], 0\n", file);
33931 in_section = NULL;
33932 switch_to_section (function_section (decl));
33933 putc ('.', file);
33934 RS6000_OUTPUT_BASENAME (file, buffer);
33935 fputs (":\n", file);
33936 data.function_descriptor = true;
33937 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
33938 &data, true);
33939 if (!DECL_IGNORED_P (decl))
33940 {
33941 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
33942 xcoffout_declare_function (file, decl, buffer);
33943 else if (write_symbols == DWARF2_DEBUG)
33944 {
33945 name = (*targetm.strip_name_encoding) (name);
33946 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
33947 }
33948 }
33949 return;
33950 }
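/* Illustrative sketch (an assumption, reconstructed from the fputs
   calls above) of the 32-bit output for a public, non-weak function
   "foo" with no aliases:

     .globl .foo
     .csect foo[DS]
   foo:
     .long .foo, TOC[tc0], 0
     .csect .text[PR]
   .foo:

   i.e. a function descriptor in foo[DS] followed by the code label.  */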
33951
33952
33953 /* Output assembly language to globalize a symbol from a DECL,
33954 possibly with visibility. */
33955
33956 void
33957 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
33958 {
33959 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
33960 fputs (GLOBAL_ASM_OP, stream);
33961 RS6000_OUTPUT_BASENAME (stream, name);
33962 #ifdef HAVE_GAS_HIDDEN
33963 fputs (rs6000_xcoff_visibility (decl), stream);
33964 #endif
33965 putc ('\n', stream);
33966 }
33967
33968 /* Output assembly language to define a symbol as COMMON from a DECL,
33969 possibly with visibility. */
33970
33971 void
33972 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
33973 tree decl ATTRIBUTE_UNUSED,
33974 const char *name,
33975 unsigned HOST_WIDE_INT size,
33976 unsigned HOST_WIDE_INT align)
33977 {
33978 unsigned HOST_WIDE_INT align2 = 2;
33979
33980 if (align > 32)
33981 align2 = floor_log2 (align / BITS_PER_UNIT);
33982 else if (size > 4)
33983 align2 = 3;
33984
33985 fputs (COMMON_ASM_OP, stream);
33986 RS6000_OUTPUT_BASENAME (stream, name);
33987
33988 fprintf (stream,
33989 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
33990 size, align2);
33991
33992 #ifdef HAVE_GAS_HIDDEN
33993 if (decl != NULL)
33994 fputs (rs6000_xcoff_visibility (decl), stream);
33995 #endif
33996 putc ('\n', stream);
33997 }
33998
33999 /* This macro produces the initial definition of an object (variable) name.
34000 Because AIX assembler's .set command has unexpected semantics, we output
34001 all aliases as alternative labels in front of the definition. */
34002
34003 void
34004 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
34005 {
34006 struct declare_alias_data data = {file, false};
34007 RS6000_OUTPUT_BASENAME (file, name);
34008 fputs (":\n", file);
34009 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
34010 &data, true);
34011 }
34012
34013 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
34014
34015 void
34016 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
34017 {
34018 fputs (integer_asm_op (size, FALSE), file);
34019 assemble_name (file, label);
34020 fputs ("-$", file);
34021 }
34022
34023 /* Output a symbol offset relative to the dbase for the current object.
34024 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
34025 signed offsets.
34026
34027 __gcc_unwind_dbase is embedded in all executables/libraries through
34028 libgcc/config/rs6000/crtdbase.S. */
34029
34030 void
34031 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
34032 {
34033 fputs (integer_asm_op (size, FALSE), file);
34034 assemble_name (file, label);
34035 fputs("-__gcc_unwind_dbase", file);
34036 }
34037
34038 #ifdef HAVE_AS_TLS
34039 static void
34040 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
34041 {
34042 rtx symbol;
34043 int flags;
34044 const char *symname;
34045
34046 default_encode_section_info (decl, rtl, first);
34047
34048 /* Careful not to prod global register variables. */
34049 if (!MEM_P (rtl))
34050 return;
34051 symbol = XEXP (rtl, 0);
34052 if (!SYMBOL_REF_P (symbol))
34053 return;
34054
34055 flags = SYMBOL_REF_FLAGS (symbol);
34056
34057 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
34058 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
34059
34060 SYMBOL_REF_FLAGS (symbol) = flags;
34061
34062 /* Append mapping class to extern decls. */
34063 symname = XSTR (symbol, 0);
34064 if (decl /* sync condition with assemble_external () */
34065 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
34066 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
34067 || TREE_CODE (decl) == FUNCTION_DECL)
34068 && symname[strlen (symname) - 1] != ']')
34069 {
34070 char *newname = (char *) alloca (strlen (symname) + 5);
34071 strcpy (newname, symname);
34072 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
34073 ? "[DS]" : "[UA]"));
34074 XSTR (symbol, 0) = ggc_strdup (newname);
34075 }
34076 }
34077 #endif /* HAVE_AS_TLS */
34078 #endif /* TARGET_XCOFF */
34079
34080 void
34081 rs6000_asm_weaken_decl (FILE *stream, tree decl,
34082 const char *name, const char *val)
34083 {
34084 fputs ("\t.weak\t", stream);
34085 RS6000_OUTPUT_BASENAME (stream, name);
34086 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34087 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34088 {
34089 if (TARGET_XCOFF)
34090 fputs ("[DS]", stream);
34091 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34092 if (TARGET_XCOFF)
34093 fputs (rs6000_xcoff_visibility (decl), stream);
34094 #endif
34095 fputs ("\n\t.weak\t.", stream);
34096 RS6000_OUTPUT_BASENAME (stream, name);
34097 }
34098 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
34099 if (TARGET_XCOFF)
34100 fputs (rs6000_xcoff_visibility (decl), stream);
34101 #endif
34102 fputc ('\n', stream);
34103 if (val)
34104 {
34105 #ifdef ASM_OUTPUT_DEF
34106 ASM_OUTPUT_DEF (stream, name, val);
34107 #endif
34108 if (decl && TREE_CODE (decl) == FUNCTION_DECL
34109 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
34110 {
34111 fputs ("\t.set\t.", stream);
34112 RS6000_OUTPUT_BASENAME (stream, name);
34113 fputs (",.", stream);
34114 RS6000_OUTPUT_BASENAME (stream, val);
34115 fputc ('\n', stream);
34116 }
34117 }
34118 }
34119
34120
34121 /* Return true if INSN should not be copied. */
34122
34123 static bool
34124 rs6000_cannot_copy_insn_p (rtx_insn *insn)
34125 {
34126 return recog_memoized (insn) >= 0
34127 && get_attr_cannot_copy (insn);
34128 }
34129
34130 /* Compute a (partial) cost for rtx X. Return true if the complete
34131 cost has been computed, and false if subexpressions should be
34132 scanned. In either case, *TOTAL contains the cost result. */
34133
34134 static bool
34135 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
34136 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
34137 {
34138 int code = GET_CODE (x);
34139
34140 switch (code)
34141 {
34142 /* On the RS/6000, if it is valid in the insn, it is free. */
34143 case CONST_INT:
34144 if (((outer_code == SET
34145 || outer_code == PLUS
34146 || outer_code == MINUS)
34147 && (satisfies_constraint_I (x)
34148 || satisfies_constraint_L (x)))
34149 || (outer_code == AND
34150 && (satisfies_constraint_K (x)
34151 || (mode == SImode
34152 ? satisfies_constraint_L (x)
34153 : satisfies_constraint_J (x))))
34154 || ((outer_code == IOR || outer_code == XOR)
34155 && (satisfies_constraint_K (x)
34156 || (mode == SImode
34157 ? satisfies_constraint_L (x)
34158 : satisfies_constraint_J (x))))
34159 || outer_code == ASHIFT
34160 || outer_code == ASHIFTRT
34161 || outer_code == LSHIFTRT
34162 || outer_code == ROTATE
34163 || outer_code == ROTATERT
34164 || outer_code == ZERO_EXTRACT
34165 || (outer_code == MULT
34166 && satisfies_constraint_I (x))
34167 || ((outer_code == DIV || outer_code == UDIV
34168 || outer_code == MOD || outer_code == UMOD)
34169 && exact_log2 (INTVAL (x)) >= 0)
34170 || (outer_code == COMPARE
34171 && (satisfies_constraint_I (x)
34172 || satisfies_constraint_K (x)))
34173 || ((outer_code == EQ || outer_code == NE)
34174 && (satisfies_constraint_I (x)
34175 || satisfies_constraint_K (x)
34176 || (mode == SImode
34177 ? satisfies_constraint_L (x)
34178 : satisfies_constraint_J (x))))
34179 || (outer_code == GTU
34180 && satisfies_constraint_I (x))
34181 || (outer_code == LTU
34182 && satisfies_constraint_P (x)))
34183 {
34184 *total = 0;
34185 return true;
34186 }
34187 else if ((outer_code == PLUS
34188 && reg_or_add_cint_operand (x, VOIDmode))
34189 || (outer_code == MINUS
34190 && reg_or_sub_cint_operand (x, VOIDmode))
34191 || ((outer_code == SET
34192 || outer_code == IOR
34193 || outer_code == XOR)
34194 && (INTVAL (x)
34195 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
34196 {
34197 *total = COSTS_N_INSNS (1);
34198 return true;
34199 }
34200 /* FALLTHRU */
34201
34202 case CONST_DOUBLE:
34203 case CONST_WIDE_INT:
34204 case CONST:
34205 case HIGH:
34206 case SYMBOL_REF:
34207 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34208 return true;
34209
34210 case MEM:
34211 /* When optimizing for size, MEM should be slightly more expensive
34212 than generating the address, e.g., (plus (reg) (const)).
34213 L1 cache latency is about two instructions. */
34214 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
34215 if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
34216 *total += COSTS_N_INSNS (100);
34217 return true;
34218
34219 case LABEL_REF:
34220 *total = 0;
34221 return true;
34222
34223 case PLUS:
34224 case MINUS:
34225 if (FLOAT_MODE_P (mode))
34226 *total = rs6000_cost->fp;
34227 else
34228 *total = COSTS_N_INSNS (1);
34229 return false;
34230
34231 case MULT:
34232 if (CONST_INT_P (XEXP (x, 1))
34233 && satisfies_constraint_I (XEXP (x, 1)))
34234 {
34235 if (INTVAL (XEXP (x, 1)) >= -256
34236 && INTVAL (XEXP (x, 1)) <= 255)
34237 *total = rs6000_cost->mulsi_const9;
34238 else
34239 *total = rs6000_cost->mulsi_const;
34240 }
34241 else if (mode == SFmode)
34242 *total = rs6000_cost->fp;
34243 else if (FLOAT_MODE_P (mode))
34244 *total = rs6000_cost->dmul;
34245 else if (mode == DImode)
34246 *total = rs6000_cost->muldi;
34247 else
34248 *total = rs6000_cost->mulsi;
34249 return false;
34250
34251 case FMA:
34252 if (mode == SFmode)
34253 *total = rs6000_cost->fp;
34254 else
34255 *total = rs6000_cost->dmul;
34256 break;
34257
34258 case DIV:
34259 case MOD:
34260 if (FLOAT_MODE_P (mode))
34261 {
34262 *total = mode == DFmode ? rs6000_cost->ddiv
34263 : rs6000_cost->sdiv;
34264 return false;
34265 }
34266 /* FALLTHRU */
34267
34268 case UDIV:
34269 case UMOD:
34270 if (CONST_INT_P (XEXP (x, 1))
34271 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
34272 {
34273 if (code == DIV || code == MOD)
34274 /* Shift, addze */
34275 *total = COSTS_N_INSNS (2);
34276 else
34277 /* Shift */
34278 *total = COSTS_N_INSNS (1);
34279 }
34280 else
34281 {
34282 if (GET_MODE (XEXP (x, 1)) == DImode)
34283 *total = rs6000_cost->divdi;
34284 else
34285 *total = rs6000_cost->divsi;
34286 }
34287 /* Add in shift and subtract for MOD unless we have a mod instruction. */
34288 if (!TARGET_MODULO && (code == MOD || code == UMOD))
34289 *total += COSTS_N_INSNS (2);
34290 return false;
34291
34292 case CTZ:
34293 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
34294 return false;
34295
34296 case FFS:
34297 *total = COSTS_N_INSNS (4);
34298 return false;
34299
34300 case POPCOUNT:
34301 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
34302 return false;
34303
34304 case PARITY:
34305 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
34306 return false;
34307
34308 case NOT:
34309 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
34310 *total = 0;
34311 else
34312 *total = COSTS_N_INSNS (1);
34313 return false;
34314
34315 case AND:
34316 if (CONST_INT_P (XEXP (x, 1)))
34317 {
34318 rtx left = XEXP (x, 0);
34319 rtx_code left_code = GET_CODE (left);
34320
34321 /* rotate-and-mask: 1 insn. */
34322 if ((left_code == ROTATE
34323 || left_code == ASHIFT
34324 || left_code == LSHIFTRT)
34325 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
34326 {
34327 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
34328 if (!CONST_INT_P (XEXP (left, 1)))
34329 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
34330 *total += COSTS_N_INSNS (1);
34331 return true;
34332 }
34333
34334 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
34335 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
34336 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
34337 || (val & 0xffff) == val
34338 || (val & 0xffff0000) == val
34339 || ((val & 0xffff) == 0 && mode == SImode))
34340 {
34341 *total = rtx_cost (left, mode, AND, 0, speed);
34342 *total += COSTS_N_INSNS (1);
34343 return true;
34344 }
34345
34346 /* 2 insns. */
34347 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
34348 {
34349 *total = rtx_cost (left, mode, AND, 0, speed);
34350 *total += COSTS_N_INSNS (2);
34351 return true;
34352 }
34353 }
34354
34355 *total = COSTS_N_INSNS (1);
34356 return false;
34357
34358 case IOR:
34359 /* FIXME */
34360 *total = COSTS_N_INSNS (1);
34361 return true;
34362
34363 case CLZ:
34364 case XOR:
34365 case ZERO_EXTRACT:
34366 *total = COSTS_N_INSNS (1);
34367 return false;
34368
34369 case ASHIFT:
34370 /* EXTSWSLI combines the sign extend and the shift into a single
34371 instruction, so don't count the two operations separately. */
34372 if (TARGET_EXTSWSLI && mode == DImode
34373 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
34374 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
34375 {
34376 *total = 0;
34377 return false;
34378 }
34379 /* fall through */
34380
34381 case ASHIFTRT:
34382 case LSHIFTRT:
34383 case ROTATE:
34384 case ROTATERT:
34385 /* Handle mul_highpart. */
34386 if (outer_code == TRUNCATE
34387 && GET_CODE (XEXP (x, 0)) == MULT)
34388 {
34389 if (mode == DImode)
34390 *total = rs6000_cost->muldi;
34391 else
34392 *total = rs6000_cost->mulsi;
34393 return true;
34394 }
34395 else if (outer_code == AND)
34396 *total = 0;
34397 else
34398 *total = COSTS_N_INSNS (1);
34399 return false;
34400
34401 case SIGN_EXTEND:
34402 case ZERO_EXTEND:
34403 if (MEM_P (XEXP (x, 0)))
34404 *total = 0;
34405 else
34406 *total = COSTS_N_INSNS (1);
34407 return false;
34408
34409 case COMPARE:
34410 case NEG:
34411 case ABS:
34412 if (!FLOAT_MODE_P (mode))
34413 {
34414 *total = COSTS_N_INSNS (1);
34415 return false;
34416 }
34417 /* FALLTHRU */
34418
34419 case FLOAT:
34420 case UNSIGNED_FLOAT:
34421 case FIX:
34422 case UNSIGNED_FIX:
34423 case FLOAT_TRUNCATE:
34424 *total = rs6000_cost->fp;
34425 return false;
34426
34427 case FLOAT_EXTEND:
34428 if (mode == DFmode)
34429 *total = rs6000_cost->sfdf_convert;
34430 else
34431 *total = rs6000_cost->fp;
34432 return false;
34433
34434 case UNSPEC:
34435 switch (XINT (x, 1))
34436 {
34437 case UNSPEC_FRSP:
34438 *total = rs6000_cost->fp;
34439 return true;
34440
34441 default:
34442 break;
34443 }
34444 break;
34445
34446 case CALL:
34447 case IF_THEN_ELSE:
34448 if (!speed)
34449 {
34450 *total = COSTS_N_INSNS (1);
34451 return true;
34452 }
34453 else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
34454 {
34455 *total = rs6000_cost->fp;
34456 return false;
34457 }
34458 break;
34459
34460 case NE:
34461 case EQ:
34462 case GTU:
34463 case LTU:
34464 /* Using the carry bit requires mode == Pmode. The NEG or PLUS
34465 has already been counted, so only add one more insn. */
34466 if (mode == Pmode
34467 && (outer_code == NEG || outer_code == PLUS))
34468 {
34469 *total = COSTS_N_INSNS (1);
34470 return true;
34471 }
34472 /* FALLTHRU */
34473
34474 case GT:
34475 case LT:
34476 case UNORDERED:
34477 if (outer_code == SET)
34478 {
34479 if (XEXP (x, 1) == const0_rtx)
34480 {
34481 *total = COSTS_N_INSNS (2);
34482 return true;
34483 }
34484 else
34485 {
34486 *total = COSTS_N_INSNS (3);
34487 return false;
34488 }
34489 }
34490 /* CC COMPARE. */
34491 if (outer_code == COMPARE)
34492 {
34493 *total = 0;
34494 return true;
34495 }
34496 break;
34497
34498 default:
34499 break;
34500 }
34501
34502 return false;
34503 }
34504
34505 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
34506
34507 static bool
34508 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
34509 int opno, int *total, bool speed)
34510 {
34511 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
34512
34513 fprintf (stderr,
34514 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
34515 "opno = %d, total = %d, speed = %s, x:\n",
34516 ret ? "complete" : "scan inner",
34517 GET_MODE_NAME (mode),
34518 GET_RTX_NAME (outer_code),
34519 opno,
34520 *total,
34521 speed ? "true" : "false");
34522
34523 debug_rtx (x);
34524
34525 return ret;
34526 }
34527
34528 static int
34529 rs6000_insn_cost (rtx_insn *insn, bool speed)
34530 {
34531 if (recog_memoized (insn) < 0)
34532 return 0;
34533
34534 if (!speed)
34535 return get_attr_length (insn);
34536
34537 int cost = get_attr_cost (insn);
34538 if (cost > 0)
34539 return cost;
34540
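/* Note (illustrative): each PowerPC instruction is 4 bytes, so
LENGTH / 4 below counts the machine instructions the insn emits. */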
34541 int n = get_attr_length (insn) / 4;
34542 enum attr_type type = get_attr_type (insn);
34543
34544 switch (type)
34545 {
34546 case TYPE_LOAD:
34547 case TYPE_FPLOAD:
34548 case TYPE_VECLOAD:
34549 cost = COSTS_N_INSNS (n + 1);
34550 break;
34551
34552 case TYPE_MUL:
34553 switch (get_attr_size (insn))
34554 {
34555 case SIZE_8:
34556 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
34557 break;
34558 case SIZE_16:
34559 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
34560 break;
34561 case SIZE_32:
34562 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
34563 break;
34564 case SIZE_64:
34565 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
34566 break;
34567 default:
34568 gcc_unreachable ();
34569 }
34570 break;
34571 case TYPE_DIV:
34572 switch (get_attr_size (insn))
34573 {
34574 case SIZE_32:
34575 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
34576 break;
34577 case SIZE_64:
34578 cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
34579 break;
34580 default:
34581 gcc_unreachable ();
34582 }
34583 break;
34584
34585 case TYPE_FP:
34586 cost = n * rs6000_cost->fp;
34587 break;
34588 case TYPE_DMUL:
34589 cost = n * rs6000_cost->dmul;
34590 break;
34591 case TYPE_SDIV:
34592 cost = n * rs6000_cost->sdiv;
34593 break;
34594 case TYPE_DDIV:
34595 cost = n * rs6000_cost->ddiv;
34596 break;
34597
34598 case TYPE_SYNC:
34599 case TYPE_LOAD_L:
34600 case TYPE_MFCR:
34601 case TYPE_MFCRF:
34602 cost = COSTS_N_INSNS (n + 2);
34603 break;
34604
34605 default:
34606 cost = COSTS_N_INSNS (n);
34607 }
34608
34609 return cost;
34610 }
34611
34612 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
34613
34614 static int
34615 rs6000_debug_address_cost (rtx x, machine_mode mode,
34616 addr_space_t as, bool speed)
34617 {
34618 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
34619
34620 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
34621 ret, speed ? "true" : "false");
34622 debug_rtx (x);
34623
34624 return ret;
34625 }
34626
34627
34628 /* A C expression returning the cost of moving data from a register of class
34629 CLASS1 to one of CLASS2. */
34630
34631 static int
34632 rs6000_register_move_cost (machine_mode mode,
34633 reg_class_t from, reg_class_t to)
34634 {
34635 int ret;
34636 reg_class_t rclass;
34637
34638 if (TARGET_DEBUG_COST)
34639 dbg_cost_ctrl++;
34640
34641 /* If we have VSX, we can easily move between FPR or Altivec registers,
34642 otherwise we can only easily move within classes.
34643 Do this first so we give best-case answers for union classes
34644 containing both gprs and vsx regs. */
34645 HARD_REG_SET to_vsx, from_vsx;
34646 COPY_HARD_REG_SET (to_vsx, reg_class_contents[to]);
34647 AND_HARD_REG_SET (to_vsx, reg_class_contents[VSX_REGS]);
34648 COPY_HARD_REG_SET (from_vsx, reg_class_contents[from]);
34649 AND_HARD_REG_SET (from_vsx, reg_class_contents[VSX_REGS]);
34650 if (!hard_reg_set_empty_p (to_vsx)
34651 && !hard_reg_set_empty_p (from_vsx)
34652 && (TARGET_VSX
34653 || hard_reg_set_intersect_p (to_vsx, from_vsx)))
34654 {
34655 int reg = FIRST_FPR_REGNO;
34656 if (TARGET_VSX
34657 || (TEST_HARD_REG_BIT (to_vsx, FIRST_ALTIVEC_REGNO)
34658 && TEST_HARD_REG_BIT (from_vsx, FIRST_ALTIVEC_REGNO)))
34659 reg = FIRST_ALTIVEC_REGNO;
34660 ret = 2 * hard_regno_nregs (reg, mode);
34661 }
34662
34663 /* Moves from/to GENERAL_REGS. */
34664 else if ((rclass = from, reg_classes_intersect_p (to, GENERAL_REGS))
34665 || (rclass = to, reg_classes_intersect_p (from, GENERAL_REGS)))
34666 {
34667 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
34668 {
34669 if (TARGET_DIRECT_MOVE)
34670 {
34671 if (rs6000_tune == PROCESSOR_POWER9)
34672 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34673 else
34674 ret = 4 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34675 /* SFmode requires a conversion when moving between gprs
34676 and vsx. */
34677 if (mode == SFmode)
34678 ret += 2;
34679 }
34680 else
34681 ret = (rs6000_memory_move_cost (mode, rclass, false)
34682 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
34683 }
34684
34685 /* It's more expensive to move CR_REGS than CR0_REGS because of the
34686 shift. */
34687 else if (rclass == CR_REGS)
34688 ret = 4;
34689
34690 /* For those processors that have slow LR/CTR moves, make them more
34691 expensive than memory in order to bias spills to memory. */
34692 else if ((rs6000_tune == PROCESSOR_POWER6
34693 || rs6000_tune == PROCESSOR_POWER7
34694 || rs6000_tune == PROCESSOR_POWER8
34695 || rs6000_tune == PROCESSOR_POWER9)
34696 && reg_class_subset_p (rclass, SPECIAL_REGS))
34697 ret = 6 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34698
34699 else
34700 /* A move will cost one instruction per GPR moved. */
34701 ret = 2 * hard_regno_nregs (FIRST_GPR_REGNO, mode);
34702 }
34703
34704 /* Everything else has to go through GENERAL_REGS. */
34705 else
34706 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
34707 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
34708
34709 if (TARGET_DEBUG_COST)
34710 {
34711 if (dbg_cost_ctrl == 1)
34712 fprintf (stderr,
34713 "rs6000_register_move_cost: ret=%d, mode=%s, from=%s, to=%s\n",
34714 ret, GET_MODE_NAME (mode), reg_class_names[from],
34715 reg_class_names[to]);
34716 dbg_cost_ctrl--;
34717 }
34718
34719 return ret;
34720 }
34721
34722 /* A C expression returning the cost of moving data of MODE from a register to
34723 or from memory. */
34724
34725 static int
34726 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
34727 bool in ATTRIBUTE_UNUSED)
34728 {
34729 int ret;
34730
34731 if (TARGET_DEBUG_COST)
34732 dbg_cost_ctrl++;
34733
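/* Note (illustrative): regno 0 below is the first GPR and regno 32 the
first FPR; the cost is 4 units per hard register the mode occupies. */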
34734 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
34735 ret = 4 * hard_regno_nregs (0, mode);
34736 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
34737 || reg_classes_intersect_p (rclass, VSX_REGS)))
34738 ret = 4 * hard_regno_nregs (32, mode);
34739 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
34740 ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
34741 else
34742 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
34743
34744 if (TARGET_DEBUG_COST)
34745 {
34746 if (dbg_cost_ctrl == 1)
34747 fprintf (stderr,
34748 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
34749 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
34750 dbg_cost_ctrl--;
34751 }
34752
34753 return ret;
34754 }
34755
34756 /* Implement TARGET_IRA_CHANGE_PSEUDO_ALLOCNO_CLASS.
34757
34758 The register allocator chooses GEN_OR_VSX_REGS for the allocno
34759 class if GENERAL_REGS and VSX_REGS cost is lower than the memory
34760 cost. This happens a lot when TARGET_DIRECT_MOVE makes the register
34761 move cost between GENERAL_REGS and VSX_REGS low.
34762
34763 It might seem reasonable to use a union class. After all, if usage
34764 of vsr is low and gpr high, it might make sense to spill gpr to vsr
34765 rather than memory. However, in cases where register pressure of
34766 both is high, like the cactus_adm spec test, allowing
34767 GEN_OR_VSX_REGS as the allocno class results in bad decisions in
34768 the first scheduling pass. This is partly due to an allocno of
34769 GEN_OR_VSX_REGS wrongly contributing to the GENERAL_REGS pressure
34770 class, which gives too high a pressure for GENERAL_REGS and too low
34771 for VSX_REGS. So, force a choice of the subclass here.
34772
34773 The best class is also the union if GENERAL_REGS and VSX_REGS have
34774 the same cost. In that case we do use GEN_OR_VSX_REGS as the
34775 allocno class, since trying to narrow down the class by regno mode
34776 is prone to error. For example, SImode is allowed in VSX regs and
34777 in some cases (e.g. gcc.target/powerpc/p9-xxbr-3.c do_bswap32_vect)
34778 it would be wrong to choose an allocno of GENERAL_REGS based on
34779 SImode. */
34780
34781 static reg_class_t
34782 rs6000_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
34783 reg_class_t allocno_class,
34784 reg_class_t best_class)
34785 {
34786 switch (allocno_class)
34787 {
34788 case GEN_OR_VSX_REGS:
34789 /* best_class must be a subset of allocno_class. */
34790 gcc_checking_assert (best_class == GEN_OR_VSX_REGS
34791 || best_class == GEN_OR_FLOAT_REGS
34792 || best_class == VSX_REGS
34793 || best_class == ALTIVEC_REGS
34794 || best_class == FLOAT_REGS
34795 || best_class == GENERAL_REGS
34796 || best_class == BASE_REGS);
34797 /* Use best_class but choose wider classes when copying from the
34798 wider class to best_class is cheap. This mimics IRA choice
34799 of allocno class. */
34800 if (best_class == BASE_REGS)
34801 return GENERAL_REGS;
34802 if (TARGET_VSX
34803 && (best_class == FLOAT_REGS || best_class == ALTIVEC_REGS))
34804 return VSX_REGS;
34805 return best_class;
34806
34807 default:
34808 break;
34809 }
34810
34811 return allocno_class;
34812 }
34813
34814 /* Return the decl of a target-specific builtin that implements the
34815 reciprocal of the function FNDECL, or NULL_TREE if not available. */
34816
34817 static tree
34818 rs6000_builtin_reciprocal (tree fndecl)
34819 {
34820 switch (DECL_FUNCTION_CODE (fndecl))
34821 {
34822 case VSX_BUILTIN_XVSQRTDP:
34823 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
34824 return NULL_TREE;
34825
34826 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
34827
34828 case VSX_BUILTIN_XVSQRTSP:
34829 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
34830 return NULL_TREE;
34831
34832 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
34833
34834 default:
34835 return NULL_TREE;
34836 }
34837 }
34838
34839 /* Load up a constant. If the mode is a vector mode, splat the value across
34840 all of the vector elements. */
34841
34842 static rtx
34843 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
34844 {
34845 rtx reg;
34846
34847 if (mode == SFmode || mode == DFmode)
34848 {
34849 rtx d = const_double_from_real_value (dconst, mode);
34850 reg = force_reg (mode, d);
34851 }
34852 else if (mode == V4SFmode)
34853 {
34854 rtx d = const_double_from_real_value (dconst, SFmode);
34855 rtvec v = gen_rtvec (4, d, d, d, d);
34856 reg = gen_reg_rtx (mode);
34857 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34858 }
34859 else if (mode == V2DFmode)
34860 {
34861 rtx d = const_double_from_real_value (dconst, DFmode);
34862 rtvec v = gen_rtvec (2, d, d);
34863 reg = gen_reg_rtx (mode);
34864 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
34865 }
34866 else
34867 gcc_unreachable ();
34868
34869 return reg;
34870 }
34871
34872 /* Generate an FMA instruction. */
34873
34874 static void
34875 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
34876 {
34877 machine_mode mode = GET_MODE (target);
34878 rtx dst;
34879
34880 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
34881 gcc_assert (dst != NULL);
34882
34883 if (dst != target)
34884 emit_move_insn (target, dst);
34885 }
34886
34887 /* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a). */
34888
34889 static void
34890 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
34891 {
34892 machine_mode mode = GET_MODE (dst);
34893 rtx r;
34894
34895 /* This is a tad more complicated, since the fnma_optab is for
34896 a different expression: fma(-m1, m2, a), which is the same
34897 thing except in the case of signed zeros.
34898
34899 Fortunately we know that if FMA is supported, then FNMSUB is
34900 also supported in the ISA. Just expand it directly. */
34901
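/* Worked identity (illustrative): fma (-m1, m2, a) = a - m1*m2 and
-fma (m1, m2, -a) = -(m1*m2 - a) = a - m1*m2; the two differ only in
the sign of a zero result when m1*m2 == a. */
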
34902 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
34903
34904 r = gen_rtx_NEG (mode, a);
34905 r = gen_rtx_FMA (mode, m1, m2, r);
34906 r = gen_rtx_NEG (mode, r);
34907 emit_insn (gen_rtx_SET (dst, r));
34908 }
34909
34910 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
34911 add a reg_note saying that this was a division. Support both scalar and
34912 vector divide. Assumes no trapping math and finite arguments. */
34913
34914 void
34915 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
34916 {
34917 machine_mode mode = GET_MODE (dst);
34918 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
34919 int i;
34920
34921 /* Low precision estimates guarantee 5 bits of accuracy. High
34922 precision estimates guarantee 14 bits of accuracy. SFmode
34923 requires 23 bits of accuracy. DFmode requires 52 bits of
34924 accuracy. Each pass at least doubles the accuracy, leading
34925 to the following. */
34926 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
34927 if (mode == DFmode || mode == V2DFmode)
34928 passes++;
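
/* Worked check of the pass counts (illustrative):
low-precision estimate: 5 -> 10 -> 20 -> 40 bits, so 3 passes cover
SFmode's 23 bits and 4 passes cover DFmode's 52 bits;
high-precision estimate: 14 -> 28 -> 56 bits, so 1 pass covers SFmode
and 2 passes cover DFmode. */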
34929
34930 enum insn_code code = optab_handler (smul_optab, mode);
34931 insn_gen_fn gen_mul = GEN_FCN (code);
34932
34933 gcc_assert (code != CODE_FOR_nothing);
34934
34935 one = rs6000_load_constant_and_splat (mode, dconst1);
34936
34937 /* x0 = 1./d estimate */
34938 x0 = gen_reg_rtx (mode);
34939 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
34940 UNSPEC_FRES)));
34941
34942 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
34943 if (passes > 1)
34944 {
34945 /* e0 = 1. - d * x0 */
34946 e0 = gen_reg_rtx (mode);
34947 rs6000_emit_nmsub (e0, d, x0, one);
34948
34949 /* x1 = x0 + e0 * x0 */
34950 x1 = gen_reg_rtx (mode);
34951 rs6000_emit_madd (x1, e0, x0, x0);
34952
34953 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
34954 ++i, xprev = xnext, eprev = enext)
34955 {
34956 /* enext = eprev * eprev */
34957 enext = gen_reg_rtx (mode);
34958 emit_insn (gen_mul (enext, eprev, eprev));
34959
34960 /* xnext = xprev + enext * xprev */
34961 xnext = gen_reg_rtx (mode);
34962 rs6000_emit_madd (xnext, enext, xprev, xprev);
34963 }
34964 }
34965 else
34966 xprev = x0;
34967
34968 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
34969
34970 /* u = n * xprev */
34971 u = gen_reg_rtx (mode);
34972 emit_insn (gen_mul (u, n, xprev));
34973
34974 /* v = n - (d * u) */
34975 v = gen_reg_rtx (mode);
34976 rs6000_emit_nmsub (v, d, u, n);
34977
34978 /* dst = (v * xprev) + u */
34979 rs6000_emit_madd (dst, v, xprev, u);
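
/* Check (illustrative): dst = (n - d*u) * xprev + u with u = n * xprev
expands to n * xprev * (2 - d * xprev), the final step noted above. */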
34980
34981 if (note_p)
34982 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
34983 }
34984
34985 /* Goldschmidt's Algorithm for single/double-precision floating point
34986 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
34987
34988 void
34989 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
34990 {
34991 machine_mode mode = GET_MODE (src);
34992 rtx e = gen_reg_rtx (mode);
34993 rtx g = gen_reg_rtx (mode);
34994 rtx h = gen_reg_rtx (mode);
34995
34996 /* Low precision estimates guarantee 5 bits of accuracy. High
34997 precision estimates guarantee 14 bits of accuracy. SFmode
34998 requires 23 bits of accuracy. DFmode requires 52 bits of
34999 accuracy. Each pass at least doubles the accuracy, leading
35000 to the following. */
35001 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
35002 if (mode == DFmode || mode == V2DFmode)
35003 passes++;
35004
35005 int i;
35006 rtx mhalf;
35007 enum insn_code code = optab_handler (smul_optab, mode);
35008 insn_gen_fn gen_mul = GEN_FCN (code);
35009
35010 gcc_assert (code != CODE_FOR_nothing);
35011
35012 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
35013
35014 /* e = rsqrt estimate */
35015 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
35016 UNSPEC_RSQRT)));
35017
35018 /* If (src == 0.0), filter the infinite estimate to prevent 0 * inf = NaN for sqrt(0.0). */
35019 if (!recip)
35020 {
35021 rtx zero = force_reg (mode, CONST0_RTX (mode));
35022
35023 if (mode == SFmode)
35024 {
35025 rtx target = emit_conditional_move (e, GT, src, zero, mode,
35026 e, zero, mode, 0);
35027 if (target != e)
35028 emit_move_insn (e, target);
35029 }
35030 else
35031 {
35032 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
35033 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
35034 }
35035 }
35036
35037 /* g = sqrt estimate. */
35038 emit_insn (gen_mul (g, e, src));
35039 /* h = 1/(2*sqrt) estimate. */
35040 emit_insn (gen_mul (h, e, mhalf));
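
/* Recurrence used below (illustrative): with g ~ sqrt(src) and
h ~ 1/(2*sqrt(src)), each pass computes t = 1/2 - g*h, then
g' = g + g*t and h' = h + h*t; dst is g for sqrt, or 2*h for rsqrt. */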
35041
35042 if (recip)
35043 {
35044 if (passes == 1)
35045 {
35046 rtx t = gen_reg_rtx (mode);
35047 rs6000_emit_nmsub (t, g, h, mhalf);
35048 /* Apply correction directly to 1/rsqrt estimate. */
35049 rs6000_emit_madd (dst, e, t, e);
35050 }
35051 else
35052 {
35053 for (i = 0; i < passes; i++)
35054 {
35055 rtx t1 = gen_reg_rtx (mode);
35056 rtx g1 = gen_reg_rtx (mode);
35057 rtx h1 = gen_reg_rtx (mode);
35058
35059 rs6000_emit_nmsub (t1, g, h, mhalf);
35060 rs6000_emit_madd (g1, g, t1, g);
35061 rs6000_emit_madd (h1, h, t1, h);
35062
35063 g = g1;
35064 h = h1;
35065 }
35066 /* Multiply by 2 for 1/rsqrt. */
35067 emit_insn (gen_add3_insn (dst, h, h));
35068 }
35069 }
35070 else
35071 {
35072 rtx t = gen_reg_rtx (mode);
35073 rs6000_emit_nmsub (t, g, h, mhalf);
35074 rs6000_emit_madd (dst, g, t, g);
35075 }
35076
35077 return;
35078 }
35079
35080 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
35081 (Power7) targets. DST is the target, and SRC is the argument operand. */
35082
35083 void
35084 rs6000_emit_popcount (rtx dst, rtx src)
35085 {
35086 machine_mode mode = GET_MODE (dst);
35087 rtx tmp1, tmp2;
35088
35089 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
35090 if (TARGET_POPCNTD)
35091 {
35092 if (mode == SImode)
35093 emit_insn (gen_popcntdsi2 (dst, src));
35094 else
35095 emit_insn (gen_popcntddi2 (dst, src));
35096 return;
35097 }
35098
35099 tmp1 = gen_reg_rtx (mode);
35100
35101 if (mode == SImode)
35102 {
35103 emit_insn (gen_popcntbsi2 (tmp1, src));
35104 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
35105 NULL_RTX, 0);
35106 tmp2 = force_reg (SImode, tmp2);
35107 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
35108 }
35109 else
35110 {
35111 emit_insn (gen_popcntbdi2 (tmp1, src));
35112 tmp2 = expand_mult (DImode, tmp1,
35113 GEN_INT ((HOST_WIDE_INT)
35114 0x01010101 << 32 | 0x01010101),
35115 NULL_RTX, 0);
35116 tmp2 = force_reg (DImode, tmp2);
35117 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
35118 }
35119 }
35120
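/* Illustrative sketch (not part of GCC) of the multiply-and-shift step
used above: given per-byte popcounts from popcntb, multiplying by
0x01010101 accumulates the byte sum into the most significant byte. */
#if 0
static unsigned int
popcount32_from_byte_counts (unsigned int byte_counts)
{
/* byte_counts holds popcntb output: a popcount in each byte; their sum
is at most 32, so the bytes cannot carry into each other. */
return (byte_counts * 0x01010101u) >> 24;
}
#endif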
35121
35122 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
35123 target, and SRC is the argument operand. */
35124
35125 void
35126 rs6000_emit_parity (rtx dst, rtx src)
35127 {
35128 machine_mode mode = GET_MODE (dst);
35129 rtx tmp;
35130
35131 tmp = gen_reg_rtx (mode);
35132
35133 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
35134 if (TARGET_CMPB)
35135 {
35136 if (mode == SImode)
35137 {
35138 emit_insn (gen_popcntbsi2 (tmp, src));
35139 emit_insn (gen_paritysi2_cmpb (dst, tmp));
35140 }
35141 else
35142 {
35143 emit_insn (gen_popcntbdi2 (tmp, src));
35144 emit_insn (gen_paritydi2_cmpb (dst, tmp));
35145 }
35146 return;
35147 }
35148
35149 if (mode == SImode)
35150 {
35151 /* Is mult+shift >= shift+xor+shift+xor? */
35152 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
35153 {
35154 rtx tmp1, tmp2, tmp3, tmp4;
35155
35156 tmp1 = gen_reg_rtx (SImode);
35157 emit_insn (gen_popcntbsi2 (tmp1, src));
35158
35159 tmp2 = gen_reg_rtx (SImode);
35160 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
35161 tmp3 = gen_reg_rtx (SImode);
35162 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
35163
35164 tmp4 = gen_reg_rtx (SImode);
35165 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
35166 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
35167 }
35168 else
35169 rs6000_emit_popcount (tmp, src);
35170 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
35171 }
35172 else
35173 {
35174 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
35175 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
35176 {
35177 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
35178
35179 tmp1 = gen_reg_rtx (DImode);
35180 emit_insn (gen_popcntbdi2 (tmp1, src));
35181
35182 tmp2 = gen_reg_rtx (DImode);
35183 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
35184 tmp3 = gen_reg_rtx (DImode);
35185 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
35186
35187 tmp4 = gen_reg_rtx (DImode);
35188 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
35189 tmp5 = gen_reg_rtx (DImode);
35190 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
35191
35192 tmp6 = gen_reg_rtx (DImode);
35193 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
35194 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
35195 }
35196 else
35197 rs6000_emit_popcount (tmp, src);
35198 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
35199 }
35200 }
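
/* Illustrative sketch (not part of GCC) of the shift/xor fold used
above for SImode: xor-folding the per-byte counts preserves the parity
of their sum, leaving the result in bit 0 of the low byte. */
#if 0
static unsigned int
parity32_from_byte_counts (unsigned int t)
{
t ^= t >> 16; /* fold halfwords; xor preserves parity of the sum */
t ^= t >> 8; /* fold the remaining two bytes */
return t & 1; /* the parity bit */
}
#endif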
35201
35202 /* Expand an Altivec constant permutation for little endian mode.
35203 OP0 and OP1 are the input vectors and TARGET is the output vector.
35204 SEL specifies the constant permutation vector.
35205
35206 There are two issues: First, the two input operands must be
35207 swapped so that together they form a double-wide array in LE
35208 order. Second, the vperm instruction has surprising behavior
35209 in LE mode: it interprets the elements of the source vectors
35210 in BE mode ("left to right") and interprets the elements of
35211 the destination vector in LE mode ("right to left"). To
35212 correct for this, we must subtract each element of the permute
35213 control vector from 31.
35214
35215 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
35216 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
35217 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
35218 serve as the permute control vector. Then, in BE mode,
35219
35220 vperm 9,10,11,12
35221
35222 places the desired result in vr9. However, in LE mode the
35223 vector contents will be
35224
35225 vr10 = 00000003 00000002 00000001 00000000
35226 vr11 = 00000007 00000006 00000005 00000004
35227
35228 The result of the vperm using the same permute control vector is
35229
35230 vr9 = 05000000 07000000 01000000 03000000
35231
35232 That is, the leftmost 4 bytes of vr10 are interpreted as the
35233 source for the rightmost 4 bytes of vr9, and so on.
35234
35235 If we change the permute control vector to
35236
35237 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
35238
35239 and issue
35240
35241 vperm 9,11,10,12
35242
35243 we get the desired
35244
35245 vr9 = 00000006 00000004 00000002 00000000. */
35246
35247 static void
35248 altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
35249 const vec_perm_indices &sel)
35250 {
35251 unsigned int i;
35252 rtx perm[16];
35253 rtx constv, unspec;
35254
35255 /* Unpack and adjust the constant selector. */
35256 for (i = 0; i < 16; ++i)
35257 {
35258 unsigned int elt = 31 - (sel[i] & 31);
35259 perm[i] = GEN_INT (elt);
35260 }
35261
35262 /* Expand to a permute, swapping the inputs and using the
35263 adjusted selector. */
35264 if (!REG_P (op0))
35265 op0 = force_reg (V16QImode, op0);
35266 if (!REG_P (op1))
35267 op1 = force_reg (V16QImode, op1);
35268
35269 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
35270 constv = force_reg (V16QImode, constv);
35271 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
35272 UNSPEC_VPERM);
35273 if (!REG_P (target))
35274 {
35275 rtx tmp = gen_reg_rtx (V16QImode);
35276 emit_move_insn (tmp, unspec);
35277 unspec = tmp;
35278 }
35279
35280 emit_move_insn (target, unspec);
35281 }
35282
35283 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
35284 permute control vector. But here it's not a constant, so we must
35285 generate a vector NAND or NOR to do the adjustment. */
35286
35287 void
35288 altivec_expand_vec_perm_le (rtx operands[4])
35289 {
35290 rtx notx, iorx, unspec;
35291 rtx target = operands[0];
35292 rtx op0 = operands[1];
35293 rtx op1 = operands[2];
35294 rtx sel = operands[3];
35295 rtx tmp = target;
35296 rtx norreg = gen_reg_rtx (V16QImode);
35297 machine_mode mode = GET_MODE (target);
35298
35299 /* Get everything in regs so the pattern matches. */
35300 if (!REG_P (op0))
35301 op0 = force_reg (mode, op0);
35302 if (!REG_P (op1))
35303 op1 = force_reg (mode, op1);
35304 if (!REG_P (sel))
35305 sel = force_reg (V16QImode, sel);
35306 if (!REG_P (target))
35307 tmp = gen_reg_rtx (mode);
35308
35309 if (TARGET_P9_VECTOR)
35310 {
35311 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
35312 UNSPEC_VPERMR);
35313 }
35314 else
35315 {
35316 /* Invert the selector with a VNAND if available, else a VNOR.
35317 The VNAND is preferred for future fusion opportunities. */
35318 notx = gen_rtx_NOT (V16QImode, sel);
35319 iorx = (TARGET_P8_VECTOR
35320 ? gen_rtx_IOR (V16QImode, notx, notx)
35321 : gen_rtx_AND (V16QImode, notx, notx));
35322 emit_insn (gen_rtx_SET (norreg, iorx));
35323
35324 /* Permute with operands reversed and adjusted selector. */
35325 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
35326 UNSPEC_VPERM);
35327 }
35328
35329 /* Copy into target, possibly by way of a register. */
35330 if (!REG_P (target))
35331 {
35332 emit_move_insn (tmp, unspec);
35333 unspec = tmp;
35334 }
35335
35336 emit_move_insn (target, unspec);
35337 }
35338
35339 /* Expand an Altivec constant permutation. Return true if we match
35340 an efficient implementation; false to fall back to VPERM.
35341
35342 OP0 and OP1 are the input vectors and TARGET is the output vector.
35343 SEL specifies the constant permutation vector. */
35344
35345 static bool
35346 altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
35347 const vec_perm_indices &sel)
35348 {
35349 struct altivec_perm_insn {
35350 HOST_WIDE_INT mask;
35351 enum insn_code impl;
35352 unsigned char perm[16];
35353 };
35354 static const struct altivec_perm_insn patterns[] = {
35355 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
35356 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
35357 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
35358 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
35359 { OPTION_MASK_ALTIVEC,
35360 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
35361 : CODE_FOR_altivec_vmrglb_direct),
35362 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
35363 { OPTION_MASK_ALTIVEC,
35364 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
35365 : CODE_FOR_altivec_vmrglh_direct),
35366 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
35367 { OPTION_MASK_ALTIVEC,
35368 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
35369 : CODE_FOR_altivec_vmrglw_direct),
35370 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
35371 { OPTION_MASK_ALTIVEC,
35372 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
35373 : CODE_FOR_altivec_vmrghb_direct),
35374 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
35375 { OPTION_MASK_ALTIVEC,
35376 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
35377 : CODE_FOR_altivec_vmrghh_direct),
35378 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
35379 { OPTION_MASK_ALTIVEC,
35380 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
35381 : CODE_FOR_altivec_vmrghw_direct),
35382 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
35383 { OPTION_MASK_P8_VECTOR,
35384 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
35385 : CODE_FOR_p8_vmrgow_v4sf_direct),
35386 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
35387 { OPTION_MASK_P8_VECTOR,
35388 (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
35389 : CODE_FOR_p8_vmrgew_v4sf_direct),
35390 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
35391 };
35392
35393 unsigned int i, j, elt, which;
35394 unsigned char perm[16];
35395 rtx x;
35396 bool one_vec;
35397
35398 /* Unpack the constant selector. */
35399 for (i = which = 0; i < 16; ++i)
35400 {
35401 elt = sel[i] & 31;
35402 which |= (elt < 16 ? 1 : 2);
35403 perm[i] = elt;
35404 }
35405
35406 /* Simplify the constant selector based on operands. */
35407 switch (which)
35408 {
35409 default:
35410 gcc_unreachable ();
35411
35412 case 3:
35413 one_vec = false;
35414 if (!rtx_equal_p (op0, op1))
35415 break;
35416 /* FALLTHRU */
35417
35418 case 2:
35419 for (i = 0; i < 16; ++i)
35420 perm[i] &= 15;
35421 op0 = op1;
35422 one_vec = true;
35423 break;
35424
35425 case 1:
35426 op1 = op0;
35427 one_vec = true;
35428 break;
35429 }
35430
35431 /* Look for splat patterns. */
35432 if (one_vec)
35433 {
35434 elt = perm[0];
35435
35436 for (i = 0; i < 16; ++i)
35437 if (perm[i] != elt)
35438 break;
35439 if (i == 16)
35440 {
35441 if (!BYTES_BIG_ENDIAN)
35442 elt = 15 - elt;
35443 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
35444 return true;
35445 }
35446
35447 if (elt % 2 == 0)
35448 {
35449 for (i = 0; i < 16; i += 2)
35450 if (perm[i] != elt || perm[i + 1] != elt + 1)
35451 break;
35452 if (i == 16)
35453 {
35454 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
35455 x = gen_reg_rtx (V8HImode);
35456 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
35457 GEN_INT (field)));
35458 emit_move_insn (target, gen_lowpart (V16QImode, x));
35459 return true;
35460 }
35461 }
35462
35463 if (elt % 4 == 0)
35464 {
35465 for (i = 0; i < 16; i += 4)
35466 if (perm[i] != elt
35467 || perm[i + 1] != elt + 1
35468 || perm[i + 2] != elt + 2
35469 || perm[i + 3] != elt + 3)
35470 break;
35471 if (i == 16)
35472 {
35473 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
35474 x = gen_reg_rtx (V4SImode);
35475 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
35476 GEN_INT (field)));
35477 emit_move_insn (target, gen_lowpart (V16QImode, x));
35478 return true;
35479 }
35480 }
35481 }
35482
35483 /* Look for merge and pack patterns. */
35484 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
35485 {
35486 bool swapped;
35487
35488 if ((patterns[j].mask & rs6000_isa_flags) == 0)
35489 continue;
35490
35491 elt = patterns[j].perm[0];
35492 if (perm[0] == elt)
35493 swapped = false;
35494 else if (perm[0] == elt + 16)
35495 swapped = true;
35496 else
35497 continue;
35498 for (i = 1; i < 16; ++i)
35499 {
35500 elt = patterns[j].perm[i];
35501 if (swapped)
35502 elt = (elt >= 16 ? elt - 16 : elt + 16);
35503 else if (one_vec && elt >= 16)
35504 elt -= 16;
35505 if (perm[i] != elt)
35506 break;
35507 }
35508 if (i == 16)
35509 {
35510 enum insn_code icode = patterns[j].impl;
35511 machine_mode omode = insn_data[icode].operand[0].mode;
35512 machine_mode imode = insn_data[icode].operand[1].mode;
35513
35514 /* For little-endian, don't use vpkuwum and vpkuhum if the
35515 underlying vector type is not V4SI and V8HI, respectively.
35516 For example, using vpkuwum with a V8HI picks up the even
35517 halfwords (BE numbering) when the even halfwords (LE
35518 numbering) are what we need. */
35519 if (!BYTES_BIG_ENDIAN
35520 && icode == CODE_FOR_altivec_vpkuwum_direct
35521 && ((REG_P (op0)
35522 && GET_MODE (op0) != V4SImode)
35523 || (SUBREG_P (op0)
35524 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
35525 continue;
35526 if (!BYTES_BIG_ENDIAN
35527 && icode == CODE_FOR_altivec_vpkuhum_direct
35528 && ((REG_P (op0)
35529 && GET_MODE (op0) != V8HImode)
35530 || (SUBREG_P (op0)
35531 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
35532 continue;
35533
35534 /* For little-endian, the two input operands must be swapped
35535 (or swapped back) to ensure proper right-to-left numbering
35536 from 0 to 2N-1. */
35537 if (swapped ^ !BYTES_BIG_ENDIAN)
35538 std::swap (op0, op1);
35539 if (imode != V16QImode)
35540 {
35541 op0 = gen_lowpart (imode, op0);
35542 op1 = gen_lowpart (imode, op1);
35543 }
35544 if (omode == V16QImode)
35545 x = target;
35546 else
35547 x = gen_reg_rtx (omode);
35548 emit_insn (GEN_FCN (icode) (x, op0, op1));
35549 if (omode != V16QImode)
35550 emit_move_insn (target, gen_lowpart (V16QImode, x));
35551 return true;
35552 }
35553 }
35554
35555 if (!BYTES_BIG_ENDIAN)
35556 {
35557 altivec_expand_vec_perm_const_le (target, op0, op1, sel);
35558 return true;
35559 }
35560
35561 return false;
35562 }
35563
35564 /* Expand a VSX Permute Doubleword constant permutation.
35565 Return true if we match an efficient implementation. */
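
/* Illustrative note: PERM0 and PERM1 are doubleword selectors in 0..3;
0/1 pick the doublewords of OP0 and 2/3 those of OP1, matching the
VEC_CONCAT/VEC_SELECT (xxpermdi-style) pattern built below. */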
35566
35567 static bool
35568 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
35569 unsigned char perm0, unsigned char perm1)
35570 {
35571 rtx x;
35572
35573 /* If both selectors come from the same operand, fold to single op. */
35574 if ((perm0 & 2) == (perm1 & 2))
35575 {
35576 if (perm0 & 2)
35577 op0 = op1;
35578 else
35579 op1 = op0;
35580 }
35581 /* If both operands are equal, fold to simpler permutation. */
35582 if (rtx_equal_p (op0, op1))
35583 {
35584 perm0 = perm0 & 1;
35585 perm1 = (perm1 & 1) + 2;
35586 }
35587 /* If the first selector comes from the second operand, swap. */
35588 else if (perm0 & 2)
35589 {
35590 if (perm1 & 2)
35591 return false;
35592 perm0 -= 2;
35593 perm1 += 2;
35594 std::swap (op0, op1);
35595 }
35596 /* If the second selector does not come from the second operand, fail. */
35597 else if ((perm1 & 2) == 0)
35598 return false;
35599
35600 /* Success! */
35601 if (target != NULL)
35602 {
35603 machine_mode vmode, dmode;
35604 rtvec v;
35605
35606 vmode = GET_MODE (target);
35607 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
35608 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
35609 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
35610 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
35611 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
35612 emit_insn (gen_rtx_SET (target, x));
35613 }
35614 return true;
35615 }
35616
35617 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
35618
35619 static bool
35620 rs6000_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0,
35621 rtx op1, const vec_perm_indices &sel)
35622 {
35623 bool testing_p = !target;
35624
35625 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35626 if (TARGET_ALTIVEC && testing_p)
35627 return true;
35628
35629 /* Check for ps_merge* or xxpermdi insns. */
35630 if ((vmode == V2DFmode || vmode == V2DImode) && VECTOR_MEM_VSX_P (vmode))
35631 {
35632 if (testing_p)
35633 {
35634 op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
35635 op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
35636 }
35637 if (rs6000_expand_vec_perm_const_1 (target, op0, op1, sel[0], sel[1]))
35638 return true;
35639 }
35640
35641 if (TARGET_ALTIVEC)
35642 {
35643 /* Force the target-independent code to lower to V16QImode. */
35644 if (vmode != V16QImode)
35645 return false;
35646 if (altivec_expand_vec_perm_const (target, op0, op1, sel))
35647 return true;
35648 }
35649
35650 return false;
35651 }
35652
35653 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
35654 OP0 and OP1 are the input vectors and TARGET is the output vector.
35655 PERM specifies the constant permutation vector. */
35656
35657 static void
35658 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
35659 machine_mode vmode, const vec_perm_builder &perm)
35660 {
35661 rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
35662 if (x != target)
35663 emit_move_insn (target, x);
35664 }
35665
35666 /* Expand an extract even operation. */
35667
35668 void
35669 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
35670 {
35671 machine_mode vmode = GET_MODE (target);
35672 unsigned i, nelt = GET_MODE_NUNITS (vmode);
35673 vec_perm_builder perm (nelt, nelt, 1);
35674
35675 for (i = 0; i < nelt; i++)
35676 perm.quick_push (i * 2);
35677
35678 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35679 }
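
/* For example (illustrative): with V4SI operands this builds the
selector {0, 2, 4, 6}, picking the even elements of OP0 and OP1
concatenated. */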
35680
35681 /* Expand a vector interleave operation. */
35682
35683 void
35684 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
35685 {
35686 machine_mode vmode = GET_MODE (target);
35687 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
35688 vec_perm_builder perm (nelt, nelt, 1);
35689
35690 high = (highp ? 0 : nelt / 2);
35691 for (i = 0; i < nelt / 2; i++)
35692 {
35693 perm.quick_push (i + high);
35694 perm.quick_push (i + nelt + high);
35695 }
35696
35697 rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
35698 }
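
/* For example (illustrative): with V4SI operands, HIGHP builds the
selector {0, 4, 1, 5} and !HIGHP builds {2, 6, 3, 7}. */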
35699
35700 /* Scale the V2DF vector SRC by two raised to the power SCALE, placing the result in TGT. */
35701 void
35702 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
35703 {
35704 HOST_WIDE_INT hwi_scale (scale);
35705 REAL_VALUE_TYPE r_pow;
35706 rtvec v = rtvec_alloc (2);
35707 rtx elt;
35708 rtx scale_vec = gen_reg_rtx (V2DFmode);
35709 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
35710 elt = const_double_from_real_value (r_pow, DFmode);
35711 RTVEC_ELT (v, 0) = elt;
35712 RTVEC_ELT (v, 1) = elt;
35713 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
35714 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
35715 }
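
/* Illustrative use: rs6000_scale_v2df (tgt, src, 4) multiplies both
double elements of SRC by 2^4 = 16.0 and stores the result in TGT. */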
35716
35717 /* Return an RTX representing where to find the function value of a
35718 function returning MODE. */
35719 static rtx
35720 rs6000_complex_function_value (machine_mode mode)
35721 {
35722 unsigned int regno;
35723 rtx r1, r2;
35724 machine_mode inner = GET_MODE_INNER (mode);
35725 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
35726
35727 if (TARGET_FLOAT128_TYPE
35728 && (mode == KCmode
35729 || (mode == TCmode && TARGET_IEEEQUAD)))
35730 regno = ALTIVEC_ARG_RETURN;
35731
35732 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35733 regno = FP_ARG_RETURN;
35734
35735 else
35736 {
35737 regno = GP_ARG_RETURN;
35738
35739 /* 32-bit is OK since it'll go in r3/r4. */
35740 if (TARGET_32BIT && inner_bytes >= 4)
35741 return gen_rtx_REG (mode, regno);
35742 }
35743
35744 if (inner_bytes >= 8)
35745 return gen_rtx_REG (mode, regno);
35746
35747 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
35748 const0_rtx);
35749 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
35750 GEN_INT (inner_bytes));
35751 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
35752 }
35753
35754 /* Return an rtx describing a return value of MODE as a PARALLEL
35755 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
35756 stride REG_STRIDE. */
35757
35758 static rtx
35759 rs6000_parallel_return (machine_mode mode,
35760 int n_elts, machine_mode elt_mode,
35761 unsigned int regno, unsigned int reg_stride)
35762 {
35763 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
35764
35765 int i;
35766 for (i = 0; i < n_elts; i++)
35767 {
35768 rtx r = gen_rtx_REG (elt_mode, regno);
35769 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
35770 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
35771 regno += reg_stride;
35772 }
35773
35774 return par;
35775 }
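
/* For example (illustrative): rs6000_parallel_return (DImode, 2, SImode,
GP_ARG_RETURN, 1) describes a 64-bit value returned as two SImode
pieces in r3 and r4, at byte offsets 0 and 4. */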
35776
35777 /* Target hook for TARGET_FUNCTION_VALUE.
35778
35779 An integer value is in r3 and a floating-point value is in fp1,
35780 unless -msoft-float. */
35781
35782 static rtx
35783 rs6000_function_value (const_tree valtype,
35784 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
35785 bool outgoing ATTRIBUTE_UNUSED)
35786 {
35787 machine_mode mode;
35788 unsigned int regno;
35789 machine_mode elt_mode;
35790 int n_elts;
35791
35792 /* Special handling for structs in darwin64. */
35793 if (TARGET_MACHO
35794 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
35795 {
35796 CUMULATIVE_ARGS valcum;
35797 rtx valret;
35798
35799 valcum.words = 0;
35800 valcum.fregno = FP_ARG_MIN_REG;
35801 valcum.vregno = ALTIVEC_ARG_MIN_REG;
35802 /* Do a trial code generation as if this were going to be passed as
35803 an argument; if any part goes in memory, we return NULL. */
35804 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
35805 if (valret)
35806 return valret;
35807 /* Otherwise fall through to standard ABI rules. */
35808 }
35809
35810 mode = TYPE_MODE (valtype);
35811
35812 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
35813 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
35814 {
35815 int first_reg, n_regs;
35816
35817 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
35818 {
35819 /* _Decimal128 must use even/odd register pairs. */
35820 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35821 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
35822 }
35823 else
35824 {
35825 first_reg = ALTIVEC_ARG_RETURN;
35826 n_regs = 1;
35827 }
35828
35829 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
35830 }
35831
35832 /* Some return value types need to be split under the 32-bit -mpowerpc64 ABI. */
35833 if (TARGET_32BIT && TARGET_POWERPC64)
35834 switch (mode)
35835 {
35836 default:
35837 break;
35838 case E_DImode:
35839 case E_SCmode:
35840 case E_DCmode:
35841 case E_TCmode:
35842 int count = GET_MODE_SIZE (mode) / 4;
35843 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
35844 }
35845
35846 if ((INTEGRAL_TYPE_P (valtype)
35847 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
35848 || POINTER_TYPE_P (valtype))
35849 mode = TARGET_32BIT ? SImode : DImode;
35850
35851 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35852 /* _Decimal128 must use an even/odd register pair. */
35853 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35854 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
35855 && !FLOAT128_VECTOR_P (mode))
35856 regno = FP_ARG_RETURN;
35857 else if (TREE_CODE (valtype) == COMPLEX_TYPE
35858 && targetm.calls.split_complex_arg)
35859 return rs6000_complex_function_value (mode);
35860 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35861 return register is used in both cases, and we won't see V2DImode/V2DFmode
35862 for pure altivec, combine the two cases. */
35863 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
35864 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
35865 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
35866 regno = ALTIVEC_ARG_RETURN;
35867 else
35868 regno = GP_ARG_RETURN;
35869
35870 return gen_rtx_REG (mode, regno);
35871 }
35872
35873 /* Define how to find the value returned by a library function
35874 assuming the value has mode MODE. */
35875 rtx
35876 rs6000_libcall_value (machine_mode mode)
35877 {
35878 unsigned int regno;
35879
35880 /* A long long return value needs to be split under the 32-bit -mpowerpc64 ABI. */
35881 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
35882 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
35883
35884 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
35885 /* _Decimal128 must use an even/odd register pair. */
35886 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
35887 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
35888 regno = FP_ARG_RETURN;
35889 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
35890 return register is used in both cases, and we won't see V2DImode/V2DFmode
35891 for pure altivec, combine the two cases. */
35892 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
35893 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
35894 regno = ALTIVEC_ARG_RETURN;
35895 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
35896 return rs6000_complex_function_value (mode);
35897 else
35898 regno = GP_ARG_RETURN;
35899
35900 return gen_rtx_REG (mode, regno);
35901 }
35902
35903 /* Compute register pressure classes. We implement the target hook to avoid
35904 IRA picking something like GEN_OR_FLOAT_REGS as a pressure class, which can
35905 lead to incorrect estimates of the number of available registers and
35906 therefore increased register pressure/spills. */
35907 static int
35908 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
35909 {
35910 int n;
35911
35912 n = 0;
35913 pressure_classes[n++] = GENERAL_REGS;
35914 if (TARGET_VSX)
35915 pressure_classes[n++] = VSX_REGS;
35916 else
35917 {
35918 if (TARGET_ALTIVEC)
35919 pressure_classes[n++] = ALTIVEC_REGS;
35920 if (TARGET_HARD_FLOAT)
35921 pressure_classes[n++] = FLOAT_REGS;
35922 }
35923 pressure_classes[n++] = CR_REGS;
35924 pressure_classes[n++] = SPECIAL_REGS;
35925
35926 return n;
35927 }
35928
35929 /* Given FROM and TO register numbers, say whether this elimination is allowed.
35930 Frame pointer elimination is automatically handled.
35931
35932 For the RS/6000, if frame pointer elimination is being done, we would like
35933 to convert ap into fp, not sp.
35934
35935 We need r30 if -mminimal-toc was specified, and there are constant pool
35936 references. */
35937
35938 static bool
35939 rs6000_can_eliminate (const int from, const int to)
35940 {
35941 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
35942 ? ! frame_pointer_needed
35943 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
35944 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
35945 || constant_pool_empty_p ()
35946 : true);
35947 }
35948
35949 /* Define the offset between two registers, FROM to be eliminated and its
35950 replacement TO, at the start of a routine. */
35951 HOST_WIDE_INT
35952 rs6000_initial_elimination_offset (int from, int to)
35953 {
35954 rs6000_stack_t *info = rs6000_stack_info ();
35955 HOST_WIDE_INT offset;
35956
35957 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35958 offset = info->push_p ? 0 : -info->total_size;
35959 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35960 {
35961 offset = info->push_p ? 0 : -info->total_size;
35962 if (FRAME_GROWS_DOWNWARD)
35963 offset += info->fixed_size + info->vars_size + info->parm_size;
35964 }
35965 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35966 offset = FRAME_GROWS_DOWNWARD
35967 ? info->fixed_size + info->vars_size + info->parm_size
35968 : 0;
35969 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
35970 offset = info->total_size;
35971 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
35972 offset = info->push_p ? info->total_size : 0;
35973 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
35974 offset = 0;
35975 else
35976 gcc_unreachable ();
35977
35978 return offset;
35979 }
35980
35981 /* Fill in sizes of registers used by unwinder. */
35982
35983 static void
35984 rs6000_init_dwarf_reg_sizes_extra (tree address)
35985 {
35986 if (TARGET_MACHO && ! TARGET_ALTIVEC)
35987 {
35988 int i;
35989 machine_mode mode = TYPE_MODE (char_type_node);
35990 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
35991 rtx mem = gen_rtx_MEM (BLKmode, addr);
35992 rtx value = gen_int_mode (16, mode);
35993
35994 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
35995 The unwinder still needs to know the size of Altivec registers. */
35996
35997 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
35998 {
35999 int column = DWARF_REG_TO_UNWIND_COLUMN
36000 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
36001 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
36002
36003 emit_move_insn (adjust_address (mem, mode, offset), value);
36004 }
36005 }
36006 }
36007
36008 /* Map internal gcc register numbers to debug format register numbers.
36009 FORMAT specifies the type of debug register number to use:
36010 0 -- debug information, except for frame-related sections
36011 1 -- DWARF .debug_frame section
36012 2 -- DWARF .eh_frame section */
36013
36014 unsigned int
36015 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
36016 {
36017 /* On some platforms, we use the standard DWARF register
36018 numbering for .debug_info and .debug_frame. */
36019 if ((format == 0 && write_symbols == DWARF2_DEBUG) || format == 1)
36020 {
36021 #ifdef RS6000_USE_DWARF_NUMBERING
36022 if (regno <= 31)
36023 return regno;
36024 if (FP_REGNO_P (regno))
36025 return regno - FIRST_FPR_REGNO + 32;
36026 if (ALTIVEC_REGNO_P (regno))
36027 return regno - FIRST_ALTIVEC_REGNO + 1124;
36028 if (regno == LR_REGNO)
36029 return 108;
36030 if (regno == CTR_REGNO)
36031 return 109;
36032 if (regno == CA_REGNO)
36033 return 101; /* XER */
36034 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
36035 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
36036 The actual code emitted saves the whole of CR, so we map CR2_REGNO
36037 to the DWARF reg for CR. */
36038 if (format == 1 && regno == CR2_REGNO)
36039 return 64;
36040 if (CR_REGNO_P (regno))
36041 return regno - CR0_REGNO + 86;
36042 if (regno == VRSAVE_REGNO)
36043 return 356;
36044 if (regno == VSCR_REGNO)
36045 return 67;
36046
36047 /* These do not make much sense. */
36048 if (regno == FRAME_POINTER_REGNUM)
36049 return 111;
36050 if (regno == ARG_POINTER_REGNUM)
36051 return 67;
36052 if (regno == 64)
36053 return 100;
36054
36055 gcc_unreachable ();
36056 #endif
36057 }
36058
36059 /* We use the GCC 7 (and before) internal number for non-DWARF debug
36060 information, and also for .eh_frame. */
36061 /* Translate the regnos to their numbers in GCC 7 (and before). */
36062 if (regno <= 31)
36063 return regno;
36064 if (FP_REGNO_P (regno))
36065 return regno - FIRST_FPR_REGNO + 32;
36066 if (ALTIVEC_REGNO_P (regno))
36067 return regno - FIRST_ALTIVEC_REGNO + 77;
36068 if (regno == LR_REGNO)
36069 return 65;
36070 if (regno == CTR_REGNO)
36071 return 66;
36072 if (regno == CA_REGNO)
36073 return 76; /* XER */
36074 if (CR_REGNO_P (regno))
36075 return regno - CR0_REGNO + 68;
36076 if (regno == VRSAVE_REGNO)
36077 return 109;
36078 if (regno == VSCR_REGNO)
36079 return 110;
36080
36081 if (regno == FRAME_POINTER_REGNUM)
36082 return 111;
36083 if (regno == ARG_POINTER_REGNUM)
36084 return 67;
36085 if (regno == 64)
36086 return 64;
36087
36088 gcc_unreachable ();
36089 }
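
/* For example (illustrative): under RS6000_USE_DWARF_NUMBERING, the LR
maps to 108 in .debug_frame (FORMAT 1) but keeps the legacy 65 in
.eh_frame (FORMAT 2). */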
36090
36091 /* target hook eh_return_filter_mode */
36092 static scalar_int_mode
36093 rs6000_eh_return_filter_mode (void)
36094 {
36095 return TARGET_32BIT ? SImode : word_mode;
36096 }
36097
36098 /* Target hook for translate_mode_attribute. */
36099 static machine_mode
36100 rs6000_translate_mode_attribute (machine_mode mode)
36101 {
36102 if ((FLOAT128_IEEE_P (mode)
36103 && ieee128_float_type_node == long_double_type_node)
36104 || (FLOAT128_IBM_P (mode)
36105 && ibm128_float_type_node == long_double_type_node))
36106 return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
36107 return mode;
36108 }
36109
36110 /* Target hook for scalar_mode_supported_p. */
36111 static bool
36112 rs6000_scalar_mode_supported_p (scalar_mode mode)
36113 {
36114 /* -m32 does not support TImode. This is the default, from
36115 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
36116 same ABI as for -m32. But default_scalar_mode_supported_p allows
36117 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
36118 for -mpowerpc64. */
36119 if (TARGET_32BIT && mode == TImode)
36120 return false;
36121
36122 if (DECIMAL_FLOAT_MODE_P (mode))
36123 return default_decimal_float_supported_p ();
36124 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
36125 return true;
36126 else
36127 return default_scalar_mode_supported_p (mode);
36128 }
36129
36130 /* Target hook for vector_mode_supported_p. */
36131 static bool
36132 rs6000_vector_mode_supported_p (machine_mode mode)
36133 {
36134 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
36135 128-bit, the compiler might try to widen IEEE 128-bit to IBM
36136 double-double. */
36137 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
36138 return true;
36139
36140 else
36141 return false;
36142 }
36143
36144 /* Target hook for floatn_mode. */
36145 static opt_scalar_float_mode
36146 rs6000_floatn_mode (int n, bool extended)
36147 {
36148 if (extended)
36149 {
36150 switch (n)
36151 {
36152 case 32:
36153 return DFmode;
36154
36155 case 64:
36156 if (TARGET_FLOAT128_TYPE)
36157 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36158 else
36159 return opt_scalar_float_mode ();
36160
36161 case 128:
36162 return opt_scalar_float_mode ();
36163
36164 default:
36165 /* Those are the only valid _FloatNx types. */
36166 gcc_unreachable ();
36167 }
36168 }
36169 else
36170 {
36171 switch (n)
36172 {
36173 case 32:
36174 return SFmode;
36175
36176 case 64:
36177 return DFmode;
36178
36179 case 128:
36180 if (TARGET_FLOAT128_TYPE)
36181 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36182 else
36183 return opt_scalar_float_mode ();
36184
36185 default:
36186 return opt_scalar_float_mode ();
36187 }
36188 }
36189
36190 }
36191
36192 /* Target hook for c_mode_for_suffix. */
36193 static machine_mode
36194 rs6000_c_mode_for_suffix (char suffix)
36195 {
36196 if (TARGET_FLOAT128_TYPE)
36197 {
36198 if (suffix == 'q' || suffix == 'Q')
36199 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
36200
36201 /* At the moment, we are not defining a suffix for IBM extended double.
36202 If/when the default for -mabi=ieeelongdouble is changed, and we want
36203 to support __ibm128 constants in legacy library code, we may need to
36204 re-evaluate this decision. Currently, c-lex.c only supports 'w' and
36205 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
36206 __float80 constants. */
36207 }
36208
36209 return VOIDmode;
36210 }
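
/* Illustrative use: with -mfloat128, a literal such as 1.0q or 1.0Q is
given KFmode, or TFmode when long double is IEEE 128-bit. */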
36211
36212 /* Target hook for invalid_arg_for_unprototyped_fn. */
36213 static const char *
36214 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
36215 {
36216 return (!rs6000_darwin64_abi
36217 && typelist == 0
36218 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
36219 && (funcdecl == NULL_TREE
36220 || (TREE_CODE (funcdecl) == FUNCTION_DECL
36221 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
36222 ? N_("AltiVec argument passed to unprototyped function")
36223 : NULL;
36224 }
36225
36226 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
36227 setup by using __stack_chk_fail_local hidden function instead of
36228 calling __stack_chk_fail directly. Otherwise it is better to call
36229 __stack_chk_fail directly. */
36230
36231 static tree ATTRIBUTE_UNUSED
36232 rs6000_stack_protect_fail (void)
36233 {
36234 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
36235 ? default_hidden_stack_protect_fail ()
36236 : default_external_stack_protect_fail ();
36237 }
36238
36239 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
36240
36241 #if TARGET_ELF
36242 static unsigned HOST_WIDE_INT
36243 rs6000_asan_shadow_offset (void)
36244 {
36245 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
36246 }
36247 #endif
36248 \f
36249 /* Mask options that we want to support inside of attribute((target)) and
36250 #pragma GCC target operations. Note, we do not include things like
36251 64/32-bit, endianness, hard/soft floating point, etc. that would have
36252 different calling sequences. */
36253
36254 struct rs6000_opt_mask {
36255 const char *name; /* option name */
36256 HOST_WIDE_INT mask; /* mask to set */
36257 bool invert; /* invert sense of mask */
36258 bool valid_target; /* option is a target option */
36259 };
36260
36261 static struct rs6000_opt_mask const rs6000_opt_masks[] =
36262 {
36263 { "altivec", OPTION_MASK_ALTIVEC, false, true },
36264 { "cmpb", OPTION_MASK_CMPB, false, true },
36265 { "crypto", OPTION_MASK_CRYPTO, false, true },
36266 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
36267 { "dlmzb", OPTION_MASK_DLMZB, false, true },
36268 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
36269 false, true },
36270 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, true },
36271 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, true },
36272 { "fprnd", OPTION_MASK_FPRND, false, true },
36273 { "hard-dfp", OPTION_MASK_DFP, false, true },
36274 { "htm", OPTION_MASK_HTM, false, true },
36275 { "isel", OPTION_MASK_ISEL, false, true },
36276 { "mfcrf", OPTION_MASK_MFCRF, false, true },
36277 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
36278 { "modulo", OPTION_MASK_MODULO, false, true },
36279 { "mulhw", OPTION_MASK_MULHW, false, true },
36280 { "multiple", OPTION_MASK_MULTIPLE, false, true },
36281 { "popcntb", OPTION_MASK_POPCNTB, false, true },
36282 { "popcntd", OPTION_MASK_POPCNTD, false, true },
36283 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
36284 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
36285 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
36286 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
36287 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
36288 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
36289 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
36290 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
36291 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
36292 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
36293 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
36294 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
36295 { "string", 0, false, true },
36296 { "update", OPTION_MASK_NO_UPDATE, true, true },
36297 { "vsx", OPTION_MASK_VSX, false, true },
36298 #ifdef OPTION_MASK_64BIT
36299 #if TARGET_AIX_OS
36300 { "aix64", OPTION_MASK_64BIT, false, false },
36301 { "aix32", OPTION_MASK_64BIT, true, false },
36302 #else
36303 { "64", OPTION_MASK_64BIT, false, false },
36304 { "32", OPTION_MASK_64BIT, true, false },
36305 #endif
36306 #endif
36307 #ifdef OPTION_MASK_EABI
36308 { "eabi", OPTION_MASK_EABI, false, false },
36309 #endif
36310 #ifdef OPTION_MASK_LITTLE_ENDIAN
36311 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
36312 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
36313 #endif
36314 #ifdef OPTION_MASK_RELOCATABLE
36315 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
36316 #endif
36317 #ifdef OPTION_MASK_STRICT_ALIGN
36318 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
36319 #endif
36320 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
36321 { "string", 0, false, false },
36322 };
36323
36324 /* Builtin mask mapping for printing the flags. */
36325 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
36326 {
36327 { "altivec", RS6000_BTM_ALTIVEC, false, false },
36328 { "vsx", RS6000_BTM_VSX, false, false },
36329 { "fre", RS6000_BTM_FRE, false, false },
36330 { "fres", RS6000_BTM_FRES, false, false },
36331 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
36332 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
36333 { "popcntd", RS6000_BTM_POPCNTD, false, false },
36334 { "cell", RS6000_BTM_CELL, false, false },
36335 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
36336 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
36337 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
36338 { "crypto", RS6000_BTM_CRYPTO, false, false },
36339 { "htm", RS6000_BTM_HTM, false, false },
36340 { "hard-dfp", RS6000_BTM_DFP, false, false },
36341 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
36342 { "long-double-128", RS6000_BTM_LDBL128, false, false },
36343 { "powerpc64", RS6000_BTM_POWERPC64, false, false },
36344 { "float128", RS6000_BTM_FLOAT128, false, false },
36345 { "float128-hw", RS6000_BTM_FLOAT128_HW, false, false },
36346 };
36347
36348 /* Option variables that we want to support inside attribute((target)) and
36349 #pragma GCC target operations. */
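
/* For example (illustrative), "longcall" or "no-sched-prolog" in a target
   attribute or pragma sets or clears the int-valued option recorded at the
   offsets below.  */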
36350
36351 struct rs6000_opt_var {
36352 const char *name; /* option name */
36353 size_t global_offset; /* offset of the option in global_options. */
36354 size_t target_offset; /* offset of the option in target options. */
36355 };
36356
36357 static struct rs6000_opt_var const rs6000_opt_vars[] =
36358 {
36359 { "friz",
36360 offsetof (struct gcc_options, x_TARGET_FRIZ),
36361 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
36362 { "avoid-indexed-addresses",
36363 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
36364 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
36365 { "longcall",
36366 offsetof (struct gcc_options, x_rs6000_default_long_calls),
36367 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
36368 { "optimize-swaps",
36369 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
36370 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
36371 { "allow-movmisalign",
36372 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
36373 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
36374 { "sched-groups",
36375 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
36376 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
36377 { "always-hint",
36378 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
36379 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
36380 { "align-branch-targets",
36381 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
36382 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
36383 { "tls-markers",
36384 offsetof (struct gcc_options, x_tls_markers),
36385 offsetof (struct cl_target_option, x_tls_markers), },
36386 { "sched-prolog",
36387 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36388 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36389 { "sched-epilog",
36390 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
36391 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
36392 { "speculate-indirect-jumps",
36393 offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
36394 offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
36395 };
36396
36397 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
36398 parsing. Return true if there were no errors. */
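
/* For instance (hypothetical strings), ARGS may hold "cpu=power8,htm" or
   "no-vsx"; each comma-separated piece is matched against "cpu=", "tune=",
   the rs6000_opt_masks table, and finally rs6000_opt_vars, with a leading
   "no-" inverting the sense of the option.  */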
36399
36400 static bool
36401 rs6000_inner_target_options (tree args, bool attr_p)
36402 {
36403 bool ret = true;
36404
36405 if (args == NULL_TREE)
36406 ;
36407
36408 else if (TREE_CODE (args) == STRING_CST)
36409 {
36410 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36411 char *q;
36412
36413 while ((q = strtok (p, ",")) != NULL)
36414 {
36415 bool error_p = false;
36416 bool not_valid_p = false;
36417 const char *cpu_opt = NULL;
36418
36419 p = NULL;
36420 if (strncmp (q, "cpu=", 4) == 0)
36421 {
36422 int cpu_index = rs6000_cpu_name_lookup (q+4);
36423 if (cpu_index >= 0)
36424 rs6000_cpu_index = cpu_index;
36425 else
36426 {
36427 error_p = true;
36428 cpu_opt = q+4;
36429 }
36430 }
36431 else if (strncmp (q, "tune=", 5) == 0)
36432 {
36433 int tune_index = rs6000_cpu_name_lookup (q+5);
36434 if (tune_index >= 0)
36435 rs6000_tune_index = tune_index;
36436 else
36437 {
36438 error_p = true;
36439 cpu_opt = q+5;
36440 }
36441 }
36442 else
36443 {
36444 size_t i;
36445 bool invert = false;
36446 char *r = q;
36447
36448 error_p = true;
36449 if (strncmp (r, "no-", 3) == 0)
36450 {
36451 invert = true;
36452 r += 3;
36453 }
36454
36455 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
36456 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
36457 {
36458 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
36459
36460 if (!rs6000_opt_masks[i].valid_target)
36461 not_valid_p = true;
36462 else
36463 {
36464 error_p = false;
36465 rs6000_isa_flags_explicit |= mask;
36466
36467 /* VSX needs altivec, so -mvsx automagically sets
36468 altivec and disables -mavoid-indexed-addresses. */
36469 if (!invert)
36470 {
36471 if (mask == OPTION_MASK_VSX)
36472 {
36473 mask |= OPTION_MASK_ALTIVEC;
36474 TARGET_AVOID_XFORM = 0;
36475 }
36476 }
36477
36478 if (rs6000_opt_masks[i].invert)
36479 invert = !invert;
36480
36481 if (invert)
36482 rs6000_isa_flags &= ~mask;
36483 else
36484 rs6000_isa_flags |= mask;
36485 }
36486 break;
36487 }
36488
36489 if (error_p && !not_valid_p)
36490 {
36491 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
36492 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
36493 {
36494 size_t j = rs6000_opt_vars[i].global_offset;
36495 *((int *) ((char *)&global_options + j)) = !invert;
36496 error_p = false;
36497 not_valid_p = false;
36498 break;
36499 }
36500 }
36501 }
36502
36503 if (error_p)
36504 {
36505 const char *eprefix, *esuffix;
36506
36507 ret = false;
36508 if (attr_p)
36509 {
36510 eprefix = "__attribute__((__target__(";
36511 esuffix = ")))";
36512 }
36513 else
36514 {
36515 eprefix = "#pragma GCC target ";
36516 esuffix = "";
36517 }
36518
36519 if (cpu_opt)
36520 error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
36521 q, esuffix);
36522 else if (not_valid_p)
36523 error ("%s%qs%s is not allowed", eprefix, q, esuffix);
36524 else
36525 error ("%s%qs%s is invalid", eprefix, q, esuffix);
36526 }
36527 }
36528 }
36529
36530 else if (TREE_CODE (args) == TREE_LIST)
36531 {
36532 do
36533 {
36534 tree value = TREE_VALUE (args);
36535 if (value)
36536 {
36537 bool ret2 = rs6000_inner_target_options (value, attr_p);
36538 if (!ret2)
36539 ret = false;
36540 }
36541 args = TREE_CHAIN (args);
36542 }
36543 while (args != NULL_TREE);
36544 }
36545
36546 else
36547 {
36548 error ("attribute %<target%> argument not a string");
36549 return false;
36550 }
36551
36552 return ret;
36553 }
36554
36555 /* Print out the target options as a list for -mdebug=target. */
36556
36557 static void
36558 rs6000_debug_target_options (tree args, const char *prefix)
36559 {
36560 if (args == NULL_TREE)
36561 fprintf (stderr, "%s<NULL>", prefix);
36562
36563 else if (TREE_CODE (args) == STRING_CST)
36564 {
36565 char *p = ASTRDUP (TREE_STRING_POINTER (args));
36566 char *q;
36567
36568 while ((q = strtok (p, ",")) != NULL)
36569 {
36570 p = NULL;
36571 fprintf (stderr, "%s\"%s\"", prefix, q);
36572 prefix = ", ";
36573 }
36574 }
36575
36576 else if (TREE_CODE (args) == TREE_LIST)
36577 {
36578 do
36579 {
36580 tree value = TREE_VALUE (args);
36581 if (value)
36582 {
36583 rs6000_debug_target_options (value, prefix);
36584 prefix = ", ";
36585 }
36586 args = TREE_CHAIN (args);
36587 }
36588 while (args != NULL_TREE);
36589 }
36590
36591 else
36592 gcc_unreachable ();
36593
36594 return;
36595 }
36596
36597 \f
36598 /* Hook to validate attribute((target("..."))). */
36599
36600 static bool
36601 rs6000_valid_attribute_p (tree fndecl,
36602 tree ARG_UNUSED (name),
36603 tree args,
36604 int flags)
36605 {
36606 struct cl_target_option cur_target;
36607 bool ret;
36608 tree old_optimize;
36609 tree new_target, new_optimize;
36610 tree func_optimize;
36611
36612 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
36613
36614 if (TARGET_DEBUG_TARGET)
36615 {
36616 tree tname = DECL_NAME (fndecl);
36617 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
36618 if (tname)
36619 fprintf (stderr, "function: %.*s\n",
36620 (int) IDENTIFIER_LENGTH (tname),
36621 IDENTIFIER_POINTER (tname));
36622 else
36623 fprintf (stderr, "function: unknown\n");
36624
36625 fprintf (stderr, "args:");
36626 rs6000_debug_target_options (args, " ");
36627 fprintf (stderr, "\n");
36628
36629 if (flags)
36630 fprintf (stderr, "flags: 0x%x\n", flags);
36631
36632 fprintf (stderr, "--------------------\n");
36633 }
36634
36635 /* attribute((target("default"))) does nothing, beyond
36636 affecting multi-versioning. */
36637 if (TREE_VALUE (args)
36638 && TREE_CODE (TREE_VALUE (args)) == STRING_CST
36639 && TREE_CHAIN (args) == NULL_TREE
36640 && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
36641 return true;
36642
36643 old_optimize = build_optimization_node (&global_options);
36644 func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
36645
36646 /* If the function changed the optimization levels as well as setting target
36647 options, start with the optimizations specified. */
36648 if (func_optimize && func_optimize != old_optimize)
36649 cl_optimization_restore (&global_options,
36650 TREE_OPTIMIZATION (func_optimize));
36651
36652 /* The target attributes may also change some optimization flags, so update
36653 the optimization options if necessary. */
36654 cl_target_option_save (&cur_target, &global_options);
36655 rs6000_cpu_index = rs6000_tune_index = -1;
36656 ret = rs6000_inner_target_options (args, true);
36657
36658 /* Set up any additional state. */
36659 if (ret)
36660 {
36661 ret = rs6000_option_override_internal (false);
36662 new_target = build_target_option_node (&global_options);
36663 }
36664 else
36665 new_target = NULL;
36666
36667 new_optimize = build_optimization_node (&global_options);
36668
36669 if (!new_target)
36670 ret = false;
36671
36672 else if (fndecl)
36673 {
36674 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
36675
36676 if (old_optimize != new_optimize)
36677 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
36678 }
36679
36680 cl_target_option_restore (&global_options, &cur_target);
36681
36682 if (old_optimize != new_optimize)
36683 cl_optimization_restore (&global_options,
36684 TREE_OPTIMIZATION (old_optimize));
36685
36686 return ret;
36687 }
36688
36689 \f
36690 /* Hook to validate the current #pragma GCC target and set the state, and
36691 update the macros based on what was changed. If ARGS is NULL, then
36692 POP_TARGET is used to reset the options. */
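
/* A typical use from C code is (illustration only):

     #pragma GCC push_options
     #pragma GCC target ("power9-vector")
     ... code compiled with ISA 3.0 vector support ...
     #pragma GCC pop_options

   where the pop arrives here with ARGS == NULL and POP_TARGET set to the
   previously saved option node.  */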
36693
36694 bool
36695 rs6000_pragma_target_parse (tree args, tree pop_target)
36696 {
36697 tree prev_tree = build_target_option_node (&global_options);
36698 tree cur_tree;
36699 struct cl_target_option *prev_opt, *cur_opt;
36700 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
36701 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
36702
36703 if (TARGET_DEBUG_TARGET)
36704 {
36705 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
36706 fprintf (stderr, "args:");
36707 rs6000_debug_target_options (args, " ");
36708 fprintf (stderr, "\n");
36709
36710 if (pop_target)
36711 {
36712 fprintf (stderr, "pop_target:\n");
36713 debug_tree (pop_target);
36714 }
36715 else
36716 fprintf (stderr, "pop_target: <NULL>\n");
36717
36718 fprintf (stderr, "--------------------\n");
36719 }
36720
36721 if (! args)
36722 {
36723 cur_tree = ((pop_target)
36724 ? pop_target
36725 : target_option_default_node);
36726 cl_target_option_restore (&global_options,
36727 TREE_TARGET_OPTION (cur_tree));
36728 }
36729 else
36730 {
36731 rs6000_cpu_index = rs6000_tune_index = -1;
36732 if (!rs6000_inner_target_options (args, false)
36733 || !rs6000_option_override_internal (false)
36734 || (cur_tree = build_target_option_node (&global_options))
36735 == NULL_TREE)
36736 {
36737 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
36738 fprintf (stderr, "invalid pragma\n");
36739
36740 return false;
36741 }
36742 }
36743
36744 target_option_current_node = cur_tree;
36745 rs6000_activate_target_options (target_option_current_node);
36746
36747 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
36748 change the macros that are defined. */
36749 if (rs6000_target_modify_macros_ptr)
36750 {
36751 prev_opt = TREE_TARGET_OPTION (prev_tree);
36752 prev_bumask = prev_opt->x_rs6000_builtin_mask;
36753 prev_flags = prev_opt->x_rs6000_isa_flags;
36754
36755 cur_opt = TREE_TARGET_OPTION (cur_tree);
36756 cur_flags = cur_opt->x_rs6000_isa_flags;
36757 cur_bumask = cur_opt->x_rs6000_builtin_mask;
36758
36759 diff_bumask = (prev_bumask ^ cur_bumask);
36760 diff_flags = (prev_flags ^ cur_flags);
36761
36762 if ((diff_flags != 0) || (diff_bumask != 0))
36763 {
36764 /* Delete old macros. */
36765 rs6000_target_modify_macros_ptr (false,
36766 prev_flags & diff_flags,
36767 prev_bumask & diff_bumask);
36768
36769 /* Define new macros. */
36770 rs6000_target_modify_macros_ptr (true,
36771 cur_flags & diff_flags,
36772 cur_bumask & diff_bumask);
36773 }
36774 }
36775
36776 return true;
36777 }
36778
36779 \f
36780 /* Remember the last target of rs6000_set_current_function. */
36781 static GTY(()) tree rs6000_previous_fndecl;
36782
36783 /* Restore target's globals from NEW_TREE and invalidate the
36784 rs6000_previous_fndecl cache. */
36785
36786 void
36787 rs6000_activate_target_options (tree new_tree)
36788 {
36789 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
36790 if (TREE_TARGET_GLOBALS (new_tree))
36791 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
36792 else if (new_tree == target_option_default_node)
36793 restore_target_globals (&default_target_globals);
36794 else
36795 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
36796 rs6000_previous_fndecl = NULL_TREE;
36797 }
36798
36799 /* Establish appropriate back-end context for processing the function
36800 FNDECL. The argument might be NULL to indicate processing at top
36801 level, outside of any function scope. */
36802 static void
36803 rs6000_set_current_function (tree fndecl)
36804 {
36805 if (TARGET_DEBUG_TARGET)
36806 {
36807 fprintf (stderr, "\n==================== rs6000_set_current_function");
36808
36809 if (fndecl)
36810 fprintf (stderr, ", fndecl %s (%p)",
36811 (DECL_NAME (fndecl)
36812 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
36813 : "<unknown>"), (void *)fndecl);
36814
36815 if (rs6000_previous_fndecl)
36816 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
36817
36818 fprintf (stderr, "\n");
36819 }
36820
36821 /* Only change the context if the function changes. This hook is called
36822 several times in the course of compiling a function, and we don't want to
36823 slow things down too much or call target_reinit when it isn't safe. */
36824 if (fndecl == rs6000_previous_fndecl)
36825 return;
36826
36827 tree old_tree;
36828 if (rs6000_previous_fndecl == NULL_TREE)
36829 old_tree = target_option_current_node;
36830 else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
36831 old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
36832 else
36833 old_tree = target_option_default_node;
36834
36835 tree new_tree;
36836 if (fndecl == NULL_TREE)
36837 {
36838 if (old_tree != target_option_current_node)
36839 new_tree = target_option_current_node;
36840 else
36841 new_tree = NULL_TREE;
36842 }
36843 else
36844 {
36845 new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
36846 if (new_tree == NULL_TREE)
36847 new_tree = target_option_default_node;
36848 }
36849
36850 if (TARGET_DEBUG_TARGET)
36851 {
36852 if (new_tree)
36853 {
36854 fprintf (stderr, "\nnew fndecl target specific options:\n");
36855 debug_tree (new_tree);
36856 }
36857
36858 if (old_tree)
36859 {
36860 fprintf (stderr, "\nold fndecl target specific options:\n");
36861 debug_tree (old_tree);
36862 }
36863
36864 if (old_tree != NULL_TREE || new_tree != NULL_TREE)
36865 fprintf (stderr, "--------------------\n");
36866 }
36867
36868 if (new_tree && old_tree != new_tree)
36869 rs6000_activate_target_options (new_tree);
36870
36871 if (fndecl)
36872 rs6000_previous_fndecl = fndecl;
36873 }
36874
36875 \f
36876 /* Save the current options */
36877
36878 static void
36879 rs6000_function_specific_save (struct cl_target_option *ptr,
36880 struct gcc_options *opts)
36881 {
36882 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
36883 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
36884 }
36885
36886 /* Restore the current options */
36887
36888 static void
36889 rs6000_function_specific_restore (struct gcc_options *opts,
36890 struct cl_target_option *ptr)
36891
36892 {
36893 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
36894 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
36895 (void) rs6000_option_override_internal (false);
36896 }
36897
36898 /* Print the current options */
36899
36900 static void
36901 rs6000_function_specific_print (FILE *file, int indent,
36902 struct cl_target_option *ptr)
36903 {
36904 rs6000_print_isa_options (file, indent, "Isa options set",
36905 ptr->x_rs6000_isa_flags);
36906
36907 rs6000_print_isa_options (file, indent, "Isa options explicit",
36908 ptr->x_rs6000_isa_flags_explicit);
36909 }
36910
36911 /* Helper function to print the current isa or misc options on a line. */
36912
36913 static void
36914 rs6000_print_options_internal (FILE *file,
36915 int indent,
36916 const char *string,
36917 HOST_WIDE_INT flags,
36918 const char *prefix,
36919 const struct rs6000_opt_mask *opts,
36920 size_t num_elements)
36921 {
36922 size_t i;
36923 size_t start_column = 0;
36924 size_t cur_column;
36925 size_t max_column = 120;
36926 size_t prefix_len = strlen (prefix);
36927 size_t comma_len = 0;
36928 const char *comma = "";
36929
36930 if (indent)
36931 start_column += fprintf (file, "%*s", indent, "");
36932
36933 if (!flags)
36934 {
36935 fprintf (file, DEBUG_FMT_S, string, "<none>");
36936 return;
36937 }
36938
36939 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
36940
36941 /* Print the various mask options. */
36942 cur_column = start_column;
36943 for (i = 0; i < num_elements; i++)
36944 {
36945 bool invert = opts[i].invert;
36946 const char *name = opts[i].name;
36947 const char *no_str = "";
36948 HOST_WIDE_INT mask = opts[i].mask;
36949 size_t len = comma_len + prefix_len + strlen (name);
36950
36951 if (!invert)
36952 {
36953 if ((flags & mask) == 0)
36954 {
36955 no_str = "no-";
36956 len += sizeof ("no-") - 1;
36957 }
36958
36959 flags &= ~mask;
36960 }
36961
36962 else
36963 {
36964 if ((flags & mask) != 0)
36965 {
36966 no_str = "no-";
36967 len += sizeof ("no-") - 1;
36968 }
36969
36970 flags |= mask;
36971 }
36972
36973 cur_column += len;
36974 if (cur_column > max_column)
36975 {
36976 fprintf (file, ", \\\n%*s", (int) start_column, "");
36977 cur_column = start_column + len;
36978 comma = "";
36979 }
36980
36981 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
36982 comma = ", ";
36983 comma_len = sizeof (", ") - 1;
36984 }
36985
36986 fputs ("\n", file);
36987 }
36988
36989 /* Helper function to print the current isa options on a line. */
36990
36991 static void
36992 rs6000_print_isa_options (FILE *file, int indent, const char *string,
36993 HOST_WIDE_INT flags)
36994 {
36995 rs6000_print_options_internal (file, indent, string, flags, "-m",
36996 &rs6000_opt_masks[0],
36997 ARRAY_SIZE (rs6000_opt_masks));
36998 }
36999
37000 static void
37001 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
37002 HOST_WIDE_INT flags)
37003 {
37004 rs6000_print_options_internal (file, indent, string, flags, "",
37005 &rs6000_builtin_mask_names[0],
37006 ARRAY_SIZE (rs6000_builtin_mask_names));
37007 }
37008
37009 /* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
37010 2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
37011 -mupper-regs-df, etc.).
37012
37013 If the user used -mno-power8-vector, we need to turn off all of the implicit
37014 ISA 2.07 and 3.0 options that relate to the vector unit.
37015
37016 If the user used -mno-power9-vector, we need to turn off all of the implicit
37017 ISA 3.0 options that relate to the vector unit.
37018
37019 This function does not handle explicit options such as the user specifying
37020 -mdirect-move. These are handled in rs6000_option_override_internal, and
37021 the appropriate error is given if needed.
37022
37023 We return a mask of all of the implicit options that should not be enabled
37024 by default. */
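
/* For example, plain "-mno-vsx" silently clears the implicit ISA 2.06/2.07/
   3.0 vector flags, whereas "-mno-vsx -mpower8-vector" reports
   "-mno-vsx turns off -mpower8-vector", because the dependent flag was set
   explicitly on the command line.  */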
37025
37026 static HOST_WIDE_INT
37027 rs6000_disable_incompatible_switches (void)
37028 {
37029 HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
37030 size_t i, j;
37031
37032 static const struct {
37033 const HOST_WIDE_INT no_flag; /* flag explicitly turned off. */
37034 const HOST_WIDE_INT dep_flags; /* flags that depend on this option. */
37035 const char *const name; /* name of the switch. */
37036 } flags[] = {
37037 { OPTION_MASK_P9_VECTOR, OTHER_P9_VECTOR_MASKS, "power9-vector" },
37038 { OPTION_MASK_P8_VECTOR, OTHER_P8_VECTOR_MASKS, "power8-vector" },
37039 { OPTION_MASK_VSX, OTHER_VSX_VECTOR_MASKS, "vsx" },
37040 };
37041
37042 for (i = 0; i < ARRAY_SIZE (flags); i++)
37043 {
37044 HOST_WIDE_INT no_flag = flags[i].no_flag;
37045
37046 if ((rs6000_isa_flags & no_flag) == 0
37047 && (rs6000_isa_flags_explicit & no_flag) != 0)
37048 {
37049 HOST_WIDE_INT dep_flags = flags[i].dep_flags;
37050 HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
37051 & rs6000_isa_flags
37052 & dep_flags);
37053
37054 if (set_flags)
37055 {
37056 for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
37057 if ((set_flags & rs6000_opt_masks[j].mask) != 0)
37058 {
37059 set_flags &= ~rs6000_opt_masks[j].mask;
37060 error ("%<-mno-%s%> turns off %<-m%s%>",
37061 flags[i].name,
37062 rs6000_opt_masks[j].name);
37063 }
37064
37065 gcc_assert (!set_flags);
37066 }
37067
37068 rs6000_isa_flags &= ~dep_flags;
37069 ignore_masks |= no_flag | dep_flags;
37070 }
37071 }
37072
37073 return ignore_masks;
37074 }
37075
37076 \f
37077 /* Helper function for printing the function name when debugging. */
37078
37079 static const char *
37080 get_decl_name (tree fn)
37081 {
37082 tree name;
37083
37084 if (!fn)
37085 return "<null>";
37086
37087 name = DECL_NAME (fn);
37088 if (!name)
37089 return "<no-name>";
37090
37091 return IDENTIFIER_POINTER (name);
37092 }
37093
37094 /* Return the clone id of the target we are compiling code for in a target
37095 clone. The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
37096 the priority list for the target clones (ordered from lowest to
37097 highest). */
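
/* For example (hypothetical declaration):

     __attribute__((target_clones("cpu=power9,default")))
     long foo (long a);

   gives the "default" version priority CLONE_DEFAULT (0), while the power9
   clone gets the highest priority whose isa_mask it satisfies.  */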
37098
37099 static int
37100 rs6000_clone_priority (tree fndecl)
37101 {
37102 tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
37103 HOST_WIDE_INT isa_masks;
37104 int ret = CLONE_DEFAULT;
37105 tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
37106 const char *attrs_str = NULL;
37107
37108 attrs = TREE_VALUE (TREE_VALUE (attrs));
37109 attrs_str = TREE_STRING_POINTER (attrs);
37110
37111 /* Return priority zero for the default function. Return the ISA needed for the
37112 function if it is not the default. */
37113 if (strcmp (attrs_str, "default") != 0)
37114 {
37115 if (fn_opts == NULL_TREE)
37116 fn_opts = target_option_default_node;
37117
37118 if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
37119 isa_masks = rs6000_isa_flags;
37120 else
37121 isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;
37122
37123 for (ret = CLONE_MAX - 1; ret != 0; ret--)
37124 if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
37125 break;
37126 }
37127
37128 if (TARGET_DEBUG_TARGET)
37129 fprintf (stderr, "rs6000_clone_priority (%s) => %d\n",
37130 get_decl_name (fndecl), ret);
37131
37132 return ret;
37133 }
37134
37135 /* This compares the priority of target features in function DECL1 and DECL2.
37136 It returns positive value if DECL1 is higher priority, negative value if
37137 DECL2 is higher priority and 0 if they are the same. Note, priorities are
37138 ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0). */
37139
37140 static int
37141 rs6000_compare_version_priority (tree decl1, tree decl2)
37142 {
37143 int priority1 = rs6000_clone_priority (decl1);
37144 int priority2 = rs6000_clone_priority (decl2);
37145 int ret = priority1 - priority2;
37146
37147 if (TARGET_DEBUG_TARGET)
37148 fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
37149 get_decl_name (decl1), get_decl_name (decl2), ret);
37150
37151 return ret;
37152 }
37153
37154 /* Make a dispatcher declaration for the multi-versioned function DECL.
37155 Calls to DECL function will be replaced with calls to the dispatcher
37156 by the front-end. Returns the decl of the dispatcher function. */
37157
37158 static tree
37159 rs6000_get_function_versions_dispatcher (void *decl)
37160 {
37161 tree fn = (tree) decl;
37162 struct cgraph_node *node = NULL;
37163 struct cgraph_node *default_node = NULL;
37164 struct cgraph_function_version_info *node_v = NULL;
37165 struct cgraph_function_version_info *first_v = NULL;
37166
37167 tree dispatch_decl = NULL;
37168
37169 struct cgraph_function_version_info *default_version_info = NULL;
37170 gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));
37171
37172 if (TARGET_DEBUG_TARGET)
37173 fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
37174 get_decl_name (fn));
37175
37176 node = cgraph_node::get (fn);
37177 gcc_assert (node != NULL);
37178
37179 node_v = node->function_version ();
37180 gcc_assert (node_v != NULL);
37181
37182 if (node_v->dispatcher_resolver != NULL)
37183 return node_v->dispatcher_resolver;
37184
37185 /* Find the default version and make it the first node. */
37186 first_v = node_v;
37187 /* Go to the beginning of the chain. */
37188 while (first_v->prev != NULL)
37189 first_v = first_v->prev;
37190
37191 default_version_info = first_v;
37192 while (default_version_info != NULL)
37193 {
37194 const tree decl2 = default_version_info->this_node->decl;
37195 if (is_function_default_version (decl2))
37196 break;
37197 default_version_info = default_version_info->next;
37198 }
37199
37200 /* If there is no default node, just return NULL. */
37201 if (default_version_info == NULL)
37202 return NULL;
37203
37204 /* Make default info the first node. */
37205 if (first_v != default_version_info)
37206 {
37207 default_version_info->prev->next = default_version_info->next;
37208 if (default_version_info->next)
37209 default_version_info->next->prev = default_version_info->prev;
37210 first_v->prev = default_version_info;
37211 default_version_info->next = first_v;
37212 default_version_info->prev = NULL;
37213 }
37214
37215 default_node = default_version_info->this_node;
37216
37217 #ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
37218 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37219 "target_clones attribute needs GLIBC (2.23 and newer) that "
37220 "exports hardware capability bits");
37221 #else
37222
37223 if (targetm.has_ifunc_p ())
37224 {
37225 struct cgraph_function_version_info *it_v = NULL;
37226 struct cgraph_node *dispatcher_node = NULL;
37227 struct cgraph_function_version_info *dispatcher_version_info = NULL;
37228
37229 /* Right now, the dispatching is done via ifunc. */
37230 dispatch_decl = make_dispatcher_decl (default_node->decl);
37231
37232 dispatcher_node = cgraph_node::get_create (dispatch_decl);
37233 gcc_assert (dispatcher_node != NULL);
37234 dispatcher_node->dispatcher_function = 1;
37235 dispatcher_version_info
37236 = dispatcher_node->insert_new_function_version ();
37237 dispatcher_version_info->next = default_version_info;
37238 dispatcher_node->definition = 1;
37239
37240 /* Set the dispatcher for all the versions. */
37241 it_v = default_version_info;
37242 while (it_v != NULL)
37243 {
37244 it_v->dispatcher_resolver = dispatch_decl;
37245 it_v = it_v->next;
37246 }
37247 }
37248 else
37249 {
37250 error_at (DECL_SOURCE_LOCATION (default_node->decl),
37251 "multiversioning needs ifunc which is not supported "
37252 "on this target");
37253 }
37254 #endif
37255
37256 return dispatch_decl;
37257 }
37258
37259 /* Make the resolver function decl to dispatch the versions of a multi-
37260 versioned function, DEFAULT_DECL. Create an empty basic block in the
37261 resolver and store the pointer in EMPTY_BB. Return the decl of the resolver
37262 function. */
37263
37264 static tree
37265 make_resolver_func (const tree default_decl,
37266 const tree dispatch_decl,
37267 basic_block *empty_bb)
37268 {
37269 /* Make the resolver function static. The resolver function returns
37270 void *. */
37271 tree decl_name = clone_function_name (default_decl, "resolver");
37272 const char *resolver_name = IDENTIFIER_POINTER (decl_name);
37273 tree type = build_function_type_list (ptr_type_node, NULL_TREE);
37274 tree decl = build_fn_decl (resolver_name, type);
37275 SET_DECL_ASSEMBLER_NAME (decl, decl_name);
37276
37277 DECL_NAME (decl) = decl_name;
37278 TREE_USED (decl) = 1;
37279 DECL_ARTIFICIAL (decl) = 1;
37280 DECL_IGNORED_P (decl) = 0;
37281 TREE_PUBLIC (decl) = 0;
37282 DECL_UNINLINABLE (decl) = 1;
37283
37284 /* Resolver is not external, body is generated. */
37285 DECL_EXTERNAL (decl) = 0;
37286 DECL_EXTERNAL (dispatch_decl) = 0;
37287
37288 DECL_CONTEXT (decl) = NULL_TREE;
37289 DECL_INITIAL (decl) = make_node (BLOCK);
37290 DECL_STATIC_CONSTRUCTOR (decl) = 0;
37291
37292 /* Build result decl and add to function_decl. */
37293 tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
37294 DECL_CONTEXT (t) = decl;
37295 DECL_ARTIFICIAL (t) = 1;
37296 DECL_IGNORED_P (t) = 1;
37297 DECL_RESULT (decl) = t;
37298
37299 gimplify_function_tree (decl);
37300 push_cfun (DECL_STRUCT_FUNCTION (decl));
37301 *empty_bb = init_lowered_empty_function (decl, false,
37302 profile_count::uninitialized ());
37303
37304 cgraph_node::add_new_function (decl, true);
37305 symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));
37306
37307 pop_cfun ();
37308
37309 /* Mark dispatch_decl as "ifunc" with resolver as resolver_name. */
37310 DECL_ATTRIBUTES (dispatch_decl)
37311 = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));
37312
37313 cgraph_node::create_same_body_alias (dispatch_decl, decl);
37314
37315 return decl;
37316 }
37317
37318 /* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
37319 return a pointer to VERSION_DECL if we are running on a machine that
37320 supports the index CLONE_ISA hardware architecture bits. This function will
37321 be called during version dispatch to decide which function version to
37322 execute. It returns the basic block at the end, to which more conditions
37323 can be added. */
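
/* Roughly, for a non-default clone the statements added here correspond to
   (a sketch; "arch_3_00" is one example of a platform bit name):

     if (__builtin_cpu_supports ("arch_3_00"))
       return (void *) version_decl;

   with control falling through to the next condition, or to the default
   return, when the test fails.  */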
37324
37325 static basic_block
37326 add_condition_to_bb (tree function_decl, tree version_decl,
37327 int clone_isa, basic_block new_bb)
37328 {
37329 push_cfun (DECL_STRUCT_FUNCTION (function_decl));
37330
37331 gcc_assert (new_bb != NULL);
37332 gimple_seq gseq = bb_seq (new_bb);
37333
37335 tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
37336 build_fold_addr_expr (version_decl));
37337 tree result_var = create_tmp_var (ptr_type_node);
37338 gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
37339 gimple *return_stmt = gimple_build_return (result_var);
37340
37341 if (clone_isa == CLONE_DEFAULT)
37342 {
37343 gimple_seq_add_stmt (&gseq, convert_stmt);
37344 gimple_seq_add_stmt (&gseq, return_stmt);
37345 set_bb_seq (new_bb, gseq);
37346 gimple_set_bb (convert_stmt, new_bb);
37347 gimple_set_bb (return_stmt, new_bb);
37348 pop_cfun ();
37349 return new_bb;
37350 }
37351
37352 tree bool_zero = build_int_cst (bool_int_type_node, 0);
37353 tree cond_var = create_tmp_var (bool_int_type_node);
37354 tree predicate_decl = rs6000_builtin_decls [(int) RS6000_BUILTIN_CPU_SUPPORTS];
37355 const char *arg_str = rs6000_clone_map[clone_isa].name;
37356 tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
37357 gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
37358 gimple_call_set_lhs (call_cond_stmt, cond_var);
37359
37360 gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
37361 gimple_set_bb (call_cond_stmt, new_bb);
37362 gimple_seq_add_stmt (&gseq, call_cond_stmt);
37363
37364 gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
37365 NULL_TREE, NULL_TREE);
37366 gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
37367 gimple_set_bb (if_else_stmt, new_bb);
37368 gimple_seq_add_stmt (&gseq, if_else_stmt);
37369
37370 gimple_seq_add_stmt (&gseq, convert_stmt);
37371 gimple_seq_add_stmt (&gseq, return_stmt);
37372 set_bb_seq (new_bb, gseq);
37373
37374 basic_block bb1 = new_bb;
37375 edge e12 = split_block (bb1, if_else_stmt);
37376 basic_block bb2 = e12->dest;
37377 e12->flags &= ~EDGE_FALLTHRU;
37378 e12->flags |= EDGE_TRUE_VALUE;
37379
37380 edge e23 = split_block (bb2, return_stmt);
37381 gimple_set_bb (convert_stmt, bb2);
37382 gimple_set_bb (return_stmt, bb2);
37383
37384 basic_block bb3 = e23->dest;
37385 make_edge (bb1, bb3, EDGE_FALSE_VALUE);
37386
37387 remove_edge (e23);
37388 make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
37389
37390 pop_cfun ();
37391 return bb3;
37392 }
37393
37394 /* This function generates the dispatch function for multi-versioned functions.
37395 DISPATCH_DECL is the function which will contain the dispatch logic.
37396 FNDECLS are the function choices for dispatch, and is a tree chain.
37397 EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
37398 code is generated. */
37399
37400 static int
37401 dispatch_function_versions (tree dispatch_decl,
37402 void *fndecls_p,
37403 basic_block *empty_bb)
37404 {
37405 int ix;
37406 tree ele;
37407 vec<tree> *fndecls;
37408 tree clones[CLONE_MAX];
37409
37410 if (TARGET_DEBUG_TARGET)
37411 fputs ("dispatch_function_versions, top\n", stderr);
37412
37413 gcc_assert (dispatch_decl != NULL
37414 && fndecls_p != NULL
37415 && empty_bb != NULL);
37416
37417 /* fndecls_p is actually a vector. */
37418 fndecls = static_cast<vec<tree> *> (fndecls_p);
37419
37420 /* At least one more version other than the default. */
37421 gcc_assert (fndecls->length () >= 2);
37422
37423 /* The first version in the vector is the default decl. */
37424 memset ((void *) clones, '\0', sizeof (clones));
37425 clones[CLONE_DEFAULT] = (*fndecls)[0];
37426
37427 /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
37428 there (on the x86_64, it is not a NOP). The builtin function
37429 __builtin_cpu_supports ensures that the TOC fields are set up by requiring a
37430 recent glibc. If we ever need to call __builtin_cpu_init, we would need
37431 to insert the code here to do the call. */
37432
37433 for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
37434 {
37435 int priority = rs6000_clone_priority (ele);
37436 if (!clones[priority])
37437 clones[priority] = ele;
37438 }
37439
37440 for (ix = CLONE_MAX - 1; ix >= 0; ix--)
37441 if (clones[ix])
37442 {
37443 if (TARGET_DEBUG_TARGET)
37444 fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
37445 ix, get_decl_name (clones[ix]));
37446
37447 *empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
37448 *empty_bb);
37449 }
37450
37451 return 0;
37452 }
37453
37454 /* Generate the dispatching code body to dispatch multi-versioned function
37455 DECL. The target hook is called to process the "target" attributes and
37456 provide the code to dispatch the right function at run-time. NODE points
37457 to the dispatcher decl whose body will be created. */
37458
37459 static tree
37460 rs6000_generate_version_dispatcher_body (void *node_p)
37461 {
37462 tree resolver;
37463 basic_block empty_bb;
37464 struct cgraph_node *node = (cgraph_node *) node_p;
37465 struct cgraph_function_version_info *ninfo = node->function_version ();
37466
37467 if (ninfo->dispatcher_resolver)
37468 return ninfo->dispatcher_resolver;
37469
37470 /* node is going to be an alias, so remove the finalized bit. */
37471 node->definition = false;
37472
37473 /* The first version in the chain corresponds to the default version. */
37474 ninfo->dispatcher_resolver = resolver
37475 = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);
37476
37477 if (TARGET_DEBUG_TARGET)
37478 fprintf (stderr, "rs6000_generate_version_dispatcher_body, %s\n",
37479 get_decl_name (resolver));
37480
37481 push_cfun (DECL_STRUCT_FUNCTION (resolver));
37482 auto_vec<tree, 2> fn_ver_vec;
37483
37484 for (struct cgraph_function_version_info *vinfo = ninfo->next;
37485 vinfo;
37486 vinfo = vinfo->next)
37487 {
37488 struct cgraph_node *version = vinfo->this_node;
37489 /* Check for virtual functions here again, as by this time it should
37490 have been determined if this function needs a vtable index or
37491 not. This happens for methods in derived classes that override
37492 virtual methods in base classes but are not explicitly marked as
37493 virtual. */
37494 if (DECL_VINDEX (version->decl))
37495 sorry ("virtual function multiversioning not supported");
37496
37497 fn_ver_vec.safe_push (version->decl);
37498 }
37499
37500 dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
37501 cgraph_edge::rebuild_edges ();
37502 pop_cfun ();
37503 return resolver;
37504 }
37505
37506 \f
37507 /* Hook to determine if one function can safely inline another. */
37508
37509 static bool
37510 rs6000_can_inline_p (tree caller, tree callee)
37511 {
37512 bool ret = false;
37513 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
37514 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
37515
37516 /* If callee has no option attributes, then it is ok to inline. */
37517 if (!callee_tree)
37518 ret = true;
37519
37520 /* If caller has no option attributes, but callee does then it is not ok to
37521 inline. */
37522 else if (!caller_tree)
37523 ret = false;
37524
37525 else
37526 {
37527 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
37528 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
37529
37530 /* Callee's options should be a subset of the caller's, i.e. a vsx function
37531 can inline an altivec function but a non-vsx function can't inline a
37532 vsx function. */
37533 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
37534 == callee_opts->x_rs6000_isa_flags)
37535 ret = true;
37536 }
37537
37538 if (TARGET_DEBUG_TARGET)
37539 fprintf (stderr, "rs6000_can_inline_p: caller %s, callee %s, %s inline\n",
37540 get_decl_name (caller), get_decl_name (callee),
37541 (ret ? "can" : "cannot"));
37542
37543 return ret;
37544 }
37545 \f
37546 /* Allocate a stack temp and fix up the address so it meets the particular
37547 memory requirements (either offsettable or REG+REG addressing). */
37548
37549 rtx
37550 rs6000_allocate_stack_temp (machine_mode mode,
37551 bool offsettable_p,
37552 bool reg_reg_p)
37553 {
37554 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
37555 rtx addr = XEXP (stack, 0);
37556 int strict_p = reload_completed;
37557
37558 if (!legitimate_indirect_address_p (addr, strict_p))
37559 {
37560 if (offsettable_p
37561 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
37562 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37563
37564 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
37565 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
37566 }
37567
37568 return stack;
37569 }
37570
37571 /* Given a memory reference, if it does not use reg or reg+reg addressing,
37572 convert it to such a form to deal with memory reference instructions
37573 like STFIWX and LDBRX that only take reg+reg addressing. */
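
/* As an illustration, a reference such as

     (mem:DI (plus:DI (reg:DI 3) (const_int 16)))

   has its full address computed into a fresh pseudo first, leaving
   (mem:DI (reg:DI <tmp>)), which the reg+reg-only instructions accept.  */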
37574
37575 rtx
37576 rs6000_force_indexed_or_indirect_mem (rtx x)
37577 {
37578 machine_mode mode = GET_MODE (x);
37579
37580 gcc_assert (MEM_P (x));
37581 if (can_create_pseudo_p () && !indexed_or_indirect_operand (x, mode))
37582 {
37583 rtx addr = XEXP (x, 0);
37584 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
37585 {
37586 rtx reg = XEXP (addr, 0);
37587 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
37588 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
37589 gcc_assert (REG_P (reg));
37590 emit_insn (gen_add3_insn (reg, reg, size_rtx));
37591 addr = reg;
37592 }
37593 else if (GET_CODE (addr) == PRE_MODIFY)
37594 {
37595 rtx reg = XEXP (addr, 0);
37596 rtx expr = XEXP (addr, 1);
37597 gcc_assert (REG_P (reg));
37598 gcc_assert (GET_CODE (expr) == PLUS);
37599 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
37600 addr = reg;
37601 }
37602
37603 x = replace_equiv_address (x, force_reg (Pmode, addr));
37604 }
37605
37606 return x;
37607 }
37608
37609 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
37610
37611 On the RS/6000, all integer constants are acceptable, though most won't be
37612 valid for particular insns. Only easy FP constants are acceptable. */
37613
37614 static bool
37615 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
37616 {
37617 if (TARGET_ELF && tls_referenced_p (x))
37618 return false;
37619
37620 if (CONST_DOUBLE_P (x))
37621 return easy_fp_constant (x, mode);
37622
37623 if (GET_CODE (x) == CONST_VECTOR)
37624 return easy_vector_constant (x, mode);
37625
37626 return true;
37627 }
37628
37629 \f
37630 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
37631
37632 static bool
37633 chain_already_loaded (rtx_insn *last)
37634 {
37635 for (; last != NULL; last = PREV_INSN (last))
37636 {
37637 if (NONJUMP_INSN_P (last))
37638 {
37639 rtx patt = PATTERN (last);
37640
37641 if (GET_CODE (patt) == SET)
37642 {
37643 rtx lhs = XEXP (patt, 0);
37644
37645 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
37646 return true;
37647 }
37648 }
37649 }
37650 return false;
37651 }
37652
37653 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
37654
37655 void
37656 rs6000_call_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37657 {
37658 rtx func = func_desc;
37659 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
37660 rtx toc_load = NULL_RTX;
37661 rtx toc_restore = NULL_RTX;
37662 rtx func_addr;
37663 rtx abi_reg = NULL_RTX;
37664 rtx call[4];
37665 int n_call;
37666 rtx insn;
37667 bool is_pltseq_longcall;
37668
37669 if (global_tlsarg)
37670 tlsarg = global_tlsarg;
37671
37672 /* Handle longcall attributes. */
37673 is_pltseq_longcall = false;
37674 if ((INTVAL (cookie) & CALL_LONG) != 0
37675 && GET_CODE (func_desc) == SYMBOL_REF)
37676 {
37677 func = rs6000_longcall_ref (func_desc, tlsarg);
37678 if (TARGET_PLTSEQ)
37679 is_pltseq_longcall = true;
37680 }
37681
37682 /* Handle indirect calls. */
37683 if (!SYMBOL_REF_P (func)
37684 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func)))
37685 {
37686 /* Save the TOC into its reserved slot before the call,
37687 and prepare to restore it after the call. */
37688 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
37689 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
37690 gen_rtvec (1, stack_toc_offset),
37691 UNSPEC_TOCSLOT);
37692 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
37693
37694 /* Can we optimize saving the TOC in the prologue or
37695 do we need to do it at every call? */
37696 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
37697 cfun->machine->save_toc_in_prologue = true;
37698 else
37699 {
37700 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
37701 rtx stack_toc_mem = gen_frame_mem (Pmode,
37702 gen_rtx_PLUS (Pmode, stack_ptr,
37703 stack_toc_offset));
37704 MEM_VOLATILE_P (stack_toc_mem) = 1;
37705 if (is_pltseq_longcall)
37706 {
37707 /* Use UNSPEC_PLTSEQ here to emit every instruction in an
37708 inline PLT call sequence with a reloc, enabling the
37709 linker to edit the sequence back to a direct call
37710 when that makes sense. */
37711 rtvec v = gen_rtvec (3, toc_reg, func_desc, tlsarg);
37712 rtx mark_toc_reg = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37713 emit_insn (gen_rtx_SET (stack_toc_mem, mark_toc_reg));
37714 }
37715 else
37716 emit_move_insn (stack_toc_mem, toc_reg);
37717 }
37718
37719 if (DEFAULT_ABI == ABI_ELFv2)
37720 {
37721 /* A function pointer in the ELFv2 ABI is just a plain address, but
37722 the ABI requires it to be loaded into r12 before the call. */
37723 func_addr = gen_rtx_REG (Pmode, 12);
37724 if (!rtx_equal_p (func_addr, func))
37725 emit_move_insn (func_addr, func);
37726 abi_reg = func_addr;
37727 /* Indirect calls via CTR are strongly preferred over indirect
37728 calls via LR, so move the address there. Needed to mark
37729 this insn for linker plt sequence editing too. */
37730 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37731 if (is_pltseq_longcall)
37732 {
37733 rtvec v = gen_rtvec (3, abi_reg, func_desc, tlsarg);
37734 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37735 emit_insn (gen_rtx_SET (func_addr, mark_func));
37736 v = gen_rtvec (2, func_addr, func_desc);
37737 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37738 }
37739 else
37740 emit_move_insn (func_addr, abi_reg);
37741 }
37742 else
37743 {
37744 /* A function pointer under AIX is a pointer to a data area whose
37745 first word contains the actual address of the function, whose
37746 second word contains a pointer to its TOC, and whose third word
37747 contains a value to place in the static chain register (r11).
37748 Note that if we load the static chain, our "trampoline" need
37749 not have any executable code. */
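
/* Conceptually the descriptor looks like (an illustration, not a type the
   compiler actually declares):

     struct aix_func_desc
     {
       void *entry;   -- word 0: address of the function's code
       void *toc;     -- word 1: the function's TOC pointer
       void *chain;   -- word 2: static chain value, if any
     };  */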
37750
37751 /* Load up address of the actual function. */
37752 func = force_reg (Pmode, func);
37753 func_addr = gen_reg_rtx (Pmode);
37754 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func));
37755
37756 /* Indirect calls via CTR are strongly preferred over indirect
37757 calls via LR, so move the address there. */
37758 rtx ctr_reg = gen_rtx_REG (Pmode, CTR_REGNO);
37759 emit_move_insn (ctr_reg, func_addr);
37760 func_addr = ctr_reg;
37761
37762 /* Prepare to load the TOC of the called function. Note that the
37763 TOC load must happen immediately before the actual call so
37764 that unwinding the TOC registers works correctly. See the
37765 comment in frob_update_context. */
37766 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
37767 rtx func_toc_mem = gen_rtx_MEM (Pmode,
37768 gen_rtx_PLUS (Pmode, func,
37769 func_toc_offset));
37770 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
37771
37772 /* If we have a static chain, load it up. But, if the call was
37773 originally direct, the 3rd word has not been written since no
37774 trampoline has been built, so we ought not to load it, lest we
37775 override a static chain value. */
37776 if (!(GET_CODE (func_desc) == SYMBOL_REF
37777 && SYMBOL_REF_FUNCTION_P (func_desc))
37778 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
37779 && !chain_already_loaded (get_current_sequence ()->next->last))
37780 {
37781 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
37782 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
37783 rtx func_sc_mem = gen_rtx_MEM (Pmode,
37784 gen_rtx_PLUS (Pmode, func,
37785 func_sc_offset));
37786 emit_move_insn (sc_reg, func_sc_mem);
37787 abi_reg = sc_reg;
37788 }
37789 }
37790 }
37791 else
37792 {
37793 /* Direct calls use the TOC: for local calls, the callee will
37794 assume the TOC register is set; for non-local calls, the
37795 PLT stub needs the TOC register. */
37796 abi_reg = toc_reg;
37797 func_addr = func;
37798 }
37799
37800 /* Create the call. */
37801 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37802 if (value != NULL_RTX)
37803 call[0] = gen_rtx_SET (value, call[0]);
37804 n_call = 1;
37805
37806 if (toc_load)
37807 call[n_call++] = toc_load;
37808 if (toc_restore)
37809 call[n_call++] = toc_restore;
37810
37811 call[n_call++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37812
37813 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
37814 insn = emit_call_insn (insn);
37815
37816 /* Mention all registers defined by the ABI to hold information
37817 as uses in CALL_INSN_FUNCTION_USAGE. */
37818 if (abi_reg)
37819 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37820 }
37821
37822 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
37823
37824 void
37825 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37826 {
37827 rtx call[2];
37828 rtx insn;
37829
37830 gcc_assert (INTVAL (cookie) == 0);
37831
37832 if (global_tlsarg)
37833 tlsarg = global_tlsarg;
37834
37835 /* Create the call. */
37836 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), tlsarg);
37837 if (value != NULL_RTX)
37838 call[0] = gen_rtx_SET (value, call[0]);
37839
37840 call[1] = simple_return_rtx;
37841
37842 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
37843 insn = emit_call_insn (insn);
37844
37845 /* Note use of the TOC register. */
37846 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
37847 }
37848
37849 /* Expand code to perform a call under the SYSV4 ABI. */
37850
37851 void
37852 rs6000_call_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37853 {
37854 rtx func = func_desc;
37855 rtx func_addr;
37856 rtx call[4];
37857 rtx insn;
37858 rtx abi_reg = NULL_RTX;
37859 int n;
37860
37861 if (global_tlsarg)
37862 tlsarg = global_tlsarg;
37863
37864 /* Handle longcall attributes. */
37865 if ((INTVAL (cookie) & CALL_LONG) != 0
37866 && GET_CODE (func_desc) == SYMBOL_REF)
37867 {
37868 func = rs6000_longcall_ref (func_desc, tlsarg);
37869 /* If the longcall was implemented as an inline PLT call using
37870 PLT unspecs then func will be REG:r11. If not, func will be
37871 a pseudo reg. The inline PLT call sequence supports lazy
37872 linking (and longcalls to functions in dlopen'd libraries).
37873 The other style of longcall doesn't. The lazy linking entry
37874 to the dynamic symbol resolver requires r11 be the function
37875 address (as it is for linker generated PLT stubs). Ensure
37876 r11 stays valid to the bctrl by marking r11 used by the call. */
37877 if (TARGET_PLTSEQ)
37878 abi_reg = func;
37879 }
37880
37881 /* Handle indirect calls. */
37882 if (GET_CODE (func) != SYMBOL_REF)
37883 {
37884 func = force_reg (Pmode, func);
37885
37886 /* Indirect calls via CTR are strongly preferred over indirect
37887 calls via LR, so move the address there. That can't be left
37888 to reload because we want to mark every instruction in an
37889 inline PLT call sequence with a reloc, enabling the linker to
37890 edit the sequence back to a direct call when that makes sense. */
37891 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37892 if (abi_reg)
37893 {
37894 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37895 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37896 emit_insn (gen_rtx_SET (func_addr, mark_func));
37897 v = gen_rtvec (2, func_addr, func_desc);
37898 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37899 }
37900 else
37901 emit_move_insn (func_addr, func);
37902 }
37903 else
37904 func_addr = func;
37905
37906 /* Create the call. */
37907 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37908 if (value != NULL_RTX)
37909 call[0] = gen_rtx_SET (value, call[0]);
37910
37911 call[1] = gen_rtx_USE (VOIDmode, cookie);
37912 n = 2;
37913 if (TARGET_SECURE_PLT
37914 && flag_pic
37915 && GET_CODE (func_addr) == SYMBOL_REF
37916 && !SYMBOL_REF_LOCAL_P (func_addr))
37917 call[n++] = gen_rtx_USE (VOIDmode, pic_offset_table_rtx);
37918
37919 call[n++] = gen_hard_reg_clobber (Pmode, LR_REGNO);
37920
37921 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n, call));
37922 insn = emit_call_insn (insn);
37923 if (abi_reg)
37924 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37925 }
37926
37927 /* Expand code to perform a sibling call under the SysV4 ABI. */
37928
37929 void
37930 rs6000_sibcall_sysv (rtx value, rtx func_desc, rtx tlsarg, rtx cookie)
37931 {
37932 rtx func = func_desc;
37933 rtx func_addr;
37934 rtx call[3];
37935 rtx insn;
37936 rtx abi_reg = NULL_RTX;
37937
37938 if (global_tlsarg)
37939 tlsarg = global_tlsarg;
37940
37941 /* Handle longcall attributes. */
37942 if ((INTVAL (cookie) & CALL_LONG) != 0
37943 && GET_CODE (func_desc) == SYMBOL_REF)
37944 {
37945 func = rs6000_longcall_ref (func_desc, tlsarg);
37946 /* If the longcall was implemented as an inline PLT call using
37947 PLT unspecs then func will be REG:r11. If not, func will be
37948 a pseudo reg. The inline PLT call sequence supports lazy
37949 linking (and longcalls to functions in dlopen'd libraries).
37950 The other style of longcall doesn't. The lazy linking entry
37951 to the dynamic symbol resolver requires r11 be the function
37952 address (as it is for linker generated PLT stubs). Ensure
37953 r11 stays valid to the bctr by marking r11 used by the call. */
37954 if (TARGET_PLTSEQ)
37955 abi_reg = func;
37956 }
37957
37958 /* Handle indirect calls. */
37959 if (GET_CODE (func) != SYMBOL_REF)
37960 {
37961 func = force_reg (Pmode, func);
37962
37963 /* Indirect sibcalls must go via CTR. That can't be left to
37964 reload because we want to mark every instruction in an inline
37965 PLT call sequence with a reloc, enabling the linker to edit
37966 the sequence back to a direct call when that makes sense. */
37967 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
37968 if (abi_reg)
37969 {
37970 rtvec v = gen_rtvec (3, func, func_desc, tlsarg);
37971 rtx mark_func = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37972 emit_insn (gen_rtx_SET (func_addr, mark_func));
37973 v = gen_rtvec (2, func_addr, func_desc);
37974 func_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_PLTSEQ);
37975 }
37976 else
37977 emit_move_insn (func_addr, func);
37978 }
37979 else
37980 func_addr = func;
37981
37982 /* Create the call. */
37983 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
37984 if (value != NULL_RTX)
37985 call[0] = gen_rtx_SET (value, call[0]);
37986
37987 call[1] = gen_rtx_USE (VOIDmode, cookie);
37988 call[2] = simple_return_rtx;
37989
37990 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
37991 insn = emit_call_insn (insn);
37992 if (abi_reg)
37993 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
37994 }
37995
37996 #if TARGET_MACHO
37997
37998 /* Expand code to perform a call under the Darwin ABI.
37999 Modulo handling of mlongcall, this is much the same as sysv.
38000 If/when the longcall optimisation is removed, we could drop this
38001 code and use the sysv case (taking care to avoid the tls stuff).
38002
38003 We can use this for sibcalls too, if needed. */
38004
38005 void
38006 rs6000_call_darwin_1 (rtx value, rtx func_desc, rtx tlsarg,
38007 rtx cookie, bool sibcall)
38008 {
38009 rtx func = func_desc;
38010 rtx func_addr;
38011 rtx call[3];
38012 rtx insn;
38013 int cookie_val = INTVAL (cookie);
38014 bool make_island = false;
38015
38016 /* Handle longcall attributes; there are two cases for Darwin:
38017 1) Newer linkers are capable of synthesising any branch islands needed.
38018 2) We need a helper branch island synthesised by the compiler.
38019 The second case has mostly been retired and we don't use it for m64.
38020 In fact, it is only an optimisation; we could just indirect as sysv
38021 does... however, we keep it for backwards compatibility for now.
38022 If we're going to use this, then we need to keep the CALL_LONG bit set,
38023 so that we can pick up the special insn form later. */
38024 if ((cookie_val & CALL_LONG) != 0
38025 && GET_CODE (func_desc) == SYMBOL_REF)
38026 {
38027 if (darwin_emit_branch_islands && TARGET_32BIT)
38028 make_island = true; /* Do nothing yet, retain the CALL_LONG flag. */
38029 else
38030 {
38031 /* The linker is capable of doing this, but the user explicitly
38032 asked for -mlongcall, so we'll do the 'normal' version. */
38033 func = rs6000_longcall_ref (func_desc, NULL_RTX);
38034 cookie_val &= ~CALL_LONG; /* Handled, zap it. */
38035 }
38036 }
38037
38038 /* Handle indirect calls. */
38039 if (GET_CODE (func) != SYMBOL_REF)
38040 {
38041 func = force_reg (Pmode, func);
38042
38043 /* Indirect calls via CTR are strongly preferred over indirect
38044 calls via LR, and are required for indirect sibcalls, so move
38045 the address there. */
38046 func_addr = gen_rtx_REG (Pmode, CTR_REGNO);
38047 emit_move_insn (func_addr, func);
38048 }
38049 else
38050 func_addr = func;
38051
38052 /* Create the call. */
38053 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), tlsarg);
38054 if (value != NULL_RTX)
38055 call[0] = gen_rtx_SET (value, call[0]);
38056
38057 call[1] = gen_rtx_USE (VOIDmode, GEN_INT (cookie_val));
38058
38059 if (sibcall)
38060 call[2] = simple_return_rtx;
38061 else
38062 call[2] = gen_hard_reg_clobber (Pmode, LR_REGNO);
38063
38064 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (3, call));
38065 insn = emit_call_insn (insn);
38066 /* Now that we have the debug info in the insn, we can set up the branch
38067 island if we're using one. */
38068 if (make_island)
38069 {
38070 tree funname = get_identifier (XSTR (func_desc, 0));
38071
38072 if (no_previous_def (funname))
38073 {
38074 rtx label_rtx = gen_label_rtx ();
38075 char *label_buf, temp_buf[256];
38076 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
38077 CODE_LABEL_NUMBER (label_rtx));
38078 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
38079 tree labelname = get_identifier (label_buf);
38080 add_compiler_branch_island (labelname, funname,
38081 insn_line ((const rtx_insn*)insn));
38082 }
38083 }
38084 }
38085 #endif
38086
38087 void
38088 rs6000_call_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38089 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38090 {
38091 #if TARGET_MACHO
38092 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, false);
38093 #else
38094 gcc_unreachable ();
38095 #endif
38096 }
38097
38098
38099 void
38100 rs6000_sibcall_darwin (rtx value ATTRIBUTE_UNUSED, rtx func_desc ATTRIBUTE_UNUSED,
38101 rtx tlsarg ATTRIBUTE_UNUSED, rtx cookie ATTRIBUTE_UNUSED)
38102 {
38103 #if TARGET_MACHO
38104 rs6000_call_darwin_1 (value, func_desc, tlsarg, cookie, true);
38105 #else
38106 gcc_unreachable ();
38107 #endif
38108 }
38109
38110
38111 /* Return whether we always need to update the saved TOC pointer when we
38112 update the stack pointer. */
38113
38114 static bool
38115 rs6000_save_toc_in_prologue_p (void)
38116 {
38117 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
38118 }
38119
38120 #ifdef HAVE_GAS_HIDDEN
38121 # define USE_HIDDEN_LINKONCE 1
38122 #else
38123 # define USE_HIDDEN_LINKONCE 0
38124 #endif
38125
38126 /* Fills in the label name that should be used for a 476 link stack thunk. */
38127
38128 void
38129 get_ppc476_thunk_name (char name[32])
38130 {
38131 gcc_assert (TARGET_LINK_STACK);
38132
38133 if (USE_HIDDEN_LINKONCE)
38134 sprintf (name, "__ppc476.get_thunk");
38135 else
38136 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
38137 }
38138
38139 /* This function emits the simple thunk routine that is used to preserve
38140 the link stack on the 476 cpu. */
38141
38142 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
38143 static void
38144 rs6000_code_end (void)
38145 {
38146 char name[32];
38147 tree decl;
38148
38149 if (!TARGET_LINK_STACK)
38150 return;
38151
38152 get_ppc476_thunk_name (name);
38153
38154 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
38155 build_function_type_list (void_type_node, NULL_TREE));
38156 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
38157 NULL_TREE, void_type_node);
38158 TREE_PUBLIC (decl) = 1;
38159 TREE_STATIC (decl) = 1;
38160
38161 #if RS6000_WEAK
38162 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
38163 {
38164 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
38165 targetm.asm_out.unique_section (decl, 0);
38166 switch_to_section (get_named_section (decl, NULL, 0));
38167 DECL_WEAK (decl) = 1;
38168 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
38169 targetm.asm_out.globalize_label (asm_out_file, name);
38170 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
38171 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
38172 }
38173 else
38174 #endif
38175 {
38176 switch_to_section (text_section);
38177 ASM_OUTPUT_LABEL (asm_out_file, name);
38178 }
38179
38180 DECL_INITIAL (decl) = make_node (BLOCK);
38181 current_function_decl = decl;
38182 allocate_struct_function (decl, false);
38183 init_function_start (decl);
38184 first_function_block_is_cold = false;
38185 /* Make sure unwind info is emitted for the thunk if needed. */
38186 final_start_function (emit_barrier (), asm_out_file, 1);
38187
38188 fputs ("\tblr\n", asm_out_file);
38189
38190 final_end_function ();
38191 init_insn_lengths ();
38192 free_after_compilation (cfun);
38193 set_cfun (NULL);
38194 current_function_decl = NULL;
38195 }
38196
38197 /* Add r30 to hard reg set if the prologue sets it up and it is not
38198 pic_offset_table_rtx. */
38199
38200 static void
38201 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
38202 {
38203 if (!TARGET_SINGLE_PIC_BASE
38204 && TARGET_TOC
38205 && TARGET_MINIMAL_TOC
38206 && !constant_pool_empty_p ())
38207 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
38208 if (cfun->machine->split_stack_argp_used)
38209 add_to_hard_reg_set (&set->set, Pmode, 12);
38210
38211 /* Make sure the hard reg set doesn't include r2, which was possibly added
38212 via PIC_OFFSET_TABLE_REGNUM. */
38213 if (TARGET_TOC)
38214 remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
38215 }
38216
38217 \f
38218 /* Helper function for rs6000_split_logical to emit a logical instruction after
38219 splitting the operation into individual GPR registers.
38220
38221 DEST is the destination register.
38222 OP1 and OP2 are the input source registers.
38223 CODE is the base operation (AND, IOR, XOR, NOT).
38224 MODE is the machine mode.
38225 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38226 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38227 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
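/* For example, with CODE == AND, COMPLEMENT_OP2_P true, and the other
   complement flags false, the insn emitted is (a sketch of the RTL):

   (set (reg:SI dest)
        (and:SI (not:SI (reg:SI op2))
                (reg:SI op1)))

   i.e. an andc instruction; the inverted operand is swapped into the first
   position to keep the RTL canonical. */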
38228
38229 static void
38230 rs6000_split_logical_inner (rtx dest,
38231 rtx op1,
38232 rtx op2,
38233 enum rtx_code code,
38234 machine_mode mode,
38235 bool complement_final_p,
38236 bool complement_op1_p,
38237 bool complement_op2_p)
38238 {
38239 rtx bool_rtx;
38240
38241 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
38242 if (op2 && CONST_INT_P (op2)
38243 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
38244 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38245 {
38246 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
38247 HOST_WIDE_INT value = INTVAL (op2) & mask;
38248
38249 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
38250 if (code == AND)
38251 {
38252 if (value == 0)
38253 {
38254 emit_insn (gen_rtx_SET (dest, const0_rtx));
38255 return;
38256 }
38257
38258 else if (value == mask)
38259 {
38260 if (!rtx_equal_p (dest, op1))
38261 emit_insn (gen_rtx_SET (dest, op1));
38262 return;
38263 }
38264 }
38265
38266 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
38267 into separate ORI/ORIS or XORI/XORIS instructions. */
38268 else if (code == IOR || code == XOR)
38269 {
38270 if (value == 0)
38271 {
38272 if (!rtx_equal_p (dest, op1))
38273 emit_insn (gen_rtx_SET (dest, op1));
38274 return;
38275 }
38276 }
38277 }
38278
38279 if (code == AND && mode == SImode
38280 && !complement_final_p && !complement_op1_p && !complement_op2_p)
38281 {
38282 emit_insn (gen_andsi3 (dest, op1, op2));
38283 return;
38284 }
38285
38286 if (complement_op1_p)
38287 op1 = gen_rtx_NOT (mode, op1);
38288
38289 if (complement_op2_p)
38290 op2 = gen_rtx_NOT (mode, op2);
38291
38292 /* For canonical RTL, if only one arm is inverted it is the first. */
38293 if (!complement_op1_p && complement_op2_p)
38294 std::swap (op1, op2);
38295
38296 bool_rtx = ((code == NOT)
38297 ? gen_rtx_NOT (mode, op1)
38298 : gen_rtx_fmt_ee (code, mode, op1, op2));
38299
38300 if (complement_final_p)
38301 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
38302
38303 emit_insn (gen_rtx_SET (dest, bool_rtx));
38304 }
38305
38306 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
38307 operations are split immediately during RTL generation to allow for more
38308 optimizations of the AND/IOR/XOR.
38309
38310 OPERANDS is an array containing the destination and two input operands.
38311 CODE is the base operation (AND, IOR, XOR, NOT).
38312 MODE is the machine mode.
38313 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38314 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38315 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
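/* For example, on a 32-bit system the DImode operation

     x |= 0x12345678ULL;

   splits into an IOR with 0 (a simple move, or nothing) for the high word
   and, since 0x12345678 is not a 16-bit logical constant, an oris/ori pair
   for the low word (a sketch):

     oris lo,lo,0x1234
     ori  lo,lo,0x5678  */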
38318
38319 static void
38320 rs6000_split_logical_di (rtx operands[3],
38321 enum rtx_code code,
38322 bool complement_final_p,
38323 bool complement_op1_p,
38324 bool complement_op2_p)
38325 {
38326 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
38327 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
38328 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
38329 enum hi_lo { hi = 0, lo = 1 };
38330 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
38331 size_t i;
38332
38333 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
38334 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
38335 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
38336 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
38337
38338 if (code == NOT)
38339 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
38340 else
38341 {
38342 if (!CONST_INT_P (operands[2]))
38343 {
38344 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
38345 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
38346 }
38347 else
38348 {
38349 HOST_WIDE_INT value = INTVAL (operands[2]);
38350 HOST_WIDE_INT value_hi_lo[2];
38351
38352 gcc_assert (!complement_final_p);
38353 gcc_assert (!complement_op1_p);
38354 gcc_assert (!complement_op2_p);
38355
38356 value_hi_lo[hi] = value >> 32;
38357 value_hi_lo[lo] = value & lower_32bits;
38358
38359 for (i = 0; i < 2; i++)
38360 {
38361 HOST_WIDE_INT sub_value = value_hi_lo[i];
38362
38363 if (sub_value & sign_bit)
38364 sub_value |= upper_32bits;
38365
38366 op2_hi_lo[i] = GEN_INT (sub_value);
38367
38368 /* If this is an AND instruction, check to see if we need to load
38369 the value in a register. */
38370 if (code == AND && sub_value != -1 && sub_value != 0
38371 && !and_operand (op2_hi_lo[i], SImode))
38372 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
38373 }
38374 }
38375 }
38376
38377 for (i = 0; i < 2; i++)
38378 {
38379 /* Split large IOR/XOR operations. */
38380 if ((code == IOR || code == XOR)
38381 && CONST_INT_P (op2_hi_lo[i])
38382 && !complement_final_p
38383 && !complement_op1_p
38384 && !complement_op2_p
38385 && !logical_const_operand (op2_hi_lo[i], SImode))
38386 {
38387 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
38388 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
38389 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
38390 rtx tmp = gen_reg_rtx (SImode);
38391
38392 /* Make sure the constant is sign extended. */
38393 if ((hi_16bits & sign_bit) != 0)
38394 hi_16bits |= upper_32bits;
38395
38396 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
38397 code, SImode, false, false, false);
38398
38399 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
38400 code, SImode, false, false, false);
38401 }
38402 else
38403 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
38404 code, SImode, complement_final_p,
38405 complement_op1_p, complement_op2_p);
38406 }
38407
38408 return;
38409 }
38410
38411 /* Split the insns that make up boolean operations operating on multiple GPR
38412 registers. The boolean MD patterns ensure that the inputs either are
38413 exactly the same as the output registers, or there is no overlap.
38414
38415 OPERANDS is an array containing the destination and two input operands.
38416 CODE is the base operation (AND, IOR, XOR, NOT).
38417 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
38418 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
38419 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
38420
38421 void
38422 rs6000_split_logical (rtx operands[3],
38423 enum rtx_code code,
38424 bool complement_final_p,
38425 bool complement_op1_p,
38426 bool complement_op2_p)
38427 {
38428 machine_mode mode = GET_MODE (operands[0]);
38429 machine_mode sub_mode;
38430 rtx op0, op1, op2;
38431 int sub_size, regno0, regno1, nregs, i;
38432
38433 /* If this is DImode, use the specialized version that can run before
38434 register allocation. */
38435 if (mode == DImode && !TARGET_POWERPC64)
38436 {
38437 rs6000_split_logical_di (operands, code, complement_final_p,
38438 complement_op1_p, complement_op2_p);
38439 return;
38440 }
38441
38442 op0 = operands[0];
38443 op1 = operands[1];
38444 op2 = (code == NOT) ? NULL_RTX : operands[2];
38445 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
38446 sub_size = GET_MODE_SIZE (sub_mode);
38447 regno0 = REGNO (op0);
38448 regno1 = REGNO (op1);
38449
38450 gcc_assert (reload_completed);
38451 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38452 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
38453
38454 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
38455 gcc_assert (nregs > 1);
38456
38457 if (op2 && REG_P (op2))
38458 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
38459
38460 for (i = 0; i < nregs; i++)
38461 {
38462 int offset = i * sub_size;
38463 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
38464 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
38465 rtx sub_op2 = ((code == NOT)
38466 ? NULL_RTX
38467 : simplify_subreg (sub_mode, op2, mode, offset));
38468
38469 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
38470 complement_final_p, complement_op1_p,
38471 complement_op2_p);
38472 }
38473
38474 return;
38475 }
38476
38477 \f
38478 /* Return true if the peephole2 pass can combine an addis instruction with a
38479 D-form load whose offset allows the pair to be fused together on a
38480 power8. */
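/* For example, the peephole can fuse a TOC-relative pair such as
   (a sketch; the exact relocations depend on the target ABI):

     addis r10,r2,sym@toc@ha
     lwz   r10,sym@toc@l(r10)

   where the addis result feeds only the load and both end up setting the
   same register. */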
38481
38482 bool
38483 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
38484 rtx addis_value, /* addis value. */
38485 rtx target, /* target register that is loaded. */
38486 rtx mem) /* bottom part of the memory addr. */
38487 {
38488 rtx addr;
38489 rtx base_reg;
38490
38491 /* Validate arguments. */
38492 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
38493 return false;
38494
38495 if (!base_reg_operand (target, GET_MODE (target)))
38496 return false;
38497
38498 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
38499 return false;
38500
38501 /* Allow sign/zero extension. */
38502 if (GET_CODE (mem) == ZERO_EXTEND
38503 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
38504 mem = XEXP (mem, 0);
38505
38506 if (!MEM_P (mem))
38507 return false;
38508
38509 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
38510 return false;
38511
38512 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
38513 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
38514 return false;
38515
38516 /* Validate that the register used to load the high value is either the
38517 register being loaded, or we can safely replace its use.
38518
38519 This function is only called from the peephole2 pass and we assume that
38520 there are 2 instructions in the peephole (addis and load), so we check
38521 that the target register is not used in the memory address and that the
38522 register holding the addis result is dead after the peephole.
38523 if (REGNO (addis_reg) != REGNO (target))
38524 {
38525 if (reg_mentioned_p (target, mem))
38526 return false;
38527
38528 if (!peep2_reg_dead_p (2, addis_reg))
38529 return false;
38530
38531 /* If the target register being loaded is the stack pointer, we must
38532 avoid loading any other value into it, even temporarily. */
38533 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
38534 return false;
38535 }
38536
38537 base_reg = XEXP (addr, 0);
38538 return REGNO (addis_reg) == REGNO (base_reg);
38539 }
38540
38541 /* During the peephole2 pass, adjust and expand the insns for a load fusion
38542 sequence. We adjust the addis register to use the target register. If the
38543 load sign extends, we change the code to do a zero-extending load followed
38544 by an explicit sign extension, since the fusion only covers zero-extending
38545 loads.
38546
38547 The operands are:
38548 operands[0] register set with addis (to be replaced with target)
38549 operands[1] value set via addis
38550 operands[2] target register being loaded
38551 operands[3] D-form memory reference using operands[0]. */
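/* For a sign-extending load such as lha, the fused sequence instead uses
   the zero-extending form plus an explicit sign extension afterwards,
   e.g. (a sketch):

     addis target,base,sym@ha
     lhz   target,sym@l(target)
     extsh target,target  */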
38552
38553 void
38554 expand_fusion_gpr_load (rtx *operands)
38555 {
38556 rtx addis_value = operands[1];
38557 rtx target = operands[2];
38558 rtx orig_mem = operands[3];
38559 rtx new_addr, new_mem, orig_addr, offset;
38560 enum rtx_code plus_or_lo_sum;
38561 machine_mode target_mode = GET_MODE (target);
38562 machine_mode extend_mode = target_mode;
38563 machine_mode ptr_mode = Pmode;
38564 enum rtx_code extend = UNKNOWN;
38565
38566 if (GET_CODE (orig_mem) == ZERO_EXTEND
38567 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
38568 {
38569 extend = GET_CODE (orig_mem);
38570 orig_mem = XEXP (orig_mem, 0);
38571 target_mode = GET_MODE (orig_mem);
38572 }
38573
38574 gcc_assert (MEM_P (orig_mem));
38575
38576 orig_addr = XEXP (orig_mem, 0);
38577 plus_or_lo_sum = GET_CODE (orig_addr);
38578 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
38579
38580 offset = XEXP (orig_addr, 1);
38581 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
38582 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
38583
38584 if (extend != UNKNOWN)
38585 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
38586
38587 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
38588 UNSPEC_FUSION_GPR);
38589 emit_insn (gen_rtx_SET (target, new_mem));
38590
38591 if (extend == SIGN_EXTEND)
38592 {
38593 int sub_off = ((BYTES_BIG_ENDIAN)
38594 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
38595 : 0);
38596 rtx sign_reg
38597 = simplify_subreg (target_mode, target, extend_mode, sub_off);
38598
38599 emit_insn (gen_rtx_SET (target,
38600 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
38601 }
38602
38603 return;
38604 }
38605
38606 /* Emit the addis instruction that will be part of a fused instruction
38607 sequence. */
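/* Depending on ADDIS_VALUE, the instruction emitted is one of
   (a sketch; rt/ra stand for the target and base registers):

     lis   rt,HI               -- 16-bit shifted constant
     addis rt,ra,HI            -- register plus 16-bit shifted constant
     addis rt,r2,sym@toc@ha    -- TOC-relative high part (ELF)  */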
38608
38609 void
38610 emit_fusion_addis (rtx target, rtx addis_value)
38611 {
38612 rtx fuse_ops[10];
38613 const char *addis_str = NULL;
38614
38615 /* Emit the addis instruction. */
38616 fuse_ops[0] = target;
38617 if (satisfies_constraint_L (addis_value))
38618 {
38619 fuse_ops[1] = addis_value;
38620 addis_str = "lis %0,%v1";
38621 }
38622
38623 else if (GET_CODE (addis_value) == PLUS)
38624 {
38625 rtx op0 = XEXP (addis_value, 0);
38626 rtx op1 = XEXP (addis_value, 1);
38627
38628 if (REG_P (op0) && CONST_INT_P (op1)
38629 && satisfies_constraint_L (op1))
38630 {
38631 fuse_ops[1] = op0;
38632 fuse_ops[2] = op1;
38633 addis_str = "addis %0,%1,%v2";
38634 }
38635 }
38636
38637 else if (GET_CODE (addis_value) == HIGH)
38638 {
38639 rtx value = XEXP (addis_value, 0);
38640 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
38641 {
38642 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
38643 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
38644 if (TARGET_ELF)
38645 addis_str = "addis %0,%2,%1@toc@ha";
38646
38647 else if (TARGET_XCOFF)
38648 addis_str = "addis %0,%1@u(%2)";
38649
38650 else
38651 gcc_unreachable ();
38652 }
38653
38654 else if (GET_CODE (value) == PLUS)
38655 {
38656 rtx op0 = XEXP (value, 0);
38657 rtx op1 = XEXP (value, 1);
38658
38659 if (GET_CODE (op0) == UNSPEC
38660 && XINT (op0, 1) == UNSPEC_TOCREL
38661 && CONST_INT_P (op1))
38662 {
38663 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
38664 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
38665 fuse_ops[3] = op1;
38666 if (TARGET_ELF)
38667 addis_str = "addis %0,%2,%1+%3@toc@ha";
38668
38669 else if (TARGET_XCOFF)
38670 addis_str = "addis %0,%1+%3@u(%2)";
38671
38672 else
38673 gcc_unreachable ();
38674 }
38675 }
38676
38677 else if (satisfies_constraint_L (value))
38678 {
38679 fuse_ops[1] = value;
38680 addis_str = "lis %0,%v1";
38681 }
38682
38683 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
38684 {
38685 fuse_ops[1] = value;
38686 addis_str = "lis %0,%1@ha";
38687 }
38688 }
38689
38690 if (!addis_str)
38691 fatal_insn ("Could not generate addis value for fusion", addis_value);
38692
38693 output_asm_insn (addis_str, fuse_ops);
38694 }
38695
38696 /* Emit a D-form load or store instruction that is the second instruction
38697 of a fusion sequence. */
38698
38699 static void
38700 emit_fusion_load (rtx load_reg, rtx addis_reg, rtx offset, const char *insn_str)
38701 {
38702 rtx fuse_ops[10];
38703 char insn_template[80];
38704
38705 fuse_ops[0] = load_reg;
38706 fuse_ops[1] = addis_reg;
38707
38708 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
38709 {
38710 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
38711 fuse_ops[2] = offset;
38712 output_asm_insn (insn_template, fuse_ops);
38713 }
38714
38715 else if (GET_CODE (offset) == UNSPEC
38716 && XINT (offset, 1) == UNSPEC_TOCREL)
38717 {
38718 if (TARGET_ELF)
38719 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
38720
38721 else if (TARGET_XCOFF)
38722 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38723
38724 else
38725 gcc_unreachable ();
38726
38727 fuse_ops[2] = XVECEXP (offset, 0, 0);
38728 output_asm_insn (insn_template, fuse_ops);
38729 }
38730
38731 else if (GET_CODE (offset) == PLUS
38732 && GET_CODE (XEXP (offset, 0)) == UNSPEC
38733 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
38734 && CONST_INT_P (XEXP (offset, 1)))
38735 {
38736 rtx tocrel_unspec = XEXP (offset, 0);
38737 if (TARGET_ELF)
38738 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
38739
38740 else if (TARGET_XCOFF)
38741 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
38742
38743 else
38744 gcc_unreachable ();
38745
38746 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
38747 fuse_ops[3] = XEXP (offset, 1);
38748 output_asm_insn (insn_template, fuse_ops);
38749 }
38750
38751 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
38752 {
38753 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
38754
38755 fuse_ops[2] = offset;
38756 output_asm_insn (insn_template, fuse_ops);
38757 }
38758
38759 else
38760 fatal_insn ("Unable to generate load/store offset for fusion", offset);
38761
38762 return;
38763 }
38764
38765 /* Given an address, convert it into the addis and load offset parts. Addresses
38766 created during the peephole2 process look like:
38767 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
38768 (unspec [(...)] UNSPEC_TOCREL)) */
38769
38770 static void
38771 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
38772 {
38773 rtx hi, lo;
38774
38775 if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
38776 {
38777 hi = XEXP (addr, 0);
38778 lo = XEXP (addr, 1);
38779 }
38780 else
38781 gcc_unreachable ();
38782
38783 *p_hi = hi;
38784 *p_lo = lo;
38785 }
38786
38787 /* Return a string to fuse an addis instruction with a GPR load into the same
38788 register that the addis instruction set up. The address that is used
38789 is the logical address that was formed during peephole2:
38790 (lo_sum (high) (low-part))
38791
38792 The code is complicated, so we call output_asm_insn directly, and just
38793 return "". */
38794
38795 const char *
38796 emit_fusion_gpr_load (rtx target, rtx mem)
38797 {
38798 rtx addis_value;
38799 rtx addr;
38800 rtx load_offset;
38801 const char *load_str = NULL;
38802 machine_mode mode;
38803
38804 if (GET_CODE (mem) == ZERO_EXTEND)
38805 mem = XEXP (mem, 0);
38806
38807 gcc_assert (REG_P (target) && MEM_P (mem));
38808
38809 addr = XEXP (mem, 0);
38810 fusion_split_address (addr, &addis_value, &load_offset);
38811
38812 /* Now emit the load instruction to the same register. */
38813 mode = GET_MODE (mem);
38814 switch (mode)
38815 {
38816 case E_QImode:
38817 load_str = "lbz";
38818 break;
38819
38820 case E_HImode:
38821 load_str = "lhz";
38822 break;
38823
38824 case E_SImode:
38825 case E_SFmode:
38826 load_str = "lwz";
38827 break;
38828
38829 case E_DImode:
38830 case E_DFmode:
38831 gcc_assert (TARGET_POWERPC64);
38832 load_str = "ld";
38833 break;
38834
38835 default:
38836 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
38837 }
38838
38839 /* Emit the addis instruction. */
38840 emit_fusion_addis (target, addis_value);
38841
38842 /* Emit the D-form load instruction. */
38843 emit_fusion_load (target, target, load_offset, load_str);
38844
38845 return "";
38846 }
38847 \f
38848
38849 #ifdef RS6000_GLIBC_ATOMIC_FENV
38850 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
38851 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
38852 #endif
38853
38854 /* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
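/* A sketch of how the middle end uses the three sequences built below when
   expanding an atomic floating-point compound assignment:

     HOLD   -- run before the operation: save the FP environment and stop
               exceptions from trapping.
     CLEAR  -- run when a compare-and-swap iteration fails and its result is
               discarded: clear any exceptions that iteration raised.
     UPDATE -- run after the result is successfully stored: restore the saved
               environment and raise the newly-set exceptions.  */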
38855
38856 static void
38857 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
38858 {
38859 if (!TARGET_HARD_FLOAT)
38860 {
38861 #ifdef RS6000_GLIBC_ATOMIC_FENV
38862 if (atomic_hold_decl == NULL_TREE)
38863 {
38864 atomic_hold_decl
38865 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38866 get_identifier ("__atomic_feholdexcept"),
38867 build_function_type_list (void_type_node,
38868 double_ptr_type_node,
38869 NULL_TREE));
38870 TREE_PUBLIC (atomic_hold_decl) = 1;
38871 DECL_EXTERNAL (atomic_hold_decl) = 1;
38872 }
38873
38874 if (atomic_clear_decl == NULL_TREE)
38875 {
38876 atomic_clear_decl
38877 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38878 get_identifier ("__atomic_feclearexcept"),
38879 build_function_type_list (void_type_node,
38880 NULL_TREE));
38881 TREE_PUBLIC (atomic_clear_decl) = 1;
38882 DECL_EXTERNAL (atomic_clear_decl) = 1;
38883 }
38884
38885 tree const_double = build_qualified_type (double_type_node,
38886 TYPE_QUAL_CONST);
38887 tree const_double_ptr = build_pointer_type (const_double);
38888 if (atomic_update_decl == NULL_TREE)
38889 {
38890 atomic_update_decl
38891 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
38892 get_identifier ("__atomic_feupdateenv"),
38893 build_function_type_list (void_type_node,
38894 const_double_ptr,
38895 NULL_TREE));
38896 TREE_PUBLIC (atomic_update_decl) = 1;
38897 DECL_EXTERNAL (atomic_update_decl) = 1;
38898 }
38899
38900 tree fenv_var = create_tmp_var_raw (double_type_node);
38901 TREE_ADDRESSABLE (fenv_var) = 1;
38902 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
38903
38904 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
38905 *clear = build_call_expr (atomic_clear_decl, 0);
38906 *update = build_call_expr (atomic_update_decl, 1,
38907 fold_convert (const_double_ptr, fenv_addr));
38908 #endif
38909 return;
38910 }
38911
38912 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
38913 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
38914 tree call_mffs = build_call_expr (mffs, 0);
38915
38916 /* Generates the equivalent of feholdexcept (&fenv_var):
38917
38918 fenv_var = __builtin_mffs ();
38919 double fenv_hold;
38920 *(uint64_t*)&fenv_hold = *(uint64_t*)&fenv_var & 0xffffffff00000007LL;
38921 __builtin_mtfsf (0xff, fenv_hold); */
38922
38923 /* Mask to clear everything except for the rounding modes and non-IEEE
38924 arithmetic flag. */
38925 const unsigned HOST_WIDE_INT hold_exception_mask =
38926 HOST_WIDE_INT_C (0xffffffff00000007);
38927
38928 tree fenv_var = create_tmp_var_raw (double_type_node);
38929
38930 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
38931
38932 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
38933 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38934 build_int_cst (uint64_type_node,
38935 hold_exception_mask));
38936
38937 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38938 fenv_llu_and);
38939
38940 tree hold_mtfsf = build_call_expr (mtfsf, 2,
38941 build_int_cst (unsigned_type_node, 0xff),
38942 fenv_hold_mtfsf);
38943
38944 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
38945
38946 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
38947
38948 double fenv_clear = __builtin_mffs ();
38949 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
38950 __builtin_mtfsf (0xff, fenv_clear); */
38951
38952 /* Mask to clear the entire low word of the FPSCR image, including the
38953 exception flags, exception enables, and rounding modes. */
38954 const unsigned HOST_WIDE_INT clear_exception_mask =
38955 HOST_WIDE_INT_C (0xffffffff00000000);
38956
38957 tree fenv_clear = create_tmp_var_raw (double_type_node);
38958
38959 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
38960
38961 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
38962 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
38963 fenv_clean_llu,
38964 build_int_cst (uint64_type_node,
38965 clear_exception_mask));
38966
38967 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
38968 fenv_clear_llu_and);
38969
38970 tree clear_mtfsf = build_call_expr (mtfsf, 2,
38971 build_int_cst (unsigned_type_node, 0xff),
38972 fenv_clear_mtfsf);
38973
38974 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
38975
38976 /* Generates the equivalent of feupdateenv (&fenv_var):
38977
38978 double old_fenv = __builtin_mffs ();
38979 double fenv_update;
38980 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
38981 (*(uint64_t*)&fenv_var & 0x1ff80fff);
38982 __builtin_mtfsf (0xff, fenv_update); */
38983
38984 const unsigned HOST_WIDE_INT update_exception_mask =
38985 HOST_WIDE_INT_C (0xffffffff1fffff00);
38986 const unsigned HOST_WIDE_INT new_exception_mask =
38987 HOST_WIDE_INT_C (0x1ff80fff);
38988
38989 tree old_fenv = create_tmp_var_raw (double_type_node);
38990 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
38991
38992 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
38993 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
38994 build_int_cst (uint64_type_node,
38995 update_exception_mask));
38996
38997 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
38998 build_int_cst (uint64_type_node,
38999 new_exception_mask));
39000
39001 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
39002 old_llu_and, new_llu_and);
39003
39004 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
39005 new_llu_mask);
39006
39007 tree update_mtfsf = build_call_expr (mtfsf, 2,
39008 build_int_cst (unsigned_type_node, 0xff),
39009 fenv_update_mtfsf);
39010
39011 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
39012 }
39013
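/* Convert two V2DF vectors SRC1 and SRC2 to single precision and pack the
   four results into the V4SF vector DST. For example (a sketch), with
   src1 = { a0, a1 } and src2 = { b0, b1 } the result is
   dst = { (float) a0, (float) a1, (float) b0, (float) b1 }. */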
39014 void
39015 rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
39016 {
39017 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39018
39019 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39020 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39021
39022 /* The layout of the destination of the vmrgew instruction is:
39023 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39024 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39025 vmrgew instruction will be correct. */
39026 if (BYTES_BIG_ENDIAN)
39027 {
39028 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
39029 GEN_INT (0)));
39030 emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
39031 GEN_INT (3)));
39032 }
39033 else
39034 {
39035 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
39036 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
39037 }
39038
39039 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39040 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39041
39042 emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
39043 emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));
39044
39045 if (BYTES_BIG_ENDIAN)
39046 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39047 else
39048 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39049 }
39050
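/* As above, but for V2DI integer inputs: convert the four 64-bit elements
   of SRC1 and SRC2 to single precision, using xvcvsxdsp for signed and
   xvcvuxdsp for unsigned conversions, and pack them into the V4SF vector
   DST in the same order. */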
39051 void
39052 rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
39053 {
39054 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39055
39056 rtx_tmp0 = gen_reg_rtx (V2DImode);
39057 rtx_tmp1 = gen_reg_rtx (V2DImode);
39058
39059 /* The layout of the destination of the vmrgew instruction is:
39060 rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
39061 Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after the
39062 vmrgew instruction will be correct. */
39063 if (BYTES_BIG_ENDIAN)
39064 {
39065 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
39066 emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
39067 }
39068 else
39069 {
39070 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
39071 emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
39072 }
39073
39074 rtx_tmp2 = gen_reg_rtx (V4SFmode);
39075 rtx_tmp3 = gen_reg_rtx (V4SFmode);
39076
39077 if (signed_convert)
39078 {
39079 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
39080 emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
39081 }
39082 else
39083 {
39084 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
39085 emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
39086 }
39087
39088 if (BYTES_BIG_ENDIAN)
39089 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
39090 else
39091 emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
39092 }
39093
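/* Convert the four double-precision elements of the V2DF vectors SRC1 and
   SRC2 to 32-bit integers, using xvcvdpsxws for signed and xvcvdpuxws for
   unsigned conversions, and pack them into the V4SI vector DST. */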
39094 void
39095 rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
39096 rtx src2)
39097 {
39098 rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;
39099
39100 rtx_tmp0 = gen_reg_rtx (V2DFmode);
39101 rtx_tmp1 = gen_reg_rtx (V2DFmode);
39102
39103 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
39104 emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));
39105
39106 rtx_tmp2 = gen_reg_rtx (V4SImode);
39107 rtx_tmp3 = gen_reg_rtx (V4SImode);
39108
39109 if (signed_convert)
39110 {
39111 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
39112 emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
39113 }
39114 else
39115 {
39116 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
39117 emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
39118 }
39119
39120 emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
39121 }
39122
39123 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
39124
39125 static bool
39126 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
39127 optimization_type opt_type)
39128 {
39129 switch (op)
39130 {
39131 case rsqrt_optab:
39132 return (opt_type == OPTIMIZE_FOR_SPEED
39133 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
39134
39135 default:
39136 return true;
39137 }
39138 }
39139
39140 /* Implement TARGET_CONSTANT_ALIGNMENT. */
39141
39142 static HOST_WIDE_INT
39143 rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
39144 {
39145 if (TREE_CODE (exp) == STRING_CST
39146 && (STRICT_ALIGNMENT || !optimize_size))
39147 return MAX (align, BITS_PER_WORD);
39148 return align;
39149 }
39150
39151 /* Implement TARGET_STARTING_FRAME_OFFSET. */
39152
39153 static HOST_WIDE_INT
39154 rs6000_starting_frame_offset (void)
39155 {
39156 if (FRAME_GROWS_DOWNWARD)
39157 return 0;
39158 return RS6000_STARTING_FRAME_OFFSET;
39159 }
39160 \f
39161
39162 /* Create an alias for a mangled name where we have changed the mangling (in
39163 GCC 8.1, we used U10__float128, and now we use u9__ieee128). This is called
39164 via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME. */
39165
39166 #if TARGET_ELF && RS6000_WEAK
39167 static void
39168 rs6000_globalize_decl_name (FILE * stream, tree decl)
39169 {
39170 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
39171
39172 targetm.asm_out.globalize_label (stream, name);
39173
39174 if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
39175 {
39176 tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
39177 const char *old_name;
39178
39179 ieee128_mangling_gcc_8_1 = true;
39180 lang_hooks.set_decl_assembler_name (decl);
39181 old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
39182 SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
39183 ieee128_mangling_gcc_8_1 = false;
39184
39185 if (strcmp (name, old_name) != 0)
39186 {
39187 fprintf (stream, "\t.weak %s\n", old_name);
39188 fprintf (stream, "\t.set %s,%s\n", old_name, name);
39189 }
39190 }
39191 }
39192 #endif
39193
39194 \f
39195 /* On 64-bit Linux and Freebsd systems, possibly switch the long double library
39196 function names from <foo>l to <foo>f128 if the default long double type is
39197 IEEE 128-bit. Typically, with the C and C++ languages, the standard math.h
39198 include file switches the names on systems that support long double as IEEE
39199 128-bit, but that doesn't work if the user uses __builtin_<foo>l directly.
39200 In the future, glibc will export names like __ieee128_sinf128 and we can
39201 switch to using those instead of using sinf128, which pollutes the user's
39202 namespace.
39203
39204 This will switch the names for Fortran math functions as well (Fortran does
39205 not use math.h). However, Fortran needs other changes to the compiler and
39206 library before you can switch the real*16 type at compile time.
39207
39208 We use the TARGET_MANGLE_DECL_ASSEMBLER_NAME hook to change this name. We
39209 only do this if the default is that long double is IBM extended double, and
39210 the user asked for IEEE 128-bit. */
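/* For example, under those conditions a call to __builtin_sinl is emitted
   as a call to sinf128 rather than sinl. */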
39211
39212 static tree
39213 rs6000_mangle_decl_assembler_name (tree decl, tree id)
39214 {
39215 if (!TARGET_IEEEQUAD_DEFAULT && TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
39216 && TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl))
39217 {
39218 size_t len = IDENTIFIER_LENGTH (id);
39219 const char *name = IDENTIFIER_POINTER (id);
39220
39221 if (name[len - 1] == 'l')
39222 {
39223 bool uses_ieee128_p = false;
39224 tree type = TREE_TYPE (decl);
39225 machine_mode ret_mode = TYPE_MODE (type);
39226
39227 /* See if the function returns an IEEE 128-bit floating point type or
39228 complex type. */
39229 if (ret_mode == TFmode || ret_mode == TCmode)
39230 uses_ieee128_p = true;
39231 else
39232 {
39233 function_args_iterator args_iter;
39234 tree arg;
39235
39236 /* See if the function passes an IEEE 128-bit floating point type
39237 or complex type. */
39238 FOREACH_FUNCTION_ARGS (type, arg, args_iter)
39239 {
39240 machine_mode arg_mode = TYPE_MODE (arg);
39241 if (arg_mode == TFmode || arg_mode == TCmode)
39242 {
39243 uses_ieee128_p = true;
39244 break;
39245 }
39246 }
39247 }
39248
39249 /* If we passed or returned an IEEE 128-bit floating point type,
39250 change the name. */
39251 if (uses_ieee128_p)
39252 {
39253 char *name2 = (char *) alloca (len + 4);
39254 memcpy (name2, name, len - 1);
39255 strcpy (name2 + len - 1, "f128");
39256 id = get_identifier (name2);
39257 }
39258 }
39259 }
39260
39261 return id;
39262 }
39263
39264 \f
39265 struct gcc_target targetm = TARGET_INITIALIZER;
39266
39267 #include "gt-rs6000.h"